diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index feb687200ed..300886a6973 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -1,52 +1,5 @@ - +# This repository is locked -##### ISSUE TYPE - - - Bug Report - - Feature Idea - - Documentation Report +Please open all new issues and pull requests in https://github.com/ansible/ansible -##### COMPONENT NAME - - -##### ANSIBLE VERSION -``` - -``` - -##### CONFIGURATION - - -##### OS / ENVIRONMENT - - -##### SUMMARY - - -##### STEPS TO REPRODUCE - - -``` - -``` - - - -##### EXPECTED RESULTS - - -##### ACTUAL RESULTS - - -``` - -``` +For more information please see http://docs.ansible.com/ansible/dev_guide/repomerge.html diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index d8b8e17cbd5..300886a6973 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,27 +1,5 @@ -##### ISSUE TYPE - - - Feature Pull Request - - New Module Pull Request - - Bugfix Pull Request - - Docs Pull Request +# This repository is locked -##### COMPONENT NAME - +Please open all new issues and pull requests in https://github.com/ansible/ansible -##### ANSIBLE VERSION -``` - -``` - -##### SUMMARY - - - - -``` - -``` +For more information please see http://docs.ansible.com/ansible/dev_guide/repomerge.html diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 91d1b9585d7..00000000000 --- a/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -sudo: false -language: python -python: - - "2.7" -addons: - apt: - sources: - - deadsnakes - packages: - - python2.4 - - python2.6 -script: - - python2.4 -m compileall -fq -x 'cloud/' . - - python2.4 -m compileall -fq cloud/amazon/_ec2_ami_search.py cloud/amazon/ec2_facts.py - - python2.6 -m compileall -fq . - - python2.7 -m compileall -fq . 
- #- ./test-docs.sh core diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 858b757902e..00000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,32 +0,0 @@ -Welcome To Ansible GitHub -========================= - -Hi! Nice to see you here! - -If you'd like to ask a question -=============================== - -Please see [this web page ](http://docs.ansible.com/community.html) for community information, which includes pointers on how to ask questions on the [mailing lists](http://docs.ansible.com/community.html#mailing-list-information) and IRC. - -The github issue tracker is not the best place for questions for various reasons, but both IRC and the mailing list are very helpful places for those things, and that page has the pointers to those. - -If you'd like to contribute code -================================ - -Please see [this web page](http://docs.ansible.com/community.html) for information about the contribution process. Important license agreement information is also included on that page. - -If you'd like to file a bug -=========================== - -I'd also read the community page above, but in particular, make sure you copy [this issue template](https://github.com/ansible/ansible-modules-core/blob/devel/ISSUE_TEMPLATE.md) into your ticket description. We have a friendly neighborhood bot that will remind you if you forget :) This template helps us organize tickets faster and prevents asking some repeated questions, so it's very helpful to us and we appreciate your help with it. - -Also please make sure you are testing on the latest released version of Ansible or the development branch. - -If you'd like to contribute code to an existing module -====================================================== -Each module in Core is maintained by the owner of that module; each module's owner is indicated in the documentation section of the module itself. 
Any pull request for a module that is given a +1 by the owner in the comments will be merged by the Ansible team. - -Thanks! - - - diff --git a/COPYING b/COPYING deleted file mode 100644 index 10926e87f11..00000000000 --- a/COPYING +++ /dev/null @@ -1,675 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. 
You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. 
- - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. 
- - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. 
Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. 
- - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. 
This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. 
- - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. 
- - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. 
- - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. 
If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. 
If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. 
- - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. 
For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. 
- - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. 
The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. 
- diff --git a/README.md b/README.md index 09a35854879..3bb1f395c56 100644 --- a/README.md +++ b/README.md @@ -1,28 +1,6 @@ -ansible-modules-core -==================== +**NOTE:** As of Ansible 2.3, modules are now in the +[main Ansible repository](https://github.com/ansible/ansible/tree/devel/lib/ansible/modules). -This repo contains Ansible's most popular modules that are shipped with Ansible. +See the [repo merge guide](https://docs.ansible.com/ansible/dev_guide/repomerge.html) for more information. -New module submissions for modules that do not yet exist should be submitted to ansible-modules-extras, rather than this repo. - -Take care to submit tickets to the appropriate repo where modules are contained. The docs.ansible.com website indicates this at the bottom of each module documentation page. - -Reporting bugs -============== - -Take care to submit tickets to the appropriate repo where modules are contained. The repo is mentioned at the bottom of module documentation page at [docs.ansible.com](http://docs.ansible.com/). - -Testing modules -=============== - -Ansible [module development guide](http://docs.ansible.com/developing_modules.html#testing-modules) contains the latest info about that. - -License -======= - -As with Ansible, modules distributed with Ansible are GPLv3 licensed. User generated modules not part of this project can be of any license. - -Installation -============ - -There should be no need to install this repo separately as it should be included in any Ansible install using the official documented methods. +This repo still exists to allow bug fixes for `stable-2.2` and older releases. 
diff --git a/VERSION b/VERSION deleted file mode 100644 index 47c909bbc53..00000000000 --- a/VERSION +++ /dev/null @@ -1 +0,0 @@ -2.0.0-0.5.beta3 diff --git a/cloud/amazon/_ec2_ami_search.py b/cloud/amazon/_ec2_ami_search.py index 7984b67a29f..a497ab851d9 100644 --- a/cloud/amazon/_ec2_ami_search.py +++ b/cloud/amazon/_ec2_ami_search.py @@ -17,6 +17,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_ami_search @@ -56,9 +60,9 @@ description: EC2 region required: false default: us-east-1 - choices: ["ap-northeast-1", "ap-southeast-1", "ap-southeast-2", - "eu-central-1", "eu-west-1", "sa-east-1", "us-east-1", - "us-west-1", "us-west-2", "us-gov-west-1"] + choices: ["ap-northeast-1", "ap-southeast-1", "ap-northeast-2", + "ap-southeast-2", "eu-central-1", "eu-west-1", "sa-east-1", + "us-east-1", "us-west-1", "us-west-2", "us-gov-west-1"] virt: description: virutalization type required: false @@ -74,10 +78,18 @@ connection: local tasks: - name: Get the Ubuntu precise AMI - ec2_ami_search: distro=ubuntu release=precise region=us-west-1 store=instance-store + ec2_ami_search: + distro: ubuntu + release: precise + region: us-west-1 + store: instance-store register: ubuntu_image + - name: Start the EC2 instance - ec2: image={{ ubuntu_image.ami }} instance_type=m1.small key_name=mykey + ec2: + image: "{{ ubuntu_image.ami }}" + instance_type: m1.small + key_name: mykey ''' import csv @@ -88,7 +100,9 @@ AWS_REGIONS = ['ap-northeast-1', 'ap-southeast-1', + 'ap-northeast-2', 'ap-southeast-2', + 'ap-south-1', 'eu-central-1', 'eu-west-1', 'sa-east-1', diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index 8a142fd1ee1..c05ae2df280 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -14,12 +14,25 @@ # You should have received a copy 
of the GNU General Public License # along with Ansible. If not, see . +# upcoming features: +# - Ted's multifile YAML concatenation +# - changesets (and blocking/waiting for them) +# - finish AWSRetry conversion +# - move create/update code out of main +# - unit tests + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cloudformation short_description: Create or delete an AWS CloudFormation stack description: - Launches an AWS CloudFormation stack and waits for it complete. +notes: + - As of version 2.3, migrated to boto3 to enable new features. To match existing behavior, YAML parsing is done in the module, not given to AWS as YAML. This will change (in fact, it may change before 2.3 is out). version_added: "1.1" options: stack_name: @@ -56,23 +69,16 @@ version_added: "2.0" stack_policy: description: - - the path of the cloudformation stack policy + - the path of the cloudformation stack policy. A policy cannot be removed once placed, but it can be modified. (for instance, [allow all updates](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051) required: false default: null version_added: "1.9" tags: description: - - Dictionary of tags to associate with stack and it's resources during stack creation. Cannot be updated later. - Requires at least Boto version 2.6.0. + - Dictionary of tags to associate with stack and its resources during stack creation. Can be updated later, updating tags removes previous entries. required: false default: null version_added: "1.4" - region: - description: - - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used. - required: true - aliases: ['aws_region', 'ec2_region'] - version_added: "1.5" template_url: description: - Location of file containing the template body. 
The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region as the stack. This parameter is mutually exclusive with 'template'. Either one of them is required if "state" parameter is "present" @@ -80,23 +86,32 @@ version_added: "2.0" template_format: description: - - For local templates, allows specification of json or yaml format + - (deprecated) For local templates, allows specification of json or yaml format. Templates are now passed raw to CloudFormation regardless of format. This parameter is ignored since Ansible 2.3. default: json choices: [ json, yaml ] required: false version_added: "2.0" + role_arn: + description: + - The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role docs U(http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html) + required: false + default: null + version_added: "2.3" author: "James S. Martin (@jsmartin)" -extends_documentation_fragment: aws +extends_documentation_fragment: +- aws +- ec2 +requires: [ botocore>=1.4.57 ] ''' EXAMPLES = ''' # Basic task example - name: launch ansible cloudformation example cloudformation: - stack_name: "ansible-cloudformation" + stack_name: "ansible-cloudformation" state: "present" - region: "us-east-1" + region: "us-east-1" disable_rollback: true template: "files/cloudformation-example.json" template_parameters: @@ -110,9 +125,9 @@ # Basic role example - name: launch ansible cloudformation example cloudformation: - stack_name: "ansible-cloudformation" + stack_name: "ansible-cloudformation" state: "present" - region: "us-east-1" + region: "us-east-1" disable_rollback: true template: "roles/cloudformation/files/cloudformation-example.json" template_parameters: @@ -132,9 +147,29 @@ # Use a template from a URL - name: launch ansible cloudformation example cloudformation: - stack_name="ansible-cloudformation" state=present - region=us-east-1 disable_rollback=true - 
template_url=https://s3.amazonaws.com/my-bucket/cloudformation.template + stack_name: "ansible-cloudformation" + state: present + region: us-east-1 + disable_rollback: true + template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template + args: + template_parameters: + KeyName: jmartin + DiskType: ephemeral + InstanceType: m1.small + ClusterSize: 3 + tags: + Stack: ansible-cloudformation + +# Use a template from a URL, and assume a role to execute +- name: launch ansible cloudformation example with role assumption + cloudformation: + stack_name: "ansible-cloudformation" + state: present + region: us-east-1 + disable_rollback: true + template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template + role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role' args: template_parameters: KeyName: jmartin @@ -145,24 +180,59 @@ Stack: ansible-cloudformation ''' +RETURN = ''' +events: + type: list + description: Most recent events in Cloudformation's event log. This may be from a previous run in some cases. + returned: always + sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"] +log: + description: Debugging logs. Useful when modifying or finding an error. + returned: always + type: list + sample: ["updating stack"] +stack_resources: + description: AWS stack resources and their status. List of dictionaries, one dict per resource. + type: list + sample: [ + { + "last_updated_time": "2016-10-11T19:40:14.979000+00:00", + "logical_resource_id": "CFTestSg", + "physical_resource_id": "cloudformation2-CFTestSg-16UQ4CYQ57O9F", + "resource_type": "AWS::EC2::SecurityGroup", + "status": "UPDATE_COMPLETE", + "status_reason": null + } + ] +stack_outputs: + type: dict + description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary. 
+ returned: always + sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"} +''' + import json import time -import yaml +import sys +import traceback + try: - import boto - import boto.cloudformation.connection - HAS_BOTO = True + import boto3 + import botocore + HAS_BOTO3 = True except ImportError: - HAS_BOTO = False + HAS_BOTO3 = False +# import a class, otherwise we'll use a fully qualified path +from ansible.module_utils.ec2 import AWSRetry def boto_exception(err): '''generic error message handler''' if hasattr(err, 'error_message'): error = err.error_message elif hasattr(err, 'message'): - error = err.message + error = err.message + ' ' + str(err) + ' - ' + str(type(err)) else: error = '%s: %s' % (Exception, err) @@ -170,73 +240,112 @@ def boto_exception(err): def boto_version_required(version_tuple): - parts = boto.Version.split('.') + parts = boto3.__version__.split('.') boto_version = [] try: for part in parts: boto_version.append(int(part)) - except: + except ValueError: boto_version.append(-1) return tuple(boto_version) >= tuple(version_tuple) +def get_stack_events(cfn, stack_name): + '''This event data was never correct, it worked as a side effect. So the v2.3 format is different.''' + ret = {'events':[], 'log':[]} + + try: + events = cfn.describe_stack_events(StackName=stack_name) + except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err: + error_msg = boto_exception(err) + if 'does not exist' in error_msg: + # missing stack, don't bail. 
+ ret['log'].append('Stack does not exist.') + return ret + ret['log'].append('Unknown error: ' + str(error_msg)) + return ret + + for e in events.get('StackEvents', []): + eventline = 'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**e) + ret['events'].append(eventline) + + if e['ResourceStatus'].endswith('FAILED'): + failline = '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**e) + ret['log'].append(failline) + + return ret def stack_operation(cfn, stack_name, operation): '''gets the status of a stack while it is created/updated/deleted''' existed = [] - result = {} - operation_complete = False - while operation_complete == False: + while True: try: - stack = invoke_with_throttling_retries(cfn.describe_stacks, stack_name)[0] + stack = get_stack_facts(cfn, stack_name) existed.append('yes') except: - if 'yes' in existed: - result = dict(changed=True, - output='Stack Deleted', - events=map(str, list(stack.describe_events()))) + # If the stack previously existed, and now can't be found then it's + # been deleted successfully. + if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways. + ret = get_stack_events(cfn, stack_name) + ret.update({'changed': True, 'output': 'Stack Deleted'}) + return ret + else: + return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()} + ret = get_stack_events(cfn, stack_name) + if not stack: + if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways. 
+ ret = get_stack_events(cfn, stack_name) + ret.update({'changed': True, 'output': 'Stack Deleted'}) + return ret else: - result = dict(changed= True, output='Stack Not Found') - break - if '%s_COMPLETE' % operation == stack.stack_status: - result = dict(changed=True, - events = map(str, list(stack.describe_events())), - output = 'Stack %s complete' % operation) - break - if 'ROLLBACK_COMPLETE' == stack.stack_status or '%s_ROLLBACK_COMPLETE' % operation == stack.stack_status: - result = dict(changed=True, failed=True, - events = map(str, list(stack.describe_events())), - output = 'Problem with %s. Rollback complete' % operation) - break - elif '%s_FAILED' % operation == stack.stack_status: - result = dict(changed=True, failed=True, - events = map(str, list(stack.describe_events())), - output = 'Stack %s failed' % operation) - break - elif '%s_ROLLBACK_FAILED' % operation == stack.stack_status: - result = dict(changed=True, failed=True, - events = map(str, list(stack.describe_events())), - output = 'Stack %s rollback failed' % operation) - break + ret.update({'changed': False, 'failed': True, 'output' : 'Stack not found.'}) + return ret + elif stack['StackStatus'].endswith('_ROLLBACK_COMPLETE'): + ret.update({'changed': True, 'failed' :True, 'output': 'Problem with %s. Rollback complete' % operation}) + return ret + # note the ordering of ROLLBACK_COMPLETE and COMPLETE, because otherwise COMPLETE will match both cases. + elif stack['StackStatus'].endswith('_COMPLETE'): + ret.update({'changed': True, 'output' : 'Stack %s complete' % operation }) + return ret + elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'): + ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation}) + return ret + # note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases. 
+ elif stack['StackStatus'].endswith('_FAILED'): + ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation}) + return ret else: + # this can loop forever :/ time.sleep(5) - return result + return {'failed': True, 'output':'Failed for unknown reasons.'} + +@AWSRetry.backoff(tries=3, delay=5) +def describe_stacks(cfn, stack_name): + return cfn.describe_stacks(StackName=stack_name) + +def get_stack_facts(cfn, stack_name): + try: + stack_response = describe_stacks(cfn, stack_name) + stack_info = stack_response['Stacks'][0] + #except AmazonCloudFormationException as e: + except (botocore.exceptions.ValidationError,botocore.exceptions.ClientError) as err: + error_msg = boto_exception(err) + if 'does not exist'.format(stack_name) in error_msg: + # missing stack, don't bail. + return None + + # other error, bail. + raise err + + if stack_response and stack_response.get('Stacks', None): + stacks = stack_response['Stacks'] + if len(stacks): + stack_info = stacks[0] + + return stack_info -IGNORE_CODE = 'Throttling' -MAX_RETRIES=3 -def invoke_with_throttling_retries(function_ref, *argv): - retries=0 - while True: - try: - retval=function_ref(*argv) - return retval - except boto.exception.BotoServerError, e: - if e.code != IGNORE_CODE or retries==MAX_RETRIES: - raise e - time.sleep(5 * (2**retries)) - retries += 1 def main(): - argument_spec = ec2_argument_spec() + argument_spec = ansible.module_utils.ec2.ec2_argument_spec() argument_spec.update(dict( stack_name=dict(required=True), template_parameters=dict(required=False, type='dict', default={}), @@ -246,7 +355,8 @@ def main(): stack_policy=dict(default=None, required=False), disable_rollback=dict(default=False, type='bool'), template_url=dict(default=None, required=False), - template_format=dict(default='json', choices=['json', 'yaml'], required=False), + template_format=dict(default=None, choices=['json', 'yaml'], required=False), + role_arn=dict(default=None, required=False), 
tags=dict(default=None, type='dict') ) ) @@ -255,122 +365,105 @@ def main(): argument_spec=argument_spec, mutually_exclusive=[['template_url', 'template']], ) - if not HAS_BOTO: - module.fail_json(msg='boto required for this module') + if not HAS_BOTO3: + module.fail_json(msg='boto3 and botocore are required for this module') + # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around. + stack_params = { + 'Capabilities':['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'], + } state = module.params['state'] - stack_name = module.params['stack_name'] + stack_params['StackName'] = module.params['stack_name'] if module.params['template'] is None and module.params['template_url'] is None: if state == 'present': - module.fail_json('Module parameter "template" or "template_url" is required if "state" is "present"') + module.fail_json(msg='Module parameter "template" or "template_url" is required if "state" is "present"') if module.params['template'] is not None: - template_body = open(module.params['template'], 'r').read() - else: - template_body = None - - if module.params['template_format'] == 'yaml': - if template_body is None: - module.fail_json(msg='yaml format only supported for local templates') - else: - template_body = json.dumps(yaml.load(template_body), indent=2) + stack_params['TemplateBody'] = open(module.params['template'], 'r').read() - notification_arns = module.params['notification_arns'] + if module.params.get('notification_arns'): + stack_params['NotificationARNs'] = module.params['notification_arns'].split(',') + else: + stack_params['NotificationARNs'] = [] if module.params['stack_policy'] is not None: - stack_policy_body = open(module.params['stack_policy'], 'r').read() - else: - stack_policy_body = None + stack_params['StackPolicyBody'] = open(module.params['stack_policy'], 'r').read() - disable_rollback = module.params['disable_rollback'] template_parameters = module.params['template_parameters'] - tags = 
module.params['tags'] - template_url = module.params['template_url'] - - region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) + stack_params['Parameters'] = [{'ParameterKey':k, 'ParameterValue':v} for k, v in template_parameters.items()] - kwargs = dict() - if tags is not None: - if not boto_version_required((2,6,0)): - module.fail_json(msg='Module parameter "tags" requires at least Boto version 2.6.0') - kwargs['tags'] = tags + if isinstance(module.params.get('tags'), dict): + stack_params['Tags'] = ansible.module_utils.ec2.ansible_dict_to_boto3_tag_list(module.params['tags']) + if module.params.get('template_url'): + stack_params['TemplateURL'] = module.params['template_url'] - # convert the template parameters ansible passes into a tuple for boto - template_parameters_tup = [(k, v) for k, v in template_parameters.items()] - stack_outputs = {} + if module.params.get('role_arn'): + stack_params['RoleARN'] = module.params['role_arn'] - try: - cfn = connect_to_aws(boto.cloudformation, region, **aws_connect_kwargs) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) update = False result = {} - operation = None + + try: + region, ec2_url, aws_connect_kwargs = ansible.module_utils.ec2.get_aws_connection_info(module, boto3=True) + cfn = ansible.module_utils.ec2.boto3_conn(module, conn_type='client', resource='cloudformation', region=region, endpoint=ec2_url, **aws_connect_kwargs) + except botocore.exceptions.NoCredentialsError as e: + module.fail_json(msg=boto_exception(e)) + + stack_info = get_stack_facts(cfn, stack_params['StackName']) # if state is present we are going to ensure that the stack is either # created or updated - if state == 'present': + if state == 'present' and not stack_info: try: - cfn.create_stack(stack_name, parameters=template_parameters_tup, - template_body=template_body, - notification_arns=notification_arns, - stack_policy_body=stack_policy_body, - template_url=template_url, - 
disable_rollback=disable_rollback, - capabilities=['CAPABILITY_IAM'], - **kwargs) - operation = 'CREATE' - except Exception, err: - error_msg = boto_exception(err) - if 'AlreadyExistsException' in error_msg or 'already exists' in error_msg: - update = True - else: - module.fail_json(msg=error_msg) - if not update: - result = stack_operation(cfn, stack_name, operation) + # 'disablerollback' only applies on creation, not update. + stack_params['DisableRollback'] = module.params['disable_rollback'] - # if the state is present and the stack already exists, we try to update it - # AWS will tell us if the stack template and parameters are the same and - # don't need to be updated. - if update: + cfn.create_stack(**stack_params) + except Exception as err: + error_msg = boto_exception(err) + #return {'error': error_msg} + module.fail_json(msg=error_msg) + result = stack_operation(cfn, stack_params['StackName'], 'CREATE') + if not result: module.fail_json(msg="empty result") + + if state == 'present' and stack_info: + # if the state is present and the stack already exists, we try to update it. + # AWS will tell us if the stack template and parameters are the same and + # don't need to be updated. try: - cfn.update_stack(stack_name, parameters=template_parameters_tup, - template_body=template_body, - notification_arns=notification_arns, - stack_policy_body=stack_policy_body, - disable_rollback=disable_rollback, - template_url=template_url, - capabilities=['CAPABILITY_IAM']) - operation = 'UPDATE' - except Exception, err: + cfn.update_stack(**stack_params) + result = stack_operation(cfn, stack_params['StackName'], 'UPDATE') + except Exception as err: error_msg = boto_exception(err) if 'No updates are to be performed.' 
in error_msg: result = dict(changed=False, output='Stack is already up-to-date.') else: module.fail_json(msg=error_msg) - - if operation == 'UPDATE': - result = stack_operation(cfn, stack_name, operation) + if not result: module.fail_json(msg="empty result") # check the status of the stack while we are creating/updating it. # and get the outputs of the stack if state == 'present' or update: - stack = invoke_with_throttling_retries(cfn.describe_stacks,stack_name)[0] - for output in stack.outputs: - stack_outputs[output.key] = output.value - result['stack_outputs'] = stack_outputs - stack_resources = [] - for res in cfn.list_stack_resources(stack_name): + stack = get_stack_facts(cfn, stack_params['StackName']) + if result.get('stack_outputs') is None: + # always define stack_outputs, but it may be empty + result['stack_outputs'] = {} + for output in stack.get('Outputs', []): + result['stack_outputs'][output['OutputKey']] = output['OutputValue'] + stack_resources = [] + reslist = cfn.list_stack_resources(StackName=stack_params['StackName']) + for res in reslist.get('StackResourceSummaries', []): stack_resources.append({ - "last_updated_time": res.last_updated_time, - "logical_resource_id": res.logical_resource_id, - "physical_resource_id": res.physical_resource_id, - "status": res.resource_status, - "status_reason": res.resource_status_reason, - "resource_type": res.resource_type }) + "logical_resource_id": res['LogicalResourceId'], + "physical_resource_id": res.get('PhysicalResourceId', ''), + "resource_type": res['ResourceType'], + "last_updated_time": res['LastUpdatedTimestamp'], + "status": res['ResourceStatus'], + "status_reason": res.get('ResourceStatusReason') # can be blank, apparently + }) result['stack_resources'] = stack_resources # absent state is different because of the way delete_stack works. 
@@ -378,24 +471,28 @@ def main(): # so must describe the stack first if state == 'absent': + #result = {} try: - invoke_with_throttling_retries(cfn.describe_stacks,stack_name) - operation = 'DELETE' - except Exception, err: - error_msg = boto_exception(err) - if 'Stack:%s does not exist' % stack_name in error_msg: - result = dict(changed=False, output='Stack not found.') + stack = get_stack_facts(cfn, stack_params['StackName']) + if not stack: + result = {'changed': False, 'output': 'Stack not found.'} else: - module.fail_json(msg=error_msg) - if operation == 'DELETE': - cfn.delete_stack(stack_name) - result = stack_operation(cfn, stack_name, operation) - + cfn.delete_stack(StackName=stack_params['StackName']) + result = stack_operation(cfn, stack_params['StackName'], 'DELETE') + except Exception as err: + module.fail_json(msg=boto_exception(err), exception=traceback.format_exc()) + + if module.params['template_format'] is not None: + result['warnings'] = [('Argument `template_format` is deprecated ' + 'since Ansible 2.3, JSON and YAML templates are now passed ' + 'directly to the CloudFormation API.')] module.exit_json(**result) + # import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * +from ansible.module_utils.basic import AnsibleModule +import ansible.module_utils.ec2 + if __name__ == '__main__': main() diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 0b660e0dba7..d19f06f3695 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -14,12 +14,17 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2 short_description: create, terminate, start or stop an instance in ec2 description: - Creates or terminates ec2 instances. 
+ - C(state=restarted) was added in 2.2 version_added: "0.9" options: key_name: @@ -28,6 +33,13 @@ required: false default: null aliases: ['keypair'] + id: + version_added: "1.1" + description: + - identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + required: false + default: null + aliases: [] group: description: - security group (or list of groups) to use with the instance @@ -205,6 +217,13 @@ required: false default: no choices: [ "yes", "no" ] + instance_initiated_shutdown_behavior: + version_added: "2.2" + description: + - Set whether AWS will Stop or Terminate an instance on shutdown + required: false + default: 'stop' + choices: [ "stop", "terminate" ] state: version_added: "1.3" description: @@ -212,7 +231,7 @@ required: false default: 'present' aliases: [] - choices: ['present', 'absent', 'running', 'stopped'] + choices: ['present', 'absent', 'running', 'restarted', 'stopped'] volumes: version_added: "1.5" description: @@ -419,12 +438,21 @@ vpc_subnet_id: subnet-29e63245 assign_public_ip: yes register: ec2 + - name: Add new instance to host group - add_host: hostname={{ item.public_ip }} groupname=launched - with_items: ec2.instances + add_host: + hostname: "{{ item.public_ip }}" + groupname: launched + with_items: "{{ ec2.instances }}" + - name: Wait for SSH to come up - wait_for: host={{ item.public_dns_name }} port=22 delay=60 timeout=320 state=started - with_items: ec2.instances + wait_for: + host: "{{ item.public_dns_name }}" + port: 22 + delay: 60 + timeout: 320 + state: started + with_items: "{{ ec2.instances }}" - name: Configure instance(s) hosts: launched @@ -498,6 +526,15 @@ Name: ExtraPower state: running 
+# +# Restart instances specified by tag +# +- local_action: + module: ec2 + instance_tags: + Name: ExtraPower + state: restarted + # # Enforce that 5 instances with a tag "foo" are running # (Highly recommended!) @@ -563,6 +600,8 @@ import time from ast import literal_eval +from ansible.module_utils.six import iteritems +from ansible.module_utils.six import get_function_code try: import boto.ec2 @@ -590,8 +629,8 @@ def find_running_instances_by_count_tag(module, ec2, count_tag, zone=None): def _set_none_to_blank(dictionary): result = dictionary - for k in result.iterkeys(): - if type(result[k]) == dict: + for k in result: + if isinstance(result[k], dict): result[k] = _set_none_to_blank(result[k]) elif not result[k]: result[k] = "" @@ -605,29 +644,29 @@ def get_reservations(module, ec2, tags=None, state=None, zone=None): if tags is not None: - if type(tags) is str: + if isinstance(tags, str): try: tags = literal_eval(tags) except: pass # if string, we only care that a tag of that name exists - if type(tags) is str: + if isinstance(tags, str): filters.update({"tag-key": tags}) # if list, append each item to filters - if type(tags) is list: + if isinstance(tags, list): for x in tags: - if type(x) is dict: + if isinstance(x, dict): x = _set_none_to_blank(x) - filters.update(dict(("tag:"+tn, tv) for (tn,tv) in x.iteritems())) + filters.update(dict(("tag:"+tn, tv) for (tn,tv) in iteritems(x))) else: filters.update({"tag-key": x}) # if dict, add the key and value to the filter - if type(tags) is dict: + if isinstance(tags, dict): tags = _set_none_to_blank(tags) - filters.update(dict(("tag:"+tn, tv) for (tn,tv) in tags.iteritems())) + filters.update(dict(("tag:"+tn, tv) for (tn,tv) in iteritems(tags))) if state: # http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api @@ -727,15 +766,15 @@ def boto_supports_profile_name_arg(ec2): True if Boto library accept instance_profile_name argument, else false """ run_instances_method = 
def await_spot_requests(module, ec2, spot_requests, count):
    """
    Poll a group of spot instance requests until all are fulfilled or fail.

    module: Ansible module object
    ec2: authenticated ec2 connection object
    spot_requests: boto.ec2.spotinstancerequest.SpotInstanceRequest objects
        returned by ec2.request_spot_instances
    count: total number of instances expected from the spot requests

    Returns:
        the instance IDs created by the spot request(s)

    Calls module.fail_json (which exits) if a request fails, is cancelled,
    is closed for any reason other than user termination, or if the
    'spot_wait_timeout' deadline passes before all requests are fulfilled.
    """
    timeout_secs = int(module.params.get('spot_wait_timeout'))
    deadline = time.time() + timeout_secs

    # Maps fulfilled request id -> instance id.
    fulfilled = dict()
    while time.time() < deadline:
        # Re-fetch all spot requests each pass and index them by id so each
        # of our pending requests can be looked up directly.
        current = dict((req.id, req) for req in ec2.get_all_spot_instance_requests())
        for pending in spot_requests:
            if pending.id in fulfilled:
                continue
            sir = current.get(pending.id)
            if sir is None:
                continue  # this is not our spot instance
            if sir.instance_id is not None:
                fulfilled[pending.id] = sir.instance_id
            elif sir.state == 'open':
                continue  # still waiting, nothing to do here
            elif sir.state == 'active':
                continue  # Instance is created already, nothing to do here
            elif sir.state == 'failed':
                module.fail_json(msg="Spot instance request %s failed with status %s and fault %s:%s" % (
                    sir.id, sir.status.code, sir.fault.code, sir.fault.message))
            elif sir.state == 'cancelled':
                module.fail_json(msg="Spot instance request %s was cancelled before it could be fulfilled." % sir.id)
            elif sir.state == 'closed':
                # instance is terminating or marked for termination
                # this may be intentional on the part of the operator,
                # or it may have been terminated by AWS due to capacity,
                # price, or group constraints in this case, we'll fail
                # the module if the reason for the state is anything
                # other than termination by user. Codes are documented at
                # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html
                if sir.status.code == 'instance-terminated-by-user':
                    # do nothing, since the user likely did this on purpose
                    pass
                else:
                    spot_msg = "Spot instance request %s was closed by AWS with the status %s and fault %s:%s"
                    module.fail_json(msg=spot_msg % (sir.id, sir.status.code, sir.fault.code, sir.fault.message))

        if len(fulfilled) < count:
            time.sleep(5)
        else:
            return fulfilled.values()
    module.fail_json(msg = "wait for spot requests timeout on %s" % time.asctime())
@@ -951,7 +1048,7 @@ def create_instances(module, ec2, vpc, override_count=None): if ebs_optimized: params['ebs_optimized'] = ebs_optimized - # 'tenancy' always has a default value, but it is not a valid parameter for spot instance resquest + # 'tenancy' always has a default value, but it is not a valid parameter for spot instance request if not spot_price: params['tenancy'] = tenancy @@ -1033,6 +1130,10 @@ def create_instances(module, ec2, vpc, override_count=None): private_ip_address = private_ip, )) + # For ordinary (not spot) instances, we can select 'stop' + # (the default) or 'terminate' here. + params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop' + res = ec2.run_instances(**params) instids = [ i.id for i in res.instances ] while True: @@ -1067,6 +1168,12 @@ def create_instances(module, ec2, vpc, override_count=None): module.fail_json( msg="placement_group parameter requires Boto version 2.3.0 or higher.") + # You can't tell spot instances to 'stop'; they will always be + # 'terminate'd. For convenience, we'll ignore the latter value. 
+ if instance_initiated_shutdown_behavior and instance_initiated_shutdown_behavior != 'terminate': + module.fail_json( + msg="instance_initiated_shutdown_behavior=stop is not supported for spot instances.") + if spot_launch_group and isinstance(spot_launch_group, basestring): params['launch_group'] = spot_launch_group @@ -1078,24 +1185,8 @@ def create_instances(module, ec2, vpc, override_count=None): # Now we have to do the intermediate waiting if wait: - spot_req_inst_ids = dict() - spot_wait_timeout = time.time() + spot_wait_timeout - while spot_wait_timeout > time.time(): - reqs = ec2.get_all_spot_instance_requests() - for sirb in res: - if sirb.id in spot_req_inst_ids: - continue - for sir in reqs: - if sir.id == sirb.id and sir.instance_id is not None: - spot_req_inst_ids[sirb.id] = sir.instance_id - if len(spot_req_inst_ids) < count: - time.sleep(5) - else: - break - if spot_wait_timeout <= time.time(): - module.fail_json(msg = "wait for spot requests timeout on %s" % time.asctime()) - instids = spot_req_inst_ids.values() - except boto.exception.BotoServerError, e: + instids = await_spot_requests(module, ec2, res, count) + except boto.exception.BotoServerError as e: module.fail_json(msg = "Instance creation failed => %s: %s" % (e.error_code, e.error_message)) # wait here until the instances are up @@ -1104,7 +1195,7 @@ def create_instances(module, ec2, vpc, override_count=None): while wait_timeout > time.time() and num_running < len(instids): try: res_list = ec2.get_all_instances(instids) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: if e.error_code == 'InvalidInstanceID.NotFound': time.sleep(1) continue @@ -1146,7 +1237,7 @@ def create_instances(module, ec2, vpc, override_count=None): if instance_tags: try: ec2.create_tags(instids, instance_tags) - except boto.exception.EC2ResponseError, e: + except boto.exception.EC2ResponseError as e: module.fail_json(msg = "Instance tagging failed => %s: %s" % (e.error_code, 
e.error_message)) instance_dict_array = [] @@ -1195,7 +1286,7 @@ def terminate_instances(module, ec2, instance_ids): instance_dict_array.append(get_instance_info(inst)) try: ec2.terminate_instances([inst.id]) - except EC2ResponseError, e: + except EC2ResponseError as e: module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e)) changed = True @@ -1208,8 +1299,8 @@ def terminate_instances(module, ec2, instance_ids): instance_ids=terminated_instance_ids, \ filters={'instance-state-name':'terminated'}) try: - num_terminated = len(response.pop().instances) - except Exception, e: + num_terminated = sum([len(res.instances) for res in response]) + except Exception as e: # got a bad response of some sort, possibly due to # stale/cached data. Wait a second and then try again time.sleep(1) @@ -1260,8 +1351,6 @@ def startstop_instances(module, ec2, instance_ids, state, instance_tags): termination_protection = module.params.get('termination_protection') changed = False instance_dict_array = [] - source_dest_check = module.params.get('source_dest_check') - termination_protection = module.params.get('termination_protection') if not isinstance(instance_ids, list) or len(instance_ids) < 1: # Fail unless the user defined instance tags @@ -1279,17 +1368,31 @@ def startstop_instances(module, ec2, instance_ids, state, instance_tags): # Check that our instances are not in the state we want to take # Check (and eventually change) instances attributes and instances state - running_instances_array = [] + existing_instances_array = [] for res in ec2.get_all_instances(instance_ids, filters=filters): for inst in res.instances: # Check "source_dest_check" attribute - if inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check: - inst.modify_attribute('sourceDestCheck', source_dest_check) - changed = True + try: + if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check: + 
def restart_instances(module, ec2, instance_ids, state, instance_tags):
    """
    Reboots a list of existing instances.

    module: Ansible module object
    ec2: authenticated ec2 connection object
    instance_ids: The list of instances to restart in the form of
      [ {id: <inst-id>}, ..]
    instance_tags: A dict of tag keys and values in the form of
      {key: value, ... }
    state: Intended state ("restarted")

    Returns a tuple of (changed, instance_dict_array, instance_ids).

    If no instance changed state or attributes, "changed" will be False.

    Wait will not apply here as this is an OS level operation.

    Note that if instance_ids and instance_tags are both non-empty,
    this method will process the intersection of the two.
    """

    source_dest_check = module.params.get('source_dest_check')
    termination_protection = module.params.get('termination_protection')
    changed = False
    instance_dict_array = []

    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
        # Fail unless the user defined instance tags
        if not instance_tags:
            module.fail_json(msg='instance_ids should be a list of instances, aborting')

    # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
    # An empty filter does no filtering, so it's safe to pass it to the
    # get_all_instances method even if the user did not specify instance_tags
    filters = {}
    if instance_tags:
        for key, value in instance_tags.items():
            filters["tag:" + key] = value

    # Check (and eventually change) instances attributes and instances state
    for res in ec2.get_all_instances(instance_ids, filters=filters):
        for inst in res.instances:

            # Check "source_dest_check" attribute (only meaningful inside a VPC)
            try:
                if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
                    inst.modify_attribute('sourceDestCheck', source_dest_check)
                    changed = True
            except boto.exception.EC2ResponseError as exc:
                # instances with more than one Elastic Network Interface will
                # fail, because they have the sourceDestCheck attribute defined
                # per-interface
                if exc.code == 'InvalidInstanceID':
                    for interface in inst.interfaces:
                        if interface.source_dest_check != source_dest_check:
                            ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
                            changed = True
                else:
                    # Bug fix: traceback.format_exc() takes an optional line-count
                    # limit, not an exception object; passing `exc` raised a
                    # TypeError on Python 3 and silently misbehaved on Python 2.
                    module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc),
                                     exception=traceback.format_exc())

            # Check "termination_protection" attribute; None means "leave as-is"
            if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection
                    and termination_protection is not None):
                inst.modify_attribute('disableApiTermination', termination_protection)
                changed = True

            # Check instance state. EC2 never reports a 'restarted' state, so in
            # practice every matched instance gets rebooted.
            if inst.state != state:
                instance_dict_array.append(get_instance_info(inst))
                try:
                    inst.reboot()
                except EC2ResponseError as e:
                    module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
                changed = True

    return (changed, instance_dict_array, instance_ids)
= startstop_instances(module, ec2, instance_ids, state, instance_tags) + elif state in ('restarted'): + instance_ids = module.params.get('instance_ids') + instance_tags = module.params.get('instance_tags') + if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)): + module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids) + + (changed, instance_dict_array, new_instance_ids) = restart_instances(module, ec2, instance_ids, state, instance_tags) + elif state == 'present': # Changed is always set to true when provisioning new instances if not module.params.get('image'): @@ -1431,4 +1628,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_ami.py b/cloud/amazon/ec2_ami.py index 58ac76f3c93..de3a31c4a76 100644 --- a/cloud/amazon/ec2_ami.py +++ b/cloud/amazon/ec2_ami.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_ami @@ -72,12 +76,13 @@ default: null delete_snapshot: description: - - Whether or not to delete an AMI while deregistering it. + - Whether or not to delete snapshots when deregistering AMI. 
required: false - default: null + default: "no" + choices: [ "yes", "no" ] tags: description: - - a hash/dictionary of tags to add to the new image; '{"key":"value"}' and '{"key":"value","key":"value"}' + - a dictionary of tags to add to the new image; '{"key":"value"}' and '{"key":"value","key":"value"}' required: false default: null version_added: "2.0" @@ -87,7 +92,9 @@ required: false default: null version_added: "2.0" -author: "Evan Duffield (@scicoin-project) " +author: + - "Evan Duffield (@scicoin-project) " + - "Constantin Bugneac (@Constantin07) " extends_documentation_fragment: - aws - ec2 @@ -119,7 +126,7 @@ register: instance # AMI Creation, with a custom root-device size and another EBS attached -- ec2_ami +- ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx instance_id: i-xxxxxx @@ -136,7 +143,7 @@ register: instance # AMI Creation, excluding a volume attached at /dev/sdb -- ec2_ami +- ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx instance_id: i-xxxxxx @@ -150,22 +157,22 @@ no_device: yes register: instance -# Deregister/Delete AMI +# Deregister/Delete AMI (keep associated snapshots) - ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx region: xxxxxx image_id: "{{ instance.image_id }}" - delete_snapshot: True + delete_snapshot: False state: absent -# Deregister AMI +# Deregister AMI (delete associated snapshots too) - ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx region: xxxxxx image_id: "{{ instance.image_id }}" - delete_snapshot: False + delete_snapshot: True state: absent # Update AMI Launch Permissions, making it public @@ -189,6 +196,103 @@ user_ids: ['123456789012'] ''' +RETURN = ''' +architecture: + description: architecture of image + returned: when AMI is created or already exists + type: string + 
sample: "x86_64" +block_device_mapping: + description: block device mapping associated with image + returned: when AMI is created or already exists + type: a dictionary of block devices + sample: { + "/dev/sda1": { + "delete_on_termination": true, + "encrypted": false, + "size": 10, + "snapshot_id": "snap-1a03b80e7", + "volume_type": "standard" + } + } +creationDate: + description: creation date of image + returned: when AMI is created or already exists + type: string + sample: "2015-10-15T22:43:44.000Z" +description: + description: description of image + returned: when AMI is created or already exists + type: string + sample: "nat-server" +hypervisor: + description: type of hypervisor + returned: when AMI is created or already exists + type: string + sample: "xen" +is_public: + description: whether image is public + returned: when AMI is created or already exists + type: bool + sample: false +location: + description: location of image + returned: when AMI is created or already exists + type: string + sample: "315210894379/nat-server" +name: + description: ami name of image + returned: when AMI is created or already exists + type: string + sample: "nat-server" +owner_id: + description: owner of image + returned: when AMI is created or already exists + type: string + sample: "435210894375" +platform: + description: platform of image + returned: when AMI is created or already exists + type: string + sample: null +root_device_name: + description: root device name of image + returned: when AMI is created or already exists + type: string + sample: "/dev/sda1" +root_device_type: + description: root device type of image + returned: when AMI is created or already exists + type: string + sample: "ebs" +state: + description: state of image + returned: when AMI is created or already exists + type: string + sample: "available" +tags: + description: a dictionary of tags assigned to image + returned: when AMI is created or already exists + type: dictionary of tags + sample: { + 
"Env": "devel", + "Name": "nat-server" + } +virtualization_type: + description: image virtualization type + returned: when AMI is created or already exists + type: string + sample: "hvm" +snapshots_deleted: + description: a list of snapshot ids deleted after deregistering image + returned: after AMI is deregistered, if 'delete_snapshot' is set to 'yes' + type: list + sample: [ + "snap-fbcccb8f", + "snap-cfe7cdb4" + ] +''' + import sys import time @@ -201,6 +305,47 @@ HAS_BOTO = False +def get_block_device_mapping(image): + """ + Retrieves block device mapping from AMI + """ + + bdm_dict = dict() + + if image is not None and hasattr(image, 'block_device_mapping'): + bdm = getattr(image,'block_device_mapping') + for device_name in bdm.keys(): + bdm_dict[device_name] = { + 'size': bdm[device_name].size, + 'snapshot_id': bdm[device_name].snapshot_id, + 'volume_type': bdm[device_name].volume_type, + 'encrypted': bdm[device_name].encrypted, + 'delete_on_termination': bdm[device_name].delete_on_termination + } + + return bdm_dict + + +def get_ami_info(image): + + return dict( + image_id=image.id, + state=image.state, + architecture=image.architecture, + block_device_mapping=get_block_device_mapping(image), + creationDate=image.creationDate, + description=image.description, + hypervisor=image.hypervisor, + is_public=image.is_public, + location=image.location, + ownerId=image.ownerId, + root_device_name=image.root_device_name, + root_device_type=image.root_device_type, + tags=image.tags, + virtualization_type = image.virtualization_type + ) + + def create_image(module, ec2): """ Creates new AMI @@ -225,6 +370,11 @@ def create_image(module, ec2): 'description': description, 'no_reboot': no_reboot} + images = ec2.get_all_images(filters={'name': name}) + + if images and images[0]: + module.exit_json(msg="AMI name already present", image_id=images[0].id, state=images[0].state, changed=False) + if device_mapping: bdm = BlockDeviceMapping() for device in device_mapping: @@ 
-237,16 +387,8 @@ def create_image(module, ec2): params['block_device_mapping'] = bdm image_id = ec2.create_image(**params) - except boto.exception.BotoServerError, e: - if e.error_code == 'InvalidAMIName.Duplicate': - images = ec2.get_all_images() - for img in images: - if img.name == name: - module.exit_json(msg="AMI name already present", image_id=img.id, state=img.state, changed=False) - else: - module.fail_json(msg="Error in retrieving duplicate AMI details") - else: - module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) + except boto.exception.BotoServerError as e: + module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) # Wait until the image is recognized. EC2 API has eventual consistency, # such that a successful CreateImage API call doesn't guarantee the success @@ -254,37 +396,33 @@ def create_image(module, ec2): for i in range(wait_timeout): try: img = ec2.get_image(image_id) - break - except boto.exception.EC2ResponseError, e: - if 'InvalidAMIID.NotFound' in e.error_code and wait: - time.sleep(1) - else: - module.fail_json(msg="Error while trying to find the new image. 
Using wait=yes and/or a longer wait_timeout may help.") - else: - module.fail_json(msg="timed out waiting for image to be recognized") - # wait here until the image is created - wait_timeout = time.time() + wait_timeout - while wait and wait_timeout > time.time() and (img is None or img.state != 'available'): - img = ec2.get_image(image_id) - time.sleep(3) - if wait and wait_timeout <= time.time(): - # waiting took too long - module.fail_json(msg = "timed out waiting for image to be created") + if img.state == 'available': + break + elif img.state == 'failed': + module.fail_json(msg="AMI creation failed, please see the AWS console for more details") + except boto.exception.EC2ResponseError as e: + if ('InvalidAMIID.NotFound' not in e.error_code and 'InvalidAMIID.Unavailable' not in e.error_code) and wait and i == wait_timeout - 1: + module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help. %s: %s" % (e.error_code, e.error_message)) + finally: + time.sleep(1) + + if img.state != 'available': + module.fail_json(msg="Error while trying to find the new image. 
Using wait=yes and/or a longer wait_timeout may help.") if tags: try: ec2.create_tags(image_id, tags) - except boto.exception.EC2ResponseError, e: + except boto.exception.EC2ResponseError as e: module.fail_json(msg = "Image tagging failed => %s: %s" % (e.error_code, e.error_message)) if launch_permissions: try: img = ec2.get_image(image_id) img.set_launch_permissions(**launch_permissions) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg="%s: %s" % (e.error_code, e.error_message), image_id=image_id) - module.exit_json(msg="AMI creation operation complete", image_id=image_id, state=img.state, changed=True) + module.exit_json(msg="AMI creation operation complete", changed=True, **get_ami_info(img)) def deregister_image(module, ec2): @@ -301,13 +439,23 @@ def deregister_image(module, ec2): if img == None: module.fail_json(msg = "Image %s does not exist" % image_id, changed=False) - try: - params = {'image_id': image_id, - 'delete_snapshot': delete_snapshot} + # Get all associated snapshot ids before deregistering image otherwise this information becomes unavailable + snapshots = [] + if hasattr(img, 'block_device_mapping'): + for key in img.block_device_mapping: + snapshots.append(img.block_device_mapping[key].snapshot_id) - res = ec2.deregister_image(**params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) + # When trying to re-delete already deleted image it doesn't raise an exception + # It just returns an object without image attributes + if hasattr(img, 'id'): + try: + params = {'image_id': image_id, + 'delete_snapshot': delete_snapshot} + res = ec2.deregister_image(**params) + except boto.exception.BotoServerError as e: + module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) + else: + module.exit_json(msg = "Image %s has already been deleted" % image_id, changed=False) # wait here until the image is gone img = 
ec2.get_image(image_id) @@ -317,9 +465,21 @@ def deregister_image(module, ec2): time.sleep(3) if wait and wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg = "timed out waiting for image to be reregistered/deleted") + module.fail_json(msg = "timed out waiting for image to be deregistered/deleted") - module.exit_json(msg="AMI deregister/delete operation complete", changed=True) + # Boto library has hardcoded the deletion of the snapshot for the root volume mounted as '/dev/sda1' only + # Make it possible to delete all snapshots which belong to image, including root block device mapped as '/dev/xvda' + if delete_snapshot: + try: + for snapshot_id in snapshots: + ec2.delete_snapshot(snapshot_id) + except boto.exception.BotoServerError as e: + if e.error_code == 'InvalidSnapshot.NotFound': + # Don't error out if root volume snapshot was already deleted as part of deregister_image + pass + module.exit_json(msg="AMI deregister/delete operation complete", changed=True, snapshots_deleted=snapshots) + else: + module.exit_json(msg="AMI deregister/delete operation complete", changed=True) def update_image(module, ec2): @@ -349,7 +509,7 @@ def update_image(module, ec2): else: module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) def main(): @@ -357,12 +517,12 @@ def main(): argument_spec.update(dict( instance_id = dict(), image_id = dict(), - delete_snapshot = dict(), + delete_snapshot = dict(default=False, type='bool'), name = dict(), - wait = dict(type="bool", default=False), + wait = dict(type='bool', default=False), wait_timeout = dict(default=900), description = dict(default=""), - no_reboot = dict(default=False, type="bool"), + no_reboot = dict(default=False, type='bool'), state = dict(default='present'), device_mapping = dict(type='list'), tags = 
dict(type='dict'), @@ -376,7 +536,7 @@ def main(): try: ec2 = ec2_connect(module) - except Exception, e: + except Exception as e: module.fail_json(msg="Error while connecting to aws: %s" % str(e)) if module.params.get('state') == 'absent': @@ -402,4 +562,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_ami_find.py b/cloud/amazon/ec2_ami_find.py index bc7ad944aa5..c6b986bdd80 100644 --- a/cloud/amazon/ec2_ami_find.py +++ b/cloud/amazon/ec2_ami_find.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_ami_find @@ -228,7 +232,7 @@ type: string sample: "435210894375" platform: - description: plaform of image + description: platform of image returned: when AMI found type: string sample: null diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 6564c4c26bb..be7c634d742 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -13,6 +13,10 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = """ --- module: ec2_asg @@ -26,8 +30,9 @@ state: description: - register or deregister the instance - required: true + required: false choices: ['present', 'absent'] + default: present name: description: - Unique name for group to be created or deleted @@ -52,6 +57,12 @@ description: - Maximum number of instances in group, if unspecified then the current group value will be used. required: false + placement_group: + description: + - Physical location of your cluster placement group created in Amazon EC2. 
+ required: false + version_added: "2.3" + default: None desired_capacity: description: - Desired number of instances in group, if unspecified then the current group value will be used. @@ -76,7 +87,7 @@ default: None lc_check: description: - - Check to make sure instances that are being replaced with replace_instances do not aready have the current launch_config. + - Check to make sure instances that are being replaced with replace_instances do not already have the current launch_config. required: false version_added: "1.8" default: True @@ -112,7 +123,7 @@ version_added: "2.0" wait_timeout: description: - - how long before wait instances to become viable when replaced. Used in concjunction with instance_ids option. + - how long before wait instances to become viable when replaced. Used in conjunction with instance_ids option. default: 300 version_added: "1.8" wait_for_instances: @@ -124,11 +135,30 @@ termination_policies: description: - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity. - - For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existent autoscaling group, the current termination policies are mantained + - For 'Default', when used to create a new autoscaling group, the "Default"i value is used. When used to change an existent autoscaling group, the current termination policies are maintained. required: false default: Default choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default'] version_added: "2.0" + notification_topic: + description: + - A SNS topic ARN to send auto scaling notifications to. + default: None + required: false + version_added: "2.2" + notification_types: + description: + - A list of auto scaling events to trigger notifications on. 
+ default: ['autoscaling:EC2_INSTANCE_LAUNCH', 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', 'autoscaling:EC2_INSTANCE_TERMINATE', 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'] + required: false + version_added: "2.2" + suspend_processes: + description: + - A list of scaling processes to suspend. + required: False + default: [] + choices: ['Launch', 'Terminate', 'HealthCheck', 'ReplaceUnhealthy', 'AZRebalance', 'AlarmNotification', 'ScheduledActions', 'AddToLoadBalancer'] + version_added: "2.3" extends_documentation_fragment: - aws - ec2 @@ -202,6 +232,7 @@ import time import logging as log +import traceback from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * @@ -292,7 +323,7 @@ def elb_dreg(asg_connection, module, group_name, instance_id): if as_group.load_balancers and as_group.health_check_type == 'ELB': try: elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg=str(e)) else: return @@ -318,7 +349,7 @@ def elb_dreg(asg_connection, module, group_name, instance_id): def elb_healthy(asg_connection, elb_connection, module, group_name): - healthy_instances = [] + healthy_instances = set() as_group = asg_connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) # get healthy, inservice instances from ASG @@ -333,11 +364,15 @@ def elb_healthy(asg_connection, elb_connection, module, group_name): # but has not yet show up in the ELB try: lb_instances = elb_connection.describe_instance_health(lb, instances=instances) - except boto.exception.InvalidInstance: - pass + except boto.exception.BotoServerError as e: + if e.error_code == 'InvalidInstance': + return None + + module.fail_json(msg=str(e)) + for i in lb_instances: if i.state == "InService": - healthy_instances.append(i.instance_id) + healthy_instances.add(i.instance_id) log.debug("{0}: {1}".format(i.instance_id, i.state)) return 
len(healthy_instances) @@ -351,10 +386,10 @@ def wait_for_elb(asg_connection, module, group_name): as_group = asg_connection.get_all_groups(names=[group_name])[0] if as_group.load_balancers and as_group.health_check_type == 'ELB': - log.debug("Waiting for ELB to consider intances healthy.") + log.debug("Waiting for ELB to consider instances healthy.") try: elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg=str(e)) wait_timeout = time.time() + wait_timeout @@ -369,6 +404,28 @@ def wait_for_elb(asg_connection, module, group_name): module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime()) log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances)) + +def suspend_processes(as_group, module): + suspend_processes = set(module.params.get('suspend_processes')) + + try: + suspended_processes = set([p.process_name for p in as_group.suspended_processes]) + except AttributeError: + # New ASG being created, no suspended_processes defined yet + suspended_processes = set() + + if suspend_processes == suspended_processes: + return False + + resume_processes = list(suspended_processes - suspend_processes) + if resume_processes: + as_group.resume_processes(resume_processes) + + if suspend_processes: + as_group.suspend_processes(list(suspend_processes)) + + return True + def create_autoscaling_group(connection, module): group_name = module.params.get('name') load_balancers = module.params['load_balancers'] @@ -376,6 +433,7 @@ def create_autoscaling_group(connection, module): launch_config_name = module.params.get('launch_config_name') min_size = module.params['min_size'] max_size = module.params['max_size'] + placement_group = module.params.get('placement_group') desired_capacity = module.params.get('desired_capacity') vpc_zone_identifier = 
module.params.get('vpc_zone_identifier') set_tags = module.params.get('tags') @@ -386,12 +444,14 @@ def create_autoscaling_group(connection, module): as_groups = connection.get_all_groups(names=[group_name]) wait_timeout = module.params.get('wait_timeout') termination_policies = module.params.get('termination_policies') + notification_topic = module.params.get('notification_topic') + notification_types = module.params.get('notification_types') if not vpc_zone_identifier and not availability_zones: region, ec2_url, aws_connect_params = get_aws_connection_info(module) try: ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: module.fail_json(msg=str(e)) elif vpc_zone_identifier: vpc_zone_identifier = ','.join(vpc_zone_identifier) @@ -410,6 +470,8 @@ def create_autoscaling_group(connection, module): availability_zones = module.params['availability_zones'] = [zone.name for zone in ec2_connection.get_all_zones()] enforce_required_arguments(module) launch_configs = connection.get_all_launch_configurations(names=[launch_config_name]) + if len(launch_configs) == 0: + module.fail_json(msg="No launch config found with name %s" % launch_config_name) ag = AutoScalingGroup( group_name=group_name, load_balancers=load_balancers, @@ -417,6 +479,7 @@ def create_autoscaling_group(connection, module): launch_config=launch_configs[0], min_size=min_size, max_size=max_size, + placement_group=placement_group, desired_capacity=desired_capacity, vpc_zone_identifier=vpc_zone_identifier, connection=connection, @@ -428,18 +491,27 @@ def create_autoscaling_group(connection, module): try: connection.create_auto_scaling_group(ag) + suspend_processes(ag, module) if wait_for_instances: wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances') wait_for_elb(connection, module, group_name) + + if 
notification_topic: + ag.put_notification_configuration(notification_topic, notification_types) + as_group = connection.get_all_groups(names=[group_name])[0] asg_properties = get_properties(as_group) changed = True return(changed, asg_properties) - except BotoServerError, e: - module.fail_json(msg=str(e)) + except BotoServerError as e: + module.fail_json(msg="Failed to create Autoscaling Group: %s" % str(e), exception=traceback.format_exc(e)) else: as_group = as_groups[0] changed = False + + if suspend_processes(as_group, module): + changed = True + for attr in ASG_ATTRIBUTES: if module.params.get(attr, None) is not None: module_attr = module.params.get(attr) @@ -448,14 +520,15 @@ def create_autoscaling_group(connection, module): group_attr = getattr(as_group, attr) # we do this because AWS and the module may return the same list # sorted differently - try: - module_attr.sort() - except: - pass - try: - group_attr.sort() - except: - pass + if attr != 'termination_policies': + try: + module_attr.sort() + except: + pass + try: + group_attr.sort() + except: + pass if group_attr != module_attr: changed = True setattr(as_group, attr, module_attr) @@ -490,8 +563,14 @@ def create_autoscaling_group(connection, module): if changed: try: as_group.update() - except BotoServerError, e: - module.fail_json(msg=str(e)) + except BotoServerError as e: + module.fail_json(msg="Failed to update Autoscaling Group: %s" % str(e), exception=traceback.format_exc(e)) + + if notification_topic: + try: + as_group.put_notification_configuration(notification_topic, notification_types) + except BotoServerError as e: + module.fail_json(msg="Failed to update Autoscaling Group notifications: %s" % str(e), exception=traceback.format_exc(e)) if wait_for_instances: wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances') @@ -499,13 +578,18 @@ def create_autoscaling_group(connection, module): try: as_group = connection.get_all_groups(names=[group_name])[0] 
asg_properties = get_properties(as_group) - except BotoServerError, e: - module.fail_json(msg=str(e)) + except BotoServerError as e: + module.fail_json(msg="Failed to read existing Autoscaling Groups: %s" % str(e), exception=traceback.format_exc(e)) return(changed, asg_properties) def delete_autoscaling_group(connection, module): group_name = module.params.get('name') + notification_topic = module.params.get('notification_topic') + + if notification_topic: + ag.delete_notification_configuration(notification_topic) + groups = connection.get_all_groups(names=[group_name]) if groups: group = groups[0] @@ -560,6 +644,14 @@ def replace(connection, module): instances = props['instances'] if replace_instances: instances = replace_instances + + #check if min_size/max_size/desired capacity have been specified and if not use ASG values + if min_size is None: + min_size = as_group.min_size + if max_size is None: + max_size = as_group.max_size + if desired_capacity is None: + desired_capacity = as_group.desired_capacity # check to see if instances are replaceable if checking launch configs new_instances, old_instances = get_instances_by_lc(props, lc_check, instances) @@ -582,16 +674,9 @@ def replace(connection, module): if not old_instances: changed = False return(changed, props) - - #check if min_size/max_size/desired capacity have been specified and if not use ASG values - if min_size is None: - min_size = as_group.min_size - if max_size is None: - max_size = as_group.max_size - if desired_capacity is None: - desired_capacity = as_group.desired_capacity + # set temporary settings and wait for them to be reached - # This should get overriden if the number of instances left is less than the batch size. + # This should get overwritten if the number of instances left is less than the batch size. 
as_group = connection.get_all_groups(names=[group_name])[0] update_size(as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size) @@ -742,7 +827,7 @@ def wait_for_term_inst(connection, module, term_instances): lifecycle = instance_facts[i]['lifecycle_state'] health = instance_facts[i]['health_status'] log.debug("Instance {0} has state of {1},{2}".format(i,lifecycle,health )) - if lifecycle == 'Terminating' or healthy == 'Unhealthy': + if lifecycle == 'Terminating' or health == 'Unhealthy': count += 1 time.sleep(10) @@ -780,6 +865,7 @@ def main(): launch_config_name=dict(type='str'), min_size=dict(type='int'), max_size=dict(type='int'), + placement_group=dict(type='str'), desired_capacity=dict(type='int'), vpc_zone_identifier=dict(type='list'), replace_batch_size=dict(type='int', default=1), @@ -793,7 +879,15 @@ def main(): health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), default_cooldown=dict(type='int', default=300), wait_for_instances=dict(type='bool', default=True), - termination_policies=dict(type='list', default='Default') + termination_policies=dict(type='list', default='Default'), + notification_topic=dict(type='str', default=None), + notification_types=dict(type='list', default=[ + 'autoscaling:EC2_INSTANCE_LAUNCH', + 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', + 'autoscaling:EC2_INSTANCE_TERMINATE', + 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR' + ]), + suspend_processes=dict(type='list', default=[]) ), ) @@ -813,7 +907,7 @@ def main(): connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) if not connection: module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region)) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg=str(e)) changed = create_changed = replace_changed = False diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py index 32daaf4bcf7..22d950f9fb6 100644 --- a/cloud/amazon/ec2_eip.py 
+++ b/cloud/amazon/ec2_eip.py @@ -14,12 +14,17 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_eip -short_description: associate an EC2 elastic IP with an instance. +short_description: manages EC2 elastic IP (EIP) addresses. description: - - This module associates AWS EC2 elastic IP addresses with instances + - This module can allocate or release an EIP. + - This module can associate/disassociate an EIP with instances or network interfaces. version_added: "1.4" options: device_id: @@ -30,13 +35,15 @@ version_added: "2.0" public_ip: description: - - The elastic IP address to associate with the instance. - - If absent, allocate a new address + - The IP address of a previously allocated EIP. + - If present and device is specified, the EIP is associated with the device. + - If absent and device is specified, the EIP is disassociated from the device. required: false + aliases: [ ip ] state: description: - - If present, associate the IP with the instance. - - If absent, disassociate the IP with the instance. + - If present, allocate an EIP or associate an existing EIP with a device. + - If absent, disassociate the EIP from the device and optionally release it. required: false choices: ['present', 'absent'] default: present @@ -48,7 +55,7 @@ version_added: "1.4" reuse_existing_ip_allowed: description: - - Reuse an EIP that is not associated to an instance (when available), instead of allocating a new one. + - Reuse an EIP that is not associated to a device (when available), instead of allocating a new one. required: false default: false version_added: "1.6" @@ -64,8 +71,8 @@ author: "Rick Mendes (@rickmendes) " notes: - This module will return C(public_ip) on success, which will contain the - public IP address associated with the instance. 
- - There may be a delay between the time the Elastic IP is assigned and when + public IP address associated with the device. + - There may be a delay between the time the EIP is assigned and when the cloud instance is reachable via the new address. Use wait_for and pause to delay further playbook execution until the instance is reachable, if necessary. @@ -76,34 +83,67 @@ EXAMPLES = ''' - name: associate an elastic IP with an instance - ec2_eip: device_id=i-1212f003 ip=93.184.216.119 + ec2_eip: + device_id: i-1212f003 + ip: 93.184.216.119 + - name: associate an elastic IP with a device - ec2_eip: device_id=eni-c8ad70f3 ip=93.184.216.119 + ec2_eip: + device_id: eni-c8ad70f3 + ip: 93.184.216.119 + - name: disassociate an elastic IP from an instance - ec2_eip: device_id=i-1212f003 ip=93.184.216.119 state=absent + ec2_eip: + device_id: i-1212f003 + ip: 93.184.216.119 + state: absent + - name: disassociate an elastic IP with a device - ec2_eip: device_id=eni-c8ad70f3 ip=93.184.216.119 state=absent + ec2_eip: + device_id: eni-c8ad70f3 + ip: 93.184.216.119 + state: absent + - name: allocate a new elastic IP and associate it with an instance - ec2_eip: device_id=i-1212f003 + ec2_eip: + device_id: i-1212f003 + - name: allocate a new elastic IP without associating it to anything action: ec2_eip register: eip + - name: output the IP - debug: msg="Allocated IP is {{ eip.public_ip }}" + debug: + msg: "Allocated IP is {{ eip.public_ip }}" + - name: another way of allocating an elastic IP without associating it to anything - ec2_eip: state='present' + ec2_eip: + state: 'present' + - name: provision new instances with ec2 - ec2: keypair=mykey instance_type=c1.medium image=emi-40603AD1 wait=yes''' -''' group=webserver count=3 + ec2: + keypair: mykey + instance_type: c1.medium + image: ami-40603AD1 + wait: yes + group: webserver + count: 3 register: ec2 + - name: associate new elastic IPs with each of the instances - ec2_eip: "device_id={{ item }}" - with_items: ec2.instance_ids + 
ec2_eip: + device_id: "{{ item }}" + with_items: "{{ ec2.instance_ids }}" + - name: allocate a new elastic IP inside a VPC in us-west-2 - ec2_eip: region=us-west-2 in_vpc=yes + ec2_eip: + region: us-west-2 + in_vpc: yes register: eip + - name: output the IP - debug: msg="Allocated IP inside a VPC is {{ eip.public_ip }}" + debug: + msg: "Allocated IP inside a VPC is {{ eip.public_ip }}" ''' try: @@ -221,13 +261,13 @@ def release_address(ec2, address, check_mode): return {'changed': True} -def find_device(ec2, device_id, isinstance=True): +def find_device(ec2, module, device_id, isinstance=True): """ Attempt to find the EC2 instance and return it """ if isinstance: try: reservations = ec2.get_all_reservations(instance_ids=[device_id]) - except boto.exception.EC2ResponseError, e: + except boto.exception.EC2ResponseError as e: module.fail_json(msg=str(e)) if len(reservations) == 1: @@ -237,7 +277,7 @@ def find_device(ec2, device_id, isinstance=True): else: try: interfaces = ec2.get_all_network_interfaces(network_interface_ids=[device_id]) - except boto.exception.EC2ResponseError, e: + except boto.exception.EC2ResponseError as e: module.fail_json(msg=str(e)) if len(interfaces) == 1: @@ -246,7 +286,7 @@ def find_device(ec2, device_id, isinstance=True): raise EIPException("could not find instance" + device_id) -def ensure_present(ec2, domain, address, device_id, +def ensure_present(ec2, module, domain, address, device_id, reuse_existing_ip_allowed, check_mode, isinstance=True): changed = False @@ -261,7 +301,7 @@ def ensure_present(ec2, domain, address, device_id, if device_id: # Allocate an IP for instance since no public_ip was provided if isinstance: - instance = find_device(ec2, device_id) + instance = find_device(ec2, module, device_id) if reuse_existing_ip_allowed: if len(instance.vpc_id) > 0 and domain is None: raise EIPException("You must set 'in_vpc' to true to associate an instance with an existing ip in a vpc") @@ -269,7 +309,7 @@ def ensure_present(ec2, 
domain, address, device_id, assoc_result = associate_ip_and_device(ec2, address, device_id, check_mode) else: - instance = find_device(ec2, device_id, isinstance=False) + instance = find_device(ec2, module, device_id, isinstance=False) # Associate address object (provided or allocated) with instance assoc_result = associate_ip_and_device(ec2, address, device_id, check_mode, isinstance=False) @@ -340,6 +380,8 @@ def main(): if device_id and device_id.startswith('i-'): is_instance = True elif device_id: + if device_id.startswith('eni-') and not in_vpc: + module.fail_json(msg="If you are specifying an ENI, in_vpc must be true") is_instance = False try: @@ -350,7 +392,7 @@ def main(): if state == 'present': if device_id: - result = ensure_present(ec2, domain, address, device_id, + result = ensure_present(ec2, module, domain, address, device_id, reuse_existing_ip_allowed, module.check_mode, isinstance=is_instance) else: diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py index 5b5569ce00d..cd2cf5fbae6 100644 --- a/cloud/amazon/ec2_elb.py +++ b/cloud/amazon/ec2_elb.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = """ --- module: ec2_elb @@ -92,7 +96,7 @@ instance_id: "{{ ansible_ec2_instance_id }}" ec2_elbs: "{{ item }}" state: present - with_items: ec2_elbs + with_items: "{{ ec2_elbs }}" """ import time @@ -204,7 +208,7 @@ def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout): self.changed = True break elif self._is_instance_state_pending(instance_state): - # If it's pending, we'll skip further checks andd continue waiting + # If it's pending, we'll skip further checks and continue waiting pass elif (awaited_state == 'InService' and instance_state.reason_code == "Instance" @@ -212,7 +216,7 @@ def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout): # If the reason_code for the instance being out of service is # "Instance" this indicates a failure state, e.g. the instance # has failed a health check or the ELB does not have the - # instance's availabilty zone enabled. The exact reason why is + # instance's availability zone enabled. The exact reason why is # described in InstantState.description. msg = ("The instance %s could not be put in service on %s." 
" Reason: %s") @@ -239,7 +243,7 @@ def _get_instance_health(self, lb): """ try: status = lb.get_instance_health([self.instance_id])[0] - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: if e.error_code == 'InvalidInstance': return None else: @@ -257,7 +261,7 @@ def _get_instance_lbs(self, ec2_elbs=None): try: elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: self.module.fail_json(msg=str(e)) elbs = [] @@ -290,7 +294,7 @@ def _get_auto_scaling_group_lbs(self): try: asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: self.module.fail_json(msg=str(e)) asg_instances = asg.get_all_autoscaling_instances([self.instance_id]) @@ -314,7 +318,7 @@ def _get_instance(self): """Returns a boto.ec2.InstanceObject for self.instance_id""" try: ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: self.module.fail_json(msg=str(e)) return ec2.get_only_instances(instance_ids=[self.instance_id])[0] @@ -327,7 +331,7 @@ def main(): ec2_elbs={'default': None, 'required': False, 'type':'list'}, enable_availability_zone={'default': True, 'required': False, 'type': 'bool'}, wait={'required': False, 'default': True, 'type': 'bool'}, - wait_timeout={'requred': False, 'default': 0, 'type': 'int'} + wait_timeout={'required': False, 'default': 0, 'type': 'int'} ) ) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 453faf633c3..ca87a1cb3ad 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -14,6 +14,10 @@ # You should have received a copy of the 
GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = """ --- module: ec2_elb_lb @@ -24,7 +28,6 @@ version_added: "1.5" author: - "Jim Dalton (@jsdalton)" - - "Rick Mendes (@rickmendes)" options: state: description: @@ -136,7 +139,7 @@ version_added: "1.8" stickiness: description: - - An associative array of stickness policy settings. Policy will be applied to all listeners ( see example ) + - An associative array of stickiness policy settings. Policy will be applied to all listeners ( see example ) required: false version_added: "2.0" wait: @@ -154,6 +157,11 @@ required: false default: 60 version_added: "2.1" + tags: + description: + - An associative array of tags. To delete all tags, supply an empty dict. + required: false + version_added: "2.1" extends_documentation_fragment: - aws @@ -308,11 +316,11 @@ - us-east-1a - us-east-1d listeners: - - protocols: http - - load_balancer_port: 80 - - instance_port: 80 + - protocol: http + load_balancer_port: 80 + instance_port: 80 -# Create an ELB with load balanacer stickiness enabled +# Create an ELB with load balancer stickiness enabled - local_action: module: ec2_elb_lb name: "New ELB" @@ -322,9 +330,9 @@ - us-east-1a - us-east-1d listeners: - - protocols: http - - load_balancer_port: 80 - - instance_port: 80 + - protocol: http + load_balancer_port: 80 + instance_port: 80 stickiness: type: loadbalancer enabled: yes @@ -340,26 +348,89 @@ - us-east-1a - us-east-1d listeners: - - protocols: http - - load_balancer_port: 80 - - instance_port: 80 + - protocol: http + load_balancer_port: 80 + instance_port: 80 stickiness: type: application enabled: yes cookie: SESSIONID +# Create an ELB and add tags +- local_action: + module: ec2_elb_lb + name: "New ELB" + state: present + region: us-east-1 + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + 
instance_port: 80 + tags: + Name: "New ELB" + stack: "production" + client: "Bob" + +# Delete all tags from an ELB +- local_action: + module: ec2_elb_lb + name: "New ELB" + state: present + region: us-east-1 + zones: + - us-east-1a + - us-east-1d + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + tags: {} """ try: import boto import boto.ec2.elb import boto.ec2.elb.attributes + import boto.vpc from boto.ec2.elb.healthcheck import HealthCheck + from boto.ec2.tag import Tag from boto.regioninfo import RegionInfo HAS_BOTO = True except ImportError: HAS_BOTO = False +import time +import random + +def _throttleable_operation(max_retries): + def _operation_wrapper(op): + def _do_op(*args, **kwargs): + retry = 0 + while True: + try: + return op(*args, **kwargs) + except boto.exception.BotoServerError as e: + if retry < max_retries and e.code in \ + ("Throttling", "RequestLimitExceeded"): + retry = retry + 1 + time.sleep(min(random.random() * (2 ** retry), 300)) + continue + else: + raise + return _do_op + return _operation_wrapper + +def _get_vpc_connection(module, region, aws_connect_params): + try: + return connect_to_aws(boto.vpc, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: + module.fail_json(msg=str(e)) + + +_THROTTLING_RETRIES = 5 class ElbManager(object): """Handles ELB creation and destruction""" @@ -370,7 +441,8 @@ def __init__(self, module, name, listeners=None, purge_listeners=None, scheme="internet-facing", connection_draining_timeout=None, idle_timeout=None, cross_az_load_balancing=None, access_logs=None, - stickiness=None, wait=None, wait_timeout=None, region=None, + stickiness=None, wait=None, wait_timeout=None, tags=None, + region=None, instance_ids=None, purge_instance_ids=None, **aws_connect_params): self.module = module @@ -393,6 +465,7 @@ def __init__(self, module, name, listeners=None, purge_listeners=None, self.stickiness = stickiness self.wait = wait self.wait_timeout = 
wait_timeout + self.tags = tags self.aws_connect_params = aws_connect_params self.region = region @@ -403,6 +476,7 @@ def __init__(self, module, name, listeners=None, purge_listeners=None, self.elb = self._get_elb() self.ec2_conn = self._get_ec2_connection() + @_throttleable_operation(_THROTTLING_RETRIES) def ensure_ok(self): """Create the ELB""" if not self.elb: @@ -427,11 +501,14 @@ def ensure_ok(self): self._set_access_log() # add sitcky options self.select_stickiness_policy() + # ensure backend server policies are correct self._set_backend_policies() # set/remove instance ids self._set_instance_ids() + self._set_tags() + def ensure_gone(self): """Destroy the ELB""" if self.elb: @@ -544,8 +621,11 @@ def get_info(self): # return stickiness info? + info['tags'] = self.tags + return info + @_throttleable_operation(_THROTTLING_RETRIES) def _wait_for_elb_removed(self): polling_increment_secs = 15 max_retries = (self.wait_timeout / polling_increment_secs) @@ -554,7 +634,7 @@ def _wait_for_elb_removed(self): for x in range(0, max_retries): try: result = self.elb_conn.get_all_lb_attributes(self.name) - except (boto.exception.BotoServerError, StandardError), e: + except (boto.exception.BotoServerError, StandardError) as e: if "LoadBalancerNotFound" in e.code: status_achieved = True break @@ -563,6 +643,7 @@ def _wait_for_elb_removed(self): return status_achieved + @_throttleable_operation(_THROTTLING_RETRIES) def _wait_for_elb_interface_removed(self): polling_increment_secs = 15 max_retries = (self.wait_timeout / polling_increment_secs) @@ -581,7 +662,7 @@ def _wait_for_elb_interface_removed(self): break else: time.sleep(polling_increment_secs) - except (boto.exception.BotoServerError, StandardError), e: + except (boto.exception.BotoServerError, StandardError) as e: if 'InvalidNetworkInterfaceID' in e.code: status_achieved = True break @@ -590,6 +671,7 @@ def _wait_for_elb_interface_removed(self): return status_achieved + @_throttleable_operation(_THROTTLING_RETRIES) def 
_get_elb(self): elbs = self.elb_conn.get_all_load_balancers() for elb in elbs: @@ -601,16 +683,17 @@ def _get_elb_connection(self): try: return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: self.module.fail_json(msg=str(e)) def _get_ec2_connection(self): try: return connect_to_aws(boto.ec2, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, StandardError) as e: self.module.fail_json(msg=str(e)) + @_throttleable_operation(_THROTTLING_RETRIES) def _delete_elb(self): # True if succeeds, exception raised if not result = self.elb_conn.delete_load_balancer(name=self.name) @@ -627,6 +710,16 @@ def _create_elb(self): subnets=self.subnets, scheme=self.scheme) if self.elb: + # HACK: Work around a boto bug in which the listeners attribute is + # always set to the listeners argument to create_load_balancer, and + # not the complex_listeners + # We're not doing a self.elb = self._get_elb here because there + # might be eventual consistency issues and it doesn't necessarily + # make sense to wait until the ELB gets returned from the EC2 API. 
+ # This is necessary in the event we hit the throttling errors and + # need to retry ensure_ok + # See https://github.com/boto/boto/issues/3526 + self.elb.listeners = self.listeners self.changed = True self.status = 'created' @@ -734,7 +827,7 @@ def _listener_as_tuple(self, listener): def _enable_zones(self, zones): try: self.elb.enable_zones(zones) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: if "Invalid Availability Zone" in e.error_message: self.module.fail_json(msg=e.error_message) else: @@ -744,7 +837,7 @@ def _enable_zones(self, zones): def _disable_zones(self, zones): try: self.elb.disable_zones(zones) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: if "Invalid Availability Zone" in e.error_message: self.module.fail_json(msg=e.error_message) else: @@ -893,7 +986,7 @@ def _set_idle_timeout(self): self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings) def _policy_name(self, policy_type): - return __file__.split('/')[-1].replace('_', '-') + '-' + policy_type + return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type def _create_policy(self, policy_param, policy_meth, policy): getattr(self.elb_conn, policy_meth )(policy_param, self.elb.name, policy) @@ -995,10 +1088,14 @@ def select_stickiness_policy(self): def _get_backend_policies(self): """Get a list of backend policies""" - return [ - str(backend.instance_port) + ':' + policy.policy_name for backend in self.elb.backends - for policy in backend.policies - ] + policies = [] + if self.elb.backends is not None: + for backend in self.elb.backends: + if backend.policies is not None: + for policy in backend.policies: + policies.append(str(backend.instance_port) + ':' + policy.policy_name) + + return policies def _set_backend_policies(self): """Sets policies for all backends""" @@ -1031,9 +1128,10 @@ def _set_backend_policies(self): def 
_get_proxy_protocol_policy(self): """Find out if the elb has a proxy protocol enabled""" - for policy in self.elb.policies.other_policies: - if policy.policy_name == 'ProxyProtocol-policy': - return policy.policy_name + if self.elb.policies is not None and self.elb.policies.other_policies is not None: + for policy in self.elb.policies.other_policies: + if policy.policy_name == 'ProxyProtocol-policy': + return policy.policy_name return None @@ -1054,11 +1152,20 @@ def _diff_list(self, a, b): b = set(b) return [aa for aa in a if aa not in b] + def _get_instance_ids(self): + """Get the current list of instance ids installed in the elb""" + instances = [] + if self.elb.instances is not None: + for instance in self.elb.instances: + instances.append(instance.id) + + return instances + def _set_instance_ids(self): """Register or deregister instances from an lb instance""" assert_instances = self.instance_ids or [] - has_instances = [has_instance.id for has_instance in self.elb.instances] + has_instances = self._get_instance_ids() add_instances = self._diff_list(assert_instances, has_instances) if add_instances: @@ -1071,6 +1178,41 @@ def _set_instance_ids(self): self.elb_conn.deregister_instances(self.elb.name, remove_instances) self.changed = True + def _set_tags(self): + """Add/Delete tags""" + if self.tags is None: + return + + params = {'LoadBalancerNames.member.1': self.name} + + tagdict = dict() + + # get the current list of tags from the ELB, if ELB exists + if self.elb: + current_tags = self.elb_conn.get_list('DescribeTags', params, + [('member', Tag)]) + tagdict = dict((tag.Key, tag.Value) for tag in current_tags + if hasattr(tag, 'Key')) + + # Add missing tags + dictact = dict(set(self.tags.items()) - set(tagdict.items())) + if dictact: + for i, key in enumerate(dictact): + params['Tags.member.%d.Key' % (i + 1)] = key + params['Tags.member.%d.Value' % (i + 1)] = dictact[key] + + self.elb_conn.make_request('AddTags', params) + self.changed=True + + # Remove extra 
tags + dictact = dict(set(tagdict.items()) - set(self.tags.items())) + if dictact: + for i, key in enumerate(dictact): + params['Tags.member.%d.Key' % (i + 1)] = key + + self.elb_conn.make_request('RemoveTags', params) + self.changed=True + def _get_health_check_target(self): """Compose target string from healthcheck parameters""" protocol = self.health_check['ping_protocol'].upper() @@ -1105,7 +1247,8 @@ def main(): stickiness={'default': None, 'required': False, 'type': 'dict'}, access_logs={'default': None, 'required': False, 'type': 'dict'}, wait={'default': False, 'type': 'bool', 'required': False}, - wait_timeout={'default': 60, 'type': 'int', 'required': False} + wait_timeout={'default': 60, 'type': 'int', 'required': False}, + tags={'default': None, 'required': False, 'type': 'dict'} ) ) @@ -1142,9 +1285,10 @@ def main(): stickiness = module.params['stickiness'] wait = module.params['wait'] wait_timeout = module.params['wait_timeout'] + tags = module.params['tags'] if state == 'present' and not listeners: - module.fail_json(msg="At least one port is required for ELB creation") + module.fail_json(msg="At least one listener is required for ELB creation") if state == 'present' and not (zones or subnets): module.fail_json(msg="At least one availability zone or subnet is required for ELB creation") @@ -1156,7 +1300,13 @@ def main(): security_group_ids = [] try: ec2 = ec2_connect(module) - grp_details = ec2.get_all_security_groups() + if subnets: # We have at least one subnet, ergo this is a VPC + vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params) + vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id + filters = {'vpc_id': vpc_id} + else: + filters = None + grp_details = ec2.get_all_security_groups(filters=filters) for group_name in security_group_names: if isinstance(group_name, basestring): @@ -1164,7 +1314,7 @@ def main(): group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ] 
security_group_ids.extend(group_id) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg = str(e)) @@ -1173,7 +1323,7 @@ def main(): subnets, purge_subnets, scheme, connection_draining_timeout, idle_timeout, cross_az_load_balancing, - access_logs, stickiness, wait, wait_timeout, + access_logs, stickiness, wait, wait_timeout, tags, region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids, **aws_connect_params) diff --git a/cloud/amazon/ec2_facts.py b/cloud/amazon/ec2_facts.py index 5147428f646..498cf9c2dfe 100644 --- a/cloud/amazon/ec2_facts.py +++ b/cloud/amazon/ec2_facts.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_facts @@ -42,27 +46,30 @@ EXAMPLES = ''' # Conditional example - name: Gather facts - action: ec2_facts + ec2_facts: - name: Conditional - action: debug msg="This instance is a t1.micro" + debug: + msg: "This instance is a t1.micro" when: ansible_ec2_instance_type == "t1.micro" ''' - + import socket import re socket.setdefaulttimeout(5) -class Ec2Metadata(object): +class Ec2Metadata(object): ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/' - ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key' + ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key' ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/' AWS_REGIONS = ('ap-northeast-1', + 'ap-northeast-2', 'ap-southeast-1', 'ap-southeast-2', + 'ap-south-1', 'eu-central-1', 'eu-west-1', 'sa-east-1', @@ -73,12 +80,12 @@ class Ec2Metadata(object): ) def __init__(self, module, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None): - self.module = module + self.module = module self.uri_meta = ec2_metadata_uri 
or self.ec2_metadata_uri self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri - self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri - self._data = {} - self._prefix = 'ansible_ec2_%s' + self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri + self._data = {} + self._prefix = 'ansible_ec2_%s' def _fetch(self, url): (response, info) = fetch_url(self.module, url, force=True) @@ -101,7 +108,7 @@ def _mangle_fields(self, fields, uri, filter_patterns=['public-keys-0']): for pattern in filter_patterns: for key in new_fields.keys(): match = re.search(pattern, key) - if match: + if match: new_fields.pop(key) return new_fields @@ -121,7 +128,7 @@ def fetch(self, uri, recurse=True): content = self._fetch(new_uri) if field == 'security-groups': sg_fields = ",".join(content.split('\n')) - self._data['%s' % (new_uri)] = sg_fields + self._data['%s' % (new_uri)] = sg_fields else: self._data['%s' % (new_uri)] = content @@ -129,7 +136,7 @@ def fix_invalid_varnames(self, data): """Change ':'' and '-' to '_' to ensure valid template variable names""" for (key, value) in data.items(): if ':' in key or '-' in key: - newkey = key.replace(':','_').replace('-','_') + newkey = key.replace(':', '_').replace('-', '_') del data[key] data[newkey] = value @@ -153,7 +160,7 @@ def add_ec2_region(self, data): data['ansible_ec2_placement_region'] = region def run(self): - self.fetch(self.uri_meta) # populate _data + self.fetch(self.uri_meta) # populate _data data = self._mangle_fields(self._data, self.uri_meta) data[self._prefix % 'user-data'] = self._fetch(self.uri_user) data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh) @@ -161,12 +168,13 @@ def run(self): self.add_ec2_region(data) return data + def main(): argument_spec = url_argument_spec() module = AnsibleModule( - argument_spec = argument_spec, - supports_check_mode = True, + argument_spec=argument_spec, + supports_check_mode=True, ) ec2_facts = Ec2Metadata(module).run() @@ -174,8 +182,10 @@ def main(): 
module.exit_json(**ec2_facts_result) + # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_group.py b/cloud/amazon/ec2_group.py index 6a696eacc6c..b381218f496 100644 --- a/cloud/amazon/ec2_group.py +++ b/cloud/amazon/ec2_group.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_group @@ -163,6 +167,10 @@ def validate_rule(module, rule): VALID_PARAMS = ('cidr_ip', 'group_id', 'group_name', 'group_desc', 'proto', 'from_port', 'to_port') + + if not isinstance(rule, dict): + module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule)) + for k in rule: if k not in VALID_PARAMS: module.fail_json(msg='Invalid rule parameter \'{}\''.format(k)) @@ -213,7 +221,7 @@ def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id): group_id = group.id groups[group_id] = group groups[group_name] = group - elif group_name in groups: + elif group_name in groups and (vpc_id is None or groups[group_name].vpc_id == vpc_id): group_id = groups[group_name].id else: if not rule.get('group_desc', '').strip(): @@ -285,8 +293,9 @@ def main(): if group: '''found a match, delete it''' try: - group.delete() - except Exception, e: + if not module.check_mode: + group.delete() + except Exception as e: module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e)) else: group = None @@ -425,20 +434,21 @@ def main(): src_group_id=grantGroup, cidr_ip=thisip) changed = True - elif vpc_id and not module.check_mode: + elif vpc_id: # when using a vpc, but no egress rules are specified, # we add in a default allow all out rule, which was the # default behavior before egress rules were added default_egress_rule = 
'out--1-None-None-None-0.0.0.0/0' if default_egress_rule not in groupRules: - ec2.authorize_security_group_egress( - group_id=group.id, - ip_protocol=-1, - from_port=None, - to_port=None, - src_group_id=None, - cidr_ip='0.0.0.0/0' - ) + if not module.check_mode: + ec2.authorize_security_group_egress( + group_id=group.id, + ip_protocol=-1, + from_port=None, + to_port=None, + src_group_id=None, + cidr_ip='0.0.0.0/0' + ) changed = True else: # make sure the default egress rule is not removed @@ -469,4 +479,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_key.py b/cloud/amazon/ec2_key.py index 3fe7b959f71..69d96fed977 100644 --- a/cloud/amazon/ec2_key.py +++ b/cloud/amazon/ec2_key.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_key @@ -149,7 +153,7 @@ def main(): time.sleep(1) if not action_complete: module.fail_json(msg="timed out while waiting for the key to be removed") - except Exception, e: + except Exception as e: module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e)) key = None changed = True @@ -241,4 +245,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 8b2fe3fc9c0..7a8754b8994 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -14,12 +14,16 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = """ --- module: ec2_lc short_description: Create or delete AWS Autoscaling Launch Configurations description: - - Can create or delete AwS Autoscaling Configurations + - Can create or delete AWS Autoscaling Configurations - Works with the ec2_asg module to manage Autoscaling Groups notes: - "Amazon ASG Autoscaling Launch Configurations are immutable once created, so modifying the configuration @@ -53,7 +57,7 @@ required: false security_groups: description: - - A list of security groups into which instances should be found + - A list of security groups to apply to the instances. For VPC instances, specify security group IDs. For EC2-Classic, specify either security group names or IDs. required: false volumes: description: @@ -126,6 +130,8 @@ device_type: io1 iops: 3000 delete_on_termination: true + - device_name: /dev/sdb + ephemeral: ephemeral0 ''' @@ -217,7 +223,7 @@ def create_launch_config(connection, module): connection.create_launch_configuration(lc) launch_configs = connection.get_all_launch_configurations(names=[name]) changed = True - except BotoServerError, e: + except BotoServerError as e: module.fail_json(msg=str(e)) result = dict( @@ -296,7 +302,7 @@ def main(): try: connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: module.fail_json(msg=str(e)) state = module.params.get('state') @@ -306,4 +312,5 @@ def main(): elif state == 'absent': delete_launch_config(connection, module) -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_metric_alarm.py b/cloud/amazon/ec2_metric_alarm.py index 8ae7195f2e1..984211bc277 100644 --- a/cloud/amazon/ec2_metric_alarm.py +++ b/cloud/amazon/ec2_metric_alarm.py @@ -14,12 +14,16 @@ # You should have received a copy of the GNU 
General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = """ module: ec2_metric_alarm short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'" description: - - Can create or delete AWS metric alarms - - Metrics you wish to alarm on must already exist + - Can create or delete AWS metric alarms. + - Metrics you wish to alarm on must already exist. version_added: "1.6" author: "Zacharie Eakin (@zeekin)" options: @@ -165,7 +169,7 @@ def create_metric_alarm(connection, module): connection.create_alarm(alm) changed = True alarms = connection.describe_alarms(alarm_names=[name]) - except BotoServerError, e: + except BotoServerError as e: module.fail_json(msg=str(e)) else: @@ -200,7 +204,7 @@ def create_metric_alarm(connection, module): try: if changed: connection.create_alarm(alarm) - except BotoServerError, e: + except BotoServerError as e: module.fail_json(msg=str(e)) result = alarms[0] module.exit_json(changed=changed, name=result.name, @@ -232,7 +236,7 @@ def delete_metric_alarm(connection, module): try: connection.delete_alarms([name]) module.exit_json(changed=True) - except BotoServerError, e: + except BotoServerError as e: module.fail_json(msg=str(e)) else: module.exit_json(changed=False) @@ -272,7 +276,7 @@ def main(): if region: try: connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") diff --git a/cloud/amazon/ec2_scaling_policy.py b/cloud/amazon/ec2_scaling_policy.py index 3c412232e21..bea3bfbca8a 100644 --- a/cloud/amazon/ec2_scaling_policy.py +++ b/cloud/amazon/ec2_scaling_policy.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with 
Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = """ module: ec2_scaling_policy short_description: Create or delete AWS scaling policies for Autoscaling groups @@ -106,7 +110,7 @@ def create_scaling_policy(connection, module): connection.create_scaling_policy(sp) policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0] module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step) - except BotoServerError, e: + except BotoServerError as e: module.fail_json(msg=str(e)) else: policy = scalingPolicies[0] @@ -133,7 +137,7 @@ def create_scaling_policy(connection, module): connection.create_scaling_policy(policy) policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0] module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step) - except BotoServerError, e: + except BotoServerError as e: module.fail_json(msg=str(e)) @@ -147,7 +151,7 @@ def delete_scaling_policy(connection, module): try: connection.delete_policy(sp_name, asg_name) module.exit_json(changed=True) - except BotoServerError, e: + except BotoServerError as e: module.exit_json(changed=False, msg=str(e)) else: module.exit_json(changed=False) @@ -178,7 +182,7 @@ def main(): try: connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: module.fail_json(msg = str(e)) if state == 'present': diff --git a/cloud/amazon/ec2_snapshot.py 
b/cloud/amazon/ec2_snapshot.py index a3200efe847..b962e187608 100644 --- a/cloud/amazon/ec2_snapshot.py +++ b/cloud/amazon/ec2_snapshot.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_snapshot @@ -145,7 +149,7 @@ def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None): if not now: now = datetime.datetime.utcnow() - youngest_snapshot = min(snapshots, key=_get_snapshot_starttime) + youngest_snapshot = max(snapshots, key=_get_snapshot_starttime) # See if the snapshot is younger that the given max age snapshot_start = datetime.datetime.strptime(youngest_snapshot.start_time, '%Y-%m-%dT%H:%M:%S.000Z') @@ -193,7 +197,7 @@ def create_snapshot(module, ec2, state=None, description=None, wait=None, if instance_id: try: volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name}) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) if not volumes: @@ -206,7 +210,7 @@ def create_snapshot(module, ec2, state=None, description=None, wait=None, module.fail_json(msg = 'snapshot_id must be set when state is absent') try: ec2.delete_snapshot(snapshot_id) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: # exception is raised if snapshot does not exist if e.error_code == 'InvalidSnapshot.NotFound': module.exit_json(changed=False) @@ -219,7 +223,7 @@ def create_snapshot(module, ec2, state=None, description=None, wait=None, if last_snapshot_min_age > 0: try: current_snapshots = ec2.get_all_snapshots(filters={'volume_id': volume_id}) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg="%s: %s" % 
(e.error_code, e.error_message)) last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds @@ -236,7 +240,7 @@ def create_snapshot(module, ec2, state=None, description=None, wait=None, if snapshot_tags: for k, v in snapshot_tags.items(): snapshot.add_tag(k, v) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) module.exit_json(changed=changed, diff --git a/cloud/amazon/ec2_tag.py b/cloud/amazon/ec2_tag.py index e5aa866cc4d..0fe20e1786d 100644 --- a/cloud/amazon/ec2_tag.py +++ b/cloud/amazon/ec2_tag.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_tag @@ -89,7 +93,7 @@ instance: "{{ item.id }}" region: eu-west-1 state: list - with_items: ec2.tagged_instances + with_items: "{{ ec2.tagged_instances }}" register: ec2_vol - name: tag the volumes @@ -103,6 +107,22 @@ with_subelements: - ec2_vol.results - volumes + +# Playbook example of listing tags on an instance +tasks: +- name: get ec2 facts + action: ec2_facts + +- name: list tags on an instance + ec2_tag: + region: "{{ ansible_ec2_placement_region }}" + resource: "{{ ansible_ec2_instance_id }}" + state: list + register: ec2_tags + +- name: list tags, such as Name, env if exist + shell: echo {{ ec2_tags.tags.Name }} {{ ec2_tags.tags.env }} + ''' diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 668895df9eb..cd76703f432 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_vol @@ -135,7 +139,7 @@ - ec2_vol: instance: "{{ item.id }} " volume_size: 5 - with_items: ec2.instances + with_items: "{{ ec2.instances }}" register: ec2_vol # Example: Launch an instance and then add a volume if not already attached @@ -156,7 +160,7 @@ instance: "{{ item.id }}" name: my_existing_volume_Name_tag device_name: /dev/xvdf - with_items: ec2.instances + with_items: "{{ ec2.instances }}" register: ec2_vol # Remove a volume @@ -264,7 +268,7 @@ def get_volume(module, ec2): volume_ids = [id] try: vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) if not vols: @@ -290,7 +294,7 @@ def get_volumes(module, ec2): vols = ec2.get_all_volumes() else: vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance}) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) return vols @@ -344,7 +348,7 @@ def create_volume(module, ec2, zone): if name: ec2.create_tags([volume.id], {"Name": name}) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) return volume, changed @@ -369,7 +373,7 @@ def attach_volume(module, ec2, volume, instance): device_name = '/dev/sdf' else: device_name = '/dev/xvdf' - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) if volume.attachment_state() is not None: @@ -387,7 +391,7 @@ def attach_volume(module, ec2, volume, instance): time.sleep(3) volume.update() changed = True - except boto.exception.BotoServerError, e: + 
except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) modify_dot_attribute(module, ec2, instance, device_name) @@ -404,7 +408,7 @@ def modify_dot_attribute(module, ec2, instance, device_name): try: instance.update() dot = instance.block_device_mapping[device_name].delete_on_termination - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) if delete_on_termination != dot: @@ -419,7 +423,7 @@ def modify_dot_attribute(module, ec2, instance, device_name): time.sleep(3) instance.update() changed = True - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) return changed @@ -524,7 +528,7 @@ def main(): if region: try: ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") diff --git a/cloud/amazon/ec2_vpc.py b/cloud/amazon/ec2_vpc.py index e05755943c3..5b0cfc51b0e 100644 --- a/cloud/amazon/ec2_vpc.py +++ b/cloud/amazon/ec2_vpc.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_vpc @@ -270,7 +274,7 @@ def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=Non Checks if the remote routes match the local routes. 
route_tables : Route_tables parameter in the module - vpc_conn : The VPC conection object + vpc_conn : The VPC connection object module : The module object vpc : The vpc object for this route table igw : The internet gateway object for this vpc @@ -362,7 +366,7 @@ def create_vpc(module, vpc_conn): pending = False # sometimes vpc_conn.create_vpc() will return a vpc that can't be found yet by vpc_conn.get_all_vpcs() # when that happens, just wait a bit longer and try again - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: if e.error_code != 'InvalidVpcID.NotFound': raise if pending: @@ -371,7 +375,7 @@ def create_vpc(module, vpc_conn): # waiting took too long module.fail_json(msg = "wait for vpc availability timeout on %s" % time.asctime()) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) # Done with base VPC, now change to attributes and features. 
@@ -408,7 +412,7 @@ def create_vpc(module, vpc_conn): for subnet in subnets: add_subnet = True subnet_tags_current = True - new_subnet_tags = subnet.get('resource_tags', None) + new_subnet_tags = subnet.get('resource_tags', {}) subnet_tags_delete = [] for csn in current_subnets: @@ -429,7 +433,7 @@ def create_vpc(module, vpc_conn): subnet_tags_delete = [key[0] for key in subnet_tags_delete] delete_subnet_tag = vpc_conn.delete_tags(csn.id, subnet_tags_delete) changed = True - except EC2ResponseError, e: + except EC2ResponseError as e: module.fail_json(msg='Unable to delete resource tag, error {0}'.format(e)) # Add new subnet tags if not current @@ -438,13 +442,13 @@ def create_vpc(module, vpc_conn): changed = True create_subnet_tag = vpc_conn.create_tags(csn.id, new_subnet_tags) - except EC2ResponseError, e: + except EC2ResponseError as e: module.fail_json(msg='Unable to create resource tag, error: {0}'.format(e)) if add_subnet: try: new_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None)) - new_subnet_tags = subnet.get('resource_tags', None) + new_subnet_tags = subnet.get('resource_tags', {}) if new_subnet_tags: # Sometimes AWS takes its time to create a subnet and so using new subnets's id # to create tags results in exception. 
@@ -456,7 +460,7 @@ def create_vpc(module, vpc_conn): vpc_conn.create_tags(new_subnet.id, new_subnet_tags) changed = True - except EC2ResponseError, e: + except EC2ResponseError as e: module.fail_json(msg='Unable to create subnet {0}, error: {1}'.format(subnet['cidr'], e)) # Now delete all absent subnets @@ -469,7 +473,7 @@ def create_vpc(module, vpc_conn): try: vpc_conn.delete_subnet(csubnet.id) changed = True - except EC2ResponseError, e: + except EC2ResponseError as e: module.fail_json(msg='Unable to delete subnet {0}, error: {1}'.format(csubnet.cidr_block, e)) # Handle Internet gateway (create/delete igw) @@ -484,7 +488,7 @@ def create_vpc(module, vpc_conn): igw = vpc_conn.create_internet_gateway() vpc_conn.attach_internet_gateway(igw.id, vpc.id) changed = True - except EC2ResponseError, e: + except EC2ResponseError as e: module.fail_json(msg='Unable to create Internet Gateway, error: {0}'.format(e)) else: # Set igw variable to the current igw instance for use in route tables. @@ -495,7 +499,7 @@ def create_vpc(module, vpc_conn): vpc_conn.detach_internet_gateway(igws[0].id, vpc.id) vpc_conn.delete_internet_gateway(igws[0].id) changed = True - except EC2ResponseError, e: + except EC2ResponseError as e: module.fail_json(msg='Unable to delete Internet Gateway, error: {0}'.format(e)) if igw is not None: @@ -503,7 +507,7 @@ def create_vpc(module, vpc_conn): # Handle route tables - this may be worth splitting into a # different module but should work fine here. The strategy to stay - # indempotent is to basically build all the route tables as + # idempotent is to basically build all the route tables as # defined, track the route table ids, and then run through the # remote list of route tables and delete any that we didn't # create. 
This shouldn't interrupt traffic in theory, but is the @@ -571,7 +575,7 @@ def create_vpc(module, vpc_conn): all_route_tables.append(new_rt) changed = True - except EC2ResponseError, e: + except EC2ResponseError as e: module.fail_json( msg='Unable to create and associate route table {0}, error: ' \ '{1}'.format(rt, e) @@ -600,7 +604,7 @@ def create_vpc(module, vpc_conn): if not is_main: vpc_conn.delete_route_table(rt.id) changed = True - except EC2ResponseError, e: + except EC2ResponseError as e: module.fail_json(msg='Unable to delete old route table {0}, error: {1}'.format(rt.id, e)) vpc_dict = get_vpc_info(vpc) @@ -678,7 +682,7 @@ def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None): vpc_conn.delete_route_table(rt.id) vpc_conn.delete_vpc(vpc.id) - except EC2ResponseError, e: + except EC2ResponseError as e: module.fail_json( msg='Unable to delete VPC {0}, error: {1}'.format(vpc.id, e) ) @@ -721,7 +725,7 @@ def main(): if region: try: vpc_conn = connect_to_aws(boto.vpc, region, **aws_connect_kwargs) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg = str(e)) else: module.fail_json(msg="region must be specified") @@ -742,4 +746,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_vpc_net.py b/cloud/amazon/ec2_vpc_net.py index 23ce175b92b..7b7e0e46477 100644 --- a/cloud/amazon/ec2_vpc_net.py +++ b/cloud/amazon/ec2_vpc_net.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_vpc_net @@ -25,7 +29,7 @@ options: name: description: - - The name to give your VPC. This is used in combination with the cidr_block paramater to determine if a VPC already exists. 
+ - The name to give your VPC. This is used in combination with the cidr_block parameter to determine if a VPC already exists. required: yes cidr_block: description: @@ -122,7 +126,7 @@ def vpc_exists(module, vpc, name, cidr_block, multi): try: matching_vpcs=vpc.get_all_vpcs(filters={'tag:Name' : name, 'cidr-block' : cidr_block}) - except Exception, e: + except Exception as e: e_msg=boto_exception(e) module.fail_json(msg=e_msg) @@ -146,11 +150,12 @@ def update_vpc_tags(vpc, module, vpc_obj, tags, name): try: current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id})) if cmp(tags, current_tags): - vpc.create_tags(vpc_obj.id, tags) + if not module.check_mode: + vpc.create_tags(vpc_obj.id, tags) return True else: return False - except Exception, e: + except Exception as e: e_msg=boto_exception(e) module.fail_json(msg=e_msg) @@ -158,7 +163,8 @@ def update_vpc_tags(vpc, module, vpc_obj, tags, name): def update_dhcp_opts(connection, module, vpc_obj, dhcp_id): if vpc_obj.dhcp_options_id != dhcp_id: - connection.associate_dhcp_options(dhcp_id, vpc_obj.id) + if not module.check_mode: + connection.associate_dhcp_options(dhcp_id, vpc_obj.id) return True else: return False @@ -194,6 +200,7 @@ def main(): module = AnsibleModule( argument_spec=argument_spec, + supports_check_mode=True ) if not HAS_BOTO: @@ -216,7 +223,7 @@ def main(): if region: try: connection = connect_to_aws(boto.vpc, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") @@ -231,41 +238,46 @@ def main(): if vpc_obj is None: try: - vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy) changed = True - except BotoServerError, e: + if not module.check_mode: + vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy) + else: + 
module.exit_json(changed=changed) + except BotoServerError as e: module.fail_json(msg=e) if dhcp_id is not None: try: if update_dhcp_opts(connection, module, vpc_obj, dhcp_id): changed = True - except BotoServerError, e: + except BotoServerError as e: module.fail_json(msg=e) if tags is not None or name is not None: try: if update_vpc_tags(connection, module, vpc_obj, tags, name): changed = True - except BotoServerError, e: + except BotoServerError as e: module.fail_json(msg=e) # Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute # which is needed in order to detect the current status of DNS options. For now we just update # the attribute each time and is not used as a changed-factor. try: - connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support) - connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames) - except BotoServerError, e: + if not module.check_mode: + connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support) + connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames) + except BotoServerError as e: e_msg=boto_exception(e) module.fail_json(msg=e_msg) - # get the vpc obj again in case it has changed - try: - vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0] - except BotoServerError, e: - e_msg=boto_exception(e) - module.fail_json(msg=e_msg) + if not module.check_mode: + # get the vpc obj again in case it has changed + try: + vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0] + except BotoServerError as e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj)) @@ -276,10 +288,11 @@ def main(): if vpc_obj is not None: try: - connection.delete_vpc(vpc_obj.id) + if not module.check_mode: + connection.delete_vpc(vpc_obj.id) vpc_obj = None changed = True - except BotoServerError, e: + except BotoServerError as e: e_msg = boto_exception(e) module.fail_json(msg="%s. 
You may want to use the ec2_vpc_subnet, ec2_vpc_igw, " "and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg) diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index 61d4458f39a..00098b171ef 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ --- module: elasticache @@ -225,7 +229,7 @@ def create(self): cache_subnet_group_name=self.cache_subnet_group, preferred_availability_zone=self.zone, port=self.cache_port) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: self.module.fail_json(msg=e.message) self._refresh_data() @@ -252,7 +256,7 @@ def delete(self): try: response = self.conn.delete_cache_cluster(cache_cluster_id=self.name) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: self.module.fail_json(msg=e.message) cache_cluster_data = response['DeleteCacheClusterResponse']['DeleteCacheClusterResult']['CacheCluster'] self._refresh_data(cache_cluster_data) @@ -301,7 +305,7 @@ def modify(self): security_group_ids=self.security_group_ids, apply_immediately=True, engine_version=self.cache_engine_version) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: self.module.fail_json(msg=e.message) self._refresh_data() @@ -329,7 +333,7 @@ def reboot(self): try: response = self.conn.reboot_cache_cluster(cache_cluster_id=self.name, cache_node_ids_to_reboot=cache_node_ids) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: self.module.fail_json(msg=e.message) self._refresh_data() @@ -428,7 +432,7 @@ def _get_elasticache_connection(self): region=connect_region, **self.aws_connect_kwargs ) - except 
boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: self.module.fail_json(msg=e.message) def _get_port(self): @@ -554,4 +558,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/elasticache_subnet_group.py b/cloud/amazon/elasticache_subnet_group.py index 0dcf126b170..1e5708c03e3 100644 --- a/cloud/amazon/elasticache_subnet_group.py +++ b/cloud/amazon/elasticache_subnet_group.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: elasticache_subnet_group @@ -113,7 +117,7 @@ def main(): endpoint = "elasticache.%s.amazonaws.com" % region connect_region = RegionInfo(name=region, endpoint=endpoint) conn = ElastiCacheConnection(region=connect_region, **aws_connect_kwargs) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg=e.message) try: @@ -123,7 +127,7 @@ def main(): try: matching_groups = conn.describe_cache_subnet_groups(group_name, max_records=100) exists = len(matching_groups) > 0 - except BotoServerError, e: + except BotoServerError as e: if e.error_code != 'CacheSubnetGroupNotFoundFault': module.fail_json(msg = e.error_message) @@ -139,7 +143,7 @@ def main(): changed_group = conn.modify_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets) changed = True - except BotoServerError, e: + except BotoServerError as e: if e.error_message != 'No modifications were requested.': module.fail_json(msg = e.error_message) else: @@ -151,4 +155,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/iam.py 
b/cloud/amazon/iam.py index 456ddba5e75..79a3e346221 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: iam @@ -53,6 +57,18 @@ - When creating or updating, specify the desired path of the resource. If state is present, it will replace the current path to match what is passed in when they do not match. required: false default: "/" + trust_policy: + description: + - The inline (JSON or YAML) trust policy document that grants an entity permission to assume the role. Mutually exclusive with C(trust_policy_filepath). + required: false + default: null + version_added: "2.2" + trust_policy_filepath: + description: + - The path to the trust policy document that grants an entity permission to assume the role. Mutually exclusive with C(trust_policy). + required: false + default: null + version_added: "2.2" access_key_state: description: - When type is user, it creates, removes, deactivates or activates a user's access key(s). Note that actions apply only to keys specified. @@ -66,7 +82,7 @@ default: '1' access_key_ids: description: - - A list of the keys that you want impacted by the access_key_state paramter. + - A list of the keys that you want impacted by the access_key_state parameter. groups: description: - A list of groups the user should belong to. When update, will gracefully remove groups not listed. 
@@ -124,7 +140,21 @@ name: jdavila state: update groups: "{{ item.created_group.group_name }}" - with_items: new_groups.results + with_items: "{{ new_groups.results }}" + +# Example of role with custom trust policy for Lambda service +- name: Create IAM role with custom trust relationship + iam: + iam_type: role + name: AAALambdaTestRole + state: present + trust_policy: + Version: '2012-10-17' + Statement: + - Action: sts:AssumeRole + Effect: Allow + Principal: + Service: lambda.amazonaws.com ''' @@ -151,6 +181,40 @@ def boto_exception(err): return error +def _paginate(func, attr): + ''' + paginates the results from func by continuously passing in + the returned marker if the results were truncated. this returns + an iterator over the items in the returned response. `attr` is + the name of the attribute to iterate over in the response. + ''' + finished, marker = False, None + while not finished: + res = func(marker=marker) + for item in getattr(res, attr): + yield item + + finished = res.is_truncated == 'false' + if not finished: + marker = res.marker + + +def list_all_groups(iam): + return [item['group_name'] for item in _paginate(iam.get_all_groups, 'groups')] + + +def list_all_users(iam): + return [item['user_name'] for item in _paginate(iam.get_all_users, 'users')] + + +def list_all_roles(iam): + return [item['role_name'] for item in _paginate(iam.list_roles, 'roles')] + + +def list_all_instance_profiles(iam): + return [item['instance_profile_name'] for item in _paginate(iam.list_instance_profiles, 'instance_profiles')] + + def create_user(module, iam, name, pwd, path, key_state, key_count): key_qty = 0 keys = [] @@ -170,7 +234,7 @@ def create_user(module, iam, name, pwd, path, key_state, key_count): key_qty += 1 else: keys = None - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: module.fail_json(changed=False, msg=str(err)) else: user_info = dict(created_user=user_meta, password=pwd, access_keys=keys) @@ -186,14 
+250,13 @@ def delete_user(module, iam, name): iam.delete_access_key(key, name) try: login_profile = iam.get_login_profiles(name).get_login_profile_response - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if ('Cannot find Login Profile') in error_msg: - del_meta = iam.delete_user(name).delete_user_response - else: - iam.delete_login_profile(name) - del_meta = iam.delete_user(name).delete_user_response + else: + iam.delete_login_profile(name) + del_meta = iam.delete_user(name).delete_user_response except Exception as ex: module.fail_json(changed=False, msg="delete failed %s" %ex) if ('must detach all policies first') in error_msg: @@ -201,7 +264,7 @@ def delete_user(module, iam, name): iam.delete_user_policy(name, policy) try: del_meta = iam.delete_user(name) - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if ('must detach all policies first') in error_msg: module.fail_json(changed=changed, msg="All inline polices have been removed. 
Though it appears" @@ -230,7 +293,7 @@ def update_user(module, iam, name, new_name, new_path, key_state, key_count, key [ck['status'] for ck in iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] key_qty = len(current_keys) - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if 'cannot be found' in error_msg and updated: current_keys, status = \ @@ -257,7 +320,7 @@ def update_user(module, iam, name, new_name, new_path, key_state, key_count, key name, new_path=new_path).update_user_response.response_metadata user['updates'] = dict( old_username=name, new_username=new_name, old_path=c_path, new_path=new_path) - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: error_msg = boto_exception(err) module.fail_json(changed=False, msg=str(err)) else: @@ -272,10 +335,10 @@ def update_user(module, iam, name, new_name, new_path, key_state, key_count, key try: iam.create_login_profile(name, pwd) changed = True - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: error_msg = boto_exception(str(err)) if 'Password does not conform to the account password policy' in error_msg: - module.fail_json(changed=False, msg="Passsword doesn't conform to policy") + module.fail_json(changed=False, msg="Password doesn't conform to policy") else: module.fail_json(msg=error_msg) @@ -287,7 +350,7 @@ def update_user(module, iam, name, new_name, new_path, key_state, key_count, key key_qty += 1 changed = True - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: module.fail_json(changed=False, msg=str(err)) if keys and key_state: @@ -298,7 +361,7 @@ def update_user(module, iam, name, new_name, new_path, key_state, key_count, key try: iam.update_access_key( access_key, key_state.capitalize(), user_name=name) - except boto.exception.BotoServerError, err: + except 
boto.exception.BotoServerError as err: module.fail_json(changed=False, msg=str(err)) else: changed = True @@ -306,7 +369,7 @@ def update_user(module, iam, name, new_name, new_path, key_state, key_count, key if key_state == 'remove': try: iam.delete_access_key(access_key, user_name=name) - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: module.fail_json(changed=False, msg=str(err)) else: changed = True @@ -321,7 +384,7 @@ def update_user(module, iam, name, new_name, new_path, key_state, key_count, key iam.get_all_access_keys(name). list_access_keys_result. access_key_metadata] - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: module.fail_json(changed=changed, msg=str(err)) for fk, fks in zip(final_keys, final_key_status): @@ -332,7 +395,7 @@ def update_user(module, iam, name, new_name, new_path, key_state, key_count, key def set_users_groups(module, iam, name, groups, updated=None, new_name=None): - """ Sets groups for a user, will purge groups not explictly passed, while + """ Sets groups for a user, will purge groups not explicitly passed, while retaining pre-existing groups that also are in the new list. """ changed = False @@ -347,7 +410,7 @@ def set_users_groups(module, iam, name, groups, updated=None, rg for rg in frozenset(orig_users_groups).difference(groups)] new_groups = [ ng for ng in frozenset(groups).difference(orig_users_groups)] - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: module.fail_json(changed=changed, msg=str(err)) else: if len(orig_users_groups) > 0: @@ -359,7 +422,7 @@ def set_users_groups(module, iam, name, groups, updated=None, for group in groups: try: iam.add_user_to_group(group, name) - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if ('The group with name %s cannot be found.' 
% group) in error_msg: module.fail_json(changed=False, msg="Group %s doesn't exist" % group) @@ -376,7 +439,7 @@ def create_group(module=None, iam=None, name=None, path=None): try: iam.create_group( name, path).create_group_response.create_group_result.group - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: module.fail_json(changed=changed, msg=str(err)) else: changed = True @@ -387,14 +450,14 @@ def delete_group(module=None, iam=None, name=None): changed = False try: iam.delete_group(name) - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if ('must detach all policies first') in error_msg: for policy in iam.get_all_group_policies(name).list_group_policies_result.policy_names: iam.delete_group_policy(name, policy) try: iam.delete_group(name) - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if ('must detach all policies first') in error_msg: module.fail_json(changed=changed, msg="All inline polices have been removed. 
Though it appears" @@ -423,33 +486,38 @@ def update_group(module=None, iam=None, name=None, new_name=None, new_path=None) iam.update_group(name, new_group_name=new_name, new_path=new_path) changed = True name = new_name - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: module.fail_json(changed=changed, msg=str(err)) return changed, name, new_path, current_group_path -def create_role(module, iam, name, path, role_list, prof_list): +def create_role(module, iam, name, path, role_list, prof_list, trust_policy_doc): changed = False + iam_role_result = None + instance_profile_result = None try: if name not in role_list: changed = True - iam.create_role( - name, path=path).create_role_response.create_role_result.role.role_name + iam_role_result = iam.create_role(name, + assume_role_policy_document=trust_policy_doc, + path=path).create_role_response.create_role_result.role.role_name if name not in prof_list: - iam.create_instance_profile(name, path=path) + instance_profile_result = iam.create_instance_profile(name, + path=path).create_instance_profile_response.create_instance_profile_result.instance_profile iam.add_role_to_instance_profile(name, name) - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: module.fail_json(changed=changed, msg=str(err)) else: - updated_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response. 
- list_roles_result.roles] - return changed, updated_role_list + updated_role_list = list_all_roles(iam) + return changed, updated_role_list, iam_role_result, instance_profile_result def delete_role(module, iam, name, role_list, prof_list): changed = False + iam_role_result = None + instance_profile_result = None try: if name in role_list: cur_ins_prof = [rp['instance_profile_name'] for rp in @@ -460,14 +528,14 @@ def delete_role(module, iam, name, role_list, prof_list): iam.remove_role_from_instance_profile(profile, name) try: iam.delete_role(name) - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if ('must detach all policies first') in error_msg: for policy in iam.list_role_policies(name).list_role_policies_result.policy_names: iam.delete_role_policy(name, policy) try: - iam.delete_role(name) - except boto.exception.BotoServerError, err: + iam_role_result = iam.delete_role(name) + except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if ('must detach all policies first') in error_msg: module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears" @@ -484,13 +552,12 @@ def delete_role(module, iam, name, role_list, prof_list): for prof in prof_list: if name == prof: - iam.delete_instance_profile(name) - except boto.exception.BotoServerError, err: + instance_profile_result = iam.delete_instance_profile(name) + except boto.exception.BotoServerError as err: module.fail_json(changed=changed, msg=str(err)) else: - updated_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response. 
- list_roles_result.roles] - return changed, updated_role_list + updated_role_list = list_all_roles(iam) + return changed, updated_role_list, iam_role_result, instance_profile_result def main(): @@ -509,6 +576,8 @@ def main(): access_key_ids=dict(type='list', default=None, required=False), key_count=dict(type='int', default=1, required=False), name=dict(default=None, required=False), + trust_policy_filepath=dict(default=None, required=False), + trust_policy=dict(type='dict', default=None, required=False), new_name=dict(default=None, required=False), path=dict(default='/', required=False), new_path=dict(default=None, required=False) @@ -517,7 +586,7 @@ def main(): module = AnsibleModule( argument_spec=argument_spec, - mutually_exclusive=[], + mutually_exclusive=[['trust_policy', 'trust_policy_filepath']], ) if not HAS_BOTO: @@ -534,12 +603,15 @@ def main(): new_path = module.params.get('new_path') key_count = module.params.get('key_count') key_state = module.params.get('access_key_state') + trust_policy = module.params.get('trust_policy') + trust_policy_filepath = module.params.get('trust_policy_filepath') + key_ids = module.params.get('access_key_ids') + if key_state: key_state = key_state.lower() if any([n in key_state for n in ['active', 'inactive']]) and not key_ids: module.fail_json(changed=False, msg="At least one access key has to be defined in order" " to use 'active' or 'inactive'") - key_ids = module.params.get('access_key_ids') if iam_type == 'user' and module.params.get('password') is not None: pwd = module.params.get('password') @@ -556,7 +628,22 @@ def main(): if iam_type == 'role' and state == 'update': module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, " - "please specificy present or absent") + "please specify present or absent") + + # check if trust_policy is present -- it can be inline JSON or a file path to a JSON file + if trust_policy_filepath: + try: + with open(trust_policy_filepath, 'r') as json_data: + 
trust_policy_doc = json.dumps(json.load(json_data)) + except Exception as e: + module.fail_json(msg=str(e) + ': ' + trust_policy_filepath) + elif trust_policy: + try: + trust_policy_doc = json.dumps(trust_policy) + except Exception as e: + module.fail_json(msg=str(e) + ': ' + trust_policy) + else: + trust_policy_doc = None region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) @@ -565,30 +652,21 @@ def main(): iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs) else: iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg=str(e)) result = {} changed = False try: - orig_group_list = [gl['group_name'] for gl in iam.get_all_groups(). - list_groups_result. - groups] - - orig_user_list = [ul['user_name'] for ul in iam.get_all_users(). - list_users_result. - users] - - orig_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response. - list_roles_result. - roles] - - orig_prof_list = [ap['instance_profile_name'] for ap in iam.list_instance_profiles(). - list_instance_profiles_response. - list_instance_profiles_result. 
- instance_profiles] - except boto.exception.BotoServerError, err: + orig_group_list = list_all_groups(iam) + + orig_user_list = list_all_users(iam) + + orig_role_list = list_all_roles(iam) + + orig_prof_list = list_all_instance_profiles(iam) + except boto.exception.BotoServerError as err: module.fail_json(msg=err.message) if iam_type == 'user': @@ -668,11 +746,12 @@ def main(): group_exists = name in orig_group_list if state == 'present' and not group_exists: - new_group, changed = create_group(iam=iam, name=name, path=path) + new_group, changed = create_group(module=module, iam=iam, name=name, path=path) module.exit_json(changed=changed, group_name=new_group) elif state in ['present', 'update'] and group_exists: changed, updated_name, updated_path, cur_path = update_group( - iam=iam, name=name, new_name=new_name, new_path=new_path) + module=module, iam=iam, name=name, new_name=new_name, + new_path=new_path) if new_path and new_name: module.exit_json(changed=changed, old_group_name=name, @@ -694,11 +773,11 @@ def main(): elif state == 'update' and not group_exists: module.fail_json( - changed=changed, msg="Update Failed. Group %s doesn't seem to exit!" % name) + changed=changed, msg="Update Failed. Group %s doesn't seem to exist!" 
% name) elif state == 'absent': if name in orig_group_list: - removed_group, changed = delete_group(iam=iam, name=name) + removed_group, changed = delete_group(module=module, iam=iam, name=name) module.exit_json(changed=changed, delete_group=removed_group) else: module.exit_json(changed=changed, msg="Group already absent") @@ -706,17 +785,19 @@ def main(): elif iam_type == 'role': role_list = [] if state == 'present': - changed, role_list = create_role( - module, iam, name, path, orig_role_list, orig_prof_list) + changed, role_list, role_result, instance_profile_result = create_role( + module, iam, name, path, orig_role_list, orig_prof_list, trust_policy_doc) elif state == 'absent': - changed, role_list = delete_role( + changed, role_list, role_result, instance_profile_result = delete_role( module, iam, name, orig_role_list, orig_prof_list) elif state == 'update': module.fail_json( changed=False, msg='Role update not currently supported by boto.') - module.exit_json(changed=changed, roles=role_list) + module.exit_json(changed=changed, roles=role_list, role_result=role_result, + instance_profile_result=instance_profile_result) from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/iam_cert.py b/cloud/amazon/iam_cert.py index 0b269b06753..6e71b5552f5 100644 --- a/cloud/amazon/iam_cert.py +++ b/cloud/amazon/iam_cert.py @@ -13,6 +13,10 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: iam_cert @@ -226,13 +230,13 @@ def main(): state=dict( default=None, required=True, choices=['present', 'absent']), name=dict(default=None, required=False), - cert=dict(default=None, required=False), - key=dict(default=None, required=False), - cert_chain=dict(default=None, required=False), + cert=dict(default=None, required=False, type='path'), + key=dict(default=None, required=False, type='path'), + cert_chain=dict(default=None, required=False, type='path'), new_name=dict(default=None, required=False), path=dict(default='/', required=False), new_path=dict(default=None, required=False), - dup_ok=dict(default=False, required=False, choices=[False, True]) + dup_ok=dict(default=False, required=False, type='bool') ) ) @@ -251,7 +255,7 @@ def main(): iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs) else: iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg=str(e)) state = module.params.get('state') @@ -286,7 +290,7 @@ def main(): try: cert_action(module, iam, name, path, new_name, new_path, state, cert, key, cert_chain, orig_certs, orig_bodies, dup_ok) - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: module.fail_json(changed=changed, msg=str(err), debug=[cert,key]) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index 35c9d4e6cc8..97be3f40512 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -13,6 +13,10 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: iam_policy @@ -94,7 +98,7 @@ policy_name: "READ-ONLY" policy_document: readonlypolicy.json state: present - with_items: new_groups.results + with_items: "{{ new_groups.results }}" # Create a new S3 policy with prefix per user tasks: @@ -139,6 +143,7 @@ def user_action(module, iam, name, policy_name, skip, pdoc, state): current_policies = [cp for cp in iam.get_all_user_policies(name). list_user_policies_result. policy_names] + matching_policies = [] for pol in current_policies: ''' urllib is needed here because boto returns url encoded strings instead @@ -146,19 +151,20 @@ def user_action(module, iam, name, policy_name, skip, pdoc, state): if urllib.unquote(iam.get_user_policy(name, pol). get_user_policy_result.policy_document) == pdoc: policy_match = True + matching_policies.append(pol) if state == 'present': # If policy document does not already exist (either it's changed # or the policy is not present) or if we're not skipping dupes then # make the put call. Note that the put call does a create or update. - if not policy_match or not skip: + if not policy_match or (not skip and policy_name not in matching_policies): changed = True iam.put_user_policy(name, policy_name, pdoc) elif state == 'absent': try: iam.delete_user_policy(name, policy_name) changed = True - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if 'cannot be found.' in error_msg: changed = False @@ -167,7 +173,7 @@ def user_action(module, iam, name, policy_name, skip, pdoc, state): updated_policies = [cp for cp in iam.get_all_user_policies(name). list_user_policies_result. 
policy_names] - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: error_msg = boto_exception(err) module.fail_json(changed=changed, msg=error_msg) @@ -189,23 +195,25 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state): module.fail_json(msg=e.message) try: + matching_policies = [] for pol in current_policies: if urllib.unquote(iam.get_role_policy(name, pol). get_role_policy_result.policy_document) == pdoc: policy_match = True + matching_policies.append(pol) if state == 'present': # If policy document does not already exist (either it's changed # or the policy is not present) or if we're not skipping dupes then # make the put call. Note that the put call does a create or update. - if not policy_match or not skip: + if not policy_match or (not skip and policy_name not in matching_policies): changed = True iam.put_role_policy(name, policy_name, pdoc) elif state == 'absent': try: iam.delete_role_policy(name, policy_name) changed = True - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if 'cannot be found.' in error_msg: changed = False @@ -217,7 +225,7 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state): updated_policies = [cp for cp in iam.list_role_policies(name). list_role_policies_result. policy_names] - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: error_msg = boto_exception(err) module.fail_json(changed=changed, msg=error_msg) @@ -232,25 +240,26 @@ def group_action(module, iam, name, policy_name, skip, pdoc, state): current_policies = [cp for cp in iam.get_all_group_policies(name). list_group_policies_result. policy_names] + matching_policies = [] for pol in current_policies: if urllib.unquote(iam.get_group_policy(name, pol). 
get_group_policy_result.policy_document) == pdoc: policy_match = True - if policy_match: - msg=("The policy document you specified already exists " - "under the name %s." % pol) + matching_policies.append(pol) + msg=("The policy document you specified already exists " + "under the name %s." % pol) if state == 'present': # If policy document does not already exist (either it's changed # or the policy is not present) or if we're not skipping dupes then # make the put call. Note that the put call does a create or update. - if not policy_match or not skip: + if not policy_match or (not skip and policy_name not in matching_policies): changed = True iam.put_group_policy(name, policy_name, pdoc) elif state == 'absent': try: iam.delete_group_policy(name, policy_name) changed = True - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if 'cannot be found.' in error_msg: changed = False @@ -260,7 +269,7 @@ def group_action(module, iam, name, policy_name, skip, pdoc, state): updated_policies = [cp for cp in iam.get_all_group_policies(name). list_group_policies_result. 
policy_names] - except boto.exception.BotoServerError, err: + except boto.exception.BotoServerError as err: error_msg = boto_exception(err) module.fail_json(changed=changed, msg=error_msg) @@ -277,7 +286,7 @@ def main(): iam_name=dict(default=None, required=False), policy_name=dict(default=None, required=True), policy_document=dict(default=None, required=False), - policy_json=dict(default=None, required=False), + policy_json=dict(type='json', default=None, required=False), skip_duplicates=dict(type='bool', default=True, required=False) )) @@ -320,7 +329,7 @@ def main(): iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs) else: iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg=str(e)) changed = False @@ -344,4 +353,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 5792ca39642..41438da1843 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rds @@ -40,9 +44,10 @@ db_engine: description: - The type of database. Used only when command=create. + - mariadb was added in version 2.2 required: false default: null - choices: [ 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'] + choices: [ 'mariadb', 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora'] size: description: - Size in gigabytes of the initial storage for the DB instance. 
Used only when command=create or command=modify. @@ -114,7 +119,7 @@ description: - Port number that the DB instance uses for connections. Used only when command=create or command=replicate. - Prior to 2.0 it always defaults to null and the API would use 3306, it had to be set to other DB default values when not using MySql. - Starting at 2.0 it auotmaticaly defaults to what is expected for each c(db_engine). + Starting at 2.0 it automatically defaults to what is expected for each c(db_engine). required: false default: 3306 for mysql, 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL. upgrade: @@ -222,7 +227,9 @@ author: - "Bruce Pennypacker (@bpennypacker)" - "Will Thames (@willthames)" - +extends_documentation_fragment: + - aws + - ec2 ''' # FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD @@ -298,9 +305,9 @@ instance_name: MyNewInstanceName region: us-west-2 vpc_security_groups: sg-xxx945xx - -- debug: msg="The new db endpoint is {{ rds.instance.endpoint }}" +- debug: + msg: "The new db endpoint is {{ rds.instance.endpoint }}" ''' import sys @@ -319,6 +326,8 @@ has_rds2 = False DEFAULT_PORTS= { + 'aurora': 3306, + 'mariadb': 3306, 'mysql': 3306, 'oracle': 1521, 'sqlserver': 1433, @@ -342,19 +351,19 @@ class RDSConnection: def __init__(self, module, region, **aws_connect_params): try: self.connection = connect_to_aws(boto.rds, region, **aws_connect_params) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg=e.error_message) def get_db_instance(self, instancename): try: return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0]) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: return None def get_db_snapshot(self, snapshotid): try: return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0]) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: return 
None def create_db_instance(self, instance_name, size, instance_class, db_engine, @@ -364,63 +373,63 @@ def create_db_instance(self, instance_name, size, instance_class, db_engine, result = self.connection.create_dbinstance(instance_name, size, instance_class, username, password, **params) return RDSDBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def create_db_instance_read_replica(self, instance_name, source_instance, **params): try: result = self.connection.createdb_instance_read_replica(instance_name, source_instance, **params) return RDSDBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def delete_db_instance(self, instance_name, **params): try: result = self.connection.delete_dbinstance(instance_name, **params) return RDSDBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def delete_db_snapshot(self, snapshot): try: result = self.connection.delete_dbsnapshot(snapshot) return RDSSnapshot(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def modify_db_instance(self, instance_name, **params): try: result = self.connection.modify_dbinstance(instance_name, **params) return RDSDBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def reboot_db_instance(self, instance_name, **params): try: result = self.connection.reboot_dbinstance(instance_name) return RDSDBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): try: result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, 
**params) return RDSDBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def create_db_snapshot(self, snapshot, instance_name, **params): try: result = self.connection.create_dbsnapshot(snapshot, instance_name) return RDSSnapshot(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def promote_read_replica(self, instance_name, **params): try: result = self.connection.promote_read_replica(instance_name, **params) return RDSDBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) @@ -428,7 +437,7 @@ class RDS2Connection: def __init__(self, module, region, **aws_connect_params): try: self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg=e.error_message) def get_db_instance(self, instancename): @@ -436,9 +445,9 @@ def get_db_instance(self, instancename): dbinstances = self.connection.describe_db_instances(db_instance_identifier=instancename)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'] result = RDS2DBInstance(dbinstances[0]) return result - except boto.rds2.exceptions.DBInstanceNotFound, e: + except boto.rds2.exceptions.DBInstanceNotFound as e: return None - except Exception, e: + except Exception as e: raise e def get_db_snapshot(self, snapshotid): @@ -446,7 +455,7 @@ def get_db_snapshot(self, snapshotid): snapshots = self.connection.describe_db_snapshots(db_snapshot_identifier=snapshotid, snapshot_type='manual')['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots'] result = RDS2Snapshot(snapshots[0]) return result - except boto.rds2.exceptions.DBSnapshotNotFound, e: + except boto.rds2.exceptions.DBSnapshotNotFound as e: return None def create_db_instance(self, 
instance_name, size, instance_class, db_engine, @@ -455,63 +464,63 @@ def create_db_instance(self, instance_name, size, instance_class, db_engine, result = self.connection.create_db_instance(instance_name, size, instance_class, db_engine, username, password, **params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance'] return RDS2DBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def create_db_instance_read_replica(self, instance_name, source_instance, **params): try: result = self.connection.create_db_instance_read_replica(instance_name, source_instance, **params)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance'] return RDS2DBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def delete_db_instance(self, instance_name, **params): try: result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance'] return RDS2DBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def delete_db_snapshot(self, snapshot): try: result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot'] return RDS2Snapshot(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def modify_db_instance(self, instance_name, **params): try: result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance'] return RDS2DBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def reboot_db_instance(self, instance_name, **params): try: result = 
self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance'] return RDS2DBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): try: result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance'] return RDS2DBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def create_db_snapshot(self, snapshot, instance_name, **params): try: result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot'] return RDS2Snapshot(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) def promote_read_replica(self, instance_name, **params): try: result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance'] return RDS2DBInstance(result) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: raise RDSException(e) @@ -536,8 +545,8 @@ def get_data(self): 'iops' : self.instance.iops } - # Endpoint exists only if the instance is available - if self.status == 'available': + # Only assign an Endpoint if one is available + if hasattr(self.instance, 'endpoint'): d["endpoint"] = self.instance.endpoint[0] d["port"] = self.instance.endpoint[1] if self.instance.vpc_security_groups is not None: @@ -552,7 +561,7 @@ def get_data(self): # ReadReplicaSourceDBInstanceIdentifier may or may not exist try: d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier - except Exception, e: + 
except Exception as e: d["replication_source"] = None return d @@ -584,9 +593,9 @@ def get_data(self): } if self.instance["VpcSecurityGroups"] is not None: d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups']) - if self.status == 'available': - d['endpoint'] = self.instance["Endpoint"]["Address"] - d['port'] = self.instance["Endpoint"]["Port"] + if "Endpoint" in self.instance and self.instance["Endpoint"] is not None: + d['endpoint'] = self.instance["Endpoint"].get('Address', None) + d['port'] = self.instance["Endpoint"].get('Port', None) else: d['endpoint'] = None d['port'] = None @@ -687,7 +696,7 @@ def create_db_instance(module, conn): module.params.get('instance_type'), module.params.get('db_engine'), module.params.get('username'), module.params.get('password'), **params) changed = True - except RDSException, e: + except RDSException as e: module.fail_json(msg="Failed to create instance: %s" % e.message) if module.params.get('wait'): @@ -714,7 +723,7 @@ def replicate_db_instance(module, conn): try: result = conn.create_db_instance_read_replica(instance_name, source_instance, **params) changed = True - except RDSException, e: + except RDSException as e: module.fail_json(msg="Failed to create replica instance: %s " % e.message) if module.params.get('wait'): @@ -753,7 +762,7 @@ def delete_db_instance_or_snapshot(module, conn): result = conn.delete_db_instance(instance_name, **params) else: result = conn.delete_db_snapshot(snapshot) - except RDSException, e: + except RDSException as e: module.fail_json(msg="Failed to delete instance: %s" % e.message) # If we're not waiting for a delete to complete then we're all done @@ -763,12 +772,12 @@ def delete_db_instance_or_snapshot(module, conn): try: resource = await_resource(conn, result, 'deleted', module) module.exit_json(changed=True) - except RDSException, e: + except RDSException as e: if e.code == 'DBInstanceNotFound': module.exit_json(changed=True) else: 
module.fail_json(msg=e.message) - except Exception, e: + except Exception as e: module.fail_json(msg=str(e)) @@ -806,7 +815,7 @@ def modify_db_instance(module, conn): try: result = conn.modify_db_instance(instance_name, **params) - except RDSException, e: + except RDSException as e: module.fail_json(msg=e.message) if params.get('apply_immediately'): if new_instance_name: @@ -844,7 +853,7 @@ def promote_db_instance(module, conn): try: result = conn.promote_read_replica(instance_name, **params) changed = True - except RDSException, e: + except RDSException as e: module.fail_json(msg=e.message) else: changed = False @@ -869,7 +878,7 @@ def snapshot_db_instance(module, conn): try: result = conn.create_db_snapshot(snapshot, instance_name, **params) changed = True - except RDSException, e: + except RDSException as e: module.fail_json(msg=e.message) if module.params.get('wait'): @@ -894,7 +903,7 @@ def reboot_db_instance(module, conn): try: result = conn.reboot_db_instance(instance_name, **params) changed = True - except RDSException, e: + except RDSException as e: module.fail_json(msg=e.message) if module.params.get('wait'): @@ -925,7 +934,7 @@ def restore_db_instance(module, conn): try: result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params) changed = True - except RDSException, e: + except RDSException as e: module.fail_json(msg=e.message) if module.params.get('wait'): @@ -1023,7 +1032,7 @@ def main(): command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True), instance_name = dict(required=False), source_instance = dict(required=False), - db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False), + db_engine = dict(choices=['mariadb', 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 
'sqlserver-web', 'postgres', 'aurora'], required=False), size = dict(required=False), instance_type = dict(aliases=['type'], required=False), username = dict(required=False), @@ -1080,7 +1089,7 @@ def main(): module.fail_json(msg="Region not specified. Unable to determine region from EC2_REGION.") # set port to per db defaults if not specified - if module.params['port'] is None and module.params['command'] in ['create', 'replicate']: + if module.params['port'] is None and module.params['db_engine'] is not None and module.params['command'] == 'create': if '-' in module.params['db_engine']: engine = module.params['db_engine'].split('-')[0] else: @@ -1099,4 +1108,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/rds_param_group.py b/cloud/amazon/rds_param_group.py index 2e79700b091..154fed391aa 100644 --- a/cloud/amazon/rds_param_group.py +++ b/cloud/amazon/rds_param_group.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rds_param_group @@ -47,7 +51,7 @@ required: false default: null aliases: [] - choices: [ 'aurora5.6', 'mariadb10.0', 'mysql5.1', 'mysql5.5', 'mysql5.6', 'oracle-ee-11.2', 'oracle-ee-12.1', 'oracle-se-11.2', 'oracle-se-12.1', 'oracle-se1-11.2', 'oracle-se1-12.1', 'postgres9.3', 'postgres9.4', 'sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', 'sqlserver-ex-11.0', 'sqlserver-ex-12.0', 'sqlserver-se-10.5', 'sqlserver-se-11.0', 'sqlserver-se-12.0', 'sqlserver-web-10.5', 'sqlserver-web-11.0', 'sqlserver-web-12.0' ] + choices: [ 'aurora5.6', 'mariadb10.0', 'mysql5.1', 'mysql5.5', 'mysql5.6', 'mysql5.7', 'oracle-ee-11.2', 'oracle-ee-12.1', 'oracle-se-11.2', 'oracle-se-12.1', 'oracle-se1-11.2', 'oracle-se1-12.1', 'postgres9.3', 'postgres9.4', 'postgres9.5', sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', 'sqlserver-ex-11.0', 'sqlserver-ex-12.0', 'sqlserver-se-10.5', 'sqlserver-se-11.0', 'sqlserver-se-12.0', 'sqlserver-web-10.5', 'sqlserver-web-11.0', 'sqlserver-web-12.0' ] immediate: description: - Whether to apply the changes immediately, or after the next reboot of any associated instances. @@ -88,6 +92,7 @@ 'mysql5.1', 'mysql5.5', 'mysql5.6', + 'mysql5.7', 'oracle-ee-11.2', 'oracle-ee-12.1', 'oracle-se-11.2', @@ -96,6 +101,7 @@ 'oracle-se1-12.1', 'postgres9.3', 'postgres9.4', + 'postgres9.5', 'sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', @@ -159,7 +165,7 @@ def set_parameter(param, value, immediate): # may be based on a variable (ie. 
{foo*3/4}) so # just pass it on through to boto converted_value = str(value) - elif type(value) == bool: + elif isinstance(value, bool): converted_value = 1 if value else 0 else: converted_value = int(value) @@ -232,7 +238,7 @@ def main(): immediate = module.params.get('immediate') or False if state == 'present': - for required in ['name', 'description', 'engine', 'params']: + for required in ['name', 'description', 'engine']: if not module.params.get(required): module.fail_json(msg = str("Parameter %s required for state='present'" % required)) else: @@ -248,7 +254,7 @@ def main(): try: conn = connect_to_aws(boto.rds, region, **aws_connect_kwargs) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg = e.error_message) group_was_added = False @@ -259,7 +265,7 @@ def main(): try: all_groups = conn.get_all_dbparameter_groups(group_name, max_records=100) exists = len(all_groups) > 0 - except BotoServerError, e: + except BotoServerError as e: if e.error_code != 'DBParameterGroupNotFound': module.fail_json(msg = e.error_message) exists = False @@ -288,10 +294,10 @@ def main(): else: break - except BotoServerError, e: + except BotoServerError as e: module.fail_json(msg = e.error_message) - except NotModifiableError, e: + except NotModifiableError as e: msg = e.error_message if group_was_added: msg = '%s The group "%s" was added first.' % (msg, group_name) @@ -305,3 +311,4 @@ def main(): if __name__ == '__main__': main() + diff --git a/cloud/amazon/rds_subnet_group.py b/cloud/amazon/rds_subnet_group.py index 1dbdcee8c5a..bec08cf61d6 100644 --- a/cloud/amazon/rds_subnet_group.py +++ b/cloud/amazon/rds_subnet_group.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rds_subnet_group @@ -113,7 +117,7 @@ def main(): try: conn = connect_to_aws(boto.rds, region, **aws_connect_kwargs) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg = e.error_message) try: @@ -123,7 +127,7 @@ def main(): try: matching_groups = conn.get_all_db_subnet_groups(group_name, max_records=100) exists = len(matching_groups) > 0 - except BotoServerError, e: + except BotoServerError as e: if e.error_code != 'DBSubnetGroupNotFoundFault': module.fail_json(msg = e.error_message) @@ -142,7 +146,7 @@ def main(): if ( (matching_groups[0].name != group_name) or (matching_groups[0].description != group_description) or (matching_groups[0].subnet_ids != group_subnets) ): changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets) changed = True - except BotoServerError, e: + except BotoServerError as e: module.fail_json(msg = e.error_message) module.exit_json(changed=changed) @@ -151,4 +155,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 773dc8a8656..6452fefb359 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: route53 @@ -93,7 +97,7 @@ version_added: "1.9" identifier: description: - - Weighted and latency-based resource record sets only. An identifier + - Have to be specified for Weighted, latency-based and failover resource record sets only. 
An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type. required: false @@ -126,7 +130,7 @@ failover: description: - Failover resource record sets only. Whether this is the primary or - secondary resource record set. + secondary resource record set. Allowed values are PRIMARY and SECONDARY required: false default: null version_added: "2.0" @@ -195,6 +199,16 @@ ttl: "7200" value: "::1" +# Add a SRV record with multiple fields for a service on port 22222 +# For more information on SRV records see: +# https://en.wikipedia.org/wiki/SRV_record +- route53: + command: "create" + "zone": "foo.com" + "record": "_example-service._tcp.foo.com" + "type": "SRV" + "value": ["0 0 22222 host1.foo.com", "0 0 22222 host2.foo.com"] + # Add a TXT record. Note that TXT and SPF records must be surrounded # by quotes when sent to Route 53: - route53: @@ -207,24 +221,43 @@ # Add an alias record that points to an Amazon ELB: - route53: - command=create - zone=foo.com - record=elb.foo.com - type=A - value="{{ elb_dns_name }}" - alias=True - alias_hosted_zone_id="{{ elb_zone_id }}" + command: create + zone: foo.com + record: elb.foo.com + type: A + value: "{{ elb_dns_name }}" + alias: True + alias_hosted_zone_id: "{{ elb_zone_id }}" + +# Retrieve the details for elb.foo.com +- route53: + command: get + zone: foo.com + record: elb.foo.com + type: A + register: rec + +# Delete an alias record using the results from the get command +- route53: + command: delete + zone: foo.com + record: "{{ rec.set.record }}" + ttl: "{{ rec.set.ttl }}" + type: "{{ rec.set.type }}" + value: "{{ rec.set.value }}" + alias: True + alias_hosted_zone_id: "{{ rec.set.alias_hosted_zone_id }}" # Add an alias record that points to an Amazon ELB and evaluates it health: - route53: - command=create - zone=foo.com - record=elb.foo.com - type=A - value="{{ elb_dns_name }}" - alias=True - alias_hosted_zone_id="{{ elb_zone_id }}" - 
alias_evaluate_target_health=True + command: create + zone: foo.com + record: elb.foo.com + type: A + value: "{{ elb_dns_name }}" + alias: True + alias_hosted_zone_id: "{{ elb_zone_id }}" + alias_evaluate_target_health: True # Add an AAAA record with Hosted Zone ID. Note that because there are colons in the value # that the entire parameter list must be quoted: @@ -318,7 +351,7 @@ def commit(changes, retry_interval, wait, wait_timeout): retry -= 1 result = changes.commit() break - except boto.route53.exception.DNSServerError, e: + except boto.route53.exception.DNSServerError as e: code = e.body.split("")[1] code = code.split("")[0] if code != 'PriorRequestNotComplete' or retry < 0: @@ -326,16 +359,16 @@ def commit(changes, retry_interval, wait, wait_timeout): time.sleep(float(retry_interval)) if wait: - timeout_time = time.time() + wait_timeout - connection = changes.connection - change = result['ChangeResourceRecordSetsResponse']['ChangeInfo'] - status = Status(connection, change) - while status.status != 'INSYNC' and time.time() < timeout_time: - time.sleep(WAIT_RETRY_SLEEP) - status.update() - if time.time() >= timeout_time: - raise TimeoutError() - return result + timeout_time = time.time() + wait_timeout + connection = changes.connection + change = result['ChangeResourceRecordSetsResponse']['ChangeInfo'] + status = Status(connection, change) + while status.status != 'INSYNC' and time.time() < timeout_time: + time.sleep(WAIT_RETRY_SLEEP) + status.update() + if time.time() >= timeout_time: + raise TimeoutError() + return result # Shamelessly copied over from https://git.io/vgmDG IGNORE_CODE = 'Throttling' @@ -346,7 +379,7 @@ def invoke_with_throttling_retries(function_ref, *argv): try: retval=function_ref(*argv) return retval - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: if e.code != IGNORE_CODE or retries==MAX_RETRIES: raise e time.sleep(5 * (2**retries)) @@ -372,7 +405,7 @@ def main(): weight = dict(required=False, 
type='int'), region = dict(required=False), health_check = dict(required=False), - failover = dict(required=False), + failover = dict(required=False,choices=['PRIMARY','SECONDARY']), vpc_id = dict(required=False), wait = dict(required=False, type='bool', default=False), wait_timeout = dict(required=False, type='int', default=300), @@ -411,10 +444,10 @@ def main(): value_list = () - if type(value_in) is str: + if isinstance(value_in, str): if value_in: value_list = sorted([s.strip() for s in value_in.split(',')]) - elif type(value_in) is list: + elif isinstance(value_in, list): value_list = sorted(value_in) if zone_in[-1:] != '.': @@ -427,10 +460,20 @@ def main(): if not value_in: module.fail_json(msg = "parameter 'value' required for create/delete") elif alias_in: - if len(value_list) != 1: - module.fail_json(msg = "parameter 'value' must contain a single dns name for alias create/delete") - elif not alias_hosted_zone_id_in: - module.fail_json(msg = "parameter 'alias_hosted_zone_id' required for alias create/delete") + if len(value_list) != 1: + module.fail_json(msg = "parameter 'value' must contain a single dns name for alias create/delete") + elif not alias_hosted_zone_id_in: + module.fail_json(msg = "parameter 'alias_hosted_zone_id' required for alias create/delete") + elif ( weight_in!=None or region_in!=None or failover_in!=None ) and identifier_in==None: + module.fail_json(msg= "If you specify failover, region or weight you must also specify identifier") + + if command_in == 'create': + if ( weight_in!=None or region_in!=None or failover_in!=None ) and identifier_in==None: + module.fail_json(msg= "If you specify failover, region or weight you must also specify identifier") + elif ( weight_in==None and region_in==None and failover_in==None ) and identifier_in!=None: + module.fail_json(msg= "You have specified identifier which makes sense only if you specify one of: weight, region or failover.") + + if vpc_id_in and not private_zone_in: 
module.fail_json(msg="parameter 'private_zone' must be true when specifying parameter" @@ -440,7 +483,7 @@ def main(): # connect to the route53 endpoint try: conn = Route53Connection(**aws_connect_kwargs) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg = e.error_message) # Find the named zone ID @@ -472,7 +515,10 @@ def main(): #Need to save this changes in rset, because of comparing rset.to_xml() == wanted_rset.to_xml() in next block rset.name = decoded_name - if rset.type == type_in and decoded_name.lower() == record_in.lower() and str(rset.identifier) == str(identifier_in): + if identifier_in is not None: + identifier_in = str(identifier_in) + + if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == identifier_in: found_record = True record['zone'] = zone_in record['type'] = rset.type @@ -528,10 +574,13 @@ def main(): try: result = invoke_with_throttling_retries(commit, changes, retry_interval_in, wait_in, wait_timeout_in) - except boto.route53.exception.DNSServerError, e: + except boto.route53.exception.DNSServerError as e: txt = e.body.split("")[1] txt = txt.split("")[0] - module.fail_json(msg = txt) + if "but it already exists" in txt: + module.exit_json(changed=False) + else: + module.fail_json(msg = txt) except TimeoutError: module.fail_json(msg='Timeout waiting for changes to replicate') @@ -541,4 +590,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py old mode 100644 new mode 100755 index 5a120ca7be2..9974a4f467e --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: s3 @@ -113,9 +117,9 @@ version_added: "2.0" overwrite: description: - - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. Boolean or one of [Always, Never, Different], new in 2.0 + - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. Boolean or one of [always, never, different], true is equal to 'always' and false is equal to 'never', new in 2.0 required: false - default: true + default: 'always' version_added: "1.2" region: description: @@ -131,9 +135,14 @@ version_added: "2.0" s3_url: description: - - S3 URL endpoint for usage with Eucalypus, fakes3, etc. Otherwise assumes AWS + - S3 URL endpoint for usage with Ceph, Eucalypus, fakes3, etc. Otherwise assumes AWS default: null aliases: [ S3_URL ] + rgw: + description: + - Enable Ceph RGW S3 support. This option requires an explicit url via s3_url. + default: false + version_added: "2.2" src: description: - The source file path when performing a PUT operation. @@ -149,45 +158,101 @@ ''' EXAMPLES = ''' -# Simple PUT operation -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put - -# Simple GET operation -- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get - -# Get a specific version of an object. 
-- s3: bucket=mybucket object=/my/desired/key.txt version=48c9ee5131af7a716edc22df9772aa6f dest=/usr/local/myfile.txt mode=get - -# PUT/upload with metadata -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache' - -# PUT/upload with custom headers -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put headers=x-amz-grant-full-control=emailAddress=owner@example.com - -# List keys simple -- s3: bucket=mybucket mode=list - -# List keys all options -- s3: bucket=mybucket mode=list prefix=/my/desired/ marker=/my/desired/0023.txt max_keys=472 - -# Create an empty bucket -- s3: bucket=mybucket mode=create permission=public-read - -# Create a bucket with key as directory, in the EU region -- s3: bucket=mybucket object=/my/directory/path mode=create region=eu-west-1 - -# Delete a bucket and all contents -- s3: bucket=mybucket mode=delete - -# GET an object but dont download if the file checksums match. New in 2.0 -- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get overwrite=different - -# Delete an object from a bucket -- s3: bucket=mybucket object=/my/desired/key.txt mode=delobj +- name: Simple PUT operation + s3: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + +- name: Simple PUT operation in Ceph RGW S3 + s3: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + rgw: true + s3_url: "http://localhost:8000" + +- name: Simple GET operation + s3: + bucket: mybucket + object: /my/desired/key.txt + dest: /usr/local/myfile.txt + mode: get + +- name: Get a specific version of an object. 
+ s3: + bucket: mybucket + object: /my/desired/key.txt + version: 48c9ee5131af7a716edc22df9772aa6f + dest: /usr/local/myfile.txt + mode: get + +- name: PUT/upload with metadata + s3: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + metadata: 'Content-Encoding=gzip,Cache-Control=no-cache' + +- name: PUT/upload with custom headers + s3: + bucket: mybucket + object: /my/desired/key.txt + src: /usr/local/myfile.txt + mode: put + headers: 'x-amz-grant-full-control=emailAddress=owner@example.com' + +- name: List keys simple + s3: + bucket: mybucket + mode: list + +- name: List keys all options + s3: + bucket: mybucket + mode: list + prefix: /my/desired/ + marker: /my/desired/0023.txt + max_keys: 472 + +- name: Create an empty bucket + s3: + bucket: mybucket + mode: create + permission: public-read + +- name: Create a bucket with key as directory, in the EU region + s3: + bucket: mybucket + object: /my/directory/path + mode: create + region: eu-west-1 + +- name: Delete a bucket and all contents + s3: + bucket: mybucket + mode: delete + +- name: GET an object but dont download if the file checksums match. New in 2.0 + s3: + bucket: mybucket + object: /my/desired/key.txt + dest: /usr/local/myfile.txt + mode: get + overwrite: different + +- name: Delete an object from a bucket + s3: + bucket: mybucket + object: /my/desired/key.txt + mode: delobj ''' import os -import urlparse +from ansible.module_utils.six.moves.urllib.parse import urlparse from ssl import SSLError try: @@ -205,7 +270,7 @@ def key_check(module, s3, bucket, obj, version=None): try: bucket = s3.lookup(bucket) key_check = bucket.get_key(obj, version_id=version) - except s3.provider.storage_response_error, e: + except s3.provider.storage_response_error as e: if version is not None and e.status == 400: # If a specified version doesn't exist a 400 is returned. 
key_check = None else: @@ -229,7 +294,7 @@ def keysum(module, s3, bucket, obj, version=None): def bucket_check(module, s3, bucket): try: result = s3.lookup(bucket) - except s3.provider.storage_response_error, e: + except s3.provider.storage_response_error as e: module.fail_json(msg= str(e)) if result: return True @@ -243,7 +308,7 @@ def create_bucket(module, s3, bucket, location=None): bucket = s3.create_bucket(bucket, location=location) for acl in module.params.get('permission'): bucket.set_acl(acl) - except s3.provider.storage_response_error, e: + except s3.provider.storage_response_error as e: module.fail_json(msg= str(e)) if bucket: return True @@ -251,7 +316,7 @@ def create_bucket(module, s3, bucket, location=None): def get_bucket(module, s3, bucket): try: return s3.lookup(bucket) - except s3.provider.storage_response_error, e: + except s3.provider.storage_response_error as e: module.fail_json(msg= str(e)) def list_keys(module, bucket_object, prefix, marker, max_keys): @@ -268,7 +333,7 @@ def delete_bucket(module, s3, bucket): bucket.delete_keys([key.name for key in bucket_contents]) bucket.delete() return True - except s3.provider.storage_response_error, e: + except s3.provider.storage_response_error as e: module.fail_json(msg= str(e)) def delete_key(module, s3, bucket, obj): @@ -276,7 +341,7 @@ def delete_key(module, s3, bucket, obj): bucket = s3.lookup(bucket) bucket.delete_key(obj) module.exit_json(msg="Object deleted from bucket %s"%bucket, changed=True) - except s3.provider.storage_response_error, e: + except s3.provider.storage_response_error as e: module.fail_json(msg= str(e)) def create_dirkey(module, s3, bucket, obj): @@ -285,7 +350,7 @@ def create_dirkey(module, s3, bucket, obj): key = bucket.new_key(obj) key.set_contents_from_string('') module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True) - except s3.provider.storage_response_error, e: + except s3.provider.storage_response_error as e: 
module.fail_json(msg= str(e)) def path_check(path): @@ -308,7 +373,7 @@ def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, heade key.set_acl(acl) url = key.generate_url(expiry) module.exit_json(msg="PUT operation complete", url=url, changed=True) - except s3.provider.storage_copy_error, e: + except s3.provider.storage_copy_error as e: module.fail_json(msg= str(e)) def download_s3file(module, s3, bucket, obj, dest, retries, version=None): @@ -320,7 +385,7 @@ def download_s3file(module, s3, bucket, obj, dest, retries, version=None): try: key.get_contents_to_filename(dest) module.exit_json(msg="GET operation complete", changed=True) - except s3.provider.storage_copy_error, e: + except s3.provider.storage_copy_error as e: module.fail_json(msg= str(e)) except SSLError as e: # actually fail on last pass through the loop. @@ -335,7 +400,7 @@ def download_s3str(module, s3, bucket, obj, version=None): key = bucket.get_key(obj, version_id=version) contents = key.get_contents_as_string() module.exit_json(msg="GET operation complete", contents=contents, changed=True) - except s3.provider.storage_copy_error, e: + except s3.provider.storage_copy_error as e: module.fail_json(msg= str(e)) def get_download_url(module, s3, bucket, obj, expiry, changed=True): @@ -344,7 +409,7 @@ def get_download_url(module, s3, bucket, obj, expiry, changed=True): key = bucket.lookup(obj) url = key.generate_url(expiry) module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed) - except s3.provider.storage_response_error, e: + except s3.provider.storage_response_error as e: module.fail_json(msg= str(e)) def is_fakes3(s3_url): @@ -384,6 +449,7 @@ def main(): prefix = dict(default=None), retries = dict(aliases=['retry'], type='int', default=0), s3_url = dict(aliases=['S3_URL']), + rgw = dict(default='no', type='bool'), src = dict(), ), ) @@ -408,6 +474,7 @@ def main(): prefix = module.params.get('prefix') retries = module.params.get('retries') s3_url = 
module.params.get('s3_url') + rgw = module.params.get('rgw') src = module.params.get('src') for acl in module.params.get('permission'): @@ -437,6 +504,10 @@ def main(): if not s3_url and 'S3_URL' in os.environ: s3_url = os.environ['S3_URL'] + # rgw requires an explicit url + if rgw and not s3_url: + module.fail_json(msg='rgw flavour requires s3_url') + # bucket names with .'s in them need to use the calling_format option, # otherwise the connection will fail. See https://github.com/boto/boto/issues/2836 # for more details. @@ -444,9 +515,18 @@ def main(): aws_connect_kwargs['calling_format'] = OrdinaryCallingFormat() # Look at s3_url and tweak connection settings - # if connecting to Walrus or fakes3 + # if connecting to RGW, Walrus or fakes3 try: - if is_fakes3(s3_url): + if s3_url and rgw: + rgw = urlparse.urlparse(s3_url) + s3 = boto.connect_s3( + is_secure=rgw.scheme == 'https', + host=rgw.hostname, + port=rgw.port, + calling_format=OrdinaryCallingFormat(), + **aws_connect_kwargs + ) + elif is_fakes3(s3_url): fakes3 = urlparse.urlparse(s3_url) s3 = S3Connection( is_secure=fakes3.scheme == 'fakes3s', @@ -460,14 +540,15 @@ def main(): s3 = boto.connect_walrus(walrus, **aws_connect_kwargs) else: aws_connect_kwargs['is_secure'] = True - s3 = connect_to_aws(boto.s3, location, **aws_connect_kwargs) - # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases - if s3 is None: + try: + s3 = connect_to_aws(boto.s3, location, **aws_connect_kwargs) + except AnsibleAWSError: + # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases s3 = boto.connect_s3(**aws_connect_kwargs) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg='No Authentication Handler found: %s ' % str(e)) - except Exception, e: + except Exception as e: module.fail_json(msg='Failed to connect to S3: %s' % str(e)) if 
s3 is None: # this should never happen @@ -659,4 +740,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/azure/azure.py b/cloud/azure/azure.py index bcd98d70772..60cdbbe0479 100644 --- a/cloud/azure/azure.py +++ b/cloud/azure/azure.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: azure @@ -298,7 +302,7 @@ def _handle_timeout(signum, frame): if disk.attached_to is None: azure.delete_disk(disk.name, True) disk_names.remove(disk_name) - except AzureException, e: + except AzureException as e: module.fail_json(msg="failed to get or delete disk, error was: %s" % (disk_name, str(e))) finally: signal.alarm(0) @@ -356,7 +360,7 @@ def create_virtual_machine(module, azure): result = azure.create_hosted_service(service_name=name, label=name, location=location) _wait_for_completion(azure, result, wait_timeout, "create_hosted_service") changed = True - except AzureException, e: + except AzureException as e: module.fail_json(msg="failed to create the new service, error was: %s" % str(e)) try: @@ -427,13 +431,13 @@ def create_virtual_machine(module, azure): virtual_network_name=virtual_network_name) _wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment") changed = True - except AzureException, e: + except AzureException as e: module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e)) try: deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name) return (changed, urlparse(deployment.url).hostname, deployment) - except AzureException, e: + except AzureException as e: module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e))) @@ -461,9 +465,9 
@@ def terminate_virtual_machine(module, azure): disk_names = [] try: deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name) - except AzureMissingException, e: + except AzureMissingException as e: pass # no such deployment or service - except AzureException, e: + except AzureException as e: module.fail_json(msg="failed to find the deployment, error was: %s" % str(e)) # Delete deployment @@ -476,28 +480,28 @@ def terminate_virtual_machine(module, azure): role_props = azure.get_role(name, deployment.name, role.role_name) if role_props.os_virtual_hard_disk.disk_name not in disk_names: disk_names.append(role_props.os_virtual_hard_disk.disk_name) - except AzureException, e: + except AzureException as e: module.fail_json(msg="failed to get the role %s, error was: %s" % (role.role_name, str(e))) try: result = azure.delete_deployment(name, deployment.name) _wait_for_completion(azure, result, wait_timeout, "delete_deployment") - except AzureException, e: + except AzureException as e: module.fail_json(msg="failed to delete the deployment %s, error was: %s" % (deployment.name, str(e))) - # It's unclear when disks associated with terminated deployment get detatched. + # It's unclear when disks associated with terminated deployment get detached. # Thus, until the wait_timeout is reached, we continue to delete disks as they - # become detatched by polling the list of remaining disks and examining the state. + # become detached by polling the list of remaining disks and examining the state. 
try: _delete_disks_when_detached(azure, wait_timeout, disk_names) - except (AzureException, TimeoutError), e: + except (AzureException, TimeoutError) as e: module.fail_json(msg=str(e)) try: # Now that the vm is deleted, remove the cloud service result = azure.delete_hosted_service(service_name=name) _wait_for_completion(azure, result, wait_timeout, "delete_hosted_service") - except AzureException, e: + except AzureException as e: module.fail_json(msg="failed to delete the service %s, error was: %s" % (name, str(e))) public_dns_name = urlparse(deployment.url).hostname @@ -535,7 +539,7 @@ def main(): management_cert_path=dict(), endpoints=dict(default='22'), user=dict(), - password=dict(), + password=dict(no_log=True), image=dict(), virtual_network_name=dict(default=None), state=dict(default='present'), @@ -594,7 +598,7 @@ def __getattr__(self, name): raise AttributeError(name) def _wrap(self, func, args, kwargs): - if type(func) == MethodType: + if isinstance(func, MethodType): result = self._handle_temporary_redirects(lambda: func(*args, **kwargs)) else: result = self._handle_temporary_redirects(lambda: func(self.other, *args, **kwargs)) @@ -605,7 +609,7 @@ def _handle_temporary_redirects(self, f): while wait_timeout > time.time(): try: return f() - except AzureException, e: + except AzureException as e: if not str(e).lower().find("temporary redirect") == -1: time.sleep(5) pass diff --git a/cloud/azure/azure_rm_networkinterface.py b/cloud/azure/azure_rm_networkinterface.py new file mode 100644 index 00000000000..651da3caadd --- /dev/null +++ b/cloud/azure/azure_rm_networkinterface.py @@ -0,0 +1,582 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: azure_rm_networkinterface + +version_added: "2.1" + +short_description: Manage Azure network interfaces. + +description: + - Create, update or delete a network interface. When creating a network interface you must provide the name of an + existing virtual network, the name of an existing subnet within the virtual network. A default security group + and public IP address will be created automatically, or you can provide the name of an existing security group + and public IP address. See the examples below for more details. + +options: + resource_group: + description: + - Name of a resource group where the network interface exists or will be created. + required: true + name: + description: + - Name of the network interface. + required: true + state: + description: + - Assert the state of the network interface. Use 'present' to create or update an interface and + 'absent' to delete an interface. + default: present + choices: + - absent + - present + required: false + location: + description: + - Valid azure location. Defaults to location of the resource group. + default: resource_group location + required: false + virtual_network_name: + description: + - Name of an existing virtual network with which the network interface will be associated. Required + when creating a network interface. + aliases: + - virtual_network + required: false + default: null + subnet_name: + description: + - Name of an existing subnet within the specified virtual network. 
Required when creating a network + interface + aliases: + - subnet + required: false + default: null + os_type: + description: + - Determines any rules to be added to a default security group. When creating a network interface, if no + security group name is provided, a default security group will be created. If the os_type is 'Windows', + a rule will be added allowing RDP access. If the os_type is 'Linux', a rule allowing SSH access will be + added. + choices: + - Windows + - Linux + default: Linux + required: false + private_ip_address: + description: + - Valid IPv4 address that falls within the specified subnet. + required: false + private_ip_allocation_method: + description: + - "Specify whether or not the assigned IP address is permanent. NOTE: when creating a network interface + specifying a value of 'Static' requires that a private_ip_address value be provided. You can update + the allocation method to 'Static' after a dynamic private ip address has been assigned." + default: Dynamic + choices: + - Dynamic + - Static + required: false + public_ip: + description: + - When creating a network interface, if no public IP address name is provided a default public IP + address will be created. Set to false, if you do not want a public IP address automatically created. + default: true + required: false + public_ip_address_name: + description: + - Name of an existing public IP address object to associate with the security group. + aliases: + - public_ip_address + - public_ip_name + required: false + default: null + public_ip_allocation_method: + description: + - If a public_ip_address_name is not provided, a default public IP address will be created. The allocation + method determines whether or not the public IP address assigned to the network interface is permanent. + choices: + - Dynamic + - Static + default: Dynamic + required: false + security_group_name: + description: + - Name of an existing security group with which to associate the network interface. 
If not provided, a + default security group will be created. + aliases: + - security_group + required: false + default: null + open_ports: + description: + - When a default security group is created for a Linux host a rule will be added allowing inbound TCP + connections to the default SSH port 22, and for a Windows host rules will be added allowing inbound + access to RDP ports 3389 and 5986. Override the default ports by providing a list of open ports. + type: list + required: false + default: null +extends_documentation_fragment: + - azure + - azure_tags + +author: + - "Chris Houseknecht (@chouseknecht)" + - "Matt Davis (@nitzmahone)" +''' + +EXAMPLES = ''' + - name: Create a network interface with minimal parameters + azure_rm_networkinterface: + name: nic001 + resource_group: Testing + virtual_network_name: vnet001 + subnet_name: subnet001 + + - name: Create a network interface with private IP address only (no Public IP) + azure_rm_networkinterface: + name: nic001 + resource_group: Testing + virtual_network_name: vnet001 + subnet_name: subnet001 + public_ip: no + + - name: Create a network interface for use in a Windows host (opens RDP port) with custom RDP port + azure_rm_networkinterface: + name: nic002 + resource_group: Testing + virtual_network_name: vnet001 + subnet_name: subnet001 + os_type: Windows + rdp_port: 3399 + + - name: Create a network interface using existing security group and public IP + azure_rm_networkinterface: + name: nic003 + resource_group: Testing + virtual_network_name: vnet001 + subnet_name: subnet001 + security_group_name: secgroup001 + public_ip_address_name: publicip001 + + - name: Delete network interface + azure_rm_networkinterface: + resource_group: Testing + name: nic003 + state: absent +''' + +RETURN = ''' +state: + description: The current state of the network interface. 
+ returned: always + type: dict + sample: { + "dns_settings": { + "applied_dns_servers": [], + "dns_servers": [], + "internal_dns_name_label": null, + "internal_fqdn": null + }, + "enable_ip_forwarding": false, + "etag": 'W/"be115a43-2148-4545-a324-f33ad444c926"', + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/nic003", + "ip_configuration": { + "name": "default", + "private_ip_address": "10.1.0.10", + "private_ip_allocation_method": "Static", + "public_ip_address": { + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/publicIPAddresses/publicip001", + "name": "publicip001" + }, + "subnet": {} + }, + "location": "eastus2", + "mac_address": null, + "name": "nic003", + "network_security_group": {}, + "primary": null, + "provisioning_state": "Succeeded", + "tags": null, + "type": "Microsoft.Network/networkInterfaces" + } +''' + +from ansible.module_utils.basic import * +from ansible.module_utils.azure_rm_common import * + +try: + from msrestazure.azure_exceptions import CloudError + from azure.mgmt.network.models import NetworkInterface, NetworkInterfaceIPConfiguration, Subnet, \ + PublicIPAddress, NetworkSecurityGroup +except ImportError: + # This is handled in azure_rm_common + pass + + + +def nic_to_dict(nic): + result = dict( + id=nic.id, + name=nic.name, + type=nic.type, + location=nic.location, + tags=nic.tags, + network_security_group=dict(), + ip_configuration=dict( + name=nic.ip_configurations[0].name, + private_ip_address=nic.ip_configurations[0].private_ip_address, + private_ip_allocation_method=nic.ip_configurations[0].private_ip_allocation_method, + subnet=dict(), + public_ip_address=dict(), + ), + dns_settings=dict( + dns_servers=nic.dns_settings.dns_servers, + applied_dns_servers=nic.dns_settings.applied_dns_servers, + internal_dns_name_label=nic.dns_settings.internal_dns_name_label, + 
internal_fqdn=nic.dns_settings.internal_fqdn + ), + mac_address=nic.mac_address, + primary=nic.primary, + enable_ip_forwarding=nic.enable_ip_forwarding, + provisioning_state=nic.provisioning_state, + etag=nic.etag, + ) + + if nic.network_security_group: + result['network_security_group']['id'] = nic.network_security_group.id + id_keys = azure_id_to_dict(nic.network_security_group.id) + result['network_security_group']['name'] = id_keys['networkSecurityGroups'] + + if nic.ip_configurations[0].subnet: + result['ip_configuration']['subnet']['id'] = \ + nic.ip_configurations[0].subnet.id + id_keys = azure_id_to_dict(nic.ip_configurations[0].subnet.id) + result['ip_configuration']['subnet']['virtual_network_name'] = id_keys['virtualNetworks'] + result['ip_configuration']['subnet']['name'] = id_keys['subnets'] + + if nic.ip_configurations[0].public_ip_address: + result['ip_configuration']['public_ip_address']['id'] = \ + nic.ip_configurations[0].public_ip_address.id + id_keys = azure_id_to_dict(nic.ip_configurations[0].public_ip_address.id) + result['ip_configuration']['public_ip_address']['name'] = id_keys['publicIPAddresses'] + + return result + + +class AzureRMNetworkInterface(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + location=dict(type='str'), + security_group_name=dict(type='str', aliases=['security_group']), + state=dict(default='present', choices=['present', 'absent']), + private_ip_address=dict(type='str'), + private_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'), + public_ip_address_name=dict(type='str', aliases=['public_ip_address', 'public_ip_name']), + public_ip=dict(type='bool', default=True), + subnet_name=dict(type='str', aliases=['subnet']), + virtual_network_name=dict(type='str', aliases=['virtual_network']), + os_type=dict(type='str', choices=['Windows', 'Linux'], default='Linux'), + 
open_ports=dict(type='list'), + public_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'), + ) + + self.resource_group = None + self.name = None + self.location = None + self.security_group_name = None + self.private_ip_address = None + self.private_ip_allocation_method = None + self.public_ip_address_name = None + self.state = None + self.subnet_name = None + self.tags = None + self.virtual_network_name = None + self.security_group_name = None + self.os_type = None + self.open_ports = None + self.public_ip_allocation_method = None + self.public_ip = None + + self.results = dict( + changed=False, + state=dict(), + ) + + super(AzureRMNetworkInterface, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec.keys() + ['tags']: + setattr(self, key, kwargs[key]) + + results = dict() + changed = False + nic = None + subnet = None + nsg = None + pip = None + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + + if self.state == 'present': + if self.virtual_network_name and not self.subnet_name: + self.fail("Parameter error: a subnet is required when passing a virtual_network_name.") + + if self.subnet_name and not self.virtual_network_name: + self.fail("Parameter error: virtual_network_name is required when passing a subnet value.") + + if self.virtual_network_name and self.subnet_name: + subnet = self.get_subnet(self.virtual_network_name, self.subnet_name) + + if self.public_ip_address_name: + pip = self.get_public_ip_address(self.public_ip_address_name) + + if self.security_group_name: + nsg = self.get_security_group(self.security_group_name) + + try: + self.log('Fetching network interface {0}'.format(self.name)) + nic = self.network_client.network_interfaces.get(self.resource_group, self.name) + + self.log('Network interface {0} 
exists'.format(self.name)) + self.check_provisioning_state(nic, self.state) + results = nic_to_dict(nic) + self.log(results, pretty_print=True) + + if self.state == 'present': + + update_tags, results['tags'] = self.update_tags(results['tags']) + if update_tags: + changed = True + + if self.private_ip_address: + if results['ip_configuration']['private_ip_address'] != self.private_ip_address: + self.log("CHANGED: network interface {0} private ip".format(self.name)) + changed = True + results['ip_configuration']['private_ip_address'] = self.private_ip_address + + if self.public_ip_address_name: + if results['ip_configuration']['public_ip_address'].get('id') != pip.id: + self.log("CHANGED: network interface {0} public ip".format(self.name)) + changed = True + results['ip_configuration']['public_ip_address']['id'] = pip.id + results['ip_configuration']['public_ip_address']['name'] = pip.name + + if self.security_group_name: + if results['network_security_group'].get('id') != nsg.id: + self.log("CHANGED: network interface {0} network security group".format(self.name)) + changed = True + results['network_security_group']['id'] = nsg.id + results['network_security_group']['name'] = nsg.name + + if self.private_ip_allocation_method: + if results['ip_configuration']['private_ip_allocation_method'] != self.private_ip_allocation_method: + self.log("CHANGED: network interface {0} private ip allocation".format(self.name)) + changed = True + results['ip_configuration']['private_ip_allocation_method'] = self.private_ip_allocation_method + if self.private_ip_allocation_method == 'Dynamic': + results['ip_configuration']['private_ip_address'] = None + + if self.subnet_name: + if results['ip_configuration']['subnet'].get('id') != subnet.id: + changed = True + self.log("CHANGED: network interface {0} subnet".format(self.name)) + results['ip_configuration']['subnet']['id'] = subnet.id + results['ip_configuration']['subnet']['name'] = subnet.name + 
results['ip_configuration']['subnet']['virtual_network_name'] = self.virtual_network_name + + elif self.state == 'absent': + self.log("CHANGED: network interface {0} exists but requested state is 'absent'".format(self.name)) + changed = True + except CloudError: + self.log('Network interface {0} does not exist'.format(self.name)) + if self.state == 'present': + self.log("CHANGED: network interface {0} does not exist but requested state is " + "'present'".format(self.name)) + changed = True + + self.results['changed'] = changed + self.results['state'] = results + + if self.check_mode: + return self.results + + if changed: + if self.state == 'present': + if not nic: + # create network interface + self.log("Creating network interface {0}.".format(self.name)) + + # check required parameters + if not self.subnet_name: + self.fail("parameter error: subnet_name required when creating a network interface.") + if not self.virtual_network_name: + self.fail("parameter error: virtual_network_name required when creating a network interface.") + + if not self.security_group_name: + # create default security group + nsg = self.create_default_securitygroup(self.resource_group, self.location, self.name, + self.os_type, self.open_ports) + + if not pip and self.public_ip: + # create a default public_ip + pip = self.create_default_pip(self.resource_group, self.location, self.name, + self.public_ip_allocation_method) + + nic = NetworkInterface( + location=self.location, + tags=self.tags, + ip_configurations=[ + NetworkInterfaceIPConfiguration( + private_ip_allocation_method=self.private_ip_allocation_method, + ) + ] + ) + #nic.name = self.name + nic.ip_configurations[0].subnet = Subnet(id=subnet.id) + nic.ip_configurations[0].name = 'default' + nic.network_security_group = NetworkSecurityGroup(id=nsg.id, + location=nsg.location, + resource_guid=nsg.resource_guid) + if self.private_ip_address: + nic.ip_configurations[0].private_ip_address = self.private_ip_address + + if pip: + 
nic.ip_configurations[0].public_ip_address = PublicIPAddress( + id=pip.id, + location=pip.location, + resource_guid=pip.resource_guid) + else: + self.log("Updating network interface {0}.".format(self.name)) + nic = NetworkInterface( + id=results['id'], + location=results['location'], + tags=results['tags'], + ip_configurations=[ + NetworkInterfaceIPConfiguration( + private_ip_allocation_method= + results['ip_configuration']['private_ip_allocation_method'] + ) + ] + ) + subnet = self.get_subnet(results['ip_configuration']['subnet']['virtual_network_name'], + results['ip_configuration']['subnet']['name']) + nic.ip_configurations[0].subnet = Subnet(id=subnet.id) + nic.ip_configurations[0].name = results['ip_configuration']['name'] + #nic.name = name=results['name'], + + if results['ip_configuration'].get('private_ip_address'): + nic.ip_configurations[0].private_ip_address = results['ip_configuration']['private_ip_address'] + + if results['ip_configuration']['public_ip_address'].get('id'): + pip = \ + self.get_public_ip_address(results['ip_configuration']['public_ip_address']['name']) + nic.ip_configurations[0].public_ip_address = PublicIPAddress( + id=pip.id, + location=pip.location, + resource_guid=pip.resource_guid) + #name=pip.name, + + if results['network_security_group'].get('id'): + nsg = self.get_security_group(results['network_security_group']['name']) + nic.network_security_group = NetworkSecurityGroup(id=nsg.id, + location=nsg.location, + resource_guid=nsg.resource_guid) + + # See what actually gets sent to the API + request = self.serialize_obj(nic, 'NetworkInterface') + self.log(request, pretty_print=True) + + self.results['state'] = self.create_or_update_nic(nic) + + elif self.state == 'absent': + self.log('Deleting network interface {0}'.format(self.name)) + self.delete_nic() + + return self.results + + def create_or_update_nic(self, nic): + try: + poller = self.network_client.network_interfaces.create_or_update(self.resource_group, self.name, nic) + 
new_nic = self.get_poller_result(poller) + except Exception as exc: + self.fail("Error creating or updating network interface {0} - {1}".format(self.name, str(exc))) + + return nic_to_dict(new_nic) + + def delete_nic(self): + try: + poller = self.network_client.network_interfaces.delete(self.resource_group, self.name) + self.get_poller_result(poller) + except Exception as exc: + self.fail("Error deleting network interface {0} - {1}".format(self.name, str(exc))) + # Delete doesn't return anything. If we get this far, assume success + self.results['state']['status'] = 'Deleted' + return True + + def get_public_ip_address(self, name): + self.log("Fetching public ip address {0}".format(name)) + try: + public_ip = self.network_client.public_ip_addresses.get(self.resource_group, name) + except Exception as exc: + self.fail("Error: fetching public ip address {0} - {1}".format(self.name, str(exc))) + return public_ip + + def get_subnet(self, vnet_name, subnet_name): + self.log("Fetching subnet {0} in virtual network {1}".format(subnet_name, vnet_name)) + try: + subnet = self.network_client.subnets.get(self.resource_group, vnet_name, subnet_name) + except Exception as exc: + self.fail("Error: fetching subnet {0} in virtual network {1} - {2}".format(subnet_name, + vnet_name, + str(exc))) + return subnet + + def get_security_group(self, name): + self.log("Fetching security group {0}".format(name)) + try: + nsg = self.network_client.network_security_groups.get(self.resource_group, name) + except Exception as exc: + self.fail("Error: fetching network security group {0} - {1}.".format(name, str(exc))) + return nsg + + +def main(): + AzureRMNetworkInterface() + +if __name__ == '__main__': + main() + diff --git a/cloud/azure/azure_rm_networkinterface_facts.py b/cloud/azure/azure_rm_networkinterface_facts.py new file mode 100644 index 00000000000..b82a9bf690b --- /dev/null +++ b/cloud/azure/azure_rm_networkinterface_facts.py @@ -0,0 +1,228 @@ +#!/usr/bin/python +# +# Copyright (c) 
2016 Matt Davis, +# Chris Houseknecht, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: azure_rm_networkinterface_facts + +version_added: "2.1" + +short_description: Get network interface facts. + +description: + - Get facts for a specific network interface or all network interfaces within a resource group. + +options: + name: + description: + - Only show results for a specific network interface. + required: false + default: null + resource_group: + description: + - Name of the resource group containing the network interface(s). Required when searching by name. + required: false + default: null + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ required: false + default: null + +extends_documentation_fragment: + - azure + +author: + - "Chris Houseknecht (@chouseknecht)" + - "Matt Davis (@nitzmahone)" + +''' + +EXAMPLES = ''' + - name: Get facts for one network interface + azure_rm_networkinterface_facts: + resource_group: Testing + name: nic001 + + - name: Get network interfaces within a resource group + azure_rm_networkinterface_facts: + resource_group: Testing + + - name: Get network interfaces by tag + azure_rm_networkinterface_facts: + resource_group: Testing + tags: + - testing + - foo:bar +''' + +RETURN = ''' +azure_networkinterfaces: + description: List of network interface dicts. + returned: always + type: list + example: [{ + "dns_settings": { + "applied_dns_servers": [], + "dns_servers": [], + "internal_dns_name_label": null, + "internal_fqdn": null + }, + "enable_ip_forwarding": false, + "etag": 'W/"59726bfc-08c4-44ed-b900-f6a559876a9d"', + "id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/nic003", + "ip_configuration": { + "name": "default", + "private_ip_address": "10.10.0.4", + "private_ip_allocation_method": "Dynamic", + "public_ip_address": { + "id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/publicIPAddresses/publicip001", + "name": "publicip001" + }, + "subnet": { + "id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/vnet001/subnets/subnet001", + "name": "subnet001", + "virtual_network_name": "vnet001" + } + }, + "location": "westus", + "mac_address": null, + "name": "nic003", + "network_security_group": { + "id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001", + "name": "secgroup001" + }, + "primary": null, + "provisioning_state": "Succeeded", + "tags": {}, + "type": 
"Microsoft.Network/networkInterfaces" + }] +''' + +from ansible.module_utils.basic import * +from ansible.module_utils.azure_rm_common import * + +try: + from msrestazure.azure_exceptions import CloudError + from azure.common import AzureMissingResourceHttpError, AzureHttpError +except: + # This is handled in azure_rm_common + pass + + +AZURE_OBJECT_CLASS = 'NetworkInterface' + + +class AzureRMNetworkInterfaceFacts(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list') + ) + + self.results = dict( + changed=False, + ansible_facts=dict(azure_networkinterfaces=[]) + ) + + self.name = None + self.resource_group = None + self.tags = None + + super(AzureRMNetworkInterfaceFacts, self).__init__(self.module_arg_spec, + supports_tags=False, + facts_module=True + ) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name and not self.resource_group: + self.fail("Parameter error: resource group required when filtering by name.") + + if self.name: + self.results['ansible_facts']['azure_networkinterfaces'] = self.get_item() + elif self.resource_group: + self.results['ansible_facts']['azure_networkinterfaces'] = self.list_resource_group() + else: + self.results['ansible_facts']['azure_networkinterfaces'] = self.list_all() + + return self.results + + def get_item(self): + self.log('Get properties for {0}'.format(self.name)) + result = [] + item = None + try: + item = self.network_client.network_interfaces.get(self.resource_group, self.name) + except: + pass + + if item and self.has_tags(item.tags, self.tags): + nic = self.serialize_obj(item, AZURE_OBJECT_CLASS) + result = [nic] + + return result + + def list_resource_group(self): + self.log('List for resource group') + try: + response = self.network_client.network_interfaces.list(self.resource_group) + except Exception as exc: + self.fail("Error listing by 
resource group {0} - {1}".format(self.resource_group, str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + nic = self.serialize_obj(item, AZURE_OBJECT_CLASS) + results.append(nic) + return results + + def list_all(self): + self.log('List all') + try: + response = self.network_client.network_interfaces.list_all() + except Exception as exc: + self.fail("Error listing all - {1}".format(self.resource_group, str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + nic = self.serialize_obj(item, AZURE_OBJECT_CLASS) + results.append(nic) + return results + + +def main(): + AzureRMNetworkInterfaceFacts() + +if __name__ == '__main__': + main() + diff --git a/cloud/azure/azure_rm_publicipaddress.py b/cloud/azure/azure_rm_publicipaddress.py new file mode 100644 index 00000000000..7aa05d4ed5e --- /dev/null +++ b/cloud/azure/azure_rm_publicipaddress.py @@ -0,0 +1,288 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: azure_rm_publicipaddress + +version_added: "2.1" + +short_description: Manage Azure Public IP Addresses. + +description: + - Create, update and delete a Public IP address. 
Allows setting and updating the address allocation method and + domain name label. Use the azure_rm_networkinterface module to associate a Public IP with a network interface. + +options: + resource_group: + description: + - Name of resource group with which the Public IP is associated. + required: true + allocation_method: + description: + - Control whether the assigned Public IP remains permanently assigned to the object. If not + set to 'Static', the IP address my changed anytime an associated virtual machine is power cycled. + choices: + - Dynamic + - Static + default: Dynamic + required: false + domain_name_label: + description: + - The customizable portion of the FQDN assigned to public IP address. This is an explicit setting. If + no value is provided, any existing value will be removed on an existing public IP. + aliases: + - domain_name_label + required: false + default: null + name: + description: + - Name of the Public IP. + required: true + state: + description: + - Assert the state of the Public IP. Use 'present' to create or update a and + 'absent' to delete. + default: present + choices: + - absent + - present + required: false + location: + description: + - Valid azure location. Defaults to location of the resource group. + default: resource_group location + required: false + +extends_documentation_fragment: + - azure + - azure_tags + +author: + - "Chris Houseknecht (@chouseknecht)" + - "Matt Davis (@nitzmahone)" +''' + +EXAMPLES = ''' + - name: Create a public ip address + azure_rm_publicipaddress: + resource_group: testing + name: my_public_ip + allocation_method: Static + domain_name: foobar + + - name: Delete public ip + azure_rm_publicipaddress: + resource_group: testing + name: my_public_ip + state: absent +''' + +RETURN = ''' +state: + description: Facts about the current state of the object. 
+ returned: always + type: dict + sample: { + "dns_settings": {}, + "etag": '"/"a5e56955-12df-445a-bda4-dc129d22c12f"', + "idle_timeout_in_minutes": 4, + "ip_address": "52.160.103.93", + "location": "westus", + "name": "publicip002", + "provisioning_state": "Succeeded", + "public_ip_allocation_method": "Static", + "tags": {}, + "type": "Microsoft.Network/publicIPAddresses" + } +''' + +from ansible.module_utils.basic import * +from ansible.module_utils.azure_rm_common import * + +try: + from msrestazure.azure_exceptions import CloudError + from azure.mgmt.network.models import PublicIPAddress, PublicIPAddressDnsSettings +except ImportError: + # This is handled in azure_rm_common + pass + + +def pip_to_dict(pip): + result = dict( + name=pip.name, + type=pip.type, + location=pip.location, + tags=pip.tags, + public_ip_allocation_method=pip.public_ip_allocation_method, + dns_settings=dict(), + ip_address=pip.ip_address, + idle_timeout_in_minutes=pip.idle_timeout_in_minutes, + provisioning_state=pip.provisioning_state, + etag=pip.etag + ) + if pip.dns_settings: + result['dns_settings']['domain_name_label'] = pip.dns_settings.domain_name_label + result['dns_settings']['fqdn'] = pip.dns_settings.fqdn + result['dns_settings']['reverse_fqdn'] = pip.dns_settings.reverse_fqdn + return result + + +class AzureRMPublicIPAddress(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + location=dict(type='str'), + allocation_method=dict(type='str', default='Dynamic', choices=['Dynamic', 'Static']), + domain_name=dict(type='str', aliases=['domain_name_label']), + ) + + self.resource_group = None + self.name = None + self.location = None + self.state = None + self.tags = None + self.allocation_method = None + self.domain_name = None + + self.results = dict( + changed=False, + state=dict() + ) + + 
super(AzureRMPublicIPAddress, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec.keys() + ['tags']: + setattr(self, key, kwargs[key]) + + results = dict() + changed = False + pip = None + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + + try: + self.log("Fetch public ip {0}".format(self.name)) + pip = self.network_client.public_ip_addresses.get(self.resource_group, self.name) + self.check_provisioning_state(pip, self.state) + self.log("PIP {0} exists".format(self.name)) + if self.state == 'present': + results = pip_to_dict(pip) + if self.domain_name != results['dns_settings'].get('domain_name_label'): + self.log('CHANGED: domain_name_label') + changed = True + results['dns_settings']['domain_name_label'] =self.domain_name + + if self.allocation_method != results['public_ip_allocation_method']: + self.log("CHANGED: allocation_method") + changed = True + results['public_ip_allocation_method'] = self.allocation_method + + update_tags, results['tags'] = self.update_tags(results['tags']) + if update_tags: + changed = True + + elif self.state == 'absent': + self.log("CHANGED: public ip {0} exists but requested state is 'absent'".format(self.name)) + changed = True + except CloudError: + self.log('Public ip {0} does not exist'.format(self.name)) + if self.state == 'present': + self.log("CHANGED: pip {0} does not exist but requested state is 'present'".format(self.name)) + changed = True + + self.results['state'] = results + self.results['changed'] = changed + + if self.check_mode: + return results + + if changed: + if self.state == 'present': + if not pip: + self.log("Create new Public IP {0}".format(self.name)) + pip = PublicIPAddress( + location=self.location, + public_ip_allocation_method=self.allocation_method, + ) + if self.tags: + pip.tags = self.tags + if 
self.domain_name: + pip.dns_settings = PublicIPAddressDnsSettings( + domain_name_label=self.domain_name + ) + else: + self.log("Update Public IP {0}".format(self.name)) + pip = PublicIPAddress( + location=results['location'], + public_ip_allocation_method=results['public_ip_allocation_method'], + tags=results['tags'] + ) + if self.domain_name: + pip.dns_settings = PublicIPAddressDnsSettings( + domain_name_label=self.domain_name + ) + self.results['state'] = self.create_or_update_pip(pip) + elif self.state == 'absent': + self.log('Delete public ip {0}'.format(self.name)) + self.delete_pip() + + return self.results + + def create_or_update_pip(self, pip): + try: + poller = self.network_client.public_ip_addresses.create_or_update(self.resource_group, self.name, pip) + pip = self.get_poller_result(poller) + except Exception as exc: + self.fail("Error creating or updating {0} - {1}".format(self.name, str(exc))) + return pip_to_dict(pip) + + def delete_pip(self): + try: + poller = self.network_client.public_ip_addresses.delete(self.resource_group, self.name) + self.get_poller_result(poller) + except Exception as exc: + self.fail("Error deleting {0} - {1}".format(self.name, str(exc))) + # Delete returns nada. If we get here, assume that all is well. 
+ self.results['state']['status'] = 'Deleted' + return True + + +def main(): + AzureRMPublicIPAddress() + +if __name__ == '__main__': + main() + diff --git a/cloud/azure/azure_rm_publicipaddress_facts.py b/cloud/azure/azure_rm_publicipaddress_facts.py new file mode 100644 index 00000000000..d6b443756a3 --- /dev/null +++ b/cloud/azure/azure_rm_publicipaddress_facts.py @@ -0,0 +1,205 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: azure_rm_publicip_facts + +version_added: "2.1" + +short_description: Get public IP facts. + +description: + - Get facts for a specific public IP or all public IPs within a resource group. + +options: + name: + description: + - Only show results for a specific Public IP. + required: false + default: null + resource_group: + description: + - Limit results by resource group. Required when using name parameter. + required: false + default: null + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. 
+ required: false + default: null + +extends_documentation_fragment: + - azure + +author: + - "Chris Houseknecht (@chouseknecht)" + - "Matt Davis (@nitzmahone)" +''' + +EXAMPLES = ''' + - name: Get facts for one Public IP + azure_rm_publicip_facts: + resource_group: Testing + name: publicip001 + + - name: Get facts for all Public IPs within a resource groups + azure_rm_publicip_facts: + resource_group: Testing +''' + +RETURN = ''' +azure_publicipaddresses: + description: List of public IP address dicts. + returned: always + type: list + example: [{ + "etag": 'W/"a31a6d7d-cb18-40a5-b16d-9f4a36c1b18a"', + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/publicIPAddresses/pip2001", + "location": "eastus2", + "name": "pip2001", + "properties": { + "idleTimeoutInMinutes": 4, + "provisioningState": "Succeeded", + "publicIPAllocationMethod": "Dynamic", + "resourceGuid": "29de82f4-a7da-440e-bd3d-9cabb79af95a" + }, + "type": "Microsoft.Network/publicIPAddresses" + }] +''' + + +from ansible.module_utils.basic import * +from ansible.module_utils.azure_rm_common import * + +try: + from msrestazure.azure_exceptions import CloudError + from azure.common import AzureMissingResourceHttpError, AzureHttpError +except: + # This is handled in azure_rm_common + pass + +AZURE_OBJECT_CLASS = 'PublicIp' + + +class AzureRMPublicIPFacts(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list') + ) + + self.results = dict( + changed=False, + ansible_facts=dict(azure_publicipaddresses=[]) + ) + + self.name = None + self.resource_group = None + self.tags = None + + super(AzureRMPublicIPFacts, self).__init__(self.module_arg_spec, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name and not self.resource_group: + 
self.fail("Parameter error: resource group required when filtering by name.") + + if self.name: + self.results['ansible_facts']['azure_publicipaddresses'] = self.get_item() + elif self.resource_group: + self.results['ansible_facts']['azure_publicipaddresses'] = self.list_resource_group() + else: + self.results['ansible_facts']['azure_publicipaddresses'] = self.list_all() + + return self.results + + def get_item(self): + self.log('Get properties for {0}'.format(self.name)) + item = None + result = [] + + try: + item = self.network_client.public_ip_addresses.get(self.resource_group, self.name) + except CloudError: + pass + + if item and self.has_tags(item.tags, self.tags): + pip = self.serialize_obj(item, AZURE_OBJECT_CLASS) + pip['name'] = item.name + pip['type'] = item.type + result = [pip] + + return result + + def list_resource_group(self): + self.log('List items in resource groups') + try: + response = self.network_client.public_ip_addresses.list(self.resource_group) + except AzureHttpError as exc: + self.fail("Error listing items in resource groups {0} - {1}".format(self.resource_group, str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + pip = self.serialize_obj(item, AZURE_OBJECT_CLASS) + pip['name'] = item.name + pip['type'] = item.type + results.append(pip) + return results + + def list_all(self): + self.log('List all items') + try: + response = self.network_client.public_ip_addresses.list_all() + except AzureHttpError as exc: + self.fail("Error listing all items - {0}".format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + pip = self.serialize_obj(item, AZURE_OBJECT_CLASS) + pip['name'] = item.name + pip['type'] = item.type + results.append(pip) + return results + + + +def main(): + AzureRMPublicIPFacts() + +if __name__ == '__main__': + main() + diff --git a/cloud/azure/azure_rm_resourcegroup.py b/cloud/azure/azure_rm_resourcegroup.py new file mode 100644 index 
00000000000..34a01afef05 --- /dev/null +++ b/cloud/azure/azure_rm_resourcegroup.py @@ -0,0 +1,268 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: azure_rm_resourcegroup +version_added: "2.1" +short_description: Manage Azure resource groups. +description: + - Create, update and delete a resource group. +options: + force: + description: + - Remove a resource group and all associated resources. Use with state 'absent' to delete a resource + group that contains resources. + default: false + required: false + location: + description: + - Azure location for the resource group. Required when creating a new resource group. Cannot + be changed once resource group is created. + required: false + default: null + name: + description: + - Name of the resource group. + required: true + state: + description: + - Assert the state of the resource group. Use 'present' to create or update and + 'absent' to delete. When 'absent' a resource group containing resources will not be removed unless the + force option is used. 
+ default: present + choices: + - absent + - present + required: false +extends_documentation_fragment: + - azure + - azure_tags + +author: + - "Chris Houseknecht (@chouseknecht)" + - "Matt Davis (@nitzmahone)" + +''' + +EXAMPLES = ''' + - name: Create a resource group + azure_rm_resourcegroup: + name: Testing + location: westus + tags: + testing: testing + delete: never + + - name: Delete a resource group + azure_rm_resourcegroup: + name: Testing + state: absent +''' +RETURN = ''' +contains_resources: + description: Whether or not the resource group contains associated resources. + type: bool + sample: True +state: + description: Current state of the resource group. + returned: always + type: dict + sample: { + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing", + "location": "westus", + "name": "Testing", + "provisioning_state": "Succeeded", + "tags": { + "delete": "on-exit", + "testing": "no" + } + } +''' + +from ansible.module_utils.basic import * +from ansible.module_utils.azure_rm_common import * + + +try: + from msrestazure.azure_exceptions import CloudError + from azure.mgmt.resource.resources.models import ResourceGroup +except ImportError: + pass + + +def resource_group_to_dict(rg): + return dict( + id=rg.id, + name=rg.name, + location=rg.location, + tags=rg.tags, + provisioning_state=rg.properties.provisioning_state + ) + + +class AzureRMResourceGroup(AzureRMModuleBase): + + def __init__(self): + self.module_arg_spec = dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + location=dict(type='str'), + force=dict(type='bool', default=False) + ) + + self.name = None + self.state = None + self.location = None + self.tags = None + self.force = None + + self.results = dict( + changed=False, + contains_resources=False, + state=dict(), + ) + + super(AzureRMResourceGroup, self).__init__(self.module_arg_spec, + supports_check_mode=True, + supports_tags=True) + + def 
exec_module(self, **kwargs): + + for key in self.module_arg_spec.keys() + ['tags']: + setattr(self, key, kwargs[key]) + + results = dict() + changed = False + rg = None + contains_resources = False + + try: + self.log('Fetching resource group {0}'.format(self.name)) + rg = self.rm_client.resource_groups.get(self.name) + self.check_provisioning_state(rg, self.state) + contains_resources = self.resources_exist() + + results = resource_group_to_dict(rg) + if self.state == 'absent': + self.log("CHANGED: resource group {0} exists but requested state is 'absent'".format(self.name)) + changed = True + elif self.state == 'present': + update_tags, results['tags'] = self.update_tags(results['tags']) + self.log("update tags %s" % update_tags) + self.log("new tags: %s" % str(results['tags'])) + if update_tags: + changed = True + + if self.location and self.location != results['location']: + self.fail("Resource group '{0}' already exists in location '{1}' and cannot be " + "moved.".format(self.name, results['location'])) + except CloudError: + self.log('Resource group {0} does not exist'.format(self.name)) + if self.state == 'present': + self.log("CHANGED: resource group {0} does not exist but requested state is " + "'present'".format(self.name)) + changed = True + + self.results['changed'] = changed + self.results['state'] = results + self.results['contains_resources'] = contains_resources + + if self.check_mode: + return self.results + + if changed: + if self.state == 'present': + if not rg: + # Create resource group + self.log("Creating resource group {0}".format(self.name)) + if not self.location: + self.fail("Parameter error: location is required when creating a resource " + "group.".format(self.name)) + if self.name_exists(): + self.fail("Error: a resource group with the name {0} already exists in your subscription." 
+ .format(self.name)) + params = ResourceGroup( + location=self.location, + tags=self.tags + ) + else: + # Update resource group + params = ResourceGroup( + location=results['location'], + tags=results['tags'] + ) + self.results['state'] = self.create_or_update_resource_group(params) + elif self.state == 'absent': + if contains_resources and not self.force: + self.fail("Error removing resource group {0}. Resources exist within the group.".format(self.name)) + self.delete_resource_group() + + return self.results + + def create_or_update_resource_group(self, params): + try: + result = self.rm_client.resource_groups.create_or_update(self.name, params) + except Exception as exc: + self.fail("Error creating or updating resource group {0} - {1}".format(self.name, str(exc))) + return resource_group_to_dict(result) + + def delete_resource_group(self): + try: + poller = self.rm_client.resource_groups.delete(self.name) + self.get_poller_result(poller) + except Exception as exc: + self.fail("Error delete resource group {0} - {1}".format(self.name, str(exc))) + + # The delete operation doesn't return anything. 
+ # If we got here, assume all is good + self.results['state']['status'] = 'Deleted' + return True + + def resources_exist(self): + found = False + try: + response = self.rm_client.resource_groups.list_resources(self.name) + except Exception as exc: + self.fail("Error checking for resource existence in {0} - {1}".format(self.name, str(exc))) + for item in response: + found = True + break + return found + + def name_exists(self): + try: + exists = self.rm_client.resource_groups.check_existence(self.name) + except Exception as exc: + self.fail("Error checking for existence of name {0} - {1}".format(self.name, str(exc))) + return exists + + +def main(): + AzureRMResourceGroup() + +if __name__ == '__main__': + main() + diff --git a/cloud/azure/azure_rm_resourcegroup_facts.py b/cloud/azure/azure_rm_resourcegroup_facts.py new file mode 100644 index 00000000000..a6f7d1c7376 --- /dev/null +++ b/cloud/azure/azure_rm_resourcegroup_facts.py @@ -0,0 +1,172 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: azure_rm_resouregroup_facts + +version_added: "2.1" + +short_description: Get resource group facts. + +description: + - Get facts for a specific resource group or all resource groups. 
+ +options: + name: + description: + - Limit results to a specific resource group. + required: false + default: null + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + required: false + default: null + +extends_documentation_fragment: + - azure + +author: + - "Chris Houseknecht (@chouseknecht)" + - "Matt Davis (@nitzmahone)" + +''' + +EXAMPLES = ''' + - name: Get facts for one resource group + azure_rm_resourcegroup_facts: + name: Testing + + - name: Get facts for all resource groups + azure_rm_securitygroup_facts: + + - name: Get facts by tags + azure_rm_resourcegroup_facts: + tags: + - testing + - foo:bar +''' +RETURN = ''' +azure_resourcegroups: + description: List of resource group dicts. + returned: always + type: list + example: [{ + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing", + "location": "westus", + "name": "Testing", + "properties": { + "provisioningState": "Succeeded" + }, + "tags": { + "delete": "never", + "testing": "testing" + } + }] +''' + +from ansible.module_utils.basic import * +from ansible.module_utils.azure_rm_common import * + +try: + from msrestazure.azure_exceptions import CloudError + from azure.common import AzureMissingResourceHttpError, AzureHttpError +except: + # This is handled in azure_rm_common + pass + + +AZURE_OBJECT_CLASS = 'ResourceGroup' + + +class AzureRMResourceGroupFacts(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + tags=dict(type='list') + ) + + self.results = dict( + changed=False, + ansible_facts=dict(azure_resourcegroups=[]) + ) + + self.name = None + self.tags = None + + super(AzureRMResourceGroupFacts, self).__init__(self.module_arg_spec, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name: + self.results['ansible_facts']['azure_resourcegroups'] = 
self.get_item() + else: + self.results['ansible_facts']['azure_resourcegroups'] = self.list_items() + + return self.results + + def get_item(self): + self.log('Get properties for {0}'.format(self.name)) + item = None + result = [] + + try: + item = self.rm_client.resource_groups.get(self.name) + except CloudError: + pass + + if item and self.has_tags(item.tags, self.tags): + result = [self.serialize_obj(item, AZURE_OBJECT_CLASS)] + + return result + + def list_items(self): + self.log('List all items') + try: + response = self.rm_client.resource_groups.list() + except AzureHttpError as exc: + self.fail("Failed to list all items - {1}".format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS)) + return results + + +def main(): + AzureRMResourceGroupFacts() + +if __name__ == '__main__': + main() + diff --git a/cloud/azure/azure_rm_securitygroup.py b/cloud/azure/azure_rm_securitygroup.py new file mode 100644 index 00000000000..de2569d0941 --- /dev/null +++ b/cloud/azure/azure_rm_securitygroup.py @@ -0,0 +1,712 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: azure_rm_securitygroup +version_added: "2.1" +short_description: Manage Azure network security groups. +description: + - Create, update or delete a network security group. A security group contains Access Control List (ACL) rules + that allow or deny network traffic to subnets or individual network interfaces. A security group is created + with a set of default security rules and an empty set of security rules. Shape traffic flow by adding + rules to the empty set of security rules. + +options: + default_rules: + description: + - The set of default rules automatically added to a security group at creation. In general default + rules will not be modified. Modify rules to shape the flow of traffic to or from a subnet or NIC. See + rules below for the makeup of a rule dict. + required: false + default: null + location: + description: + - Valid azure location. Defaults to location of the resource group. + default: resource_group location + required: false + name: + description: + - Name of the security group to operate on. + required: false + default: null + purge_default_rules: + description: + - Remove any existing rules not matching those defined in the default_rules parameter. + default: false + required: false + purge_rules: + description: + - Remove any existing rules not matching those defined in the rules parameters. + default: false + required: false + resource_group: + description: + - Name of the resource group the security group belongs to. + required: true + rules: + description: + - Set of rules shaping traffic flow to or from a subnet or NIC. Each rule is a dictionary. + type: complex + required: false + default: null + contains: + name: + description: Unique name for the rule. + required: true + description: + description: Short description of the rule's purpose. + protocol: + description: Accepted traffic protocol. 
+ choices: + - Udp + - Tcp + - "*" + default: "*" + source_port_range: + description: Port or range of ports from which traffic originates. + default: "*" + destination_port_range: + description: Port or range of ports to which traffic is headed. + default: "*" + source_address_prefix: + description: IP address or CIDR from which traffic originates. + default: "*" + destination_address_prefix: + description: IP address or CIDR to which traffic is headed. + default: "*" + access: + description: Whether or not to allow the traffic flow. + choices: + - Allow + - Deny + default: Allow + priority: + description: Order in which to apply the rule. Must a unique integer between 100 and 4096 inclusive. + type: int + required: true + direction: + description: Indicates the direction of the traffic flow. + choices: + - Inbound + - Outbound + default: Inbound + state: + description: + - Assert the state of the security group. Set to 'present' to create or update a security group. Set to + 'absent' to remove a security group. 
+ default: present + required: false + choices: + - absent + - present + +extends_documentation_fragment: + - azure + - azure_tags + +author: + - "Chris Houseknecht (@chouseknecht)" + - "Matt Davis (@nitzmahone)" + +''' + +EXAMPLES = ''' + +# Create a security group +- azure_rm_securitygroup: + resource_group: mygroup + name: mysecgroup + purge_rules: yes + rules: + - name: DenySSH + protocol: TCP + destination_port_range: 22 + access: Deny + priority: 100 + direction: Inbound + - name: 'AllowSSH' + protocol: TCP + source_address_prefix: '174.109.158.0/24' + destination_port_range: 22 + access: Allow + priority: 101 + direction: Inbound + +# Update rules on existing security group +- azure_rm_securitygroup: + resource_group: mygroup + name: mysecgroup + rules: + - name: DenySSH + protocol: TCP + destination_port_range: 22-23 + access: Deny + priority: 100 + direction: Inbound + - name: AllowSSHFromHome + protocol: TCP + source_address_prefix: '174.109.158.0/24' + destination_port_range: 22-23 + access: Allow + priority: 102 + direction: Inbound + tags: + testing: testing + delete: on-exit + +# Delete security group +- azure_rm_securitygroup: + resource_group: mygroup + name: mysecgroup + state: absent +''' + +RETURN = ''' +state: + description: Current state of the security group. 
+ returned: always + type: dict + sample: { + "default_rules": [ + { + "access": "Allow", + "description": "Allow inbound traffic from all VMs in VNET", + "destination_address_prefix": "VirtualNetwork", + "destination_port_range": "*", + "direction": "Inbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowVnetInBound", + "name": "AllowVnetInBound", + "priority": 65000, + "protocol": "*", + "provisioning_state": "Succeeded", + "source_address_prefix": "VirtualNetwork", + "source_port_range": "*" + }, + { + "access": "Allow", + "description": "Allow inbound traffic from azure load balancer", + "destination_address_prefix": "*", + "destination_port_range": "*", + "direction": "Inbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowAzureLoadBalancerInBound", + "name": "AllowAzureLoadBalancerInBound", + "priority": 65001, + "protocol": "*", + "provisioning_state": "Succeeded", + "source_address_prefix": "AzureLoadBalancer", + "source_port_range": "*" + }, + { + "access": "Deny", + "description": "Deny all inbound traffic", + "destination_address_prefix": "*", + "destination_port_range": "*", + "direction": "Inbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/DenyAllInBound", + "name": "DenyAllInBound", + "priority": 65500, + "protocol": "*", + "provisioning_state": "Succeeded", + "source_address_prefix": "*", + "source_port_range": "*" + }, + { + "access": "Allow", + "description": "Allow outbound traffic from all VMs to all VMs in VNET", + 
"destination_address_prefix": "VirtualNetwork", + "destination_port_range": "*", + "direction": "Outbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowVnetOutBound", + "name": "AllowVnetOutBound", + "priority": 65000, + "protocol": "*", + "provisioning_state": "Succeeded", + "source_address_prefix": "VirtualNetwork", + "source_port_range": "*" + }, + { + "access": "Allow", + "description": "Allow outbound traffic from all VMs to Internet", + "destination_address_prefix": "Internet", + "destination_port_range": "*", + "direction": "Outbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowInternetOutBound", + "name": "AllowInternetOutBound", + "priority": 65001, + "protocol": "*", + "provisioning_state": "Succeeded", + "source_address_prefix": "*", + "source_port_range": "*" + }, + { + "access": "Deny", + "description": "Deny all outbound traffic", + "destination_address_prefix": "*", + "destination_port_range": "*", + "direction": "Outbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/DenyAllOutBound", + "name": "DenyAllOutBound", + "priority": 65500, + "protocol": "*", + "provisioning_state": "Succeeded", + "source_address_prefix": "*", + "source_port_range": "*" + } + ], + "id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup", + "location": "westus", + "name": "mysecgroup", + "network_interfaces": [], + "rules": [ + { + "access": "Deny", + 
"description": null, + "destination_address_prefix": "*", + "destination_port_range": "22", + "direction": "Inbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/securityRules/DenySSH", + "name": "DenySSH", + "priority": 100, + "protocol": "Tcp", + "provisioning_state": "Succeeded", + "source_address_prefix": "*", + "source_port_range": "*" + }, + { + "access": "Allow", + "description": null, + "destination_address_prefix": "*", + "destination_port_range": "22", + "direction": "Inbound", + "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"', + "id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/securityRules/AllowSSH", + "name": "AllowSSH", + "priority": 101, + "protocol": "Tcp", + "provisioning_state": "Succeeded", + "source_address_prefix": "174.109.158.0/24", + "source_port_range": "*" + } + ], + "subnets": [], + "tags": { + "delete": "on-exit", + "foo": "bar", + "testing": "testing" + }, + "type": "Microsoft.Network/networkSecurityGroups" + } +''' + +from ansible.module_utils.basic import * +from ansible.module_utils.azure_rm_common import * + +try: + from msrestazure.azure_exceptions import CloudError + from azure.common import AzureHttpError + from azure.mgmt.network.models import NetworkSecurityGroup, SecurityRule + from azure.mgmt.network.models.network_management_client_enums import (SecurityRuleAccess, + SecurityRuleDirection, + SecurityRuleProtocol) +except ImportError: + # This is handled in azure_rm_common + pass + + +def validate_rule(rule, rule_type=None): + ''' + Apply defaults to a rule dictionary and check that all values are valid. + + :param rule: rule dict + :param rule_type: Set to 'default' if the rule is part of the default set of rules. 
+ :return: None + ''' + + if not rule.get('name'): + raise Exception("Rule name value is required.") + + priority = rule.get('priority', None) + if not priority: + raise Exception("Rule priority is required.") + if not isinstance(priority, (int, long)): + raise Exception("Rule priority attribute must be an integer.") + if rule_type != 'default' and (priority < 100 or priority > 4096): + raise Exception("Rule priority must be between 100 and 4096") + + if not rule.get('access'): + rule['access'] = 'Allow' + + access_names = [member.value for member in SecurityRuleAccess] + if rule['access'] not in access_names: + raise Exception("Rule access must be one of [{0}]".format(', '.join(access_names))) + + if not rule.get('destination_address_prefix'): + rule['destination_address_prefix'] = '*' + + if not rule.get('source_address_prefix'): + rule['source_address_prefix'] = '*' + + if not rule.get('protocol'): + rule['protocol'] = '*' + + protocol_names = [member.value for member in SecurityRuleProtocol] + if rule['protocol'] not in protocol_names: + raise Exception("Rule protocol must be one of [{0}]".format(', '.join(protocol_names))) + + if not rule.get('direction'): + rule['direction'] = 'Inbound' + + direction_names = [member.value for member in SecurityRuleDirection] + if rule['direction'] not in direction_names: + raise Exception("Rule direction must be one of [{0}]".format(', '.join(direction_names))) + + if not rule.get('source_port_range'): + rule['source_port_range'] = '*' + + if not rule.get('destination_port_range'): + rule['destination_port_range'] = '*' + + +def compare_rules(r, rule): + matched = False + changed = False + if r['name'] == rule['name']: + matched = True + if rule.get('description', None) != r['description']: + changed = True + r['description'] = rule['description'] + if rule['protocol'] != r['protocol']: + changed = True + r['protocol'] = rule['protocol'] + if rule['source_port_range'] != r['source_port_range']: + changed = True + 
r['source_port_range'] = rule['source_port_range'] + if rule['destination_port_range'] != r['destination_port_range']: + changed = True + r['destination_port_range'] = rule['destination_port_range'] + if rule['access'] != r['access']: + changed = True + r['access'] = rule['access'] + if rule['priority'] != r['priority']: + changed = True + r['priority'] = rule['priority'] + if rule['direction'] != r['direction']: + changed = True + r['direction'] = rule['direction'] + return matched, changed + + +def create_rule_instance(rule): + ''' + Create an instance of SecurityRule from a dict. + + :param rule: dict + :return: SecurityRule + ''' + return SecurityRule( + rule['protocol'], + rule['source_address_prefix'], + rule['destination_address_prefix'], + rule['access'], + rule['direction'], + id=rule.get('id', None), + description=rule.get('description', None), + source_port_range=rule.get('source_port_range', None), + destination_port_range=rule.get('destination_port_range', None), + priority=rule.get('priority', None), + provisioning_state=rule.get('provisioning_state', None), + name=rule.get('name', None), + etag=rule.get('etag', None) + ) + + +def create_rule_dict_from_obj(rule): + ''' + Create a dict from an instance of a SecurityRule. 
+ + :param rule: SecurityRule + :return: dict + ''' + return dict( + id=rule.id, + name=rule.name, + description=rule.description, + protocol=rule.protocol, + source_port_range=rule.source_port_range, + destination_port_range=rule.destination_port_range, + source_address_prefix=rule.source_address_prefix, + destination_address_prefix=rule.destination_address_prefix, + access=rule.access, + priority=rule.priority, + direction=rule.direction, + provisioning_state=rule.provisioning_state, + etag=rule.etag + ) + + +def create_network_security_group_dict(nsg): + results = dict( + id=nsg.id, + name=nsg.name, + type=nsg.type, + location=nsg.location, + tags=nsg.tags, + ) + results['rules'] = [] + if nsg.security_rules: + for rule in nsg.security_rules: + results['rules'].append(create_rule_dict_from_obj(rule)) + + results['default_rules'] = [] + if nsg.default_security_rules: + for rule in nsg.default_security_rules: + results['default_rules'].append(create_rule_dict_from_obj(rule)) + + results['network_interfaces'] = [] + if nsg.network_interfaces: + for interface in nsg.network_interfaces: + results['network_interfaces'].append(interface.id) + + results['subnets'] = [] + if nsg.subnets: + for subnet in nsg.subnets: + results['subnets'].append(subnet.id) + + return results + + +class AzureRMSecurityGroup(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + default_rules=dict(type='list'), + location=dict(type='str'), + name=dict(type='str', required=True), + purge_default_rules=dict(type='bool', default=False), + purge_rules=dict(type='bool', default=False), + resource_group=dict(required=True, type='str'), + rules=dict(type='list'), + state=dict(type='str', default='present', choices=['present', 'absent']), + ) + + self.default_rules = None + self.location = None + self.name = None + self.purge_default_rules = None + self.purge_rules = None + self.resource_group = None + self.rules = None + self.state = None + self.tags = None + + self.results 
= dict( + changed=False, + state=dict() + ) + + super(AzureRMSecurityGroup, self).__init__(self.module_arg_spec, + supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec.keys() + ['tags']: + setattr(self, key, kwargs[key]) + + changed = False + results = dict() + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + + if self.rules: + for rule in self.rules: + try: + validate_rule(rule) + except Exception as exc: + self.fail("Error validating rule {0} - {1}".format(rule, str(exc))) + + if self.default_rules: + for rule in self.default_rules: + try: + validate_rule(rule, 'default') + except Exception as exc: + self.fail("Error validating default rule {0} - {1}".format(rule, str(exc))) + + try: + nsg = self.network_client.network_security_groups.get(self.resource_group, self.name) + results = create_network_security_group_dict(nsg) + self.log("Found security group:") + self.log(results, pretty_print=True) + self.check_provisioning_state(nsg, self.state) + if self.state == 'present': + pass + elif self.state == 'absent': + self.log("CHANGED: security group found but state is 'absent'") + changed = True + except CloudError: + if self.state == 'present': + self.log("CHANGED: security group not found and state is 'present'") + changed = True + + if self.state == 'present' and not changed: + # update the security group + self.log("Update security group {0}".format(self.name)) + + if self.rules: + for rule in self.rules: + rule_matched = False + for r in results['rules']: + match, changed = compare_rules(r, rule) + if changed: + changed = True + if match: + rule_matched = True + + if not rule_matched: + changed = True + results['rules'].append(rule) + + if self.purge_rules: + new_rules = [] + for rule in results['rules']: + for r in self.rules: + if rule['name'] == r['name']: + new_rules.append(rule) + results['rules'] = 
new_rules + + if self.default_rules: + for rule in self.default_rules: + rule_matched = False + for r in results['default_rules']: + match, changed = compare_rules(r, rule) + if changed: + changed = True + if match: + rule_matched = True + if not rule_matched: + changed = True + results['default_rules'].append(rule) + + if self.purge_default_rules: + new_default_rules = [] + for rule in results['default_rules']: + for r in self.default_rules: + if rule['name'] == r['name']: + new_default_rules.append(rule) + results['default_rules'] = new_default_rules + + update_tags, results['tags'] = self.update_tags(results['tags']) + if update_tags: + changed = True + + self.results['changed'] = changed + self.results['state'] = results + if not self.check_mode: + self.results['state'] = self.create_or_update(results) + + elif self.state == 'present' and changed: + # create the security group + self.log("Create security group {0}".format(self.name)) + + if not self.location: + self.fail("Parameter error: location required when creating a security group.") + + results['name'] = self.name + results['location'] = self.location + results['rules'] = [] + results['default_rules'] = [] + results['tags'] = {} + + if self.rules: + results['rules'] = self.rules + if self.default_rules: + results['default_rules'] = self.default_rules + if self.tags: + results['tags'] = self.tags + + self.results['changed'] = changed + self.results['state'] = results + if not self.check_mode: + self.results['state'] = self.create_or_update(results) + + elif self.state == 'absent' and changed: + self.log("Delete security group {0}".format(self.name)) + self.results['changed'] = changed + self.results['state'] = dict() + if not self.check_mode: + self.delete() + # the delete does not actually return anything. if no exception, then we'll assume + # it worked. 
+ self.results['state']['status'] = 'Deleted' + + return self.results + + def create_or_update(self, results): + parameters = NetworkSecurityGroup() + if results.get('rules'): + parameters.security_rules = [] + for rule in results.get('rules'): + parameters.security_rules.append(create_rule_instance(rule)) + if results.get('default_rules'): + parameters.default_security_rules = [] + for rule in results.get('default_rules'): + parameters.default_security_rules.append(create_rule_instance(rule)) + parameters.tags = results.get('tags') + parameters.location = results.get('location') + + try: + poller = self.network_client.network_security_groups.create_or_update(self.resource_group, + self.name, + parameters) + result = self.get_poller_result(poller) + except AzureHttpError as exc: + self.fail("Error creating/upating security group {0} - {1}".format(self.name, str(exc))) + return create_network_security_group_dict(result) + + def delete(self): + try: + poller = self.network_client.network_security_groups.delete(self.resource_group, self.name) + result = self.get_poller_result(poller) + except AzureHttpError as exc: + raise Exception("Error deleting security group {0} - {1}".format(self.name, str(exc))) + return result + + +def main(): + AzureRMSecurityGroup() + +if __name__ == '__main__': + main() diff --git a/cloud/azure/azure_rm_securitygroup_facts.py b/cloud/azure/azure_rm_securitygroup_facts.py new file mode 100644 index 00000000000..66c3cd20da6 --- /dev/null +++ b/cloud/azure/azure_rm_securitygroup_facts.py @@ -0,0 +1,291 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: azure_rm_securitygroup_facts + +version_added: "2.1" + +short_description: Get security group facts. + +description: + - Get facts for a specific security group or all security groups within a resource group. + +options: + name: + description: + - Only show results for a specific security group. + required: false + default: null + resource_group: + description: + - Name of the resource group to use. + required: true + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + required: false + default: null + +extends_documentation_fragment: + - azure + +author: + - "Chris Houseknecht (@chouseknecht)" + - "Matt Davis (@nitzmahone)" + +''' + +EXAMPLES = ''' + - name: Get facts for one security group + azure_rm_securitygroup_facts: + resource_group: Testing + name: secgroup001 + + - name: Get facts for all security groups + azure_rm_securitygroup_facts: + resource_group: Testing + +''' + +RETURN = ''' +azure_securitygroups: + description: List containing security group dicts. 
+ returned: always + type: list + example: [{ + "etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"', + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001", + "location": "eastus2", + "name": "secgroup001", + "properties": { + "defaultSecurityRules": [ + { + "etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"', + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/AllowVnetInBound", + "name": "AllowVnetInBound", + "properties": { + "access": "Allow", + "description": "Allow inbound traffic from all VMs in VNET", + "destinationAddressPrefix": "VirtualNetwork", + "destinationPortRange": "*", + "direction": "Inbound", + "priority": 65000, + "protocol": "*", + "provisioningState": "Succeeded", + "sourceAddressPrefix": "VirtualNetwork", + "sourcePortRange": "*" + } + }, + { + "etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"', + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/AllowAzureLoadBalancerInBound", + "name": "AllowAzureLoadBalancerInBound", + "properties": { + "access": "Allow", + "description": "Allow inbound traffic from azure load balancer", + "destinationAddressPrefix": "*", + "destinationPortRange": "*", + "direction": "Inbound", + "priority": 65001, + "protocol": "*", + "provisioningState": "Succeeded", + "sourceAddressPrefix": "AzureLoadBalancer", + "sourcePortRange": "*" + } + }, + { + "etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"', + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/DenyAllInBound", + "name": "DenyAllInBound", + "properties": { + "access": "Deny", + "description": "Deny all inbound traffic", + 
"destinationAddressPrefix": "*", + "destinationPortRange": "*", + "direction": "Inbound", + "priority": 65500, + "protocol": "*", + "provisioningState": "Succeeded", + "sourceAddressPrefix": "*", + "sourcePortRange": "*" + } + }, + { + "etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"', + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/AllowVnetOutBound", + "name": "AllowVnetOutBound", + "properties": { + "access": "Allow", + "description": "Allow outbound traffic from all VMs to all VMs in VNET", + "destinationAddressPrefix": "VirtualNetwork", + "destinationPortRange": "*", + "direction": "Outbound", + "priority": 65000, + "protocol": "*", + "provisioningState": "Succeeded", + "sourceAddressPrefix": "VirtualNetwork", + "sourcePortRange": "*" + } + }, + { + "etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"', + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/AllowInternetOutBound", + "name": "AllowInternetOutBound", + "properties": { + "access": "Allow", + "description": "Allow outbound traffic from all VMs to Internet", + "destinationAddressPrefix": "Internet", + "destinationPortRange": "*", + "direction": "Outbound", + "priority": 65001, + "protocol": "*", + "provisioningState": "Succeeded", + "sourceAddressPrefix": "*", + "sourcePortRange": "*" + } + }, + { + "etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"', + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/DenyAllOutBound", + "name": "DenyAllOutBound", + "properties": { + "access": "Deny", + "description": "Deny all outbound traffic", + "destinationAddressPrefix": "*", + "destinationPortRange": "*", + "direction": "Outbound", + "priority": 65500, + "protocol": "*", + 
"provisioningState": "Succeeded", + "sourceAddressPrefix": "*", + "sourcePortRange": "*" + } + } + ], + "networkInterfaces": [ + { + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/nic004" + } + ], + "provisioningState": "Succeeded", + "resourceGuid": "ebd00afa-5dc8-446f-810a-50dd6f671588", + "securityRules": [] + }, + "tags": {}, + "type": "Microsoft.Network/networkSecurityGroups" + }] + +''' + + +from ansible.module_utils.basic import * +from ansible.module_utils.azure_rm_common import * + +try: + from msrestazure.azure_exceptions import CloudError + from azure.common import AzureMissingResourceHttpError, AzureHttpError +except: + # This is handled in azure_rm_common + pass + + +AZURE_OBJECT_CLASS = 'NetworkSecurityGroup' + + +class AzureRMSecurityGroupFacts(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + resource_group=dict(required=True, type='str'), + tags=dict(type='list'), + ) + + self.results = dict( + changed=False, + ansible_facts=dict(azure_securitygroups=[]) + ) + + self.name = None + self.resource_group = None + self.tags = None + + super(AzureRMSecurityGroupFacts, self).__init__(self.module_arg_spec, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name is not None: + self.results['ansible_facts']['azure_securitygroups'] = self.get_item() + else: + self.results['ansible_facts']['azure_securitygroups'] = self.list_items() + + return self.results + + def get_item(self): + self.log('Get properties for {0}'.format(self.name)) + item = None + result = [] + + try: + item = self.network_client.network_security_groups.get(self.resource_group, self.name) + except CloudError: + pass + + if item and self.has_tags(item.tags, self.tags): + grp = self.serialize_obj(item, AZURE_OBJECT_CLASS) + grp['name'] = item.name + 
result = [grp] + + return result + + def list_items(self): + self.log('List all items') + try: + response = self.network_client.network_security_groups.list(self.resource_group) + except Exception as exc: + self.fail("Error listing all items - {0}".format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + grp = self.serialize_obj(item, AZURE_OBJECT_CLASS) + grp['name'] = item.name + results.append(grp) + return results + + +def main(): + AzureRMSecurityGroupFacts() + +if __name__ == '__main__': + main() + diff --git a/cloud/azure/azure_rm_storageaccount.py b/cloud/azure/azure_rm_storageaccount.py new file mode 100644 index 00000000000..b71db78e96e --- /dev/null +++ b/cloud/azure/azure_rm_storageaccount.py @@ -0,0 +1,446 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: azure_rm_storageaccount +version_added: "2.1" +short_description: Manage Azure storage accounts. +description: + - Create, update or delete a storage account. +options: + resource_group: + description: + - Name of the resource group to use. + required: true + name: + description: + - Name of the storage account to update or create. 
+ required: false + default: null + state: + description: + - Assert the state of the storage account. Use 'present' to create or update a storage account and + 'absent' to delete an account. + default: present + required: false + choices: + - absent + - present + location: + description: + - Valid azure location. Defaults to location of the resource group. + required: false + default: resource_group location + account_type: + description: + - "Type of storage account. Required when creating a storage account. NOTE: Standard_ZRS and Premium_LRS + accounts cannot be changed to other account types, and other account types cannot be changed to + Standard_ZRS or Premium_LRS." + required: false + default: null + choices: + - Premium_LRS + - Standard_GRS + - Standard_LRS + - Standard_RAGRS + - Standard_ZRS + aliases: + - type + custom_domain: + description: + - User domain assigned to the storage account. Must be a dictionary with 'name' and 'use_sub_domain' + keys where 'name' is the CNAME source. Only one custom domain is supported per storage account at this + time. To clear the existing custom domain, use an empty string for the custom domain name property. + - Can be added to an existing storage account. Will be ignored during storage account creation. + required: false + default: null + kind: + description: + - The 'kind' of storage. 
+ required: false + default: 'Storage' + choices: + - Storage + - StorageBlob + version_added: "2.2" + +extends_documentation_fragment: + - azure + - azure_tags + +author: + - "Chris Houseknecht (@chouseknecht)" + - "Matt Davis (@nitzmahone)" + +''' + +EXAMPLES = ''' + - name: remove account, if it exists + azure_rm_storageaccount: + resource_group: Testing + name: clh0002 + state: absent + + - name: create an account + azure_rm_storageaccount: + resource_group: Testing + name: clh0002 + type: Standard_RAGRS + tags: + - testing: testing + - delete: on-exit +''' + + +RETURN = ''' +state: + description: Current state of the storage account. + returned: always + type: dict + sample: { + "account_type": "Standard_RAGRS", + "custom_domain": null, + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/testing/providers/Microsoft.Storage/storageAccounts/clh0003", + "location": "eastus2", + "name": "clh0003", + "primary_endpoints": { + "blob": "https://clh0003.blob.core.windows.net/", + "queue": "https://clh0003.queue.core.windows.net/", + "table": "https://clh0003.table.core.windows.net/" + }, + "primary_location": "eastus2", + "provisioning_state": "Succeeded", + "resource_group": "Testing", + "secondary_endpoints": { + "blob": "https://clh0003-secondary.blob.core.windows.net/", + "queue": "https://clh0003-secondary.queue.core.windows.net/", + "table": "https://clh0003-secondary.table.core.windows.net/" + }, + "secondary_location": "centralus", + "status_of_primary": "Available", + "status_of_secondary": "Available", + "tags": null, + "type": "Microsoft.Storage/storageAccounts" + } +''' + + +from ansible.module_utils.basic import * +from ansible.module_utils.azure_rm_common import * + +try: + from msrestazure.azure_exceptions import CloudError + from azure.storage.cloudstorageaccount import CloudStorageAccount + from azure.common import AzureMissingResourceHttpError, AzureHttpError + from azure.mgmt.storage.models.storage_management_client_enums import 
ProvisioningState, SkuName, SkuTier, Kind + from azure.mgmt.storage.models import StorageAccountUpdateParameters, CustomDomain, \ + StorageAccountCreateParameters, Sku +except ImportError: + # This is handled in azure_rm_common + pass + + + +class AzureRMStorageAccount(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + account_type=dict(type='str', choices=[], aliases=['type']), + custom_domain=dict(type='dict'), + location=dict(type='str'), + name=dict(type='str', required=True), + resource_group=dict(required=True, type='str'), + state=dict(default='present', choices=['present', 'absent']), + force=dict(type='bool', default=False), + tags=dict(type='dict'), + kind=dict(type='str', default='Storage', choices=['Storage', 'BlobStorage']) + ) + + for key in SkuName: + self.module_arg_spec['account_type']['choices'].append(getattr(key, 'value')) + + self.results = dict( + changed=False, + state=dict() + ) + + self.account_dict = None + self.resource_group = None + self.name = None + self.state = None + self.location = None + self.account_type = None + self.custom_domain = None + self.tags = None + self.force = None + self.kind = None + + super(AzureRMStorageAccount, self).__init__(self.module_arg_spec, + supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec.keys() + ['tags']: + setattr(self, key, kwargs[key]) + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + + if len(self.name) < 3 or len(self.name) > 24: + self.fail("Parameter error: name length must be between 3 and 24 characters.") + + if self.custom_domain: + if self.custom_domain.get('name', None) is None: + self.fail("Parameter error: expecting custom_domain to have a name attribute of type string.") + if self.custom_domain.get('use_sub_domain', None) is None: + self.fail("Parameter error: expecting custom_domain to have a 
use_sub_domain " + "attribute of type boolean.") + + self.account_dict = self.get_account() + + if self.state == 'present' and self.account_dict and \ + self.account_dict['provisioning_state'] != AZURE_SUCCESS_STATE : + self.fail("Error: storage account {0} has not completed provisioning. State is {1}. Expecting state " + "to be {2}.".format(self.name, self.account_dict['provisioning_state'], AZURE_SUCCESS_STATE)) + + if self.account_dict is not None: + self.results['state'] = self.account_dict + else: + self.results['state'] = dict() + + if self.state == 'present': + if not self.account_dict: + self.results['state'] = self.create_account() + else: + self.update_account() + elif self.state == 'absent' and self.account_dict: + self.delete_account() + self.results['state'] = dict(Status='Deleted') + + return self.results + + def check_name_availability(self): + self.log('Checking name availability for {0}'.format(self.name)) + try: + response = self.storage_client.storage_accounts.check_name_availability(self.name) + except AzureHttpError as e: + self.log('Error attempting to validate name.') + self.fail("Error checking name availability: {0}".format(str(e))) + if not response.name_available: + self.log('Error name not available.') + self.fail("{0} - {1}".format(response.message, response.reason)) + + def get_account(self): + self.log('Get properties for account {0}'.format(self.name)) + account_obj = None + account_dict = None + + try: + account_obj = self.storage_client.storage_accounts.get_properties(self.resource_group, self.name) + except CloudError: + pass + + if account_obj: + account_dict = self.account_obj_to_dict(account_obj) + + return account_dict + + def account_obj_to_dict(self, account_obj): + account_dict = dict( + id=account_obj.id, + name=account_obj.name, + location=account_obj.location, + resource_group=self.resource_group, + type=account_obj.type, + sku_tier=account_obj.sku.tier.value, + sku_name=account_obj.sku.name.value, + 
provisioning_state=account_obj.provisioning_state.value, + secondary_location=account_obj.secondary_location, + status_of_primary=(account_obj.status_of_primary.value + if account_obj.status_of_primary is not None else None), + status_of_secondary=(account_obj.status_of_secondary.value + if account_obj.status_of_secondary is not None else None), + primary_location=account_obj.primary_location + ) + account_dict['custom_domain'] = None + if account_obj.custom_domain: + account_dict['custom_domain'] = dict( + name=account_obj.custom_domain.name, + use_sub_domain=account_obj.custom_domain.use_sub_domain + ) + + account_dict['primary_endpoints'] = None + if account_obj.primary_endpoints: + account_dict['primary_endpoints'] = dict( + blob=account_obj.primary_endpoints.blob, + queue=account_obj.primary_endpoints.queue, + table=account_obj.primary_endpoints.table + ) + account_dict['secondary_endpoints'] = None + if account_obj.secondary_endpoints: + account_dict['secondary_endpoints'] = dict( + blob=account_obj.secondary_endpoints.blob, + queue=account_obj.secondary_endpoints.queue, + table=account_obj.secondary_endpoints.table + ) + account_dict['tags'] = None + if account_obj.tags: + account_dict['tags'] = account_obj.tags + return account_dict + + def update_account(self): + self.log('Update storage account {0}'.format(self.name)) + if self.account_type: + if self.account_type != self.account_dict['sku_name']: + # change the account type + if self.account_dict['sku_name'] in [SkuName.premium_lrs, SkuName.standard_zrs]: + self.fail("Storage accounts of type {0} and {1} cannot be changed.".format( + SkuName.premium_lrs, SkuName.standard_zrs)) + if self.account_type in [SkuName.premium_lrs, SkuName.standard_zrs]: + self.fail("Storage account of type {0} cannot be changed to a type of {1} or {2}.".format( + self.account_dict['sku_name'], SkuName.premium_lrs, SkuName.standard_zrs)) + + self.results['changed'] = True + self.account_dict['sku_name'] = self.account_type + + 
if self.results['changed'] and not self.check_mode: + # Perform the update. The API only allows changing one attribute per call. + try: + self.log("sku_name: %s" % self.account_dict['sku_name']) + self.log("sku_tier: %s" % self.account_dict['sku_tier']) + sku = Sku(SkuName(self.account_dict['sku_name'])) + sku.tier = SkuTier(self.account_dict['sku_tier']) + parameters = StorageAccountUpdateParameters(sku=sku) + self.storage_client.storage_accounts.update(self.resource_group, + self.name, + parameters) + except Exception as exc: + self.fail("Failed to update account type: {0}".format(str(exc))) + + if self.custom_domain: + if not self.account_dict['custom_domain'] or \ + self.account_dict['custom_domain'] != self.account_dict['custom_domain']: + self.results['changed'] = True + self.account_dict['custom_domain'] = self.custom_domain + + if self.results['changed'] and not self.check_mode: + new_domain = CustomDomain(name=self.custom_domain['name'], + use_sub_domain=self.custom_domain['use_sub_domain']) + parameters = StorageAccountUpdateParameters(custom_domain=new_domain) + try: + self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters) + except Exception as exc: + self.fail("Failed to update custom domain: {0}".format(str(exc))) + + update_tags, self.account_dict['tags'] = self.update_tags(self.account_dict['tags']) + if update_tags: + self.results['changed'] = True + if not self.check_mode: + parameters = StorageAccountUpdateParameters(tags=self.account_dict['tags']) + try: + self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters) + except Exception as exc: + self.fail("Failed to update tags: {0}".format(str(exc))) + + def create_account(self): + self.log("Creating account {0}".format(self.name)) + + if not self.location: + self.fail('Parameter error: location required when creating a storage account.') + + if not self.account_type: + self.fail('Parameter error: account_type required when creating a 
storage account.') + + self.check_name_availability() + self.results['changed'] = True + + if self.check_mode: + account_dict = dict( + location=self.location, + account_type=self.account_type, + name=self.name, + resource_group=self.resource_group, + tags=dict() + ) + if self.tags: + account_dict['tags'] = self.tags + return account_dict + sku = Sku(SkuName(self.account_type)) + sku.tier = SkuTier.standard if 'Standard' in self.account_type else SkuTier.premium + parameters = StorageAccountCreateParameters(sku, self.kind, self.location, tags=self.tags) + self.log(str(parameters)) + try: + poller = self.storage_client.storage_accounts.create(self.resource_group, self.name, parameters) + self.get_poller_result(poller) + except AzureHttpError as e: + self.log('Error creating storage account.') + self.fail("Failed to create account: {0}".format(str(e))) + # the poller doesn't actually return anything + return self.get_account() + + def delete_account(self): + if self.account_dict['provisioning_state'] == ProvisioningState.succeeded.value and \ + self.account_has_blob_containers() and self.force: + self.fail("Account contains blob containers. Is it in use? Use the force option to attempt deletion.") + + self.log('Delete storage account {0}'.format(self.name)) + self.results['changed'] = True + if not self.check_mode: + try: + status = self.storage_client.storage_accounts.delete(self.resource_group, self.name) + self.log("delete status: ") + self.log(str(status)) + except AzureHttpError as e: + self.fail("Failed to delete the account: {0}".format(str(e))) + return True + + def account_has_blob_containers(self): + ''' + If there are blob containers, then there are likely VMs depending on this account and it should + not be deleted. 
+ ''' + self.log('Checking for existing blob containers') + blob_service = self.get_blob_client(self.resource_group, self.name) + try: + response = blob_service.list_containers() + except AzureMissingResourceHttpError: + # No blob storage available? + return False + + if len(response.items) > 0: + return True + return False + + +def main(): + AzureRMStorageAccount() + +if __name__ == '__main__': + main() diff --git a/cloud/azure/azure_rm_storageaccount_facts.py b/cloud/azure/azure_rm_storageaccount_facts.py new file mode 100644 index 00000000000..bbc18eb0ad5 --- /dev/null +++ b/cloud/azure/azure_rm_storageaccount_facts.py @@ -0,0 +1,206 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: azure_rm_storageaccount_facts + +version_added: "2.1" + +short_description: Get storage account facts. + +description: + - Get facts for one storage account or all storage accounts within a resource group. + +options: + name: + description: + - Only show results for a specific account. + required: false + default: null + resource_group: + description: + - Limit results to a resource group. Required when filtering by name. 
+ required: false + default: null + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + required: false + default: null + +extends_documentation_fragment: + - azure + +author: + - "Chris Houseknecht (@chouseknecht)" + - "Matt Davis (@nitzmahone)" + +''' + +EXAMPLES = ''' + - name: Get facts for one account + azure_rm_storageaccount_facts: + resource_group: Testing + name: clh0002 + + - name: Get facts for all accounts in a resource group + azure_rm_storageaccount_facts: + resource_group: Testing + + - name: Get facts for all accounts by tags + azure_rm_storageaccount_facts: + tags: + - testing + - foo:bar +''' + +RETURN = ''' +azure_storageaccounts: + description: List of storage account dicts. + returned: always + type: list + example: [{ + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/testing/providers/Microsoft.Storage/storageAccounts/testaccount001", + "location": "eastus2", + "name": "testaccount001", + "properties": { + "accountType": "Standard_LRS", + "creationTime": "2016-03-28T02:46:58.290113Z", + "primaryEndpoints": { + "blob": "https://testaccount001.blob.core.windows.net/", + "file": "https://testaccount001.file.core.windows.net/", + "queue": "https://testaccount001.queue.core.windows.net/", + "table": "https://testaccount001.table.core.windows.net/" + }, + "primaryLocation": "eastus2", + "provisioningState": "Succeeded", + "statusOfPrimary": "Available" + }, + "tags": {}, + "type": "Microsoft.Storage/storageAccounts" + }] +''' + +AZURE_OBJECT_CLASS = 'StorageAccount' + + +from ansible.module_utils.basic import * +from ansible.module_utils.azure_rm_common import * + +try: + from msrestazure.azure_exceptions import CloudError + from azure.common import AzureMissingResourceHttpError, AzureHttpError +except: + # This is handled in azure_rm_common + pass + + +class AzureRMStorageAccountFacts(AzureRMModuleBase): + def __init__(self): + + self.module_arg_spec = dict( + 
name=dict(type='str'), + resource_group=dict(type='str'), + tags=dict(type='list'), + ) + + self.results = dict( + changed=False, + ansible_facts=dict(azure_storageaccounts=[]) + ) + + self.name = None + self.resource_group = None + self.tags = None + + super(AzureRMStorageAccountFacts, self).__init__(self.module_arg_spec, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name and not self.resource_group: + self.fail("Parameter error: resource group required when filtering by name.") + + if self.name: + self.results['ansible_facts']['azure_storageaccounts'] = self.get_account() + elif self.resource_group: + self.results['ansible_facts']['azure_storageaccounts'] = self.list_resource_group() + else: + self.results['ansible_facts']['azure_storageaccounts'] = self.list_all() + + return self.results + + def get_account(self): + self.log('Get properties for account {0}'.format(self.name)) + account = None + result = [] + + try: + account = self.storage_client.storage_accounts.get_properties(self.resource_group, self.name) + except CloudError: + pass + + if account and self.has_tags(account.tags, self.tags): + result = [self.serialize_obj(account, AZURE_OBJECT_CLASS)] + + return result + + def list_resource_group(self): + self.log('List items') + try: + response = self.storage_client.storage_accounts.list_by_resource_group(self.resource_group) + except Exception as exc: + self.fail("Error listing for resource group {0} - {1}".format(self.resource_group, str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS)) + return results + + def list_all(self): + self.log('List all items') + try: + response = self.storage_client.storage_accounts.list_by_resource_group(self.resource_group) + except Exception as exc: + self.fail("Error listing all items - 
{0}".format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS)) + return results + + +def main(): + AzureRMStorageAccountFacts() + +if __name__ == '__main__': + main() diff --git a/cloud/azure/azure_rm_storageblob.py b/cloud/azure/azure_rm_storageblob.py new file mode 100644 index 00000000000..9ff235fd961 --- /dev/null +++ b/cloud/azure/azure_rm_storageblob.py @@ -0,0 +1,571 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: azure_rm_storageblob +short_description: Manage blob containers and blob objects. +version_added: "2.1" +description: + - Create, update and delete blob containers and blob objects. Use to upload a file and store it as a blob object, + or download a blob object to a file. +options: + storage_account_name: + description: + - Name of the storage account to use. + required: true + aliases: + - account_name + blob: + description: + - Name of a blob object within the container. + required: false + default: null + aliases: + - blob_name + container: + description: + - Name of a blob container within the storage account. 
+ required: true + aliases: + - container_name + content_type: + description: + - Set the blob content-type header. For example, 'image/png'. + default: null + required: false + cache_control: + description: + - Set the blob cache-control header. + required: false + default: null + content_disposition: + description: + - Set the blob content-disposition header. + required: false + default: null + content_encoding: + description: + - Set the blob encoding header. + required: false + default: null + content_language: + description: + - Set the blob content-language header. + required: false + default: null + content_md5: + description: + - Set the blob md5 hash value. + required: false + default: null + dest: + description: + - Destination file path. Use with state 'present' to download a blob. + aliases: + - destination + required: false + default: null + force: + description: + - Overwrite existing blob or file when uploading or downloading. Force deletion of a container + that contains blobs. + default: false + required: false + resource_group: + description: + - Name of the resource group to use. + required: true + src: + description: + - Source file path. Use with state 'present' to upload a blob. + aliases: + - source + required: false + default: null + state: + description: + - Assert the state of a container or blob. + - Use state 'absent' with a container value only to delete a container. Include a blob value to remove + a specific blob. A container will not be deleted, if it contains blobs. Use the force option to override, + deleting the container and all associated blobs. + - Use state 'present' to create or update a container and upload or download a blob. If the container + does not exist, it will be created. If it exists, it will be updated with configuration options. Provide + a blob name and either src or dest to upload or download. Provide a src path to upload and a dest path + to download. 
If a blob (uploading) or a file (downloading) already exists, it will not be overwritten + unless the force parameter is true. + default: present + required: false + choices: + - absent + - present + public_access: + description: + - Determine a container's level of public access. By default containers are private. Can only be set at + time of container creation. + required: false + default: null + choices: + - container + - blob + +extends_documentation_fragment: + - azure + - azure_tags + +author: + - "Chris Houseknecht (@chouseknecht)" + - "Matt Davis (@nitzmahone)" + +''' + +EXAMPLES = ''' +- name: Remove container foo + azure_rm_storageblob: + resource_group: testing + storage_account_name: clh0002 + container: foo + state: absent + +- name: Create container foo and upload a file + azure_rm_storageblob: + resource_group: Testing + storage_account_name: clh0002 + container: foo + blob: graylog.png + src: ./files/graylog.png + public_access: container + content_type: 'application/image' + +- name: Download the file + azure_rm_storageblob: + resource_group: Testing + storage_account_name: clh0002 + container: foo + blob: graylog.png + dest: ~/tmp/images/graylog.png +''' + +RETURN = ''' +blob: + description: Facts about the current state of the blob. + returned: when a blob is operated on + type: dict + sample: { + "content_length": 136532, + "content_settings": { + "cache_control": null, + "content_disposition": null, + "content_encoding": null, + "content_language": null, + "content_md5": null, + "content_type": "application/image" + }, + "last_modified": "09-Mar-2016 22:08:25 +0000", + "name": "graylog.png", + "tags": {}, + "type": "BlockBlob" + } +container: + description: Facts about the current state of the selected container. 
+ returned: always + type: dict + sample: { + "last_mdoified": "09-Mar-2016 19:28:26 +0000", + "name": "foo", + "tags": {} + } +''' + + +import os + +from ansible.module_utils.basic import * +from ansible.module_utils.azure_rm_common import * + + +try: + from azure.storage.blob.models import ContentSettings + from azure.common import AzureMissingResourceHttpError, AzureHttpError +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMStorageBlob(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + storage_account_name=dict(required=True, type='str', aliases=['account_name']), + blob=dict(type='str', aliases=['blob_name']), + container=dict(required=True, type='str', aliases=['container_name']), + dest=dict(type='str'), + force=dict(type='bool', default=False), + resource_group=dict(required=True, type='str'), + src=dict(type='str'), + state=dict(type='str', default='present', choices=['absent', 'present']), + public_access=dict(type='str', choices=['container', 'blob']), + content_type=dict(type='str'), + content_encoding=dict(type='str'), + content_language=dict(type='str'), + content_disposition=dict(type='str'), + cache_control=dict(type='str'), + content_md5=dict(type='str'), + ) + + mutually_exclusive = [('src', 'dest')] + + self.blob_client = None + self.blob_details = None + self.storage_account_name = None + self.blob = None + self.blob_obj = None + self.container = None + self.container_obj = None + self.dest = None + self.force = None + self.resource_group = None + self.src = None + self.state = None + self.tags = None + self.public_access = None + self.results = dict( + changed=False, + actions=[], + container=dict(), + blob=dict() + ) + + super(AzureRMStorageBlob, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + supports_tags=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec.keys() + ['tags']: + 
setattr(self, key, kwargs[key]) + + self.results['check_mode'] = self.check_mode + + # add file path validation + + self.blob_client = self.get_blob_client(self.resource_group, self.storage_account_name) + self.container_obj = self.get_container() + + if self.blob is not None: + self.blob_obj = self.get_blob() + + if self.state == 'present': + if not self.container_obj: + # create the container + self.create_container() + elif self.container_obj and not self.blob: + # update container attributes + update_tags, self.container_obj['tags'] = self.update_tags(self.container_obj.get('tags')) + if update_tags: + self.update_container_tags(self.container_obj['tags']) + + if self.blob: + # create, update or download blob + if self.src and self.src_is_valid(): + if self.blob_obj and not self.force: + self.log("Cannot upload to {0}. Blob with that name already exists. " + "Use the force option".format(self.blob)) + else: + self.upload_blob() + elif self.dest and self.dest_is_valid(): + self.download_blob() + + update_tags, self.blob_obj['tags'] = self.update_tags(self.blob_obj.get('tags')) + if update_tags: + self.update_blob_tags(self.blob_obj['tags']) + + if self.blob_content_settings_differ(): + self.update_blob_content_settings() + + elif self.state == 'absent': + if self.container_obj and not self.blob: + # Delete container + if self.container_has_blobs(): + if self.force: + self.delete_container() + else: + self.log("Cannot delete container {0}. It contains blobs. 
Use the force option.".format( + self.container)) + else: + self.delete_container() + elif self.container_obj and self.blob_obj: + # Delete blob + self.delete_blob() + + # until we sort out how we want to do this globally + del self.results['actions'] + return self.results + + def get_container(self): + result = dict() + container = None + if self.container: + try: + container = self.blob_client.get_container_properties(self.container) + except AzureMissingResourceHttpError: + pass + if container: + result = dict( + name=container.name, + tags=container.metadata, + last_modified=container.properties.last_modified.strftime('%d-%b-%Y %H:%M:%S %z'), + ) + return result + + def get_blob(self): + result = dict() + blob = None + if self.blob: + try: + blob = self.blob_client.get_blob_properties(self.container, self.blob) + except AzureMissingResourceHttpError: + pass + if blob: + result = dict( + name=blob.name, + tags=blob.metadata, + last_modified=blob.properties.last_modified.strftime('%d-%b-%Y %H:%M:%S %z'), + type=blob.properties.blob_type, + content_length=blob.properties.content_length, + content_settings=dict( + content_type=blob.properties.content_settings.content_type, + content_encoding=blob.properties.content_settings.content_encoding, + content_language=blob.properties.content_settings.content_language, + content_disposition=blob.properties.content_settings.content_disposition, + cache_control=blob.properties.content_settings.cache_control, + content_md5=blob.properties.content_settings.content_md5 + ) + ) + return result + + def create_container(self): + self.log('Create container %s' % self.container) + + tags = None + if not self.blob and self.tags: + # when a blob is present, then tags are assigned at the blob level + tags = self.tags + + if not self.check_mode: + try: + self.blob_client.create_container(self.container, metadata=tags, public_access=self.public_access) + except AzureHttpError as exc: + self.fail("Error creating container {0} - 
{1}".format(self.container, str(exc))) + self.container_obj = self.get_container() + self.results['changed'] = True + self.results['actions'].append('created container {0}'.format(self.container)) + self.results['container'] = self.container_obj + + def upload_blob(self): + content_settings = None + if self.content_type or self.content_encoding or self.content_language or self.content_disposition or \ + self.cache_control or self.content_md5: + content_settings = ContentSettings( + content_type=self.content_type, + content_encoding=self.content_encoding, + content_language=self.content_language, + content_disposition=self.content_disposition, + cache_control=self.cache_control, + content_md5=self.content_md5 + ) + if not self.check_mode: + try: + self.blob_client.create_blob_from_path(self.container, self.blob, self.src, + metadata=self.tags, content_settings=content_settings) + except AzureHttpError as exc: + self.fail("Error creating blob {0} - {1}".format(self.blob, str(exc))) + + self.blob_obj = self.get_blob() + self.results['changed'] = True + self.results['actions'].append('created blob {0} from {1}'.format(self.blob, self.src)) + self.results['container'] = self.container_obj + self.results['blob'] = self.blob_obj + + def download_blob(self): + if not self.check_mode: + try: + self.blob_client.get_blob_to_path(self.container, self.blob, self.dest) + except Exception as exc: + self.fail("Failed to download blob {0}:{1} to {2} - {3}".format(self.container, + self.blob, + self.dest, + exc)) + self.results['changed'] = True + self.results['actions'].append('downloaded blob {0}:{1} to {2}'.format(self.container, + self.blob, + self.dest)) + + self.results['container'] = self.container_obj + self.results['blob'] = self.blob_obj + + def src_is_valid(self): + if not os.path.isfile(self.src): + self.fail("The source path must be a file.") + try: + fp = open(self.src, 'r') + fp.close() + except IOError: + self.fail("Failed to access {0}. 
Make sure the file exists and that you have " + "read access.".format(self.src)) + return True + + def dest_is_valid(self): + if not self.check_mode: + self.dest = os.path.expanduser(self.dest) + self.dest = os.path.expandvars(self.dest) + if not os.path.basename(self.dest): + # dest is a directory + if os.path.isdir(self.dest): + self.log("Path is dir. Appending blob name.") + self.dest += self.blob + else: + try: + self.log('Attempting to makedirs {0}'.format(self.dest)) + os.makedirs(self.dest) + except IOError as exc: + self.fail("Failed to create directory {0} - {1}".format(self.dest, str(exc))) + self.dest += self.blob + else: + # does path exist without basename + file_name = os.path.basename(self.dest) + path = self.dest.replace(file_name, '') + self.log('Checking path {0}'.format(path)) + if not os.path.isdir(path): + try: + self.log('Attempting to makedirs {0}'.format(path)) + os.makedirs(path) + except IOError as exc: + self.fail("Failed to create directory {0} - {1}".format(path, str(exc))) + self.log('Checking final path {0}'.format(self.dest)) + if os.path.isfile(self.dest) and not self.force: + # dest already exists and we're not forcing + self.log("Dest {0} already exists. Cannot download. 
Use the force option.".format(self.dest)) + return False + return True + + def delete_container(self): + if not self.check_mode: + try: + self.blob_client.delete_container(self.container) + except AzureHttpError as exc: + self.fail("Error deleting container {0} - {1}".format(self.container, str(exc))) + + self.results['changed'] = True + self.results['actions'].append('deleted container {0}'.format(self.container)) + + def container_has_blobs(self): + try: + list_generator = self.blob_client.list_blobs(self.container) + except AzureHttpError as exc: + self.fail("Error list blobs in {0} - {1}".format(self.container, str(exc))) + if len(list_generator.items) > 0: + return True + return False + + def delete_blob(self): + if not self.check_mode: + try: + self.blob_client.delete_blob(self.container, self.blob) + except AzureHttpError as exc: + self.fail("Error deleting blob {0}:{1} - {2}".format(self.container, self.blob, str(exc))) + + self.results['changed'] = True + self.results['actions'].append('deleted blob {0}:{1}'.format(self.container, self.blob)) + self.results['container'] = self.container_obj + + def update_container_tags(self, tags): + if not self.check_mode: + try: + self.blob_client.set_container_metadata(self.container, metadata=tags) + except AzureHttpError as exc: + self.fail("Error updating container tags {0} - {1}".format(self.container, str(exc))) + self.container_obj = self.get_container() + self.results['changed'] = True + self.results['actions'].append("updated container {0} tags.".format(self.container)) + self.results['container'] = self.container_obj + + def update_blob_tags(self, tags): + if not self.check_mode: + try: + self.blob_client.set_blob_metadata(self.container, self.blob, metadata=tags) + except AzureHttpError as exc: + self.fail("Update blob tags {0}:{1} - {2}".format(self.container, self.blob, str(exc))) + self.blob_obj = self.get_blob() + self.results['changed'] = True + self.results['actions'].append("updated blob {0}:{1} 
tags.".format(self.container, self.blob)) + self.results['container'] = self.container_obj + self.results['blob'] = self.blob_obj + + def blob_content_settings_differ(self): + if self.content_type or self.content_encoding or self.content_language or self.content_disposition or \ + self.cache_control or self.content_md5: + settings = dict( + content_type=self.content_type, + content_encoding=self.content_encoding, + content_language=self.content_language, + content_disposition=self.content_disposition, + cache_control=self.cache_control, + content_md5=self.content_md5 + ) + if self.blob_obj['content_settings'] != settings: + return True + + return False + + def update_blob_content_settings(self): + content_settings = ContentSettings( + content_type=self.content_type, + content_encoding=self.content_encoding, + content_language=self.content_language, + content_disposition=self.content_disposition, + cache_control=self.cache_control, + content_md5=self.content_md5 + ) + if not self.check_mode: + try: + self.blob_client.set_blob_properties(self.container, self.blob, content_settings=content_settings) + except AzureHttpError as exc: + self.fail("Update blob content settings {0}:{1} - {2}".format(self.container, self.blob, str(exc))) + + self.blob_obj = self.get_blob() + self.results['changed'] = True + self.results['actions'].append("updated blob {0}:{1} content settings.".format(self.container, self.blob)) + self.results['container'] = self.container_obj + self.results['blob'] = self.blob_obj + + +def main(): + AzureRMStorageBlob() + +if __name__ == '__main__': + main() diff --git a/cloud/azure/azure_rm_subnet.py b/cloud/azure/azure_rm_subnet.py new file mode 100644 index 00000000000..5155715ac28 --- /dev/null +++ b/cloud/azure/azure_rm_subnet.py @@ -0,0 +1,318 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of 
the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: azure_rm_subnet +version_added: "2.1" +short_description: Manage Azure subnets. +description: + - Create, update or delete a subnet within a given virtual network. Allows setting and updating the address + prefix CIDR, which must be valid within the context of the virtual network. Use the azure_rm_networkinterface + module to associate interfaces with the subnet and assign specific IP addresses. +options: + resource_group: + description: + - Name of resource group. + required: true + name: + description: + - Name of the subnet. + required: true + address_prefix_cidr: + description: + - CIDR defining the IPv4 address space of the subnet. Must be valid within the context of the + virtual network. + required: true + aliases: + - address_prefix + security_group_name: + description: + - Name of an existing security group with which to associate the subnet. + required: false + default: null + aliases: + - security_group + state: + description: + - Assert the state of the subnet. Use 'present' to create or update a subnet and + 'absent' to delete a subnet. + required: true + default: present + choices: + - absent + - present + virtual_network_name: + description: + - Name of an existing virtual network with which the subnet is or will be associated. 
+ required: true + aliases: + - virtual_network + +extends_documentation_fragment: + - azure + +author: + - "Chris Houseknecht (@chouseknecht)" + - "Matt Davis (@nitzmahone)" + +''' + +EXAMPLES = ''' + - name: Create a subnet + azure_rm_subnet: + name: foobar + virtual_network_name: My_Virtual_Network + resource_group: Testing + address_prefix_cidr: "10.1.0.0/24" + + - name: Delete a subnet + azure_rm_subnet: + name: foobar + virtual_network_name: My_Virtual_Network + resource_group: Testing + state: absent +''' + +RETURN = ''' +state: + description: Current state of the subnet. + returned: success + type: complex + contains: + address_prefix: + description: IP address CIDR. + type: str + example: "10.1.0.0/16" + id: + description: Subnet resource path. + type: str + example: "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/My_Virtual_Network/subnets/foobar" + name: + description: Subnet name. + type: str + example: "foobar" + network_security_group: + type: complex + contains: + id: + description: Security group resource identifier. + type: str + example: "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroupfoo" + name: + description: Name of the security group. + type: str + example: "secgroupfoo" + provisioning_state: + description: Success or failure of the provisioning event. 
+ type: str + example: "Succeeded" +''' + + +from ansible.module_utils.basic import * +from ansible.module_utils.azure_rm_common import * + +try: + from msrestazure.azure_exceptions import CloudError + from azure.mgmt.network.models import Subnet, NetworkSecurityGroup +except ImportError: + # This is handled in azure_rm_common + pass + + + +def subnet_to_dict(subnet): + result = dict( + id=subnet.id, + name=subnet.name, + provisioning_state=subnet.provisioning_state, + address_prefix=subnet.address_prefix, + network_security_group=dict(), + ) + if subnet.network_security_group: + id_keys = azure_id_to_dict(subnet.network_security_group.id) + result['network_security_group']['id'] = subnet.network_security_group.id + result['network_security_group']['name'] = id_keys['networkSecurityGroups'] + return result + + +class AzureRMSubnet(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + virtual_network_name=dict(type='str', required=True, aliases=['virtual_network']), + address_prefix_cidr=dict(type='str', aliases=['address_prefix']), + security_group_name=dict(type='str', aliases=['security_group']), + ) + + required_if = [ + ('state', 'present', ['address_prefix_cidr']) + ] + + self.results = dict( + changed=False, + state=dict() + ) + + self.resource_group = None + self.name = None + self.state = None + self.virtual_etwork_name = None + self.address_prefix_cidr = None + self.security_group_name = None + + super(AzureRMSubnet, self).__init__(self.module_arg_spec, + supports_check_mode=True, + required_if=required_if) + + def exec_module(self, **kwargs): + + nsg = None + subnet = None + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.state == 'present' and not CIDR_PATTERN.match(self.address_prefix_cidr): + self.fail("Invalid address_prefix_cidr 
value {0}".format(self.address_prefix_cidr)) + + if self.security_group_name: + nsg = self.get_security_group(self.security_group_name) + + results = dict() + changed = False + + try: + self.log('Fetching subnet {0}'.format(self.name)) + subnet = self.network_client.subnets.get(self.resource_group, + self.virtual_network_name, + self.name) + self.check_provisioning_state(subnet, self.state) + results = subnet_to_dict(subnet) + + if self.state == 'present': + if self.address_prefix_cidr: + if results['address_prefix'] != self.address_prefix_cidr: + self.log("CHANGED: subnet {0} address_prefix_cidr".format(self.name)) + changed = True + results['address_prefix'] = self.address_prefix_cidr + + if self.security_group_name: + if results['network_security_group'].get('id') != nsg.id: + self.log("CHANGED: subnet {0} network security group".format(self.name)) + changed = True + results['network_security_group']['id'] = nsg.id + results['network_security_group']['name'] = nsg.name + elif self.state == 'absent': + changed = True + except CloudError: + # the subnet does not exist + if self.state == 'present': + changed = True + + self.results['changed'] = changed + self.results['state'] = results + + if not self.check_mode: + + if self.state == 'present' and changed: + if not subnet: + # create new subnet + self.log('Creating subnet {0}'.format(self.name)) + subnet = Subnet( + address_prefix=self.address_prefix_cidr + ) + if nsg: + subnet.network_security_group = NetworkSecurityGroup(id=nsg.id, + location=nsg.location, + resource_guid=nsg.resource_guid) + + else: + # update subnet + self.log('Updating subnet {0}'.format(self.name)) + subnet = Subnet( + address_prefix=results['address_prefix'] + ) + if results['network_security_group'].get('id'): + nsg = self.get_security_group(results['network_security_group']['name']) + subnet.network_security_group = NetworkSecurityGroup(id=nsg.id, + location=nsg.location, + resource_guid=nsg.resource_guid) + + self.results['state'] = 
self.create_or_update_subnet(subnet) + elif self.state == 'absent': + # delete subnet + self.delete_subnet() + # the delete does not actually return anything. if no exception, then we'll assume + # it worked. + self.results['state']['status'] = 'Deleted' + + return self.results + + def create_or_update_subnet(self, subnet): + try: + poller = self.network_client.subnets.create_or_update(self.resource_group, + self.virtual_network_name, + self.name, + subnet) + new_subnet = self.get_poller_result(poller) + except Exception as exc: + self.fail("Error creating or updateing subnet {0} - {1}".format(self.name, str(exc))) + self.check_provisioning_state(new_subnet) + return subnet_to_dict(new_subnet) + + def delete_subnet(self): + self.log('Deleting subnet {0}'.format(self.name)) + try: + poller = self.network_client.subnets.delete(self.resource_group, + self.virtual_network_name, + self.name) + result = self.get_poller_result(poller) + except Exception as exc: + self.fail("Error deleting subnet {0} - {1}".format(self.name, str(exc))) + + return result + + def get_security_group(self, name): + self.log("Fetching security group {0}".format(name)) + nsg = None + try: + nsg = self.network_client.network_security_groups.get(self.resource_group, name) + except Exception as exc: + self.fail("Error: fetching network security group {0} - {1}.".format(name, str(exc))) + return nsg + + +def main(): + AzureRMSubnet() + +if __name__ == '__main__': + main() + diff --git a/cloud/azure/azure_rm_virtualmachine.py b/cloud/azure/azure_rm_virtualmachine.py new file mode 100644 index 00000000000..40010df8eba --- /dev/null +++ b/cloud/azure/azure_rm_virtualmachine.py @@ -0,0 +1,1310 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the 
License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualmachine + +version_added: "2.1" + +short_description: Manage Azure virtual machines. + +description: + - Create, update, stop and start a virtual machine. Provide an existing storage account and network interface or + allow the module to create these for you. If you choose not to provide a network interface, the resource group + must contain a virtual network with at least one subnet. + - Currently requires an image found in the Azure Marketplace. Use azure_rm_virtualmachineimage_facts module + to discover the publisher, offer, sku and version of a particular image. + +options: + resource_group: + description: + - Name of the resource group containing the virtual machine. + required: true + name: + description: + - Name of the virtual machine. + required: true + state: + description: + - Assert the state of the virtual machine. + - State 'present' will check that the machine exists with the requested configuration. If the configuration + of the existing machine does not match, the machine will be updated. Use options started, allocated and restarted to change the machine's power state. + - State 'absent' will remove the virtual machine. + default: present + required: false + choices: + - absent + - present + started: + description: + - Use with state 'present' to start the machine. Set to false to have the machine be 'stopped'. 
+ default: true + required: false + allocated: + description: + - Toggle that controls if the machine is allocated/deallocated, only useful with state='present'. + default: True + required: false + restarted: + description: + - Use with state 'present' to restart a running VM. + default: false + required: false + location: + description: + - Valid Azure location. Defaults to location of the resource group. + default: null + required: false + short_hostname: + description: + - Name assigned internally to the host. On a linux VM this is the name returned by the `hostname` command. + When creating a virtual machine, short_hostname defaults to name. + default: null + required: false + vm_size: + description: + - A valid Azure VM size value. For example, 'Standard_D4'. The list of choices varies depending on the + subscription and location. Check your subscription for available choices. + default: Standard_D1 + required: false + admin_username: + description: + - Admin username used to access the host after it is created. Required when creating a VM. + default: null + required: false + admin_password: + description: + - Password for the admin username. Not required if the os_type is Linux and SSH password authentication + is disabled by setting ssh_password_enabled to false. + default: null + required: false + ssh_password_enabled: + description: + - When the os_type is Linux, setting ssh_password_enabled to false will disable SSH password authentication + and require use of SSH keys. + default: true + required: false + ssh_public_keys: + description: + - "For os_type Linux provide a list of SSH keys. Each item in the list should be a dictionary where the + dictionary contains two keys: path and key_data. Set the path to the default location of the + authorized_keys files. On an Enterprise Linux host, for example, the path will be + /home//.ssh/authorized_keys. Set key_data to the actual value of the public key." 
+ default: null + required: false + image: + description: + - "A dictionary describing the Marketplace image used to build the VM. Will contain keys: publisher, + offer, sku and version. NOTE: set image.version to 'latest' to get the most recent version of a given + image." + required: true + storage_account_name: + description: + - Name of an existing storage account that supports creation of VHD blobs. If not specified for a new VM, + a new storage account named 01 will be created using storage type 'Standard_LRS'. + default: null + required: false + storage_container_name: + description: + - Name of the container to use within the storage account to store VHD blobs. If no name is specified a + default container will created. + default: vhds + required: false + storage_blob_name: + description: + - Name fo the storage blob used to hold the VM's OS disk image. If no name is provided, defaults to + the VM name + '.vhd'. If you provide a name, it must end with '.vhd' + aliases: + - storage_blob + default: null + required: false + os_disk_caching: + description: + - Type of OS disk caching. + choices: + - ReadOnly + - ReadWrite + default: ReadOnly + aliases: + - disk_caching + required: false + os_type: + description: + - Base type of operating system. + choices: + - Windows + - Linux + default: + - Linux + required: false + public_ip_allocation_method: + description: + - If a public IP address is created when creating the VM (because a Network Interface was not provided), + determines if the public IP address remains permanently associated with the Network Interface. If set + to 'Dynamic' the public IP address may change any time the VM is rebooted or power cycled. + choices: + - Dynamic + - Static + default: + - Static + aliases: + - public_ip_allocation + required: false + open_ports: + description: + - If a network interface is created when creating the VM, a security group will be created as well. 
For + Linux hosts a rule will be added to the security group allowing inbound TCP connections to the default + SSH port 22, and for Windows hosts ports 3389 and 5986 will be opened. Override the default open ports by + providing a list of ports. + default: null + required: false + network_interface_names: + description: + - List of existing network interface names to add to the VM. If a network interface name is not provided + when the VM is created, a default network interface will be created. In order for the module to create + a network interface, at least one Virtual Network with one Subnet must exist. + default: null + required: false + virtual_network_name: + description: + - When creating a virtual machine, if a network interface name is not provided, one will be created. + The new network interface will be assigned to the first virtual network found in the resource group. + Use this parameter to provide a specific virtual network instead. + aliases: + - virtual_network + default: null + required: false + subnet_name: + description: + - When creating a virtual machine, if a network interface name is not provided, one will be created. + The new network interface will be assigned to the first subnet found in the virtual network. + Use this parameter to provide a specific subnet instead. 
+ aliases: + - subnet + default: null + required: false + remove_on_absent: + description: + - When removing a VM using state 'absent', also remove associated resources + - "It can be 'all' or a list with any of the following: ['network_interfaces', 'virtual_storage', 'public_ips']" + - Any other input will be ignored + default: ['all'] + required: false + +extends_documentation_fragment: + - azure + - azure_tags + +author: + - "Chris Houseknecht (@chouseknecht)" + - "Matt Davis (@nitzmahone)" + +''' +EXAMPLES = ''' + +- name: Create VM with defaults + azure_rm_virtualmachine: + resource_group: Testing + name: testvm10 + admin_username: chouseknecht + admin_password: + image: + offer: CentOS + publisher: OpenLogic + sku: '7.1' + version: latest + +- name: Create a VM with existing storage account and NIC + azure_rm_virtualmachine: + resource_group: Testing + name: testvm002 + vm_size: Standard_D4 + storage_account: testaccount001 + admin_username: adminUser + ssh_public_keys: + - path: /home/adminUser/.ssh/authorized_keys + key_data: < insert your ssh public key here... 
> + network_interfaces: testvm001 + image: + offer: CentOS + publisher: OpenLogic + sku: '7.1' + version: latest + +- name: Power Off + azure_rm_virtualmachine: + resource_group: Testing + name: testvm002 + started: no + +- name: Deallocate + azure_rm_virtualmachine: + resource_group: Testing + name: testvm002 + allocated: no + +- name: Power On + azure_rm_virtualmachine: + resource_group: + name: testvm002 + +- name: Restart + azure_rm_virtualmachine: + resource_group: + name: testvm002 + restarted: yes + +- name: remove vm and all resources except public ips + azure_rm_virtualmachine: + resource_group: Testing + name: testvm002 + state: absent + remove_on_absent: + - network_interfaces + - virtual_storage +''' + +RETURN = ''' +powerstate: + description: Indicates if the state is running, stopped, deallocated + returned: always + type: string + example: running +deleted_vhd_uris: + description: List of deleted Virtual Hard Disk URIs. + returned: 'on delete' + type: list + example: ["https://testvm104519.blob.core.windows.net/vhds/testvm10.vhd"] +deleted_network_interfaces: + description: List of deleted NICs. + returned: 'on delete' + type: list + example: ["testvm1001"] +deleted_public_ips: + description: List of deleted public IP address names. + returned: 'on delete' + type: list + example: ["testvm1001"] +azure_vm: + description: Facts about the current state of the object. Note that facts are not part of the registered output but available directly. 
+ returned: always + type: complex + example: { + "properties": { + "hardwareProfile": { + "vmSize": "Standard_D1" + }, + "instanceView": { + "disks": [ + { + "name": "testvm10.vhd", + "statuses": [ + { + "code": "ProvisioningState/succeeded", + "displayStatus": "Provisioning succeeded", + "level": "Info", + "time": "2016-03-30T07:11:16.187272Z" + } + ] + } + ], + "statuses": [ + { + "code": "ProvisioningState/succeeded", + "displayStatus": "Provisioning succeeded", + "level": "Info", + "time": "2016-03-30T20:33:38.946916Z" + }, + { + "code": "PowerState/running", + "displayStatus": "VM running", + "level": "Info" + } + ], + "vmAgent": { + "extensionHandlers": [], + "statuses": [ + { + "code": "ProvisioningState/succeeded", + "displayStatus": "Ready", + "level": "Info", + "message": "GuestAgent is running and accepting new configurations.", + "time": "2016-03-30T20:31:16.000Z" + } + ], + "vmAgentVersion": "WALinuxAgent-2.0.16" + } + }, + "networkProfile": { + "networkInterfaces": [ + { + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/testvm10_NIC01", + "name": "testvm10_NIC01", + "properties": { + "dnsSettings": { + "appliedDnsServers": [], + "dnsServers": [] + }, + "enableIPForwarding": false, + "ipConfigurations": [ + { + "etag": 'W/"041c8c2a-d5dd-4cd7-8465-9125cfbe2cf8"', + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/testvm10_NIC01/ipConfigurations/default", + "name": "default", + "properties": { + "privateIPAddress": "10.10.0.5", + "privateIPAllocationMethod": "Dynamic", + "provisioningState": "Succeeded", + "publicIPAddress": { + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/publicIPAddresses/testvm10_PIP01", + "name": "testvm10_PIP01", + "properties": { + "idleTimeoutInMinutes": 4, + "ipAddress": "13.92.246.197", + "ipConfiguration": { + "id": 
"/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkInterfaces/testvm10_NIC01/ipConfigurations/default" + }, + "provisioningState": "Succeeded", + "publicIPAllocationMethod": "Static", + "resourceGuid": "3447d987-ca0d-4eca-818b-5dddc0625b42" + } + } + } + } + ], + "macAddress": "00-0D-3A-12-AA-14", + "primary": true, + "provisioningState": "Succeeded", + "resourceGuid": "10979e12-ccf9-42ee-9f6d-ff2cc63b3844", + "virtualMachine": { + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Compute/virtualMachines/testvm10" + } + } + } + ] + }, + "osProfile": { + "adminUsername": "chouseknecht", + "computerName": "test10", + "linuxConfiguration": { + "disablePasswordAuthentication": false + }, + "secrets": [] + }, + "provisioningState": "Succeeded", + "storageProfile": { + "dataDisks": [], + "imageReference": { + "offer": "CentOS", + "publisher": "OpenLogic", + "sku": "7.1", + "version": "7.1.20160308" + }, + "osDisk": { + "caching": "ReadOnly", + "createOption": "fromImage", + "name": "testvm10.vhd", + "osType": "Linux", + "vhd": { + "uri": "https://testvm10sa1.blob.core.windows.net/vhds/testvm10.vhd" + } + } + } + }, + "type": "Microsoft.Compute/virtualMachines" + } +''' + +import random + +from ansible.module_utils.basic import * +from ansible.module_utils.azure_rm_common import * + +try: + from msrestazure.azure_exceptions import CloudError + from azure.mgmt.compute.models import NetworkInterfaceReference, \ + VirtualMachine, HardwareProfile, \ + StorageProfile, OSProfile, OSDisk, \ + VirtualHardDisk, ImageReference,\ + NetworkProfile, LinuxConfiguration, \ + SshConfiguration, SshPublicKey + from azure.mgmt.network.models import PublicIPAddress, NetworkSecurityGroup, NetworkInterface, \ + NetworkInterfaceIPConfiguration, Subnet + from azure.mgmt.storage.models import StorageAccountCreateParameters, Sku + from azure.mgmt.storage.models.storage_management_client_enums 
import Kind, SkuTier, SkuName + from azure.mgmt.compute.models.compute_management_client_enums import VirtualMachineSizeTypes, DiskCreateOptionTypes +except ImportError: + # This is handled in azure_rm_common + pass + +AZURE_OBJECT_CLASS = 'VirtualMachine' + +AZURE_ENUM_MODULES = ['azure.mgmt.compute.models.compute_management_client_enums'] + + +def extract_names_from_blob_uri(blob_uri): + # HACK: ditch this once python SDK supports get by URI + m = re.match('^https://(?P[^\.]+)\.blob\.core\.windows\.net/' + '(?P[^/]+)/(?P.+)$', blob_uri) + if not m: + raise Exception("unable to parse blob uri '%s'" % blob_uri) + extracted_names = m.groupdict() + return extracted_names + + +class AzureRMVirtualMachine(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + state=dict(choices=['present', 'absent'], default='present', type='str'), + location=dict(type='str'), + short_hostname=dict(type='str'), + vm_size=dict(type='str', choices=[], default='Standard_D1'), + admin_username=dict(type='str'), + admin_password=dict(type='str', ), + ssh_password_enabled=dict(type='bool', default=True), + ssh_public_keys=dict(type='list'), + image=dict(type='dict'), + storage_account_name=dict(type='str', aliases=['storage_account']), + storage_container_name=dict(type='str', aliases=['storage_container'], default='vhds'), + storage_blob_name=dict(type='str', aliases=['storage_blob']), + os_disk_caching=dict(type='str', aliases=['disk_caching'], choices=['ReadOnly', 'ReadWrite'], + default='ReadOnly'), + os_type=dict(type='str', choices=['Linux', 'Windows'], default='Linux'), + public_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Static', + aliases=['public_ip_allocation']), + open_ports=dict(type='list'), + network_interface_names=dict(type='list', aliases=['network_interfaces']), + remove_on_absent=dict(type='list', default=['all']), + 
virtual_network_name=dict(type='str', aliases=['virtual_network']), + subnet_name=dict(type='str', aliases=['subnet']), + allocated=dict(type='bool', default=True), + restarted=dict(type='bool', default=False), + started=dict(type='bool', default=True), + ) + + for key in VirtualMachineSizeTypes: + self.module_arg_spec['vm_size']['choices'].append(getattr(key, 'value')) + + self.resource_group = None + self.name = None + self.state = None + self.location = None + self.short_hostname = None + self.vm_size = None + self.admin_username = None + self.admin_password = None + self.ssh_password_enabled = None + self.ssh_public_keys = None + self.image = None + self.storage_account_name = None + self.storage_container_name = None + self.storage_blob_name = None + self.os_type = None + self.os_disk_caching = None + self.network_interface_names = None + self.remove_on_absent = set() + self.tags = None + self.force = None + self.public_ip_allocation_method = None + self.open_ports = None + self.virtual_network_name = None + self.subnet_name = None + self.allocated = None + self.restarted = None + self.started = None + self.differences = None + + self.results = dict( + changed=False, + actions=[], + powerstate_change=None, + ansible_facts=dict(azure_vm=None) + ) + + super(AzureRMVirtualMachine, self).__init__(derived_arg_spec=self.module_arg_spec, + supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec.keys() + ['tags']: + setattr(self, key, kwargs[key]) + + # make sure options are lower case + self.remove_on_absent = set([resource.lower() for resource in self.remove_on_absent]) + + changed = False + powerstate_change = None + results = dict() + vm = None + network_interfaces = [] + requested_vhd_uri = None + disable_ssh_password = None + vm_dict = None + + resource_group = self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + + if self.state == 
'present': + # Verify parameters and resolve any defaults + + if self.vm_size and not self.vm_size_is_valid(): + self.fail("Parameter error: vm_size {0} is not valid for your subscription and location.".format( + self.vm_size + )) + + if self.network_interface_names: + for name in self.network_interface_names: + nic = self.get_network_interface(name) + network_interfaces.append(nic.id) + + if self.ssh_public_keys: + msg = "Parameter error: expecting ssh_public_keys to be a list of type dict where " \ + "each dict contains keys: path, key_data." + for key in self.ssh_public_keys: + if not isinstance(key, dict): + self.fail(msg) + if not key.get('path') or not key.get('key_data'): + self.fail(msg) + + if self.image: + if not self.image.get('publisher') or not self.image.get('offer') or not self.image.get('sku') \ + or not self.image.get('version'): + self.error("parameter error: expecting image to contain publisher, offer, sku and version keys.") + image_version = self.get_image_version() + if self.image['version'] == 'latest': + self.image['version'] = image_version.name + self.log("Using image version {0}".format(self.image['version'])) + + if not self.storage_blob_name: + self.storage_blob_name = self.name + '.vhd' + + if self.storage_account_name: + self.get_storage_account(self.storage_account_name) + + requested_vhd_uri = 'https://{0}.blob.core.windows.net/{1}/{2}'.format(self.storage_account_name, + self.storage_container_name, + self.storage_blob_name) + + disable_ssh_password = not self.ssh_password_enabled + + try: + self.log("Fetching virtual machine {0}".format(self.name)) + vm = self.compute_client.virtual_machines.get(self.resource_group, self.name, expand='instanceview') + self.check_provisioning_state(vm, self.state) + vm_dict = self.serialize_vm(vm) + + if self.state == 'present': + differences = [] + current_nics = [] + results = vm_dict + + # Try to determine if the VM needs to be updated + if self.network_interface_names: + for nic in 
vm_dict['properties']['networkProfile']['networkInterfaces']: + current_nics.append(nic['id']) + + if set(current_nics) != set(network_interfaces): + self.log('CHANGED: virtual machine {0} - network interfaces are different.'.format(self.name)) + differences.append('Network Interfaces') + updated_nics = [dict(id=id) for id in network_interfaces] + vm_dict['properties']['networkProfile']['networkInterfaces'] = updated_nics + changed = True + + if self.os_disk_caching and \ + self.os_disk_caching != vm_dict['properties']['storageProfile']['osDisk']['caching']: + self.log('CHANGED: virtual machine {0} - OS disk caching'.format(self.name)) + differences.append('OS Disk caching') + changed = True + vm_dict['properties']['storageProfile']['osDisk']['caching'] = self.os_disk_caching + + update_tags, vm_dict['tags'] = self.update_tags(vm_dict.get('tags', dict())) + if update_tags: + differences.append('Tags') + changed = True + + if self.short_hostname and self.short_hostname != vm_dict['properties']['osProfile']['computerName']: + self.log('CHANGED: virtual machine {0} - short hostname'.format(self.name)) + differences.append('Short Hostname') + changed = True + vm_dict['properties']['osProfile']['computerName'] = self.short_hostname + + if self.started and vm_dict['powerstate'] != 'running': + self.log("CHANGED: virtual machine {0} not running and requested state 'running'".format(self.name)) + changed = True + powerstate_change = 'poweron' + elif self.state == 'present' and vm_dict['powerstate'] == 'running' and self.restarted: + self.log("CHANGED: virtual machine {0} {1} and requested state 'restarted'" + .format(self.name, vm_dict['powerstate'])) + changed = True + powerstate_change = 'restarted' + elif self.state == 'present' and not self.allocated and vm_dict['powerstate'] != 'deallocated': + self.log("CHANGED: virtual machine {0} {1} and requested state 'deallocated'" + .format(self.name, vm_dict['powerstate'])) + changed = True + powerstate_change = 'deallocated' 
+ elif not self.started and vm_dict['powerstate'] == 'running': + self.log("CHANGED: virtual machine {0} running and requested state 'stopped'".format(self.name)) + changed = True + powerstate_change = 'poweroff' + + self.differences = differences + + elif self.state == 'absent': + self.log("CHANGED: virtual machine {0} exists and requested state is 'absent'".format(self.name)) + results = dict() + changed = True + + except CloudError: + self.log('Virtual machine {0} does not exist'.format(self.name)) + if self.state == 'present': + self.log("CHANGED: virtual machine does not exist but state is present." \ + .format(self.name)) + changed = True + + self.results['changed'] = changed + self.results['ansible_facts']['azure_vm'] = results + self.results['powerstate_change'] = powerstate_change + + if self.check_mode: + return self.results + + if changed: + if self.state == 'present': + if not vm: + # Create the VM + self.log("Create virtual machine {0}".format(self.name)) + self.results['actions'].append('Created VM {0}'.format(self.name)) + + # Validate parameters + if not self.admin_username: + self.fail("Parameter error: admin_username required when creating a virtual machine.") + + if self.os_type == 'Linux': + if disable_ssh_password and not self.ssh_public_keys: + self.fail("Parameter error: ssh_public_keys required when disabling SSH password.") + + if not self.image: + self.fail("Parameter error: an image is required when creating a virtual machine.") + + # Get defaults + if not self.network_interface_names: + default_nic = self.create_default_nic() + self.log("network interface:") + self.log(self.serialize_obj(default_nic, 'NetworkInterface'), pretty_print=True) + network_interfaces = [default_nic.id] + + if not self.storage_account_name: + storage_account = self.create_default_storage_account() + self.log("storage account:") + self.log(self.serialize_obj(storage_account, 'StorageAccount'), pretty_print=True) + requested_vhd_uri = 
'https://{0}.blob.core.windows.net/{1}/{2}'.format( + storage_account.name, + self.storage_container_name, + self.storage_blob_name) + + if not self.short_hostname: + self.short_hostname = self.name + + nics = [NetworkInterfaceReference(id=id) for id in network_interfaces] + vhd = VirtualHardDisk(uri=requested_vhd_uri) + vm_resource = VirtualMachine( + self.location, + tags=self.tags, + os_profile=OSProfile( + admin_username=self.admin_username, + computer_name=self.short_hostname, + ), + hardware_profile=HardwareProfile( + vm_size=self.vm_size + ), + storage_profile=StorageProfile( + os_disk=OSDisk( + self.storage_blob_name, + vhd, + DiskCreateOptionTypes.from_image, + caching=self.os_disk_caching, + ), + image_reference=ImageReference( + publisher=self.image['publisher'], + offer=self.image['offer'], + sku=self.image['sku'], + version=self.image['version'], + ), + ), + network_profile=NetworkProfile( + network_interfaces=nics + ), + ) + + if self.admin_password: + vm_resource.os_profile.admin_password = self.admin_password + + if self.os_type == 'Linux': + vm_resource.os_profile.linux_configuration = LinuxConfiguration( + disable_password_authentication=disable_ssh_password + ) + if self.ssh_public_keys: + ssh_config = SshConfiguration() + ssh_config.public_keys = \ + [SshPublicKey(path=key['path'], key_data=key['key_data']) for key in self.ssh_public_keys] + vm_resource.os_profile.linux_configuration.ssh = ssh_config + + self.log("Create virtual machine with parameters:") + self.create_or_update_vm(vm_resource) + + elif self.differences and len(self.differences) > 0: + # Update the VM based on detected config differences + + self.log("Update virtual machine {0}".format(self.name)) + self.results['actions'].append('Updated VM {0}'.format(self.name)) + + nics = [NetworkInterfaceReference(id=interface['id']) + for interface in vm_dict['properties']['networkProfile']['networkInterfaces']] + vhd = 
VirtualHardDisk(uri=vm_dict['properties']['storageProfile']['osDisk']['vhd']['uri']) + vm_resource = VirtualMachine( + vm_dict['location'], + vm_id=vm_dict['properties']['vmId'], + os_profile=OSProfile( + admin_username=vm_dict['properties']['osProfile']['adminUsername'], + computer_name=vm_dict['properties']['osProfile']['computerName'] + ), + hardware_profile=HardwareProfile( + vm_size=vm_dict['properties']['hardwareProfile']['vmSize'] + ), + storage_profile=StorageProfile( + os_disk=OSDisk( + vm_dict['properties']['storageProfile']['osDisk']['name'], + vhd, + vm_dict['properties']['storageProfile']['osDisk']['createOption'], + os_type=vm_dict['properties']['storageProfile']['osDisk']['osType'], + caching=vm_dict['properties']['storageProfile']['osDisk']['caching'] + ), + image_reference=ImageReference( + publisher=vm_dict['properties']['storageProfile']['imageReference']['publisher'], + offer=vm_dict['properties']['storageProfile']['imageReference']['offer'], + sku=vm_dict['properties']['storageProfile']['imageReference']['sku'], + version=vm_dict['properties']['storageProfile']['imageReference']['version'] + ), + ), + network_profile=NetworkProfile( + network_interfaces=nics + ), + ) + + if vm_dict.get('tags'): + vm_resource.tags = vm_dict['tags'] + + # Add admin password, if one provided + if vm_dict['properties']['osProfile'].get('adminPassword'): + vm_resource.os_profile.admin_password = vm_dict['properties']['osProfile']['adminPassword'] + + # Add linux configuration, if applicable + linux_config = vm_dict['properties']['osProfile'].get('linuxConfiguration') + if linux_config: + ssh_config = linux_config.get('ssh', None) + vm_resource.os_profile.linux_configuration = LinuxConfiguration( + disable_password_authentication=linux_config.get('disablePasswordAuthentication', False) + ) + if ssh_config: + public_keys = ssh_config.get('publicKeys') + if public_keys: + vm_resource.os_profile.linux_configuration.ssh = SshConfiguration(public_keys=[]) + for key in 
public_keys: + vm_resource.os_profile.linux_configuration.ssh.public_keys.append( + SshPublicKey(path=key['path'], key_data=key['keyData']) + ) + self.log("Update virtual machine with parameters:") + self.create_or_update_vm(vm_resource) + + # Make sure we leave the machine in requested power state + if powerstate_change == 'poweron' and \ + self.results['ansible_facts']['azure_vm']['powerstate'] != 'running': + # Attempt to power on the machine + self.power_on_vm() + + elif powerstate_change == 'poweroff' and \ + self.results['ansible_facts']['azure_vm']['powerstate'] == 'running': + # Attempt to power off the machine + self.power_off_vm() + + elif powerstate_change == 'restarted': + self.restart_vm() + + elif powerstate_change == 'deallocated': + self.deallocate_vm() + + self.results['ansible_facts']['azure_vm'] = self.serialize_vm(self.get_vm()) + + elif self.state == 'absent': + # delete the VM + self.log("Delete virtual machine {0}".format(self.name)) + self.results['ansible_facts']['azure_vm'] = None + self.delete_vm(vm) + + # until we sort out how we want to do this globally + del self.results['actions'] + + return self.results + + def get_vm(self): + ''' + Get the VM with expanded instanceView + + :return: VirtualMachine object + ''' + try: + vm = self.compute_client.virtual_machines.get(self.resource_group, self.name, expand='instanceview') + return vm + except Exception as exc: + self.fail("Error getting virtual machine (0) - {1}".format(self.name, str(exc))) + + def serialize_vm(self, vm): + ''' + Convert a VirtualMachine object to dict. 
+ + :param vm: VirtualMachine object + :return: dict + ''' + + result = self.serialize_obj(vm, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES) + result['id'] = vm.id + result['name'] = vm.name + result['type'] = vm.type + result['location'] = vm.location + result['tags'] = vm.tags + + result['powerstate'] = dict() + if vm.instance_view: + result['powerstate'] = next((s.code.replace('PowerState/', '') + for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None) + + # Expand network interfaces to include config properties + for interface in vm.network_profile.network_interfaces: + int_dict = azure_id_to_dict(interface.id) + nic = self.get_network_interface(int_dict['networkInterfaces']) + for interface_dict in result['properties']['networkProfile']['networkInterfaces']: + if interface_dict['id'] == interface.id: + nic_dict = self.serialize_obj(nic, 'NetworkInterface') + interface_dict['name'] = int_dict['networkInterfaces'] + interface_dict['properties'] = nic_dict['properties'] + + # Expand public IPs to include config properties + for interface in result['properties']['networkProfile']['networkInterfaces']: + for config in interface['properties']['ipConfigurations']: + if config['properties'].get('publicIPAddress'): + pipid_dict = azure_id_to_dict(config['properties']['publicIPAddress']['id']) + try: + pip = self.network_client.public_ip_addresses.get(self.resource_group, + pipid_dict['publicIPAddresses']) + except Exception as exc: + self.fail("Error fetching public ip {0} - {1}".format(pipid_dict['publicIPAddresses'], + str(exc))) + pip_dict = self.serialize_obj(pip, 'PublicIPAddress') + config['properties']['publicIPAddress']['name'] = pipid_dict['publicIPAddresses'] + config['properties']['publicIPAddress']['properties'] = pip_dict['properties'] + + self.log(result, pretty_print=True) + if self.state != 'absent' and not result['powerstate']: + self.fail("Failed to determine PowerState of virtual machine {0}".format(self.name)) + return 
result + + def power_off_vm(self): + self.log("Powered off virtual machine {0}".format(self.name)) + self.results['actions'].append("Powered off virtual machine {0}".format(self.name)) + try: + poller = self.compute_client.virtual_machines.power_off(self.resource_group, self.name) + self.get_poller_result(poller) + except Exception as exc: + self.fail("Error powering off virtual machine {0} - {1}".format(self.name, str(exc))) + return True + + def power_on_vm(self): + self.results['actions'].append("Powered on virtual machine {0}".format(self.name)) + self.log("Power on virtual machine {0}".format(self.name)) + try: + poller = self.compute_client.virtual_machines.start(self.resource_group, self.name) + self.get_poller_result(poller) + except Exception as exc: + self.fail("Error powering on virtual machine {0} - {1}".format(self.name, str(exc))) + return True + + def restart_vm(self): + self.results['actions'].append("Restarted virtual machine {0}".format(self.name)) + self.log("Restart virtual machine {0}".format(self.name)) + try: + poller = self.compute_client.virtual_machines.restart(self.resource_group, self.name) + self.get_poller_result(poller) + except Exception as exc: + self.fail("Error restarting virtual machine {0} - {1}".format(self.name, str(exc))) + return True + + def deallocate_vm(self): + self.results['actions'].append("Deallocated virtual machine {0}".format(self.name)) + self.log("Deallocate virtual machine {0}".format(self.name)) + try: + poller = self.compute_client.virtual_machines.deallocate(self.resource_group, self.name) + self.get_poller_result(poller) + except Exception as exc: + self.fail("Error deallocating virtual machine {0} - {1}".format(self.name, str(exc))) + return True + + def delete_vm(self, vm): + vhd_uris = [] + nic_names = [] + pip_names = [] + + if self.remove_on_absent.intersection(set(['all','virtual_storage'])): + # store the attached vhd info so we can nuke it after the VM is gone + self.log('Storing VHD URI for 
deletion') + vhd_uris.append(vm.storage_profile.os_disk.vhd.uri) + self.log("VHD URIs to delete: {0}".format(', '.join(vhd_uris))) + self.results['deleted_vhd_uris'] = vhd_uris + + if self.remove_on_absent.intersection(set(['all','network_interfaces'])): + # store the attached nic info so we can nuke them after the VM is gone + self.log('Storing NIC names for deletion.') + for interface in vm.network_profile.network_interfaces: + id_dict = azure_id_to_dict(interface.id) + nic_names.append(id_dict['networkInterfaces']) + self.log('NIC names to delete {0}'.format(', '.join(nic_names))) + self.results['deleted_network_interfaces'] = nic_names + if self.remove_on_absent.intersection(set(['all','public_ips'])): + # also store each nic's attached public IPs and delete after the NIC is gone + for name in nic_names: + nic = self.get_network_interface(name) + for ipc in nic.ip_configurations: + if ipc.public_ip_address: + pip_dict = azure_id_to_dict(ipc.public_ip_address.id) + pip_names.append(pip_dict['publicIPAddresses']) + self.log('Public IPs to delete are {0}'.format(', '.join(pip_names))) + self.results['deleted_public_ips'] = pip_names + + self.log("Deleting virtual machine {0}".format(self.name)) + self.results['actions'].append("Deleted virtual machine {0}".format(self.name)) + try: + poller = self.compute_client.virtual_machines.delete(self.resource_group, self.name) + # wait for the poller to finish + self.get_poller_result(poller) + except Exception as exc: + self.fail("Error deleting virtual machine {0} - {1}".format(self.name, str(exc))) + + # TODO: parallelize nic, vhd, and public ip deletions with begin_deleting + # TODO: best-effort to keep deleting other linked resources if we encounter an error + if self.remove_on_absent.intersection(set(['all','virtual_storage'])): + self.log('Deleting virtual storage') + self.delete_vm_storage(vhd_uris) + + if self.remove_on_absent.intersection(set(['all','network_interfaces'])): + self.log('Deleting network 
interfaces') + for name in nic_names: + self.delete_nic(name) + + if self.remove_on_absent.intersection(set(['all','public_ips'])): + self.log('Deleting public IPs') + for name in pip_names: + self.delete_pip(name) + return True + + def get_network_interface(self, name): + try: + nic = self.network_client.network_interfaces.get(self.resource_group, name) + return nic + except Exception as exc: + self.fail("Error fetching network interface {0} - {1}".format(name, str(exc))) + + def delete_nic(self, name): + self.log("Deleting network interface {0}".format(name)) + self.results['actions'].append("Deleted network interface {0}".format(name)) + try: + poller = self.network_client.network_interfaces.delete(self.resource_group, name) + except Exception as exc: + self.fail("Error deleting network interface {0} - {1}".format(name, str(exc))) + self.get_poller_result(poller) + # Delete doesn't return anything. If we get this far, assume success + return True + + def delete_pip(self, name): + self.results['actions'].append("Deleted public IP {0}".format(name)) + try: + poller = self.network_client.public_ip_addresses.delete(self.resource_group, name) + self.get_poller_result(poller) + except Exception as exc: + self.fail("Error deleting {0} - {1}".format(name, str(exc))) + # Delete returns nada. If we get here, assume that all is well. 
+ return True + + def delete_vm_storage(self, vhd_uris): + for uri in vhd_uris: + self.log("Extracting info from blob uri '{0}'".format(uri)) + try: + blob_parts = extract_names_from_blob_uri(uri) + except Exception as exc: + self.fail("Error parsing blob URI {0}".format(str(exc))) + storage_account_name = blob_parts['accountname'] + container_name = blob_parts['containername'] + blob_name = blob_parts['blobname'] + + blob_client = self.get_blob_client(self.resource_group, storage_account_name) + + self.log("Delete blob {0}:{1}".format(container_name, blob_name)) + self.results['actions'].append("Deleted blob {0}:{1}".format(container_name, blob_name)) + try: + blob_client.delete_blob(container_name, blob_name) + except Exception as exc: + self.fail("Error deleting blob {0}:{1} - {2}".format(container_name, blob_name, str(exc))) + + def get_image_version(self): + try: + versions = self.compute_client.virtual_machine_images.list(self.location, + self.image['publisher'], + self.image['offer'], + self.image['sku']) + except Exception as exc: + self.fail("Error fetching image {0} {1} {2} - {4}".format(self.image['publisher'], + self.image['offer'], + self.image['sku'], + str(exc))) + if versions and len(versions) > 0: + if self.image['version'] == 'latest': + return versions[len(versions) - 1] + for version in versions: + if version.name == self.image['version']: + return version + + self.fail("Error could not find image {0} {1} {2} {3}".format(self.image['publisher'], + self.image['offer'], + self.image['sku'], + self.image['version'])) + + def get_storage_account(self, name): + try: + account = self.storage_client.storage_accounts.get_properties(self.resource_group, + name) + return account + except Exception as exc: + self.fail("Error fetching storage account {0} - {1}".format(self.storage_account_name, str(exc))) + + def create_or_update_vm(self, params): + try: + poller = self.compute_client.virtual_machines.create_or_update(self.resource_group, self.name, params) 
+ self.get_poller_result(poller) + except Exception as exc: + self.fail("Error creating or updating virtual machine {0} - {1}".format(self.name, str(exc))) + + def vm_size_is_valid(self): + ''' + Validate self.vm_size against the list of virtual machine sizes available for the account and location. + + :return: boolean + ''' + try: + sizes = self.compute_client.virtual_machine_sizes.list(self.location) + except Exception as exc: + self.fail("Error retrieving available machine sizes - {0}".format(str(exc))) + for size in sizes: + if size.name == self.vm_size: + return True + return False + + def create_default_storage_account(self): + ''' + Create a default storage account XXXX, where XXXX is a random number. If XXXX exists, use it. + Otherwise, create one. + + :return: storage account object + ''' + account = None + valid_name = False + + # Attempt to find a valid storage account name + storage_account_name_base = self.name[:20].lower() + for i in range(0, 5): + rand = random.randrange(1000, 9999) + storage_account_name = storage_account_name_base + str(rand) + if self.check_storage_account_name(storage_account_name): + valid_name = True + break + + if not valid_name: + self.fail("Failed to create a unique storage account name for {0}. Try using a different VM name." 
+ .format(self.name)) + + try: + account = self.storage_client.storage_accounts.get_properties(self.resource_group, storage_account_name) + except CloudError: + pass + + if account: + self.log("Storage account {0} found.".format(storage_account_name)) + self.check_provisioning_state(account) + return account + sku = Sku(SkuName.standard_lrs) + Sku.tier = SkuTier.standard + kind = Kind.storage + parameters = StorageAccountCreateParameters(sku, kind, self.location) + self.log("Creating storage account {0} in location {1}".format(storage_account_name, self.location)) + self.results['actions'].append("Created storage account {0}".format(storage_account_name)) + try: + poller = self.storage_client.storage_accounts.create(self.resource_group, storage_account_name, parameters) + self.get_poller_result(poller) + except Exception as exc: + self.fail("Failed to create storage account: {0} - {1}".format(storage_account_name, str(exc))) + return self.get_storage_account(storage_account_name) + + def check_storage_account_name(self, name): + self.log("Checking storage account name availability for {0}".format(name)) + try: + response = self.storage_client.storage_accounts.check_name_availability(name) + except Exception as exc: + self.fail("Error checking storage account name availability for {0} - {1}".format(name, str(exc))) + return response.name_available + + def create_default_nic(self): + ''' + Create a default Network Interface 01. Requires an existing virtual network + with one subnet. If NIC 01 exists, use it. Otherwise, create one. 
+ + :return: NIC object + ''' + + network_interface_name = self.name + '01' + nic = None + + self.log("Create default NIC {0}".format(network_interface_name)) + self.log("Check to see if NIC {0} exists".format(network_interface_name)) + try: + nic = self.network_client.network_interfaces.get(self.resource_group, network_interface_name) + except CloudError: + pass + + if nic: + self.log("NIC {0} found.".format(network_interface_name)) + self.check_provisioning_state(nic) + return nic + + self.log("NIC {0} does not exist.".format(network_interface_name)) + + if self.virtual_network_name: + try: + self.network_client.virtual_networks.list(self.resource_group, self.virtual_network_name) + virtual_network_name = self.virtual_network_name + except Exception as exc: + self.fail("Error: fetching virtual network {0} - {1}".format(self.virtual_network_name, str(exc))) + else: + # Find a virtual network + no_vnets_msg = "Error: unable to find virtual network in resource group {0}. A virtual network " \ + "with at least one subnet must exist in order to create a NIC for the virtual " \ + "machine.".format(self.resource_group) + + virtual_network_name = None + try: + vnets = self.network_client.virtual_networks.list(self.resource_group) + except CloudError: + self.log('cloud error!') + self.fail(no_vnets_msg) + + for vnet in vnets: + virtual_network_name = vnet.name + self.log('vnet name: {0}'.format(vnet.name)) + break + + if not virtual_network_name: + self.fail(no_vnets_msg) + + if self.subnet_name: + try: + subnet = self.network_client.subnets.get(self.resource_group, virtual_network_name) + subnet_id = subnet.id + except Exception as exc: + self.fail("Error: fetching subnet {0} - {1}".format(self.subnet_name, str(exc))) + else: + no_subnets_msg = "Error: unable to find a subnet in virtual network {0}. 
A virtual network " \ + "with at least one subnet must exist in order to create a NIC for the virtual " \ + "machine.".format(virtual_network_name) + + subnet_id = None + try: + subnets = self.network_client.subnets.list(self.resource_group, virtual_network_name) + except CloudError: + self.fail(no_subnets_msg) + + for subnet in subnets: + subnet_id = subnet.id + self.log('subnet id: {0}'.format(subnet_id)) + break + + if not subnet_id: + self.fail(no_subnets_msg) + + self.results['actions'].append('Created default public IP {0}'.format(self.name + '01')) + pip = self.create_default_pip(self.resource_group, self.location, self.name, self.public_ip_allocation_method) + + self.results['actions'].append('Created default security group {0}'.format(self.name + '01')) + group = self.create_default_securitygroup(self.resource_group, self.location, self.name, self.os_type, + self.open_ports) + + parameters = NetworkInterface( + location=self.location, + ip_configurations=[ + NetworkInterfaceIPConfiguration( + private_ip_allocation_method='Dynamic', + ) + ] + ) + parameters.ip_configurations[0].subnet = Subnet(id=subnet_id) + parameters.ip_configurations[0].name = 'default' + parameters.network_security_group = NetworkSecurityGroup(id=group.id, + location=group.location, + resource_guid=group.resource_guid) + parameters.ip_configurations[0].public_ip_address = PublicIPAddress(id=pip.id, + location=pip.location, + resource_guid=pip.resource_guid) + + self.log("Creating NIC {0}".format(network_interface_name)) + self.log(self.serialize_obj(parameters, 'NetworkInterface'), pretty_print=True) + self.results['actions'].append("Created NIC {0}".format(network_interface_name)) + try: + poller = self.network_client.network_interfaces.create_or_update(self.resource_group, + network_interface_name, + parameters) + new_nic = self.get_poller_result(poller) + except Exception as exc: + self.fail("Error creating network interface {0} - {1}".format(network_interface_name, str(exc))) + 
return new_nic + + +def main(): + AzureRMVirtualMachine() + +if __name__ == '__main__': + main() + diff --git a/cloud/azure/azure_rm_virtualmachineimage_facts.py b/cloud/azure/azure_rm_virtualmachineimage_facts.py new file mode 100644 index 00000000000..3efdc57db39 --- /dev/null +++ b/cloud/azure/azure_rm_virtualmachineimage_facts.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualmachineimage_facts + +version_added: "2.1" + +short_description: Get virtual machine image facts. + +description: + - Get facts for virtual machine images. + +options: + name: + description: + - Only show results for a specific security group. + default: null + required: false + location: + description: + - Azure location value (ie. westus, eastus, eastus2, northcentralus, etc.). Supplying only a + location value will yield a list of available publishers for the location. + required: true + publisher: + description: + - Name of an image publisher. List image offerings associated with a particular publisher. + default: null + required: false + offer: + description: + - Name of an image offering. Combine with sku to see a list of available image versions. 
+ default: null + required: false + sku: + description: + - Image offering SKU. Combine with offer to see a list of available versions. + default: null + required: false + version: + description: + - Specific version number of an image. + default: null + required: false + +extends_documentation_fragment: + - azure + +author: + - "Chris Houseknecht (@chouseknecht)" + - "Matt Davis (@nitzmahone)" + +''' + +EXAMPLES = ''' + - name: Get facts for a specific image + azure_rm_virtualmachineimage_facts: + location: eastus + publisher: OpenLogic + offer: CentOS + sku: '7.1' + version: '7.1.20160308' + + - name: List available versions + azure_rm_virtualmachineimage_facts: + location: eastus + publisher: OpenLogic + offer: CentOS + sku: '7.1' + + - name: List available offers + azure_rm_virtualmachineimage_facts: + location: eastus + publisher: OpenLogic + + - name: List available publishers + azure_rm_virtualmachineimage_facts: + location: eastus + +''' + +RETURN = ''' +azure_vmimages: + description: List of image dicts. 
+ returned: always + type: list + example: [] +''' + +from ansible.module_utils.basic import * +from ansible.module_utils.azure_rm_common import * + +try: + from msrestazure.azure_exceptions import CloudError + from azure.common import AzureMissingResourceHttpError, AzureHttpError +except: + # This is handled in azure_rm_common + pass + +AZURE_ENUM_MODULES = ['azure.mgmt.compute.models.compute_management_client_enums'] + +class AzureRMVirtualMachineImageFacts(AzureRMModuleBase): + + def __init__(self, **kwargs): + + self.module_arg_spec = dict( + location=dict(type='str', required=True), + publisher=dict(type='str'), + offer=dict(type='str'), + sku=dict(type='str'), + version=dict(type='str') + ) + + self.results = dict( + changed=False, + ansible_facts=dict(azure_vmimages=[]) + ) + + self.location = None + self.publisher = None + self.offer = None + self.sku = None + self.version = None + + super(AzureRMVirtualMachineImageFacts, self).__init__(self.module_arg_spec) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.location and self.publisher and self.offer and self.sku and self.version: + self.results['ansible_facts']['azure_vmimages'] = self.get_item() + elif self.location and self.publisher and self.offer and self.sku: + self.results['ansible_facts']['azure_vmimages'] = self.list_images() + elif self.location and self.publisher: + self.results['ansible_facts']['azure_vmimages'] = self.list_offers() + elif self.location: + self.results['ansible_facts']['azure_vmimages'] = self.list_publishers() + + return self.results + + def get_item(self): + item = None + result = [] + + try: + item = self.compute_client.virtual_machine_images.get(self.location, + self.publisher, + self.offer, + self.sku, + self.version) + except CloudError: + pass + + if item: + result = [self.serialize_obj(item, 'VirtualMachineImage', enum_modules=AZURE_ENUM_MODULES)] + + return result + + def list_images(self): + response 
= None + results = [] + try: + response = self.compute_client.virtual_machine_images.list(self.location, + self.publisher, + self.offer, + self.sku,) + except CloudError: + pass + except Exception as exc: + self.fail("Failed to list images: {0}".format(str(exc))) + + if response: + for item in response: + results.append(self.serialize_obj(item, 'VirtualMachineImageResource', + enum_modules=AZURE_ENUM_MODULES)) + return results + + def list_offers(self): + response = None + results = [] + try: + response = self.compute_client.virtual_machine_images.list_offers(self.location, + self.publisher) + except CloudError: + pass + except Exception as exc: + self.fail("Failed to list offers: {0}".format(str(exc))) + + if response: + for item in response: + results.append(self.serialize_obj(item, 'VirtualMachineImageResource', + enum_modules=AZURE_ENUM_MODULES)) + return results + + def list_publishers(self): + response = None + results = [] + try: + response = self.compute_client.virtual_machine_images.list_publishers(self.location) + except CloudError: + pass + except Exception as exc: + self.fail("Failed to list publishers: {0}".format(str(exc))) + + if response: + for item in response: + results.append(self.serialize_obj(item, 'VirtualMachineImageResource', + enum_modules=AZURE_ENUM_MODULES)) + return results + + +def main(): + AzureRMVirtualMachineImageFacts() + +if __name__ == '__main__': + main() + diff --git a/cloud/azure/azure_rm_virtualnetwork.py b/cloud/azure/azure_rm_virtualnetwork.py new file mode 100644 index 00000000000..97494444eea --- /dev/null +++ b/cloud/azure/azure_rm_virtualnetwork.py @@ -0,0 +1,368 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later 
version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualnetwork +version_added: "2.1" +short_description: Manage Azure virtual networks. +description: + - Create, update or delete a virtual networks. Allows setting and updating the available IPv4 address ranges + and setting custom DNS servers. Use the azure_rm_subnet module to associate subnets with a virtual network. +options: + resource_group: + description: + - name of resource group. + required: true + address_prefixes_cidr: + description: + - List of IPv4 address ranges where each is formatted using CIDR notation. Required when creating + a new virtual network or using purge_address_prefixes. + aliases: + - address_prefixes + default: null + required: false + dns_servers: + description: + - Custom list of DNS servers. Maximum length of two. The first server in the list will be treated + as the Primary server. This is an explicit list. Existing DNS servers will be replaced with the + specified list. Use the purge_dns_servers option to remove all custom DNS servers and revert to + default Azure servers. + default: null + required: false + location: + description: + - Valid azure location. Defaults to location of the resource group. + default: resource_group location + required: false + name: + description: + - name of the virtual network. + required: true + purge_address_prefixes: + description: + - Use with state present to remove any existing address_prefixes. 
+ default: false + purge_dns_servers: + description: + - Use with state present to remove existing DNS servers, reverting to default Azure servers. Mutually + exclusive with dns_servers. + default: false + required: false + state: + description: + - Assert the state of the virtual network. Use 'present' to create or update and + 'absent' to delete. + default: present + choices: + - absent + - present + required: false + +extends_documentation_fragment: + - azure + - azure_tags + +author: + - "Chris Houseknecht (@chouseknecht)" + - "Matt Davis (@nitzmahone)" + +''' + +EXAMPLES = ''' + - name: Create a virtual network + azure_rm_virtualnetwork: + name: foobar + resource_group: Testing + address_prefixes_cidr: + - "10.1.0.0/16" + - "172.100.0.0/16" + dns_servers: + - "127.0.0.1" + - "127.0.0.2" + tags: + testing: testing + delete: on-exit + + - name: Delete a virtual network + azure_rm_virtualnetwork: + name: foobar + resource_group: Testing + state: absent +''' +RETURN = ''' +state: + description: Current state of the virtual network. + returned: always + type: dict + sample: { + "address_prefixes": [ + "10.1.0.0/16", + "172.100.0.0/16" + ], + "dns_servers": [ + "127.0.0.1", + "127.0.0.3" + ], + "etag": 'W/"0712e87c-f02f-4bb3-8b9e-2da0390a3886"', + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/my_test_network", + "location": "eastus", + "name": "my_test_network", + "provisioning_state": "Succeeded", + "tags": null, + "type": "Microsoft.Network/virtualNetworks" + } +''' + + +from ansible.module_utils.basic import * +from ansible.module_utils.azure_rm_common import * + +try: + from msrestazure.azure_exceptions import CloudError + from azure.mgmt.network.models import VirtualNetwork, AddressSpace, DhcpOptions +except ImportError: + # This is handled in azure_rm_common + pass + + + +def virtual_network_to_dict(vnet): + ''' + Convert a virtual network object to a dict. 
+ :param vnet: VirtualNet object + :return: dict + ''' + results = dict( + id=vnet.id, + name=vnet.name, + location=vnet.location, + type=vnet.type, + tags=vnet.tags, + provisioning_state=vnet.provisioning_state, + etag=vnet.etag + ) + if vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0: + results['dns_servers'] = [] + for server in vnet.dhcp_options.dns_servers: + results['dns_servers'].append(server) + if vnet.address_space and len(vnet.address_space.address_prefixes) > 0: + results['address_prefixes'] = [] + for space in vnet.address_space.address_prefixes: + results['address_prefixes'].append(space) + return results + + +class AzureRMVirtualNetwork(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group=dict(type='str', required=True), + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + location=dict(type='str'), + address_prefixes_cidr=dict(type='list', aliases=['address_prefixes']), + dns_servers=dict(type='list',), + purge_address_prefixes=dict(type='bool', default=False, aliases=['purge']), + purge_dns_servers=dict(type='bool', default=False), + ) + + mutually_exclusive = [ + ('dns_servers', 'purge_dns_servers') + ] + + required_if = [ + ('purge_address_prefixes', True, ['address_prefixes_cidr']) + ] + + self.resource_group = None + self.name = None + self.state = None + self.location = None + self.address_prefixes_cidr = None + self.purge_address_prefixes = None + self.dns_servers = None + self.purge_dns_servers = None + + self.results=dict( + changed=False, + state=dict() + ) + + super(AzureRMVirtualNetwork, self).__init__(self.module_arg_spec, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec.keys() + ['tags']: + setattr(self, key, kwargs[key]) + + self.results['check_mode'] = self.check_mode + + resource_group = 
self.get_resource_group(self.resource_group) + if not self.location: + # Set default location + self.location = resource_group.location + + if self.state == 'present' and self.purge_address_prefixes: + for prefix in self.address_prefixes_cidr: + if not CIDR_PATTERN.match(prefix): + self.fail("Parameter error: invalid address prefix value {0}".format(prefix)) + + if self.dns_servers and len(self.dns_servers) > 2: + self.fail("Parameter error: You can provide a maximum of 2 DNS servers.") + + changed = False + results = dict() + + try: + self.log('Fetching vnet {0}'.format(self.name)) + vnet = self.network_client.virtual_networks.get(self.resource_group, self.name) + + results = virtual_network_to_dict(vnet) + self.log('Vnet exists {0}'.format(self.name)) + self.log(results, pretty_print=True) + self.check_provisioning_state(vnet, self.state) + + if self.state == 'present': + if self.address_prefixes_cidr: + existing_address_prefix_set = set(vnet.address_space.address_prefixes) + requested_address_prefix_set = set(self.address_prefixes_cidr) + missing_prefixes = requested_address_prefix_set - existing_address_prefix_set + extra_prefixes = existing_address_prefix_set - requested_address_prefix_set + if len(missing_prefixes) > 0: + self.log('CHANGED: there are missing address_prefixes') + changed = True + if not self.purge_address_prefixes: + # add the missing prefixes + for prefix in missing_prefixes: + results['address_prefixes'].append(prefix) + + if len(extra_prefixes) > 0 and self.purge_address_prefixes: + self.log('CHANGED: there are address_prefixes to purge') + changed = True + # replace existing address prefixes with requested set + results['address_prefixes'] = self.address_prefixes_cidr + + update_tags, results['tags'] = self.update_tags(results['tags']) + if update_tags: + changed = True + + if self.dns_servers: + existing_dns_set = set(vnet.dhcp_options.dns_servers) + requested_dns_set = set(self.dns_servers) + if existing_dns_set != requested_dns_set: + 
self.log('CHANGED: replacing DNS servers') + changed = True + results['dns_servers'] = self.dns_servers + + if self.purge_dns_servers and vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0: + self.log('CHANGED: purging existing DNS servers') + changed = True + results['dns_servers'] = [] + elif self.state == 'absent': + self.log("CHANGED: vnet exists but requested state is 'absent'") + changed = True + except CloudError: + self.log('Vnet {0} does not exist'.format(self.name)) + if self.state == 'present': + self.log("CHANGED: vnet {0} does not exist but requested state is 'present'".format(self.name)) + changed = True + + self.results['changed'] = changed + self.results['state'] = results + + if self.check_mode: + return self.results + + if changed: + if self.state == 'present': + if not results: + # create a new virtual network + self.log("Create virtual network {0}".format(self.name)) + if not self.address_prefixes_cidr: + self.fail('Parameter error: address_prefixes_cidr required when creating a virtual network') + vnet = VirtualNetwork( + location=self.location, + address_space=AddressSpace( + address_prefixes=self.address_prefixes_cidr + ) + ) + if self.dns_servers: + vnet.dhcp_options = DhcpOptions( + dns_servers=self.dns_servers + ) + if self.tags: + vnet.tags = self.tags + self.results['state'] = self.create_or_update_vnet(vnet) + else: + # update existing virtual network + self.log("Update virtual network {0}".format(self.name)) + vnet = VirtualNetwork( + location=results['location'], + address_space=AddressSpace( + address_prefixes=results['address_prefixes'] + ), + tags=results['tags'] + ) + if results.get('dns_servers'): + vnet.dhcp_options = DhcpOptions( + dns_servers=results['dns_servers'] + ) + self.results['state'] = self.create_or_update_vnet(vnet) + elif self.state == 'absent': + self.delete_virtual_network() + self.results['state']['status'] = 'Deleted' + + + return self.results + + def create_or_update_vnet(self, vnet): + try: + poller 
= self.network_client.virtual_networks.create_or_update(self.resource_group, self.name, vnet) + new_vnet = self.get_poller_result(poller) + except Exception as exc: + self.fail("Error creating or updating virtual network {0} - {1}".format(self.name, str(exc))) + return virtual_network_to_dict(new_vnet) + + def delete_virtual_network(self): + try: + poller = self.network_client.virtual_networks.delete(self.resource_group, self.name) + result = self.get_poller_result(poller) + except Exception as exc: + self.fail("Error deleting virtual network {0} - {1}".format(self.name, str(exc))) + return result + + +def main(): + AzureRMVirtualNetwork() + +if __name__ == '__main__': + main() + diff --git a/cloud/azure/azure_rm_virtualnetwork_facts.py b/cloud/azure/azure_rm_virtualnetwork_facts.py new file mode 100644 index 00000000000..229df95b74c --- /dev/null +++ b/cloud/azure/azure_rm_virtualnetwork_facts.py @@ -0,0 +1,197 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Matt Davis, +# Chris Houseknecht, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: azure_rm_virtualnetwork_facts + +version_added: "2.1" + +short_description: Get virtual network facts. + +description: + - Get facts for a specific virtual network or all virtual networks within a resource group. 
+ +options: + name: + description: + - Only show results for a specific security group. + default: null + required: false + resource_group: + description: + - Limit results by resource group. Required when filtering by name. + default: null + required: false + tags: + description: + - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. + default: null + required: false + +extends_documentation_fragment: + - azure + +authors: + - "Chris Houseknecht house@redhat.com" + - "Matt Davis mdavis@redhat.com" + +''' + +EXAMPLES = ''' + - name: Get facts for one virtual network + azure_rm_virtualnetwork_facts: + resource_group: Testing + name: secgroup001 + + - name: Get facts for all virtual networks + azure_rm_virtualnetwork_facts: + resource_group: Testing + + - name: Get facts by tags + azure_rm_virtualnetwork_facts: + tags: + - testing +''' +RETURN = ''' +azure_virtualnetworks: + description: List of virtual network dicts. + returned: always + type: list + example: [{ + "etag": 'W/"532ba1be-ae71-40f2-9232-3b1d9cf5e37e"', + "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/vnet2001", + "location": "eastus2", + "name": "vnet2001", + "properties": { + "addressSpace": { + "addressPrefixes": [ + "10.10.0.0/16" + ] + }, + "provisioningState": "Succeeded", + "resourceGuid": "a7ba285f-f7e7-4e17-992a-de4d39f28612", + "subnets": [] + }, + "type": "Microsoft.Network/virtualNetworks" + }] +''' + +from ansible.module_utils.basic import * +from ansible.module_utils.azure_rm_common import * + +try: + from msrestazure.azure_exceptions import CloudError + from azure.common import AzureMissingResourceHttpError, AzureHttpError +except: + # This is handled in azure_rm_common + pass + + +AZURE_OBJECT_CLASS = 'VirtualNetwork' + + +class AzureRMNetworkInterfaceFacts(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + name=dict(type='str'), + 
resource_group=dict(type='str'), + tags=dict(type='list'), + ) + + self.results = dict( + changed=False, + ansible_facts=dict(azure_virtualnetworks=[]) + ) + + self.name = None + self.resource_group = None + self.tags = None + + super(AzureRMNetworkInterfaceFacts, self).__init__(self.module_arg_spec, + supports_tags=False, + facts_module=True) + + def exec_module(self, **kwargs): + + for key in self.module_arg_spec: + setattr(self, key, kwargs[key]) + + if self.name is not None: + self.results['ansible_facts']['azure_virtualnetworks'] = self.get_item() + else: + self.results['ansible_facts']['azure_virtualnetworks'] = self.list_items() + + return self.results + + def get_item(self): + self.log('Get properties for {0}'.format(self.name)) + item = None + results = [] + + try: + item = self.network_client.virtual_networks.get(self.resource_group, self.name) + except CloudError: + pass + + if item and self.has_tags(item.tags, self.tags): + results = [self.serialize_obj(item, AZURE_OBJECT_CLASS)] + + return results + + def list_resource_group(self): + self.log('List items for resource group') + try: + response = self.network_client.virtual_networks.list(self.resource_group) + except AzureHttpError as exc: + self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS)) + return results + + def list_items(self): + self.log('List all for items') + try: + response = self.network_client.virtual_networks.list_all() + except AzureHttpError as exc: + self.fail("Failed to list all items - {0}".format(str(exc))) + + results = [] + for item in response: + if self.has_tags(item.tags, self.tags): + results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS)) + return results + +def main(): + AzureRMNetworkInterfaceFacts() + +if __name__ == '__main__': + main() + diff --git 
a/cloud/digital_ocean/digital_ocean.py b/cloud/digital_ocean/digital_ocean.py index be3321e60a6..2cdcbd6420c 100644 --- a/cloud/digital_ocean/digital_ocean.py +++ b/cloud/digital_ocean/digital_ocean.py @@ -15,6 +15,10 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: digital_ocean @@ -86,6 +90,13 @@ version_added: "2.0" required: false default: None + ipv6: + description: + - Optional, Boolean, enable IPv6 for your droplet. + version_added: "2.2" + required: false + default: "no" + choices: [ "yes", "no" ] wait: description: - Wait for the droplet to be in state 'running' before returning. If wait is "no" an ip_address may not be returned. @@ -102,7 +113,7 @@ notes: - Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. They both refer to the v2 token. - As of Ansible 1.9.5 and 2.0, Version 2 of the DigitalOcean API is used, this removes C(client_id) and C(api_key) options in favor of C(api_token). - - If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired. + - If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired. Upgrade Ansible or, if unable to, try downloading the latest version of this module from github and putting it into a 'library' directory. 
requirements: - "python >= 2.6" @@ -134,11 +145,13 @@ region_id: ams2 image_id: fedora-19-x64 wait_timeout: 500 - register: my_droplet -- debug: msg="ID is {{ my_droplet.droplet.id }}" -- debug: msg="IP is {{ my_droplet.droplet.ip_address }}" +- debug: + msg: "ID is {{ my_droplet.droplet.id }}" + +- debug: + msg: "IP is {{ my_droplet.droplet.ip_address }}" # Ensure a droplet is present # If droplet id already exist, will return the droplet details and changed = False @@ -173,26 +186,39 @@ import os import time +import traceback + from distutils.version import LooseVersion -HAS_DOPY = True +try: + import six + HAS_SIX = True +except ImportError: + HAS_SIX = False + +HAS_DOPY = False try: import dopy from dopy.manager import DoError, DoManager - if LooseVersion(dopy.__version__) < LooseVersion('0.3.2'): - HAS_DOPY = False + if LooseVersion(dopy.__version__) >= LooseVersion('0.3.2'): + HAS_DOPY = True except ImportError: - HAS_DOPY = False + pass + +from ansible.module_utils.basic import AnsibleModule -class TimeoutError(DoError): - def __init__(self, msg, id): + +class TimeoutError(Exception): + def __init__(self, msg, id_): super(TimeoutError, self).__init__(msg) - self.id = id + self.id = id_ + class JsonfyMixIn(object): def to_json(self): return self.__dict__ + class Droplet(JsonfyMixIn): manager = None @@ -242,10 +268,13 @@ def setup(cls, api_token): cls.manager = DoManager(None, api_token, api_version=2) @classmethod - def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False, user_data=None): + def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False, user_data=None, ipv6=False): private_networking_lower = str(private_networking).lower() backups_enabled_lower = str(backups_enabled).lower() - json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking_lower, 
backups_enabled_lower,user_data) + ipv6_lower = str(ipv6).lower() + json = cls.manager.new_droplet(name, size_id, image_id, region_id, + ssh_key_ids=ssh_key_ids, virtio=virtio, private_networking=private_networking_lower, + backups_enabled=backups_enabled_lower, user_data=user_data, ipv6=ipv6_lower) droplet = cls(json) return droplet @@ -273,6 +302,7 @@ def list_all(cls): json = cls.manager.all_active_droplets() return map(cls, json) + class SSH(JsonfyMixIn): manager = None @@ -308,6 +338,7 @@ def add(cls, name, key_pub): json = cls.manager.new_ssh_key(name, key_pub) return cls(json) + def core(module): def getkeyordie(k): v = module.params[k] @@ -317,7 +348,7 @@ def getkeyordie(k): try: api_token = module.params['api_token'] or os.environ['DO_API_TOKEN'] or os.environ['DO_API_KEY'] - except KeyError, e: + except KeyError as e: module.fail_json(msg='Unable to load %s' % e.message) changed = True @@ -349,6 +380,7 @@ def getkeyordie(k): private_networking=module.params['private_networking'], backups_enabled=module.params['backups_enabled'], user_data=module.params.get('user_data'), + ipv6=module.params['ipv6'], ) if droplet.is_powered_on(): @@ -374,7 +406,7 @@ def getkeyordie(k): if not droplet: module.exit_json(changed=False, msg='The droplet is not found.') - event_json = droplet.destroy() + droplet.destroy() module.exit_json(changed=True) elif command == 'ssh': @@ -412,6 +444,7 @@ def main(): id = dict(aliases=['droplet_id'], type='int'), unique_name = dict(type='bool', default='no'), user_data = dict(default=None), + ipv6 = dict(type='bool', default='no'), wait = dict(type='bool', default=True), wait_timeout = dict(default=300, type='int'), ssh_pub_key = dict(type='str'), @@ -428,18 +461,17 @@ def main(): ['id', 'name'], ), ) + if not HAS_DOPY and not HAS_SIX: + module.fail_json(msg='dopy >= 0.3.2 is required for this module. dopy requires six but six is not installed. 
Make sure both dopy and six are installed.') if not HAS_DOPY: module.fail_json(msg='dopy >= 0.3.2 required for this module') try: core(module) - except TimeoutError, e: + except TimeoutError as e: module.fail_json(msg=str(e), id=e.id) - except (DoError, Exception), e: - module.fail_json(msg=str(e)) - -# import module snippets -from ansible.module_utils.basic import * + except (DoError, Exception) as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) if __name__ == '__main__': main() diff --git a/cloud/digital_ocean/digital_ocean_block_storage.py b/cloud/digital_ocean/digital_ocean_block_storage.py new file mode 100644 index 00000000000..73a76cd737b --- /dev/null +++ b/cloud/digital_ocean/digital_ocean_block_storage.py @@ -0,0 +1,348 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: digital_ocean_block_storage +short_description: Create/destroy or attach/detach Block Storage volumes in DigitalOcean +description: + - Create/destroy Block Storage volume in DigitalOcean, or attach/detach Block Storage volume to a droplet. +version_added: "2.2" +options: + command: + description: + - Which operation do you want to perform. 
+ choices: ['create', 'attach'] + required: true + state: + description: + - Indicate desired state of the target. + choices: ['present', 'absent'] + required: true + api_token: + description: + - DigitalOcean api token. + required: true + block_size: + description: + - The size of the Block Storage volume in gigabytes. Required when command=create and state=present. + volume_name: + description: + - The name of the Block Storage volume. + required: true + description: + description: + - Description of the Block Storage volume. + region: + description: + - The slug of the region where your Block Storage volume should be located in. + required: true + droplet_id: + description: + - The droplet id you want to operate on. Required when command=attach. + timeout: + description: + - The timeout in seconds used for polling DigitalOcean's API. + default: 10 + +notes: + - Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. + They both refer to the v2 token. + +author: + - "Harnek Sidhu (github: @harneksidhu)" +''' + +EXAMPLES = ''' +# Create new Block Storage +- digital_ocean_block_storage: + state: present + command: create + api_token: + region: nyc1 + block_size: 10 + volume_name: nyc1-block-storage +# Delete Block Storage +- digital_ocean_block_storage: + state: absent + command: create + api_token: + region: nyc1 + volume_name: nyc1-block-storage +# Attach Block Storage to a Droplet +- digital_ocean_block_storage: + state: present + command: attach + api_token: + volume_name: nyc1-block-storage + region: nyc1 + droplet_id: +# Detach Block Storage from a Droplet +- digital_ocean_block_storage: + state: absent + command: attach + api_token: + volume_name: nyc1-block-storage + region: nyc1 + droplet_id: +''' + +RETURN = ''' +id: + description: Unique identifier of a Block Storage volume returned during creation. 
+ returned: changed + type: string + sample: "69b25d9a-494c-12e6-a5af-001f53126b44" +''' + +import json +import os +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import fetch_url + + +class DOBlockStorageException(Exception): + pass + + +class Response(object): + + def __init__(self, resp, info): + self.body = None + if resp: + self.body = resp.read() + self.info = info + + @property + def json(self): + if self.body: + return json.loads(self.body) + elif "body" in self.info: + return json.loads(self.info["body"]) + else: + return None + + @property + def status_code(self): + return self.info["status"] + + +class Rest(object): + + def __init__(self, module, headers): + self.module = module + self.headers = headers + self.baseurl = 'https://api.digitalocean.com/v2' + + def _url_builder(self, path): + if path[0] == '/': + path = path[1:] + return '%s/%s' % (self.baseurl, path) + + def send(self, method, path, data=None, headers=None): + url = self._url_builder(path) + data = self.module.jsonify(data) + + resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method) + + return Response(resp, info) + + def get(self, path, data=None, headers=None): + return self.send('GET', path, data, headers) + + def put(self, path, data=None, headers=None): + return self.send('PUT', path, data, headers) + + def post(self, path, data=None, headers=None): + return self.send('POST', path, data, headers) + + def delete(self, path, data=None, headers=None): + return self.send('DELETE', path, data, headers) + + +class DOBlockStorage(object): + + def __init__(self, module): + api_token = module.params['api_token'] or \ + os.environ['DO_API_TOKEN'] or os.environ['DO_API_KEY'] + self.module = module + self.rest = Rest(module, {'Authorization': 'Bearer {}'.format(api_token), + 'Content-type': 'application/json'}) + + def get_key_or_fail(self, k): + v = 
self.module.params[k] + if v is None: + self.module.fail_json(msg='Unable to load %s' % k) + return v + + def poll_action_for_complete_status(self, action_id): + url = 'actions/{}'.format(action_id) + end_time = time.time() + self.module.params['timeout'] + while time.time() < end_time: + time.sleep(2) + response = self.rest.get(url) + status = response.status_code + json = response.json + if status == 200: + if json['action']['status'] == 'completed': + return True + elif json['action']['status'] == 'errored': + raise DOBlockStorageException(json['message']) + raise DOBlockStorageException('Unable to reach api.digitalocean.com') + + def get_attached_droplet_ID(self, volume_name, region): + url = 'volumes?name={}&region={}'.format(volume_name, region) + response = self.rest.get(url) + status = response.status_code + json = response.json + if status == 200: + volumes = json['volumes'] + if len(volumes) > 0: + droplet_ids = volumes[0]['droplet_ids'] + if len(droplet_ids) > 0: + return droplet_ids[0] + return None + else: + raise DOBlockStorageException(json['message']) + + def attach_detach_block_storage(self, method, volume_name, region, droplet_id): + data = { + 'type': method, + 'volume_name': volume_name, + 'region': region, + 'droplet_id': droplet_id + } + response = self.rest.post('volumes/actions', data=data) + status = response.status_code + json = response.json + if status == 202: + return self.poll_action_for_complete_status(json['action']['id']) + elif status == 200: + return True + elif status == 422: + return False + else: + raise DOBlockStorageException(json['message']) + + def create_block_storage(self): + block_size = self.get_key_or_fail('block_size') + volume_name = self.get_key_or_fail('volume_name') + region = self.get_key_or_fail('region') + description = self.module.params['description'] + data = { + 'size_gigabytes': block_size, + 'name': volume_name, + 'description': description, + 'region': region + } + response = self.rest.post("volumes", 
data=data) + status = response.status_code + json = response.json + if status == 201: + self.module.exit_json(changed=True, id=json['volume']['id']) + elif status == 409 and json['id'] == 'already_exists': + self.module.exit_json(changed=False) + else: + raise DOBlockStorageException(json['message']) + + def delete_block_storage(self): + volume_name = self.get_key_or_fail('volume_name') + region = self.get_key_or_fail('region') + url = 'volumes?name={}&region={}'.format(volume_name, region) + attached_droplet_id = self.get_attached_droplet_ID(volume_name, region) + if attached_droplet_id is not None: + self.attach_detach_block_storage('detach', volume_name, region, attached_droplet_id) + response = self.rest.delete(url) + status = response.status_code + json = response.json + if status == 204: + self.module.exit_json(changed=True) + elif status == 404: + self.module.exit_json(changed=False) + else: + raise DOBlockStorageException(json['message']) + + def attach_block_storage(self): + volume_name = self.get_key_or_fail('volume_name') + region = self.get_key_or_fail('region') + droplet_id = self.get_key_or_fail('droplet_id') + attached_droplet_id = self.get_attached_droplet_ID(volume_name, region) + if attached_droplet_id is not None: + if attached_droplet_id == droplet_id: + self.module.exit_json(changed=False) + else: + self.attach_detach_block_storage('detach', volume_name, region, attached_droplet_id) + changed_status = self.attach_detach_block_storage('attach', volume_name, region, droplet_id) + self.module.exit_json(changed=changed_status) + + def detach_block_storage(self): + volume_name = self.get_key_or_fail('volume_name') + region = self.get_key_or_fail('region') + droplet_id = self.get_key_or_fail('droplet_id') + changed_status = self.attach_detach_block_storage('detach', volume_name, region, droplet_id) + self.module.exit_json(changed=changed_status) + + +def handle_request(module): + block_storage = DOBlockStorage(module) + command = 
module.params['command'] + state = module.params['state'] + if command == 'create': + if state == 'present': + block_storage.create_block_storage() + elif state == 'absent': + block_storage.delete_block_storage() + elif command == 'attach': + if state == 'present': + block_storage.attach_block_storage() + elif state == 'absent': + block_storage.detach_block_storage() + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state = dict(choices=['present', 'absent'], required=True), + command = dict(choices=['create', 'attach'], required=True), + api_token = dict(aliases=['API_TOKEN'], no_log=True), + block_size = dict(type='int'), + volume_name = dict(type='str', required=True), + description = dict(type='str'), + region = dict(type='str', required=True), + droplet_id = dict(type='int'), + timeout = dict(type='int', default=10), + ), + ) + try: + handle_request(module) + except DOBlockStorageException: + e = get_exception() + module.fail_json(msg=e.message) + except KeyError: + e = get_exception() + module.fail_json(msg='Unable to load %s' % e.message) + +if __name__ == '__main__': + main() diff --git a/cloud/digital_ocean/digital_ocean_domain.py b/cloud/digital_ocean/digital_ocean_domain.py index e83ac86bcd3..61fc6c8eeac 100644 --- a/cloud/digital_ocean/digital_ocean_domain.py +++ b/cloud/digital_ocean/digital_ocean_domain.py @@ -15,6 +15,10 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: digital_ocean_domain @@ -46,7 +50,7 @@ notes: - Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. They both refer to the v2 token. - As of Ansible 1.9.5 and 2.0, Version 2 of the DigitalOcean API is used, this removes C(client_id) and C(api_key) options in favor of C(api_token). 
- - If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired. + - If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired. requirements: - "python >= 2.6" @@ -82,7 +86,7 @@ ''' import os -import time +import traceback try: from dopy.manager import DoError, DoManager @@ -90,15 +94,14 @@ except ImportError as e: HAS_DOPY = False -class TimeoutError(DoError): - def __init__(self, msg, id): - super(TimeoutError, self).__init__(msg) - self.id = id +from ansible.module_utils.basic import AnsibleModule + class JsonfyMixIn(object): def to_json(self): return self.__dict__ + class DomainRecord(JsonfyMixIn): manager = None @@ -106,7 +109,7 @@ def __init__(self, json): self.__dict__.update(json) update_attr = __init__ - def update(self, data = None, record_type = None): + def update(self, data=None, record_type=None): json = self.manager.edit_domain_record(self.domain_id, self.id, record_type if record_type is not None else self.record_type, @@ -118,6 +121,7 @@ def destroy(self): json = self.manager.destroy_domain_record(self.domain_id, self.id) return json + class Domain(JsonfyMixIn): manager = None @@ -125,10 +129,10 @@ def __init__(self, domain_json): self.__dict__.update(domain_json) def destroy(self): - self.manager.destroy_domain(self.id) + self.manager.destroy_domain(self.name) def records(self): - json = self.manager.all_domain_records(self.id) + json = self.manager.all_domain_records(self.name) return map(DomainRecord, json) @classmethod @@ -165,6 +169,7 @@ def find(cls, name=None, id=None): return False + def core(module): def getkeyordie(k): v = module.params[k] @@ -174,10 +179,9 @@ def getkeyordie(k): try: api_token = module.params['api_token'] or os.environ['DO_API_TOKEN'] or os.environ['DO_API_KEY'] - except KeyError, e: + except KeyError as e: module.fail_json(msg='Unable to 
load %s' % e.message) - changed = True state = module.params['state'] Domain.setup(api_token) @@ -195,12 +199,12 @@ def getkeyordie(k): records = domain.records() at_record = None for record in records: - if record.name == "@" and record.record_type == 'A': + if record.name == "@" and record.type == 'A': at_record = record if not at_record.data == getkeyordie("ip"): record.update(data=getkeyordie("ip"), record_type='A') - module.exit_json(changed=True, domain=Domain.find(id=record.domain_id).to_json()) + module.exit_json(changed=True, domain=Domain.find(id=record.id).to_json()) module.exit_json(changed=False, domain=domain.to_json()) @@ -237,12 +241,8 @@ def main(): try: core(module) - except TimeoutError as e: - module.fail_json(msg=str(e), id=e.id) except (DoError, Exception) as e: - module.fail_json(msg=str(e)) + module.fail_json(msg=str(e), exception=traceback.format_exc()) -# import module snippets -from ansible.module_utils.basic import * if __name__ == '__main__': main() diff --git a/cloud/digital_ocean/digital_ocean_sshkey.py b/cloud/digital_ocean/digital_ocean_sshkey.py index cf01ae7be67..00c31c788e9 100644 --- a/cloud/digital_ocean/digital_ocean_sshkey.py +++ b/cloud/digital_ocean/digital_ocean_sshkey.py @@ -15,6 +15,10 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: digital_ocean_sshkey @@ -69,7 +73,7 @@ ''' import os -import time +import traceback try: from dopy.manager import DoError, DoManager @@ -77,15 +81,14 @@ except ImportError: HAS_DOPY = False -class TimeoutError(DoError): - def __init__(self, msg, id): - super(TimeoutError, self).__init__(msg) - self.id = id +from ansible.module_utils.basic import AnsibleModule + class JsonfyMixIn(object): def to_json(self): return self.__dict__ + class SSH(JsonfyMixIn): manager = None @@ -121,6 +124,7 @@ def add(cls, name, key_pub): json = cls.manager.new_ssh_key(name, key_pub) return cls(json) + def core(module): def getkeyordie(k): v = module.params[k] @@ -132,10 +136,9 @@ def getkeyordie(k): # params['client_id'] will be None even if client_id is not passed in client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID'] api_key = module.params['api_key'] or os.environ['DO_API_KEY'] - except KeyError, e: + except KeyError as e: module.fail_json(msg='Unable to load %s' % e.message) - changed = True state = module.params['state'] SSH.setup(client_id, api_key) @@ -154,6 +157,7 @@ def getkeyordie(k): key.destroy() module.exit_json(changed=True) + def main(): module = AnsibleModule( argument_spec = dict( @@ -173,12 +177,8 @@ def main(): try: core(module) - except TimeoutError as e: - module.fail_json(msg=str(e), id=e.id) except (DoError, Exception) as e: - module.fail_json(msg=str(e)) + module.fail_json(msg=str(e), exception=traceback.format_exc()) -# import module snippets -from ansible.module_utils.basic import * if __name__ == '__main__': main() diff --git a/cloud/digital_ocean/digital_ocean_tag.py b/cloud/digital_ocean/digital_ocean_tag.py new file mode 100644 index 00000000000..3ad387a90c3 --- /dev/null +++ b/cloud/digital_ocean/digital_ocean_tag.py @@ -0,0 +1,262 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# 
+# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: digital_ocean_tag +short_description: Create and remove tag(s) to DigitalOcean resource. +description: + - Create and remove tag(s) to DigitalOcean resource. +version_added: "2.2" +options: + name: + description: + - The name of the tag. The supported characters for names include + alphanumeric characters, dashes, and underscores. + required: true + resource_id: + description: + - The ID of the resource to operate on. + resource_type: + description: + - The type of resource to operate on. Currently only tagging of + droplets is supported. + default: droplet + choices: ['droplet'] + state: + description: + - Whether the tag should be present or absent on the resource. + default: present + choices: ['present', 'absent'] + api_token: + description: + - DigitalOcean api token. + +notes: + - Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. + They both refer to the v2 token. + - As of Ansible 2.0, Version 2 of the DigitalOcean API is used. 
+ +requirements: + - "python >= 2.6" +''' + + +EXAMPLES = ''' +- name: create a tag + digital_ocean_tag: + name: production + state: present + +- name: tag a resource; creating the tag if it does not exists + digital_ocean_tag: + name: "{{ item }}" + resource_id: YYY + state: present + with_items: + - staging + - dbserver + +- name: untag a resource + digital_ocean_tag: + name: staging + resource_id: YYY + state: absent + +# Deleting a tag also untags all the resources that have previously been +# tagged with it +- name: remove a tag + digital_ocean_tag: + name: dbserver + state: absent +''' + + +RETURN = ''' +data: + description: a DigitalOcean Tag resource + returned: success and no resource constraint + type: dict + sample: { + "tag": { + "name": "awesome", + "resources": { + "droplets": { + "count": 0, + "last_tagged": null + } + } + } + } +''' + +import json +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +class Response(object): + + def __init__(self, resp, info): + self.body = None + if resp: + self.body = resp.read() + self.info = info + + @property + def json(self): + if not self.body: + if "body" in self.info: + return json.loads(self.info["body"]) + return None + try: + return json.loads(self.body) + except ValueError: + return None + + @property + def status_code(self): + return self.info["status"] + + +class Rest(object): + + def __init__(self, module, headers): + self.module = module + self.headers = headers + self.baseurl = 'https://api.digitalocean.com/v2' + + def _url_builder(self, path): + if path[0] == '/': + path = path[1:] + return '%s/%s' % (self.baseurl, path) + + def send(self, method, path, data=None, headers=None): + url = self._url_builder(path) + data = self.module.jsonify(data) + + resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method) + + return Response(resp, info) + + def get(self, path, data=None, headers=None): + return 
self.send('GET', path, data, headers) + + def put(self, path, data=None, headers=None): + return self.send('PUT', path, data, headers) + + def post(self, path, data=None, headers=None): + return self.send('POST', path, data, headers) + + def delete(self, path, data=None, headers=None): + return self.send('DELETE', path, data, headers) + + +def core(module): + try: + api_token = module.params['api_token'] or \ + os.environ['DO_API_TOKEN'] or os.environ['DO_API_KEY'] + except KeyError as e: + module.fail_json(msg='Unable to load %s' % e.message) + + state = module.params['state'] + name = module.params['name'] + resource_id = module.params['resource_id'] + resource_type = module.params['resource_type'] + + rest = Rest(module, {'Authorization': 'Bearer {}'.format(api_token), + 'Content-type': 'application/json'}) + + if state in ('present'): + if name is None: + module.fail_json(msg='parameter `name` is missing') + + # Ensure Tag exists + response = rest.post("tags", data={'name': name}) + status_code = response.status_code + json = response.json + if status_code == 201: + changed = True + elif status_code == 422: + changed = False + else: + module.exit_json(changed=False, data=json) + + if resource_id is None: + # No resource defined, we're done. 
+ if json is None: + module.exit_json(changed=changed, data=json) + else: + module.exit_json(changed=changed, data=json) + else: + # Tag a resource + url = "tags/{}/resources".format(name) + payload = { + 'resources': [{ + 'resource_id': resource_id, + 'resource_type': resource_type}]} + response = rest.post(url, data=payload) + if response.status_code == 204: + module.exit_json(changed=True) + else: + module.fail_json(msg="error tagging resource '{}': {}".format( + resource_id, response.json["message"])) + + elif state in ('absent'): + if name is None: + module.fail_json(msg='parameter `name` is missing') + + if resource_id: + url = "tags/{}/resources".format(name) + payload = { + 'resources': [{ + 'resource_id': resource_id, + 'resource_type': resource_type}]} + response = rest.delete(url, data=payload) + else: + url = "tags/{}".format(name) + response = rest.delete(url) + if response.status_code == 204: + module.exit_json(changed=True) + else: + module.exit_json(changed=False, data=response.json) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type='str', required=True), + resource_id=dict(aliases=['droplet_id'], type='int'), + resource_type=dict(choices=['droplet'], default='droplet'), + state=dict(choices=['present', 'absent'], default='present'), + api_token=dict(aliases=['API_TOKEN'], no_log=True), + ) + ) + + try: + core(module) + except Exception as e: + module.fail_json(msg=str(e)) + +if __name__ == '__main__': + main() diff --git a/cloud/docker/docker.py b/cloud/docker/_docker.py similarity index 97% rename from cloud/docker/docker.py rename to cloud/docker/_docker.py index cd49c191193..08adf3b9071 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/_docker.py @@ -21,13 +21,20 @@ ###################################################################### +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: docker version_added: "1.4" 
short_description: manage docker containers +deprecated: In 2.2 use M(docker_container) and M(docker_image) instead. description: - - Manage the life cycle of docker containers. + - This is the original Ansible module for managing the Docker container life cycle. + - "NOTE: Additional and newer modules are available. For the latest on orchestrating containers with Ansible + visit our Getting Started with Docker Guide at https://github.com/ansible/ansible/blob/devel/docsite/rst/guide_docker.rst." options: count: description: @@ -524,7 +531,12 @@ import json import os import shlex -from urlparse import urlparse +try: + from urlparse import urlparse +except ImportError: + # python3 + from urllib.parse import urlparse + try: import docker.client import docker.utils @@ -554,6 +566,8 @@ def _human_to_bytes(number): if isinstance(number, int): return number + if number.isdigit(): + return int(number) if number[-1] == suffixes[0] and number[-2].isdigit(): return number[:-1] @@ -585,13 +599,13 @@ def get_split_image_tag(image): else: registry, resource = None, image - # now we can determine if image has a tag - if ':' in resource: - resource, tag = resource.split(':', 1) - if registry: - resource = '/'.join((registry, resource)) - if tag == "": - tag = "latest" + # now we can determine if image has a tag or a digest + for s in ['@',':']: + if s in resource: + resource, tag = resource.split(s, 1) + if registry: + resource = '/'.join((registry, resource)) + break else: tag = "latest" resource = image @@ -695,7 +709,7 @@ def __init__(self, module): self.binds = None self.volumes = None if self.module.params.get('volumes'): - self.binds = {} + self.binds = [] self.volumes = [] vols = self.module.params.get('volumes') for vol in vols: @@ -713,7 +727,7 @@ def __init__(self, module): self.module.fail_json(msg='invalid bind mode ' + parts[2]) else: mode = parts[2] - self.binds[parts[0]] = {'bind': parts[1], 'mode': mode } + self.binds.append("%s:%s:%s" % (parts[0], 
parts[1], mode)) else: self.module.fail_json(msg='volumes support 1 to 3 arguments') @@ -737,10 +751,6 @@ def __init__(self, module): if self.module.params.get('links'): self.links = self.get_links(self.module.params.get('links')) - env = self.module.params.get('env', None) - env_file = self.module.params.get('env_file', None) - self.environment = self.get_environment(env, env_file) - self.ulimits = None if self.module.params.get('ulimits'): self.ulimits = [] @@ -839,6 +849,10 @@ def __init__(self, module): self.docker_py_versioninfo = get_docker_py_versioninfo() + env = self.module.params.get('env', None) + env_file = self.module.params.get('env_file', None) + self.environment = self.get_environment(env, env_file) + def _check_capabilities(self): """ Create a list of available capabilities @@ -1293,10 +1307,12 @@ def get_differing_containers(self): for name, value in self.module.params.get('labels').iteritems(): expected_labels[name] = str(value) - actual_labels = {} - for container_label in container['Config']['Labels'] or []: - name, value = container_label.split('=', 1) - actual_labels[name] = value + if isinstance(container['Config']['Labels'], dict): + actual_labels = container['Config']['Labels'] + else: + for container_label in container['Config']['Labels'] or []: + name, value = container_label.split('=', 1) + actual_labels[name] = value if actual_labels != expected_labels: self.reload_reasons.append('labels {0} => {1}'.format(actual_labels, expected_labels)) @@ -1335,7 +1351,7 @@ def get_differing_containers(self): # STDIN_OPEN expected_stdin_open = self.module.params.get('stdin_open') - actual_stdin_open = container['Config']['AttachStdin'] + actual_stdin_open = container['Config']['OpenStdin'] if actual_stdin_open != expected_stdin_open: self.reload_reasons.append('stdin_open ({0} => {1})'.format(actual_stdin_open, expected_stdin_open)) differing.append(container) @@ -1366,14 +1382,8 @@ def get_differing_containers(self): expected_binds = set() if 
self.binds: - for host_path, config in self.binds.iteritems(): - if isinstance(config, dict): - container_path = config['bind'] - mode = config['mode'] - else: - container_path = config - mode = 'rw' - expected_binds.add("{0}:{1}:{2}".format(host_path, container_path, mode)) + for bind in self.binds: + expected_binds.add(bind) actual_binds = set() for bind in (container['HostConfig']['Binds'] or []): @@ -1848,12 +1858,12 @@ def main(): ports = dict(required=False, default=None, type='list'), publish_all_ports = dict(default=False, type='bool'), volumes = dict(default=None, type='list'), - volumes_from = dict(default=None), + volumes_from = dict(default=None, type='list'), links = dict(default=None, type='list'), devices = dict(default=None, type='list'), memory_limit = dict(default=0), - memory_swap = dict(default=0), - cpu_shares = dict(default=0), + memory_swap = dict(default=0, type='int'), + cpu_shares = dict(default=0, type='int'), docker_url = dict(), use_tls = dict(default=None, choices=['no', 'encrypt', 'verify']), tls_client_cert = dict(required=False, default=None, type='path'), @@ -1870,7 +1880,7 @@ def main(): domainname = dict(default=None), env = dict(type='dict'), env_file = dict(default=None), - dns = dict(), + dns = dict(default=None, type='list'), detach = dict(default=True, type='bool'), state = dict(default='started', choices=['present', 'started', 'reloaded', 'restarted', 'stopped', 'killed', 'absent', 'running']), signal = dict(default=None), diff --git a/cloud/docker/docker_container.py b/cloud/docker/docker_container.py new file mode 100644 index 00000000000..f921ce47177 --- /dev/null +++ b/cloud/docker/docker_container.py @@ -0,0 +1,2016 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or 
+# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: docker_container + +short_description: manage docker containers + +description: + - Manage the life cycle of docker containers. + - Supports check mode. Run with --check and --diff to view config difference and list of actions to be taken. + +version_added: "2.1.0" + +options: + blkio_weight: + description: + - Block IO (relative weight), between 10 and 1000. + default: null + required: false + capabilities: + description: + - List of capabilities to add to the container. + default: null + required: false + cleanup: + description: + - Use with I(detach) to remove the container after successful execution. + default: false + required: false + version_added: "2.2" + command: + description: + - Command to execute when the container starts. + default: null + required: false + cpu_period: + description: + - Limit CPU CFS (Completely Fair Scheduler) period + default: 0 + required: false + cpu_quota: + description: + - Limit CPU CFS (Completely Fair Scheduler) quota + default: 0 + required: false + cpuset_cpus: + description: + - CPUs in which to allow execution C(1,3) or C(1-3). + default: null + required: false + cpuset_mems: + description: + - Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1) + default: null + required: false + cpu_shares: + description: + - CPU shares (relative weight). + default: null + required: false + detach: + description: + - Enable detached mode to leave the container running in background. 
+ If disabled, the task will reflect the status of the container run (failed if the command failed). + default: true + required: false + devices: + description: + - "List of host device bindings to add to the container. Each binding is a mapping expressed + in the format: ::" + default: null + required: false + dns_servers: + description: + - List of custom DNS servers. + default: null + required: false + dns_search_domains: + description: + - List of custom DNS search domains. + default: null + required: false + env: + description: + - Dictionary of key,value pairs. + default: null + required: false + env_file: + version_added: "2.2" + description: + - Path to a file containing environment variables I(FOO=BAR). + - If variable also present in C(env), then C(env) value will override. + - Requires docker-py >= 1.4.0. + default: null + required: false + entrypoint: + description: + - Command that overwrites the default ENTRYPOINT of the image. + default: null + required: false + etc_hosts: + description: + - Dict of host-to-IP mappings, where each host name is a key in the dictionary. + Each host name will be added to the container's /etc/hosts file. + default: null + required: false + exposed_ports: + description: + - List of additional container ports which informs Docker that the container + listens on the specified network ports at runtime. + If the port is already exposed using EXPOSE in a Dockerfile, it does not + need to be exposed again. + default: null + required: false + aliases: + - exposed + force_kill: + description: + - Use the kill command when stopping a running container. + default: false + required: false + groups: + description: + - List of additional group names and/or IDs that the container process will run as. + default: null + required: false + hostname: + description: + - Container hostname. 
+ default: null + required: false + ignore_image: + description: + - When C(state) is I(present) or I(started) the module compares the configuration of an existing + container to requested configuration. The evaluation includes the image version. If + the image version in the registry does not match the container, the container will be + recreated. Stop this behavior by setting C(ignore_image) to I(True). + default: false + required: false + version_added: "2.2" + image: + description: + - Repository path and tag used to create the container. If an image is not found or pull is true, the image + will be pulled from the registry. If no tag is included, 'latest' will be used. + default: null + required: false + interactive: + description: + - Keep stdin open after a container is launched, even if not attached. + default: false + required: false + ipc_mode: + description: + - Set the IPC mode for the container. Can be one of 'container:' to reuse another + container's IPC namespace or 'host' to use the host's IPC namespace within the container. + default: null + required: false + keep_volumes: + description: + - Retain volumes associated with a removed container. + default: true + required: false + kill_signal: + description: + - Override default signal used to kill a running container. + default: null + required: false + kernel_memory: + description: + - "Kernel memory limit (format: []). Number is a positive integer. + Unit can be one of b, k, m, or g. Minimum is 4M." + default: 0 + required: false + labels: + description: + - Dictionary of key value pairs. + default: null + required: false + links: + description: + - List of name aliases for linked containers in the format C(container_name:alias) + default: null + required: false + log_driver: + description: + - Specify the logging driver. Docker uses json-file by default.
+ choices: + - json-file + - syslog + - journald + - gelf + - fluentd + - awslogs + - splunk + default: null + required: false + log_options: + description: + - Dictionary of options specific to the chosen log_driver. See https://docs.docker.com/engine/admin/logging/overview/ + for details. + required: false + default: null + mac_address: + description: + - Container MAC address (e.g. 92:d0:c6:0a:29:33) + default: null + required: false + memory: + description: + - "Memory limit (format: []). Number is a positive integer. + Unit can be one of b, k, m, or g" + default: 0 + required: false + memory_reservation: + description: + - "Memory soft limit (format: []). Number is a positive integer. + Unit can be one of b, k, m, or g" + default: 0 + required: false + memory_swap: + description: + - Total memory limit (memory + swap, format:[]). + Number is a positive integer. Unit can be one of b, k, m, or g. + default: 0 + required: false + memory_swappiness: + description: + - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + default: 0 + required: false + name: + description: + - Assign a name to a new container or match an existing container. + - When identifying an existing container name may be a name or a long or short container ID. + required: true + network_mode: + description: + - Connect the container to a network. + choices: + - bridge + - container: + - host + - none + default: null + required: false + networks: + description: + - List of networks the container belongs to. + - Each network is a dict with keys C(name), C(ipv4_address), C(ipv6_address), C(links), C(aliases). + - For each network C(name) is required, all other keys are optional. + - If included, C(links) or C(aliases) are lists. + - For examples of the data structure and usage see EXAMPLES below. + - To remove a container from one or more networks, use the C(purge_networks) option. 
+ default: null + required: false + version_added: "2.2" + oom_killer: + description: + - Whether or not to disable OOM Killer for the container. + default: false + required: false + oom_score_adj: + description: + - An integer value containing the score given to the container in order to tune OOM killer preferences. + default: 0 + required: false + version_added: "2.2" + paused: + description: + - Use with the started state to pause running processes inside the container. + default: false + required: false + pid_mode: + description: + - Set the PID namespace mode for the container. Currently only supports 'host'. + default: null + required: false + privileged: + description: + - Give extended privileges to the container. + default: false + required: false + published_ports: + description: + - List of ports to publish from the container to the host. + - "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a + container port, 9000 is a host port, and 0.0.0.0 is a host interface." + - Container ports must be exposed either in the Dockerfile or via the C(expose) option. + - A value of ALL will publish all exposed container ports to random host ports, ignoring + any other mappings. + - If C(networks) parameter is provided, will inspect each network to see if there exists + a bridge network with optional parameter com.docker.network.bridge.host_binding_ipv4. + If such a network is found, then published ports where no host IP address is specified + will be bound to the host IP pointed to by com.docker.network.bridge.host_binding_ipv4. + Note that the first bridge network with a com.docker.network.bridge.host_binding_ipv4 + value encountered in the list of C(networks) is the one that will be used. + aliases: + - ports + required: false + default: null + pull: + description: + - If true, always pull the latest version of an image. Otherwise, will only pull an image when missing. 
+ default: false + required: false + purge_networks: + description: + - Remove the container from ALL networks not included in C(networks) parameter. + - Any default networks such as I(bridge), if not found in C(networks), will be removed as well. + default: false + required: false + version_added: "2.2" + read_only: + description: + - Mount the container's root file system as read-only. + default: false + required: false + recreate: + description: + - Use with present and started states to force the re-creation of an existing container. + default: false + required: false + restart: + description: + - Use with started state to force a matching container to be stopped and restarted. + default: false + required: false + restart_policy: + description: + - Container restart policy. Place quotes around I(no) option. + choices: + - always + - no + - on-failure + - unless-stopped + default: on-failure + required: false + restart_retries: + description: + - Use with restart policy to control maximum number of restart attempts. + default: 0 + required: false + shm_size: + description: + - Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). + - Omitting the unit defaults to bytes. If you omit the size entirely, the system uses `64m`. + default: null + required: false + security_opts: + description: + - List of security options in the form of C("label:user:User") + default: null + required: false + state: + description: + - 'I(absent) - A container matching the specified name will be stopped and removed. Use force_kill to kill the container + rather than stopping it. Use keep_volumes to retain volumes associated with the removed container.' + - 'I(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no + container matches the name, a container will be created. 
If a container matches the name but the provided configuration + does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created + with the requested config. Image version will be taken into account when comparing configuration. To ignore image + version use the ignore_image option. Use the recreate option to force the re-creation of the matching container. Use + force_kill to kill the container rather than stopping it. Use keep_volumes to retain volumes associated with a removed + container.' + - 'I(started) - Asserts there is a running container matching the name and any provided configuration. If no container + matches the name, a container will be created and started. If a container matching the name is found but the + configuration does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed + and a new container will be created with the requested configuration and started. Image version will be taken into + account when comparing configuration. To ignore image version use the ignore_image option. Use recreate to always + re-create a matching container, even if it is running. Use restart to force a matching container to be stopped and + restarted. Use force_kill to kill a container rather than stopping it. Use keep_volumes to retain volumes associated + with a removed container.' + - 'I(stopped) - Asserts that the container is first I(present), and then if the container is running moves it to a stopped + state. Use force_kill to kill a container rather than stopping it.' + required: false + default: started + choices: + - absent + - present + - stopped + - started + stop_signal: + description: + - Override default signal used to stop the container. + default: null + required: false + stop_timeout: + description: + - Number of seconds to wait for the container to stop before sending SIGKILL. 
+ required: false + default: null + trust_image_content: + description: + - If true, skip image verification. + default: false + required: false + tty: + description: + - Allocate a psuedo-TTY. + default: false + required: false + ulimits: + description: + - "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)" + default: null + required: false + user: + description: + - Sets the username or UID used and optionally the groupname or GID for the specified command. + - "Can be [ user | user:group | uid | uid:gid | user:gid | uid:group ]" + default: null + required: false + uts: + description: + - Set the UTS namespace mode for the container. + default: null + required: false + volumes: + description: + - List of volumes to mount within the container. + - "Use docker CLI-style syntax: C(/host:/container[:mode])" + - You can specify a read mode for the mount with either C(ro) or C(rw). + - SELinux hosts can additionally use C(z) or C(Z) to use a shared or + private label for the volume. + default: null + required: false + volume_driver: + description: + - The container volume driver. + default: none + required: false + volumes_from: + description: + - List of container names or Ids to get volumes from. 
+ default: null + required: false +extends_documentation_fragment: + - docker + +author: + - "Cove Schneider (@cove)" + - "Joshua Conner (@joshuaconner)" + - "Pavel Antonov (@softzilla)" + - "Thomas Steinbach (@ThomasSteinbach)" + - "Philippe Jandot (@zfil)" + - "Daan Oosterveld (@dusdanig)" + - "James Tanner (@jctanner)" + - "Chris Houseknecht (@chouseknecht)" + +requirements: + - "python >= 2.6" + - "docker-py >= 1.7.0" + - "Docker API >= 1.20" +''' + +EXAMPLES = ''' +- name: Create a data container + docker_container: + name: mydata + image: busybox + volumes: + - /data + +- name: Re-create a redis container + docker_container: + name: myredis + image: redis + command: redis-server --appendonly yes + state: present + recreate: yes + exposed_ports: + - 6379 + volumes_from: + - mydata + +- name: Restart a container + docker_container: + name: myapplication + image: someuser/appimage + state: started + restart: yes + links: + - "myredis:aliasedredis" + devices: + - "/dev/sda:/dev/xvda:rwm" + ports: + - "8080:9000" + - "127.0.0.1:8081:9001/udp" + env: + SECRET_KEY: ssssh + +- name: Container present + docker_container: + name: mycontainer + state: present + image: ubuntu:14.04 + command: sleep infinity + +- name: Stop a container + docker_container: + name: mycontainer + state: stopped + +- name: Start 4 load-balanced containers + docker_container: + name: "container{{ item }}" + recreate: yes + image: someuser/anotherappimage + command: sleep 1d + with_sequence: count=4 + +- name: remove container + docker_container: + name: ohno + state: absent + +- name: Syslogging output + docker_container: + name: myservice + image: busybox + log_driver: syslog + log_options: + syslog-address: tcp://my-syslog-server:514 + syslog-facility: daemon + syslog-tag: myservice + +- name: Create db container and connect to network + docker_container: + name: db_test + image: "postgres:latest" + networks: + - name: "{{ docker_network_name }}" + +- name: Start container, connect to 
network and link + docker_container: + name: sleeper + image: ubuntu:14.04 + networks: + - name: TestingNet + ipv4_address: "172.1.1.100" + aliases: + - sleepyzz + links: + - db_test:db + - name: TestingNet2 + +- name: Start a container with a command + docker_container: + name: sleepy + image: ubuntu:14.04 + command: sleep infinity + +- name: Add container to networks + docker_container: + name: sleepy + networks: + - name: TestingNet + ipv4_address: 172.1.1.18 + links: + - sleeper + - name: TestingNet2 + ipv4_address: 172.1.10.20 + +- name: Update network with aliases + docker_container: + name: sleepy + networks: + - name: TestingNet + aliases: + - sleepyz + - zzzz + +- name: Remove container from one network + docker_container: + name: sleepy + networks: + - name: TestingNet2 + purge_networks: yes + +- name: Remove container from all networks + docker_container: + name: sleepy + purge_networks: yes + +''' + +RETURN = ''' +ansible_docker_container: + description: + - Facts representing the current state of the container. Matches the docker inspection output. + - Note that facts are not part of registered vars but accessible directly. + - Empty if C(state) is I(absent) + - If detached is I(False), will include Output attribute containing any output from container run. + returned: always + type: dict + sample: '{ + "AppArmorProfile": "", + "Args": [], + "Config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/usr/bin/supervisord" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": { + "443/tcp": {}, + "80/tcp": {} + }, + "Hostname": "8e47bf643eb9", + "Image": "lnmp_nginx:v1", + "Labels": {}, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/tmp/lnmp/nginx-sites/logs/": {} + }, + ... 
+ }' +''' + +import re + +from ansible.module_utils.docker_common import * + +try: + from docker import utils + from docker.utils.types import Ulimit +except: + # missing docker-py handled in ansible.module_utils.docker + pass + + +REQUIRES_CONVERSION_TO_BYTES = [ + 'memory', + 'memory_reservation', + 'memory_swap', + 'shm_size' +] + +VOLUME_PERMISSIONS = ('rw', 'ro', 'z', 'Z') + +class TaskParameters(DockerBaseClass): + ''' + Access and parse module parameters + ''' + + def __init__(self, client): + super(TaskParameters, self).__init__() + self.client = client + + self.blkio_weight = None + self.capabilities = None + self.cleanup = None + self.command = None + self.cpu_period = None + self.cpu_quota = None + self.cpuset_cpus = None + self.cpuset_mems = None + self.cpu_shares = None + self.detach = None + self.debug = None + self.devices = None + self.dns_servers = None + self.dns_opts = None + self.dns_search_domains = None + self.env = None + self.env_file = None + self.entrypoint = None + self.etc_hosts = None + self.exposed_ports = None + self.force_kill = None + self.groups = None + self.hostname = None + self.ignore_image = None + self.image = None + self.interactive = None + self.ipc_mode = None + self.keep_volumes = None + self.kernel_memory = None + self.kill_signal = None + self.labels = None + self.links = None + self.log_driver = None + self.log_options = None + self.mac_address = None + self.memory = None + self.memory_reservation = None + self.memory_swap = None + self.memory_swappiness = None + self.name = None + self.network_mode = None + self.networks = None + self.oom_killer = None + self.oom_score_adj = None + self.paused = None + self.pid_mode = None + self.privileged = None + self.purge_networks = None + self.pull = None + self.read_only = None + self.recreate = None + self.restart = None + self.restart_retries = None + self.restart_policy = None + self.shm_size = None + self.security_opts = None + self.state = None + self.stop_signal = None + 
self.stop_timeout = None + self.trust_image_content = None + self.tty = None + self.user = None + self.uts = None + self.volumes = None + self.volume_binds = dict() + self.volumes_from = None + self.volume_driver = None + + for key, value in client.module.params.items(): + setattr(self, key, value) + + for param_name in REQUIRES_CONVERSION_TO_BYTES: + if client.module.params.get(param_name): + try: + setattr(self, param_name, human_to_bytes(client.module.params.get(param_name))) + except ValueError as exc: + self.fail("Failed to convert %s to bytes: %s" % (param_name, exc)) + + self.publish_all_ports = False + self.published_ports = self._parse_publish_ports() + if self.published_ports == 'all': + self.publish_all_ports = True + self.published_ports = None + + self.ports = self._parse_exposed_ports(self.published_ports) + self.log("expose ports:") + self.log(self.ports, pretty_print=True) + + self.links = self._parse_links(self.links) + + if self.volumes: + self.volumes = self._expand_host_paths() + + self.env = self._get_environment() + self.ulimits = self._parse_ulimits() + self.log_config = self._parse_log_config() + self.exp_links = None + self.volume_binds = self._get_volume_binds(self.volumes) + + self.log("volumes:") + self.log(self.volumes, pretty_print=True) + self.log("volume binds:") + self.log(self.volume_binds, pretty_print=True) + + if self.networks: + for network in self.networks: + if not network.get('name'): + self.fail("Parameter error: network must have a name attribute.") + network['id'] = self._get_network_id(network['name']) + if not network['id']: + self.fail("Parameter error: network named %s could not be found. Does it exist?" 
% network['name']) + if network.get('links'): + network['links'] = self._parse_links(network['links']) + + def fail(self, msg): + self.client.module.fail_json(msg=msg) + + @property + def update_parameters(self): + ''' + Returns parameters used to update a container + ''' + + update_parameters = dict( + blkio_weight='blkio_weight', + cpu_period='cpu_period', + cpu_quota='cpu_quota', + cpu_shares='cpu_shares', + cpuset_cpus='cpuset_cpus', + mem_limit='memory', + mem_reservation='mem_reservation', + memswap_limit='memory_swap', + kernel_memory='kernel_memory' + ) + result = dict() + for key, value in update_parameters.iteritems(): + if getattr(self, value, None) is not None: + result[key] = getattr(self, value) + return result + + @property + def create_parameters(self): + ''' + Returns parameters used to create a container + ''' + create_params = dict( + command='command', + hostname='hostname', + user='user', + detach='detach', + stdin_open='interactive', + tty='tty', + ports='ports', + environment='env', + name='name', + entrypoint='entrypoint', + cpu_shares='cpu_shares', + mac_address='mac_address', + labels='labels', + stop_signal='stop_signal', + volume_driver='volume_driver', + ) + + result = dict( + host_config=self._host_config(), + volumes=self._get_mounts(), + ) + + for key, value in create_params.items(): + if getattr(self, value, None) is not None: + result[key] = getattr(self, value) + return result + + def _expand_host_paths(self): + new_vols = [] + for vol in self.volumes: + if ':' in vol: + if len(vol.split(':')) == 3: + host, container, mode = vol.split(':') + if re.match(r'[\.~]', host): + host = os.path.abspath(host) + new_vols.append("%s:%s:%s" % (host, container, mode)) + continue + elif len(vol.split(':')) == 2: + parts = vol.split(':') + if parts[1] not in VOLUME_PERMISSIONS and re.match(r'[\.~]', parts[0]): + host = os.path.abspath(parts[0]) + new_vols.append("%s:%s:rw" % (host, parts[1])) + continue + new_vols.append(vol) + return new_vols + 
+ def _get_mounts(self): + ''' + Return a list of container mounts. + :return: + ''' + result = [] + if self.volumes: + for vol in self.volumes: + if ':' in vol: + if len(vol.split(':')) == 3: + host, container, _ = vol.split(':') + result.append(container) + continue + if len(vol.split(':')) == 2: + parts = vol.split(':') + if parts[1] not in VOLUME_PERMISSIONS: + result.append(parts[1]) + continue + result.append(vol) + self.log("mounts:") + self.log(result, pretty_print=True) + return result + + def _host_config(self): + ''' + Returns parameters used to create a HostConfig object + ''' + + host_config_params=dict( + port_bindings='published_ports', + publish_all_ports='publish_all_ports', + links='links', + privileged='privileged', + dns='dns_servers', + dns_search='dns_search_domains', + binds='volume_binds', + volumes_from='volumes_from', + network_mode='network_mode', + cap_add='capabilities', + extra_hosts='etc_hosts', + read_only='read_only', + ipc_mode='ipc_mode', + security_opt='security_opts', + ulimits='ulimits', + log_config='log_config', + mem_limit='memory', + memswap_limit='memory_swap', + mem_swappiness='memory_swappiness', + oom_score_adj='oom_score_adj', + shm_size='shm_size', + group_add='groups', + devices='devices', + pid_mode='pid_mode' + ) + params = dict() + for key, value in host_config_params.iteritems(): + if getattr(self, value, None) is not None: + params[key] = getattr(self, value) + + if self.restart_policy: + params['restart_policy'] = dict(Name=self.restart_policy, + MaximumRetryCount=self.restart_retries) + + return self.client.create_host_config(**params) + + @property + def default_host_ip(self): + ip = '0.0.0.0' + if not self.networks: + return ip + for net in self.networks: + if net.get('name'): + network = self.client.inspect_network(net['name']) + if network.get('Driver') == 'bridge' and \ + network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'): + ip = 
network['Options']['com.docker.network.bridge.host_binding_ipv4'] + break + return ip + + def _parse_publish_ports(self): + ''' + Parse ports from docker CLI syntax + ''' + if self.published_ports is None: + return None + + if 'all' in self.published_ports: + return 'all' + + default_ip = self.default_host_ip + + binds = {} + for port in self.published_ports: + parts = str(port).split(':') + container_port = parts[-1] + if '/' not in container_port: + container_port = int(parts[-1]) + + p_len = len(parts) + if p_len == 1: + bind = (default_ip,) + elif p_len == 2: + bind = (default_ip, int(parts[0])) + elif p_len == 3: + bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],) + + if container_port in binds: + old_bind = binds[container_port] + if isinstance(old_bind, list): + old_bind.append(bind) + else: + binds[container_port] = [binds[container_port], bind] + else: + binds[container_port] = bind + return binds + + @staticmethod + def _get_volume_binds(volumes): + ''' + Extract host bindings, if any, from list of volume mapping strings. + + :return: dictionary of bind mappings + ''' + result = dict() + if volumes: + for vol in volumes: + host = None + if ':' in vol: + if len(vol.split(':')) == 3: + host, container, mode = vol.split(':') + if len(vol.split(':')) == 2: + parts = vol.split(':') + if parts[1] not in VOLUME_PERMISSIONS: + host, container, mode = (vol.split(':') + ['rw']) + if host is not None: + result[host] = dict( + bind=container, + mode=mode + ) + return result + + def _parse_exposed_ports(self, published_ports): + ''' + Parse exposed ports from docker CLI-style ports syntax. 
+ ''' + exposed = [] + if self.exposed_ports: + for port in self.exposed_ports: + port = str(port).strip() + protocol = 'tcp' + match = re.search(r'(/.+$)', port) + if match: + protocol = match.group(1).replace('/', '') + port = re.sub(r'/.+$', '', port) + exposed.append((port, protocol)) + if published_ports: + # Any published port should also be exposed + for publish_port in published_ports: + match = False + if isinstance(publish_port, basestring) and '/' in publish_port: + port, protocol = publish_port.split('/') + port = int(port) + else: + protocol = 'tcp' + port = int(publish_port) + for exposed_port in exposed: + if isinstance(exposed_port[0], basestring) and '-' in exposed_port[0]: + start_port, end_port = exposed_port[0].split('-') + if int(start_port) <= port <= int(end_port): + match = True + elif exposed_port[0] == port: + match = True + if not match: + exposed.append((port, protocol)) + return exposed + + @staticmethod + def _parse_links(links): + ''' + Turn links into a dictionary + ''' + if links is None: + return None + + result = {} + for link in links: + parsed_link = link.split(':', 1) + if len(parsed_link) == 2: + result[parsed_link[0]] = parsed_link[1] + else: + result[parsed_link[0]] = parsed_link[0] + return result + + def _parse_ulimits(self): + ''' + Turn ulimits into an array of Ulimit objects + ''' + if self.ulimits is None: + return None + + results = [] + for limit in self.ulimits: + limits = dict() + pieces = limit.split(':') + if len(pieces) >= 2: + limits['name'] = pieces[0] + limits['soft'] = int(pieces[1]) + limits['hard'] = int(pieces[1]) + if len(pieces) == 3: + limits['hard'] = int(pieces[2]) + try: + results.append(Ulimit(**limits)) + except ValueError as exc: + self.fail("Error parsing ulimits value %s - %s" % (limit, exc)) + return results + + def _parse_log_config(self): + ''' + Create a LogConfig object + ''' + if self.log_driver is None: + return None + + options = dict( + Type=self.log_driver, + Config = dict() + ) + + 
if self.log_options is not None: + options['Config'] = self.log_options + + try: + return LogConfig(**options) + except ValueError as exc: + self.fail('Error parsing logging options - %s' % (exc)) + + def _get_environment(self): + """ + If environment file is combined with explicit environment variables, the explicit environment variables + take precedence. + """ + final_env = {} + if self.env_file: + parsed_env_file = utils.parse_env_file(self.env_file) + for name, value in parsed_env_file.iteritems(): + final_env[name] = str(value) + if self.env: + for name, value in self.env.iteritems(): + final_env[name] = str(value) + return final_env + + def _get_network_id(self, network_name): + network_id = None + try: + for network in self.client.networks(names=[network_name]): + if network['Name'] == network_name: + network_id = network['Id'] + break + except Exception as exc: + self.fail("Error getting network id for %s - %s" % (network_name, str(exc))) + return network_id + + + +class Container(DockerBaseClass): + + def __init__(self, container, parameters): + super(Container, self).__init__() + self.raw = container + self.Id = None + self.container = container + if container: + self.Id = container['Id'] + self.Image = container['Image'] + self.log(self.container, pretty_print=True) + self.parameters = parameters + self.parameters.expected_links = None + self.parameters.expected_ports = None + self.parameters.expected_exposed = None + self.parameters.expected_volumes = None + self.parameters.expected_ulimits = None + self.parameters.expected_etc_hosts = None + self.parameters.expected_env = None + + def fail(self, msg): + self.parameters.client.module.fail_json(msg=msg) + + @property + def exists(self): + return True if self.container else False + + @property + def running(self): + if self.container and self.container.get('State'): + if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False): + return True + return False + + def 
has_different_configuration(self, image): + ''' + Diff parameters vs existing container config. Returns tuple: (True | False, List of differences) + ''' + self.log('Starting has_different_configuration') + self.parameters.expected_entrypoint = self._get_expected_entrypoint() + self.parameters.expected_links = self._get_expected_links() + self.parameters.expected_ports = self._get_expected_ports() + self.parameters.expected_exposed = self._get_expected_exposed(image) + self.parameters.expected_volumes = self._get_expected_volumes(image) + self.parameters.expected_binds = self._get_expected_binds(image) + self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits) + self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts') + self.parameters.expected_env = self._get_expected_env(image) + self.parameters.expected_cmd = self._get_expected_cmd() + self.parameters.expected_devices = self._get_expected_devices() + + if not self.container.get('HostConfig'): + self.fail("has_config_diff: Error parsing container properties. HostConfig missing.") + if not self.container.get('Config'): + self.fail("has_config_diff: Error parsing container properties. Config missing.") + if not self.container.get('NetworkSettings'): + self.fail("has_config_diff: Error parsing container properties. NetworkSettings missing.") + + host_config = self.container['HostConfig'] + log_config = host_config.get('LogConfig', dict()) + restart_policy = host_config.get('RestartPolicy', dict()) + config = self.container['Config'] + network = self.container['NetworkSettings'] + + # The previous version of the docker module ignored the detach state by + # assuming if the container was running, it must have been detached. 
+ detach = not (config.get('AttachStderr') and config.get('AttachStdout')) + + # "ExposedPorts": null returns None type & causes AttributeError - PR #5517 + if config.get('ExposedPorts') is not None: + expected_exposed = [re.sub(r'/.+$', '', p) for p in config.get('ExposedPorts', dict()).keys()] + else: + expected_exposed = [] + + # Map parameters to container inspect results + config_mapping = dict( + image=config.get('Image'), + expected_cmd=config.get('Cmd'), + hostname=config.get('Hostname'), + user=config.get('User'), + detach=detach, + interactive=config.get('OpenStdin'), + capabilities=host_config.get('CapAdd'), + expected_devices=host_config.get('Devices'), + dns_servers=host_config.get('Dns'), + dns_opts=host_config.get('DnsOptions'), + dns_search_domains=host_config.get('DnsSearch'), + expected_env=(config.get('Env') or []), + expected_entrypoint=config.get('Entrypoint'), + expected_etc_hosts=host_config['ExtraHosts'], + expected_exposed=expected_exposed, + groups=host_config.get('GroupAdd'), + ipc_mode=host_config.get("IpcMode"), + labels=config.get('Labels'), + expected_links=host_config.get('Links'), + log_driver=log_config.get('Type'), + log_options=log_config.get('Config'), + mac_address=network.get('MacAddress'), + memory_swappiness=host_config.get('MemorySwappiness'), + network_mode=host_config.get('NetworkMode'), + oom_killer=host_config.get('OomKillDisable'), + oom_score_adj=host_config.get('OomScoreAdj'), + pid_mode=host_config.get('PidMode'), + privileged=host_config.get('Privileged'), + expected_ports=host_config.get('PortBindings'), + read_only=host_config.get('ReadonlyRootfs'), + restart_policy=restart_policy.get('Name'), + restart_retries=restart_policy.get('MaximumRetryCount'), + # Cannot test shm_size, as shm_size is not included in container inspection results. 
+ # shm_size=host_config.get('ShmSize'), + security_opts=host_config.get("SecuriytOpt"), + stop_signal=config.get("StopSignal"), + tty=config.get('Tty'), + expected_ulimits=host_config.get('Ulimits'), + uts=host_config.get('UTSMode'), + expected_volumes=config.get('Volumes'), + expected_binds=host_config.get('Binds'), + volumes_from=host_config.get('VolumesFrom'), + volume_driver=host_config.get('VolumeDriver') + ) + + differences = [] + for key, value in config_mapping.iteritems(): + self.log('check differences %s %s vs %s' % (key, getattr(self.parameters, key), str(value))) + if getattr(self.parameters, key, None) is not None: + if isinstance(getattr(self.parameters, key), list) and isinstance(value, list): + if len(getattr(self.parameters, key)) > 0 and isinstance(getattr(self.parameters, key)[0], dict): + # compare list of dictionaries + self.log("comparing list of dict: %s" % key) + match = self._compare_dictionary_lists(getattr(self.parameters, key), value) + else: + # compare two lists. Is list_a in list_b? + self.log("comparing lists: %s" % key) + set_a = set(getattr(self.parameters, key)) + set_b = set(value) + match = (set_a <= set_b) + elif isinstance(getattr(self.parameters, key), dict) and isinstance(value, dict): + # compare two dicts + self.log("comparing two dicts: %s" % key) + match = self._compare_dicts(getattr(self.parameters, key), value) + else: + # primitive compare + self.log("primitive compare: %s" % key) + match = (getattr(self.parameters, key) == value) + + if not match: + # no match. 
record the differences + item = dict() + item[key] = dict( + parameter=getattr(self.parameters, key), + container=value + ) + differences.append(item) + + has_differences = True if len(differences) > 0 else False + return has_differences, differences + + def _compare_dictionary_lists(self, list_a, list_b): + ''' + If all of list_a exists in list_b, return True + ''' + if not isinstance(list_a, list) or not isinstance(list_b, list): + return False + matches = 0 + for dict_a in list_a: + for dict_b in list_b: + if self._compare_dicts(dict_a, dict_b): + matches += 1 + break + result = (matches == len(list_a)) + return result + + def _compare_dicts(self, dict_a, dict_b): + ''' + If dict_a in dict_b, return True + ''' + if not isinstance(dict_a, dict) or not isinstance(dict_b, dict): + return False + for key, value in dict_a.iteritems(): + if isinstance(value, dict): + match = self._compare_dicts(value, dict_b.get(key)) + elif isinstance(value, list): + if len(value) > 0 and isinstance(value[0], dict): + match = self._compare_dictionary_lists(value, dict_b.get(key)) + else: + set_a = set(value) + set_b = set(dict_b.get(key)) + match = (set_a == set_b) + else: + match = (value == dict_b.get(key)) + if not match: + return False + return True + + def has_different_resource_limits(self): + ''' + Diff parameters and container resource limits + ''' + if not self.container.get('HostConfig'): + self.fail("limits_differ_from_container: Error parsing container properties. 
HostConfig missing.") + + host_config = self.container['HostConfig'] + + config_mapping = dict( + cpu_period=host_config.get('CpuPeriod'), + cpu_quota=host_config.get('CpuQuota'), + cpuset_cpus=host_config.get('CpusetCpus'), + cpuset_mems=host_config.get('CpusetMems'), + cpu_shares=host_config.get('CpuShares'), + kernel_memory=host_config.get("KernelMemory"), + memory=host_config.get('Memory'), + memory_reservation=host_config.get('MemoryReservation'), + memory_swap=host_config.get('MemorySwap'), + oom_score_adj=host_config.get('OomScoreAdj'), + ) + + differences = [] + for key, value in config_mapping.iteritems(): + if getattr(self.parameters, key, None) and getattr(self.parameters, key) != value: + # no match. record the differences + item = dict() + item[key] = dict( + parameter=getattr(self.parameters, key), + container=value + ) + differences.append(item) + different = (len(differences) > 0) + return different, differences + + def has_network_differences(self): + ''' + Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6 + ''' + different = False + differences = [] + + if not self.parameters.networks: + return different, differences + + if not self.container.get('NetworkSettings'): + self.fail("has_missing_networks: Error parsing container properties. 
NetworkSettings missing.") + + connected_networks = self.container['NetworkSettings']['Networks'] + for network in self.parameters.networks: + if connected_networks.get(network['name'], None) is None: + different = True + differences.append(dict( + parameter=network, + container=None + )) + else: + diff = False + if network.get('ipv4_address') and network['ipv4_address'] != connected_networks[network['name']].get('IPAddress'): + diff = True + if network.get('ipv6_address') and network['ipv6_address'] != connected_networks[network['name']].get('GlobalIPv6Address'): + diff = True + if network.get('aliases') and not connected_networks[network['name']].get('Aliases'): + diff = True + if network.get('aliases') and connected_networks[network['name']].get('Aliases'): + for alias in network.get('aliases'): + if alias not in connected_networks[network['name']].get('Aliases', []): + diff = True + if network.get('links') and not connected_networks[network['name']].get('Links'): + diff = True + if network.get('links') and connected_networks[network['name']].get('Links'): + expected_links = [] + for link, alias in network['links'].iteritems(): + expected_links.append("%s:%s" % (link, alias)) + for link in expected_links: + if link not in connected_networks[network['name']].get('Links', []): + diff = True + if diff: + different = True + differences.append(dict( + parameter=network, + container=dict( + name=network['name'], + ipv4_address=connected_networks[network['name']].get('IPAddress'), + ipv6_address=connected_networks[network['name']].get('GlobalIPv6Address'), + aliases=connected_networks[network['name']].get('Aliases'), + links=connected_networks[network['name']].get('Links') + ) + )) + return different, differences + + def has_extra_networks(self): + ''' + Check if the container is connected to non-requested networks + ''' + extra_networks = [] + extra = False + + if not self.container.get('NetworkSettings'): + self.fail("has_extra_networks: Error parsing container 
properties. NetworkSettings missing.") + + connected_networks = self.container['NetworkSettings'].get('Networks') + if connected_networks: + for network, network_config in connected_networks.iteritems(): + keep = False + if self.parameters.networks: + for expected_network in self.parameters.networks: + if expected_network['name'] == network: + keep = True + if not keep: + extra = True + extra_networks.append(dict(name=network, id=network_config['NetworkID'])) + return extra, extra_networks + + def _get_expected_devices(self): + if not self.parameters.devices: + return None + expected_devices = [] + for device in self.parameters.devices: + parts = device.split(':') + if len(parts) == 1: + expected_devices.append( + dict( + CgroupPermissions='rwm', + PathInContainer=parts[0], + PathOnHost=parts[0] + )) + elif len(parts) == 2: + parts = device.split(':') + expected_devices.append( + dict( + CgroupPermissions='rwm', + PathInContainer=parts[1], + PathOnHost=parts[0] + ) + ) + else: + expected_devices.append( + dict( + CgroupPermissions=parts[2], + PathInContainer=parts[1], + PathOnHost=parts[0] + )) + return expected_devices + + def _get_expected_entrypoint(self): + self.log('_get_expected_entrypoint') + if not self.parameters.entrypoint: + return None + return shlex.split(self.parameters.entrypoint) + + def _get_expected_ports(self): + if not self.parameters.published_ports: + return None + expected_bound_ports = {} + for container_port, config in self.parameters.published_ports.iteritems(): + if isinstance(container_port, int): + container_port = "%s/tcp" % container_port + if len(config) == 1: + expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}] + elif isinstance(config[0], tuple): + expected_bound_ports[container_port] = [] + for host_ip, host_port in config: + expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': str(host_port)}) + else: + expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': 
str(config[1])}] + return expected_bound_ports + + def _get_expected_links(self): + if self.parameters.links is None: + return None + self.log('parameter links:') + self.log(self.parameters.links, pretty_print=True) + exp_links = [] + for link, alias in self.parameters.links.iteritems(): + exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias)) + return exp_links + + def _get_expected_binds(self, image): + self.log('_get_expected_binds') + image_vols = [] + if image: + image_vols = self._get_image_binds(image['ContainerConfig'].get('Volumes')) + param_vols = [] + if self.parameters.volumes: + for vol in self.parameters.volumes: + host = None + if ':' in vol: + if len(vol.split(':')) == 3: + host, container, mode = vol.split(':') + if len(vol.split(':')) == 2: + parts = vol.split(':') + if parts[1] not in VOLUME_PERMISSIONS: + host, container, mode = vol.split(':') + ['rw'] + if host: + param_vols.append("%s:%s:%s" % (host, container, mode)) + result = list(set(image_vols + param_vols)) + self.log("expected_binds:") + self.log(result, pretty_print=True) + return result + + def _get_image_binds(self, volumes): + ''' + Convert array of binds to array of strings with format host_path:container_path:mode + + :param volumes: array of bind dicts + :return: array of strings + ''' + results = [] + if isinstance(volumes, dict): + results += self._get_bind_from_dict(volumes) + elif isinstance(volumes, list): + for vol in volumes: + results += self._get_bind_from_dict(vol) + return results + + @staticmethod + def _get_bind_from_dict(volume_dict): + results = [] + if volume_dict: + for host_path, config in volume_dict.items(): + if isinstance(config, dict) and config.get('bind'): + container_path = config.get('bind') + mode = config.get('mode', 'rw') + results.append("%s:%s:%s" % (host_path, container_path, mode)) + return results + + def _get_expected_volumes(self, image): + self.log('_get_expected_volumes') + expected_vols = dict() + if image and 
image['ContainerConfig'].get('Volumes'): + expected_vols.update(image['ContainerConfig'].get('Volumes')) + + if self.parameters.volumes: + for vol in self.parameters.volumes: + container = None + if ':' in vol: + if len(vol.split(':')) == 3: + host, container, mode = vol.split(':') + if len(vol.split(':')) == 2: + parts = vol.split(':') + if parts[1] not in VOLUME_PERMISSIONS: + host, container, mode = vol.split(':') + ['rw'] + new_vol = dict() + if container: + new_vol[container] = dict() + else: + new_vol[vol] = dict() + expected_vols.update(new_vol) + + if not expected_vols: + expected_vols = None + self.log("expected_volumes:") + self.log(expected_vols, pretty_print=True) + return expected_vols + + def _get_expected_env(self, image): + self.log('_get_expected_env') + expected_env = dict() + if image and image['ContainerConfig'].get('Env'): + for env_var in image['ContainerConfig']['Env']: + parts = env_var.split('=', 1) + expected_env[parts[0]] = parts[1] + if self.parameters.env: + expected_env.update(self.parameters.env) + param_env = [] + for key, value in expected_env.items(): + param_env.append("%s=%s" % (key, value)) + return param_env + + def _get_expected_exposed(self, image): + self.log('_get_expected_exposed') + image_ports = [] + if image: + image_ports = [re.sub(r'/.+$', '', p) for p in (image['ContainerConfig'].get('ExposedPorts') or {}).keys()] + param_ports = [] + if self.parameters.ports: + param_ports = [str(p[0]) for p in self.parameters.ports] + result = list(set(image_ports + param_ports)) + self.log(result, pretty_print=True) + return result + + def _get_expected_ulimits(self, config_ulimits): + self.log('_get_expected_ulimits') + if config_ulimits is None: + return None + results = [] + for limit in config_ulimits: + results.append(dict( + Name=limit.name, + Soft=limit.soft, + Hard=limit.hard + )) + return results + + def _get_expected_cmd(self): + self.log('_get_expected_cmd') + if not self.parameters.command: + return None + return 
shlex.split(self.parameters.command) + + def _convert_simple_dict_to_list(self, param_name, join_with=':'): + if getattr(self.parameters, param_name, None) is None: + return None + results = [] + for key, value in getattr(self.parameters, param_name).iteritems(): + results.append("%s%s%s" % (key, join_with, value)) + return results + + +class ContainerManager(DockerBaseClass): + ''' + Perform container management tasks + ''' + + def __init__(self, client): + + super(ContainerManager, self).__init__() + + self.client = client + self.parameters = TaskParameters(client) + self.check_mode = self.client.check_mode + self.results = {'changed': False, 'actions': []} + self.diff = {} + self.facts = {} + + state = self.parameters.state + if state in ('stopped', 'started', 'present'): + self.present(state) + elif state == 'absent': + self.absent() + + if not self.check_mode and not self.parameters.debug: + self.results.pop('actions') + + if self.client.module._diff or self.parameters.debug: + self.results['diff'] = self.diff + + if self.facts: + self.results['ansible_facts'] = {'ansible_docker_container': self.facts} + + def present(self, state): + container = self._get_container(self.parameters.name) + image = self._get_image() + + if not container.exists: + # New container + self.log('No container found') + new_container = self.container_create(self.parameters.image, self.parameters.create_parameters) + if new_container: + container = new_container + else: + # Existing container + different, differences = container.has_different_configuration(image) + image_different = False + if not self.parameters.ignore_image: + image_different = self._image_is_different(image, container) + if image_different or different or self.parameters.recreate: + self.diff['differences'] = differences + if image_different: + self.diff['image_different'] = True + self.log("differences") + self.log(differences, pretty_print=True) + if container.running: + self.container_stop(container.Id) + 
self.container_remove(container.Id) + new_container = self.container_create(self.parameters.image, self.parameters.create_parameters) + if new_container: + container = new_container + + if container and container.exists: + container = self.update_limits(container) + container = self.update_networks(container) + + if state == 'started' and not container.running: + container = self.container_start(container.Id) + elif state == 'started' and self.parameters.restart: + self.container_stop(container.Id) + container = self.container_start(container.Id) + elif state == 'stopped' and container.running: + self.container_stop(container.Id) + container = self._get_container(container.Id) + + self.facts = container.raw + + def absent(self): + container = self._get_container(self.parameters.name) + if container.exists: + if container.running: + self.container_stop(container.Id) + self.container_remove(container.Id) + + def fail(self, msg, **kwargs): + self.client.module.fail_json(msg=msg, **kwargs) + + def _get_container(self, container): + ''' + Expects container ID or Name. 
Returns a container object + ''' + return Container(self.client.get_container(container), self.parameters) + + def _get_image(self): + if not self.parameters.image: + self.log('No image specified') + return None + repository, tag = utils.parse_repository_tag(self.parameters.image) + if not tag: + tag = "latest" + image = self.client.find_image(repository, tag) + if not self.check_mode: + if not image or self.parameters.pull: + self.log("Pull the image.") + image = self.client.pull_image(repository, tag) + self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) + self.results['changed'] = True + self.log("image") + self.log(image, pretty_print=True) + return image + + def _image_is_different(self, image, container): + if image and image.get('Id'): + if container and container.Image: + if image.get('Id') != container.Image: + return True + return False + + def update_limits(self, container): + limits_differ, different_limits = container.has_different_resource_limits() + if limits_differ: + self.log("limit differences:") + self.log(different_limits, pretty_print=True) + if limits_differ and not self.check_mode: + self.container_update(container.Id, self.parameters.update_parameters) + return self._get_container(container.Id) + return container + + def update_networks(self, container): + has_network_differences, network_differences = container.has_network_differences() + updated_container = container + if has_network_differences: + if self.diff.get('differences'): + self.diff['differences'].append(dict(network_differences=network_differences)) + else: + self.diff['differences'] = [dict(network_differences=network_differences)] + self.results['changed'] = True + updated_container = self._add_networks(container, network_differences) + + if self.parameters.purge_networks: + has_extra_networks, extra_networks = container.has_extra_networks() + if has_extra_networks: + if self.diff.get('differences'): + 
self.diff['differences'].append(dict(purge_networks=extra_networks)) + else: + self.diff['differences'] = [dict(purge_networks=extra_networks)] + self.results['changed'] = True + updated_container = self._purge_networks(container, extra_networks) + return updated_container + + def _add_networks(self, container, differences): + for diff in differences: + # remove the container from the network, if connected + if diff.get('container'): + self.results['actions'].append(dict(removed_from_network=diff['parameter']['name'])) + if not self.check_mode: + try: + self.client.disconnect_container_from_network(container.Id, diff['parameter']['id']) + except Exception as exc: + self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'], + str(exc))) + # connect to the network + params = dict( + ipv4_address=diff['parameter'].get('ipv4_address', None), + ipv6_address=diff['parameter'].get('ipv6_address', None), + links=diff['parameter'].get('links', None), + aliases=diff['parameter'].get('aliases', None) + ) + self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params)) + if not self.check_mode: + try: + self.log("Connecting container to network %s" % diff['parameter']['id']) + self.log(params, pretty_print=True) + self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params) + except Exception as exc: + self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], str(exc))) + return self._get_container(container.Id) + + def _purge_networks(self, container, networks): + for network in networks: + self.results['actions'].append(dict(removed_from_network=network['name'])) + if not self.check_mode: + try: + self.client.disconnect_container_from_network(container.Id, network['name']) + except Exception as exc: + self.fail("Error disconnecting container from network %s - %s" % (network['name'], + str(exc))) + return self._get_container(container.Id) + 
+ def container_create(self, image, create_parameters): + self.log("create container") + self.log("image: %s parameters:" % image) + self.log(create_parameters, pretty_print=True) + self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters)) + self.results['changed'] = True + new_container = None + if not self.check_mode: + try: + new_container = self.client.create_container(image, **create_parameters) + except Exception as exc: + self.fail("Error creating container: %s" % str(exc)) + return self._get_container(new_container['Id']) + return new_container + + def container_start(self, container_id): + self.log("start container %s" % (container_id)) + self.results['actions'].append(dict(started=container_id)) + self.results['changed'] = True + if not self.check_mode: + try: + self.client.start(container=container_id) + except Exception as exc: + self.fail("Error starting container %s: %s" % (container_id, str(exc))) + + if not self.parameters.detach: + status = self.client.wait(container_id) + output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False) + if status != 0: + self.fail(output, status=status) + if self.parameters.cleanup: + self.container_remove(container_id, force=True) + insp = self._get_container(container_id) + if insp.raw: + insp.raw['Output'] = output + else: + insp.raw = dict(Output=output) + return insp + return self._get_container(container_id) + + def container_remove(self, container_id, link=False, force=False): + volume_state = (not self.parameters.keep_volumes) + self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force)) + self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force)) + self.results['changed'] = True + response = None + if not self.check_mode: + try: + response = self.client.remove_container(container_id, v=volume_state, link=link, force=force) + except 
Exception as exc: + self.fail("Error removing container %s: %s" % (container_id, str(exc))) + return response + + def container_update(self, container_id, update_parameters): + if update_parameters: + self.log("update container %s" % (container_id)) + self.log(update_parameters, pretty_print=True) + self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters)) + self.results['changed'] = True + if not self.check_mode and callable(getattr(self.client, 'update_container')): + try: + self.client.update_container(container_id, **update_parameters) + except Exception as exc: + self.fail("Error updating container %s: %s" % (container_id, str(exc))) + return self._get_container(container_id) + + def container_kill(self, container_id): + self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal)) + self.results['changed'] = True + response = None + if not self.check_mode: + try: + if self.parameters.kill_signal: + response = self.client.kill(container_id, signal=self.parameters.kill_signal) + else: + response = self.client.kill(container_id) + except Exception as exc: + self.fail("Error killing container %s: %s" % (container_id, exc)) + return response + + def container_stop(self, container_id): + if self.parameters.force_kill: + self.container_kill(container_id) + return + self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout)) + self.results['changed'] = True + response = None + if not self.check_mode: + try: + if self.parameters.stop_timeout: + response = self.client.stop(container_id, timeout=self.parameters.stop_timeout) + else: + response = self.client.stop(container_id) + except Exception as exc: + self.fail("Error stopping container %s: %s" % (container_id, str(exc))) + return response + + +def main(): + argument_spec = dict( + blkio_weight=dict(type='int'), + capabilities=dict(type='list'), + cleanup=dict(type='bool', default=False), + 
command=dict(type='str'), + cpu_period=dict(type='int'), + cpu_quota=dict(type='int'), + cpuset_cpus=dict(type='str'), + cpuset_mems=dict(type='str'), + cpu_shares=dict(type='int'), + detach=dict(type='bool', default=True), + devices=dict(type='list'), + dns_servers=dict(type='list'), + dns_opts=dict(type='list'), + dns_search_domains=dict(type='list'), + env=dict(type='dict'), + env_file=dict(type='path'), + entrypoint=dict(type='str'), + etc_hosts=dict(type='dict'), + exposed_ports=dict(type='list', aliases=['exposed', 'expose']), + force_kill=dict(type='bool', default=False, aliases=['forcekill']), + groups=dict(type='list'), + hostname=dict(type='str'), + ignore_image=dict(type='bool', default=False), + image=dict(type='str'), + interactive=dict(type='bool', default=False), + ipc_mode=dict(type='str'), + keep_volumes=dict(type='bool', default=True), + kernel_memory=dict(type='str'), + kill_signal=dict(type='str'), + labels=dict(type='dict'), + links=dict(type='list'), + log_driver=dict(type='str', choices=['json-file', 'syslog', 'journald', 'gelf', 'fluentd', 'awslogs', 'splunk'], default=None), + log_options=dict(type='dict', aliases=['log_opt']), + mac_address=dict(type='str'), + memory=dict(type='str', default='0'), + memory_reservation=dict(type='str'), + memory_swap=dict(type='str'), + memory_swappiness=dict(type='int'), + name=dict(type='str', required=True), + network_mode=dict(type='str'), + networks=dict(type='list'), + oom_killer=dict(type='bool'), + oom_score_adj=dict(type='int'), + paused=dict(type='bool', default=False), + pid_mode=dict(type='str'), + privileged=dict(type='bool', default=False), + published_ports=dict(type='list', aliases=['ports']), + pull=dict(type='bool', default=False), + purge_networks=dict(type='bool', default=False), + read_only=dict(type='bool', default=False), + recreate=dict(type='bool', default=False), + restart=dict(type='bool', default=False), + restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 
'unless-stopped']), + restart_retries=dict(type='int', default=None), + shm_size=dict(type='str'), + security_opts=dict(type='list'), + state=dict(type='str', choices=['absent', 'present', 'started', 'stopped'], default='started'), + stop_signal=dict(type='str'), + stop_timeout=dict(type='int'), + trust_image_content=dict(type='bool', default=False), + tty=dict(type='bool', default=False), + ulimits=dict(type='list'), + user=dict(type='str'), + uts=dict(type='str'), + volumes=dict(type='list'), + volumes_from=dict(type='list'), + volume_driver=dict(type='str'), + ) + + required_if = [ + ('state', 'present', ['image']) + ] + + client = AnsibleDockerClient( + argument_spec=argument_spec, + required_if=required_if, + supports_check_mode=True + ) + + cm = ContainerManager(client) + client.module.exit_json(**cm.results) + +# import module snippets +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index 12ba67de663..0de16632e03 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -1,457 +1,596 @@ #!/usr/bin/python # - -# (c) 2014, Pavel Antonov +# Copyright 2016 Red Hat | Ansible # # This file is part of Ansible # -# This module is free software: you can redistribute it and/or modify +# Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # -# This software is distributed in the hope that it will be useful, +# Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
+# along with Ansible. If not, see . -###################################################################### +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} DOCUMENTATION = ''' --- module: docker_image -author: "Pavel Antonov (@softzilla)" + +short_description: Manage docker images. + version_added: "1.5" -short_description: manage docker images + description: - - Create, check and remove docker images + - Build, load or pull an image, making the image available for creating containers. Also supports tagging an + image into a repository and archiving an image to a .tar file. + options: - path: + archive_path: description: - - Path to directory with Dockerfile + - Use with state C(present) to archive an image to a .tar file. required: false - default: null - aliases: [] - dockerfile: + version_added: "2.1" + load_path: description: - - Dockerfile to use + - Use with state C(present) to load an image from a .tar file. required: false + version_added: "2.2" + dockerfile: + description: + - Use with state C(present) to provide an alternate name for the Dockerfile to use when building an image. default: Dockerfile + required: false version_added: "2.0" + force: + description: + - Use with state I(absent) to un-tag and remove all images matching the specified name. Use with state + C(present) to build, load or pull an image when the image already exists. + default: false + required: false + version_added: "2.1" + http_timeout: + description: + - Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of + seconds. + required: false + version_added: "2.1" name: description: - - Image name to work with + - "Image name. Name format will be one of: name, repository/name, registry_server:port/name. + When pushing or pulling an image the name can optionally include the tag by appending ':tag_name'." 
required: true - default: null - aliases: [] - tag: + path: description: - - Image tag to work with + - Use with state 'present' to build an image. Will be the path to a directory containing the context and + Dockerfile for building an image. + aliases: + - build_path required: false - default: "latest" - aliases: [] - nocache: + pull: description: - - Do not use cache with building + - When building an image downloads any updates to the FROM image in Dockerfile. + default: true required: false - default: false - aliases: [] - docker_url: + version_added: "2.1" + push: description: - - URL of docker host to issue commands to + - Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter. + default: false required: false - default: ${DOCKER_HOST} or unix://var/run/docker.sock - aliases: [] - use_tls: + version_added: "2.2" + rm: description: - - Whether to use tls to connect to the docker server. "no" means not to - use tls (and ignore any other tls related parameters). "encrypt" means - to use tls to encrypt the connection to the server. "verify" means to - also verify that the server's certificate is valid for the server - (this both verifies the certificate against the CA and that the - certificate was issued for that host. If this is unspecified, tls will - only be used if one of the other tls options require it. - choices: [ "no", "encrypt", "verify" ] - version_added: "2.0" - tls_client_cert: + - Remove intermediate containers after build. + default: true + required: false + version_added: "2.1" + nocache: description: - - Path to the PEM-encoded certificate used to authenticate docker client. - If specified tls_client_key must be valid - default: ${DOCKER_CERT_PATH}/cert.pem - version_added: "2.0" - tls_client_key: + - Do not use cache when building an image. + default: false + required: false + repository: description: - - Path to the PEM-encoded key used to authenticate docker client. 
If - specified tls_client_cert must be valid - default: ${DOCKER_CERT_PATH}/key.pem - version_added: "2.0" - tls_ca_cert: + - Full path to a repository. Use with state C(present) to tag the image into the repository. Expects + format I(repository:tag). If no tag is provided, will use the value of the C(tag) parameter or I(latest). + required: false + version_added: "2.1" + state: description: - - Path to a PEM-encoded certificate authority to secure the Docker connection. - This has no effect if use_tls is encrypt. - default: ${DOCKER_CERT_PATH}/ca.pem - version_added: "2.0" - tls_hostname: + - Make assertions about the state of an image. + - When C(absent) an image will be removed. Use the force option to un-tag and remove all images + matching the provided name. + - When C(present) check if an image exists using the provided name and tag. If the image is not found or the + force option is used, the image will either be pulled, built or loaded. By default the image will be pulled + from Docker Hub. To build the image, provide a path value set to a directory containing a context and + Dockerfile. To load an image, specify load_path to provide a path to an archive file. To tag an image to a + repository, provide a repository path. If the name contains a repository path, it will be pushed. + - "NOTE: C(build) is DEPRECATED and will be removed in release 2.3. Specifying C(build) will behave the + same as C(present)." + required: false + default: present + choices: + - absent + - present + - build + tag: description: - - A hostname to check matches what's supplied in the docker server's - certificate. If unspecified, the hostname is taken from the docker_url. - default: Taken from docker_url - version_added: "2.0" - docker_api_version: + - Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to + I(latest). + - If C(name) parameter format is I(name:tag), then tag value from C(name) will take precedence. 
+ default: latest + required: false + buildargs: description: - - Remote API version to use. This defaults to the current default as - specified by docker-py. - default: docker-py default remote API version - version_added: "2.0" - state: + - Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive. + - Docker expects the value to be a string. For convenience any non-string values will be converted to strings. + - Requires Docker API >= 1.21 and docker-py >= 1.7.0. + type: complex + required: false + version_added: "2.2" + container_limits: description: - - Set the state of the image + - A dictionary of limits applied to each container created by the build process. required: false - default: present - choices: [ "present", "absent", "build" ] - aliases: [] - timeout: + version_added: "2.1" + type: complex + contains: + memory: + description: Set memory limit for build + type: int + memswap: + description: Total memory (memory + swap), -1 to disable swap + type: int + cpushares: + description: CPU shares (relative weight) + type: int + cpusetcpus: + description: CPUs in which to allow execution, e.g., "0-3", "0,1" + type: str + use_tls: description: - - Set image operation timeout + - "DEPRECATED. Whether to use tls to connect to the docker server. Set to C(no) when TLS will not be used. Set to + C(encrypt) to use TLS. And set to C(verify) to use TLS and verify that the server's certificate is valid for the + server. NOTE: If you specify this option, it will set the value of the tls or tls_verify parameters." + choices: + - no + - encrypt + - verify + default: no required: false - default: 600 - aliases: [] -requirements: - - "python >= 2.6" - - "docker-py" - - "requests" -''' - -EXAMPLES = ''' -Build docker image if required. 
Path should contains Dockerfile to build image: + version_added: "2.0" -- hosts: web - become: yes - tasks: - - name: check or build image - docker_image: path="/path/to/build/dir" name="my/app" state=present +extends_documentation_fragment: + - docker -Build new version of image: +requirements: + - "python >= 2.6" + - "docker-py >= 1.7.0" + - "Docker API >= 1.20" -- hosts: web - become: yes - tasks: - - name: check or build image - docker_image: path="/path/to/build/dir" name="my/app" state=build +authors: + - Pavel Antonov (@softzilla) + - Chris Houseknecht (@chouseknecht) + - James Tanner (@jctanner) -Remove image from local docker storage: +''' -- hosts: web - become: yes - tasks: - - name: remove image - docker_image: name="my/app" state=absent +EXAMPLES = ''' +- name: pull an image + docker_image: + name: pacur/centos-7 + +- name: Tag and push to docker hub + docker_image: + name: pacur/centos-7 + repository: dcoppenhagan/myimage + tag: 7.0 + push: yes + +- name: Tag and push to local registry + docker_image: + name: centos + repository: localhost:5000/centos + tag: 7 + push: yes + +- name: Remove image + docker_image: + state: absent + name: registry.ansible.com/chouseknecht/sinatra + tag: v1 + +- name: Build an image and push it to a private repo + docker_image: + path: ./sinatra + name: registry.ansible.com/chouseknecht/sinatra + tag: v1 + +- name: Archive image + docker_image: + name: registry.ansible.com/chouseknecht/sinatra + tag: v1 + archive_path: my_sinatra.tar + +- name: Load image from archive and push to a private registry + docker_image: + name: localhost:5000/myimages/sinatra + tag: v1 + push: yes + load_path: my_sinatra.tar + +- name: Build image and with buildargs + docker_image: + path: /path/to/build/dir + name: myimage + buildargs: + log_volume: /var/log/myapp + listen_port: 8080 ''' -import re -import os -from urlparse import urlparse - -try: - import json -except ImportError: - import simplejson as json +RETURN = ''' +image: + 
description: Image inspection results for the affected image. + returned: success + type: complex + sample: {} +''' -try: - from requests.exceptions import * - HAS_REQUESTS = True -except ImportError: - HAS_REQUESTS = False +from ansible.module_utils.docker_common import * try: - import docker.client - HAS_DOCKER_CLIENT = True + from docker.auth.auth import resolve_repository_name + from docker.utils.utils import parse_repository_tag except ImportError: - HAS_DOCKER_CLIENT = False - -DEFAULT_DOCKER_API_VERSION = None -if HAS_DOCKER_CLIENT: - try: - from docker.errors import APIError as DockerAPIError - except ImportError: - from docker.client import APIError as DockerAPIError - - try: - # docker-py 1.2+ - import docker.constants - DEFAULT_DOCKER_API_VERSION = docker.constants.DEFAULT_DOCKER_API_VERSION - except (ImportError, AttributeError): - # docker-py less than 1.2 - DEFAULT_DOCKER_API_VERSION = docker.client.DEFAULT_DOCKER_API_VERSION - -class DockerImageManager: - - def __init__(self, module): - self.module = module - self.path = self.module.params.get('path') - self.dockerfile = self.module.params.get('dockerfile') - self.name = self.module.params.get('name') - self.tag = self.module.params.get('tag') - self.nocache = self.module.params.get('nocache') - - # Connect to the docker server using any configured host and TLS settings. 
- - env_host = os.getenv('DOCKER_HOST') - env_docker_verify = os.getenv('DOCKER_TLS_VERIFY') - env_cert_path = os.getenv('DOCKER_CERT_PATH') - env_docker_hostname = os.getenv('DOCKER_TLS_HOSTNAME') - - docker_url = module.params.get('docker_url') - if not docker_url: - if env_host: - docker_url = env_host + # missing docker-py handled in docker_common + pass + + +class ImageManager(DockerBaseClass): + + def __init__(self, client, results): + + super(ImageManager, self).__init__() + + self.client = client + self.results = results + parameters = self.client.module.params + self.check_mode = self.client.check_mode + + self.archive_path = parameters.get('archive_path') + self.container_limits = parameters.get('container_limits') + self.dockerfile = parameters.get('dockerfile') + self.force = parameters.get('force') + self.load_path = parameters.get('load_path') + self.name = parameters.get('name') + self.nocache = parameters.get('nocache') + self.path = parameters.get('path') + self.pull = parameters.get('pull') + self.repository = parameters.get('repository') + self.rm = parameters.get('rm') + self.state = parameters.get('state') + self.tag = parameters.get('tag') + self.http_timeout = parameters.get('http_timeout') + self.push = parameters.get('push') + self.buildargs = parameters.get('buildargs') + + # If name contains a tag, it takes precedence over tag parameter. + repo, repo_tag = parse_repository_tag(self.name) + if repo_tag: + self.name = repo + self.tag = repo_tag + + if self.state in ['present', 'build']: + self.present() + elif self.state == 'absent': + self.absent() + + def fail(self, msg): + self.client.fail(msg) + + def present(self): + ''' + Handles state = 'present', which includes building, loading or pulling an image, + depending on user provided parameters. 
+ + :returns None + ''' + image = self.client.find_image(name=self.name, tag=self.tag) + + if not image or self.force: + if self.path: + # Build the image + if not os.path.isdir(self.path): + self.fail("Requested build path %s could not be found or you do not have access." % self.path) + image_name = self.name + if self.tag: + image_name = "%s:%s" % (self.name, self.tag) + self.log("Building image %s" % image_name) + self.results['actions'].append("Built image %s from %s" % (image_name, self.path)) + self.results['changed'] = True + if not self.check_mode: + self.results['image'] = self.build_image() + elif self.load_path: + # Load the image from an archive + if not os.path.isfile(self.load_path): + self.fail("Error loading image %s. Specified path %s does not exist." % (self.name, + self.load_path)) + image_name = self.name + if self.tag: + image_name = "%s:%s" % (self.name, self.tag) + self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path)) + self.results['changed'] = True + if not self.check_mode: + self.results['image'] = self.load_image() else: - docker_url = 'unix://var/run/docker.sock' + # pull the image + self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag)) + self.results['changed'] = True + if not self.check_mode: + self.results['image'] = self.client.pull_image(self.name, tag=self.tag) + + if self.archive_path: + self.archive_image(self.name, self.tag) + + if self.push and not self.repository: + self.push_image(self.name, self.tag) + elif self.repository: + self.tag_image(self.name, self.tag, self.repository, force=self.force, push=self.push) + + def absent(self): + ''' + Handles state = 'absent', which removes an image. 
+ + :return None + ''' + image = self.client.find_image(self.name, self.tag) + if image: + name = self.name + if self.tag: + name = "%s:%s" % (self.name, self.tag) + if not self.check_mode: + try: + self.client.remove_image(name, force=self.force) + except Exception as exc: + self.fail("Error removing image %s - %s" % (name, str(exc))) + + self.results['changed'] = True + self.results['actions'].append("Removed image %s" % (name)) + self.results['image']['state'] = 'Deleted' + + def archive_image(self, name, tag): + ''' + Archive an image to a .tar file. Called when archive_path is passed. + + :param name - name of the image. Type: str + :return None + ''' + + if not tag: + tag = "latest" + + image = self.client.find_image(name=name, tag=tag) + if not image: + self.log("archive image: image %s:%s not found" % (name, tag)) + return + + image_name = "%s:%s" % (name, tag) + self.results['actions'].append('Archived image %s to %s' % (image_name, self.archive_path)) + self.results['changed'] = True + if not self.check_mode: + self.log("Getting archive of image %s" % image_name) + try: + image = self.client.get_image(image_name) + except Exception as exc: + self.fail("Error getting image %s - %s" % (image_name, str(exc))) - docker_api_version = module.params.get('docker_api_version') + try: + image_tar = open(self.archive_path, 'w') + image_tar.write(image.data) + image_tar.close() + except Exception as exc: + self.fail("Error writing image archive %s - %s" % (self.archive_path, str(exc))) + + image = self.client.find_image(name=name, tag=tag) + if image: + self.results['image'] = image + + def push_image(self, name, tag=None): + ''' + If the name of the image contains a repository path, then push the image. + + :param name Name of the image to push. + :param tag Use a specific tag. 
+ :return: None + ''' + + repository = name + if not tag: + repository, tag = parse_repository_tag(name) + registry, repo_name = resolve_repository_name(repository) + + self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag)) + + if registry: + self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag)) + self.results['changed'] = True + if not self.check_mode: + status = None + try: + for line in self.client.push(repository, tag=tag, stream=True, decode=True): + self.log(line, pretty_print=True) + if line.get('errorDetail'): + raise Exception(line['errorDetail']['message']) + status = line.get('status') + except Exception as exc: + if re.search('unauthorized', str(exc)): + if re.search('authentication required', str(exc)): + self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." % + (registry, repo_name, tag, str(exc), registry)) + else: + self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" % + (registry, repo_name, tag, str(exc))) + self.fail("Error pushing image %s: %s" % (repository, str(exc))) + self.results['image'] = self.client.find_image(name=repository, tag=tag) + if not self.results['image']: + self.results['image'] = dict() + self.results['image']['push_status'] = status + + def tag_image(self, name, tag, repository, force=False, push=False): + ''' + Tag an image into a repository. + + :param name: name of the image. required. + :param tag: image tag. + :param repository: path to the repository. required. + :param force: bool. force tagging, even it image already exists with the repository path. + :param push: bool. push the image once it's tagged. 
+ :return: None + ''' + repo, repo_tag = parse_repository_tag(repository) + if not repo_tag: + repo_tag = "latest" + if tag: + repo_tag = tag + image = self.client.find_image(name=repo, tag=repo_tag) + found = 'found' if image else 'not found' + self.log("image %s was %s" % (repo, found)) + + if not image or force: + self.log("tagging %s:%s to %s:%s" % (name, tag, repo, repo_tag)) + self.results['changed'] = True + self.results['actions'].append("Tagged image %s:%s to %s:%s" % (name, tag, repo, repo_tag)) + if not self.check_mode: + try: + # Finding the image does not always work, especially running a localhost registry. In those + # cases, if we don't set force=True, it errors. + image_name = name + if tag and not re.search(tag, name): + image_name = "%s:%s" % (name, tag) + tag_status = self.client.tag(image_name, repo, tag=repo_tag, force=True) + if not tag_status: + raise Exception("Tag operation failed.") + except Exception as exc: + self.fail("Error: failed to tag image - %s" % str(exc)) + self.results['image'] = self.client.find_image(name=repo, tag=repo_tag) + if push: + self.push_image(repo, repo_tag) + + def build_image(self): + ''' + Build an image + + :return: image dict + ''' + params = dict( + path=self.path, + tag=self.name, + rm=self.rm, + nocache=self.nocache, + stream=True, + timeout=self.http_timeout, + pull=self.pull, + forcerm=self.rm, + dockerfile=self.dockerfile, + decode=True + ) + if self.tag: + params['tag'] = "%s:%s" % (self.name, self.tag) + if self.container_limits: + params['container_limits'] = self.container_limits + if self.buildargs: + for key, value in self.buildargs.iteritems(): + if not isinstance(value, basestring): + self.buildargs[key] = str(value) + params['buildargs'] = self.buildargs + + for line in self.client.build(**params): + # line = json.loads(line) + self.log(line, pretty_print=True) + if line.get('error'): + if line.get('errorDetail'): + errorDetail = line.get('errorDetail') + self.fail("Error building %s - code: %s 
message: %s" % (self.name, + errorDetail.get('code'), + errorDetail.get('message'))) + else: + self.fail("Error building %s - %s" % (self.name, line.get('error'))) + return self.client.find_image(name=self.name, tag=self.tag) - tls_client_cert = module.params.get('tls_client_cert', None) - if not tls_client_cert and env_cert_path: - tls_client_cert = os.path.join(env_cert_path, 'cert.pem') + def load_image(self): + ''' + Load an image from a .tar archive - tls_client_key = module.params.get('tls_client_key', None) - if not tls_client_key and env_cert_path: - tls_client_key = os.path.join(env_cert_path, 'key.pem') + :return: image dict + ''' + try: + self.log("Opening image %s" % self.load_path) + image_tar = open(self.load_path, 'r') + except Exception as exc: + self.fail("Error opening image %s - %s" % (self.load_path, str(exc))) - tls_ca_cert = module.params.get('tls_ca_cert') - if not tls_ca_cert and env_cert_path: - tls_ca_cert = os.path.join(env_cert_path, 'ca.pem') + try: + self.log("Loading image from %s" % self.load_path) + self.client.load_image(image_tar) + except Exception as exc: + self.fail("Error loading image %s - %s" % (self.name, str(exc))) - tls_hostname = module.params.get('tls_hostname') - if tls_hostname is None: - if env_docker_hostname: - tls_hostname = env_docker_hostname - else: - parsed_url = urlparse(docker_url) - if ':' in parsed_url.netloc: - tls_hostname = parsed_url.netloc[:parsed_url.netloc.rindex(':')] - else: - tls_hostname = parsed_url - if not tls_hostname: - tls_hostname = True - - # use_tls can be one of four values: - # no: Do not use tls - # encrypt: Use tls. We may do client auth. We will not verify the server - # verify: Use tls. We may do client auth. 
We will verify the server - # None: Only use tls if the parameters for client auth were specified - # or tls_ca_cert (which requests verifying the server with - # a specific ca certificate) - use_tls = module.params.get('use_tls') - if use_tls is None and env_docker_verify is not None: - use_tls = 'verify' - - tls_config = None - if use_tls != 'no': - params = {} - - # Setup client auth - if tls_client_cert and tls_client_key: - params['client_cert'] = (tls_client_cert, tls_client_key) - - # We're allowed to verify the connection to the server - if use_tls == 'verify' or (use_tls is None and tls_ca_cert): - if tls_ca_cert: - params['ca_cert'] = tls_ca_cert - params['verify'] = True - params['assert_hostname'] = tls_hostname - else: - params['verify'] = True - params['assert_hostname'] = tls_hostname - elif use_tls == 'encrypt': - params['verify'] = False - - if params: - # See https://github.com/docker/docker-py/blob/d39da11/docker/utils/utils.py#L279-L296 - docker_url = docker_url.replace('tcp://', 'https://') - tls_config = docker.tls.TLSConfig(**params) - - self.client = docker.Client( - base_url=docker_url, - version=module.params.get('docker_api_version'), - timeout=module.params.get('timeout'), - tls=tls_config) - - self.changed = False - self.log = [] - self.error_msg = None - - def get_log(self, as_string=True): - return "".join(self.log) if as_string else self.log - - def build(self): - stream = self.client.build(self.path, dockerfile=self.dockerfile, tag=':'.join([self.name, self.tag]), nocache=self.nocache, rm=True, stream=True) - success_search = r'Successfully built ([0-9a-f]+)' - image_id = None - self.changed = True - - for chunk in stream: - if not chunk: - continue + try: + image_tar.close() + except Exception as exc: + self.fail("Error closing image %s - %s" % (self.name, str(exc))) - try: - chunk_json = json.loads(chunk) - except ValueError: - continue - - if 'error' in chunk_json: - self.error_msg = chunk_json['error'] - return 
None - - if 'stream' in chunk_json: - output = chunk_json['stream'] - self.log.append(output) - match = re.search(success_search, output) - if match: - image_id = match.group(1) - - # Just in case we skipped evaluating the JSON returned from build - # during every iteration, add an error if the image_id was never - # populated - if not image_id: - self.error_msg = 'Unknown error encountered' - - return image_id - - def has_changed(self): - return self.changed - - def get_images(self): - filtered_images = [] - images = self.client.images() - for i in images: - # Docker-py version >= 0.3 (Docker API >= 1.8) - if 'RepoTags' in i: - repotag = ':'.join([self.name, self.tag]) - if not self.name or repotag in i['RepoTags']: - filtered_images.append(i) - # Docker-py version < 0.3 (Docker API < 1.8) - elif (not self.name or self.name == i['Repository']) and (not self.tag or self.tag == i['Tag']): - filtered_images.append(i) - return filtered_images - - def remove_images(self): - images = self.get_images() - for i in images: - try: - self.client.remove_image(i['Id']) - self.changed = True - except DockerAPIError as e: - # image can be removed by docker if not used - pass + return self.client.find_image(self.name, self.tag) def main(): - module = AnsibleModule( - argument_spec = dict( - path = dict(required=False, default=None, type='path'), - dockerfile = dict(required=False, default="Dockerfile"), - name = dict(required=True), - tag = dict(required=False, default="latest"), - nocache = dict(default=False, type='bool'), - state = dict(default='present', choices=['absent', 'present', 'build']), - use_tls = dict(default=None, choices=['no', 'encrypt', 'verify']), - tls_client_cert = dict(required=False, default=None, type='str'), - tls_client_key = dict(required=False, default=None, type='str'), - tls_ca_cert = dict(required=False, default=None, type='str'), - tls_hostname = dict(required=False, type='str', default=None), - docker_url = dict(), - docker_api_version = 
dict(required=False, - default=DEFAULT_DOCKER_API_VERSION, - type='str'), - timeout = dict(default=600, type='int'), - ) + argument_spec = dict( + archive_path=dict(type='path'), + container_limits=dict(type='dict'), + dockerfile=dict(type='str'), + force=dict(type='bool', default=False), + http_timeout=dict(type='int'), + load_path=dict(type='path'), + name=dict(type='str', required=True), + nocache=dict(type='str', default=False), + path=dict(type='path', aliases=['build_path']), + pull=dict(type='bool', default=True), + push=dict(type='bool', default=False), + repository=dict(type='str'), + rm=dict(type='bool', default=True), + state=dict(type='str', choices=['absent', 'present', 'build'], default='present'), + tag=dict(type='str', default='latest'), + use_tls=dict(type='str', default='no', choices=['no', 'encrypt', 'verify']), + buildargs=dict(type='dict', default=None), ) - if not HAS_DOCKER_CLIENT: - module.fail_json(msg='docker-py is needed for this module') - if not HAS_REQUESTS: - module.fail_json(msg='requests is needed for this module') - - try: - manager = DockerImageManager(module) - state = module.params.get('state') - failed = False - image_id = None - msg = '' - do_build = False - - # build image if not exists - if state == "present": - images = manager.get_images() - if len(images) == 0: - do_build = True - # build image - elif state == "build": - do_build = True - # remove image or images - elif state == "absent": - manager.remove_images() - - if do_build: - image_id = manager.build() - if image_id: - msg = "Image built: %s" % image_id - else: - failed = True - msg = "Error: %s\nLog:%s" % (manager.error_msg, manager.get_log()) - - module.exit_json(failed=failed, changed=manager.has_changed(), msg=msg, image_id=image_id) - - except SSLError as e: - if get_platform() == "Darwin": - # Ensure that the environment variables has been set - if "DOCKER_HOST" not in os.environ: - environment_error = ''' - It looks like you have not set your docker 
environment - variables. Please ensure that you have set the requested - variables as instructed when running boot2docker up. If - they are set in .bash_profile you will need to symlink - it to .bashrc. - ''' - module.exit_json(failed=True, changed=manager.has_changed(), msg="SSLError: " + str(e) + environment_error) - # If the above is true it's likely the hostname does not match - else: - environment_error = ''' - You may need to ignore hostname missmatches by setting - tls_hostname=boot2docker in your role. If this does not - resolve the issue please open an issue at - ansible/ansible-modules-core and ping michaeljs1990 - ''' - module.exit_json(failed=True, changed=manager.has_changed(), msg="SSLError: " + str(e) + environment_error) - # General error for non darwin users - else: - module.exit_json(failed=True, changed=manager.has_changed(), msg="SSLError: " + str(e)) - - except ConnectionError as e: - if get_platform() == "Darwin" and "DOCKER_HOST" not in os.environ: - # Ensure that the environment variables has been set - environment_error = ''' - It looks like you have not set your docker environment - variables. Please ensure that you have set the requested - variables as instructed when running boot2docker up. If - they are set in .bash_profile you will need to symlink - it to .bashrc. 
- ''' - module.exit_json(failed=True, changed=manager.has_changed(), msg="ConnectionError: " + str(e) + environment_error) - - module.exit_json(failed=True, changed=manager.has_changed(), msg="ConnectionError: " + str(e)) - - except DockerAPIError as e: - module.exit_json(failed=True, changed=manager.has_changed(), msg="Docker API error: " + e.explanation) - - except RequestException as e: - module.exit_json(failed=True, changed=manager.has_changed(), msg=repr(e)) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + results = dict( + changed=False, + actions=[], + image={} + ) + + ImageManager(client, results) + client.module.exit_json(**results) + # import module snippets from ansible.module_utils.basic import * + if __name__ == '__main__': main() diff --git a/cloud/docker/docker_image_facts.py b/cloud/docker/docker_image_facts.py new file mode 100644 index 00000000000..81e16fe5e2a --- /dev/null +++ b/cloud/docker/docker_image_facts.py @@ -0,0 +1,248 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: docker_image_facts + +short_description: Inspect docker images + +version_added: "2.1.0" + +description: + - Provide one or more image names, and the module will inspect each, returning an array of inspection results. + +options: + name: + description: + - An image name or a list of image names. Name format will be name[:tag] or repository/name[:tag], where tag is + optional. If a tag is not provided, 'latest' will be used. + default: null + required: true + +extends_documentation_fragment: + - docker + +requirements: + - "python >= 2.6" + - "docker-py >= 1.7.0" + - "Docker API >= 1.20" + +authors: + - Chris Houseknecht (@chouseknecht) + - James Tanner (@jctanner) + +''' + +EXAMPLES = ''' + +- name: Inspect a single image + docker_image_facts: + name: pacur/centos-7 + +- name: Inspect multiple images + docker_image_facts: + name: + - pacur/centos-7 + - sinatra +''' + +RETURN = ''' +images: + description: Facts for the selected images. 
+ returned: always + type: dict + sample: [ + { + "Architecture": "amd64", + "Author": "", + "Comment": "", + "Config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/etc/docker/registry/config.yml" + ], + "Domainname": "", + "Entrypoint": [ + "/bin/registry" + ], + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": { + "5000/tcp": {} + }, + "Hostname": "e5c68db50333", + "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/var/lib/registry": {} + }, + "WorkingDir": "" + }, + "Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610", + "ContainerConfig": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + '#(nop) CMD ["/etc/docker/registry/config.yml"]' + ], + "Domainname": "", + "Entrypoint": [ + "/bin/registry" + ], + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": { + "5000/tcp": {} + }, + "Hostname": "e5c68db50333", + "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/var/lib/registry": {} + }, + "WorkingDir": "" + }, + "Created": "2016-03-08T21:08:15.399680378Z", + "DockerVersion": "1.9.1", + "GraphDriver": { + "Data": null, + "Name": "aufs" + }, + "Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08", + "Name": "registry:2", + "Os": "linux", + "Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805", + "RepoDigests": [], + "RepoTags": [ + "registry:2" + ], + "Size": 0, + "VirtualSize": 165808884 + } + ] +''' + +from ansible.module_utils.docker_common import * + +try: + from docker import auth + from 
docker import utils +except ImportError: + # missing docker-py handled in docker_common + pass + +class ImageManager(DockerBaseClass): + + def __init__(self, client, results): + + super(ImageManager, self).__init__() + + self.client = client + self.results = results + self.name = self.client.module.params.get('name') + self.log("Gathering facts for images: %s" % (str(self.name))) + + if self.name: + self.results['images'] = self.get_facts() + else: + self.results['images'] = self.get_all_images() + + def fail(self, msg): + self.client.fail(msg) + + def get_facts(self): + ''' + Lookup and inspect each image name found in the names parameter. + + :returns array of image dictionaries + ''' + + results = [] + + names = self.name + if not isinstance(names, list): + names = [names] + + for name in names: + repository, tag = utils.parse_repository_tag(name) + if not tag: + tag = 'latest' + self.log('Fetching image %s:%s' % (repository, tag)) + image = self.client.find_image(name=repository, tag=tag) + if image: + results.append(image) + return results + + def get_all_images(self): + results = [] + images = self.client.images() + for image in images: + try: + inspection = self.client.inspect_image(image['Id']) + except Exception as exc: + self.fail("Error inspecting image %s - %s" % (image['Id'], str(exc))) + results.append(inspection) + return results + + +def main(): + argument_spec = dict( + name=dict(type='list'), + ) + + client = AnsibleDockerClient( + argument_spec=argument_spec + ) + + results = dict( + changed=False, + images=[] + ) + + ImageManager(client, results) + client.module.exit_json(**results) + + +# import module snippets +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py new file mode 100644 index 00000000000..f0b3c87ec39 --- /dev/null +++ b/cloud/docker/docker_login.py @@ -0,0 +1,333 @@ +#!/usr/bin/python +# +# (c) 2016 Olaf Kilian +# Chris 
Houseknecht, +# James Tanner, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: docker_login +short_description: Log into a Docker registry. +version_added: "2.0" +description: + - Provides functionality similar to the "docker login" command. + - Authenticate with a docker registry and add the credentials to your local Docker config file. Adding the + credentials to the config files allows future connections to the registry using tools such as Ansible's Docker + modules, the Docker CLI and docker-py without needing to provide credentials. + - Running in check mode will perform the authentication without updating the config file. +options: + registry_url: + required: False + description: + - The registry URL. + default: "https://index.docker.io/v1/" + aliases: + - registry + - url + username: + description: + - The username for the registry account + required: True + password: + description: + - The plaintext password for the registry account + required: True + email: + required: False + description: + - "The email address for the registry account. NOTE: private registries may not require this, + but Docker Hub requires it." + default: None + reauthorize: + required: False + description: + - Refresh exiting authentication found in the configuration file. 
+ default: no + choices: ['yes', 'no'] + aliases: + - reauth + config_path: + description: + - Custom path to the Docker CLI configuration file. + default: ~/.docker/config.json + required: False + aliases: + - self.config_path + - dockercfg_path + state: + version_added: '2.3' + description: + - This controls the current state of the user. C(present) will login in a user, C(absent) will log him out. + - To logout you only need the registry server, which defaults to DockerHub. + - Before 2.1 you could ONLY log in. + - docker does not support 'logout' with a custom config file. + choices: ['present', 'absent'] + default: 'present' + required: False + +extends_documentation_fragment: + - docker +requirements: + - "python >= 2.6" + - "docker-py >= 1.7.0" + - "Docker API >= 1.20" + - 'Only to be able to logout (state=absent): the docker command line utility' +authors: + - "Olaf Kilian " + - "Chris Houseknecht (@chouseknecht)" + - "James Tanner (@jctanner)" +''' + +EXAMPLES = ''' + +- name: Log into DockerHub + docker_login: + username: docker + password: rekcod + email: docker@docker.io + +- name: Log into private registry and force re-authorization + docker_login: + registry: your.private.registry.io + username: yourself + password: secrets3 + reauthorize: yes + +- name: Log into DockerHub using a custom config file + docker_login: + username: docker + password: rekcod + email: docker@docker.io + config_path: /tmp/.mydockercfg + +- name: Log out of DockerHub + docker_login: + state: absent + email: docker@docker.com +''' + +RETURN = ''' +login_results: + description: Results from the login. 
+ returned: when state='present' + type: dict + sample: { + "email": "testuer@yahoo.com", + "password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER", + "serveraddress": "localhost:5000", + "username": "testuser" + } +''' + +import base64 + +from ansible.module_utils.docker_common import * + + +class LoginManager(DockerBaseClass): + + def __init__(self, client, results): + + super(LoginManager, self).__init__() + + self.client = client + self.results = results + parameters = self.client.module.params + self.check_mode = self.client.check_mode + + self.registry_url = parameters.get('registry_url') + self.username = parameters.get('username') + self.password = parameters.get('password') + self.email = parameters.get('email') + self.reauthorize = parameters.get('reauthorize') + self.config_path = parameters.get('config_path') + + if parameters['state'] == 'present': + self.login() + else: + self.logout() + + def fail(self, msg): + self.client.fail(msg) + + def login(self): + ''' + Log into the registry with provided username/password. On success update the config + file with the new authorization. + + :return: None + ''' + + if self.email and not re.match(EMAIL_REGEX, self.email): + self.fail("Parameter error: the email address appears to be incorrect. Expecting it to match " + "/%s/" % (EMAIL_REGEX)) + + self.results['actions'].append("Logged into %s" % (self.registry_url)) + self.log("Log into %s with username %s" % (self.registry_url, self.username)) + try: + response = self.client.login( + self.username, + password=self.password, + email=self.email, + registry=self.registry_url, + reauth=self.reauthorize, + dockercfg_path=self.config_path + ) + except Exception as exc: + self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc))) + self.results['login_result'] = response + + if not self.check_mode: + self.update_config_file() + + def logout(self): + ''' + Log out of the registry. On success update the config file. 
+ TODO: port to API once docker.py supports this. + + :return: None + ''' + + cmd = "%s logout " % self.client.module.get_bin_path('docker', True) + #TODO: docker does not support config file in logout, restore this when they do + #if self.config_path and self.config_file_exists(self.config_path): + # cmd += "--config '%s' " % self.config_path + cmd += "'%s'" % self.registry_url + + (rc, out, err) = self.client.module.run_command(cmd) + if rc != 0: + self.fail("Could not log out: %s" % err) + + def config_file_exists(self, path): + if os.path.exists(path): + self.log("Configuration file %s exists" % (path)) + return True + self.log("Configuration file %s not found." % (path)) + return False + + def create_config_file(self, path): + ''' + Create a config file with a JSON blob containing an auths key. + + :return: None + ''' + + self.log("Creating docker config file %s" % (path)) + config_path_dir = os.path.dirname(path) + if not os.path.exists(config_path_dir): + try: + os.makedirs(config_path_dir) + except Exception as exc: + self.fail("Error: failed to create %s - %s" % (config_path_dir, str(exc))) + self.write_config(path, dict(auths=dict())) + + def write_config(self, path, config): + try: + json.dump(config, open(path, "w"), indent=5, sort_keys=True) + except Exception as exc: + self.fail("Error: failed to write config to %s - %s" % (path, str(exc))) + + def update_config_file(self): + ''' + If the authorization not stored in the config file or reauthorize is True, + update the config file with the new authorization. 
+ + :return: None + ''' + + path = os.path.expanduser(self.config_path) + if not self.config_file_exists(path): + self.create_config_file(path) + + try: + # read the existing config + config = json.load(open(path, "r")) + except ValueError: + self.log("Error reading config from %s" % (path)) + config = dict() + + if not config.get('auths'): + self.log("Adding auths dict to config.") + config['auths'] = dict() + + if not config['auths'].get(self.registry_url): + self.log("Adding registry_url %s to auths." % (self.registry_url)) + config['auths'][self.registry_url] = dict() + + encoded_credentials = dict( + auth=base64.b64encode(self.username + b':' + self.password), + email=self.email + ) + + if config['auths'][self.registry_url] != encoded_credentials or self.reauthorize: + # Update the config file with the new authorization + config['auths'][self.registry_url] = encoded_credentials + self.log("Updating config file %s with new authorization for %s" % (path, self.registry_url)) + self.results['actions'].append("Updated config file %s with new authorization for %s" % ( + path, self.registry_url)) + self.results['changed'] = True + self.write_config(path, config) + + +def main(): + + argument_spec=dict( + registry_url=dict(type='str', required=False, default=DEFAULT_DOCKER_REGISTRY, aliases=['registry', 'url']), + username=dict(type='str', required=False), + password=dict(type='str', required=False, no_log=True), + email=dict(type='str'), + reauthorize=dict(type='bool', default=False, aliases=['reauth']), + state=dict(type='str', default='present', choices=['present', 'absent']), + config_path=dict(type='str', default='~/.docker/config.json', aliases=['self.config_path', 'dockercfg_path']), + ) + + required_if = [ + ('state', 'present', ['username', 'password']), + ] + + client = AnsibleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=required_if + ) + + results = dict( + changed=False, + actions=[], + login_result={} + ) + + if 
module.params['state'] == 'present' and module.params['registry_url'] == DEFAULT_DOCKER_REGISTRY and not module.params['email']: + module.fail_json(msg="'email' is required when loging into DockerHub") + + LoginManager(client, results) + if 'actions' in results: + del results['actions'] + client.module.exit_json(**results) + +# import module snippets +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/cloud/docker/docker_network.py b/cloud/docker/docker_network.py new file mode 100644 index 00000000000..24ce4dc6a40 --- /dev/null +++ b/cloud/docker/docker_network.py @@ -0,0 +1,382 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +module: docker_network +version_added: "2.2" +short_description: Manage Docker networks +description: + - Create/remove Docker networks and connect containers to them. + - Performs largely the same function as the "docker network" CLI subcommand. +options: + name: + description: + - Name of the network to operate on. + required: true + aliases: + - network_name + + connected: + description: + - List of container names or container IDs to connect to a network. 
+ default: null + aliases: + - containers + + driver: + description: + - Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used. + default: bridge + + driver_options: + description: + - Dictionary of network settings. Consult docker docs for valid options and values. + default: null + + force: + description: + - With state I(absent) forces disconnecting all containers from the + network prior to deleting the network. With state I(present) will + disconnect all containers, delete the network and re-create the + network. This option is required if you have changed the IPAM or + driver options and want an existing network to be updated to use the + new options. + default: false + + appends: + description: + - By default the connected list is canonical, meaning containers not on the list are removed from the network. + Use C(appends) to leave existing containers connected. + default: false + aliases: + - incremental + + ipam_driver: + description: + - Specify an IPAM driver. + default: null + + ipam_options: + description: + - Dictionary of IPAM options. + default: null + + state: + description: + - I(absent) deletes the network. If a network has connected containers, it + cannot be deleted. Use the C(force) option to disconnect all containers + and delete the network. + - I(present) creates the network, if it does not already exist with the + specified parameters, and connects the list of containers provided via + the connected parameter. Containers not on the list will be disconnected. + An empty list will leave no containers connected to the network. Use the + C(appends) option to leave existing containers connected. Use the C(force) + options to force re-creation of the network. 
+ default: present + choices: + - absent + - present + +extends_documentation_fragment: + - docker + +authors: + - "Ben Keith (@keitwb)" + - "Chris Houseknecht (@chouseknecht)" + +requirements: + - "python >= 2.6" + - "docker-py >= 1.7.0" + - "The docker server >= 1.9.0" +''' + +EXAMPLES = ''' +- name: Create a network + docker_network: + name: network_one + +- name: Remove all but selected list of containers + docker_network: + name: network_one + connected: + - container_a + - container_b + - container_c + +- name: Remove a single container + docker_network: + name: network_one + connected: "{{ fulllist|difference(['container_a']) }}" + +- name: Add a container to a network, leaving existing containers connected + docker_network: + name: network_one + connected: + - container_a + appends: yes + +- name: Create a network with options + docker_network: + name: network_two + driver_options: + com.docker.network.bridge.name: net2 + ipam_options: + subnet: '172.3.26.0/16' + gateway: 172.3.26.1 + iprange: '192.168.1.0/24' + +- name: Delete a network, disconnecting all containers + docker_network: + name: network_one + state: absent + force: yes +''' + +RETURN = ''' +facts: + description: Network inspection results for the affected network. 
+ returned: success + type: complex + sample: {} +''' + +from ansible.module_utils.docker_common import * + +try: + from docker import utils + from docker.utils.types import Ulimit +except: + # missing docker-py handled in ansible.module_utils.docker + pass + + +class TaskParameters(DockerBaseClass): + def __init__(self, client): + super(TaskParameters, self).__init__() + self.client = client + + self.network_name = None + self.connected = None + self.driver = None + self.driver_options = None + self.ipam_driver = None + self.ipam_options = None + self.appends = None + self.force = None + self.debug = None + + for key, value in client.module.params.items(): + setattr(self, key, value) + + +def container_names_in_network(network): + return [c['Name'] for c in network['Containers'].values()] + + +class DockerNetworkManager(object): + + def __init__(self, client): + self.client = client + self.parameters = TaskParameters(client) + self.check_mode = self.client.check_mode + self.results = { + u'changed': False, + u'actions': [] + } + self.diff = self.client.module._diff + + self.existing_network = self.get_existing_network() + + if not self.parameters.connected and self.existing_network: + self.parameters.connected = container_names_in_network(self.existing_network) + + state = self.parameters.state + if state == 'present': + self.present() + elif state == 'absent': + self.absent() + + def get_existing_network(self): + networks = self.client.networks() + network = None + for n in networks: + if n['Name'] == self.parameters.network_name: + network = n + return network + + def has_different_config(self, net): + ''' + Evaluates an existing network and returns a tuple containing a boolean + indicating if the configuration is different and a list of differences. 
+ + :param net: the inspection output for an existing network + :return: (bool, list) + ''' + different = False + differences = [] + if self.parameters.driver and self.parameters.driver != net['Driver']: + different = True + differences.append('driver') + if self.parameters.driver_options: + if not net.get('Options'): + different = True + differences.append('driver_options') + else: + for key, value in self.parameters.driver_options.iteritems(): + if not net['Options'].get(key) or value != net['Options'][key]: + different = True + differences.append('driver_options.%s' % key) + if self.parameters.ipam_driver: + if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver: + different = True + differences.append('ipam_driver') + if self.parameters.ipam_options: + if not net.get('IPAM') or not net['IPAM'].get('Config'): + different = True + differences.append('ipam_options') + else: + for key, value in self.parameters.ipam_options.iteritems(): + camelkey = None + for net_key in net['IPAM']['Config'][0]: + if key == net_key.lower(): + camelkey = net_key + break + if not camelkey: + # key not found + different = True + differences.append('ipam_options.%s' % key) + elif net['IPAM']['Config'][0].get(camelkey) != value: + # key has different value + different = True + differences.append('ipam_options.%s' % key) + return different, differences + + def create_network(self): + if not self.existing_network: + ipam_pools = [] + if self.parameters.ipam_options: + ipam_pools.append(utils.create_ipam_pool(**self.parameters.ipam_options)) + + ipam_config = utils.create_ipam_config(driver=self.parameters.ipam_driver, + pool_configs=ipam_pools) + + if not self.check_mode: + resp = self.client.create_network(self.parameters.network_name, + driver=self.parameters.driver, + options=self.parameters.driver_options, + ipam=ipam_config) + + self.existing_network = self.client.inspect_network(resp['Id']) + self.results['actions'].append("Created network %s with driver %s" 
% (self.parameters.network_name, self.parameters.driver)) + self.results['changed'] = True + + def remove_network(self): + if self.existing_network: + self.disconnect_all_containers() + if not self.check_mode: + self.client.remove_network(self.parameters.network_name) + self.results['actions'].append("Removed network %s" % (self.parameters.network_name,)) + self.results['changed'] = True + + def is_container_connected(self, container_name): + return container_name in container_names_in_network(self.existing_network) + + def connect_containers(self): + for name in self.parameters.connected: + if not self.is_container_connected(name): + if not self.check_mode: + self.client.connect_container_to_network(name, self.parameters.network_name) + self.results['actions'].append("Connected container %s" % (name,)) + self.results['changed'] = True + + def disconnect_missing(self): + for c in self.existing_network['Containers'].values(): + name = c['Name'] + if name not in self.parameters.connected: + self.disconnect_container(name) + + def disconnect_all_containers(self): + containers = self.client.inspect_network(self.parameters.network_name)['Containers'] + for cont in containers.values(): + self.disconnect_container(cont['Name']) + + def disconnect_container(self, container_name): + if not self.check_mode: + self.client.disconnect_container_from_network(container_name, self.parameters.network_name) + self.results['actions'].append("Disconnected container %s" % (container_name,)) + self.results['changed'] = True + + def present(self): + different = False + differences = [] + if self.existing_network: + different, differences = self.has_different_config(self.existing_network) + + if self.parameters.force or different: + self.remove_network() + self.existing_network = None + + self.create_network() + self.connect_containers() + if not self.parameters.appends: + self.disconnect_missing() + + if self.diff or self.check_mode or self.parameters.debug: + self.results['diff'] = 
differences + + if not self.check_mode and not self.parameters.debug: + self.results.pop('actions') + + self.results['ansible_facts'] = {u'ansible_docker_network': self.get_existing_network()} + + def absent(self): + self.remove_network() + + +def main(): + argument_spec = dict( + network_name = dict(type='str', required=True, aliases=['name']), + connected = dict(type='list', default=[], aliases=['containers']), + state = dict(type='str', default='present', choices=['present', 'absent']), + driver = dict(type='str', default='bridge'), + driver_options = dict(type='dict', default={}), + force = dict(type='bool', default=False), + appends = dict(type='bool', default=False, aliases=['incremental']), + ipam_driver = dict(type='str', default=None), + ipam_options = dict(type='dict', default={}), + debug = dict(type='bool', default=False) + ) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True + ) + + cm = DockerNetworkManager(client) + client.module.exit_json(**cm.results) + +# import module snippets +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/cloud/docker/docker_service.py b/cloud/docker/docker_service.py new file mode 100644 index 00000000000..ee7c35329f4 --- /dev/null +++ b/cloud/docker/docker_service.py @@ -0,0 +1,1024 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' + +module: docker_service + +short_description: Manage docker services and containers. + +version_added: "2.1" + +author: "Chris Houseknecht (@chouseknecht)" + +description: + - Consumes docker compose to start, shutdown and scale services. + - Works with compose versions 1 and 2. + - Compose can be read from a docker-compose.yml (or .yaml) file or inline using the C(definition) option. + - See the examples for more details. + - Supports check mode. + +options: + project_src: + description: + - Path to a directory containing a docker-compose.yml or docker-compose.yaml file. + - Mutually exclusive with C(definition). + - Required when no C(definition) is provided. + type: path + required: false + project_name: + description: + - Provide a project name. If not provided, the project name is taken from the basename of C(project_src). + - Required when no C(definition) is provided. + type: str + required: false + files: + description: + - List of file names relative to C(project_src). Overrides docker-compose.yml or docker-compose.yaml. + - Files are loaded and merged in the order given. + type: list + required: false + state: + description: + - Desired state of the project. + - Specifying I(present) is the same as running I(docker-compose up). + - Specifying I(absent) is the same as running I(docker-compose down). + choices: + - absent + - present + default: present + type: str + required: false + services: + description: + - When C(state) is I(present) run I(docker-compose up) on a subset of services. + type: list + required: false + scale: + description: + - When C(state) is I(present) scale services. 
Provide a dictionary of key/value pairs where the key + is the name of the service and the value is an integer count for the number of containers. + type: complex + required: false + dependencies: + description: + - When C(state) is I(present) specify whether or not to include linked services. + type: bool + required: false + default: true + definition: + description: + - Provide docker-compose yaml describing one or more services, networks and volumes. + - Mutually exclusive with C(project_src) and C(files). + type: complex + required: false + hostname_check: + description: + - Whether or not to check the Docker daemon's hostname against the name provided in the client certificate. + type: bool + required: false + default: false + recreate: + description: + - By default containers will be recreated when their configuration differs from the service definition. + - Setting to I(never) ignores configuration differences and leaves existing containers unchanged. + - Setting to I(always) forces recreation of all existing containers. + type: str + required: false + choices: + - always + - never + - smart + default: smart + build: + description: + - Use with state I(present) to always build images prior to starting the application. + - Same as running docker-compose build with the pull option. + - Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents. + - Use the C(nocache) option to ignore the image cache when performing the build. + - If an existing image is replaced, services using the image will be recreated unless C(recreate) is I(never). + type: bool + required: false + default: false + pull: + description: + - Use with state I(present) to always pull images prior to starting the application. + - Same as running docker-compose pull. + - When a new image is pulled, services using the image will be recreated unless C(recreate) is I(never). 
+ type: bool + required: false + default: false + version_added: "2.2" + nocache: + description: + - Use with the build option to ignore the cache during the image build process. + type: bool + required: false + default: false + version_added: "2.2" + remove_images: + description: + - Use with state I(absent) to remove the all images or only local images. + type: str + required: false + default: null + remove_volumes: + description: + - Use with state I(absent) to remove data volumes. + required: false + type: bool + default: false + stopped: + description: + - Use with state I(present) to leave the containers in an exited or non-running state. + required: false + type: bool + default: false + restarted: + description: + - Use with state I(present) to restart all containers. + required: false + type: bool + default: false + debug: + description: + - Include I(actions) in the return values. + required: false + type: bool + default: false + +extends_documentation_fragment: + - docker + +requirements: + - "python >= 2.6" + - "docker-compose >= 1.7.0" + - "Docker API >= 1.20" + - "PyYAML >= 3.11" +''' + +EXAMPLES = ''' +# Examples use the django example at U(https://docs.docker.com/compose/django/). 
Follow it to create the flask +# directory + +- name: Run using a project directory + hosts: localhost + connection: local + gather_facts: no + tasks: + - docker_service: + project_src: flask + state: absent + + - docker_service: + project_src: flask + register: output + + - debug: + var: output + + - docker_service: + project_src: flask + build: no + register: output + + - debug: + var: output + + - assert: + that: "not output.changed " + + - docker_service: + project_src: flask + build: no + stopped: true + register: output + + - debug: + var: output + + - assert: + that: + - "not web.flask_web_1.state.running" + - "not db.flask_db_1.state.running" + + - docker_service: + project_src: flask + build: no + restarted: true + register: output + + - debug: + var: output + + - assert: + that: + - "web.flask_web_1.state.running" + - "db.flask_db_1.state.running" + +- name: Scale the web service to 2 + hosts: localhost + connection: local + gather_facts: no + tasks: + - docker_service: + project_src: flask + scale: + web: 2 + register: output + + - debug: + var: output + +- name: Run with inline v2 compose + hosts: localhost + connection: local + gather_facts: no + tasks: + - docker_service: + project_src: flask + state: absent + + - docker_service: + project_name: flask + definition: + version: '2' + services: + db: + image: postgres + web: + build: "{{ playbook_dir }}/flask" + command: "python manage.py runserver 0.0.0.0:8000" + volumes: + - "{{ playbook_dir }}/flask:/code" + ports: + - "8000:8000" + depends_on: + - db + register: output + + - debug: + var: output + + - assert: + that: + - "web.flask_web_1.state.running" + - "db.flask_db_1.state.running" + +- name: Run with inline v1 compose + hosts: localhost + connection: local + gather_facts: no + tasks: + - docker_service: + project_src: flask + state: absent + + - docker_service: + project_name: flask + definition: + db: + image: postgres + web: + build: "{{ playbook_dir }}/flask" + command: "python manage.py 
runserver 0.0.0.0:8000" + volumes: + - "{{ playbook_dir }}/flask:/code" + ports: + - "8000:8000" + links: + - db + register: output + + - debug: + var: output + + - assert: + that: + - "web.flask_web_1.state.running" + - "db.flask_db_1.state.running" +''' + +RETURN = ''' +service: + description: Name of the service. + returned: success + type: complex + contains: + container_name: + description: Name of the container. Format is I(project_service_#). + returned: success + type: complex + contains: + cmd: + description: One or more commands to be executed in the container. + returned: success + type: list + example: ["postgres"] + image: + description: Name of the image from which the container was built. + returned: success + type: str + example: postgres + labels: + description: Meta data assigned to the container. + returned: success + type: complex + example: {...} + networks: + description: Contains a dictionary for each network to which the container is a member. + returned: success + type: complex + contains: + IPAddress: + description: The IP address assigned to the container. + returned: success + type: string + example: 172.17.0.2 + IPPrefixLen: + description: Number of bits used by the subnet. + returned: success + type: int + example: 16 + aliases: + description: Aliases assigned to the container by the network. + returned: success + type: list + example: ['db'] + globalIPv6: + description: IPv6 address assigned to the container. + returned: success + type: str + example: '' + globalIPv6PrefixLen: + description: IPv6 subnet length. + returned: success + type: int + example: 0 + links: + description: List of container names to which this container is linked. + returned: success + type: list + example: null + macAddress: + description: Mac Address assigned to the virtual NIC. + returned: success + type: str + example: "02:42:ac:11:00:02" + state: + description: Information regarding the current disposition of the container. 
+ returned: success + type: complex + contains: + running: + description: Whether or not the container is up with a running process. + returned: success + type: bool + example: true + status: + description: Description of the running state. + returned: success + type: str + example: running + +actions: + description: Provides the actions to be taken on each service as determined by compose. + returned: when in check mode or I(debug) true + type: complex + contains: + service_name: + description: Name of the service. + returned: always + type: complex + contains: + pulled_image: + description: Provides image details when a new image is pulled for the service. + returned: on image pull + type: complex + contains: + name: + description: name of the image + returned: always + type: string + id: + description: image hash + returned: always + type: string + built_image: + description: Provides image details when a new image is built for the service. + returned: on image build + type: complex + contains: + name: + description: name of the image + returned: always + type: string + id: + description: image hash + returned: always + type: string + + action: + description: A descriptive name of the action to be performed on the service's containers. 
+ returned: always + type: list + contains: + id: + description: the container's long ID + returned: always + type: string + name: + description: the container's name + returned: always + type: string + short_id: + description: the container's short ID + returned: always + type: string +''' + +HAS_YAML = True +HAS_YAML_EXC = None +HAS_COMPOSE = True +HAS_COMPOSE_EXC = None +MINIMUM_COMPOSE_VERSION = '1.7.0' + +import sys +import re + +try: + import yaml +except ImportError as exc: + HAS_YAML = False + HAS_YAML_EXC = str(exc) + +from distutils.version import LooseVersion +from ansible.module_utils.basic import * + +try: + from compose import __version__ as compose_version + from compose.project import ProjectError + from compose.cli.command import project_from_options + from compose.service import ConvergenceStrategy, NoSuchImageError + from compose.cli.main import convergence_strategy_from_opts, build_action_from_opts, image_type_from_opt + from compose.const import DEFAULT_TIMEOUT +except ImportError as exc: + HAS_COMPOSE = False + HAS_COMPOSE_EXC = str(exc) + DEFAULT_TIMEOUT = 10 + +from ansible.module_utils.docker_common import * +from contextlib import contextmanager + + +AUTH_PARAM_MAPPING = { + u'docker_host': u'--host', + u'tls': u'--tls', + u'cacert_path': u'--tlscacert', + u'cert_path': u'--tlscert', + u'key_path': u'--tlskey', + u'tls_verify': u'--tlsverify' +} + + +@contextmanager +def stdout_redirector(path_name): + old_stdout = sys.stdout + fd = open(path_name, 'w') + sys.stdout = fd + try: + yield + finally: + sys.stdout = old_stdout + +def get_stdout(path_name): + full_stdout = '' + last_line = '' + with open(path_name, 'r') as fd: + for line in fd: + # strip terminal format/color chars + new_line = re.sub(r'\x1b\[.+m', '', line.encode('ascii')) + full_stdout += new_line + if new_line.strip(): + # Assuming last line contains the error message + last_line = new_line.strip().encode('utf-8') + fd.close() + os.remove(path_name) + return full_stdout, 
last_line + +class ContainerManager(DockerBaseClass): + + def __init__(self, client): + + super(ContainerManager, self).__init__() + + self.client = client + self.project_src = None + self.files = None + self.project_name = None + self.state = None + self.definition = None + self.hostname_check = None + self.timeout = None + self.remove_images = None + self.remove_orphans = None + self.remove_volumes = None + self.stopped = None + self.restarted = None + self.recreate = None + self.build = None + self.dependencies = None + self.services = None + self.scale = None + self.debug = None + self.pull = None + self.nocache = None + + for key, value in client.module.params.items(): + setattr(self, key, value) + + self.check_mode = client.check_mode + + if not self.debug: + self.debug = client.module._debug + + self.options = dict() + self.options.update(self._get_auth_options()) + self.options[u'--skip-hostname-check'] = (not self.hostname_check) + + if self.project_name: + self.options[u'--project-name'] = self.project_name + + if self.files: + self.options[u'--file'] = self.files + + if not HAS_COMPOSE: + self.client.fail("Unable to load docker-compose. Try `pip install docker-compose`. Error: %s" % HAS_COMPOSE_EXC) + + if LooseVersion(compose_version) < LooseVersion(MINIMUM_COMPOSE_VERSION): + self.client.fail("Found docker-compose version %s. Minimum required version is %s. " + "Upgrade docker-compose to a min version of %s." % + (compose_version, MINIMUM_COMPOSE_VERSION, MINIMUM_COMPOSE_VERSION)) + + self.log("options: ") + self.log(self.options, pretty_print=True) + + if self.definition: + if not HAS_YAML: + self.client.fail("Unable to load yaml. Try `pip install PyYAML`. 
Error: %s" % HAS_YAML_EXC) + + if not self.project_name: + self.client.fail("Parameter error - project_name required when providing definition.") + + self.project_src = tempfile.mkdtemp(prefix="ansible") + compose_file = os.path.join(self.project_src, "docker-compose.yml") + try: + self.log('writing: ') + self.log(yaml.dump(self.definition, default_flow_style=False)) + with open(compose_file, 'w') as f: + f.write(yaml.dump(self.definition, default_flow_style=False)) + except Exception as exc: + self.client.fail("Error writing to %s - %s" % (compose_file, str(exc))) + else: + if not self.project_src: + self.client.fail("Parameter error - project_src required.") + + try: + self.log("project_src: %s" % self.project_src) + self.project = project_from_options(self.project_src, self.options) + except Exception as exc: + self.client.fail("Configuration error - %s" % str(exc)) + + def exec_module(self): + result = dict() + + if self.state == 'present': + result = self.cmd_up() + elif self.state == 'absent': + result = self.cmd_down() + + if self.definition: + compose_file = os.path.join(self.project_src, "docker-compose.yml") + self.log("removing %s" % compose_file) + os.remove(compose_file) + self.log("removing %s" % self.project_src) + os.rmdir(self.project_src) + + if not self.check_mode and not self.debug and result.get('actions'): + result.pop('actions') + + return result + + def _get_auth_options(self): + options = dict() + for key, value in self.client.auth_params.items(): + if value is not None: + option = AUTH_PARAM_MAPPING.get(key) + if option: + options[option] = value + return options + + def cmd_up(self): + + start_deps = self.dependencies + service_names = self.services + detached = True + result = dict(changed=False, actions=[], ansible_facts=dict()) + + up_options = { + u'--no-recreate': False, + u'--build': True, + u'--no-build': False, + u'--no-deps': False, + u'--force-recreate': False, + } + + if self.recreate == 'never': + up_options[u'--no-recreate'] 
= True + elif self.recreate == 'always': + up_options[u'--force-recreate'] = True + + if self.remove_orphans: + up_options[u'--remove-orphans'] = True + + converge = convergence_strategy_from_opts(up_options) + self.log("convergence strategy: %s" % converge) + + if self.pull: + pull_output = self.cmd_pull() + result['changed'] = pull_output['changed'] + result['actions'] += pull_output['actions'] + + if self.build: + build_output = self.cmd_build() + result['changed'] = build_output['changed'] + result['actions'] += build_output['actions'] + + for service in self.project.services: + if not service_names or service.name in service_names: + plan = service.convergence_plan(strategy=converge) + if plan.action != 'noop': + result['changed'] = True + result_action = dict(service=service.name) + result_action[plan.action] = [] + for container in plan.containers: + result_action[plan.action].append(dict( + id=container.id, + name=container.name, + short_id=container.short_id, + )) + result['actions'].append(result_action) + + if not self.check_mode and result['changed']: + _, fd_name = tempfile.mkstemp(prefix="ansible") + try: + with stdout_redirector(fd_name): + do_build = build_action_from_opts(up_options) + self.log('Setting do_build to %s' % do_build) + self.project.up( + service_names=service_names, + start_deps=start_deps, + strategy=converge, + do_build=do_build, + detached=detached, + remove_orphans=self.remove_orphans, + timeout=self.timeout) + except Exception as exc: + full_stdout, last_line= get_stdout(fd_name) + self.client.module.fail_json(msg="Error starting project %s" % str(exc), module_stderr=last_line, + module_stdout=full_stdout) + else: + get_stdout(fd_name) + + if self.stopped: + stop_output = self.cmd_stop(service_names) + result['changed'] = stop_output['changed'] + result['actions'] += stop_output['actions'] + + if self.restarted: + restart_output = self.cmd_restart(service_names) + result['changed'] = restart_output['changed'] + result['actions'] 
+= restart_output['actions'] + + if self.scale: + scale_output = self.cmd_scale() + result['changed'] = scale_output['changed'] + result['actions'] += scale_output['actions'] + + for service in self.project.services: + result['ansible_facts'][service.name] = dict() + for container in service.containers(stopped=True): + inspection = container.inspect() + # pare down the inspection data to the most useful bits + facts = dict( + cmd=[], + labels=dict(), + image=None, + state=dict( + running=None, + status=None + ), + networks=dict() + ) + if inspection['Config'].get('Cmd', None) is not None: + facts['cmd'] = inspection['Config']['Cmd'] + if inspection['Config'].get('Labels', None) is not None: + facts['labels'] = inspection['Config']['Labels'] + if inspection['Config'].get('Image', None) is not None: + facts['image'] = inspection['Config']['Image'] + if inspection['State'].get('Running', None) is not None: + facts['state']['running'] = inspection['State']['Running'] + if inspection['State'].get('Status', None) is not None: + facts['state']['status'] = inspection['State']['Status'] + + if inspection.get('NetworkSettings') and inspection['NetworkSettings'].get('Networks'): + networks = inspection['NetworkSettings']['Networks'] + for key in networks: + facts['networks'][key] = dict( + aliases=[], + globalIPv6=None, + globalIPv6PrefixLen=0, + IPAddress=None, + IPPrefixLen=0, + links=None, + macAddress=None, + ) + if networks[key].get('Aliases', None) is not None: + facts['networks'][key]['aliases'] = networks[key]['Aliases'] + if networks[key].get('GlobalIPv6Address', None) is not None: + facts['networks'][key]['globalIPv6'] = networks[key]['GlobalIPv6Address'] + if networks[key].get('GlobalIPv6PrefixLen', None) is not None: + facts['networks'][key]['globalIPv6PrefixLen'] = networks[key]['GlobalIPv6PrefixLen'] + if networks[key].get('IPAddress', None) is not None: + facts['networks'][key]['IPAddress'] = networks[key]['IPAddress'] + if networks[key].get('IPPrefixLen', 
None) is not None: + facts['networks'][key]['IPPrefixLen'] = networks[key]['IPPrefixLen'] + if networks[key].get('Links', None) is not None: + facts['networks'][key]['links'] = networks[key]['Links'] + if networks[key].get('MacAddress', None) is not None: + facts['networks'][key]['macAddress'] = networks[key]['MacAddress'] + + result['ansible_facts'][service.name][container.name] = facts + + return result + + def cmd_pull(self): + result = dict( + changed=False, + actions=[], + ) + + if not self.check_mode: + for service in self.project.get_services(self.services, include_deps=False): + if 'image' not in service.options: + continue + + self.log('Pulling image for service %s' % service.name) + # store the existing image ID + old_image_id = '' + try: + image = service.image() + if image and image.get('Id'): + old_image_id = image['Id'] + except NoSuchImageError: + pass + except Exception as exc: + self.client.fail("Error: service image lookup failed - %s" % str(exc)) + + # pull the image + try: + service.pull(ignore_pull_failures=False) + except Exception as exc: + self.client.fail("Error: pull failed with %s" % str(exc)) + + # store the new image ID + new_image_id = '' + try: + image = service.image() + if image and image.get('Id'): + new_image_id = image['Id'] + except NoSuchImageError as exc: + self.client.fail("Error: service image lookup failed after pull - %s" % str(exc)) + + if new_image_id != old_image_id: + # if a new image was pulled + result['changed'] = True + result['actions'].append(dict( + service=service.name, + pulled_image=dict( + name=service.image_name, + id=new_image_id + ) + )) + return result + + def cmd_build(self): + result = dict( + changed=False, + actions=[] + ) + if not self.check_mode: + for service in self.project.get_services(self.services, include_deps=False): + if service.can_be_built(): + self.log('Building image for service %s' % service.name) + # store the existing image ID + old_image_id = '' + try: + image = service.image() + if 
image and image.get('Id'): + old_image_id = image['Id'] + except NoSuchImageError: + pass + except Exception as exc: + self.client.fail("Error: service image lookup failed - %s" % str(exc)) + + # build the image + try: + new_image_id = service.build(pull=True, no_cache=self.nocache) + except Exception as exc: + self.client.fail("Error: build failed with %s" % str(exc)) + + if new_image_id not in old_image_id: + # if a new image was built + result['changed'] = True + result['actions'].append(dict( + service=service.name, + built_image=dict( + name=service.image_name, + id=new_image_id + ) + )) + return result + + def cmd_down(self): + result = dict( + changed=False, + actions=[] + ) + for service in self.project.services: + containers = service.containers(stopped=True) + if len(containers): + result['changed'] = True + result['actions'].append(dict( + service=service.name, + deleted=[container.name for container in containers] + )) + if not self.check_mode and result['changed']: + image_type = image_type_from_opt('--rmi', self.remove_images) + try: + self.project.down(image_type, self.remove_volumes, self.remove_orphans) + except Exception as exc: + self.client.fail("Error stopping project - %s" % str(exc)) + return result + + def cmd_stop(self, service_names): + result = dict( + changed=False, + actions=[] + ) + for service in self.project.services: + if not service_names or service.name in service_names: + service_res = dict( + service=service.name, + stop=[] + ) + for container in service.containers(stopped=False): + result['changed'] = True + service_res['stop'].append(dict( + id=container.id, + name=container.name, + short_id=container.short_id + )) + result['actions'].append(service_res) + if not self.check_mode and result['changed']: + _, fd_name = tempfile.mkstemp(prefix="ansible") + try: + with stdout_redirector(fd_name): + self.project.stop(service_names=service_names, timeout=self.timeout) + except Exception as exc: + full_stdout, last_line = 
get_stdout(fd_name) + self.client.module.fail_json(msg="Error stopping project %s" % str(exc), module_stderr=last_line, + module_stdout=full_stdout) + else: + get_stdout(fd_name) + return result + + def cmd_restart(self, service_names): + result = dict( + changed=False, + actions=[] + ) + + for service in self.project.services: + if not service_names or service.name in service_names: + service_res = dict( + service=service.name, + restart=[] + ) + for container in service.containers(stopped=True): + result['changed'] = True + service_res['restart'].append(dict( + id=container.id, + name=container.name, + short_id=container.short_id + )) + result['actions'].append(service_res) + + if not self.check_mode and result['changed']: + _, fd_name = tempfile.mkstemp(prefix="ansible") + try: + with stdout_redirector(fd_name): + self.project.restart(service_names=service_names, timeout=self.timeout) + except Exception as exc: + full_stdout, last_line = get_stdout(fd_name) + self.client.module.fail_json(msg="Error restarting project %s" % str(exc), module_stderr=last_line, + module_stdout=full_stdout) + else: + get_stdout(fd_name) + return result + + def cmd_scale(self): + result = dict( + changed=False, + actions=[] + ) + for service in self.project.services: + if service.name in self.scale: + service_res = dict( + service=service.name, + scale=0 + ) + containers = service.containers(stopped=True) + if len(containers) != self.scale[service.name]: + result['changed'] = True + service_res['scale'] = self.scale[service.name] - len(containers) + if not self.check_mode: + try: + service.scale(int(self.scale[service.name])) + except Exception as exc: + self.client.fail("Error scaling %s - %s" % (service.name, str(exc))) + result['actions'].append(service_res) + return result + + +def main(): + argument_spec = dict( + project_src=dict(type='path'), + project_name=dict(type='str',), + files=dict(type='list'), + state=dict(type='str', choices=['absent', 'present'], default='present'), 
+ definition=dict(type='dict'), + hostname_check=dict(type='bool', default=False), + recreate=dict(type='str', choices=['always','never','smart'], default='smart'), + build=dict(type='bool', default=False), + remove_images=dict(type='str', choices=['all', 'local']), + remove_volumes=dict(type='bool', default=False), + remove_orphans=dict(type='bool', default=False), + stopped=dict(type='bool', default=False), + restarted=dict(type='bool', default=False), + scale=dict(type='dict'), + services=dict(type='list'), + dependencies=dict(type='bool', default=True), + pull=dict(type='bool', default=False), + nocache=dict(type='bool', default=False), + debug=dict(type='bool', default=False), + timeout=dict(type='int', default=DEFAULT_TIMEOUT) + ) + + mutually_exclusive = [ + ('definition', 'project_src'), + ('definition', 'files') + ] + + client = AnsibleDockerClient( + argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True + ) + + result = ContainerManager(client).exec_module() + client.module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/cloud/google/gc_storage.py b/cloud/google/gc_storage.py index 37d61b0b268..6a5e9023d02 100644 --- a/cloud/google/gc_storage.py +++ b/cloud/google/gc_storage.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: gc_storage @@ -69,14 +73,14 @@ required: true default: null choices: [ 'get', 'put', 'get_url', 'get_str', 'delete', 'create' ] - gcs_secret_key: + gs_secret_key: description: - - GCS secret key. If not set then the value of the GCS_SECRET_KEY environment variable is used. + - GS secret key. If not set then the value of the GS_SECRET_ACCESS_KEY environment variable is used. 
required: true default: null - gcs_access_key: + gs_access_key: description: - - GCS access key. If not set then the value of the GCS_ACCESS_KEY environment variable is used. + - GS access key. If not set then the value of the GS_ACCESS_KEY_ID environment variable is used. required: true default: null @@ -89,26 +93,49 @@ ''' EXAMPLES = ''' -# upload some content -- gc_storage: bucket=mybucket object=key.txt src=/usr/local/myfile.txt mode=put permission=public-read - -# upload some headers -- gc_storage: bucket=mybucket object=key.txt src=/usr/local/myfile.txt headers='{"Content-Encoding": "gzip"}' - -# download some content -- gc_storage: bucket=mybucket object=key.txt dest=/usr/local/myfile.txt mode=get - -# Download an object as a string to use else where in your playbook -- gc_storage: bucket=mybucket object=key.txt mode=get_str - -# Create an empty bucket -- gc_storage: bucket=mybucket mode=create - -# Create a bucket with key as directory -- gc_storage: bucket=mybucket object=/my/directory/path mode=create - -# Delete a bucket and all contents -- gc_storage: bucket=mybucket mode=delete +- name: Upload some content + gc_storage: + bucket: mybucket + object: key.txt + src: /usr/local/myfile.txt + mode: put + permission: public-read + +- name: Upload some headers + gc_storage: + bucket: mybucket + object: key.txt + src: /usr/local/myfile.txt + headers: '{"Content-Encoding": "gzip"}' + +- name: Download some content + gc_storage: + bucket: mybucket + object: key.txt + dest: /usr/local/myfile.txt + mode: get + +- name: Download an object as a string to use else where in your playbook + gc_storage: + bucket: mybucket + object: key.txt + mode: get_str + +- name: Create an empty bucket + gc_storage: + bucket: mybucket + mode: create + +- name: Create a bucket with key as directory + gc_storage: + bucket: mybucket + object: /my/directory/path + mode: create + +- name: Delete a bucket and all contents + gc_storage: + bucket: mybucket + mode: delete ''' import os @@ 
-134,7 +161,7 @@ def grant_check(module, gs, obj): if not grant: obj.set_acl('authenticated-read') module.exit_json(changed=True, result="The objects permission as been set to authenticated-read") - except gs.provider.storage_response_error, e: + except gs.provider.storage_response_error as e: module.fail_json(msg= str(e)) return True @@ -144,7 +171,7 @@ def key_check(module, gs, bucket, obj): try: bucket = gs.lookup(bucket) key_check = bucket.get_key(obj) - except gs.provider.storage_response_error, e: + except gs.provider.storage_response_error as e: module.fail_json(msg= str(e)) if key_check: grant_check(module, gs, key_check) @@ -166,7 +193,7 @@ def keysum(module, gs, bucket, obj): def bucket_check(module, gs, bucket): try: result = gs.lookup(bucket) - except gs.provider.storage_response_error, e: + except gs.provider.storage_response_error as e: module.fail_json(msg= str(e)) if result: grant_check(module, gs, result) @@ -178,7 +205,7 @@ def create_bucket(module, gs, bucket): try: bucket = gs.create_bucket(bucket) bucket.set_acl(module.params.get('permission')) - except gs.provider.storage_response_error, e: + except gs.provider.storage_response_error as e: module.fail_json(msg= str(e)) if bucket: return True @@ -191,7 +218,7 @@ def delete_bucket(module, gs, bucket): bucket.delete_key(key.name) bucket.delete() return True - except gs.provider.storage_response_error, e: + except gs.provider.storage_response_error as e: module.fail_json(msg= str(e)) def delete_key(module, gs, bucket, obj): @@ -199,7 +226,7 @@ def delete_key(module, gs, bucket, obj): bucket = gs.lookup(bucket) bucket.delete_key(obj) module.exit_json(msg="Object deleted from bucket ", changed=True) - except gs.provider.storage_response_error, e: + except gs.provider.storage_response_error as e: module.fail_json(msg= str(e)) def create_dirkey(module, gs, bucket, obj): @@ -208,7 +235,7 @@ def create_dirkey(module, gs, bucket, obj): key = bucket.new_key(obj) key.set_contents_from_string('') 
module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True) - except gs.provider.storage_response_error, e: + except gs.provider.storage_response_error as e: module.fail_json(msg= str(e)) def path_check(path): @@ -243,7 +270,7 @@ def upload_gsfile(module, gs, bucket, obj, src, expiry): key.set_acl(module.params.get('permission')) url = key.generate_url(expiry) module.exit_json(msg="PUT operation complete", url=url, changed=True) - except gs.provider.storage_copy_error, e: + except gs.provider.storage_copy_error as e: module.fail_json(msg= str(e)) def download_gsfile(module, gs, bucket, obj, dest): @@ -252,7 +279,7 @@ def download_gsfile(module, gs, bucket, obj, dest): key = bucket.lookup(obj) key.get_contents_to_filename(dest) module.exit_json(msg="GET operation complete", changed=True) - except gs.provider.storage_copy_error, e: + except gs.provider.storage_copy_error as e: module.fail_json(msg= str(e)) def download_gsstr(module, gs, bucket, obj): @@ -261,7 +288,7 @@ def download_gsstr(module, gs, bucket, obj): key = bucket.lookup(obj) contents = key.get_contents_as_string() module.exit_json(msg="GET operation complete", contents=contents, changed=True) - except gs.provider.storage_copy_error, e: + except gs.provider.storage_copy_error as e: module.fail_json(msg= str(e)) def get_download_url(module, gs, bucket, obj, expiry): @@ -270,7 +297,7 @@ def get_download_url(module, gs, bucket, obj, expiry): key = bucket.lookup(obj) url = key.generate_url(expiry) module.exit_json(msg="Download url:", url=url, expiration=expiry, changed=True) - except gs.provider.storage_response_error, e: + except gs.provider.storage_response_error as e: module.fail_json(msg= str(e)) def handle_get(module, gs, bucket, obj, overwrite, dest): @@ -388,7 +415,7 @@ def main(): try: gs = boto.connect_gs(gs_access_key, gs_secret_key) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg = 
str(e)) if mode == 'get': diff --git a/cloud/google/gce.py b/cloud/google/gce.py index 3339d5ca355..802a7a1393c 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: gce @@ -23,7 +27,7 @@ short_description: create or terminate GCE instances description: - Creates or terminates Google Compute Engine (GCE) instances. See - U(https://cloud.google.com/products/compute-engine) for an overview. + U(https://cloud.google.com/compute) for an overview. Full install/configuration instructions for the gce* modules can be found in the comments of ansible/test/gce_tests.py. options: @@ -89,13 +93,28 @@ default: null name: description: - - identifier when working with a single instance + - either a name of a single instance or when used with 'num_instances', + the base name of a cluster of nodes + required: false + aliases: ['base_name'] + num_instances: + description: + - can be used with 'name', specifies + the number of nodes to provision using 'name' + as a base name required: false + version_added: "2.3" network: description: - name of the network, 'default' will be used if not specified required: false default: "default" + subnetwork: + description: + - name of the subnetwork in which the instance should be created + required: false + default: null + version_added: "2.2" persistent_boot_disk: description: - if set, create the instance with a persistent boot disk @@ -115,7 +134,7 @@ - desired state of the resource required: false default: "present" - choices: ["active", "present", "absent", "deleted"] + choices: ["active", "present", "absent", "deleted", "started", "stopped", "terminated"] tags: description: - a comma-separated list of tags to associate with the instance @@ -136,7 +155,7 @@ external_ip: version_added: 
"1.9" description: - - type of external ip, ephemeral by default; alternatively, a list of fixed gce ips or ip names can be given (if there is not enough specified ip, 'ephemeral' will be used) + - type of external ip, ephemeral by default; alternatively, a list of fixed gce ips or ip names can be given (if there is not enough specified ip, 'ephemeral' will be used). Specify 'none' if no external ip is desired. required: false default: "ephemeral" disk_auto_delete: @@ -158,96 +177,116 @@ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials, >= 0.20.0 if using preemptible option" notes: - - Either I(name) or I(instance_names) is required. -author: "Eric Johnson (@erjohnso) " + - Either I(instance_names) or I(name) is required. + - JSON credentials strongly preferred. +author: "Eric Johnson (@erjohnso) , Tom Melendez (@supertom) " ''' EXAMPLES = ''' -# Basic provisioning example. Create a single Debian 7 instance in the -# us-central1-a Zone of n1-standard-1 machine type. -- local_action: - module: gce - name: test-instance - zone: us-central1-a - machine_type: n1-standard-1 - image: debian-7 - -# Example using defaults and with metadata to create a single 'foo' instance -- local_action: - module: gce - name: foo - metadata: '{"db":"postgres", "group":"qa", "id":500}' - - -# Launch instances from a control node, runs some tasks on the new instances, -# and then terminate them -# This example uses JSON credentials with the credentials_file parameter -# rather than the deprecated pem_file option with PEM formatted credentials. - -- name: Create a sandbox instance +# Basic provisioning example. Create a single Debian 8 instance in the +# us-central1-a Zone of the n1-standard-1 machine type. +# Create multiple instances by specifying multiple names, seperated by +# commas in the instance_names field +# (e.g. 
my-test-instance1,my-test-instance2) + gce: + instance_names: my-test-instance1 + zone: us-central1-a + machine_type: n1-standard-1 + image: debian-8 + state: present + service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com" + credentials_file: "/path/to/your-key.json" + project_id: "your-project-name" + +# Create a single Debian 8 instance in the us-central1-a Zone +# Use existing disks, custom network/subnetwork, set service account permissions +# add tags and metadata. + gce: + instance_names: my-test-instance + zone: us-central1-a + machine_type: n1-standard-1 + state: present + metadata: '{"db":"postgres", "group":"qa", "id":500}' + tags: + - http-server + - my-other-tag + disks: + - name: disk-2 + mode: READ_WRITE + - name: disk-3 + mode: READ_ONLY + disk_auto_delete: false + network: foobar-network + subnetwork: foobar-subnetwork-1 + preemptible: true + ip_forward: true + service_account_permissions: + - storage-full + - taskqueue + - bigquery + service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com" + credentials_file: "/path/to/your-key.json" + project_id: "your-project-name" + +# Example Playbook +- name: Compute Engine Instance Examples hosts: localhost vars: - names: foo,bar - machine_type: n1-standard-1 - image: debian-6 - zone: us-central1-a - service_account_email: unique-email@developer.gserviceaccount.com - credentials_file: /path/to/json_file - project_id: project-id + service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com" + credentials_file: "/path/to/your-key.json" + project_id: "your-project-name" tasks: - - name: Launch instances - local_action: gce instance_names={{names}} machine_type={{machine_type}} - image={{image}} zone={{zone}} - service_account_email={{ service_account_email }} - credentials_file={{ credentials_file }} - project_id={{ project_id }} + - name: create multiple instances + # Basic provisioning example. 
Create multiple Debian 8 instances in the + # us-central1-a Zone of n1-standard-1 machine type. + gce: + instance_names: test1,test2,test3 + zone: us-central1-a + machine_type: n1-standard-1 + image: debian-8 + state: present + service_account_email: "{{ service_account_email }}" + credentials_file: "{{ credentials_file }}" + project_id: "{{ project_id }}" + metadata : '{ "startup-script" : "apt-get update" }' register: gce - - name: Wait for SSH to come up - local_action: wait_for host={{item.public_ip}} port=22 delay=10 - timeout=60 state=started - with_items: {{gce.instance_data}} - -- name: Configure instance(s) - hosts: launched - become: True - roles: - - my_awesome_role - - my_awesome_tasks - -- name: Terminate instances - hosts: localhost - connection: local - tasks: - - name: Terminate instances that were previously launched - local_action: - module: gce - state: 'absent' - instance_names: {{gce.instance_names}} - -# The deprecated PEM file credentials can be used as follows -- name: Create a sandbox instance with PEM credentials - hosts: localhost - vars: - names: foo,bar - machine_type: n1-standard-1 - image: debian-6 - zone: us-central1-a - service_account_email: unique-email@developer.gserviceaccount.com - pem_file: /path/to/pem_file - project_id: project-id - tasks: - - name: Launch instances - local_action: gce instance_names={{names}} machine_type={{machine_type}} - image={{image}} zone={{zone}} - service_account_email={{ service_account_email }} - pem_file={{ pem_file }} - project_id={{ project_id }} - register: gce - - name: Wait for SSH to come up - local_action: wait_for host={{item.public_ip}} port=22 delay=10 - timeout=60 state=started - with_items: {{gce.instance_data}} + - name: Save host data + add_host: + hostname: "{{ item.public_ip }}" + groupname: gce_instances_ips + with_items: "{{ gce.instance_data }}" + + - name: Wait for SSH for instances + wait_for: + delay: 1 + host: "{{ item.public_ip }}" + port: 22 + state: started + timeout: 30 
+ with_items: "{{ gce.instance_data }}" + + - name: Configure Hosts + hosts: gce_instances_ips + become: yes + become_method: sudo + roles: + - my-role-one + - my-role-two + tags: + - config + + - name: delete test-instances + # Basic termination of instance. + gce: + service_account_email: "{{ service_account_email }}" + credentials_file: "{{ credentials_file }}" + project_id: "{{ project_id }}" + instance_names: "{{ gce.instance_names }}" + zone: us-central1-a + state: absent + tags: + - delete ''' import socket @@ -285,6 +324,10 @@ def get_instance_info(inst): netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] except: netname = None + try: + subnetname = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1] + except: + subnetname = None if 'disks' in inst.extra: disk_names = [disk_info['source'].split('/')[-1] for disk_info @@ -305,6 +348,7 @@ def get_instance_info(inst): 'metadata': metadata, 'name': inst.name, 'network': netname, + 'subnetwork': subnetname, 'private_ip': inst.private_ips[0], 'public_ip': public_ip, 'status': ('status' in inst.extra) and inst.extra['status'] or None, @@ -313,7 +357,7 @@ def get_instance_info(inst): }) -def create_instances(module, gce, instance_names): +def create_instances(module, gce, instance_names, number): """Creates new instances. 
Attributes other than instance_names are picked up from 'module' @@ -330,6 +374,7 @@ def create_instances(module, gce, instance_names): machine_type = module.params.get('machine_type') metadata = module.params.get('metadata') network = module.params.get('network') + subnetwork = module.params.get('subnetwork') persistent_boot_disk = module.params.get('persistent_boot_disk') disks = module.params.get('disks') state = module.params.get('state') @@ -356,7 +401,7 @@ def create_instances(module, gce, instance_names): instance_external_ip = gce.ex_get_address(instance_external_ip) else: instance_external_ip = 'ephemeral' - except GoogleBaseError, e: + except GoogleBaseError as e: module.fail_json(msg='Unexpected error attempting to get a static ip %s, error: %s' % (external_ip, e.value)) else: instance_external_ip = external_ip @@ -410,7 +455,7 @@ def create_instances(module, gce, instance_names): bad_perms = [] if service_account_permissions: for perm in service_account_permissions: - if perm not in gce.SA_SCOPES_MAP.keys(): + if perm not in gce.SA_SCOPES_MAP: bad_perms.append(perm) if len(bad_perms) > 0: module.fail_json(msg='bad permissions: %s' % str(bad_perms)) @@ -422,38 +467,62 @@ def create_instances(module, gce, instance_names): module.fail_json(msg='Missing required create instance variable', changed=False) - for name in instance_names: - pd = None - if lc_disks: - pd = lc_disks[0] - elif persistent_boot_disk: + gce_args = dict( + location=lc_zone, + ex_network=network, ex_tags=tags, ex_metadata=metadata, + ex_can_ip_forward=ip_forward, + external_ip=instance_external_ip, ex_disk_auto_delete=disk_auto_delete, + ex_service_accounts=ex_sa_perms + ) + if preemptible is not None: + gce_args['ex_preemptible'] = preemptible + if subnetwork is not None: + gce_args['ex_subnetwork'] = subnetwork + + if isinstance(instance_names, str) and not number: + instance_names = [instance_names] + + if isinstance(instance_names, str) and number: + instance_responses = 
gce.ex_create_multiple_nodes(instance_names, lc_machine_type, + lc_image(), number, **gce_args) + for resp in instance_responses: + n = resp + if isinstance(resp, libcloud.compute.drivers.gce.GCEFailedNode): + try: + n = gce.ex_get_node(n.name, lc_zone) + except ResourceNotFoundError: + pass + else: + # Assure that at least one node has been created to set changed=True + changed = True + new_instances.append(n) + else: + for instance in instance_names: + pd = None + if lc_disks: + pd = lc_disks[0] + elif persistent_boot_disk: + try: + pd = gce.ex_get_volume("%s" % instance, lc_zone) + except ResourceNotFoundError: + pd = gce.create_volume(None, "%s" % instance, image=lc_image()) + gce_args['ex_boot_disk'] = pd + + inst = None try: - pd = gce.ex_get_volume("%s" % name, lc_zone) + inst = gce.ex_get_node(instance, lc_zone) except ResourceNotFoundError: - pd = gce.create_volume(None, "%s" % name, image=lc_image()) - - gce_args = dict( - location=lc_zone, - ex_network=network, ex_tags=tags, ex_metadata=metadata, - ex_boot_disk=pd, ex_can_ip_forward=ip_forward, - external_ip=instance_external_ip, ex_disk_auto_delete=disk_auto_delete, - ex_service_accounts=ex_sa_perms - ) - if preemptible is not None: - gce_args['ex_preemptible'] = preemptible - - inst = None - try: - inst = gce.ex_get_node(name, lc_zone) - except ResourceNotFoundError: - inst = gce.create_node( - name, lc_machine_type, lc_image(), **gce_args - ) - changed = True - except GoogleBaseError as e: - module.fail_json(msg='Unexpected error attempting to create ' + - 'instance %s, error: %s' % (name, e.value)) + inst = gce.create_node( + instance, lc_machine_type, lc_image(), **gce_args + ) + changed = True + except GoogleBaseError as e: + module.fail_json(msg='Unexpected error attempting to create ' + + 'instance %s, error: %s' % (instance, e.value)) + if inst: + new_instances.append(inst) + for inst in new_instances: for i, lc_disk in enumerate(lc_disks): # Check whether the disk is already attached if 
(len(inst.extra['disks']) > i): @@ -476,9 +545,6 @@ def create_instances(module, gce, instance_names): inst.extra['disks'].append( {'source': lc_disk.extra['selfLink'], 'index': i}) - if inst: - new_instances.append(inst) - instance_names = [] instance_json_data = [] for inst in new_instances: @@ -488,35 +554,60 @@ def create_instances(module, gce, instance_names): return (changed, instance_json_data, instance_names) - -def terminate_instances(module, gce, instance_names, zone_name): - """Terminates a list of instances. +def change_instance_state(module, gce, instance_names, number, zone_name, state): + """Changes the state of a list of instances. For example, + change from started to stopped, or started to absent. module: Ansible module object gce: authenticated GCE connection object instance_names: a list of instance names to terminate zone_name: the zone where the instances reside prior to termination + state: 'state' parameter passed into module as argument - Returns a dictionary of instance names that were terminated. + Returns a dictionary of instance names that were changed. 
""" changed = False - terminated_instance_names = [] - for name in instance_names: + nodes = [] + state_instance_names = [] + + if isinstance(instance_names, str) and number: + node_names = ['%s-%03d' % (instance_names, i) for i in range(number)] + elif isinstance(instance_names, str) and not number: + node_names = [instance_names] + else: + node_names = instance_names + + for name in node_names: inst = None try: inst = gce.ex_get_node(name, zone_name) except ResourceNotFoundError: - pass + state_instance_names.append(name) except Exception as e: module.fail_json(msg=unexpected_error_msg(e), changed=False) - if inst: - gce.destroy_node(inst) - terminated_instance_names.append(inst.name) - changed = True - - return (changed, terminated_instance_names) + else: + nodes.append(inst) + state_instance_names.append(name) + if state in ['absent', 'deleted'] and number: + changed_nodes = gce.ex_destroy_multiple_nodes(nodes) or [False] + changed = reduce(lambda x, y: x or y, changed_nodes) + else: + for node in nodes: + if state in ['absent', 'deleted']: + gce.destroy_node(node) + changed = True + elif state == 'started' and \ + node.state == libcloud.compute.types.NodeState.STOPPED: + gce.ex_start_node(node) + changed = True + elif state in ['stopped', 'terminated'] and \ + node.state == libcloud.compute.types.NodeState.RUNNING: + gce.ex_stop_node(node) + changed = True + + return (changed, state_instance_names) def main(): module = AnsibleModule( @@ -525,11 +616,14 @@ def main(): instance_names = dict(), machine_type = dict(default='n1-standard-1'), metadata = dict(), - name = dict(), + name = dict(aliases=['base_name']), + num_instances = dict(type='int'), network = dict(default='default'), + subnetwork = dict(), persistent_boot_disk = dict(type='bool', default=False), disks = dict(type='list'), - state = dict(choices=['active', 'present', 'absent', 'deleted'], + state = dict(choices=['active', 'present', 'absent', 'deleted', + 'started', 'stopped', 'terminated'], 
default='present'), tags = dict(type='list'), zone = dict(default='us-central1-a'), @@ -542,7 +636,8 @@ def main(): external_ip=dict(default='ephemeral'), disk_auto_delete = dict(type='bool', default=True), preemptible = dict(type='bool', default=None), - ) + ), + mutually_exclusive=[('instance_names', 'name')] ) if not HAS_PYTHON26: @@ -557,7 +652,9 @@ def main(): machine_type = module.params.get('machine_type') metadata = module.params.get('metadata') name = module.params.get('name') + number = module.params.get('num_instances') network = module.params.get('network') + subnetwork = module.params.get('subnetwork') persistent_boot_disk = module.params.get('persistent_boot_disk') state = module.params.get('state') tags = module.params.get('tags') @@ -566,13 +663,13 @@ def main(): preemptible = module.params.get('preemptible') changed = False - inames = [] + inames = None if isinstance(instance_names, list): inames = instance_names elif isinstance(instance_names, str): inames = instance_names.split(',') if name: - inames.append(name) + inames = name if not inames: module.fail_json(msg='Must specify a "name" or "instance_names"', changed=False) @@ -583,23 +680,27 @@ def main(): module.fail_json(msg="Apache Libcloud 0.20.0+ is required to use 'preemptible' option", changed=False) + if subnetwork is not None and not hasattr(gce, 'ex_get_subnetwork'): + module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'subnetwork' option", + changed=False) + json_output = {'zone': zone} - if state in ['absent', 'deleted']: - json_output['state'] = 'absent' - (changed, terminated_instance_names) = terminate_instances( - module, gce, inames, zone) + if state in ['absent', 'deleted', 'started', 'stopped', 'terminated']: + json_output['state'] = state + (changed, state_instance_names) = change_instance_state( + module, gce, inames, number, zone, state) # based on what user specified, return the same variable, although # value could be different if an instance could not be 
destroyed - if instance_names: - json_output['instance_names'] = terminated_instance_names + if instance_names or name and number: + json_output['instance_names'] = state_instance_names elif name: json_output['name'] = name elif state in ['active', 'present']: json_output['state'] = 'present' (changed, instance_data, instance_name_list) = create_instances( - module, gce, inames) + module, gce, inames, number) json_output['instance_data'] = instance_data if instance_names: json_output['instance_names'] = instance_name_list diff --git a/cloud/google/gce_lb.py b/cloud/google/gce_lb.py index fce797ba3e8..dc31bb3b421 100644 --- a/cloud/google/gce_lb.py +++ b/cloud/google/gce_lb.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: gce_lb @@ -109,6 +113,7 @@ default: "present" choices: ["active", "present", "absent", "deleted"] aliases: [] + required: false service_account_email: version_added: "1.6" description: @@ -221,7 +226,7 @@ def main(): gcelb = get_driver_lb(Provider_lb.GCE)(gce_driver=gce) gcelb.connection.user_agent_append("%s/%s" % ( USER_AGENT_PRODUCT, USER_AGENT_VERSION)) - except Exception, e: + except Exception as e: module.fail_json(msg=unexpected_error_msg(e), changed=False) changed = False @@ -247,7 +252,7 @@ def main(): changed = True except ResourceExistsError: hc = gce.ex_get_healthcheck(httphealthcheck_name) - except Exception, e: + except Exception as e: module.fail_json(msg=unexpected_error_msg(e), changed=False) if hc is not None: @@ -291,7 +296,7 @@ def main(): changed = True except ResourceExistsError: lb = gcelb.get_balancer(name) - except Exception, e: + except Exception as e: module.fail_json(msg=unexpected_error_msg(e), changed=False) if lb is not None: @@ -317,7 +322,7 @@ def main(): changed = True except ResourceNotFoundError: pass - except 
Exception, e: + except Exception as e: module.fail_json(msg=unexpected_error_msg(e), changed=False) # destroy the health check if specified @@ -329,7 +334,7 @@ def main(): changed = True except ResourceNotFoundError: pass - except Exception, e: + except Exception as e: module.fail_json(msg=unexpected_error_msg(e), changed=False) diff --git a/cloud/google/gce_mig.py b/cloud/google/gce_mig.py new file mode 100644 index 00000000000..024055dff30 --- /dev/null +++ b/cloud/google/gce_mig.py @@ -0,0 +1,908 @@ +#!/usr/bin/python +# Copyright 2016 Google Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: gce_mig +version_added: "2.2" +short_description: Create, Update or Destroy a Managed Instance Group (MIG). +description: + - Create, Update or Destroy a Managed Instance Group (MIG). See + U(https://cloud.google.com/compute/docs/instance-groups) for an overview. + Full install/configuration instructions for the gce* modules can + be found in the comments of ansible/test/gce_tests.py. +requirements: + - "python >= 2.6" + - "apache-libcloud >= 1.2.0" +notes: + - Resizing and Recreating VM are also supported. + - An existing instance template is required in order to create a + Managed Instance Group. 
+author: + - "Tom Melendez (@supertom) " +options: + name: + description: + - Name of the Managed Instance Group. + required: true + template: + description: + - Instance Template to be used in creating the VMs. See + U(https://cloud.google.com/compute/docs/instance-templates) to learn more + about Instance Templates. Required for creating MIGs. + required: false + size: + description: + - Size of Managed Instance Group. If MIG already exists, it will be + resized to the number provided here. Required for creating MIGs. + required: false + service_account_email: + description: + - service account email + required: false + default: null + credentials_file: + description: + - Path to the JSON file associated with the service account email + default: null + required: false + project_id: + description: + - GCE project ID + required: false + default: null + state: + description: + - desired state of the resource + required: false + default: "present" + choices: ["absent", "present"] + zone: + description: + - The GCE zone to use for this Managed Instance Group. + required: true + autoscaling: + description: + - A dictionary of configuration for the autoscaler. 'enabled (bool)', 'name (str)' + and policy.max_instances (int) are required fields if autoscaling is used. See + U(https://cloud.google.com/compute/docs/reference/beta/autoscalers) for more information + on Autoscaling. + required: false + default: null + named_ports: + version_added: "2.3" + description: + - Define named ports that backend services can forward data to. Format is a a list of + name:port dictionaries. + required: false + default: null +''' + +EXAMPLES = ''' +# Following playbook creates, rebuilds instances, resizes and then deletes a MIG. +# Notes: +# - Two valid Instance Templates must exist in your GCE project in order to run +# this playbook. Change the fields to match the templates used in your +# project. +# - The use of the 'pause' module is not required, it is just for convenience. 
+- name: Managed Instance Group Example + hosts: localhost + gather_facts: False + tasks: + - name: Create MIG + gce_mig: + name: ansible-mig-example + zone: us-central1-c + state: present + size: 1 + template: my-instance-template-1 + named_ports: + - name: http + port: 80 + - name: foobar + port: 82 + + - name: Pause for 30 seconds + pause: + seconds: 30 + + - name: Recreate MIG Instances with Instance Template change. + gce_mig: + name: ansible-mig-example + zone: us-central1-c + state: present + template: my-instance-template-2-small + recreate_instances: yes + + - name: Pause for 30 seconds + pause: + seconds: 30 + + - name: Resize MIG + gce_mig: + name: ansible-mig-example + zone: us-central1-c + state: present + size: 3 + + - name: Update MIG with Autoscaler + gce_mig: + name: ansible-mig-example + zone: us-central1-c + state: present + size: 3 + template: my-instance-template-2-small + recreate_instances: yes + autoscaling: + enabled: yes + name: my-autoscaler + policy: + min_instances: 2 + max_instances: 5 + cool_down_period: 37 + cpu_utilization: + target: .39 + load_balancing_utilization: + target: 0.4 + + - name: Pause for 30 seconds + pause: + seconds: 30 + + - name: Delete MIG + gce_mig: + name: ansible-mig-example + zone: us-central1-c + state: absent + autoscaling: + enabled: no + name: my-autoscaler +''' +RETURN = ''' +zone: + description: Zone in which to launch MIG. + returned: always + type: string + sample: "us-central1-b" + +template: + description: Instance Template to use for VMs. Must exist prior to using with MIG. + returned: changed + type: string + sample: "my-instance-template" + +name: + description: Name of the Managed Instance Group. 
+ returned: changed + type: string + sample: "my-managed-instance-group" + +named_ports: + description: list of named ports acted upon + returned: when named_ports are initially set or updated + type: list + sample: [{ "name": "http", "port": 80 }, { "name": "foo", "port": 82 }] + +size: + description: Number of VMs in Managed Instance Group. + returned: changed + type: integer + sample: 4 + +created_instances: + description: Names of instances created. + returned: When instances are created. + type: list + sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"] + +deleted_instances: + description: Names of instances deleted. + returned: When instances are deleted. + type: list + sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"] + +resize_created_instances: + description: Names of instances created during resizing. + returned: When a resize results in the creation of instances. + type: list + sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"] + +resize_deleted_instances: + description: Names of instances deleted during resizing. + returned: When a resize results in the deletion of instances. + type: list + sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"] + +recreated_instances: + description: Names of instances recreated. + returned: When instances are recreated. + type: list + sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"] + +created_autoscaler: + description: True if Autoscaler was attempted and created. False otherwise. + returned: When the creation of an Autoscaler was attempted. + type: bool + sample: true + +updated_autoscaler: + description: True if an Autoscaler update was attempted and succeeded. + False returned if update failed. + returned: When the update of an Autoscaler was attempted. + type: bool + sample: true + +deleted_autoscaler: + description: True if an Autoscaler delete attempted and succeeded. 
+ False returned if delete failed. + returned: When the delete of an Autoscaler was attempted. + type: bool + sample: true + +set_named_ports: + description: True if the named_ports have been set + returned: named_ports have been set + type: bool + sample: true + +updated_named_ports: + description: True if the named_ports have been updated + returned: named_ports have been updated + type: bool + sample: true +''' + +import socket + +try: + import libcloud + from libcloud.compute.types import Provider + from libcloud.compute.providers import get_driver + from libcloud.common.google import GoogleBaseError, QuotaExceededError, \ + ResourceExistsError, ResourceInUseError, ResourceNotFoundError + from libcloud.compute.drivers.gce import GCEAddress + _ = Provider.GCE + HAS_LIBCLOUD = True +except ImportError: + HAS_LIBCLOUD = False + +try: + from ast import literal_eval + HAS_PYTHON26 = True +except ImportError: + HAS_PYTHON26 = False + + +def _check_params(params, field_list): + """ + Helper to validate params. + + Use this in function definitions if they require specific fields + to be present. + + :param params: structure that contains the fields + :type params: ``dict`` + + :param field_list: list of dict representing the fields + [{'name': str, 'required': True/False', 'type': cls}] + :type field_list: ``list`` of ``dict`` + + :return True, exits otherwise + :rtype: ``bool`` + """ + for d in field_list: + if not d['name'] in params: + if d['required'] is True: + return (False, "%s is required and must be of type: %s" % + (d['name'], str(d['type']))) + else: + if not isinstance(params[d['name']], d['type']): + return (False, + "%s must be of type: %s" % (d['name'], str(d['type']))) + + return (True, '') + + +def _validate_autoscaling_params(params): + """ + Validate that the minimum configuration is present for autoscaling. 
+ + :param params: Ansible dictionary containing autoscaling configuration + It is expected that autoscaling config will be found at the + key 'autoscaling'. + :type params: ``dict`` + + :return: Tuple containing a boolean and a string. True if autoscaler + is valid, False otherwise, plus str for message. + :rtype: ``(``bool``, ``str``)`` + """ + if not params['autoscaling']: + # It's optional, so if not set at all, it's valid. + return (True, '') + if not isinstance(params['autoscaling'], dict): + return (False, + 'autoscaling: configuration expected to be a dictionary.') + + # check first-level required fields + as_req_fields = [ + {'name': 'name', 'required': True, 'type': str}, + {'name': 'enabled', 'required': True, 'type': bool}, + {'name': 'policy', 'required': True, 'type': dict} + ] # yapf: disable + + (as_req_valid, as_req_msg) = _check_params(params['autoscaling'], + as_req_fields) + if not as_req_valid: + return (False, as_req_msg) + + # check policy configuration + as_policy_fields = [ + {'name': 'max_instances', 'required': True, 'type': int}, + {'name': 'min_instances', 'required': False, 'type': int}, + {'name': 'cool_down_period', 'required': False, 'type': int} + ] # yapf: disable + + (as_policy_valid, as_policy_msg) = _check_params( + params['autoscaling']['policy'], as_policy_fields) + if not as_policy_valid: + return (False, as_policy_msg) + + # TODO(supertom): check utilization fields + + return (True, '') + + +def _validate_named_port_params(params): + """ + Validate the named ports parameters + + :param params: Ansible dictionary containing named_ports configuration + It is expected that autoscaling config will be found at the + key 'named_ports'. That key should contain a list of + {name : port} dictionaries. + :type params: ``dict`` + + :return: Tuple containing a boolean and a string. True if params + are valid, False otherwise, plus str for message. 
+ :rtype: ``(``bool``, ``str``)`` + """ + if not params['named_ports']: + # It's optional, so if not set at all, it's valid. + return (True, '') + if not isinstance(params['named_ports'], list): + return (False, 'named_ports: expected list of name:port dictionaries.') + req_fields = [ + {'name': 'name', 'required': True, 'type': str}, + {'name': 'port', 'required': True, 'type': int} + ] # yapf: disable + + for np in params['named_ports']: + (valid_named_ports, np_msg) = _check_params(np, req_fields) + if not valid_named_ports: + return (False, np_msg) + + return (True, '') + + +def _get_instance_list(mig, field='name', filter_list=['NONE']): + """ + Helper to grab field from instances response. + + :param mig: Managed Instance Group Object from libcloud. + :type mig: :class: `GCEInstanceGroupManager` + + :param field: Field name in list_managed_instances response. Defaults + to 'name'. + :type field: ``str`` + + :param filter_list: list of 'currentAction' strings to filter on. Only + items that match a currentAction in this list will + be returned. Default is "['NONE']". + :type filter_list: ``list`` of ``str`` + + :return: List of strings from list_managed_instances response. + :rtype: ``list`` + """ + return [x[field] for x in mig.list_managed_instances() + if x['currentAction'] in filter_list] + + +def _gen_gce_as_policy(as_params): + """ + Take Autoscaler params and generate GCE-compatible policy. + + :param as_params: Dictionary in Ansible-playbook format + containing policy arguments. 
+ :type as_params: ``dict`` + + :return: GCE-compatible policy dictionary + :rtype: ``dict`` + """ + asp_data = {} + asp_data['maxNumReplicas'] = as_params['max_instances'] + if 'min_instances' in as_params: + asp_data['minNumReplicas'] = as_params['min_instances'] + if 'cool_down_period' in as_params: + asp_data['coolDownPeriodSec'] = as_params['cool_down_period'] + if 'cpu_utilization' in as_params and 'target' in as_params[ + 'cpu_utilization']: + asp_data['cpuUtilization'] = {'utilizationTarget': + as_params['cpu_utilization']['target']} + if 'load_balancing_utilization' in as_params and 'target' in as_params[ + 'load_balancing_utilization']: + asp_data['loadBalancingUtilization'] = { + 'utilizationTarget': + as_params['load_balancing_utilization']['target'] + } + + return asp_data + + +def create_autoscaler(gce, mig, params): + """ + Create a new Autoscaler for a MIG. + + :param gce: An initialized GCE driver object. + :type gce: :class: `GCENodeDriver` + + :param mig: An initialized GCEInstanceGroupManager. + :type mig: :class: `GCEInstanceGroupManager` + + :param params: Dictionary of autoscaling parameters. + :type params: ``dict`` + + :return: Tuple with changed stats. + :rtype: tuple in the format of (bool, list) + """ + changed = False + as_policy = _gen_gce_as_policy(params['policy']) + autoscaler = gce.ex_create_autoscaler(name=params['name'], zone=mig.zone, + instance_group=mig, policy=as_policy) + if autoscaler: + changed = True + return changed + + +def update_autoscaler(gce, autoscaler, params): + """ + Update an Autoscaler. + + Takes an existing Autoscaler object, and updates it with + the supplied params before calling libcloud's update method. + + :param gce: An initialized GCE driver object. + :type gce: :class: `GCENodeDriver` + + :param autoscaler: An initialized GCEAutoscaler. + :type autoscaler: :class: `GCEAutoscaler` + + :param params: Dictionary of autoscaling parameters. 
+ :type params: ``dict`` + + :return: True if changes, False otherwise. + :rtype: ``bool`` + """ + as_policy = _gen_gce_as_policy(params['policy']) + if autoscaler.policy != as_policy: + autoscaler.policy = as_policy + autoscaler = gce.ex_update_autoscaler(autoscaler) + if autoscaler: + return True + return False + + +def delete_autoscaler(autoscaler): + """ + Delete an Autoscaler. Does not affect MIG. + + :param mig: Managed Instance Group Object from Libcloud. + :type mig: :class: `GCEInstanceGroupManager` + + :return: Tuple with changed stats and a list of affected instances. + :rtype: tuple in the format of (bool, list) + """ + changed = False + if autoscaler.destroy(): + changed = True + return changed + + +def get_autoscaler(gce, name, zone): + """ + Get an Autoscaler from GCE. + + If the Autoscaler is not found, None is found. + + :param gce: An initialized GCE driver object. + :type gce: :class: `GCENodeDriver` + + :param name: Name of the Autoscaler. + :type name: ``str`` + + :param zone: Zone that the Autoscaler is located in. + :type zone: ``str`` + + :return: A GCEAutoscaler object or None. + :rtype: :class: `GCEAutoscaler` or None + """ + try: + # Does the Autoscaler already exist? + return gce.ex_get_autoscaler(name, zone) + + except ResourceNotFoundError: + return None + + +def create_mig(gce, params): + """ + Create a new Managed Instance Group. + + :param gce: An initialized GCE driver object. + :type gce: :class: `GCENodeDriver` + + :param params: Dictionary of parameters needed by the module. + :type params: ``dict`` + + :return: Tuple with changed stats and a list of affected instances. 
+ :rtype: tuple in the format of (bool, list) + """ + + changed = False + return_data = [] + actions_filter = ['CREATING'] + + mig = gce.ex_create_instancegroupmanager( + name=params['name'], size=params['size'], template=params['template'], + zone=params['zone']) + + if mig: + changed = True + return_data = _get_instance_list(mig, filter_list=actions_filter) + + return (changed, return_data) + + +def delete_mig(mig): + """ + Delete a Managed Instance Group. All VMs in that MIG are also deleted." + + :param mig: Managed Instance Group Object from Libcloud. + :type mig: :class: `GCEInstanceGroupManager` + + :return: Tuple with changed stats and a list of affected instances. + :rtype: tuple in the format of (bool, list) + """ + changed = False + return_data = [] + actions_filter = ['NONE', 'CREATING', 'RECREATING', 'DELETING', + 'ABANDONING', 'RESTARTING', 'REFRESHING'] + instance_names = _get_instance_list(mig, filter_list=actions_filter) + if mig.destroy(): + changed = True + return_data = instance_names + + return (changed, return_data) + + +def recreate_instances_in_mig(mig): + """ + Recreate the instances for a Managed Instance Group. + + :param mig: Managed Instance Group Object from libcloud. + :type mig: :class: `GCEInstanceGroupManager` + + :return: Tuple with changed stats and a list of affected instances. + :rtype: tuple in the format of (bool, list) + """ + changed = False + return_data = [] + actions_filter = ['RECREATING'] + + if mig.recreate_instances(): + changed = True + return_data = _get_instance_list(mig, filter_list=actions_filter) + + return (changed, return_data) + + +def resize_mig(mig, size): + """ + Resize a Managed Instance Group. + + Based on the size provided, GCE will automatically create and delete + VMs as needed. + + :param mig: Managed Instance Group Object from libcloud. + :type mig: :class: `GCEInstanceGroupManager` + + :return: Tuple with changed stats and a list of affected instances. 
+ :rtype: tuple in the format of (bool, list) + """ + changed = False + return_data = [] + actions_filter = ['CREATING', 'DELETING'] + + if mig.resize(size): + changed = True + return_data = _get_instance_list(mig, filter_list=actions_filter) + + return (changed, return_data) + + +def get_mig(gce, name, zone): + """ + Get a Managed Instance Group from GCE. + + If the MIG is not found, None is found. + + :param gce: An initialized GCE driver object. + :type gce: :class: `GCENodeDriver` + + :param name: Name of the Managed Instance Group. + :type name: ``str`` + + :param zone: Zone that the Managed Instance Group is located in. + :type zone: ``str`` + + :return: A GCEInstanceGroupManager object or None. + :rtype: :class: `GCEInstanceGroupManager` or None + """ + try: + # Does the MIG already exist? + return gce.ex_get_instancegroupmanager(name=name, zone=zone) + + except ResourceNotFoundError: + return None + + +def update_named_ports(mig, named_ports): + """ + Set the named ports on a Managed Instance Group. + + Sort the existing named ports and new. If different, update. + This also implicitly allows for the removal of named_por + + :param mig: Managed Instance Group Object from libcloud. 
+ :type mig: :class: `GCEInstanceGroupManager` + + :param named_ports: list of dictionaries in the format of {'name': port} + :type named_ports: ``list`` of ``dict`` + + :return: True if successful + :rtype: ``bool`` + """ + changed = False + existing_ports = [] + new_ports = [] + if hasattr(mig.instance_group, 'named_ports'): + existing_ports = sorted(mig.instance_group.named_ports, + key=lambda x: x['name']) + if named_ports is not None: + new_ports = sorted(named_ports, key=lambda x: x['name']) + + if existing_ports != new_ports: + if mig.instance_group.set_named_ports(named_ports): + changed = True + + return changed + + +def main(): + module = AnsibleModule(argument_spec=dict( + name=dict(required=True), + template=dict(), + recreate_instances=dict(type='bool', default=False), + # Do not set a default size here. For Create and some update + # operations, it is required and should be explicitly set. + # Below, we set it to the existing value if it has not been set. + size=dict(type='int'), + state=dict(choices=['absent', 'present'], default='present'), + zone=dict(required=True), + autoscaling=dict(type='dict', default=None), + named_ports=dict(type='list', default=None), + service_account_email=dict(), + service_account_permissions=dict(type='list'), + pem_file=dict(), + credentials_file=dict(), + project_id=dict(), ), ) + + if not HAS_PYTHON26: + module.fail_json( + msg="GCE module requires python's 'ast' module, python v2.6+") + if not HAS_LIBCLOUD: + module.fail_json( + msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.') + + gce = gce_connect(module) + if not hasattr(gce, 'ex_create_instancegroupmanager'): + module.fail_json( + msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.', + changed=False) + + params = {} + params['state'] = module.params.get('state') + params['zone'] = module.params.get('zone') + params['name'] = module.params.get('name') + params['size'] = 
module.params.get('size') + params['template'] = module.params.get('template') + params['recreate_instances'] = module.params.get('recreate_instances') + params['autoscaling'] = module.params.get('autoscaling', None) + params['named_ports'] = module.params.get('named_ports', None) + + (valid_autoscaling, as_msg) = _validate_autoscaling_params(params) + if not valid_autoscaling: + module.fail_json(msg=as_msg, changed=False) + + if params['named_ports'] is not None and not hasattr( + gce, 'ex_instancegroup_set_named_ports'): + module.fail_json( + msg="Apache Libcloud 1.3.0+ is required to use 'named_ports' option", + changed=False) + + (valid_named_ports, np_msg) = _validate_named_port_params(params) + if not valid_named_ports: + module.fail_json(msg=np_msg, changed=False) + + changed = False + json_output = {'state': params['state'], 'zone': params['zone']} + mig = get_mig(gce, params['name'], params['zone']) + + if not mig: + if params['state'] == 'absent': + # Doesn't exist in GCE, and state==absent. + changed = False + module.fail_json( + msg="Cannot delete unknown managed instance group: %s" % + (params['name'])) + else: + # Create MIG + req_create_fields = [ + {'name': 'template', 'required': True, 'type': str}, + {'name': 'size', 'required': True, 'type': int} + ] # yapf: disable + + (valid_create_fields, valid_create_msg) = _check_params( + params, req_create_fields) + if not valid_create_fields: + module.fail_json(msg=valid_create_msg, changed=False) + + (changed, json_output['created_instances']) = create_mig(gce, + params) + if params['autoscaling'] and params['autoscaling'][ + 'enabled'] is True: + # Fetch newly-created MIG and create Autoscaler for it. 
+ mig = get_mig(gce, params['name'], params['zone']) + if not mig: + module.fail_json( + msg='Unable to fetch created MIG %s to create \ + autoscaler in zone: %s' % ( + params['name'], params['zone']), changed=False) + + if not create_autoscaler(gce, mig, params['autoscaling']): + module.fail_json( + msg='Unable to fetch MIG %s to create autoscaler \ + in zone: %s' % (params['name'], params['zone']), + changed=False) + + json_output['created_autoscaler'] = True + # Add named ports if available + if params['named_ports']: + mig = get_mig(gce, params['name'], params['zone']) + if not mig: + module.fail_json( + msg='Unable to fetch created MIG %s to create \ + autoscaler in zone: %s' % ( + params['name'], params['zone']), changed=False) + json_output['set_named_ports'] = update_named_ports( + mig, params['named_ports']) + if json_output['set_named_ports']: + json_output['named_ports'] = params['named_ports'] + + elif params['state'] == 'absent': + # Delete MIG + + # First, check and remove the autoscaler, if present. + # Note: multiple autoscalers can be associated to a single MIG. We + # only handle the one that is named, but we might want to think about this. + if params['autoscaling']: + autoscaler = get_autoscaler(gce, params['autoscaling']['name'], + params['zone']) + if not autoscaler: + module.fail_json(msg='Unable to fetch autoscaler %s to delete \ + in zone: %s' % (params['autoscaling']['name'], params['zone']), + changed=False) + + changed = delete_autoscaler(autoscaler) + json_output['deleted_autoscaler'] = changed + + # Now, delete the MIG. + (changed, json_output['deleted_instances']) = delete_mig(mig) + + else: + # Update MIG + + # If we're going to update a MIG, we need a size and template values. + # If not specified, we use the values from the existing MIG. 
+ if not params['size']: + params['size'] = mig.size + + if not params['template']: + params['template'] = mig.template.name + + if params['template'] != mig.template.name: + # Update Instance Template. + new_template = gce.ex_get_instancetemplate(params['template']) + mig.set_instancetemplate(new_template) + json_output['updated_instancetemplate'] = True + changed = True + if params['recreate_instances'] is True: + # Recreate Instances. + (changed, json_output['recreated_instances'] + ) = recreate_instances_in_mig(mig) + + if params['size'] != mig.size: + # Resize MIG. + keystr = 'created' if params['size'] > mig.size else 'deleted' + (changed, json_output['resize_%s_instances' % + (keystr)]) = resize_mig(mig, params['size']) + + # Update Autoscaler + if params['autoscaling']: + autoscaler = get_autoscaler(gce, params['autoscaling']['name'], + params['zone']) + if not autoscaler: + # Try to create autoscaler. + # Note: this isn't perfect, if the autoscaler name has changed + # we wouldn't know that here. + if not create_autoscaler(gce, mig, params['autoscaling']): + module.fail_json( + msg='Unable to create autoscaler %s for existing MIG %s\ + in zone: %s' % (params['autoscaling']['name'], + params['name'], params['zone']), + changed=False) + json_output['created_autoscaler'] = True + changed = True + else: + if params['autoscaling']['enabled'] is False: + # Delete autoscaler + changed = delete_autoscaler(autoscaler) + json_output['delete_autoscaler'] = changed + else: + # Update policy, etc. 
+ changed = update_autoscaler(gce, autoscaler, + params['autoscaling']) + json_output['updated_autoscaler'] = changed + named_ports = params['named_ports'] or [] + json_output['updated_named_ports'] = update_named_ports(mig, + named_ports) + if json_output['updated_named_ports']: + json_output['named_ports'] = named_ports + + json_output['changed'] = changed + json_output.update(params) + module.exit_json(**json_output) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.gce import * +if __name__ == '__main__': + main() diff --git a/cloud/google/gce_net.py b/cloud/google/gce_net.py index 0fee4a15ef3..aec0a294271 100644 --- a/cloud/google/gce_net.py +++ b/cloud/google/gce_net.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: gce_net @@ -34,6 +38,7 @@ allowed: description: - the protocol:ports to allow ('tcp:80' or 'tcp:80,443' or 'tcp:80-800;udp:1-25') + this parameter is mandatory when creating or updating a firewall rule required: false default: null aliases: [] @@ -110,6 +115,39 @@ required: false default: null aliases: [] + mode: + version_added: "2.2" + description: + - network mode for Google Cloud + "legacy" indicates a network with an IP address range + "auto" automatically generates subnetworks in different regions + "custom" uses networks to group subnets of user specified IP address ranges + https://cloud.google.com/compute/docs/networking#network_types + required: false + default: "legacy" + choices: ["legacy", "auto", "custom"] + aliases: [] + subnet_name: + version_added: "2.2" + description: + - name of subnet to create + required: false + default: null + aliases: [] + subnet_region: + version_added: "2.2" + description: + - region of subnet to create + required: false + default: null + aliases: [] + 
subnet_desc: + version_added: "2.2" + description: + - description of subnet to create + required: false + default: null + aliases: [] requirements: - "python >= 2.6" @@ -132,6 +170,21 @@ allowed: tcp:80,8080 src_tags: ["web", "proxy"] +# Simple example of creating a new auto network +- local_action: + module: gce_net + name: privatenet + mode: auto + +# Simple example of creating a new custom subnet +- local_action: + module: gce_net + name: privatenet + mode: custom + subnet_name: subnet_example + subnet_region: us-central1 + ipv4_range: 10.0.0.0/16 + ''' try: @@ -173,6 +226,14 @@ def format_allowed(allowed): return_value.append(format_allowed_section(section)) return return_value +def sorted_allowed_list(allowed_list): + """Sort allowed_list (output of format_allowed) by protocol and port.""" + # sort by protocol + allowed_by_protocol = sorted(allowed_list,key=lambda x: x['IPProtocol']) + # sort the ports list + return sorted(allowed_by_protocol, key=lambda y: y['ports'].sort()) + + def main(): module = AnsibleModule( argument_spec = dict( @@ -188,6 +249,10 @@ def main(): pem_file = dict(), credentials_file = dict(), project_id = dict(), + mode = dict(default='legacy', choices=['legacy', 'auto', 'custom']), + subnet_name = dict(), + subnet_region = dict(), + subnet_desc = dict(), ) ) @@ -204,35 +269,68 @@ def main(): src_tags = module.params.get('src_tags') target_tags = module.params.get('target_tags') state = module.params.get('state') + mode = module.params.get('mode') + subnet_name = module.params.get('subnet_name') + subnet_region = module.params.get('subnet_region') + subnet_desc = module.params.get('subnet_desc') changed = False json_output = {'state': state} if state in ['active', 'present']: network = None + subnet = None try: network = gce.ex_get_network(name) json_output['name'] = name - json_output['ipv4_range'] = network.cidr + if mode == 'legacy': + json_output['ipv4_range'] = network.cidr + if network and mode == 'custom' and subnet_name: + if not 
hasattr(gce, 'ex_get_subnetwork'): + module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False) + + subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region) + json_output['subnet_name'] = subnet_name + json_output['ipv4_range'] = subnet.cidr except ResourceNotFoundError: pass - except Exception, e: + except Exception as e: module.fail_json(msg=unexpected_error_msg(e), changed=False) # user wants to create a new network that doesn't yet exist if name and not network: - if not ipv4_range: - module.fail_json(msg="Network '" + name + "' is not found. To create network, 'ipv4_range' parameter is required", + if not ipv4_range and mode != 'auto': + module.fail_json(msg="Network '" + name + "' is not found. To create network in legacy or custom mode, 'ipv4_range' parameter is required", changed=False) + args = [ipv4_range if mode =='legacy' else None] + kwargs = {} + if mode != 'legacy': + kwargs['mode'] = mode try: - network = gce.ex_create_network(name, ipv4_range) + network = gce.ex_create_network(name, *args, **kwargs) json_output['name'] = name json_output['ipv4_range'] = ipv4_range changed = True - except Exception, e: + except TypeError: + module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False) + except Exception as e: module.fail_json(msg=unexpected_error_msg(e), changed=False) + if (subnet_name or ipv4_range) and not subnet and mode == 'custom': + if not hasattr(gce, 'ex_create_subnetwork'): + module.fail_json(msg='Update libcloud to a more recent version (>1.0) that supports subnetwork creation', changed=changed) + if not subnet_name or not ipv4_range or not subnet_region: + module.fail_json(msg="subnet_name, ipv4_range, and subnet_region required for custom mode", changed=changed) + + try: + subnet = gce.ex_create_subnetwork(subnet_name, cidr=ipv4_range, network=name, region=subnet_region, description=subnet_desc) + 
json_output['subnet_name'] = subnet_name + json_output['ipv4_range'] = ipv4_range + changed = True + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=changed) + if fwname: # user creating a firewall rule if not allowed and not src_range and not src_tags: @@ -246,13 +344,66 @@ def main(): allowed_list = format_allowed(allowed) + # Fetch existing rule and if it exists, compare attributes + # update if attributes changed. Create if doesn't exist. try: - gce.ex_create_firewall(fwname, allowed_list, network=name, + fw_changed = False + fw = gce.ex_get_firewall(fwname) + + # If old and new attributes are different, we update the firewall rule. + # This implicitly let's us clear out attributes as well. + # allowed_list is required and must not be None for firewall rules. + if allowed_list and (sorted_allowed_list(allowed_list) != sorted_allowed_list(fw.allowed)): + fw.allowed = allowed_list + fw_changed = True + + # If these attributes are lists, we sort them first, then compare. + # Otherwise, we update if they differ. + if fw.source_ranges != src_range: + if isinstance(src_range, list): + if sorted(fw.source_ranges) != sorted(src_range): + fw.source_ranges = src_range + fw_changed = True + else: + fw.source_ranges = src_range + fw_changed = True + + if fw.source_tags != src_tags: + if isinstance(src_range, list): + if sorted(fw.source_tags) != sorted(src_tags): + fw.source_tags = src_tags + fw_changed = True + else: + fw.source_tags = src_tags + fw_changed = True + + if fw.target_tags != target_tags: + if isinstance(target_tags, list): + if sorted(fw.target_tags) != sorted(target_tags): + fw.target_tags = target_tags + fw_changed = True + else: + fw.target_tags = target_tags + fw_changed = True + + if fw_changed is True: + try: + gce.ex_update_firewall(fw) + changed = True + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + + # Firewall rule not found so we try to create it. 
+ except ResourceNotFoundError: + try: + gce.ex_create_firewall(fwname, allowed_list, network=name, source_ranges=src_range, source_tags=src_tags, target_tags=target_tags) - changed = True - except ResourceExistsError: - pass - except Exception, e: + changed = True + + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + + except Exception as e: module.fail_json(msg=unexpected_error_msg(e), changed=False) json_output['fwname'] = fwname @@ -269,26 +420,41 @@ def main(): fw = gce.ex_get_firewall(fwname) except ResourceNotFoundError: pass - except Exception, e: + except Exception as e: module.fail_json(msg=unexpected_error_msg(e), changed=False) if fw: gce.ex_destroy_firewall(fw) changed = True + elif subnet_name: + if not hasattr(gce, 'ex_get_subnetwork') or not hasattr(gce, 'ex_destroy_subnetwork'): + module.fail_json(msg='Update libcloud to a more recent version (>1.0) that supports subnetwork creation', changed=changed) + json_output['name'] = subnet_name + subnet = None + try: + subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region) + except ResourceNotFoundError: + pass + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) + if subnet: + gce.ex_destroy_subnetwork(subnet) + changed = True elif name: json_output['name'] = name network = None try: network = gce.ex_get_network(name) -# json_output['d1'] = 'found network name %s' % name + except ResourceNotFoundError: -# json_output['d2'] = 'not found network name %s' % name pass - except Exception, e: -# json_output['d3'] = 'error with %s' % name + except Exception as e: module.fail_json(msg=unexpected_error_msg(e), changed=False) if network: # json_output['d4'] = 'deleting %s' % name - gce.ex_destroy_network(network) + try: + gce.ex_destroy_network(network) + except Exception as e: + module.fail_json(msg=unexpected_error_msg(e), changed=False) # json_output['d5'] = 'deleted %s' % name changed = True diff --git a/cloud/google/gce_pd.py 
b/cloud/google/gce_pd.py index 21433f4035f..a645806712b 100644 --- a/cloud/google/gce_pd.py +++ b/cloud/google/gce_pd.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: gce_pd @@ -218,7 +222,7 @@ def main(): json_output['size_gb'] = int(disk.size) except ResourceNotFoundError: pass - except Exception, e: + except Exception as e: module.fail_json(msg=unexpected_error_msg(e), changed=False) # user wants a disk to exist. If "instance_name" is supplied the user @@ -259,7 +263,7 @@ def main(): except QuotaExceededError: module.fail_json(msg='Requested disk size exceeds quota', changed=False) - except Exception, e: + except Exception as e: module.fail_json(msg=unexpected_error_msg(e), changed=False) json_output['size_gb'] = size_gb if image is not None: @@ -270,7 +274,7 @@ def main(): if inst and not is_attached: try: gce.attach_volume(inst, disk, device=name, ex_mode=mode) - except Exception, e: + except Exception as e: module.fail_json(msg=unexpected_error_msg(e), changed=False) json_output['attached_to_instance'] = inst.name json_output['attached_mode'] = mode @@ -282,15 +286,15 @@ def main(): if inst and is_attached: try: gce.detach_volume(disk, ex_node=inst) - except Exception, e: + except Exception as e: module.fail_json(msg=unexpected_error_msg(e), changed=False) changed = True if not detach_only: try: gce.destroy_volume(disk) - except ResourceInUseError, e: + except ResourceInUseError as e: module.fail_json(msg=str(e.value), changed=False) - except Exception, e: + except Exception as e: module.fail_json(msg=unexpected_error_msg(e), changed=False) changed = True diff --git a/cloud/linode/linode.py b/cloud/linode/linode.py index 9ebc770a47c..7de16bbb2b9 100644 --- a/cloud/linode/linode.py +++ b/cloud/linode/linode.py @@ -14,6 +14,10 @@ # You should have received a 
copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: linode @@ -40,7 +44,7 @@ linode_id: description: - Unique ID of a linode server - aliases: lid + aliases: [ 'lid' ] default: null type: integer plan: @@ -259,7 +263,7 @@ def linodeServers(module, api, state, name, plan, distribution, datacenter, lino api.linode_update(LinodeId=linode_id, Label='%s_%s' % (linode_id, name)) # Save server servers = api.linode_list(LinodeId=linode_id) - except Exception, e: + except Exception as e: module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) if not disks: @@ -291,7 +295,7 @@ def linodeServers(module, api, state, name, plan, distribution, datacenter, lino Label='%s swap disk (lid: %s)' % (name, linode_id), Size=swap) jobs.append(res['JobID']) - except Exception, e: + except Exception as e: # TODO: destroy linode ? module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) @@ -334,7 +338,7 @@ def linodeServers(module, api, state, name, plan, distribution, datacenter, lino api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id, Disklist=disks_list, Label='%s config' % name) configs = api.linode_config_list(LinodeId=linode_id) - except Exception, e: + except Exception as e: module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) # Start / Ensure servers are running @@ -395,7 +399,7 @@ def linodeServers(module, api, state, name, plan, distribution, datacenter, lino if server['STATUS'] != 2: try: res = api.linode_shutdown(LinodeId=linode_id) - except Exception, e: + except Exception as e: module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) instance['status'] = 'Stopping' changed = True @@ -415,7 +419,7 @@ def linodeServers(module, api, state, name, plan, distribution, datacenter, lino instance = getInstanceDetails(api, server) try: res = api.linode_reboot(LinodeId=server['LINODEID']) - except Exception, e: + 
except Exception as e: module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) instance['status'] = 'Restarting' changed = True @@ -426,7 +430,7 @@ def linodeServers(module, api, state, name, plan, distribution, datacenter, lino instance = getInstanceDetails(api, server) try: api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True) - except Exception, e: + except Exception as e: module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) instance['status'] = 'Deleting' changed = True @@ -443,14 +447,14 @@ def main(): state = dict(default='present', choices=['active', 'present', 'started', 'deleted', 'absent', 'stopped', 'restarted']), - api_key = dict(), + api_key = dict(no_log=True), name = dict(type='str'), plan = dict(type='int'), distribution = dict(type='int'), datacenter = dict(type='int'), linode_id = dict(type='int', aliases=['lid']), payment_term = dict(type='int', default=1, choices=[1, 12, 24]), - password = dict(type='str'), + password = dict(type='str', no_log=True), ssh_pub_key = dict(type='str'), swap = dict(type='int', default=512), wait = dict(type='bool', default=True), @@ -481,14 +485,14 @@ def main(): if not api_key: try: api_key = os.environ['LINODE_API_KEY'] - except KeyError, e: + except KeyError as e: module.fail_json(msg = 'Unable to load %s' % e.message) # setup the auth try: api = linode_api.Api(api_key) api.test_echo() - except Exception, e: + except Exception as e: module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) linodeServers(module, api, state, name, plan, distribution, datacenter, linode_id, diff --git a/cloud/openstack/README.md b/cloud/openstack/README.md index 4a872b11954..36cdcd383fe 100644 --- a/cloud/openstack/README.md +++ b/cloud/openstack/README.md @@ -54,3 +54,11 @@ Libraries users as a primary audience, they are for intra-server communication. The python-openstacksdk is the future there, and shade will migrate to it when its ready in a manner that is not noticable to ansible users. 
+ +Testing +------- + +* Integration testing is currently done in OpenStack's CI system in + http://git.openstack.org/cgit/openstack-infra/shade/tree/shade/tests/ansible +* Testing in shade produces an obvious chicken-and-egg scenario. Work is under + way to trigger from and report on PRs directly. diff --git a/cloud/openstack/_glance_image.py b/cloud/openstack/_glance_image.py index bdcef26795e..a97255241ae 100644 --- a/cloud/openstack/_glance_image.py +++ b/cloud/openstack/_glance_image.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with this software. If not, see . +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: glance_image @@ -44,7 +48,7 @@ description: - The keystone url for authentication required: false - default: 'http://127.0.0.1:35357/v2.0/' + default: http://127.0.0.1:35357/v2.0/ region_name: description: - Name of the region @@ -120,15 +124,16 @@ ''' EXAMPLES = ''' -# Upload an image from an HTTP URL -- glance_image: login_username=admin - login_password=passme - login_tenant_name=admin - name=cirros - container_format=bare - disk_format=qcow2 - state=present - copy_from=http:launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img +- name: Upload an image from an HTTP URL + glance_image: + login_username: admin + login_password: passme + login_tenant_name: admin + name: cirros + container_format: bare + disk_format: qcow2 + state: present + copy_from: http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img ''' import time @@ -150,7 +155,7 @@ def _get_ksclient(module, kwargs): password=kwargs.get('login_password'), tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url')) - except Exception, e: + except Exception as e: module.fail_json(msg="Error authenticating to the keystone: %s " % e.message) return client @@ -158,7 +163,7 @@ def _get_ksclient(module, kwargs): 
def _get_endpoint(module, client, endpoint_type): try: endpoint = client.service_catalog.url_for(service_type='image', endpoint_type=endpoint_type) - except Exception, e: + except Exception as e: module.fail_json(msg="Error getting endpoint for glance: %s" % e.message) return endpoint @@ -172,7 +177,7 @@ def _get_glance_client(module, kwargs): } try: client = glanceclient.Client('1', endpoint, **kwargs) - except Exception, e: + except Exception as e: module.fail_json(msg="Error in connecting to glance: %s" % e.message) return client @@ -183,7 +188,7 @@ def _glance_image_present(module, params, client): if image.name == params['name']: return image.id return None - except Exception, e: + except Exception as e: module.fail_json(msg="Error in fetching image list: %s" % e.message) @@ -207,7 +212,7 @@ def _glance_image_create(module, params, client): if image.status == 'active': break time.sleep(5) - except Exception, e: + except Exception as e: module.fail_json(msg="Error in creating image: %s" % e.message) if image.status == 'active': module.exit_json(changed=True, result=image.status, id=image.id) @@ -220,7 +225,7 @@ def _glance_delete_image(module, params, client): for image in client.images.list(): if image.name == params['name']: client.images.delete(image) - except Exception, e: + except Exception as e: module.fail_json(msg="Error in deleting image: %s" % e.message) module.exit_json(changed=True, result="Deleted") diff --git a/cloud/openstack/_keystone_user.py b/cloud/openstack/_keystone_user.py index 9586b8b70a9..250c8936bed 100644 --- a/cloud/openstack/_keystone_user.py +++ b/cloud/openstack/_keystone_user.py @@ -17,6 +17,10 @@ # Based on Jimmy Tang's implementation +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: keystone_user @@ -51,7 +55,7 @@ description: - The keystone url for authentication required: false - default: 'http://127.0.0.1:35357/v2.0/' + default: 
http://127.0.0.1:35357/v2.0/ user: description: - The name of the user that has to added/removed from OpenStack @@ -94,14 +98,22 @@ ''' EXAMPLES = ''' -# Create a tenant -- keystone_user: tenant=demo tenant_description="Default Tenant" - -# Create a user -- keystone_user: user=john tenant=demo password=secrete - -# Apply the admin role to the john user in the demo tenant -- keystone_user: role=admin user=john tenant=demo +- name: Create a tenant + keystone_user: + tenant: demo + tenant_description: "Default Tenant" + +- name: Create a user + keystone_user: + user: john + tenant: demo + password: secrete + +- name: Apply the admin role to the john user in the demo tenant + keystone_user: + role: admin + user: john + tenant: demo ''' try: @@ -229,7 +241,7 @@ def ensure_user_exists(keystone, user_name, password, email, tenant_name, check_mode): """ Check if user exists - Return (True, id) if a new user was created, (False, id) user alrady + Return (True, id) if a new user was created, (False, id) user already exists """ @@ -368,7 +380,7 @@ def main(): d = dispatch(keystone, user, password, tenant, tenant_description, email, role, state, endpoint, token, login_user, login_password, check_mode) - except Exception, e: + except Exception as e: if check_mode: # If we have a failure in check mode module.exit_json(changed=True, diff --git a/cloud/openstack/_nova_compute.py b/cloud/openstack/_nova_compute.py index 53af962adcd..0bea21048f5 100644 --- a/cloud/openstack/_nova_compute.py +++ b/cloud/openstack/_nova_compute.py @@ -30,6 +30,10 @@ except ImportError: HAS_NOVACLIENT = False +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: nova_compute @@ -58,7 +62,7 @@ description: - The keystone url for authentication required: false - default: 'http://127.0.0.1:35357/v2.0/' + default: http://127.0.0.1:35357/v2.0/ region_name: description: - Name of the region @@ -238,7 +242,7 @@ key_name: test 
wait_for: 200 flavor_id: 101 - floating-ips: + floating_ips: - 12.34.56.79 # Creates a new VM with 4G of RAM on Ubuntu Trusty, ignoring deprecated images @@ -286,7 +290,7 @@ def _delete_server(module, nova): if server_list: server = [x for x in server_list if x.name == module.params['name']] nova.servers.delete(server.pop()) - except Exception, e: + except Exception as e: module.fail_json( msg = "Error in deleting vm: %s" % e.message) if module.params['wait'] == 'no': module.exit_json(changed = True, result = "deleted") @@ -333,7 +337,7 @@ def _add_floating_ip_from_pool(module, nova, server): if not pool_ips: try: new_ip = nova.floating_ips.create(pool) - except Exception, e: + except Exception as e: module.fail_json(msg = "Unable to create floating ip: %s" % (e.message)) pool_ips.append(new_ip.ip) # Add to the main list @@ -348,7 +352,7 @@ def _add_floating_ip_from_pool(module, nova, server): # race condition and some other cloud operation may have # stolen an available floating ip break - except Exception, e: + except Exception as e: module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (ip, server.id, e.message)) @@ -357,7 +361,7 @@ def _add_floating_ip_list(module, server, ips): for ip in ips: try: server.add_floating_ip(ip) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (ip, server.id, e.message)) @@ -393,7 +397,7 @@ def _add_floating_ip(module, nova, server): # a recent server object if the above code path exec'd try: server = nova.servers.get(server.id) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error in getting info from instance: %s " % e.message) return server @@ -438,14 +442,14 @@ def _create_server(module, nova): try: server = nova.servers.create(*bootargs, **bootkwargs) server = nova.servers.get(server.id) - except Exception, e: + except Exception as e: module.fail_json( msg = "Error in creating instance: %s " % e.message) if 
module.params['wait'] == 'yes': expire = time.time() + int(module.params['wait_for']) while time.time() < expire: try: server = nova.servers.get(server.id) - except Exception, e: + except Exception as e: module.fail_json( msg = "Error in getting info from instance: %s" % e.message) if server.status == 'ACTIVE': server = _add_floating_ip(module, nova, server) @@ -514,7 +518,7 @@ def _get_server_state(module, nova): servers = [x for x in servers if x.name == module.params['name']] if servers: server = servers[0] - except Exception, e: + except Exception as e: module.fail_json(msg = "Error in getting the server list: %s" % e.message) if server and module.params['state'] == 'present': if server.status != 'ACTIVE': @@ -543,17 +547,17 @@ def main(): flavor_include = dict(default=None), key_name = dict(default=None), security_groups = dict(default='default'), - nics = dict(default=None), - meta = dict(default=None), + nics = dict(default=None, type='list'), + meta = dict(default=None, type='dict'), wait = dict(default='yes', choices=['yes', 'no']), wait_for = dict(default=180), state = dict(default='present', choices=['absent', 'present']), user_data = dict(default=None), config_drive = dict(default=False, type='bool'), auto_floating_ip = dict(default=False, type='bool'), - floating_ips = dict(default=None), - floating_ip_pools = dict(default=None), - scheduler_hints = dict(default=None), + floating_ips = dict(default=None, type='list'), + floating_ip_pools = dict(default=None, type='list'), + scheduler_hints = dict(default=None, type='dict'), )) module = AnsibleModule( argument_spec=argument_spec, @@ -577,9 +581,9 @@ def main(): service_type='compute') try: nova.authenticate() - except exceptions.Unauthorized, e: + except exceptions.Unauthorized as e: module.fail_json(msg = "Invalid OpenStack Nova credentials.: %s" % e.message) - except exceptions.AuthorizationFailure, e: + except exceptions.AuthorizationFailure as e: module.fail_json(msg = "Unable to authorize user: %s" 
% e.message) if module.params['state'] == 'present': diff --git a/cloud/openstack/_nova_keypair.py b/cloud/openstack/_nova_keypair.py index 68df0c5a2c4..914db91bf2f 100644 --- a/cloud/openstack/_nova_keypair.py +++ b/cloud/openstack/_nova_keypair.py @@ -25,11 +25,15 @@ except ImportError: HAS_NOVACLIENT = False +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: nova_keypair version_added: "1.2" -author: +author: - "Benno Joy (@bennojoy)" - "Michael DeHaan" deprecated: Deprecated in 2.0. Use os_keypair instead @@ -56,7 +60,7 @@ description: - The keystone url for authentication required: false - default: 'http://127.0.0.1:35357/v2.0/' + default: http://127.0.0.1:35357/v2.0/ region_name: description: - Name of the region @@ -83,14 +87,22 @@ - "python-novaclient" ''' EXAMPLES = ''' -# Creates a key pair with the running users public key -- nova_keypair: state=present login_username=admin - login_password=admin login_tenant_name=admin name=ansible_key - public_key={{ lookup('file','~/.ssh/id_rsa.pub') }} +- name: Create a key pair with the running users public key + nova_keypair: + state: present + login_username: admin + login_password: admin + login_tenant_name: admin + name: ansible_key + public_key: "{{ lookup('file','~/.ssh/id_rsa.pub') }}" -# Creates a new key pair and the private key returned after the run. -- nova_keypair: state=present login_username=admin login_password=admin - login_tenant_name=admin name=ansible_key +- name: Create a new key pair and the private key returned after the run. 
+ nova_keypair: + state: present + login_username: admin + login_password: admin + login_tenant_name: admin + name: ansible_key ''' def main(): @@ -112,9 +124,9 @@ def main(): service_type='compute') try: nova.authenticate() - except exc.Unauthorized, e: + except exc.Unauthorized as e: module.fail_json(msg = "Invalid OpenStack Nova credentials.: %s" % e.message) - except exc.AuthorizationFailure, e: + except exc.AuthorizationFailure as e: module.fail_json(msg = "Unable to authorize user: %s" % e.message) if module.params['state'] == 'present': @@ -126,7 +138,7 @@ def main(): module.exit_json(changed = False, result = "Key present") try: key = nova.keypairs.create(module.params['name'], module.params['public_key']) - except Exception, e: + except Exception as e: module.exit_json(msg = "Error in creating the keypair: %s" % e.message) if not module.params['public_key']: module.exit_json(changed = True, key = key.private_key) @@ -136,7 +148,7 @@ def main(): if key.name == module.params['name']: try: nova.keypairs.delete(module.params['name']) - except Exception, e: + except Exception as e: module.fail_json(msg = "The keypair deletion has failed: %s" % e.message) module.exit_json( changed = True, result = "deleted") module.exit_json(changed = False, result = "not present") diff --git a/cloud/openstack/_quantum_floating_ip.py b/cloud/openstack/_quantum_floating_ip.py index 5220d307844..9c72c431d0b 100644 --- a/cloud/openstack/_quantum_floating_ip.py +++ b/cloud/openstack/_quantum_floating_ip.py @@ -29,6 +29,10 @@ except ImportError: HAVE_DEPS = False +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: quantum_floating_ip @@ -60,7 +64,7 @@ description: - The keystone url for authentication required: false - default: 'http://127.0.0.1:35357/v2.0/' + default: http://127.0.0.1:35357/v2.0/ region_name: description: - Name of the region @@ -95,10 +99,15 @@ ''' EXAMPLES = ''' -# Assign a floating ip 
to the instance from an external network -- quantum_floating_ip: state=present login_username=admin login_password=admin - login_tenant_name=admin network_name=external_network - instance_name=vm1 internal_network_name=internal_network +- name: Assign a floating ip to the instance from an external network + quantum_floating_ip: + state: present + login_username: admin + login_password: admin + login_tenant_name: admin + network_name: external_network + instance_name: vm1 + internal_network_name: internal_network ''' def _get_ksclient(module, kwargs): @@ -108,7 +117,7 @@ def _get_ksclient(module, kwargs): tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url'), region_name=kwargs.get('region_name')) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) global _os_keystone _os_keystone = kclient @@ -118,7 +127,7 @@ def _get_ksclient(module, kwargs): def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: + except Exception as e: module.fail_json(msg = "Error getting network endpoint: %s" % e.message) return endpoint @@ -132,7 +141,7 @@ def _get_neutron_client(module, kwargs): } try: neutron = client.Client('2.0', **kwargs) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) return neutron @@ -148,7 +157,7 @@ def _get_server_state(module, nova): module.fail_json( msg="The VM is available but not Active. 
state:" + info['status']) server_info = info break - except Exception, e: + except Exception as e: module.fail_json(msg = "Error in getting the server list: %s" % e.message) return server_info, server @@ -169,7 +178,7 @@ def _get_port_info(neutron, module, instance_id, internal_network_name=None): } try: ports = neutron.list_ports(**kwargs) - except Exception, e: + except Exception as e: module.fail_json( msg = "Error in listing ports: %s" % e.message) if subnet_id: port = next(port for port in ports['ports'] if port['fixed_ips'][0]['subnet_id'] == subnet_id) @@ -188,7 +197,7 @@ def _get_floating_ip(module, neutron, fixed_ip_address, network_name): } try: ips = neutron.list_floatingips(**kwargs) - except Exception, e: + except Exception as e: module.fail_json(msg = "error in fetching the floatingips's %s" % e.message) if not ips['floatingips']: return None, None @@ -211,7 +220,7 @@ def _create_floating_ip(neutron, module, port_id, net_id, fixed_ip): } try: result = neutron.create_floatingip({'floatingip': kwargs}) - except Exception, e: + except Exception as e: module.fail_json(msg="There was an error in updating the floating ip address: %s" % e.message) module.exit_json(changed=True, result=result, public_ip=result['floatingip']['floating_ip_address']) @@ -221,7 +230,7 @@ def _get_net_id(neutron, module): } try: networks = neutron.list_networks(**kwargs) - except Exception, e: + except Exception as e: module.fail_json("Error in listing neutron networks: %s" % e.message) if not networks['networks']: return None @@ -233,7 +242,7 @@ def _update_floating_ip(neutron, module, port_id, floating_ip_id): } try: result = neutron.update_floatingip(floating_ip_id, {'floatingip': kwargs}) - except Exception, e: + except Exception as e: module.fail_json(msg="There was an error in updating the floating ip address: %s" % e.message) module.exit_json(changed=True, result=result) @@ -256,7 +265,7 @@ def main(): nova = nova_client.Client(module.params['login_username'], 
module.params['login_password'], module.params['login_tenant_name'], module.params['auth_url'], region_name=module.params['region_name'], service_type='compute') neutron = _get_neutron_client(module, module.params) - except Exception, e: + except Exception as e: module.fail_json(msg="Error in authenticating to nova: %s" % e.message) server_info, server_obj = _get_server_state(module, nova) diff --git a/cloud/openstack/_quantum_floating_ip_associate.py b/cloud/openstack/_quantum_floating_ip_associate.py index 8960e247b0f..f7eed5fe861 100644 --- a/cloud/openstack/_quantum_floating_ip_associate.py +++ b/cloud/openstack/_quantum_floating_ip_associate.py @@ -28,6 +28,10 @@ except ImportError: HAVE_DEPS = False +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: quantum_floating_ip_associate @@ -57,7 +61,7 @@ description: - the keystone url for authentication required: false - default: 'http://127.0.0.1:35357/v2.0/' + default: http://127.0.0.1:35357/v2.0/ region_name: description: - name of the region @@ -86,14 +90,14 @@ ''' EXAMPLES = ''' -# Associate a specific floating IP with an Instance -- quantum_floating_ip_associate: - state=present - login_username=admin - login_password=admin - login_tenant_name=admin - ip_address=1.1.1.1 - instance_name=vm1 +- name: Associate a specific floating IP with an Instance + quantum_floating_ip_associate: + state: present + login_username: admin + login_password: admin + login_tenant_name: admin + ip_address: 1.1.1.1 + instance_name: vm1 ''' def _get_ksclient(module, kwargs): @@ -102,7 +106,7 @@ def _get_ksclient(module, kwargs): password=kwargs.get('login_password'), tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url')) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) global _os_keystone _os_keystone = kclient @@ -112,7 +116,7 @@ def 
_get_ksclient(module, kwargs): def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: + except Exception as e: module.fail_json(msg = "Error getting network endpoint: %s" % e.message) return endpoint @@ -126,7 +130,7 @@ def _get_neutron_client(module, kwargs): } try: neutron = client.Client('2.0', **kwargs) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) return neutron @@ -142,7 +146,7 @@ def _get_server_state(module, nova): module.fail_json(msg="The VM is available but not Active. state:" + info['status']) server_info = info break - except Exception, e: + except Exception as e: module.fail_json(msg = "Error in getting the server list: %s" % e.message) return server_info, server @@ -150,7 +154,7 @@ def _get_port_id(neutron, module, instance_id): kwargs = dict(device_id = instance_id) try: ports = neutron.list_ports(**kwargs) - except Exception, e: + except Exception as e: module.fail_json( msg = "Error in listing ports: %s" % e.message) if not ports['ports']: return None @@ -162,7 +166,7 @@ def _get_floating_ip_id(module, neutron): } try: ips = neutron.list_floatingips(**kwargs) - except Exception, e: + except Exception as e: module.fail_json(msg = "error in fetching the floatingips's %s" % e.message) if not ips['floatingips']: module.fail_json(msg = "Could find the ip specified in parameter, Please check") @@ -179,7 +183,7 @@ def _update_floating_ip(neutron, module, port_id, floating_ip_id): } try: result = neutron.update_floatingip(floating_ip_id, {'floatingip': kwargs}) - except Exception, e: + except Exception as e: module.fail_json(msg = "There was an error in updating the floating ip address: %s" % e.message) module.exit_json(changed = True, result = result, public_ip=module.params['ip_address']) @@ -199,7 +203,7 @@ def main(): try: nova = 
nova_client.Client(module.params['login_username'], module.params['login_password'], module.params['login_tenant_name'], module.params['auth_url'], service_type='compute') - except Exception, e: + except Exception as e: module.fail_json( msg = " Error in authenticating to nova: %s" % e.message) neutron = _get_neutron_client(module, module.params) state, floating_ip_id = _get_floating_ip_id(module, neutron) diff --git a/cloud/openstack/_quantum_network.py b/cloud/openstack/_quantum_network.py index a0a29e6a062..db82e90d335 100644 --- a/cloud/openstack/_quantum_network.py +++ b/cloud/openstack/_quantum_network.py @@ -26,6 +26,10 @@ except ImportError: HAVE_DEPS = False +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: quantum_network @@ -59,7 +63,7 @@ description: - The keystone url for authentication required: false - default: 'http://127.0.0.1:35357/v2.0/' + default: http://127.0.0.1:35357/v2.0/ region_name: description: - Name of the region @@ -72,7 +76,7 @@ default: present name: description: - - Name to be assigned to the nework + - Name to be assigned to the network required: true default: None provider_network_type: @@ -113,15 +117,26 @@ ''' EXAMPLES = ''' -# Create a GRE backed Quantum network with tunnel id 1 for tenant1 -- quantum_network: name=t1network tenant_name=tenant1 state=present - provider_network_type=gre provider_segmentation_id=1 - login_username=admin login_password=admin login_tenant_name=admin - -# Create an external network -- quantum_network: name=external_network state=present - provider_network_type=local router_external=yes - login_username=admin login_password=admin login_tenant_name=admin +- name: Create a GRE backed Quantum network with tunnel id 1 for tenant1 + quantum_network: + name: t1network + tenant_name: tenant1 + state: present + provider_network_type: gre + provider_segmentation_id: 1 + login_username: admin + login_password: admin + 
login_tenant_name: admin + +- name: Create an external network + quantum_network: + name: external_network + state: present + provider_network_type: local + router_external: yes + login_username: admin + login_password: admin + login_tenant_name: admin ''' _os_keystone = None @@ -133,7 +148,7 @@ def _get_ksclient(module, kwargs): password=kwargs.get('login_password'), tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url')) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message) global _os_keystone _os_keystone = kclient @@ -143,7 +158,7 @@ def _get_ksclient(module, kwargs): def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: + except Exception as e: module.fail_json(msg = "Error getting network endpoint: %s " %e.message) return endpoint @@ -157,7 +172,7 @@ def _get_neutron_client(module, kwargs): } try: neutron = client.Client('2.0', **kwargs) - except Exception, e: + except Exception as e: module.fail_json(msg = " Error in connecting to neutron: %s " %e.message) return neutron @@ -182,7 +197,7 @@ def _get_net_id(neutron, module): } try: networks = neutron.list_networks(**kwargs) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error in listing neutron networks: %s" % e.message) if not networks['networks']: return None @@ -220,7 +235,7 @@ def _create_network(module, neutron): try: net = neutron.create_network({'network':network}) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error in creating network: %s" % e.message) return net['network']['id'] @@ -228,7 +243,7 @@ def _delete_network(module, net_id, neutron): try: id = neutron.delete_network(net_id) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error in deleting the network: %s" % e.message) return True diff --git 
a/cloud/openstack/_quantum_router.py b/cloud/openstack/_quantum_router.py index 0c4d2063017..c65f916d6b1 100644 --- a/cloud/openstack/_quantum_router.py +++ b/cloud/openstack/_quantum_router.py @@ -26,6 +26,10 @@ except ImportError: HAVE_DEPS = False +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: quantum_router @@ -55,7 +59,7 @@ description: - The keystone url for authentication required: false - default: 'http://127.0.0.1:35357/v2.0/' + default: http://127.0.0.1:35357/v2.0/ region_name: description: - Name of the region @@ -88,12 +92,13 @@ ''' EXAMPLES = ''' -# Creates a router for tenant admin -- quantum_router: state=present - login_username=admin - login_password=admin - login_tenant_name=admin - name=router1" +- name: Create a router for tenant admin + quantum_router: + state: present + login_username: admin + login_password: admin + login_tenant_name: admin + name: router1 ''' _os_keystone = None @@ -105,7 +110,7 @@ def _get_ksclient(module, kwargs): password=kwargs.get('login_password'), tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url')) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) global _os_keystone _os_keystone = kclient @@ -115,7 +120,7 @@ def _get_ksclient(module, kwargs): def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: + except Exception as e: module.fail_json(msg = "Error getting network endpoint: %s" % e.message) return endpoint @@ -129,7 +134,7 @@ def _get_neutron_client(module, kwargs): } try: neutron = client.Client('2.0', **kwargs) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) return neutron @@ -154,7 +159,7 @@ def _get_router_id(module, neutron): } try: routers = 
neutron.list_routers(**kwargs) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error in getting the router list: %s " % e.message) if not routers['routers']: return None @@ -168,7 +173,7 @@ def _create_router(module, neutron): } try: new_router = neutron.create_router(dict(router=router)) - except Exception, e: + except Exception as e: module.fail_json( msg = "Error in creating router: %s" % e.message) return new_router['router']['id'] diff --git a/cloud/openstack/_quantum_router_gateway.py b/cloud/openstack/_quantum_router_gateway.py index 891cee55a09..af6179bc62d 100644 --- a/cloud/openstack/_quantum_router_gateway.py +++ b/cloud/openstack/_quantum_router_gateway.py @@ -26,6 +26,10 @@ except ImportError: HAVE_DEPS = False +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: quantum_router_gateway @@ -55,7 +59,7 @@ description: - The keystone URL for authentication required: false - default: 'http://127.0.0.1:35357/v2.0/' + default: http://127.0.0.1:35357/v2.0/ region_name: description: - Name of the region @@ -83,10 +87,14 @@ ''' EXAMPLES = ''' -# Attach an external network with a router to allow flow of external traffic -- quantum_router_gateway: state=present login_username=admin login_password=admin - login_tenant_name=admin router_name=external_router - network_name=external_network +- name: Attach an external network with a router to allow flow of external traffic + quantum_router_gateway: + state: present + login_username: admin + login_password: admin + login_tenant_name: admin + router_name: external_router + network_name: external_network ''' _os_keystone = None @@ -96,7 +104,7 @@ def _get_ksclient(module, kwargs): password=kwargs.get('login_password'), tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url')) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error authenticating to the keystone: %s " % 
e.message) global _os_keystone _os_keystone = kclient @@ -106,7 +114,7 @@ def _get_ksclient(module, kwargs): def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: + except Exception as e: module.fail_json(msg = "Error getting network endpoint: %s" % e.message) return endpoint @@ -120,7 +128,7 @@ def _get_neutron_client(module, kwargs): } try: neutron = client.Client('2.0', **kwargs) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) return neutron @@ -130,7 +138,7 @@ def _get_router_id(module, neutron): } try: routers = neutron.list_routers(**kwargs) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error in getting the router list: %s " % e.message) if not routers['routers']: return None @@ -143,7 +151,7 @@ def _get_net_id(neutron, module): } try: networks = neutron.list_networks(**kwargs) - except Exception, e: + except Exception as e: module.fail_json("Error in listing neutron networks: %s" % e.message) if not networks['networks']: return None @@ -156,7 +164,7 @@ def _get_port_id(neutron, module, router_id, network_id): } try: ports = neutron.list_ports(**kwargs) - except Exception, e: + except Exception as e: module.fail_json( msg = "Error in listing ports: %s" % e.message) if not ports['ports']: return None @@ -168,14 +176,14 @@ def _add_gateway_router(neutron, module, router_id, network_id): } try: neutron.add_gateway_router(router_id, kwargs) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error in adding gateway to router: %s" % e.message) return True def _remove_gateway_router(neutron, module, router_id): try: neutron.remove_gateway_router(router_id) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error in removing gateway to router: %s" % e.message) return True diff --git 
a/cloud/openstack/_quantum_router_interface.py b/cloud/openstack/_quantum_router_interface.py index c936e98ad65..b2a1784d99a 100644 --- a/cloud/openstack/_quantum_router_interface.py +++ b/cloud/openstack/_quantum_router_interface.py @@ -26,15 +26,19 @@ except ImportError: HAVE_DEPS = False +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: quantum_router_interface version_added: "1.2" author: "Benno Joy (@bennojoy)" deprecated: Deprecated in 2.0. Use os_router instead -short_description: Attach/Dettach a subnet's interface to a router +short_description: Attach/Detach a subnet's interface to a router description: - - Attach/Dettach a subnet interface to a router, to provide a gateway for the subnet. + - Attach/Detach a subnet interface to a router, to provide a gateway for the subnet. options: login_username: description: @@ -88,13 +92,15 @@ ''' EXAMPLES = ''' -# Attach tenant1's subnet to the external router -- quantum_router_interface: state=present login_username=admin - login_password=admin - login_tenant_name=admin - tenant_name=tenant1 - router_name=external_route - subnet_name=t1subnet +- name: "Attach tenant1's subnet to the external router" + quantum_router_interface: + state: present + login_username: admin + login_password: admin + login_tenant_name: admin + tenant_name: tenant1 + router_name: external_route + subnet_name: t1subnet ''' @@ -107,7 +113,7 @@ def _get_ksclient(module, kwargs): password=kwargs.get('login_password'), tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url')) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) global _os_keystone _os_keystone = kclient @@ -117,7 +123,7 @@ def _get_ksclient(module, kwargs): def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except 
Exception, e: + except Exception as e: module.fail_json(msg = "Error getting network endpoint: %s" % e.message) return endpoint @@ -131,7 +137,7 @@ def _get_neutron_client(module, kwargs): } try: neutron = client.Client('2.0', **kwargs) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) return neutron @@ -155,7 +161,7 @@ def _get_router_id(module, neutron): } try: routers = neutron.list_routers(**kwargs) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error in getting the router list: %s " % e.message) if not routers['routers']: return None @@ -170,7 +176,7 @@ def _get_subnet_id(module, neutron): } try: subnets = neutron.list_subnets(**kwargs) - except Exception, e: + except Exception as e: module.fail_json( msg = " Error in getting the subnet list:%s " % e.message) if not subnets['subnets']: return None @@ -183,7 +189,7 @@ def _get_port_id(neutron, module, router_id, subnet_id): } try: ports = neutron.list_ports(**kwargs) - except Exception, e: + except Exception as e: module.fail_json( msg = "Error in listing ports: %s" % e.message) if not ports['ports']: return None @@ -199,7 +205,7 @@ def _add_interface_router(neutron, module, router_id, subnet_id): } try: neutron.add_interface_router(router_id, kwargs) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error in adding interface to router: %s" % e.message) return True @@ -209,7 +215,7 @@ def _remove_interface_router(neutron, module, router_id, subnet_id): } try: neutron.remove_interface_router(router_id, kwargs) - except Exception, e: + except Exception as e: module.fail_json(msg="Error in removing interface from router: %s" % e.message) return True diff --git a/cloud/openstack/_quantum_subnet.py b/cloud/openstack/_quantum_subnet.py index f2f125f64c8..e03f5962295 100644 --- a/cloud/openstack/_quantum_subnet.py +++ b/cloud/openstack/_quantum_subnet.py @@ -26,6 +26,10 @@ except ImportError: 
HAVE_DEPS = False +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: quantum_subnet @@ -54,7 +58,7 @@ description: - The keystone URL for authentication required: false - default: 'http://127.0.0.1:35357/v2.0/' + default: http://127.0.0.1:35357/v2.0/ region_name: description: - Name of the region @@ -123,10 +127,16 @@ ''' EXAMPLES = ''' -# Create a subnet for a tenant with the specified subnet -- quantum_subnet: state=present login_username=admin login_password=admin - login_tenant_name=admin tenant_name=tenant1 - network_name=network1 name=net1subnet cidr=192.168.0.0/24" +- name: Create a subnet for a tenant with the specified subnet + quantum_subnet: + state: present + login_username: admin + login_password: admin + login_tenant_name: admin + tenant_name: tenant1 + network_name: network1 + name: net1subnet + cidr: 192.168.0.0/24 ''' _os_keystone = None @@ -139,7 +149,7 @@ def _get_ksclient(module, kwargs): password=kwargs.get('login_password'), tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url')) - except Exception, e: + except Exception as e: module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message) global _os_keystone _os_keystone = kclient @@ -149,7 +159,7 @@ def _get_ksclient(module, kwargs): def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') - except Exception, e: + except Exception as e: module.fail_json(msg = "Error getting network endpoint: %s" % e.message) return endpoint @@ -163,7 +173,7 @@ def _get_neutron_client(module, kwargs): } try: neutron = client.Client('2.0', **kwargs) - except Exception, e: + except Exception as e: module.fail_json(msg = " Error in connecting to neutron: %s" % e.message) return neutron @@ -188,7 +198,7 @@ def _get_net_id(neutron, module): } try: networks = neutron.list_networks(**kwargs) - except Exception, e: + except 
Exception as e: module.fail_json("Error in listing neutron networks: %s" % e.message) if not networks['networks']: return None @@ -208,7 +218,7 @@ def _get_subnet_id(module, neutron): } try: subnets = neutron.list_subnets(**kwargs) - except Exception, e: + except Exception as e: module.fail_json( msg = " Error in getting the subnet list:%s " % e.message) if not subnets['subnets']: return None @@ -242,7 +252,7 @@ def _create_subnet(module, neutron): subnet.pop('dns_nameservers') try: new_subnet = neutron.create_subnet(dict(subnet=subnet)) - except Exception, e: + except Exception as e: module.fail_json(msg = "Failure in creating subnet: %s" % e.message) return new_subnet['subnet']['id'] @@ -250,7 +260,7 @@ def _create_subnet(module, neutron): def _delete_subnet(module, neutron, subnet_id): try: neutron.delete_subnet(subnet_id) - except Exception, e: + except Exception as e: module.fail_json( msg = "Error in deleting subnet: %s" % e.message) return True diff --git a/cloud/openstack/os_auth.py b/cloud/openstack/os_auth.py index 4f4d22eac94..bcc95aaaa1f 100644 --- a/cloud/openstack/os_auth.py +++ b/cloud/openstack/os_auth.py @@ -22,6 +22,10 @@ except ImportError: HAS_SHADE = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_auth @@ -37,10 +41,13 @@ ''' EXAMPLES = ''' -# Authenticate to the cloud and retrieve the service catalog -- os_auth: +- name: Authenticate to the cloud and retrieve the service catalog + os_auth: cloud: rax-dfw -- debug: var=service_catalog + +- name: Show service catalog + debug: + var: service_catalog ''' def main(): diff --git a/cloud/openstack/os_client_config.py b/cloud/openstack/os_client_config.py index 1627bdfe322..cc840085f67 100644 --- a/cloud/openstack/os_client_config.py +++ b/cloud/openstack/os_client_config.py @@ -18,6 +18,10 @@ import os_client_config from os_client_config import exceptions +ANSIBLE_METADATA = {'status': ['preview'], + 
'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_client_config @@ -39,15 +43,17 @@ ''' EXAMPLES = ''' -# Get list of clouds that do not support security groups -- os_client_config: -- debug: var={{ item }} - with_items: "{{ openstack.clouds|rejectattr('secgroup_source', 'none')|list() }}" +- name: Get list of clouds that do not support security groups + os_client_config: + +- debug: + var: "{{ item }}" + with_items: "{{ openstack.clouds | rejectattr('secgroup_source', 'none') | list }}" -# Get the information back just about the mordred cloud -- os_client_config: +- name: Get the information back just about the mordred cloud + os_client_config: clouds: - - mordred + - mordred ''' diff --git a/cloud/openstack/os_floating_ip.py b/cloud/openstack/os_floating_ip.py index d4ea326c98a..16c217c2d86 100644 --- a/cloud/openstack/os_floating_ip.py +++ b/cloud/openstack/os_floating_ip.py @@ -23,6 +23,13 @@ except ImportError: HAS_SHADE = False +from distutils.version import StrictVersion + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_floating_ip @@ -59,6 +66,14 @@ - To which fixed IP of server the floating IP address should be attached to. required: false + nat_destination: + description: + - The name or id of a neutron private network that the fixed IP to + attach floating IP is on + required: false + default: None + aliases: ["fixed_network", "internal_network"] + version_added: "2.3" wait: description: - When attaching a floating IP address, specify whether we should @@ -107,6 +122,17 @@ wait: true timeout: 180 +# Assign a new floating IP from the network `ext_net` to the instance fixed +# ip in network `private_net` of `cattle001`. 
+- os_floating_ip: + cloud: dguerri + state: present + server: cattle001 + network: ext_net + nat_destination: private_net + wait: true + timeout: 180 + # Detach a floating IP address from a server - os_floating_ip: cloud: dguerri @@ -133,6 +159,8 @@ def main(): floating_ip_address=dict(required=False, default=None), reuse=dict(required=False, type='bool', default=False), fixed_address=dict(required=False, default=None), + nat_destination=dict(required=False, default=None, + aliases=['fixed_network', 'internal_network']), wait=dict(required=False, type='bool', default=False), timeout=dict(required=False, type='int', default=60), purge=dict(required=False, type='bool', default=False), @@ -144,12 +172,18 @@ def main(): if not HAS_SHADE: module.fail_json(msg='shade is required for this module') + if (module.params['nat_destination'] and + StrictVersion(shade.__version__) < StrictVersion('1.8.0')): + module.fail_json(msg="To utilize nat_destination, the installed version of" + "the shade library MUST be >= 1.8.0") + server_name_or_id = module.params['server'] state = module.params['state'] network = module.params['network'] floating_ip_address = module.params['floating_ip_address'] reuse = module.params['reuse'] fixed_address = module.params['fixed_address'] + nat_destination = module.params['nat_destination'] wait = module.params['wait'] timeout = module.params['timeout'] purge = module.params['purge'] @@ -163,10 +197,38 @@ def main(): msg="server {0} not found".format(server_name_or_id)) if state == 'present': + # If f_ip already assigned to server, check that it matches + # requirements. 
+ public_ip = cloud.get_server_public_ip(server) + f_ip = _get_floating_ip(cloud, public_ip) if public_ip else public_ip + if f_ip: + if network: + network_id = cloud.get_network(name_or_id=network)["id"] + else: + network_id = None + if all([(fixed_address and f_ip.fixed_ip_address == fixed_address) or + (nat_destination and f_ip.internal_network == fixed_address), + network, f_ip.network != network_id]): + # Current state definitely conflicts with requirements + module.fail_json(msg="server {server} already has a " + "floating-ip on requested " + "interface but it doesn't match " + "requested network {network: {fip}" + .format(server=server_name_or_id, + network=network, + fip=remove_values(f_ip, + module.no_log_values))) + if not network or f_ip.network == network_id: + # Requirements are met + module.exit_json(changed=False, floating_ip=f_ip) + + # Requirements are vague enough to ignore existing f_ip and try + # to create a new f_ip to the server. + server = cloud.add_ips_to_server( server=server, ips=floating_ip_address, ip_pool=network, reuse=reuse, fixed_address=fixed_address, wait=wait, - timeout=timeout) + timeout=timeout, nat_destination=nat_destination) fip_address = cloud.get_server_public_ip(server) # Update the floating IP status f_ip = _get_floating_ip(cloud, fip_address) @@ -174,22 +236,27 @@ def main(): elif state == 'absent': if floating_ip_address is None: - module.fail_json(msg="floating_ip_address is required") + if not server_name_or_id: + module.fail_json(msg="either server or floating_ip_address are required") + server = cloud.get_server(server_name_or_id) + floating_ip_address = cloud.get_server_public_ip(server) f_ip = _get_floating_ip(cloud, floating_ip_address) if not f_ip: # Nothing to detach module.exit_json(changed=False) - - cloud.detach_ip_from_server( - server_id=server['id'], floating_ip_id=f_ip['id']) - # Update the floating IP status - f_ip = cloud.get_floating_ip(id=f_ip['id']) + changed = False + if f_ip["fixed_ip_address"]: 
+ cloud.detach_ip_from_server( + server_id=server['id'], floating_ip_id=f_ip['id']) + # Update the floating IP status + f_ip = cloud.get_floating_ip(id=f_ip['id']) + changed = True if purge: cloud.delete_floating_ip(f_ip['id']) module.exit_json(changed=True) - module.exit_json(changed=True, floating_ip=f_ip) + module.exit_json(changed=changed, floating_ip=f_ip) except shade.OpenStackCloudException as e: module.fail_json(msg=str(e), extra_data=e.extra_data) diff --git a/cloud/openstack/os_image.py b/cloud/openstack/os_image.py index 3aa49583f11..7632672555c 100644 --- a/cloud/openstack/os_image.py +++ b/cloud/openstack/os_image.py @@ -24,6 +24,10 @@ HAS_SHADE = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_image diff --git a/cloud/openstack/os_image_facts.py b/cloud/openstack/os_image_facts.py index 4058d4003e8..a810ad467f3 100644 --- a/cloud/openstack/os_image_facts.py +++ b/cloud/openstack/os_image_facts.py @@ -21,6 +21,10 @@ except ImportError: HAS_SHADE = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' module: os_image_facts short_description: Retrieve facts about an image within OpenStack. 
@@ -42,15 +46,18 @@ ''' EXAMPLES = ''' -# Gather facts about a previously created image named image1 -- os_image_facts: +- name: Gather facts about a previously created image named image1 + os_image_facts: auth: auth_url: https://your_api_url.com:9000/v2.0 username: user password: password project_name: someproject image: image1 -- debug: var=openstack + +- name: Show openstack facts + debug: + var: openstack ''' RETURN = ''' diff --git a/cloud/openstack/os_ironic.py b/cloud/openstack/os_ironic.py index 79751347615..2296082f321 100644 --- a/cloud/openstack/os_ironic.py +++ b/cloud/openstack/os_ironic.py @@ -23,6 +23,10 @@ HAS_SHADE = False import jsonpatch +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_ironic @@ -188,7 +192,7 @@ def _choose_id_value(module): def _choose_if_password_only(module, patch): if len(patch) is 1: if 'password' in patch[0]['path'] and module.params['skip_update_of_masked_password']: - # Return false to aabort update as the password appears + # Return false to abort update as the password appears # to be the only element in the patch. 
return False return True diff --git a/cloud/openstack/os_ironic_node.py b/cloud/openstack/os_ironic_node.py index 17e3b1578eb..fa41d6fcbc0 100644 --- a/cloud/openstack/os_ironic_node.py +++ b/cloud/openstack/os_ironic_node.py @@ -24,6 +24,10 @@ from distutils.version import StrictVersion +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_ironic_node diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index cf2b0a808ae..8651901a2af 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -25,6 +25,10 @@ HAS_SHADE = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_keypair diff --git a/cloud/openstack/os_network.py b/cloud/openstack/os_network.py index 98ea4495d40..39f0afa219b 100644 --- a/cloud/openstack/os_network.py +++ b/cloud/openstack/os_network.py @@ -22,6 +22,13 @@ except ImportError: HAS_SHADE = False +from distutils.version import StrictVersion + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_network @@ -57,6 +64,34 @@ choices: ['present', 'absent'] required: false default: present + provider_physical_network: + description: + - The physical network where this network object is implemented. + required: false + default: None + version_added: "2.1" + provider_network_type: + description: + - The type of physical network that maps to this network resource. + choices: ['flat', 'vlan', 'vxlan', 'gre', 'uplink'] + required: false + default: None + version_added: "2.1" + provider_segmentation_id: + description: + - An isolated segment on the physical network. The I(network_type) + attribute defines the segmentation model. For example, if the + I(network_type) value is vlan, this ID is a vlan identifier. If + the I(network_type) value is gre, this ID is a gre key. 
+ required: false + default: None + version_added: "2.1" + project: + description: + - Project name or ID containing the network (name admin-only) + required: false + default: None + version_added: "2.1" requirements: ["shade"] ''' @@ -115,6 +150,18 @@ description: The associated subnets. type: list sample: [] + "provider:physical_network": + description: The physical network where this network object is implemented. + type: string + sample: my_vlan_net + "provider:network_type": + description: The type of physical network that maps to this network resource. + type: string + sample: vlan + "provider:segmentation_id": + description: An isolated segment on the physical network. + type: string + sample: 101 ''' @@ -124,7 +171,12 @@ def main(): shared=dict(default=False, type='bool'), admin_state_up=dict(default=True, type='bool'), external=dict(default=False, type='bool'), + provider_physical_network=dict(required=False), + provider_network_type=dict(required=False, default=None, + choices=['flat', 'vlan', 'vxlan', 'gre', 'uplink']), + provider_segmentation_id=dict(required=False), state=dict(default='present', choices=['absent', 'present']), + project=dict(default=None) ) module_kwargs = openstack_module_kwargs() @@ -133,19 +185,53 @@ def main(): if not HAS_SHADE: module.fail_json(msg='shade is required for this module') + if (module.params['project'] and + StrictVersion(shade.__version__) < StrictVersion('1.6.0')): + module.fail_json(msg="To utilize project, the installed version of" + "the shade library MUST be >=1.6.0") + state = module.params['state'] name = module.params['name'] shared = module.params['shared'] admin_state_up = module.params['admin_state_up'] external = module.params['external'] + provider_physical_network = module.params['provider_physical_network'] + provider_network_type = module.params['provider_network_type'] + provider_segmentation_id = module.params['provider_segmentation_id'] + project = module.params.pop('project') try: cloud = 
shade.openstack_cloud(**module.params) - net = cloud.get_network(name) + if project is not None: + proj = cloud.get_project(project) + if proj is None: + module.fail_json(msg='Project %s could not be found' % project) + project_id = proj['id'] + filters = {'tenant_id': project_id} + else: + project_id = None + filters = None + net = cloud.get_network(name, filters=filters) if state == 'present': if not net: - net = cloud.create_network(name, shared, admin_state_up, external) + provider = {} + if provider_physical_network: + provider['physical_network'] = provider_physical_network + if provider_network_type: + provider['network_type'] = provider_network_type + if provider_segmentation_id: + provider['segmentation_id'] = provider_segmentation_id + + if provider and StrictVersion(shade.__version__) < StrictVersion('1.5.0'): + module.fail_json(msg="Shade >= 1.5.0 required to use provider options") + + if project_id is not None: + net = cloud.create_network(name, shared, admin_state_up, + external, provider, project_id) + else: + net = cloud.create_network(name, shared, admin_state_up, + external, provider) changed = True else: changed = False diff --git a/cloud/openstack/os_networks_facts.py b/cloud/openstack/os_networks_facts.py index 0cc868b547f..f39580321ee 100644 --- a/cloud/openstack/os_networks_facts.py +++ b/cloud/openstack/os_networks_facts.py @@ -21,6 +21,10 @@ except ImportError: HAS_SHADE = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_networks_facts @@ -33,7 +37,7 @@ - "python >= 2.6" - "shade" options: - network: + name: description: - Name or ID of the Network required: false @@ -46,28 +50,34 @@ ''' EXAMPLES = ''' -# Gather facts about previously created networks -- os_networks_facts: +- name: Gather facts about previously created networks + os_networks_facts: auth: auth_url: https://your_api_url.com:9000/v2.0 username: user password: password project_name: 
someproject -- debug: var=openstack_networks -# Gather facts about a previously created network by name -- os_networks_facts: +- name: Show openstack networks + debug: + var: openstack_networks + +- name: Gather facts about a previously created network by name + os_networks_facts: auth: auth_url: https://your_api_url.com:9000/v2.0 username: user password: password project_name: someproject name: network1 -- debug: var=openstack_networks -# Gather facts about a previously created network with filter (note: name and - filters parameters are Not mutually exclusive) -- os_networks_facts: +- name: Show openstack networks + debug: + var: openstack_networks + +- name: Gather facts about a previously created network with filter + # Note: name and filters parameters are Not mutually exclusive + os_networks_facts: auth: auth_url: https://your_api_url.com:9000/v2.0 username: user @@ -78,7 +88,10 @@ subnets: - 057d4bdf-6d4d-4728-bb0f-5ac45a6f7400 - 443d4dc0-91d4-4998-b21c-357d10433483 -- debug: var=openstack_networks + +- name: Show openstack networks + debug: + var: openstack_networks ''' RETURN = ''' diff --git a/cloud/openstack/os_nova_flavor.py b/cloud/openstack/os_nova_flavor.py index 102b2bf2aee..0f9f5afa91b 100644 --- a/cloud/openstack/os_nova_flavor.py +++ b/cloud/openstack/os_nova_flavor.py @@ -21,6 +21,10 @@ except ImportError: HAS_SHADE = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_nova_flavor @@ -88,22 +92,21 @@ ''' EXAMPLES = ''' -# Create 'tiny' flavor with 1024MB of RAM, 1 virtual CPU, and 10GB of -# local disk, and 10GB of ephemeral. -- os_nova_flavor: - cloud=mycloud - state=present - name=tiny - ram=1024 - vcpus=1 - disk=10 - ephemeral=10 - -# Delete 'tiny' flavor -- os_nova_flavor: - cloud=mycloud - state=absent - name=tiny +- name: "Create 'tiny' flavor with 1024MB of RAM, 1 virtual CPU, and 10GB of local disk, and 10GB of ephemeral." 
+ os_nova_flavor: + cloud: mycloud + state: present + name: tiny + ram: 1024 + vcpus: 1 + disk: 10 + ephemeral: 10 + +- name: "Delete 'tiny' flavor" + os_nova_flavor: + cloud: mycloud + state: absent + name: tiny ''' RETURN = ''' diff --git a/cloud/openstack/os_object.py b/cloud/openstack/os_object.py index d5d77e1318c..9e67ab39dff 100644 --- a/cloud/openstack/os_object.py +++ b/cloud/openstack/os_object.py @@ -23,6 +23,10 @@ HAS_SHADE = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_object @@ -60,11 +64,19 @@ ''' EXAMPLES = ''' -# Creates a object named 'fstab' in the 'config' container -- os_object: cloud=mordred state=present name=fstab container=config filename=/etc/fstab - -# Deletes a container called config and all of its contents -- os_object: cloud=rax-iad state=absent container=config +- name: "Create a object named 'fstab' in the 'config' container" + os_object: + cloud: mordred + state: present + name: fstab + container: config + filename: /etc/fstab + +- name: Delete a container called config and all of its contents + os_object: + cloud: rax-iad + state: absent + container: config ''' diff --git a/cloud/openstack/os_port.py b/cloud/openstack/os_port.py index e31f4fa0e1d..56625a63f56 100644 --- a/cloud/openstack/os_port.py +++ b/cloud/openstack/os_port.py @@ -22,6 +22,10 @@ HAS_SHADE = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_port diff --git a/cloud/openstack/os_router.py b/cloud/openstack/os_router.py index e3de749d68b..d4a6b28bc82 100644 --- a/cloud/openstack/os_router.py +++ b/cloud/openstack/os_router.py @@ -19,6 +19,12 @@ except ImportError: HAS_SHADE = False +from distutils.version import StrictVersion + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -54,11 +60,17 @@ network: 
description: - Unique name or ID of the external gateway network. - - required I(interfaces) or I(enable_snat) are provided, + - required I(interfaces) or I(enable_snat) are provided. + type: string + required: false + default: None + project: + description: + - Unique name or ID of the project. type: string required: false - false otherwise. default: None + version_added: "2.2" external_fixed_ips: description: - The IP address parameters for the external gateway network. Each @@ -82,6 +94,13 @@ state: present name: simple_router +# Create a simple router, not attached to a gateway or subnets for a given project. +- os_router: + cloud: mycloud + state: present + name: simple_router + project: myproj + # Creates a router attached to ext_network1 on an IPv4 subnet and one # internal subnet interface. - os_router: @@ -210,6 +229,7 @@ def _needs_update(cloud, module, router, network, internal_subnet_ids): return False + def _system_state_change(cloud, module, router, network, internal_ids): """Check if the system state would be changed.""" state = module.params['state'] @@ -221,6 +241,7 @@ def _system_state_change(cloud, module, router, network, internal_ids): return _needs_update(cloud, module, router, network, internal_ids) return False + def _build_kwargs(cloud, module, router, network): kwargs = { 'admin_state_up': module.params['admin_state_up'], @@ -247,6 +268,7 @@ def _build_kwargs(cloud, module, router, network): return kwargs + def _validate_subnets(module, cloud): external_subnet_ids = [] internal_subnet_ids = [] @@ -264,7 +286,8 @@ def _validate_subnets(module, cloud): module.fail_json(msg='subnet %s not found' % iface) internal_subnet_ids.append(subnet['id']) - return (external_subnet_ids, internal_subnet_ids) + return external_subnet_ids, internal_subnet_ids + def main(): argument_spec = openstack_full_argument_spec( @@ -275,6 +298,7 @@ def main(): network=dict(default=None), interfaces=dict(type='list', default=None), external_fixed_ips=dict(type='list', 
default=None), + project=dict(default=None) ) module_kwargs = openstack_module_kwargs() @@ -285,17 +309,32 @@ def main(): if not HAS_SHADE: module.fail_json(msg='shade is required for this module') + if (module.params['project'] and + StrictVersion(shade.__version__) <= StrictVersion('1.9.0')): + module.fail_json(msg="To utilize project, the installed version of" + "the shade library MUST be > 1.9.0") + state = module.params['state'] name = module.params['name'] network = module.params['network'] + project = module.params['project'] if module.params['external_fixed_ips'] and not network: module.fail_json(msg='network is required when supplying external_fixed_ips') try: cloud = shade.openstack_cloud(**module.params) - router = cloud.get_router(name) - + if project is not None: + proj = cloud.get_project(project) + if proj is None: + module.fail_json(msg='Project %s could not be found' % project) + project_id = proj['id'] + filters = {'tenant_id': project_id} + else: + project_id = None + filters = None + + router = cloud.get_router(name, filters=filters) net = None if network: net = cloud.get_network(network) @@ -316,6 +355,8 @@ def main(): if not router: kwargs = _build_kwargs(cloud, module, router, net) + if project_id: + kwargs['project_id'] = project_id router = cloud.create_router(**kwargs) for internal_subnet_id in internal_ids: cloud.add_router_interface(router, subnet_id=internal_subnet_id) @@ -323,18 +364,23 @@ def main(): else: if _needs_update(cloud, module, router, net, internal_ids): kwargs = _build_kwargs(cloud, module, router, net) - router = cloud.update_router(**kwargs) + updated_router = cloud.update_router(**kwargs) + + # Protect against update_router() not actually + # updating the router. + if not updated_router: + changed = False # On a router update, if any internal interfaces were supplied, # just detach all existing internal interfaces and attach the new. 
- if internal_ids: + elif internal_ids: + router = updated_router ports = cloud.list_router_interfaces(router, 'internal') for port in ports: cloud.remove_router_interface(router, port_id=port['id']) for internal_subnet_id in internal_ids: cloud.add_router_interface(router, subnet_id=internal_subnet_id) - - changed = True + changed = True module.exit_json(changed=changed, router=router, @@ -347,9 +393,10 @@ def main(): # We need to detach all internal interfaces on a router before # we will be allowed to delete it. ports = cloud.list_router_interfaces(router, 'internal') + router_id = router['id'] for port in ports: cloud.remove_router_interface(router, port_id=port['id']) - cloud.delete_router(name) + cloud.delete_router(router_id) module.exit_json(changed=True) except shade.OpenStackCloudException as e: diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index f2a2bb6ac1e..3ed5dfceb7a 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -23,6 +23,10 @@ HAS_SHADE = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_security_group @@ -53,17 +57,17 @@ EXAMPLES = ''' # Create a security group - os_security_group: - cloud=mordred - state=present - name=foo - description=security group for foo servers + cloud: mordred + state: present + name: foo + description: security group for foo servers # Update the existing 'foo' security group description - os_security_group: - cloud=mordred - state=present - name=foo - description=updated description for the foo security group + cloud: mordred + state: present + name: foo + description: updated description for the foo security group ''' diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index ef63bfacd45..3379d160408 100644 --- a/cloud/openstack/os_security_group_rule.py +++ 
b/cloud/openstack/os_security_group_rule.py @@ -23,6 +23,10 @@ HAS_SHADE = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_security_group_rule @@ -34,12 +38,12 @@ options: security_group: description: - - Name of the security group + - Name or ID of the security group required: true protocol: description: - - IP protocol - choices: ['tcp', 'udp', 'icmp', None] + - IP protocols TCP UDP ICMP 112 (VRRP) + choices: ['tcp', 'udp', 'icmp', '112', None] default: None port_range_min: description: @@ -57,7 +61,8 @@ required: false remote_group: description: - - ID of Security group to link (exclusive with remote_ip_prefix) + - Name or ID of the Security group to link (exclusive with + remote_ip_prefix) required: false ethertype: description: @@ -120,6 +125,12 @@ security_group: foo protocol: tcp remote_ip_prefix: 0.0.0.0/0 + +# Create a rule for VRRP with numbered protocol 112 +- os_security_group_rule: + security_group: loadbalancer_sg + protocol: 112 + remote_group: loadbalancer-node_sg ''' RETURN = ''' @@ -204,7 +215,7 @@ def _ports_match(protocol, module_min, module_max, rule_min, rule_max): return module_min == rule_min and module_max == rule_max -def _find_matching_rule(module, secgroup): +def _find_matching_rule(module, secgroup, remotegroup): """ Find a rule in the group that matches the module parameters. :returns: The matching rule dict, or None if no matches. 
@@ -213,7 +224,7 @@ def _find_matching_rule(module, secgroup): remote_ip_prefix = module.params['remote_ip_prefix'] ethertype = module.params['ethertype'] direction = module.params['direction'] - remote_group_id = module.params['remote_group'] + remote_group_id = remotegroup['id'] for rule in secgroup['security_group_rules']: if (protocol == rule['protocol'] @@ -230,10 +241,10 @@ def _find_matching_rule(module, secgroup): return None -def _system_state_change(module, secgroup): +def _system_state_change(module, secgroup, remotegroup): state = module.params['state'] if secgroup: - rule_exists = _find_matching_rule(module, secgroup) + rule_exists = _find_matching_rule(module, secgroup, remotegroup) else: return False @@ -250,11 +261,10 @@ def main(): # NOTE(Shrews): None is an acceptable protocol value for # Neutron, but Nova will balk at this. protocol = dict(default=None, - choices=[None, 'tcp', 'udp', 'icmp']), + choices=[None, 'tcp', 'udp', 'icmp', '112']), port_range_min = dict(required=False, type='int'), port_range_max = dict(required=False, type='int'), remote_ip_prefix = dict(required=False, default=None), - # TODO(mordred): Make remote_group handle name and id remote_group = dict(required=False, default=None), ethertype = dict(default='IPv4', choices=['IPv4', 'IPv6']), @@ -279,21 +289,27 @@ def main(): state = module.params['state'] security_group = module.params['security_group'] + remote_group = module.params['remote_group'] changed = False try: cloud = shade.openstack_cloud(**module.params) secgroup = cloud.get_security_group(security_group) + if remote_group: + remotegroup = cloud.get_security_group(remote_group) + else: + remotegroup = { 'id' : None } + if module.check_mode: - module.exit_json(changed=_system_state_change(module, secgroup)) + module.exit_json(changed=_system_state_change(module, secgroup, remotegroup)) if state == 'present': if not secgroup: module.fail_json(msg='Could not find security group %s' % security_group) - rule = 
_find_matching_rule(module, secgroup) + rule = _find_matching_rule(module, secgroup, remotegroup) if not rule: rule = cloud.create_security_group_rule( secgroup['id'], @@ -301,7 +317,7 @@ def main(): port_range_max=module.params['port_range_max'], protocol=module.params['protocol'], remote_ip_prefix=module.params['remote_ip_prefix'], - remote_group_id=module.params['remote_group'], + remote_group_id=remotegroup['id'], direction=module.params['direction'], ethertype=module.params['ethertype'] ) @@ -309,7 +325,7 @@ def main(): module.exit_json(changed=changed, rule=rule, id=rule['id']) if state == 'absent' and secgroup: - rule = _find_matching_rule(module, secgroup) + rule = _find_matching_rule(module, secgroup, remotegroup) if rule: cloud.delete_security_group_rule(rule['id']) changed = True diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index f4bbb7e72a0..0bb7dbcfbc6 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -27,6 +27,10 @@ HAS_SHADE = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_server @@ -178,15 +182,33 @@ - Should the resource be present or absent. choices: [present, absent] default: present + delete_fip: + description: + - When I(state) is absent and this option is true, any floating IP + associated with the instance will be deleted along with the instance. + required: false + default: false + version_added: "2.2" + reuse_ips: + description: + - When I(auto_ip) is true and this option is true, the I(auto_ip) code + will attempt to re-use unassigned floating ips in the project before + creating a new one. It is important to note that it is impossible + to safely do this concurrently, so if your use case involves + concurrent server creation, it is highly recommended to set this to + false and to delete the floating ip associated with a server when + the server is deleted using I(delete_fip). 
+ required: false + default: true + version_added: "2.2" requirements: - "python >= 2.6" - "shade" ''' EXAMPLES = ''' -# Creates a new instance and attaches to a network and passes metadata to -# the instance -- os_server: +- name: Create a new instance and attaches to a network and passes metadata to the instance + os_server: state: present auth: auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/ @@ -205,97 +227,95 @@ hostname: test1 group: uge_master -# Creates a new instance in HP Cloud AE1 region availability zone az2 and +# Create a new instance in HP Cloud AE1 region availability zone az2 and # automatically assigns a floating IP - name: launch a compute instance hosts: localhost tasks: - - name: launch an instance - os_server: - state: present - auth: - auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/ - username: username - password: Equality7-2521 - project_name: username-project1 - name: vm1 - region_name: region-b.geo-1 - availability_zone: az2 - image: 9302692b-b787-4b52-a3a6-daebb79cb498 - key_name: test - timeout: 200 - flavor: 101 - security_groups: default - auto_ip: yes - -# Creates a new instance in named cloud mordred availability zone az2 + - name: launch an instance + os_server: + state: present + auth: + auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/ + username: username + password: Equality7-2521 + project_name: username-project1 + name: vm1 + region_name: region-b.geo-1 + availability_zone: az2 + image: 9302692b-b787-4b52-a3a6-daebb79cb498 + key_name: test + timeout: 200 + flavor: 101 + security_groups: default + auto_ip: yes + +# Create a new instance in named cloud mordred availability zone az2 # and assigns a pre-known floating IP - name: launch a compute instance hosts: localhost tasks: - - name: launch an instance - os_server: - state: present - cloud: mordred - name: vm1 - availability_zone: az2 - image: 9302692b-b787-4b52-a3a6-daebb79cb498 - key_name: test - timeout: 200 - 
flavor: 101 - floating-ips: - - 12.34.56.79 - -# Creates a new instance with 4G of RAM on Ubuntu Trusty, ignoring + - name: launch an instance + os_server: + state: present + cloud: mordred + name: vm1 + availability_zone: az2 + image: 9302692b-b787-4b52-a3a6-daebb79cb498 + key_name: test + timeout: 200 + flavor: 101 + floating_ips: + - 12.34.56.79 + +# Create a new instance with 4G of RAM on Ubuntu Trusty, ignoring # deprecated images - name: launch a compute instance hosts: localhost tasks: - - name: launch an instance - os_server: - name: vm1 - state: present - cloud: mordred - region_name: region-b.geo-1 - image: Ubuntu Server 14.04 - image_exclude: deprecated - flavor_ram: 4096 - -# Creates a new instance with 4G of RAM on Ubuntu Trusty on a Performance node + - name: launch an instance + os_server: + name: vm1 + state: present + cloud: mordred + region_name: region-b.geo-1 + image: Ubuntu Server 14.04 + image_exclude: deprecated + flavor_ram: 4096 + +# Create a new instance with 4G of RAM on Ubuntu Trusty on a Performance node - name: launch a compute instance hosts: localhost tasks: - - name: launch an instance - os_server: - name: vm1 - cloud: rax-dfw - state: present - image: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM) - flavor_ram: 4096 - flavor_include: Performance + - name: launch an instance + os_server: + name: vm1 + cloud: rax-dfw + state: present + image: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM) + flavor_ram: 4096 + flavor_include: Performance # Creates a new instance and attaches to multiple network - name: launch a compute instance hosts: localhost tasks: - - name: launch an instance with a string - os_server: - name: vm1 - auth: - auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/ - username: admin - password: admin - project_name: admin - name: vm1 - image: 4f905f38-e52a-43d2-b6ec-754a13ffb529 - key_name: ansible_key - timeout: 200 - flavor: 4 - nics: 
"net-id=4cb08b20-62fe-11e5-9d70-feff819cdc9f,net-id=542f0430-62fe-11e5-9d70-feff819cdc9f..." - -# Creates a new instance and attaches to a network and passes metadata to -# the instance -- os_server: + - name: launch an instance with a string + os_server: + auth: + auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/ + username: admin + password: admin + project_name: admin + name: vm1 + image: 4f905f38-e52a-43d2-b6ec-754a13ffb529 + key_name: ansible_key + timeout: 200 + flavor: 4 + nics: "net-id=4cb08b20-62fe-11e5-9d70-feff819cdc9f,net-id=542f0430-62fe-11e5-9d70-feff819cdc9f..." + +- name: Creates a new instance and attaches to a network and passes metadata to the instance + os_server: state: present auth: auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/ @@ -312,51 +332,51 @@ - net-name: another_network meta: "hostname=test1,group=uge_master" -# Creates a new instance and attaches to a specific network -- os_server: - state: present - auth: - auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/ - username: admin - password: admin - project_name: admin - name: vm1 - image: 4f905f38-e52a-43d2-b6ec-754a13ffb529 - key_name: ansible_key - timeout: 200 - flavor: 4 - network: another_network - -# Creates a new instance with 4G of RAM on a 75G Ubuntu Trusty volume +- name: Creates a new instance and attaches to a specific network + os_server: + state: present + auth: + auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/ + username: admin + password: admin + project_name: admin + name: vm1 + image: 4f905f38-e52a-43d2-b6ec-754a13ffb529 + key_name: ansible_key + timeout: 200 + flavor: 4 + network: another_network + +# Create a new instance with 4G of RAM on a 75G Ubuntu Trusty volume - name: launch a compute instance hosts: localhost tasks: - - name: launch an instance - os_server: - name: vm1 - state: present - cloud: mordred - region_name: ams01 - image: Ubuntu Server 14.04 - flavor_ram: 4096 - 
boot_from_volume: True - volume_size: 75 + - name: launch an instance + os_server: + name: vm1 + state: present + cloud: mordred + region_name: ams01 + image: Ubuntu Server 14.04 + flavor_ram: 4096 + boot_from_volume: True + volume_size: 75 # Creates a new instance with 2 volumes attached - name: launch a compute instance hosts: localhost tasks: - - name: launch an instance - os_server: - name: vm1 - state: present - cloud: mordred - region_name: ams01 - image: Ubuntu Server 14.04 - flavor_ram: 4096 - volumes: - - photos - - music + - name: launch an instance + os_server: + name: vm1 + state: present + cloud: mordred + region_name: ams01 + image: Ubuntu Server 14.04 + flavor_ram: 4096 + volumes: + - photos + - music ''' @@ -368,7 +388,7 @@ def _exit_hostvars(module, cloud, server, changed=True): def _parse_nics(nics): for net in nics: - if type(net) == str: + if isinstance(net, str): for nic in net.split(','): yield dict((nic.split('='),)) else: @@ -378,11 +398,11 @@ def _network_args(module, cloud): args = [] nics = module.params['nics'] - if type(nics) != list: + if not isinstance(nics, list): module.fail_json(msg='The \'nics\' parameter must be a list.') for net in _parse_nics(nics): - if type(net) != dict: + if not isinstance(net, dict): module.fail_json( msg='Each entry in the \'nics\' parameter must be a dict.') @@ -411,7 +431,8 @@ def _delete_server(module, cloud): try: cloud.delete_server( module.params['name'], wait=module.params['wait'], - timeout=module.params['timeout']) + timeout=module.params['timeout'], + delete_ips=module.params['delete_fip']) except Exception as e: module.fail_json(msg="Error in deleting vm: %s" % e.message) module.exit_json(changed=True, result='deleted') @@ -430,15 +451,15 @@ def _create_server(module, cloud): if flavor: flavor_dict = cloud.get_flavor(flavor) if not flavor_dict: - module.fail_json(msg="Could not find flavor %s" % flavor) + module.fail_json(msg="Could not find flavor %s" % flavor) else: flavor_dict = 
cloud.get_flavor_by_ram(flavor_ram, flavor_include) if not flavor_dict: - module.fail_json(msg="Could not find any matching flavor") + module.fail_json(msg="Could not find any matching flavor") nics = _network_args(module, cloud) - if type(module.params['meta']) is str: + if isinstance(module.params['meta'], str): metas = {} for kv_str in module.params['meta'].split(","): k, v = kv_str.split("=") @@ -468,6 +489,7 @@ def _create_server(module, cloud): boot_volume=module.params['boot_volume'], boot_from_volume=module.params['boot_from_volume'], terminate_volume=module.params['terminate_volume'], + reuse_ips=module.params['reuse_ips'], wait=module.params['wait'], timeout=module.params['timeout'], **bootkwargs ) @@ -567,6 +589,8 @@ def main(): volumes = dict(default=[], type='list'), scheduler_hints = dict(default=None, type='dict'), state = dict(default='present', choices=['absent', 'present']), + delete_fip = dict(default=False, type='bool'), + reuse_ips = dict(default=True, type='bool'), ) module_kwargs = openstack_module_kwargs( mutually_exclusive=[ diff --git a/cloud/openstack/os_server_actions.py b/cloud/openstack/os_server_actions.py index 44ff6afc8a8..ae37e358a84 100644 --- a/cloud/openstack/os_server_actions.py +++ b/cloud/openstack/os_server_actions.py @@ -25,6 +25,10 @@ HAS_SHADE = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_server_actions @@ -35,6 +39,7 @@ description: - Perform server actions on an existing compute instance from OpenStack. This module does not return any data other than changed true/false. + When I(action) is 'rebuild', then I(image) parameter is required. options: server: description: @@ -55,8 +60,14 @@ description: - Perform the given action. The lock and unlock actions always return changed as the servers API does not provide lock status. 
- choices: [stop, start, pause, unpause, lock, unlock, suspend, resume] + choices: [stop, start, pause, unpause, lock, unlock, suspend, resume, + rebuild] default: present + image: + description: + - Image the server should be rebuilt with + default: null + version_added: "2.3" requirements: - "python >= 2.6" - "shade" @@ -82,7 +93,8 @@ 'lock': 'ACTIVE', # API doesn't show lock/unlock status 'unlock': 'ACTIVE', 'suspend': 'SUSPENDED', - 'resume': 'ACTIVE',} + 'resume': 'ACTIVE', + 'rebuild': 'ACTIVE'} _admin_actions = ['pause', 'unpause', 'suspend', 'resume', 'lock', 'unlock'] @@ -113,11 +125,15 @@ def main(): argument_spec = openstack_full_argument_spec( server=dict(required=True), action=dict(required=True, choices=['stop', 'start', 'pause', 'unpause', - 'lock', 'unlock', 'suspend', 'resume']), + 'lock', 'unlock', 'suspend', 'resume', + 'rebuild']), + image=dict(required=False), ) module_kwargs = openstack_module_kwargs() - module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) + module = AnsibleModule(argument_spec, supports_check_mode=True, + required_if=[('action', 'rebuild', ['image'])], + **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') @@ -125,6 +141,7 @@ def main(): action = module.params['action'] wait = module.params['wait'] timeout = module.params['timeout'] + image = module.params['image'] try: if action in _admin_actions: @@ -203,6 +220,18 @@ def main(): _wait(timeout, cloud, server, action) module.exit_json(changed=True) + elif action == 'rebuild': + image = cloud.get_image(image) + + if image is None: + module.fail_json(msg="Image does not exist") + + # rebuild doesn't set a state, just do it + cloud.nova_client.servers.rebuild(server=server.id, image=image.id) + if wait: + _wait(timeout, cloud, server, action) + module.exit_json(changed=True) + except shade.OpenStackCloudException as e: module.fail_json(msg=str(e), extra_data=e.extra_data) diff --git 
a/cloud/openstack/os_server_facts.py b/cloud/openstack/os_server_facts.py index bcda1d4f78f..efeb7780691 100644 --- a/cloud/openstack/os_server_facts.py +++ b/cloud/openstack/os_server_facts.py @@ -24,6 +24,10 @@ except ImportError: HAS_SHADE = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_server_facts diff --git a/cloud/openstack/os_server_volume.py b/cloud/openstack/os_server_volume.py index e71e0954a20..a6549649d87 100644 --- a/cloud/openstack/os_server_volume.py +++ b/cloud/openstack/os_server_volume.py @@ -25,6 +25,10 @@ HAS_SHADE = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_server_volume diff --git a/cloud/openstack/os_subnet.py b/cloud/openstack/os_subnet.py index 4d06425d2c9..54b12adf327 100644 --- a/cloud/openstack/os_subnet.py +++ b/cloud/openstack/os_subnet.py @@ -23,6 +23,10 @@ HAS_SHADE = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_subnet @@ -42,7 +46,7 @@ network_name: description: - Name of the network to which the subnet should be attached - - requried when I(state) is 'present' + - Required when I(state) is 'present' required: false name: description: @@ -71,6 +75,12 @@ - The ip that would be assigned to the gateway for this subnet required: false default: None + no_gateway_ip: + description: + - The gateway IP would not be assigned for this subnet + required: false + default: false + version_added: "2.2" dns_nameservers: description: - List of DNS nameservers for this subnet. 
@@ -105,6 +115,12 @@ choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'] required: false default: None + project: + description: + - Project name or ID containing the subnet (name admin-only) + required: false + default: None + version_added: "2.1" requirements: - "python >= 2.6" - "shade" @@ -164,7 +180,7 @@ def _can_update(subnet, module, cloud): subnet') if ip_version and subnet['ip_version'] != ip_version: module.fail_json(msg='Cannot update ip_version in existing subnet') - if ipv6_ra_mode and subnet.get('ipv6_ra_mode', None) != ip_version: + if ipv6_ra_mode and subnet.get('ipv6_ra_mode', None) != ipv6_ra_mode: module.fail_json(msg='Cannot update ipv6_ra_mode in existing subnet') if ipv6_a_mode and subnet.get('ipv6_address_mode', None) != ipv6_a_mode: module.fail_json(msg='Cannot update ipv6_address_mode in existing \ @@ -182,6 +198,7 @@ def _needs_update(subnet, module, cloud): pool_start = module.params['allocation_pool_start'] pool_end = module.params['allocation_pool_end'] gateway_ip = module.params['gateway_ip'] + no_gateway_ip = module.params['no_gateway_ip'] dns = module.params['dns_nameservers'] host_routes = module.params['host_routes'] curr_pool = subnet['allocation_pools'][0] @@ -203,6 +220,8 @@ def _needs_update(subnet, module, cloud): new_hr = sorted(host_routes, key=lambda t: t.keys()) if sorted(curr_hr) != sorted(new_hr): return True + if no_gateway_ip and subnet['gateway_ip']: + return True return False @@ -226,6 +245,7 @@ def main(): ip_version=dict(default='4', choices=['4', '6']), enable_dhcp=dict(default='true', type='bool'), gateway_ip=dict(default=None), + no_gateway_ip=dict(default=False, type='bool'), dns_nameservers=dict(default=None, type='list'), allocation_pool_start=dict(default=None), allocation_pool_end=dict(default=None), @@ -233,6 +253,7 @@ def main(): ipv6_ra_mode=dict(default=None, choice=ipv6_mode_choices), ipv6_address_mode=dict(default=None, choice=ipv6_mode_choices), state=dict(default='present', choices=['absent', 
'present']), + project=dict(default=None) ) module_kwargs = openstack_module_kwargs() @@ -250,12 +271,14 @@ def main(): enable_dhcp = module.params['enable_dhcp'] subnet_name = module.params['name'] gateway_ip = module.params['gateway_ip'] + no_gateway_ip = module.params['no_gateway_ip'] dns = module.params['dns_nameservers'] pool_start = module.params['allocation_pool_start'] pool_end = module.params['allocation_pool_end'] host_routes = module.params['host_routes'] ipv6_ra_mode = module.params['ipv6_ra_mode'] ipv6_a_mode = module.params['ipv6_address_mode'] + project = module.params.pop('project') # Check for required parameters when state == 'present' if state == 'present': @@ -270,9 +293,22 @@ def main(): else: pool = None + if no_gateway_ip and gateway_ip: + module.fail_json(msg='no_gateway_ip is not allowed with gateway_ip') + try: cloud = shade.openstack_cloud(**module.params) - subnet = cloud.get_subnet(subnet_name) + if project is not None: + proj = cloud.get_project(project) + if proj is None: + module.fail_json(msg='Project %s could not be found' % project) + project_id = proj['id'] + filters = {'tenant_id': project_id} + else: + project_id = None + filters = None + + subnet = cloud.get_subnet(subnet_name, filters=filters) if module.check_mode: module.exit_json(changed=_system_state_change(module, subnet, @@ -285,11 +321,13 @@ def main(): enable_dhcp=enable_dhcp, subnet_name=subnet_name, gateway_ip=gateway_ip, + disable_gateway_ip=no_gateway_ip, dns_nameservers=dns, allocation_pools=pool, host_routes=host_routes, ipv6_ra_mode=ipv6_ra_mode, - ipv6_address_mode=ipv6_a_mode) + ipv6_address_mode=ipv6_a_mode, + tenant_id=project_id) changed = True else: if _needs_update(subnet, module, cloud): @@ -297,6 +335,7 @@ def main(): subnet_name=subnet_name, enable_dhcp=enable_dhcp, gateway_ip=gateway_ip, + disable_gateway_ip=no_gateway_ip, dns_nameservers=dns, allocation_pools=pool, host_routes=host_routes) diff --git a/cloud/openstack/os_subnets_facts.py 
b/cloud/openstack/os_subnets_facts.py index 8d853de76de..b4beedab5a1 100644 --- a/cloud/openstack/os_subnets_facts.py +++ b/cloud/openstack/os_subnets_facts.py @@ -21,6 +21,10 @@ except ImportError: HAS_SHADE = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_subnets_facts @@ -46,28 +50,34 @@ ''' EXAMPLES = ''' -# Gather facts about previously created subnets -- os_subnets_facts: +- name: Gather facts about previously created subnets + os_subnets_facts: auth: auth_url: https://your_api_url.com:9000/v2.0 username: user password: password project_name: someproject -- debug: var=openstack_subnets -# Gather facts about a previously created subnet by name -- os_subnets_facts: +- name: Show openstack subnets + debug: + var: openstack_subnets + +- name: Gather facts about a previously created subnet by name + os_subnets_facts: auth: auth_url: https://your_api_url.com:9000/v2.0 username: user password: password project_name: someproject - name: subnet1 -- debug: var=openstack_subnets + name: subnet1 -# Gather facts about a previously created subnet with filter (note: name and - filters parameters are Not mutually exclusive) -- os_subnets_facts: +- name: Show openstack subnets + debug: + var: openstack_subnets + +- name: Gather facts about a previously created subnet with filter + # Note: name and filters parameters are not mutually exclusive + os_subnets_facts: auth: auth_url: https://your_api_url.com:9000/v2.0 username: user @@ -75,7 +85,10 @@ project_name: someproject filters: tenant_id: 55e2ce24b2a245b09f181bf025724cbe -- debug: var=openstack_subnets + +- name: Show openstack subnets + debug: + var: openstack_subnets ''' RETURN = ''' diff --git a/cloud/openstack/os_user.py b/cloud/openstack/os_user.py index d40372990c5..2ef4fe75e94 100644 --- a/cloud/openstack/os_user.py +++ b/cloud/openstack/os_user.py @@ -21,6 +21,10 @@ except ImportError: HAS_SHADE = False +ANSIBLE_METADATA = 
{'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_user @@ -41,9 +45,16 @@ password: description: - Password for the user - - Required when I(state) is present required: false default: None + update_password: + required: false + default: always + choices: ['always', 'on_create'] + version_added: "2.3" + description: + - C(always) will attempt to update password. C(on_create) will only + set the password for newly created users. email: description: - Email address for the user @@ -90,6 +101,17 @@ cloud: mycloud state: absent name: demouser + +# Create a user but don't update password if user exists +- os_user: + cloud: mycloud + state: present + name: demouser + password: secret + update_password: on_create + email: demo@example.com + domain: default + default_project: demo ''' @@ -121,19 +143,41 @@ sample: "demouser" ''' -def _needs_update(module, user): - keys = ('email', 'default_project', 'domain', 'enabled') - for key in keys: - if module.params[key] is not None and module.params[key] != user.get(key): +def _needs_update(params_dict, user): + for k, v in params_dict.items(): + if k not in ('password', 'update_password') and user[k] != v: return True # We don't get password back in the user object, so assume any supplied # password is a change. - if module.params['password'] is not None: + if (params_dict['password'] is not None and + params_dict['update_password'] == 'always'): return True return False +def _get_domain_id(cloud, domain): + try: + # We assume admin is passing domain id + domain_id = cloud.get_domain(domain)['id'] + except: + # If we fail, maybe admin is passing a domain name. + # Note that domains have unique names, just like id. 
+ try: + domain_id = cloud.search_domains(filters={'name': domain})[0]['id'] + except: + # Ok, let's hope the user is non-admin and passing a sane id + domain_id = domain + + return domain_id + +def _get_default_project_id(cloud, default_project): + project = cloud.get_project(default_project) + if not project: + module.fail_json(msg='Default project %s is not valid' % default_project) + + return project['id'] + def main(): argument_spec = openstack_full_argument_spec( @@ -144,14 +188,13 @@ def main(): domain=dict(required=False, default=None), enabled=dict(default=True, type='bool'), state=dict(default='present', choices=['absent', 'present']), + update_password=dict(default='always', choices=['always', + 'on_create']), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule( argument_spec, - required_if=[ - ('state', 'present', ['password']) - ], **module_kwargs) if not HAS_SHADE: @@ -164,47 +207,53 @@ def main(): domain = module.params['domain'] enabled = module.params['enabled'] state = module.params['state'] + update_password = module.params['update_password'] try: cloud = shade.openstack_cloud(**module.params) user = cloud.get_user(name) - project_id = None - if default_project: - project = cloud.get_project(default_project) - if not project: - module.fail_json(msg='Default project %s is not valid' % default_project) - project_id = project['id'] - + domain_id = None if domain: opcloud = shade.operator_cloud(**module.params) - try: - # We assume admin is passing domain id - dom = opcloud.get_domain(domain)['id'] - domain = dom - except: - # If we fail, maybe admin is passing a domain name. - # Note that domains have unique names, just like id. 
- try: - dom = opcloud.search_domains(filters={'name': domain})[0]['id'] - domain = dom - except: - # Ok, let's hope the user is non-admin and passing a sane id - pass + domain_id = _get_domain_id(opcloud, domain) if state == 'present': + if update_password in ('always', 'on_create'): + if not password: + msg = ("update_password is %s but a password value is " + "missing") % update_password + module.fail_json(msg=msg) + default_project_id = None + if default_project: + default_project_id = _get_default_project_id(cloud, default_project) + if user is None: user = cloud.create_user( name=name, password=password, email=email, - default_project=default_project, domain_id=domain, + default_project=default_project_id, domain_id=domain_id, enabled=enabled) changed = True else: - if _needs_update(module, user): - user = cloud.update_user( - user['id'], password=password, email=email, - default_project=project_id, domain_id=domain, - enabled=enabled) + params_dict = {'email': email, 'enabled': enabled, + 'password': password, + 'update_password': update_password} + if domain_id is not None: + params_dict['domain_id'] = domain_id + if default_project_id is not None: + params_dict['default_project_id'] = default_project_id + + if _needs_update(params_dict, user): + if update_password == 'always': + user = cloud.update_user( + user['id'], password=password, email=email, + default_project=default_project_id, + domain_id=domain_id, enabled=enabled) + else: + user = cloud.update_user( + user['id'], email=email, + default_project=default_project_id, + domain_id=domain_id, enabled=enabled) changed = True else: changed = False diff --git a/cloud/openstack/os_user_group.py b/cloud/openstack/os_user_group.py index 9d21a7f033f..2f39f346792 100644 --- a/cloud/openstack/os_user_group.py +++ b/cloud/openstack/os_user_group.py @@ -21,6 +21,10 @@ except ImportError: HAS_SHADE = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION
= ''' --- module: os_user_group diff --git a/cloud/openstack/os_volume.py b/cloud/openstack/os_volume.py index 9e7436e1a5f..6d6cc08d749 100644 --- a/cloud/openstack/os_volume.py +++ b/cloud/openstack/os_volume.py @@ -23,6 +23,10 @@ HAS_SHADE = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: os_volume diff --git a/cloud/rackspace/rax.py b/cloud/rackspace/rax.py index cb1edbab31c..e9a1fd48768 100644 --- a/cloud/rackspace/rax.py +++ b/cloud/rackspace/rax.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax @@ -182,7 +186,7 @@ description: - how long before wait gives up, in seconds default: 300 -author: +author: - "Jesse Keating (@j2sol)" - "Matt Martz (@sivel)" notes: @@ -287,11 +291,11 @@ def create(module, names=[], flavor=None, image=None, meta={}, key_name=None, if user_data and os.path.isfile(os.path.expanduser(user_data)): try: - user_data = os.path.expanduser('user_data') + user_data = os.path.expanduser(user_data) f = open(user_data) user_data = f.read() f.close() - except Exception, e: + except Exception as e: module.fail_json(msg='Failed to load %s' % user_data) # Handle the file contents @@ -301,7 +305,7 @@ def create(module, names=[], flavor=None, image=None, meta={}, key_name=None, fileobj = open(lpath, 'r') files[rpath] = fileobj.read() fileobj.close() - except Exception, e: + except Exception as e: module.fail_json(msg='Failed to load %s' % lpath) try: servers = [] @@ -316,7 +320,7 @@ def create(module, names=[], flavor=None, image=None, meta={}, key_name=None, userdata=user_data, block_device_mapping_v2=bdmv2, **extra_create_args)) - except Exception, e: + except Exception as e: if e.message: msg = str(e.message) else: @@ 
-398,7 +402,7 @@ def delete(module, instance_ids=[], wait=True, wait_timeout=300, kept=[]): for server in servers: try: server.delete() - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) else: changed = True @@ -546,7 +550,7 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, # %d to the end try: name % 0 - except TypeError, e: + except TypeError as e: if e.message.startswith('not all'): name = '%s%%d' % name else: @@ -636,7 +640,7 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, # %d to the end try: name % 0 - except TypeError, e: + except TypeError as e: if e.message.startswith('not all'): name = '%s%%d' % name else: @@ -890,4 +894,6 @@ def main(): from ansible.module_utils.rax import * # invoke the module -main() + +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_cbs.py b/cloud/rackspace/rax_cbs.py index ac4e8de424f..a09ce53def1 100644 --- a/cloud/rackspace/rax_cbs.py +++ b/cloud/rackspace/rax_cbs.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_cbs @@ -151,7 +155,7 @@ def cloud_block_storage(module, state, name, description, meta, size, metadata=meta, snapshot_id=snapshot_id, **kwargs) changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) else: if wait: @@ -180,7 +184,7 @@ def cloud_block_storage(module, state, name, description, meta, size, try: volume.delete() changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) module.exit_json(changed=changed, volume=instance) @@ -233,4 +237,6 @@ def main(): from ansible.module_utils.rax import * # invoke the module -main() + +if __name__ == '__main__': + main() diff --git 
a/cloud/rackspace/rax_cbs_attachments.py b/cloud/rackspace/rax_cbs_attachments.py index d762b797457..0c8032b35eb 100644 --- a/cloud/rackspace/rax_cbs_attachments.py +++ b/cloud/rackspace/rax_cbs_attachments.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_cbs_attachments @@ -120,7 +124,7 @@ def cloud_block_storage_attachments(module, state, volume, server, device, try: volume.attach_to_instance(server, mountpoint=device) changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) volume.get() @@ -159,7 +163,7 @@ def cloud_block_storage_attachments(module, state, volume, server, device, interval=3, attempts=0, verbose=False) changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) volume.get() @@ -218,4 +222,6 @@ def main(): from ansible.module_utils.rax import * ### invoke the module -main() + +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_cdb.py b/cloud/rackspace/rax_cdb.py index f4a1c4d27c9..4706457ae61 100644 --- a/cloud/rackspace/rax_cdb.py +++ b/cloud/rackspace/rax_cdb.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_cdb @@ -136,7 +140,7 @@ def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait, try: instance = cdb.create(name=name, flavor=flavor, volume=volume, type=cdb_type, version=cdb_version) - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) else: changed = True @@ -187,7 +191,7 @@ def 
delete_instance(module, name, wait, wait_timeout): try: instance.delete() - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) else: changed = True @@ -258,4 +262,6 @@ def main(): from ansible.module_utils.rax import * # invoke the module -main() + +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_cdb_database.py b/cloud/rackspace/rax_cdb_database.py index bfd5dbbf128..d2f061d4a93 100644 --- a/cloud/rackspace/rax_cdb_database.py +++ b/cloud/rackspace/rax_cdb_database.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' module: rax_cdb_database short_description: 'create / delete a database in the Cloud Databases' @@ -83,7 +87,7 @@ def save_database(module, cdb_id, name, character_set, collate): try: instance = cdb.get(cdb_id) - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) changed = False @@ -95,7 +99,7 @@ def save_database(module, cdb_id, name, character_set, collate): database = instance.create_database(name=name, character_set=character_set, collate=collate) - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) else: changed = True @@ -109,7 +113,7 @@ def delete_database(module, cdb_id, name): try: instance = cdb.get(cdb_id) - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) changed = False @@ -119,7 +123,7 @@ def delete_database(module, cdb_id, name): if database: try: database.delete() - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) else: changed = True @@ -172,4 +176,6 @@ def main(): from ansible.module_utils.rax import * # invoke the module -main() + +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_cdb_user.py 
b/cloud/rackspace/rax_cdb_user.py index e5169def8df..7fa1bc50485 100644 --- a/cloud/rackspace/rax_cdb_user.py +++ b/cloud/rackspace/rax_cdb_user.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_cdb_user @@ -98,7 +102,7 @@ def save_user(module, cdb_id, name, password, databases, host): try: instance = cdb.get(cdb_id) - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) changed = False @@ -112,7 +116,7 @@ def save_user(module, cdb_id, name, password, databases, host): password=password, database_names=databases, host=host) - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) else: changed = True @@ -134,7 +138,7 @@ def save_user(module, cdb_id, name, password, databases, host): new_dbs = [db for db in databases if db not in former_dbs] user.grant_user_access(db_names=new_dbs) - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) else: changed = True @@ -153,7 +157,7 @@ def delete_user(module, cdb_id, name): try: instance = cdb.get(cdb_id) - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) changed = False @@ -163,7 +167,7 @@ def delete_user(module, cdb_id, name): if user: try: user.delete() - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) else: changed = True @@ -217,4 +221,6 @@ def main(): from ansible.module_utils.rax import * # invoke the module -main() + +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_clb.py b/cloud/rackspace/rax_clb.py index af46d82e0b4..9d4d75c2291 100644 --- a/cloud/rackspace/rax_clb.py +++ b/cloud/rackspace/rax_clb.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a 
documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_clb @@ -182,7 +186,7 @@ def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, algorithm=algorithm, protocol=protocol, timeout=timeout, virtual_ips=virtual_ips) changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) else: balancer = balancers[0] @@ -240,7 +244,7 @@ def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, try: balancer.delete() changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) instance = rax_to_dict(balancer, 'clb') @@ -305,4 +309,6 @@ def main(): from ansible.module_utils.rax import * ### invoke the module -main() + +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_clb_nodes.py b/cloud/rackspace/rax_clb_nodes.py index d832f5f26eb..844834d7476 100644 --- a/cloud/rackspace/rax_clb_nodes.py +++ b/cloud/rackspace/rax_clb_nodes.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_clb_nodes @@ -192,7 +196,7 @@ def main(): if virtualenv: try: _activate_virtualenv(virtualenv) - except IOError, e: + except IOError as e: module.fail_json(msg='Failed to activate virtualenv %s (%s)' % ( virtualenv, e)) @@ -205,7 +209,7 @@ def main(): try: lb = pyrax.cloud_loadbalancers.get(load_balancer_id) - except pyrax.exc.PyraxException, e: + except pyrax.exc.PyraxException as e: module.fail_json(msg='%s' % e.message) node = _get_node(lb, node_id, address, port) @@ -220,7 +224,7 @@ def main(): result = {} except pyrax.exc.NotFound: module.exit_json(changed=False, state=state) 
- except pyrax.exc.PyraxException, e: + except pyrax.exc.PyraxException as e: module.fail_json(msg='%s' % e.message) else: # present if not node: @@ -237,7 +241,7 @@ def main(): weight=weight, type=typ) resp, body = lb.add_nodes([node]) result.update(body['nodes'][0]) - except pyrax.exc.PyraxException, e: + except pyrax.exc.PyraxException as e: module.fail_json(msg='%s' % e.message) else: # Updating an existing node mutable = { @@ -258,7 +262,7 @@ def main(): # type; this should probably be fixed in pyrax lb.update_node(node, diff=mutable) result.update(mutable) - except pyrax.exc.PyraxException, e: + except pyrax.exc.PyraxException as e: module.fail_json(msg='%s' % e.message) if wait: @@ -278,4 +282,6 @@ def main(): from ansible.module_utils.rax import * # invoke the module -main() + +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_dns.py b/cloud/rackspace/rax_dns.py index cd9daf8d1a4..eb62eaac356 100644 --- a/cloud/rackspace/rax_dns.py +++ b/cloud/rackspace/rax_dns.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_dns @@ -89,14 +93,14 @@ def rax_dns(module, comment, email, name, state, ttl): try: domain = dns.find(name=name) - except pyrax.exceptions.NoUniqueMatch, e: + except pyrax.exceptions.NoUniqueMatch as e: module.fail_json(msg='%s' % e.message) except pyrax.exceptions.NotFound: try: domain = dns.create(name=name, emailAddress=email, ttl=ttl, comment=comment) changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) update = {} @@ -112,7 +116,7 @@ def rax_dns(module, comment, email, name, state, ttl): domain.update(**update) changed = True domain.get() - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) elif state == 
'absent': @@ -121,14 +125,14 @@ def rax_dns(module, comment, email, name, state, ttl): except pyrax.exceptions.NotFound: domain = {} pass - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) if domain: try: domain.delete() changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) module.exit_json(changed=changed, domain=rax_to_dict(domain)) @@ -170,4 +174,6 @@ def main(): from ansible.module_utils.rax import * ### invoke the module -main() + +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_dns_record.py b/cloud/rackspace/rax_dns_record.py index 478c09a7754..1499b09eb68 100644 --- a/cloud/rackspace/rax_dns_record.py +++ b/cloud/rackspace/rax_dns_record.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_dns_record @@ -50,6 +54,7 @@ record with matching name. If there are already multiple records with matching name and overwrite=true, this module will fail. default: true + version_added: 2.1 priority: description: - Required for MX and SRV records, but forbidden for other record types. 
@@ -151,7 +156,7 @@ def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None, try: dns.update_ptr_record(item, record, name, data, ttl) changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) record.ttl = ttl record.name = name @@ -167,7 +172,7 @@ def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None, try: results = dns.add_ptr_records(item, [record]) changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) module.exit_json(changed=changed, records=results) @@ -183,7 +188,7 @@ def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None, try: dns.delete_ptr_records(item, data) changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) module.exit_json(changed=changed, records=results) @@ -209,7 +214,7 @@ def rax_dns_record(module, comment=None, data=None, domain=None, name=None, try: domain = dns.find(name=domain) - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) try: @@ -217,9 +222,9 @@ def rax_dns_record(module, comment=None, data=None, domain=None, name=None, record = domain.find_record(record_type, name=name) else: record = domain.find_record(record_type, name=name, data=data) - except pyrax.exceptions.DomainRecordNotUnique, e: + except pyrax.exceptions.DomainRecordNotUnique as e: module.fail_json(msg='overwrite=true and there are multiple matching records') - except pyrax.exceptions.DomainRecordNotFound, e: + except pyrax.exceptions.DomainRecordNotFound as e: try: record_data = { 'type': record_type, @@ -234,7 +239,7 @@ def rax_dns_record(module, comment=None, data=None, domain=None, name=None, record = domain.add_records([record_data])[0] changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) update = {} @@ -252,28 +257,28 @@ def rax_dns_record(module, comment=None, data=None, domain=None, 
name=None, record.update(**update) changed = True record.get() - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) elif state == 'absent': try: domain = dns.find(name=domain) - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) try: record = domain.find_record(record_type, name=name, data=data) - except pyrax.exceptions.DomainRecordNotFound, e: + except pyrax.exceptions.DomainRecordNotFound as e: record = {} pass - except pyrax.exceptions.DomainRecordNotUnique, e: + except pyrax.exceptions.DomainRecordNotUnique as e: module.fail_json(msg='%s' % e.message) if record: try: record.delete() changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) module.exit_json(changed=changed, record=rax_to_dict(record)) @@ -344,4 +349,6 @@ def main(): from ansible.module_utils.rax import * ### invoke the module -main() + +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_facts.py b/cloud/rackspace/rax_facts.py index 481732c0af7..8c49e8df988 100644 --- a/cloud/rackspace/rax_facts.py +++ b/cloud/rackspace/rax_facts.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_facts @@ -79,7 +83,7 @@ def rax_facts(module, address, name, server_id): search_opts = dict(name='^%s$' % name) try: servers = cs.servers.list(search_opts=search_opts) - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) elif address: servers = [] @@ -89,13 +93,13 @@ def rax_facts(module, address, name, server_id): if address in addresses: servers.append(server) break - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) elif server_id: servers = [] try: 
servers.append(cs.servers.get(server_id)) - except Exception, e: + except Exception as e: pass servers[:] = [server for server in servers if server.status != "DELETED"] @@ -143,4 +147,6 @@ def main(): from ansible.module_utils.rax import * ### invoke the module -main() + +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_files.py b/cloud/rackspace/rax_files.py index c3f9e8ddec4..aac6b8d5bf1 100644 --- a/cloud/rackspace/rax_files.py +++ b/cloud/rackspace/rax_files.py @@ -19,6 +19,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_files @@ -86,10 +90,12 @@ gather_facts: no tasks: - name: "List all containers" - rax_files: state=list + rax_files: + state: list - name: "Create container called 'mycontainer'" - rax_files: container=mycontainer + rax_files: + container: mycontainer - name: "Create container 'mycontainer2' with metadata" rax_files: @@ -99,19 +105,30 @@ file_for: someuser@example.com - name: "Set a container's web index page" - rax_files: container=mycontainer web_index=index.html + rax_files: + container: mycontainer + web_index: index.html - name: "Set a container's web error page" - rax_files: container=mycontainer web_error=error.html + rax_files: + container: mycontainer + web_error: error.html - name: "Make container public" - rax_files: container=mycontainer public=yes + rax_files: + container: mycontainer + public: yes - name: "Make container public with a 24 hour TTL" - rax_files: container=mycontainer public=yes ttl=86400 + rax_files: + container: mycontainer + public: yes + ttl: 86400 - name: "Make container private" - rax_files: container=mycontainer private=yes + rax_files: + container: mycontainer + private: yes - name: "Test Cloud Files Containers Metadata Storage" hosts: local @@ -142,7 +159,7 @@ 
try: import pyrax HAS_PYRAX = True -except ImportError, e: +except ImportError as e: HAS_PYRAX = False EXIT_DICT = dict(success=True) @@ -152,7 +169,7 @@ def _get_container(module, cf, container): try: return cf.get_container(container) - except pyrax.exc.NoSuchContainer, e: + except pyrax.exc.NoSuchContainer as e: module.fail_json(msg=e.message) @@ -162,7 +179,7 @@ def _fetch_meta(module, container): for k, v in container.get_metadata().items(): split_key = k.split(META_PREFIX)[-1] EXIT_DICT['meta'][split_key] = v - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) @@ -172,7 +189,7 @@ def meta(cf, module, container_, state, meta_, clear_meta): if meta_ and state == 'present': try: meta_set = c.set_metadata(meta_, clear=clear_meta) - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) elif meta_ and state == 'absent': remove_results = [] @@ -214,12 +231,12 @@ def container(cf, module, container_, state, meta_, clear_meta, ttl, public, try: c = cf.get_container(container_) - except pyrax.exc.NoSuchContainer, e: + except pyrax.exc.NoSuchContainer as e: # Make the container if state=present, otherwise bomb out if state == 'present': try: c = cf.create_container(container_) - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) else: EXIT_DICT['changed'] = True @@ -232,7 +249,7 @@ def container(cf, module, container_, state, meta_, clear_meta, ttl, public, if state == 'absent': try: cont_deleted = c.delete() - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) else: EXIT_DICT['deleted'] = True @@ -240,7 +257,7 @@ def container(cf, module, container_, state, meta_, clear_meta, ttl, public, if meta_: try: meta_set = c.set_metadata(meta_, clear=clear_meta) - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) finally: _fetch_meta(module, c) @@ -248,7 +265,7 @@ def container(cf, module, container_, state, meta_, clear_meta, ttl, public, if 
ttl: try: c.cdn_ttl = ttl - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) else: EXIT_DICT['ttl'] = c.cdn_ttl @@ -256,7 +273,7 @@ def container(cf, module, container_, state, meta_, clear_meta, ttl, public, if public: try: cont_public = c.make_public() - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) else: EXIT_DICT['container_urls'] = dict(url=c.cdn_uri, @@ -267,7 +284,7 @@ def container(cf, module, container_, state, meta_, clear_meta, ttl, public, if private: try: cont_private = c.make_private() - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) else: EXIT_DICT['set_private'] = True @@ -275,7 +292,7 @@ def container(cf, module, container_, state, meta_, clear_meta, ttl, public, if web_index: try: cont_web_index = c.set_web_index_page(web_index) - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) else: EXIT_DICT['set_index'] = True @@ -285,7 +302,7 @@ def container(cf, module, container_, state, meta_, clear_meta, ttl, public, if web_error: try: cont_err_index = c.set_web_error_page(web_error) - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) else: EXIT_DICT['set_error'] = True @@ -376,4 +393,6 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.rax import * -main() + +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_files_objects.py b/cloud/rackspace/rax_files_objects.py index bdeee6ad424..a1124913aef 100644 --- a/cloud/rackspace/rax_files_objects.py +++ b/cloud/rackspace/rax_files_objects.py @@ -19,6 +19,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_files_objects @@ -102,28 +106,50 @@ gather_facts: False tasks: - name: "Get objects from 
test container" - rax_files_objects: container=testcont dest=~/Downloads/testcont + rax_files_objects: + container: testcont + dest: ~/Downloads/testcont - name: "Get single object from test container" - rax_files_objects: container=testcont src=file1 dest=~/Downloads/testcont + rax_files_objects: + container: testcont + src: file1 + dest: ~/Downloads/testcont - name: "Get several objects from test container" - rax_files_objects: container=testcont src=file1,file2,file3 dest=~/Downloads/testcont + rax_files_objects: + container: testcont + src: file1,file2,file3 + dest: ~/Downloads/testcont - name: "Delete one object in test container" - rax_files_objects: container=testcont method=delete dest=file1 + rax_files_objects: + container: testcont + method: delete + dest: file1 - name: "Delete several objects in test container" - rax_files_objects: container=testcont method=delete dest=file2,file3,file4 + rax_files_objects: + container: testcont + method: delete + dest: file2,file3,file4 - name: "Delete all objects in test container" - rax_files_objects: container=testcont method=delete + rax_files_objects: + container: testcont + method: delete - name: "Upload all files to test container" - rax_files_objects: container=testcont method=put src=~/Downloads/onehundred + rax_files_objects: + container: testcont + method: put + src: ~/Downloads/onehundred - name: "Upload one file to test container" - rax_files_objects: container=testcont method=put src=~/Downloads/testcont/file1 + rax_files_objects: + container: testcont + method: put + src: ~/Downloads/testcont/file1 - name: "Upload one file to test container with metadata" rax_files_objects: @@ -135,14 +161,25 @@ who_uploaded_this: someuser@example.com - name: "Upload one file to test container with TTL of 60 seconds" - rax_files_objects: container=testcont method=put src=~/Downloads/testcont/file3 expires=60 + rax_files_objects: + container: testcont + method: put + src: ~/Downloads/testcont/file3 + expires: 60 - name: 
"Attempt to get remote object that does not exist" - rax_files_objects: container=testcont method=get src=FileThatDoesNotExist.jpg dest=~/Downloads/testcont + rax_files_objects: + container: testcont + method: get + src: FileThatDoesNotExist.jpg + dest: ~/Downloads/testcont ignore_errors: yes - name: "Attempt to delete remote object that does not exist" - rax_files_objects: container=testcont method=delete dest=FileThatDoesNotExist.jpg + rax_files_objects: + container: testcont + method: delete + dest: FileThatDoesNotExist.jpg ignore_errors: yes - name: "Test Cloud Files Objects Metadata" @@ -150,10 +187,16 @@ gather_facts: false tasks: - name: "Get metadata on one object" - rax_files_objects: container=testcont type=meta dest=file2 + rax_files_objects: + container: testcont + type: meta + dest: file2 - name: "Get metadata on several objects" - rax_files_objects: container=testcont type=meta src=file2,file1 + rax_files_objects: + container: testcont + type: meta + src: file2,file1 - name: "Set metadata on an object" rax_files_objects: @@ -167,7 +210,10 @@ clear_meta: true - name: "Verify metadata is set" - rax_files_objects: container=testcont type=meta src=file17 + rax_files_objects: + container: testcont + type: meta + src: file17 - name: "Delete metadata" rax_files_objects: @@ -180,7 +226,9 @@ key2: '' - name: "Get metadata on all objects" - rax_files_objects: container=testcont type=meta + rax_files_objects: + container: testcont + type: meta ''' try: @@ -196,7 +244,7 @@ def _get_container(module, cf, container): try: return cf.get_container(container) - except pyrax.exc.NoSuchContainer, e: + except pyrax.exc.NoSuchContainer as e: module.fail_json(msg=e.message) @@ -238,17 +286,17 @@ def upload(module, cf, container, src, dest, meta, expires): if dest and not is_dir: try: cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta) - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) elif is_dir: try: total_bytes = 
_upload_folder(cf, src, c, ttl=expires, headers=meta) - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) else: try: cont_obj = c.upload_file(src, ttl=expires, headers=meta) - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) EXIT_DICT['success'] = True @@ -299,7 +347,7 @@ def download(module, cf, container, src, dest, structure): for obj in objs: try: c.download_object(obj, dest, structure=structure) - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) else: results.append(obj) @@ -348,7 +396,7 @@ def delete(module, cf, container, src, dest): for obj in objs: try: result = c.delete_object(obj) - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) else: results.append(result) @@ -396,7 +444,7 @@ def get_meta(module, cf, container, src, dest): for obj in objs: try: meta = c.get_object(obj).get_metadata() - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) else: results[obj] = dict() @@ -434,7 +482,7 @@ def put_meta(module, cf, container, src, dest, meta, clear_meta): for obj in objs: try: result = c.get_object(obj).set_metadata(meta, clear=clear_meta) - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) else: results.append(result) @@ -473,20 +521,20 @@ def delete_meta(module, cf, container, src, dest, meta): for k, v in meta.items(): try: result = c.get_object(obj).remove_metadata_key(k) - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) else: results.append(result) else: try: o = c.get_object(obj) - except pyrax.exc.NoSuchObject, e: + except pyrax.exc.NoSuchObject as e: module.fail_json(msg=e.message) for k, v in o.get_metadata().items(): try: result = o.remove_metadata_key(k) - except Exception, e: + except Exception as e: module.fail_json(msg=e.message) results.append(result) @@ -573,4 +621,6 @@ def main(): from ansible.module_utils.basic import * from 
ansible.module_utils.rax import * -main() + +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_identity.py b/cloud/rackspace/rax_identity.py index a0697742b8e..baa856447a3 100644 --- a/cloud/rackspace/rax_identity.py +++ b/cloud/rackspace/rax_identity.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_identity @@ -29,6 +33,7 @@ - Indicate desired state of the resource choices: ['present', 'absent'] default: present + required: false author: - "Christopher H. Laco (@claco)" - "Matt Martz (@sivel)" @@ -105,4 +110,6 @@ def main(): from ansible.module_utils.rax import * # invoke the module -main() + +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_keypair.py b/cloud/rackspace/rax_keypair.py index e8cadd968fb..5fab5ca79a5 100644 --- a/cloud/rackspace/rax_keypair.py +++ b/cloud/rackspace/rax_keypair.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_keypair @@ -109,7 +113,7 @@ def rax_keypair(module, name, public_key, state): f = open(public_key) public_key = f.read() f.close() - except Exception, e: + except Exception as e: module.fail_json(msg='Failed to load %s' % public_key) try: @@ -118,9 +122,9 @@ def rax_keypair(module, name, public_key, state): try: keypair = cs.keypairs.create(name, public_key) changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) elif state == 'absent': @@ -133,7 +137,7 @@ def 
rax_keypair(module, name, public_key, state): try: keypair.delete() changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) module.exit_json(changed=changed, keypair=rax_to_dict(keypair)) @@ -171,4 +175,6 @@ def main(): from ansible.module_utils.rax import * ### invoke the module -main() + +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_meta.py b/cloud/rackspace/rax_meta.py index 1c6c3bfd234..18bce5a4f4e 100644 --- a/cloud/rackspace/rax_meta.py +++ b/cloud/rackspace/rax_meta.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_meta @@ -91,7 +95,7 @@ def rax_meta(module, address, name, server_id, meta): search_opts = dict(name='^%s$' % name) try: servers = cs.servers.list(search_opts=search_opts) - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) elif address: servers = [] @@ -101,13 +105,13 @@ def rax_meta(module, address, name, server_id, meta): if address in addresses: servers.append(server) break - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) elif server_id: servers = [] try: servers.append(cs.servers.get(server_id)) - except Exception, e: + except Exception as e: pass if len(servers) > 1: @@ -175,4 +179,6 @@ def main(): from ansible.module_utils.rax import * ### invoke the module -main() + +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_network.py b/cloud/rackspace/rax_network.py index cea7531a8db..7ff6edd0eca 100644 --- a/cloud/rackspace/rax_network.py +++ b/cloud/rackspace/rax_network.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments 
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_network @@ -86,9 +90,9 @@ def cloud_network(module, state, label, cidr): try: network = pyrax.cloud_networks.create(label, cidr=cidr) changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) elif state == 'absent': @@ -98,7 +102,7 @@ def cloud_network(module, state, label, cidr): changed = True except pyrax.exceptions.NetworkNotFound: pass - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) if network: @@ -143,4 +147,6 @@ def main(): from ansible.module_utils.rax import * ### invoke the module -main() + +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_queue.py b/cloud/rackspace/rax_queue.py index 8b1a60e4b81..a1112460db8 100644 --- a/cloud/rackspace/rax_queue.py +++ b/cloud/rackspace/rax_queue.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_queue @@ -93,7 +97,7 @@ def cloud_queue(module, state, name): try: queue = cq.create(name) changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) else: queue = queues[0] @@ -108,7 +112,7 @@ def cloud_queue(module, state, name): try: queue.delete() changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) module.exit_json(changed=changed, queue=instance) @@ -144,4 +148,6 @@ def main(): from ansible.module_utils.rax import * ### invoke the module -main() + +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_scaling_group.py b/cloud/rackspace/rax_scaling_group.py 
index 79b7395f400..95aef91cc57 100644 --- a/cloud/rackspace/rax_scaling_group.py +++ b/cloud/rackspace/rax_scaling_group.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_scaling_group @@ -173,7 +177,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, f = open(user_data) user_data = f.read() f.close() - except Exception, e: + except Exception as e: module.fail_json(msg='Failed to load %s' % user_data) if state == 'present': @@ -215,7 +219,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, 'contents': f.read() }) f.close() - except Exception, e: + except Exception as e: module.fail_json(msg='Failed to load %s' % lpath) lbs = [] @@ -237,7 +241,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, try: sg = au.find(name=name) - except pyrax.exceptions.NoUniqueMatch, e: + except pyrax.exceptions.NoUniqueMatch as e: module.fail_json(msg='%s' % e.message) except pyrax.exceptions.NotFound: try: @@ -252,7 +256,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, key_name=key_name, config_drive=config_drive, user_data=user_data) changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) if not changed: @@ -344,9 +348,9 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, sg = au.find(name=name) sg.delete() changed = True - except pyrax.exceptions.NotFound, e: + except pyrax.exceptions.NotFound as e: sg = {} - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg)) @@ -426,4 +430,6 @@ def main(): from ansible.module_utils.rax import * # invoke the module -main() + 
+if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_scaling_policy.py b/cloud/rackspace/rax_scaling_policy.py index b216ca13274..c56cee50dd2 100644 --- a/cloud/rackspace/rax_scaling_policy.py +++ b/cloud/rackspace/rax_scaling_policy.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_scaling_policy @@ -141,12 +145,12 @@ def rax_asp(module, at=None, change=0, cron=None, cooldown=300, except ValueError: try: sg = au.find(name=scaling_group) - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) else: try: sg = au.get(scaling_group) - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) if state == 'present': @@ -168,7 +172,7 @@ def rax_asp(module, at=None, change=0, cron=None, cooldown=300, desired_capacity=desired_capacity, args=args) changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) else: @@ -217,7 +221,7 @@ def rax_asp(module, at=None, change=0, cron=None, cooldown=300, else: policy.delete() changed = True - except Exception, e: + except Exception as e: module.fail_json(msg='%s' % e.message) module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy)) @@ -280,4 +284,6 @@ def main(): from ansible.module_utils.rax import * # invoke the module -main() + +if __name__ == '__main__': + main() diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index ea571a71b20..5425db6f89e 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -19,6 +19,7 @@ # TODO: # Ability to set CPU/Memory reservations + try: import json except ImportError: @@ -36,6 +37,10 @@ import ssl +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 
'version': '1.0'} + DOCUMENTATION = ''' --- module: vsphere_guest @@ -92,7 +97,7 @@ default: null state: description: - - Indicate desired state of the vm. 'reconfigured' only applies changes to 'memory_mb' and 'num_cpus' in vm_hardware parameter, and only when hot-plugging is enabled for the guest. + - Indicate desired state of the vm. 'reconfigured' only applies changes to 'vm_cdrom', 'memory_mb', and 'num_cpus' in vm_hardware parameter. The 'memory_mb' and 'num_cpus' changes are applied to powered-on vms when hot-plugging is enabled for the guest. default: present choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured'] from_template: @@ -262,7 +267,7 @@ vm_extra_config: folder: MyFolder -# Task to gather facts from a vSphere cluster only if the system is a VMWare guest +# Task to gather facts from a vSphere cluster only if the system is a VMware guest - vsphere_guest: vcenter_hostname: vcenter.mydomain.local @@ -527,7 +532,7 @@ def find_datastore(module, s, datastore, config_target): datastore = d.Datastore.Name break else: - for ds_mor, ds_name in server.get_datastores().items(): + for ds_mor, ds_name in s.get_datastores().items(): ds_props = VIProperty(s, ds_mor) if (ds_props.summary.accessible and (datastore and ds_name == datastore) or (not datastore)): @@ -622,6 +627,26 @@ def spec_singleton(spec, request, vm): spec = request.new_spec() return spec +def get_cdrom_params(module, s, vm_cdrom): + cdrom_type = None + cdrom_iso_path = None + try: + cdrom_type = vm_cdrom['type'] + except KeyError: + s.disconnect() + module.fail_json( + msg="Error on %s definition. cdrom type needs to be" + " specified." % vm_cdrom) + if cdrom_type == 'iso': + try: + cdrom_iso_path = vm_cdrom['iso_path'] + except KeyError: + s.disconnect() + module.fail_json( + msg="Error on %s definition. cdrom iso_path needs" + " to be specified." 
% vm_cdrom) + + return cdrom_type, cdrom_iso_path def vmdisk_id(vm, current_datastore_name): id_list = [] @@ -663,7 +688,7 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo try: hostmor = [k for k, v in vsphere_client.get_hosts().items() if v == esxi_hostname][0] - except IndexError, e: + except IndexError: vsphere_client.disconnect() module.fail_json(msg="Cannot find esx host named: %s" % esxi_hostname) @@ -688,7 +713,7 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo try: cluster = [k for k, v in vsphere_client.get_clusters().items() if v == cluster_name][0] if cluster_name else None - except IndexError, e: + except IndexError: vsphere_client.disconnect() module.fail_json(msg="Cannot find Cluster named: %s" % cluster_name) @@ -697,7 +722,7 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo rpmor = [k for k, v in vsphere_client.get_resource_pools( from_mor=cluster).items() if v == resource_pool][0] - except IndexError, e: + except IndexError: vsphere_client.disconnect() module.fail_json(msg="Cannot find Resource Pool named: %s" % resource_pool) @@ -716,7 +741,7 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo try: if not vmTarget: - cloneArgs = dict(resourcepool=rpmor, power_on=power_on_after_clone) + cloneArgs = dict(resourcepool=rpmor, power_on=False) if snapshot_to_clone is not None: #check if snapshot_to_clone is specified, Create a Linked Clone instead of a full clone. @@ -728,6 +753,18 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo cloneArgs["folder"] = vm_extra_config.get("folder") vmTemplate.clone(guest, **cloneArgs) + + vm = vsphere_client.get_vm_by_name(guest) + + # VM was created. 
If there is any extra config options specified, set + if vm_extra_config: + vm.set_extra_config(vm_extra_config) + + # Power on if asked + if power_on_after_clone == True: + state = 'powered_on' + power_state(vm, state, True) + changed = True else: changed = False @@ -812,6 +849,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name request = None shutdown = False poweron = vm.is_powered_on() + devices = [] memoryHotAddEnabled = bool(vm.properties.config.memoryHotAddEnabled) cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled) @@ -821,6 +859,18 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name module, vm_disk, changes) request = VI.ReconfigVM_TaskRequestMsg() + # Change extra config + if vm_extra_config: + spec = spec_singleton(spec, request, vm) + extra_config = [] + for k,v in vm_extra_config.iteritems(): + ec = spec.new_extraConfig() + ec.set_element_key(str(k)) + ec.set_element_value(str(v)) + extra_config.append(ec) + spec.set_element_extraConfig(extra_config) + changes["extra_config"] = vm_extra_config + # Change Memory if 'memory_mb' in vm_hardware: @@ -854,7 +904,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name if vm_nic: changed = reconfigure_net(vsphere_client, vm, module, esxi, resource_pool, guest, vm_nic, cluster_name) - # ====( Config Memory )====# + # Change Num CPUs if 'num_cpus' in vm_hardware: if int(vm_hardware['num_cpus']) != vm.properties.config.hardware.numCPU: spec = spec_singleton(spec, request, vm) @@ -885,6 +935,49 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name changes['cpu'] = vm_hardware['num_cpus'] + # Change CDROM + if 'vm_cdrom' in vm_hardware: + spec = spec_singleton(spec, request, vm) + + cdrom_type, cdrom_iso_path = get_cdrom_params(module, vsphere_client, vm_hardware['vm_cdrom']) + + cdrom = None + current_devices = vm.properties.config.hardware.device + + for dev in current_devices: 
+ if dev._type == 'VirtualCdrom': + cdrom = dev._obj + break + + if cdrom_type == 'iso': + iso_location = cdrom_iso_path.split('/', 1) + datastore, ds = find_datastore( + module, vsphere_client, iso_location[0], None) + iso_path = iso_location[1] + iso = VI.ns0.VirtualCdromIsoBackingInfo_Def('iso').pyclass() + iso.set_element_fileName('%s %s' % (datastore, iso_path)) + cdrom.set_element_backing(iso) + cdrom.Connectable.set_element_connected(True) + cdrom.Connectable.set_element_startConnected(True) + elif cdrom_type == 'client': + client = VI.ns0.VirtualCdromRemoteAtapiBackingInfo_Def('client').pyclass() + client.set_element_deviceName("") + cdrom.set_element_backing(client) + cdrom.Connectable.set_element_connected(True) + cdrom.Connectable.set_element_startConnected(True) + else: + vsphere_client.disconnect() + module.fail_json( + msg="Error adding cdrom of type %s to vm spec. " + " cdrom type can either be iso or client" % (cdrom_type)) + + dev_change = spec.new_deviceChange() + dev_change.set_element_device(cdrom) + dev_change.set_element_operation('edit') + devices.append(dev_change) + + changes['cdrom'] = vm_hardware['vm_cdrom'] + # Resize hard drives if vm_disk: spec = spec_singleton(spec, request, vm) @@ -898,7 +991,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name disk_num = 0 dev_changes = [] disks_changed = {} - for disk in sorted(vm_disk.iterkeys()): + for disk in sorted(vm_disk): try: disksize = int(vm_disk[disk]['size_gb']) # Convert the disk size to kilobytes @@ -929,7 +1022,6 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name spec.set_element_deviceChange(dev_changes) changes['disks'] = disks_changed - if len(changes): if shutdown and vm.is_powered_on(): @@ -937,11 +1029,15 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name vm.power_off(sync_run=True) vm.get_status() - except Exception, e: + except Exception: + e = get_exception() module.fail_json( 
msg='Failed to shutdown vm %s: %s' % (guest, e) ) + if len(devices): + spec.set_element_deviceChange(devices) + request.set_element_spec(spec) ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval @@ -957,7 +1053,8 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name if vm.is_powered_off() and poweron: try: vm.power_on(sync_run=True) - except Exception, e: + except Exception: + e = get_exception() module.fail_json( msg='Failed to power on vm %s : %s' % (guest, e) ) @@ -1158,7 +1255,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, try: hostmor = [k for k, v in vsphere_client.get_hosts().items() if v == esxi_hostname][0] - except IndexError, e: + except IndexError: vsphere_client.disconnect() module.fail_json(msg="Cannot find esx host named: %s" % esxi_hostname) @@ -1184,7 +1281,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, try: cluster = [k for k, v in vsphere_client.get_clusters().items() if v == cluster_name][0] if cluster_name else None - except IndexError, e: + except IndexError: vsphere_client.disconnect() module.fail_json(msg="Cannot find Cluster named: %s" % cluster_name) @@ -1193,7 +1290,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, rpmor = [k for k, v in vsphere_client.get_resource_pools( from_mor=cluster).items() if v == resource_pool][0] - except IndexError, e: + except IndexError: vsphere_client.disconnect() module.fail_json(msg="Cannot find Resource Pool named: %s" % resource_pool) @@ -1250,7 +1347,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, if vm_disk: disk_num = 0 disk_key = 0 - for disk in sorted(vm_disk.iterkeys()): + for disk in sorted(vm_disk): try: datastore = vm_disk[disk]['datastore'] except KeyError: @@ -1280,23 +1377,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, disk_num = disk_num + 1 disk_key = disk_key + 1 if 
'vm_cdrom' in vm_hardware: - cdrom_iso_path = None - cdrom_type = None - try: - cdrom_type = vm_hardware['vm_cdrom']['type'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. cdrom type needs to be" - " specified." % vm_hardware['vm_cdrom']) - if cdrom_type == 'iso': - try: - cdrom_iso_path = vm_hardware['vm_cdrom']['iso_path'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. cdrom iso_path needs" - " to be specified." % vm_hardware['vm_cdrom']) + cdrom_type, cdrom_iso_path = get_cdrom_params(module, vsphere_client, vm_hardware['vm_cdrom']) # Add a CD-ROM device to the VM. add_cdrom(module, vsphere_client, config_target, config, devices, default_devs, cdrom_type, cdrom_iso_path) @@ -1322,7 +1403,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, add_floppy(module, vsphere_client, config_target, config, devices, default_devs, floppy_type, floppy_image_path) if vm_nic: - for nic in sorted(vm_nic.iterkeys()): + for nic in sorted(vm_nic): try: nictype = vm_nic[nic]['type'] except KeyError: @@ -1397,7 +1478,8 @@ def delete_vm(vsphere_client, module, guest, vm, force): vm.power_off(sync_run=True) vm.get_status() - except Exception, e: + except Exception: + e = get_exception() module.fail_json( msg='Failed to shutdown vm %s: %s' % (guest, e)) else: @@ -1421,7 +1503,8 @@ def delete_vm(vsphere_client, module, guest, vm, force): module.fail_json(msg="Error removing vm: %s %s" % task.get_error_message()) module.exit_json(changed=True, changes="VM %s deleted" % guest) - except Exception, e: + except Exception: + e = get_exception() module.fail_json( msg='Failed to delete vm %s : %s' % (guest, e)) @@ -1463,8 +1546,8 @@ def power_state(vm, state, force): % power_status return True - except Exception, e: - return e + except Exception: + return get_exception() return False @@ -1481,6 +1564,7 @@ def gather_facts(vm): 'hw_guest_full_name': 
vm.properties.config.guestFullName, 'hw_guest_id': vm.properties.config.guestId, 'hw_product_uuid': vm.properties.config.uuid, + 'hw_instance_uuid': vm.properties.config.instanceUuid, 'hw_processor_count': vm.properties.config.hardware.numCPU, 'hw_memtotal_mb': vm.properties.config.hardware.memoryMB, 'hw_interfaces':[], @@ -1706,7 +1790,8 @@ def main(): module.fail_json(msg='Unable to validate the certificate of the vcenter host %s' % vcenter_hostname) else: raise - except VIApiException, err: + except VIApiException: + err = get_exception() module.fail_json(msg="Cannot connect to %s: %s" % (vcenter_hostname, err)) @@ -1721,7 +1806,8 @@ def main(): if vmware_guest_facts: try: module.exit_json(ansible_facts=gather_facts(vm)) - except Exception, e: + except Exception: + e = get_exception() module.fail_json( msg="Fact gather failed with exception %s" % e) # Power Changes diff --git a/commands/command.py b/commands/command.py index 26fa43dff42..9b8afe3ef58 100644 --- a/commands/command.py +++ b/commands/command.py @@ -2,6 +2,7 @@ # -*- coding: utf-8 -*- # (c) 2012, Michael DeHaan , and others +# (c) 2016, Toshio Kuratomi # # This file is part of Ansible # @@ -18,14 +19,9 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -import copy -import sys -import datetime -import glob -import traceback -import re -import shlex -import os +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -36,7 +32,7 @@ - The M(command) module takes the command name followed by a list of space-delimited arguments. - The given command will be executed on all selected nodes. It will not be processed through the shell, so variables like C($HOME) and operations - like C("<"), C(">"), C("|"), and C("&") will not work (use the M(shell) + like C("<"), C(">"), C("|"), C(";") and C("&") will not work (use the M(shell) module if you need these features). 
options: free_form: @@ -101,47 +97,24 @@ creates: /path/to/database ''' -# Dict of options and their defaults -OPTIONS = {'chdir': None, - 'creates': None, - 'executable': None, - 'NO_LOG': None, - 'removes': None, - 'warn': True, - } +import datetime +import glob +import shlex +import os -# This is a pretty complex regex, which functions as follows: -# -# 1. (^|\s) -# ^ look for a space or the beginning of the line -# 2. ({options_list})= -# ^ expanded to (chdir|creates|executable...)= -# look for a valid param, followed by an '=' -# 3. (?P[\'"])? -# ^ look for an optional quote character, which can either be -# a single or double quote character, and store it for later -# 4. (.*?) -# ^ match everything in a non-greedy manner until... -# 5. (?(quote)(?[\'"])?(.*?)(?(quote)(?. +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: raw @@ -28,6 +32,8 @@ executable: description: - change the shell used to execute the command. Should be an absolute path to the executable. + - when using privilege escalation (C(become)), a default shell will be assigned if one is not provided + as privilege escalation requires a shell. required: false version_added: "1.0" description: @@ -48,10 +54,10 @@ using C(gather_facts: no) if you're using C(raw) to bootstrap python onto the machine." - If you want to execute a command securely and predictably, it may be - better to use the M(command) module instead. Best practices when writing - playbooks will follow the trend of using M(command) unless M(shell) is - explicitly required. When running ad-hoc commands, use your best - judgement. + better to use the M(command) or M(shell) modules instead. + - the C(environment) keyword does not work with raw normally, it requires a shell + which means it only works if C(executable) is set or using the module + with privilege escalation (C(become)). 
author: - Ansible Core Team - Michael DeHaan diff --git a/commands/script.py b/commands/script.py index 9fed7928ce0..47a6571455f 100644 --- a/commands/script.py +++ b/commands/script.py @@ -13,6 +13,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = """ --- module: script @@ -46,7 +50,8 @@ version_added: "1.5" notes: - It is usually preferable to write Ansible modules than pushing scripts. Convert your script to an Ansible module for bonus points! -author: + - The ssh connection plugin will force psuedo-tty allocation via -tt when scripts are executed. psuedo-ttys do not have a stderr channel and all stderr is sent to stdout. If you depend on separated stdout and stderr result keys, please switch to a copy+command set of tasks instead of using script. +author: - Ansible Core Team - Michael DeHaan """ @@ -56,8 +61,12 @@ - script: /some/local/script.sh --some-arguments 1234 # Run a script that creates a file, but only if the file is not yet created -- script: /some/local/create_file.sh --some-arguments 1234 creates=/the/created/file.txt +- script: /some/local/create_file.sh --some-arguments 1234 + args: + creates: /the/created/file.txt # Run a script that removes a file, but only if the file is not yet removed -- script: /some/local/remove_file.sh --some-arguments 1234 removes=/the/removed/file.txt +- script: /some/local/remove_file.sh --some-arguments 1234 + args: + removes: /the/removed/file.txt ''' diff --git a/commands/shell.py b/commands/shell.py index 96bbae5e296..93d187b81ec 100644 --- a/commands/shell.py +++ b/commands/shell.py @@ -17,6 +17,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: shell @@ -68,11 +72,11 @@ playbooks will follow the trend of using M(command) unless M(shell) is explicitly required. When running ad-hoc commands, use your best judgement. - - To sanitize any variables passed to the shell module, you should use + - To sanitize any variables passed to the shell module, you should use "{{ var | quote }}" instead of just "{{ var }}" to make sure they don't include evil things like semicolons. requirements: [ ] -author: +author: - Ansible Core Team - Michael DeHaan ''' @@ -83,7 +87,9 @@ - shell: somescript.sh >> somelog.txt # Change the working directory to somedir/ before executing the command. -- shell: somescript.sh >> somelog.txt chdir=somedir/ +- shell: somescript.sh >> somelog.txt + args: + chdir: somedir/ # You can also use the 'args' form to provide the options. This command # will change the working directory to somedir/ and will only run when @@ -146,4 +152,4 @@ returned: always type: list of strings sample: [u'Clustering node rabbit@slave1 with rabbit@master ...'] -''' \ No newline at end of file +''' diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index 37cccba11dc..b3ae64f206c 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -19,6 +19,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: mysql_db @@ -48,7 +52,7 @@ default: null encoding: description: - - Encoding mode + - Encoding mode to use, examples include C(utf8) or C(latin1_swedish_ci) required: false default: null target: @@ -56,23 +60,55 @@ - Location, on the remote host, of the dump file to read from or write to. 
Uncompressed SQL files (C(.sql)) as well as bzip2 (C(.bz2)), gzip (C(.gz)) and xz (Added in 2.0) compressed files are supported. required: false + single_transaction: + description: + - Execute the dump in a single transaction + required: false + default: false + version_added: "2.1" + quick: + description: + - Option used for dumping large tables + required: false + default: true + version_added: "2.1" author: "Ansible Core Team" +requirements: + - mysql (command line binary) + - mysqldump (command line binary) +notes: + - Requires the python-mysqldb package on the remote host, as well as mysql and mysqldump binaries. extends_documentation_fragment: mysql ''' EXAMPLES = ''' -# Create a new database with name 'bobdata' -- mysql_db: name=bobdata state=present +- name: Create a new database with name 'bobdata' + mysql_db: + name: bobdata + state: present # Copy database dump file to remote host and restore it to database 'my_db' -- copy: src=dump.sql.bz2 dest=/tmp -- mysql_db: name=my_db state=import target=/tmp/dump.sql.bz2 +- name: Copy database dump file + copy: + src: dump.sql.bz2 + dest: /tmp +- name: Restore database + mysql_db: + name: my_db + state: import + target: /tmp/dump.sql.bz2 -# Dumps all databases to hostname.sql -- mysql_db: state=dump name=all target=/tmp/{{ inventory_hostname }}.sql +- name: Dump all databases to hostname.sql + mysql_db: + state: dump + name: all + target: /tmp/{{ inventory_hostname }}.sql -# Imports file.sql similiar to mysql -u -p < hostname.sql -- mysql_db: state=import name=all target=/tmp/{{ inventory_hostname }}.sql +- name: Import file.sql similar to mysql -u -p < hostname.sql + mysql_db: + state: import + name: all + target: /tmp/{{ inventory_hostname }}.sql ''' import os @@ -100,12 +136,11 @@ def db_delete(cursor, db): cursor.execute(query) return True -def db_dump(module, host, user, password, db_name, target, all_databases, port, config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None): +def db_dump(module, 
host, user, password, db_name, target, all_databases, port, config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None, single_transaction=None, quick=None): cmd = module.get_bin_path('mysqldump', True) # If defined, mysqldump demands --defaults-extra-file be the first option if config_file: cmd += " --defaults-extra-file=%s" % pipes.quote(config_file) - cmd += " --quick" if user is not None: cmd += " --user=%s" % pipes.quote(user) if password is not None: @@ -124,6 +159,10 @@ def db_dump(module, host, user, password, db_name, target, all_databases, port, cmd += " --all-databases" else: cmd += " %s" % pipes.quote(db_name) + if single_transaction: + cmd += " --single-transaction=true" + if quick: + cmd += " --quick" path = None if os.path.splitext(target)[-1] == '.gz': @@ -212,20 +251,22 @@ def main(): module = AnsibleModule( argument_spec = dict( login_user=dict(default=None), - login_password=dict(default=None), + login_password=dict(default=None, no_log=True), login_host=dict(default="localhost"), login_port=dict(default=3306, type='int'), login_unix_socket=dict(default=None), name=dict(required=True, aliases=['db']), encoding=dict(default=""), collation=dict(default=""), - target=dict(default=None), + target=dict(default=None, type='path'), state=dict(default="present", choices=["absent", "present","dump", "import"]), - ssl_cert=dict(default=None), - ssl_key=dict(default=None), - ssl_ca=dict(default=None), + ssl_cert=dict(default=None, type='path'), + ssl_key=dict(default=None, type='path'), + ssl_ca=dict(default=None, type='path'), connect_timeout=dict(default=30, type='int'), - config_file=dict(default="~/.my.cnf"), + config_file=dict(default="~/.my.cnf", type='path'), + single_transaction=dict(default=False, type='bool'), + quick=dict(default=True, type='bool'), ), supports_check_mode=True ) @@ -247,14 +288,11 @@ def main(): ssl_ca = module.params["ssl_ca"] connect_timeout = module.params['connect_timeout'] config_file = module.params['config_file'] - 
config_file = os.path.expanduser(os.path.expandvars(config_file)) login_password = module.params["login_password"] login_user = module.params["login_user"] login_host = module.params["login_host"] - - # make sure the target path is expanded for ~ and $HOME - if target is not None: - target = os.path.expandvars(os.path.expanduser(target)) + single_transaction = module.params["single_transaction"] + quick = module.params["quick"] if state in ['dump','import']: if target is None: @@ -270,7 +308,8 @@ def main(): try: cursor = mysql_connect(module, login_user, login_password, config_file, ssl_cert, ssl_key, ssl_ca, connect_timeout=connect_timeout) - except Exception, e: + except Exception: + e = get_exception() if os.path.exists(config_file): module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e)) else: @@ -282,23 +321,27 @@ def main(): if db_exists(cursor, db): if state == "absent": if module.check_mode: - changed = True + module.exit_json(changed=True, db=db) else: try: changed = db_delete(cursor, db) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="error deleting database: " + str(e)) + module.exit_json(changed=changed, db=db) + elif state == "dump": if module.check_mode: module.exit_json(changed=True, db=db) else: rc, stdout, stderr = db_dump(module, login_host, login_user, login_password, db, target, all_databases, - login_port, config_file, socket, ssl_cert, ssl_key, ssl_ca) + login_port, config_file, socket, ssl_cert, ssl_key, ssl_ca, single_transaction, quick) if rc != 0: module.fail_json(msg="%s" % stderr) else: module.exit_json(changed=True, db=db, msg=stdout) + elif state == "import": if module.check_mode: module.exit_json(changed=True, db=db) @@ -310,6 +353,12 @@ def main(): module.fail_json(msg="%s" % stderr) else: module.exit_json(changed=True, db=db, msg=stdout) + + elif state == "present": + if 
module.check_mode: + module.exit_json(changed=False, db=db) + module.exit_json(changed=False, db=db) + else: if state == "present": if module.check_mode: @@ -317,10 +366,38 @@ def main(): else: try: changed = db_create(cursor, db, encoding, collation) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="error creating database: " + str(e)) + module.exit_json(changed=changed, db=db) - module.exit_json(changed=changed, db=db) + elif state == "import": + if module.check_mode: + module.exit_json(changed=True, db=db) + else: + try: + changed = db_create(cursor, db, encoding, collation) + if changed: + rc, stdout, stderr = db_import(module, login_host, login_user, + login_password, db, target, all_databases, + login_port, config_file, socket, ssl_cert, ssl_key, ssl_ca) + if rc != 0: + module.fail_json(msg="%s" % stderr) + else: + module.exit_json(changed=True, db=db, msg=stdout) + except Exception: + e = get_exception() + module.fail_json(msg="error creating database: " + str(e)) + + elif state == "absent": + if module.check_mode: + module.exit_json(changed=False, db=db) + module.exit_json(changed=False, db=db) + + elif state == "dump": + if module.check_mode: + module.exit_json(changed=False, db=db) + module.fail_json(msg="Cannot dump database %s - not found" % (db)) # import module snippets from ansible.module_utils.basic import * diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 0a3e1ff014c..286106fe711 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: mysql_user @@ -32,7 +36,7 @@ required: true password: description: - - set the user's password. (Required when adding a user) + - set the user's password. 
required: false default: null encrypted: @@ -103,7 +107,7 @@ without providing any login_user/login_password details. The second must drop a ~/.my.cnf file containing the new root credentials. Subsequent runs of the playbook will then succeed by reading the new credentials from the file." - - Currently, there is only support for the `mysql_native_password` encryted password hash module. + - Currently, there is only support for the `mysql_native_password` encrypted password hash module. author: "Jonathan Mainguy (@Jmainguy)" extends_documentation_fragment: mysql @@ -111,43 +115,89 @@ EXAMPLES = """ # Removes anonymous user account for localhost -- mysql_user: name='' host=localhost state=absent +- mysql_user: + name: '' + host: localhost + state: absent # Removes all anonymous user accounts -- mysql_user: name='' host_all=yes state=absent +- mysql_user: + name: '' + host_all: yes + state: absent # Create database user with name 'bob' and password '12345' with all database privileges -- mysql_user: name=bob password=12345 priv=*.*:ALL state=present +- mysql_user: + name: bob + password: 12345 + priv: '*.*:ALL' + state: present # Create database user with name 'bob' and previously hashed mysql native password '*EE0D72C1085C46C5278932678FBE2C6A782821B4' with all database privileges -- mysql_user: name=bob password='*EE0D72C1085C46C5278932678FBE2C6A782821B4' encrypted=yes priv=*.*:ALL state=present +- mysql_user: + name: bob + password: '*EE0D72C1085C46C5278932678FBE2C6A782821B4' + encrypted: yes + priv: '*.*:ALL' + state: present # Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION' -- mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present +- mysql_user: + name: bob + password: 12345 + priv: '*.*:ALL,GRANT' + state: present # Modify user Bob to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself. 
-- mysql_user: name=bob append_privs=true priv=*.*:REQUIRESSL state=present +- mysql_user: + name: bob + append_privs: true + priv: '*.*:REQUIRESSL' + state: present # Ensure no user named 'sally'@'localhost' exists, also passing in the auth credentials. -- mysql_user: login_user=root login_password=123456 name=sally state=absent +- mysql_user: + login_user: root + login_password: 123456 + name: sally + state: absent # Ensure no user named 'sally' exists at all -- mysql_user: name=sally host_all=yes state=absent +- mysql_user: + name: sally + host_all: yes + state: absent # Specify grants composed of more than one word -- mysql_user: name=replication password=12345 priv="*.*:REPLICATION CLIENT" state=present +- mysql_user: + name: replication + password: 12345 + priv: "*.*:REPLICATION CLIENT" + state: present # Revoke all privileges for user 'bob' and password '12345' -- mysql_user: name=bob password=12345 priv=*.*:USAGE state=present +- mysql_user: + name: bob + password: 12345 + priv: "*.*:USAGE" + state: present # Example privileges string format mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanotherdb.*:ALL # Example using login_unix_socket to connect to server -- mysql_user: name=root password=abc123 login_unix_socket=/var/run/mysqld/mysqld.sock +- mysql_user: + name: root + password: abc123 + login_unix_socket: /var/run/mysqld/mysqld.sock # Example of skipping binary logging while adding user 'bob' -- mysql_user: name=bob password=12345 priv=*.*:USAGE state=present sql_log_bin=no +- mysql_user: + name: bob + password: 12345 + priv: "*.*:USAGE" + state: present + sql_log_bin: no # Example .my.cnf file for setting the root password @@ -166,6 +216,7 @@ mysqldb_found = False else: mysqldb_found = True +from ansible.module_utils.six import iteritems VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION', 'LOCK TABLES', 'REFERENCES', 'EVENT', 'ALTER', @@ -213,7 +264,7 @@ def get_mode(cursor): def user_exists(cursor, user, host, host_all): if host_all: - 
cursor.execute("SELECT count(*) FROM user WHERE user = %s", user) + cursor.execute("SELECT count(*) FROM user WHERE user = %s", ([user])) else: cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user,host)) @@ -232,9 +283,10 @@ def user_add(cursor, user, host, host_all, password, encrypted, new_priv, check_ cursor.execute("CREATE USER %s@%s IDENTIFIED BY PASSWORD %s", (user,host,password)) elif password and not encrypted: cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user,host,password)) - + else: + cursor.execute("CREATE USER %s@%s", (user,host)) if new_priv is not None: - for db_table, priv in new_priv.iteritems(): + for db_table, priv in iteritems(new_priv): privileges_grant(cursor, user,host,db_table,priv) return True @@ -245,12 +297,12 @@ def is_hash(password): ishash = True return ishash -def user_mod(cursor, user, host, host_all, password, encrypted, new_priv, append_privs, check_mode): +def user_mod(cursor, user, host, host_all, password, encrypted, new_priv, append_privs, module): changed = False grant_option = False if host_all: - hostnames = user_get_hostnames(cursor, user) + hostnames = user_get_hostnames(cursor, [user]) else: hostnames = [host] @@ -259,18 +311,18 @@ def user_mod(cursor, user, host, host_all, password, encrypted, new_priv, append if bool(password): # Determine what user management method server uses old_user_mgmt = server_version_check(cursor) - + if old_user_mgmt: cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host)) else: cursor.execute("SELECT authentication_string FROM user WHERE user = %s AND host = %s", (user,host)) current_pass_hash = cursor.fetchone() - + if encrypted: encrypted_string = (password) if is_hash(password): if current_pass_hash[0] != encrypted_string: - if check_mode: + if module.check_mode: return True if old_user_mgmt: cursor.execute("SET PASSWORD FOR %s@%s = %s", (user, host, password)) @@ -286,36 +338,36 @@ def user_mod(cursor, user, host, 
host_all, password, encrypted, new_priv, append cursor.execute("SELECT CONCAT('*', UCASE(SHA1(UNHEX(SHA1(%s)))))", (password,)) new_pass_hash = cursor.fetchone() if current_pass_hash[0] != new_pass_hash[0]: - if check_mode: + if module.check_mode: return True if old_user_mgmt: cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user, host, password)) else: - cursor.execute("ALTER USER %s@%s IDENTIFIED BY %s", (user, host, password)) + cursor.execute("ALTER USER %s@%s IDENTIFIED WITH mysql_native_password BY %s", (user, host, password)) changed = True - + # Handle privileges if new_priv is not None: curr_priv = privileges_get(cursor, user,host) # If the user has privileges on a db.table that doesn't appear at all in # the new specification, then revoke all privileges on it. - for db_table, priv in curr_priv.iteritems(): + for db_table, priv in iteritems(curr_priv): # If the user has the GRANT OPTION on a db.table, revoke it first. if "GRANT" in priv: grant_option = True if db_table not in new_priv: if user != "root" and "PROXY" not in priv and not append_privs: - if check_mode: + if module.check_mode: return True privileges_revoke(cursor, user,host,db_table,priv,grant_option) changed = True # If the user doesn't currently have any privileges on a db.table, then # we can perform a straight grant operation. 
- for db_table, priv in new_priv.iteritems(): + for db_table, priv in iteritems(new_priv): if db_table not in curr_priv: - if check_mode: + if module.check_mode: return True privileges_grant(cursor, user,host,db_table,priv) changed = True @@ -326,7 +378,7 @@ def user_mod(cursor, user, host, host_all, password, encrypted, new_priv, append for db_table in db_table_intersect: priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table]) if (len(priv_diff) > 0): - if check_mode: + if module.check_mode: return True if not append_privs: privileges_revoke(cursor, user,host,db_table,curr_priv[db_table],grant_option) @@ -340,7 +392,7 @@ def user_delete(cursor, user, host, host_all, check_mode): return True if host_all: - hostnames = user_get_hostnames(cursor, user) + hostnames = user_get_hostnames(cursor, [user]) for hostname in hostnames: cursor.execute("DROP USER %s@%s", (user, hostname)) @@ -413,9 +465,12 @@ def privileges_unpack(priv, mode): for item in priv.strip().split('/'): pieces = item.strip().split(':') dbpriv = pieces[0].rsplit(".", 1) - # Do not escape if privilege is for database '*' (all databases) - if dbpriv[0].strip('`') != '*': - pieces[0] = '%s%s%s.%s' % (quote, dbpriv[0].strip('`'), quote, dbpriv[1]) + # Do not escape if privilege is for database or table, i.e. + # neither quote *. 
nor .* + for i, side in enumerate(dbpriv): + if side.strip('`') != '*': + dbpriv[i] = '%s%s%s' % (quote, side.strip('`'), quote) + pieces[0] = '.'.join(dbpriv) if '(' in pieces[1]: output[pieces[0]] = re.split(r',\s*(?=[^)]*(?:\(|$))', pieces[1].upper()) @@ -474,7 +529,7 @@ def main(): module = AnsibleModule( argument_spec = dict( login_user=dict(default=None), - login_password=dict(default=None), + login_password=dict(default=None, no_log=True), login_host=dict(default="localhost"), login_port=dict(default=3306, type='int'), login_unix_socket=dict(default=None), @@ -489,11 +544,11 @@ def main(): check_implicit_admin=dict(default=False, type='bool'), update_password=dict(default="always", choices=["always", "on_create"]), connect_timeout=dict(default=30, type='int'), - config_file=dict(default="~/.my.cnf"), + config_file=dict(default="~/.my.cnf", type='path'), sql_log_bin=dict(default=True, type='bool'), - ssl_cert=dict(default=None), - ssl_key=dict(default=None), - ssl_ca=dict(default=None), + ssl_cert=dict(default=None, type='path'), + ssl_key=dict(default=None, type='path'), + ssl_ca=dict(default=None, type='path'), ), supports_check_mode=True ) @@ -517,7 +572,6 @@ def main(): db = 'mysql' sql_log_bin = module.params["sql_log_bin"] - config_file = os.path.expanduser(os.path.expandvars(config_file)) if not mysqldb_found: module.fail_json(msg="the python mysqldb module is required") @@ -533,7 +587,8 @@ def main(): if not cursor: cursor = mysql_connect(module, login_user, login_password, config_file, ssl_cert, ssl_key, ssl_ca, db, connect_timeout=connect_timeout) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. 
Exception message: %s" % (config_file, e)) if not sql_log_bin: @@ -542,31 +597,33 @@ def main(): if priv is not None: try: mode = get_mode(cursor) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg=str(e)) try: priv = privileges_unpack(priv, mode) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="invalid privileges string: %s" % str(e)) if state == "present": if user_exists(cursor, user, host, host_all): try: if update_password == 'always': - changed = user_mod(cursor, user, host, host_all, password, encrypted, priv, append_privs, module.check_mode) + changed = user_mod(cursor, user, host, host_all, password, encrypted, priv, append_privs, module) else: - changed = user_mod(cursor, user, host, host_all, None, encrypted, priv, append_privs, module.check_mode) + changed = user_mod(cursor, user, host, host_all, None, encrypted, priv, append_privs, module) - except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e: + except (SQLParseError, InvalidPrivsError, MySQLdb.Error): + e = get_exception() module.fail_json(msg=str(e)) else: - if password is None: - module.fail_json(msg="password parameter required when adding a user") if host_all: module.fail_json(msg="host_all parameter cannot be used when adding a user") try: changed = user_add(cursor, user, host, host_all, password, encrypted, priv, module.check_mode) - except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e: + except (SQLParseError, InvalidPrivsError, MySQLdb.Error): + e = get_exception() module.fail_json(msg=str(e)) elif state == "absent": if user_exists(cursor, user, host, host_all): diff --git a/database/mysql/mysql_variables.py b/database/mysql/mysql_variables.py index e46ceba5208..506ff705d56 100644 --- a/database/mysql/mysql_variables.py +++ b/database/mysql/mysql_variables.py @@ -22,6 +22,10 @@ along with Ansible. If not, see . 
""" +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: mysql_variables @@ -44,14 +48,16 @@ ''' EXAMPLES = ''' # Check for sync_binlog setting -- mysql_variables: variable=sync_binlog +- mysql_variables: + variable: sync_binlog # Set read_only variable to 1 -- mysql_variables: variable=read_only value=1 +- mysql_variables: + variable: read_only + value: 1 ''' -import os import warnings from re import match @@ -110,7 +116,8 @@ def setvariable(cursor, mysqlvar, value): cursor.execute(query + "%s", (value,)) cursor.fetchall() result = True - except Exception, e: + except Exception: + e = get_exception() result = str(e) return result @@ -118,9 +125,9 @@ def main(): module = AnsibleModule( argument_spec = dict( login_user=dict(default=None), - login_password=dict(default=None), - login_host=dict(default="127.0.0.1"), - login_port=dict(default="3306", type='int'), + login_password=dict(default=None, no_log=True), + login_host=dict(default="localhost"), + login_port=dict(default=3306, type='int'), login_unix_socket=dict(default=None), variable=dict(default=None), value=dict(default=None), @@ -128,19 +135,16 @@ def main(): ssl_key=dict(default=None), ssl_ca=dict(default=None), connect_timeout=dict(default=30, type='int'), - config_file=dict(default="~/.my.cnf") + config_file=dict(default="~/.my.cnf", type="path") ) ) user = module.params["login_user"] password = module.params["login_password"] - host = module.params["login_host"] - port = module.params["login_port"] ssl_cert = module.params["ssl_cert"] ssl_key = module.params["ssl_key"] ssl_ca = module.params["ssl_ca"] connect_timeout = module.params['connect_timeout'] config_file = module.params['config_file'] - config_file = os.path.expanduser(os.path.expandvars(config_file)) db = 'mysql' mysqlvar = module.params["variable"] @@ -157,7 +161,8 @@ def main(): try: cursor = mysql_connect(module, user, password, config_file, ssl_cert, ssl_key, 
ssl_ca, db, connect_timeout=connect_timeout) - except Exception, e: + except Exception: + e = get_exception() if os.path.exists(config_file): module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e)) else: @@ -176,7 +181,8 @@ def main(): module.exit_json(msg="Variable already set to requested value", changed=False) try: result = setvariable(cursor, mysqlvar, value_wanted) - except SQLParseError, e: + except SQLParseError: + e = get_exception() result = str(e) if result is True: module.exit_json(msg="Variable change succeeded prev_value=%s" % value_actual, changed=True) @@ -187,4 +193,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.database import * from ansible.module_utils.mysql import * -main() +if __name__ == '__main__': + main() diff --git a/database/postgresql/postgresql_db.py b/database/postgresql/postgresql_db.py old mode 100644 new mode 100755 index cca41963293..ffd22060096 --- a/database/postgresql/postgresql_db.py +++ b/database/postgresql/postgresql_db.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: postgresql_db @@ -95,16 +99,18 @@ EXAMPLES = ''' # Create a new database with name "acme" -- postgresql_db: name=acme +- postgresql_db: + name: acme # Create a new database with name "acme" and specific encoding and locale # settings. If a template different from "template0" is specified, encoding # and locale settings must match those of the template. 
-- postgresql_db: name=acme - encoding='UTF-8' - lc_collate='de_DE.UTF-8' - lc_ctype='de_DE.UTF-8' - template='template0' +- postgresql_db: + name: acme + encoding: UTF-8 + lc_collate: de_DE.UTF-8 + lc_ctype: de_DE.UTF-8 + template: template0 ''' try: @@ -114,6 +120,7 @@ postgresqldb_found = False else: postgresqldb_found = True +from ansible.module_utils.six import iteritems class NotSupportedError(Exception): pass @@ -261,7 +268,7 @@ def main(): "login_password":"password", "port":"port" } - kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems() + kw = dict( (params_map[k], v) for (k, v) in iteritems(module.params) if k in params_map and v != '' ) # If a login_unix_socket is specified, incorporate it here. @@ -280,36 +287,41 @@ def main(): .ISOLATION_LEVEL_AUTOCOMMIT) cursor = db_connection.cursor( cursor_factory=psycopg2.extras.DictCursor) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="unable to connect to database: %s" % e) try: if module.check_mode: if state == "absent": - changed = not db_exists(cursor, db) + changed = db_exists(cursor, db) elif state == "present": changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype) - module.exit_json(changed=changed,db=db) + module.exit_json(changed=changed, db=db) if state == "absent": try: changed = db_delete(cursor, db) - except SQLParseError, e: + except SQLParseError: + e = get_exception() module.fail_json(msg=str(e)) elif state == "present": try: changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype) - except SQLParseError, e: + except SQLParseError: + e = get_exception() module.fail_json(msg=str(e)) - except NotSupportedError, e: + except NotSupportedError: + e = get_exception() module.fail_json(msg=str(e)) except SystemExit: # Avoid catching this on Python 2.4 raise - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="Database query failed: %s" % e) 
module.exit_json(changed=changed, db=db) diff --git a/database/postgresql/postgresql_privs.py b/database/postgresql/postgresql_privs.py index 8fefd3de648..ae606464dc9 100644 --- a/database/postgresql/postgresql_privs.py +++ b/database/postgresql/postgresql_privs.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ --- module: postgresql_privs @@ -143,90 +147,90 @@ # On database "library": # GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors # TO librarian, reader WITH GRANT OPTION -- postgresql_privs: > - database=library - state=present - privs=SELECT,INSERT,UPDATE - type=table - objs=books,authors - schema=public - roles=librarian,reader - grant_option=yes +- postgresql_privs: + database: library + state: present + privs: SELECT,INSERT,UPDATE + type: table + objs: books,authors + schema: public + roles: librarian,reader + grant_option: yes # Same as above leveraging default values: -- postgresql_privs: > - db=library - privs=SELECT,INSERT,UPDATE - objs=books,authors - roles=librarian,reader - grant_option=yes +- postgresql_privs: + db: library + privs: SELECT,INSERT,UPDATE + objs: books,authors + roles: librarian,reader + grant_option: yes # REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader # Note that role "reader" will be *granted* INSERT privilege itself if this -# isn't already the case (since state=present). -- postgresql_privs: > - db=library - state=present - priv=INSERT - obj=books - role=reader - grant_option=no +# isn't already the case (since state: present). +- postgresql_privs: + db: library + state: present + priv: INSERT + obj: books + role: reader + grant_option: no # REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader # "public" is the default schema. This also works for PostgreSQL 8.x. 
-- postgresql_privs: > - db=library - state=absent - privs=INSERT,UPDATE - objs=ALL_IN_SCHEMA - role=reader +- postgresql_privs: + db: library + state: absent + privs: INSERT,UPDATE + objs: ALL_IN_SCHEMA + role: reader # GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian -- postgresql_privs: > - db=library - privs=ALL - type=schema - objs=public,math - role=librarian +- postgresql_privs: + db: library + privs: ALL + type: schema + objs: public,math + role: librarian # GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader # Note the separation of arguments with colons. -- postgresql_privs: > - db=library - privs=ALL - type=function - obj=add(int:int) - schema=math - roles=librarian,reader +- postgresql_privs: + db: library + privs: ALL + type: function + obj: add(int:int) + schema: math + roles: librarian,reader # GRANT librarian, reader TO alice, bob WITH ADMIN OPTION # Note that group role memberships apply cluster-wide and therefore are not # restricted to database "library" here. 
-- postgresql_privs: > - db=library - type=group - objs=librarian,reader - roles=alice,bob - admin_option=yes +- postgresql_privs: + db: library + type: group + objs: librarian,reader + roles: alice,bob + admin_option: yes # GRANT ALL PRIVILEGES ON DATABASE library TO librarian -# Note that here "db=postgres" specifies the database to connect to, not the +# Note that here "db: postgres" specifies the database to connect to, not the # database to grant privileges on (which is specified via the "objs" param) -- postgresql_privs: > - db=postgres - privs=ALL - type=database - obj=library - role=librarian +- postgresql_privs: + db: postgres + privs: ALL + type: database + obj: library + role: librarian # GRANT ALL PRIVILEGES ON DATABASE library TO librarian # If objs is omitted for type "database", it defaults to the database # to which the connection is established -- postgresql_privs: > - db=library - privs=ALL - type=database - role=librarian +- postgresql_privs: + db: library + privs: ALL + type: database + role: librarian """ try: @@ -537,7 +541,7 @@ def main(): port=dict(type='int', default=5432), unix_socket=dict(default='', aliases=['login_unix_socket']), login=dict(default='postgres', aliases=['login_user']), - password=dict(default='', aliases=['login_password']) + password=dict(default='', aliases=['login_password'], no_log=True) ), supports_check_mode = True ) @@ -573,7 +577,8 @@ def main(): module.fail_json(msg='Python module "psycopg2" must be installed.') try: conn = Connection(p) - except psycopg2.Error, e: + except psycopg2.Error: + e = get_exception() module.fail_json(msg='Could not connect to database: %s' % e) try: @@ -613,11 +618,13 @@ def main(): schema_qualifier=p.schema ) - except Error, e: + except Error: + e = get_exception() conn.rollback() module.fail_json(msg=e.message) - except psycopg2.Error, e: + except psycopg2.Error: + e = get_exception() conn.rollback() # psycopg2 errors come in connection encoding, reencode msg = 
e.message.decode(conn.encoding).encode(sys.getdefaultencoding(), diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index 4f2174330f6..95c19caaba9 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: postgresql_user @@ -142,22 +146,41 @@ EXAMPLES = ''' # Create django user and grant access to database and products table -- postgresql_user: db=acme name=django password=ceec4eif7ya priv=CONNECT/products:ALL +- postgresql_user: + db: acme + name: django + password: ceec4eif7ya + priv: "CONNECT/products:ALL" # Create rails user, grant privilege to create other databases and demote rails from super user status -- postgresql_user: name=rails password=secret role_attr_flags=CREATEDB,NOSUPERUSER +- postgresql_user: + name: rails + password: secret + role_attr_flags: CREATEDB,NOSUPERUSER # Remove test user privileges from acme -- postgresql_user: db=acme name=test priv=ALL/products:ALL state=absent fail_on_user=no +- postgresql_user: + db: acme + name: test + priv: "ALL/products:ALL" + state: absent + fail_on_user: no # Remove test user from test database and the cluster -- postgresql_user: db=test name=test priv=ALL state=absent +- postgresql_user: + db: test + name: test + priv: ALL + state: absent # Example privileges string format INSERT,UPDATE/table:SELECT/anothertable:ALL # Remove an existing user's password -- postgresql_user: db=test user=test password=NULL +- postgresql_user: + db: test + user: test + password: NULL ''' import re @@ -170,6 +193,7 @@ postgresqldb_found = False else: postgresqldb_found = True +from ansible.module_utils.six import iteritems _flags = ('SUPERUSER', 'CREATEROLE', 'CREATEUSER', 'CREATEDB', 
'INHERIT', 'LOGIN', 'REPLICATION') VALID_FLAGS = frozenset(itertools.chain(_flags, ('NO%s' % f for f in _flags))) @@ -290,7 +314,8 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir try: cursor.execute(' '.join(alter), query_password_data) - except psycopg2.InternalError, e: + except psycopg2.InternalError: + e = get_exception() if e.pgcode == '25006': # Handle errors due to read-only transactions indicated by pgcode 25006 # ERROR: cannot execute ALTER ROLE in a read-only transaction @@ -298,7 +323,7 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir module.fail_json(msg=e.pgerror) return changed else: - raise psycopg2.InternalError, e + raise psycopg2.InternalError(e) # Grab new role attributes. cursor.execute(select, {"user": user}) @@ -432,7 +457,7 @@ def revoke_privileges(cursor, user, privs): changed = False for type_ in privs: - for name, privileges in privs[type_].iteritems(): + for name, privileges in iteritems(privs[type_]): # Check that any of the privileges requested to be removed are # currently granted to the user differences = check_funcs[type_](cursor, user, name, privileges) @@ -448,12 +473,9 @@ def grant_privileges(cursor, user, privs): grant_funcs = dict(table=grant_table_privileges, database=grant_database_privileges) check_funcs = dict(table=has_table_privileges, database=has_database_privileges) - grant_funcs = dict(table=grant_table_privileges, database=grant_database_privileges) - check_funcs = dict(table=has_table_privileges, database=has_database_privileges) - changed = False for type_ in privs: - for name, privileges in privs[type_].iteritems(): + for name, privileges in iteritems(privs[type_]): # Check that any of the privileges requested for the user are # currently missing differences = check_funcs[type_](cursor, user, name, privileges) @@ -545,11 +567,11 @@ def main(): module = AnsibleModule( argument_spec=dict( login_user=dict(default="postgres"), - 
login_password=dict(default=""), + login_password=dict(default="", no_log=True), login_host=dict(default=""), login_unix_socket=dict(default=""), user=dict(required=True, aliases=['name']), - password=dict(default=None), + password=dict(default=None, no_log=True), state=dict(default="present", choices=["absent", "present"]), priv=dict(default=None), db=dict(default=''), @@ -575,7 +597,8 @@ def main(): no_password_changes = module.params["no_password_changes"] try: role_attr_flags = parse_role_attrs(module.params["role_attr_flags"]) - except InvalidFlagsError, e: + except InvalidFlagsError: + e = get_exception() module.fail_json(msg=str(e)) if module.params["encrypted"]: encrypted = "ENCRYPTED" @@ -596,7 +619,7 @@ def main(): "port":"port", "db":"database" } - kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems() + kw = dict( (params_map[k], v) for (k, v) in iteritems(module.params) if k in params_map and v != "" ) # If a login_unix_socket is specified, incorporate it here. 
@@ -607,7 +630,8 @@ def main(): try: db_connection = psycopg2.connect(**kw) cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="unable to connect to database: %s" % e) kw = dict(user=user) @@ -618,16 +642,19 @@ def main(): if user_exists(cursor, user): try: changed = user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires, no_password_changes) - except SQLParseError, e: + except SQLParseError: + e = get_exception() module.fail_json(msg=str(e)) else: try: changed = user_add(cursor, user, password, role_attr_flags, encrypted, expires) - except SQLParseError, e: + except SQLParseError: + e = get_exception() module.fail_json(msg=str(e)) try: changed = grant_privileges(cursor, user, privs) or changed - except SQLParseError, e: + except SQLParseError: + e = get_exception() module.fail_json(msg=str(e)) else: if user_exists(cursor, user): @@ -638,7 +665,8 @@ def main(): try: changed = revoke_privileges(cursor, user, privs) user_removed = user_delete(cursor, user) - except SQLParseError, e: + except SQLParseError: + e = get_exception() module.fail_json(msg=str(e)) changed = changed or user_removed if fail_on_user and not user_removed: @@ -658,4 +686,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.database import * -main() + +if __name__ == '__main__': + main() diff --git a/files/acl.py b/files/acl.py index 32910b7cb89..ebd46d53c9b 100644 --- a/files/acl.py +++ b/files/acl.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: acl @@ -78,7 +82,7 @@ required: false default: null description: - - DEPRECATED. The acl to set or remove. This must always be quoted in the form of '::'. 
The qualifier may be empty for some types, but the type and perms are always requried. '-' can be used as placeholder when you do not care about permissions. This is now superseded by entity, type and permissions fields. + - DEPRECATED. The acl to set or remove. This must always be quoted in the form of '::'. The qualifier may be empty for some types, but the type and perms are always required. '-' can be used as placeholder when you do not care about permissions. This is now superseded by entity, type and permissions fields. recursive: version_added: "2.0" @@ -97,19 +101,38 @@ EXAMPLES = ''' # Grant user Joe read access to a file -- acl: name=/etc/foo.conf entity=joe etype=user permissions="r" state=present +- acl: + name: /etc/foo.conf + entity: joe + etype: user + permissions: r + state: present # Removes the acl for Joe on a specific file -- acl: name=/etc/foo.conf entity=joe etype=user state=absent +- acl: + name: /etc/foo.conf + entity: joe + etype: user + state: absent # Sets default acl for joe on foo.d -- acl: name=/etc/foo.d entity=joe etype=user permissions=rw default=yes state=present +- acl: + name: /etc/foo.d + entity: joe + etype: user + permissions: rw + default: yes + state: present # Same as previous but using entry shorthand -- acl: name=/etc/foo.d entry="default:user:joe:rw-" state=present +- acl: + name: /etc/foo.d + entry: "default:user:joe:rw-" + state: present # Obtain the acl for a specific file -- acl: name=/etc/foo.conf +- acl: + name: /etc/foo.conf register: acl_info ''' @@ -152,8 +175,10 @@ def split_entry(entry): return [d, t, e, p] -def build_entry(etype, entity, permissions=None): +def build_entry(etype, entity, permissions=None, use_nfsv4_acls=False): '''Builds and returns an entry string. 
Does not include the permissions bit if they are not provided.''' + if use_nfsv4_acls: + return ':'.join([etype, entity, permissions, 'allow']) if permissions: return etype + ':' + entity + ':' + permissions else: @@ -171,14 +196,18 @@ def build_command(module, mode, path, follow, default, recursive, entry=''): else: # mode == 'get' cmd = [module.get_bin_path('getfacl', True)] # prevents absolute path warnings and removes headers - cmd.append('--omit-header') - cmd.append('--absolute-names') + if get_platform().lower() == 'linux': + cmd.append('--omit-header') + cmd.append('--absolute-names') if recursive: cmd.append('--recursive') if not follow: - cmd.append('--physical') + if get_platform().lower() == 'linux': + cmd.append('--physical') + elif get_platform().lower() == 'freebsd': + cmd.append('-h') if default: if(mode == 'rm'): @@ -192,7 +221,11 @@ def build_command(module, mode, path, follow, default, recursive, entry=''): def acl_changed(module, cmd): '''Returns true if the provided command affects the existing ACLs, false otherwise.''' - cmd = cmd[:] # lists are mutables so cmd would be overriden without this + # FreeBSD do not have a --test flag, so by default, it is safer to always say "true" + if get_platform().lower() == 'freebsd': + return True + + cmd = cmd[:] # lists are mutables so cmd would be overwritten without this cmd.insert(1, '--test') lines = run_acl(module, cmd) @@ -206,10 +239,15 @@ def run_acl(module, cmd, check_rc=True): try: (rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg=e.strerror) - lines = out.splitlines() + lines = [] + for l in out.splitlines(): + if not l.startswith('#'): + lines.append(l.strip()) + if lines and not lines[-1].split(): # trim last line only when it is empty return lines[:-1] @@ -218,12 +256,9 @@ def run_acl(module, cmd, check_rc=True): def main(): - if get_platform().lower() != 'linux': - 
module.fail_json(msg="The acl module is only available for Linux distributions.") - module = AnsibleModule( argument_spec=dict( - name=dict(required=True, aliases=['path'], type='str'), + name=dict(required=True, aliases=['path'], type='path'), entry=dict(required=False, type='str'), entity=dict(required=False, type='str', default=''), etype=dict( @@ -241,11 +276,15 @@ def main(): follow=dict(required=False, type='bool', default=True), default=dict(required=False, type='bool', default=False), recursive=dict(required=False, type='bool', default=False), + use_nfsv4_acls=dict(required=False, type='bool', default=False) ), supports_check_mode=True, ) - path = os.path.expanduser(module.params.get('name')) + if get_platform().lower() not in ['linux', 'freebsd']: + module.fail_json(msg="The acl module is not available on this system.") + + path = module.params.get('name') entry = module.params.get('entry') entity = module.params.get('entity') etype = module.params.get('etype') @@ -254,6 +293,7 @@ def main(): follow = module.params.get('follow') default = module.params.get('default') recursive = module.params.get('recursive') + use_nfsv4_acls = module.params.get('use_nfsv4_acls') if not os.path.exists(path): module.fail_json(msg="Path not found or not accessible.") @@ -288,11 +328,15 @@ def main(): if default_flag != None: default = default_flag + if get_platform().lower() == 'freebsd': + if recursive: + module.fail_json(msg="recursive is not supported on that platform.") + changed = False msg = "" if state == 'present': - entry = build_entry(etype, entity, permissions) + entry = build_entry(etype, entity, permissions, use_nfsv4_acls) command = build_command( module, 'set', path, follow, default, recursive, entry @@ -304,7 +348,7 @@ def main(): msg = "%s is present" % entry elif state == 'absent': - entry = build_entry(etype, entity) + entry = build_entry(etype, entity, use_nfsv4_acls) command = build_command( module, 'rm', path, follow, default, recursive, entry @@ -328,4 
+372,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/files/assemble.py b/files/assemble.py index 9f92e0209d3..41e7530e446 100644 --- a/files/assemble.py +++ b/files/assemble.py @@ -2,6 +2,7 @@ # -*- coding: utf-8 -*- # (c) 2012, Stephen Fromm +# (c) 2016, Toshio Kuratomi # # This file is part of Ansible # @@ -18,10 +19,9 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -import os -import os.path -import tempfile -import re +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -99,51 +99,70 @@ EXAMPLES = ''' # Example from Ansible Playbooks -- assemble: src=/etc/someapp/fragments dest=/etc/someapp/someapp.conf +- assemble: + src: /etc/someapp/fragments + dest: /etc/someapp/someapp.conf # When a delimiter is specified, it will be inserted in between each fragment -- assemble: src=/etc/someapp/fragments dest=/etc/someapp/someapp.conf delimiter='### START FRAGMENT ###' +- assemble: + src: /etc/someapp/fragments + dest: /etc/someapp/someapp.conf + delimiter: '### START FRAGMENT ###' # Copy a new "sshd_config" file into place, after passing validation with sshd -- assemble: src=/etc/ssh/conf.d/ dest=/etc/ssh/sshd_config validate='/usr/sbin/sshd -t -f %s' +- assemble: + src: /etc/ssh/conf.d/ + dest: /etc/ssh/sshd_config + validate: '/usr/sbin/sshd -t -f %s' ''' +import codecs +import os +import os.path +import re +import tempfile + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.six import b + + # =========================================== # Support method def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False): ''' assemble a file from a directory of fragments ''' tmpfd, temp_path = tempfile.mkstemp() - tmp = 
os.fdopen(tmpfd,'w') + tmp = os.fdopen(tmpfd, 'wb') delimit_me = False add_newline = False for f in sorted(os.listdir(src_path)): if compiled_regexp and not compiled_regexp.search(f): continue - fragment = "%s/%s" % (src_path, f) + fragment = u"%s/%s" % (src_path, f) if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')): continue - fragment_content = file(fragment).read() + fragment_content = open(fragment, 'rb').read() # always put a newline between fragments if the previous fragment didn't end with a newline. if add_newline: - tmp.write('\n') + tmp.write(b('\n')) # delimiters should only appear between fragments if delimit_me: if delimiter: # un-escape anything like newlines - delimiter = delimiter.decode('unicode-escape') + delimiter = codecs.escape_decode(delimiter)[0] tmp.write(delimiter) # always make sure there's a newline after the # delimiter, so lines don't run together - if delimiter[-1] != '\n': - tmp.write('\n') + if delimiter[-1] != b('\n'): + tmp.write(b('\n')) tmp.write(fragment_content) delimit_me = True - if fragment_content.endswith('\n'): + if fragment_content.endswith(b('\n')): add_newline = False else: add_newline = True @@ -151,18 +170,18 @@ def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, igno tmp.close() return temp_path + def cleanup(path, result=None): # cleanup just in case if os.path.exists(path): try: os.remove(path) - except (IOError, OSError), e: + except (IOError, OSError): + e = get_exception() # don't error on possible race conditions, but keep warning if result is not None: result['warnings'] = ['Unable to remove temp file (%s): %s' % (path, str(e))] -# ============================================================== -# main def main(): @@ -200,10 +219,11 @@ def main(): if not os.path.isdir(src): module.fail_json(msg="Source (%s) is not a directory" % src) - if regexp != None: + if regexp is not None: try: compiled_regexp = re.compile(regexp) - except re.error, 
e: + except re.error: + e = get_exception() module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (e, regexp)) if validate and "%s" not in validate: @@ -229,12 +249,11 @@ def main(): result['validation'] = dict(rc=rc, stdout=out, stderr=err) if rc != 0: cleanup(path) - result['msg'] = "failed to validate: rc:%s error:%s" % (rc, err) - module.fail_json(result) + module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc, err)) if backup and dest_hash is not None: result['backup_file'] = module.backup_local(dest) - module.atomic_move(path, dest) + module.atomic_move(path, dest, unsafe_writes=module.params['unsafe_writes']) changed = True cleanup(path, result) @@ -247,8 +266,5 @@ def main(): result['msg'] = "OK" module.exit_json(**result) -# import module snippets -from ansible.module_utils.basic import * - -main() - +if __name__ == '__main__': + main() diff --git a/files/copy.py b/files/copy.py index e80ea32ede2..f9bbd1baa4f 100644 --- a/files/copy.py +++ b/files/copy.py @@ -18,8 +18,9 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-import os -import tempfile +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -105,19 +106,43 @@ EXAMPLES = ''' # Example from Ansible Playbooks -- copy: src=/srv/myfiles/foo.conf dest=/etc/foo.conf owner=foo group=foo mode=0644 +- copy: + src: /srv/myfiles/foo.conf + dest: /etc/foo.conf + owner: foo + group: foo + mode: 0644 # The same example as above, but using a symbolic mode equivalent to 0644 -- copy: src=/srv/myfiles/foo.conf dest=/etc/foo.conf owner=foo group=foo mode="u=rw,g=r,o=r" +- copy: + src: /srv/myfiles/foo.conf + dest: /etc/foo.conf + owner: foo + group: foo + mode: "u=rw,g=r,o=r" # Another symbolic mode example, adding some permissions and removing others -- copy: src=/srv/myfiles/foo.conf dest=/etc/foo.conf owner=foo group=foo mode="u+rw,g-wx,o-rwx" +- copy: + src: /srv/myfiles/foo.conf + dest: /etc/foo.conf + owner: foo + group: foo + mode: "u+rw,g-wx,o-rwx" # Copy a new "ntp.conf file into place, backing up the original if it differs from the copied version -- copy: src=/mine/ntp.conf dest=/etc/ntp.conf owner=root group=root mode=644 backup=yes +- copy: + src: /mine/ntp.conf + dest: /etc/ntp.conf + owner: root + group: root + mode: 0644 + backup: yes # Copy a new "sudoers" file into place, after passing validation with visudo -- copy: src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s' +- copy: + src: /mine/sudoers + dest: /etc/sudoers + validate: 'visudo -cf %s' ''' RETURN = ''' @@ -137,7 +162,7 @@ type: string sample: "2a5aeecc61dc98c4d780b14b330e3282" checksum: - description: checksum of the file after running copy + description: sha1 checksum of the file after running copy returned: success type: string sample: "6e642bb8dd5c2e027bf21dd923337cbb4214f827" @@ -183,16 +208,28 @@ sample: "file" ''' +import os +import shutil +import tempfile +import traceback + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from 
ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils._text import to_bytes, to_native + + def split_pre_existing_dir(dirname): ''' Return the first pre-existing directory and a list of the new directories that will be created. ''' head, tail = os.path.split(dirname) - if not os.path.exists(head): + b_head = to_bytes(head, errors='surrogate_or_strict') + if not os.path.exists(b_head): (pre_existing_dir, new_directory_list) = split_pre_existing_dir(head) else: - return (head, [ tail ]) + return (head, [tail]) new_directory_list.append(tail) return (pre_existing_dir, new_directory_list) @@ -215,10 +252,10 @@ def main(): module = AnsibleModule( # not checking because of daisy chain to file module argument_spec = dict( - src = dict(required=False), - original_basename = dict(required=False), # used to handle 'dest is a directory' via template, a slight hack + src = dict(required=False, type='path'), + original_basename = dict(required=False), # used to handle 'dest is a directory' via template, a slight hack content = dict(required=False, no_log=True), - dest = dict(required=True), + dest = dict(required=True, type='path'), backup = dict(default=False, type='bool'), force = dict(default=True, aliases=['thirsty'], type='bool'), validate = dict(required=False, type='str'), @@ -229,21 +266,23 @@ def main(): supports_check_mode=True, ) - src = os.path.expanduser(module.params['src']) - dest = os.path.expanduser(module.params['dest']) + src = module.params['src'] + b_src = to_bytes(src, errors='surrogate_or_strict') + dest = module.params['dest'] + b_dest = to_bytes(dest, errors='surrogate_or_strict') backup = module.params['backup'] - force = module.params['force'] - original_basename = module.params.get('original_basename',None) - validate = module.params.get('validate',None) + force = module.params['force'] + original_basename = module.params.get('original_basename', None) + validate = module.params.get('validate', None) follow = 
module.params['follow'] - mode = module.params['mode'] + mode = module.params['mode'] remote_src = module.params['remote_src'] - if not os.path.exists(src): + if not os.path.exists(b_src): module.fail_json(msg="Source %s not found" % (src)) - if not os.access(src, os.R_OK): + if not os.access(b_src, os.R_OK): module.fail_json(msg="Source %s not readable" % (src)) - if os.path.isdir(src): + if os.path.isdir(b_src): module.fail_json(msg="Remote copy does not support recursive copy of directory: %s" % (src)) checksum_src = module.sha1(src) @@ -259,10 +298,12 @@ def main(): # Special handling for recursive copy - create intermediate dirs if original_basename and dest.endswith(os.sep): dest = os.path.join(dest, original_basename) + b_dest = to_bytes(dest, errors='surrogate_or_strict') dirname = os.path.dirname(dest) - if not os.path.exists(dirname) and os.path.isabs(dirname): + b_dirname = to_bytes(dirname, errors='surrogate_or_strict') + if not os.path.exists(b_dirname) and os.path.isabs(b_dirname): (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname) - os.makedirs(dirname) + os.makedirs(b_dirname) directory_args = module.load_file_common_arguments(module.params) directory_mode = module.params["directory_mode"] if directory_mode is not None: @@ -271,78 +312,82 @@ def main(): directory_args['mode'] = None adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed) - if os.path.exists(dest): - if os.path.islink(dest) and follow: - dest = os.path.realpath(dest) + if os.path.isdir(b_dest): + basename = os.path.basename(src) + if original_basename: + basename = original_basename + dest = os.path.join(dest, basename) + b_dest = to_bytes(dest, errors='surrogate_or_strict') + + if os.path.exists(b_dest): + if os.path.islink(b_dest) and follow: + b_dest = os.path.realpath(b_dest) + dest = to_native(b_dest, errors='surrogate_or_strict') if not force: module.exit_json(msg="file already exists", src=src, 
dest=dest, changed=False) - if (os.path.isdir(dest)): - basename = os.path.basename(src) - if original_basename: - basename = original_basename - dest = os.path.join(dest, basename) - if os.access(dest, os.R_OK): + if os.access(b_dest, os.R_OK): checksum_dest = module.sha1(dest) else: - if not os.path.exists(os.path.dirname(dest)): + if not os.path.exists(os.path.dirname(b_dest)): try: # os.path.exists() can return false in some # circumstances where the directory does not have # the execute bit for the current user set, in # which case the stat() call will raise an OSError - os.stat(os.path.dirname(dest)) - except OSError, e: - if "permission denied" in str(e).lower(): + os.stat(os.path.dirname(b_dest)) + except OSError: + e = get_exception() + if "permission denied" in to_native(e).lower(): module.fail_json(msg="Destination directory %s is not accessible" % (os.path.dirname(dest))) module.fail_json(msg="Destination directory %s does not exist" % (os.path.dirname(dest))) - if not os.access(os.path.dirname(dest), os.W_OK): + if not os.access(os.path.dirname(b_dest), os.W_OK): module.fail_json(msg="Destination %s not writable" % (os.path.dirname(dest))) backup_file = None - if checksum_src != checksum_dest or os.path.islink(dest): - try: - if backup: - if os.path.exists(dest): - backup_file = module.backup_local(dest) - # allow for conversion from symlink. - if os.path.islink(dest): - os.unlink(dest) - open(dest, 'w').close() - if validate: - # if we have a mode, make sure we set it on the temporary - # file source as some validations may require it - # FIXME: should we do the same for owner/group here too? 
- if mode is not None: - module.set_mode_if_different(src, mode, False) - if "%s" not in validate: - module.fail_json(msg="validate must contain %%s: %s" % (validate)) - (rc,out,err) = module.run_command(validate % src) - if rc != 0: - module.fail_json(msg="failed to validate", exit_status=rc, stdout=out, stderr=err) - if remote_src: - _, tmpdest = tempfile.mkstemp(dir=os.path.dirname(dest)) - shutil.copy2(src, tmpdest) - module.atomic_move(tmpdest, dest) - else: - module.atomic_move(src, dest) - except IOError: - module.fail_json(msg="failed to copy: %s to %s" % (src, dest), traceback=traceback.format_exc()) + if checksum_src != checksum_dest or os.path.islink(b_dest): + if not module.check_mode: + try: + if backup: + if os.path.exists(b_dest): + backup_file = module.backup_local(dest) + # allow for conversion from symlink. + if os.path.islink(b_dest): + os.unlink(b_dest) + open(b_dest, 'w').close() + if validate: + # if we have a mode, make sure we set it on the temporary + # file source as some validations may require it + # FIXME: should we do the same for owner/group here too? 
+ if mode is not None: + module.set_mode_if_different(src, mode, False) + if "%s" not in validate: + module.fail_json(msg="validate must contain %%s: %s" % (validate)) + (rc, out, err) = module.run_command(validate % src) + if rc != 0: + module.fail_json(msg="failed to validate", exit_status=rc, stdout=out, stderr=err) + b_mysrc = b_src + if remote_src: + _, b_mysrc = tempfile.mkstemp(dir=os.path.dirname(b_dest)) + shutil.copy2(b_src, b_mysrc) + module.atomic_move(b_mysrc, dest, unsafe_writes=module.params['unsafe_writes']) + except IOError: + module.fail_json(msg="failed to copy: %s to %s" % (src, dest), traceback=traceback.format_exc()) changed = True else: changed = False res_args = dict( - dest = dest, src = src, md5sum = md5sum_src, checksum = checksum_src, changed = changed + dest=dest, src=src, md5sum=md5sum_src, checksum=checksum_src, changed=changed ) if backup_file: res_args['backup_file'] = backup_file module.params['dest'] = dest - file_args = module.load_file_common_arguments(module.params) - res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed']) + if not module.check_mode: + file_args = module.load_file_common_arguments(module.params) + res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed']) module.exit_json(**res_args) -# import module snippets -from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/files/fetch.py b/files/fetch.py index 4ca191b8121..f069a23acfb 100644 --- a/files/fetch.py +++ b/files/fetch.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: fetch @@ -45,7 +49,7 @@ fail_on_missing: version_added: "1.1" description: - - Makes it fails when the source file is missing. 
+ - When set to 'yes', the task will fail if the source file is missing. required: false choices: [ "yes", "no" ] default: "no" @@ -65,7 +69,7 @@ will use the basename of the source file, similar to the copy module. Obviously this is only handy if the filenames are unique. requirements: [] -author: +author: - "Ansible Core Team" - "Michael DeHaan" notes: @@ -79,14 +83,25 @@ EXAMPLES = ''' # Store file into /tmp/fetched/host.example.com/tmp/somefile -- fetch: src=/tmp/somefile dest=/tmp/fetched +- fetch: + src: /tmp/somefile + dest: /tmp/fetched # Specifying a path directly -- fetch: src=/tmp/somefile dest=/tmp/prefix-{{ ansible_hostname }} flat=yes +- fetch: + src: /tmp/somefile + dest: /tmp/prefix-{{ inventory_hostname }} + flat: yes # Specifying a destination path -- fetch: src=/tmp/uniquefile dest=/tmp/special/ flat=yes +- fetch: + src: /tmp/uniquefile + dest: /tmp/special/ + flat: yes # Storing in a path relative to the playbook -- fetch: src=/tmp/uniquefile dest=special/prefix-{{ ansible_hostname }} flat=yes +- fetch: + src: /tmp/uniquefile + dest: special/prefix-{{ inventory_hostname }} + flat: yes ''' diff --git a/files/file.py b/files/file.py index fc77f5094b6..ef98f036e8b 100644 --- a/files/file.py +++ b/files/file.py @@ -18,16 +18,9 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -import errno -import shutil -import stat -import grp -import pwd -try: - import selinux - HAVE_SELINUX=True -except ImportError: - HAVE_SELINUX=False +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -42,7 +35,7 @@ notes: - See also M(copy), M(template), M(assemble) requirements: [ ] -author: +author: - "Ansible Core Team" - "Michael DeHaan" options: @@ -60,6 +53,7 @@ or M(template) module if you want that behavior. If C(link), the symbolic link will be created or changed. Use C(hard) for hardlinks. 
If C(absent), directories will be recursively deleted, and files or symlinks will be unlinked. + Note that M(file) will not fail if the C(path) does not exist as the state did not change. If C(touch) (new in 1.4), an empty file will be created if the C(path) does not exist, while an existing file or directory will receive updated file access and modification times (similar to the way `touch` works from the command line). @@ -98,34 +92,65 @@ EXAMPLES = ''' # change file ownership, group and mode. When specifying mode using octal numbers, first digit should always be 0. -- file: path=/etc/foo.conf owner=foo group=foo mode=0644 -- file: src=/file/to/link/to dest=/path/to/symlink owner=foo group=foo state=link -- file: src=/tmp/{{ item.src }} dest={{ item.dest }} state=link +- file: + path: /etc/foo.conf + owner: foo + group: foo + mode: 0644 +- file: + src: /file/to/link/to + dest: /path/to/symlink + owner: foo + group: foo + state: link +- file: + src: '/tmp/{{ item.src }}' + dest: '{{ item.dest }}' + state: link with_items: - { src: 'x', dest: 'y' } - { src: 'z', dest: 'k' } # touch a file, using symbolic modes to set the permissions (equivalent to 0644) -- file: path=/etc/foo.conf state=touch mode="u=rw,g=r,o=r" +- file: + path: /etc/foo.conf + state: touch + mode: "u=rw,g=r,o=r" # touch the same file, but add/remove some permissions -- file: path=/etc/foo.conf state=touch mode="u+rw,g-wx,o-rwx" +- file: + path: /etc/foo.conf + state: touch + mode: "u+rw,g-wx,o-rwx" # create a directory if it doesn't exist -- file: path=/etc/some_directory state=directory mode=0755 - +- file: + path: /etc/some_directory + state: directory + mode: 0755 ''' +import errno +import os +import shutil +import time + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.six import b +from ansible.module_utils._text import to_bytes, to_native -def get_state(path): + +def 
get_state(b_path): ''' Find out current state ''' - if os.path.lexists(path): - if os.path.islink(path): + if os.path.lexists(b_path): + if os.path.islink(b_path): return 'link' - elif os.path.isdir(path): + elif os.path.isdir(b_path): return 'directory' - elif os.stat(path).st_nlink > 1: + elif os.stat(b_path).st_nlink > 1: return 'hard' else: # could be many other things, but defaulting to file @@ -133,70 +158,73 @@ def get_state(path): return 'absent' -def recursive_set_attributes(module, path, follow, file_args): + +def recursive_set_attributes(module, b_path, follow, file_args): changed = False - for root, dirs, files in os.walk(path): - for fsobj in dirs + files: - fsname = os.path.join(root, fsobj) - if not os.path.islink(fsname): + for b_root, b_dirs, b_files in os.walk(b_path): + for b_fsobj in b_dirs + b_files: + b_fsname = os.path.join(b_root, b_fsobj) + if not os.path.islink(b_fsname): tmp_file_args = file_args.copy() - tmp_file_args['path']=fsname + tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict') changed |= module.set_fs_attributes_if_different(tmp_file_args, changed) else: tmp_file_args = file_args.copy() - tmp_file_args['path']=fsname + tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict') changed |= module.set_fs_attributes_if_different(tmp_file_args, changed) if follow: - fsname = os.path.join(root, os.readlink(fsname)) - if os.path.isdir(fsname): - changed |= recursive_set_attributes(module, fsname, follow, file_args) + b_fsname = os.path.join(b_root, os.readlink(b_fsname)) + if os.path.isdir(b_fsname): + changed |= recursive_set_attributes(module, b_fsname, follow, file_args) tmp_file_args = file_args.copy() - tmp_file_args['path']=fsname + tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict') changed |= module.set_fs_attributes_if_different(tmp_file_args, changed) return changed + def main(): module = AnsibleModule( - argument_spec = dict( - state = 
dict(choices=['file','directory','link','hard','touch','absent'], default=None), - path = dict(aliases=['dest', 'name'], required=True), - original_basename = dict(required=False), # Internal use only, for recursive ops - recurse = dict(default=False, type='bool'), - force = dict(required=False, default=False, type='bool'), - diff_peek = dict(default=None), # Internal use only, for internal checks in the action plugins - validate = dict(required=False, default=None), # Internal use only, for template and copy - src = dict(required=False, default=None), + argument_spec=dict( + state=dict(choices=['file', 'directory', 'link', 'hard', 'touch', 'absent'], default=None), + path=dict(aliases=['dest', 'name'], required=True, type='path'), + original_basename=dict(required=False), # Internal use only, for recursive ops + recurse=dict(default=False, type='bool'), + force=dict(required=False, default=False, type='bool'), + diff_peek=dict(default=None), # Internal use only, for internal checks in the action plugins + validate=dict(required=False, default=None), # Internal use only, for template and copy + src=dict(required=False, default=None, type='path'), ), add_file_common_args=True, supports_check_mode=True ) params = module.params - state = params['state'] + state = params['state'] force = params['force'] diff_peek = params['diff_peek'] src = params['src'] + b_src = to_bytes(src, errors='surrogate_or_strict') follow = params['follow'] # modify source as we later reload and pass, specially relevant when used by other modules. 
- params['path'] = path = os.path.expanduser(params['path']) + path = params['path'] + b_path = to_bytes(path, errors='surrogate_or_strict') # short-circuit for diff_peek if diff_peek is not None: appears_binary = False try: - f = open(path) - b = f.read(8192) + f = open(b_path, 'rb') + head = f.read(8192) f.close() - if "\x00" in b: + if b("\x00") in head: appears_binary = True except: pass module.exit_json(path=path, changed=False, appears_binary=appears_binary) - prev_state = get_state(path) - + prev_state = get_state(b_path) # state should default to file, but since that creates many conflicts, # default to 'current' when it exists. @@ -208,18 +236,17 @@ def main(): # source is both the source of a symlink or an informational passing of the src for a template module # or copy module, even if this module never uses it, it is needed to key off some things - if src is not None: - src = os.path.expanduser(src) - else: - if state in ['link','hard']: + if src is None: + if state in ('link', 'hard'): if follow and state == 'link': # use the current target of the link as the source - src = os.path.realpath(path) + src = to_native(os.path.realpath(b_path), errors='strict') + b_src = to_bytes(os.path.realpath(b_path), errors='strict') else: module.fail_json(msg='src and dest are required for creating links') # original_basename is used by other modules that depend on file. 
- if os.path.isdir(path) and state not in ["link", "absent"]: + if os.path.isdir(b_path) and state not in ("link", "absent"): basename = None if params['original_basename']: basename = params['original_basename'] @@ -227,6 +254,7 @@ def main(): basename = os.path.basename(src) if basename: params['path'] = path = os.path.join(path, basename) + b_path = to_bytes(path, errors='surrogate_or_strict') # make sure the target path is a directory when we're doing a recursive operation recurse = params['recurse'] @@ -236,11 +264,8 @@ def main(): file_args = module.load_file_common_arguments(params) changed = False - diff = {'before': - {'path': path} - , - 'after': - {'path': path} + diff = {'before': {'path': path}, + 'after': {'path': path}, } state_change = False @@ -254,13 +279,15 @@ def main(): if not module.check_mode: if prev_state == 'directory': try: - shutil.rmtree(path, ignore_errors=False) - except Exception, e: + shutil.rmtree(b_path, ignore_errors=False) + except Exception: + e = get_exception() module.fail_json(msg="rmtree failed: %s" % str(e)) else: try: - os.unlink(path) - except Exception, e: + os.unlink(b_path) + except Exception: + e = get_exception() module.fail_json(path=path, msg="unlinking failed: %s " % str(e)) module.exit_json(path=path, changed=True, diff=diff) else: @@ -271,11 +298,12 @@ def main(): if state_change: if follow and prev_state == 'link': # follow symlink and operate on original - path = os.path.realpath(path) - prev_state = get_state(path) + b_path = os.path.realpath(b_path) + path = to_native(b_path, errors='strict') + prev_state = get_state(b_path) file_args['path'] = path - if prev_state not in ['file','hard']: + if prev_state not in ('file', 'hard'): # file is not absent and any other state is a conflict module.fail_json(path=path, msg='file (%s) is %s, cannot continue' % (path, prev_state)) @@ -284,8 +312,9 @@ def main(): elif state == 'directory': if follow and prev_state == 'link': - path = os.path.realpath(path) - prev_state 
= get_state(path) + b_path = os.path.realpath(b_path) + path = to_native(b_path, errors='strict') + prev_state = get_state(b_path) if prev_state == 'absent': if module.check_mode: @@ -303,18 +332,21 @@ def main(): # Remove leading slash if we're creating a relative path if not os.path.isabs(path): curpath = curpath.lstrip('/') - if not os.path.exists(curpath): + b_curpath = to_bytes(curpath, errors='surrogate_or_strict') + if not os.path.exists(b_curpath): try: - os.mkdir(curpath) - except OSError, ex: + os.mkdir(b_curpath) + except OSError: + ex = get_exception() # Possibly something else created the dir since the os.path.exists # check above. As long as it's a dir, we don't need to error out. - if not (ex.errno == errno.EEXIST and os.path.isdir(curpath)): + if not (ex.errno == errno.EEXIST and os.path.isdir(b_curpath)): raise tmp_file_args = file_args.copy() - tmp_file_args['path']=curpath + tmp_file_args['path'] = curpath changed = module.set_fs_attributes_if_different(tmp_file_args, changed, diff) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(path=path, msg='There was an issue creating %s as requested: %s' % (curpath, str(e))) # We already know prev_state is not 'absent', therefore it exists in some form. 
@@ -324,45 +356,47 @@ def main(): changed = module.set_fs_attributes_if_different(file_args, changed, diff) if recurse: - changed |= recursive_set_attributes(module, file_args['path'], follow, file_args) + changed |= recursive_set_attributes(module, to_bytes(file_args['path'], errors='surrogate_or_strict'), follow, file_args) module.exit_json(path=path, changed=changed, diff=diff) - elif state in ['link','hard']: + elif state in ('link', 'hard'): - if os.path.isdir(path) and not os.path.islink(path): + if os.path.isdir(b_path) and not os.path.islink(b_path): relpath = path else: - relpath = os.path.dirname(path) + b_relpath = os.path.dirname(b_path) + relpath = to_native(b_relpath, errors='strict') absrc = os.path.join(relpath, src) - if not os.path.exists(absrc) and not force: + b_absrc = to_bytes(absrc, errors='surrogate_or_strict') + if not os.path.exists(b_absrc) and not force: module.fail_json(path=path, src=src, msg='src file does not exist, use "force=yes" if you really want to create the link: %s' % absrc) if state == 'hard': - if not os.path.isabs(src): + if not os.path.isabs(b_src): module.fail_json(msg="absolute paths are required") elif prev_state == 'directory': if not force: module.fail_json(path=path, msg='refusing to convert between %s and %s for %s' % (prev_state, state, path)) - elif len(os.listdir(path)) > 0: + elif len(os.listdir(b_path)) > 0: # refuse to replace a directory that has files in it module.fail_json(path=path, msg='the directory %s is not empty, refusing to convert it' % path) - elif prev_state in ['file', 'hard'] and not force: + elif prev_state in ('file', 'hard') and not force: module.fail_json(path=path, msg='refusing to convert between %s and %s for %s' % (prev_state, state, path)) if prev_state == 'absent': changed = True elif prev_state == 'link': - old_src = os.readlink(path) - if old_src != src: + b_old_src = os.readlink(b_path) + if b_old_src != b_src: changed = True elif prev_state == 'hard': - if not (state == 'hard' and 
os.stat(path).st_ino == os.stat(src).st_ino): + if not (state == 'hard' and os.stat(b_path).st_ino == os.stat(b_src).st_ino): changed = True if not force: module.fail_json(dest=path, src=src, msg='Cannot link, different hard link exists at destination') - elif prev_state in ['file', 'directory']: + elif prev_state in ('file', 'directory'): changed = True if not force: module.fail_json(dest=path, src=src, msg='Cannot link, %s exists at destination' % prev_state) @@ -372,29 +406,33 @@ def main(): if changed and not module.check_mode: if prev_state != 'absent': # try to replace atomically - tmppath = '/'.join([os.path.dirname(path), ".%s.%s.tmp" % (os.getpid(),time.time())]) + b_tmppath = to_bytes(os.path.sep).join( + [os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))] + ) try: if prev_state == 'directory' and (state == 'hard' or state == 'link'): - os.rmdir(path) + os.rmdir(b_path) if state == 'hard': - os.link(src,tmppath) + os.link(b_src, b_tmppath) else: - os.symlink(src, tmppath) - os.rename(tmppath, path) - except OSError, e: - if os.path.exists(tmppath): - os.unlink(tmppath) - module.fail_json(path=path, msg='Error while replacing: %s' % str(e)) + os.symlink(b_src, b_tmppath) + os.rename(b_tmppath, b_path) + except OSError: + e = get_exception() + if os.path.exists(b_tmppath): + os.unlink(b_tmppath) + module.fail_json(path=path, msg='Error while replacing: %s' % to_native(e, nonstring='simplerepr')) else: try: if state == 'hard': - os.link(src,path) + os.link(b_src, b_path) else: - os.symlink(src, path) - except OSError, e: - module.fail_json(path=path, msg='Error while linking: %s' % str(e)) + os.symlink(b_src, b_path) + except OSError: + e = get_exception() + module.fail_json(path=path, msg='Error while linking: %s' % to_native(e, nonstring='simplerepr')) - if module.check_mode and not os.path.exists(path): + if module.check_mode and not os.path.exists(b_path): module.exit_json(dest=path, src=src, changed=changed, diff=diff) changed 
= module.set_fs_attributes_if_different(file_args, changed, diff) @@ -405,33 +443,33 @@ def main(): if prev_state == 'absent': try: - open(path, 'w').close() - except OSError, e: - module.fail_json(path=path, msg='Error, could not touch target: %s' % str(e)) - elif prev_state in ['file', 'directory', 'hard']: + open(b_path, 'wb').close() + except OSError: + e = get_exception() + module.fail_json(path=path, msg='Error, could not touch target: %s' % to_native(e, nonstring='simplerepr')) + elif prev_state in ('file', 'directory', 'hard'): try: - os.utime(path, None) - except OSError, e: - module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e)) + os.utime(b_path, None) + except OSError: + e = get_exception() + module.fail_json(path=path, msg='Error while touching existing target: %s' % to_native(e, nonstring='simplerepr')) else: module.fail_json(msg='Cannot touch other than files, directories, and hardlinks (%s is %s)' % (path, prev_state)) try: module.set_fs_attributes_if_different(file_args, True, diff) - except SystemExit, e: + except SystemExit: + e = get_exception() if e.code: # We take this to mean that fail_json() was called from # somewhere in basic.py if prev_state == 'absent': # If we just created the file we can safely remove it - os.remove(path) + os.remove(b_path) raise e module.exit_json(dest=path, changed=True, diff=diff) module.fail_json(path=path, msg='unexpected position reached') -# import module snippets -from ansible.module_utils.basic import * if __name__ == '__main__': main() - diff --git a/files/find.py b/files/find.py index 7d7d4ab03a5..e2988aa36e9 100644 --- a/files/find.py +++ b/files/find.py @@ -26,6 +26,10 @@ import time import re +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: find @@ -34,7 +38,7 @@ short_description: return a list of files based on specific criteria requirements: [] description: - - Return a list files based on 
specific criteria. Multiple criteria are AND'd together. + - Return a list of files based on specific criteria. Multiple criteria are AND'd together. options: age: required: false @@ -48,7 +52,7 @@ required: false default: '*' description: - - One or more (shell or regex) patterns, which type is controled by C(use_regex) option. + - One or more (shell or regex) patterns, which type is controlled by C(use_regex) option. - The patterns restrict the list of files to be returned to those whose basenames match at least one of the patterns specified. Multiple patterns can be specified using a list. aliases: ['pattern'] @@ -56,7 +60,7 @@ required: false default: null description: - - One or more re patterns which should be matched against the file content + - One or more regex patterns which should be matched against the file content paths: required: true aliases: [ "name", "path" ] @@ -66,7 +70,8 @@ required: false description: - Type of file to select - choices: [ "file", "directory" ] + - The 'link' and 'any' choices were added in version 2.3 + choices: [ "file", "directory", "link", "any" ] default: "file" recurse: required: false @@ -118,19 +123,37 @@ EXAMPLES = ''' # Recursively find /tmp files older than 2 days -- find: paths="/tmp" age="2d" recurse=yes +- find: + paths: "/tmp" + age: "2d" + recurse: yes # Recursively find /tmp files older than 4 weeks and equal or greater than 1 megabyte -- find: paths="/tmp" age="4w" size="1m" recurse=yes +- find: + paths: "/tmp" + age: "4w" + size: "1m" + recurse: yes # Recursively find /var/tmp files with last access time greater than 3600 seconds -- find: paths="/var/tmp" age="3600" age_stamp=atime recurse=yes +- find: + paths: "/var/tmp" + age: "3600" + age_stamp: atime + recurse: yes # find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz -- find: paths="/var/tmp" patterns="*.old,*.log.gz" size="10m" +- find: + paths: "/var/tmp" + patterns: "*.old,*.log.gz" + size: "10m" # find /var/log files 
equal or greater than 10 megabytes ending with .old or .log.gz via regex -- find: paths="/var/tmp" patterns="^.*?\.(?:old|log\.gz)$" size="10m" use_regex=True +- find: + paths: "/var/tmp" + patterns: "^.*?\.(?:old|log\.gz)$" + size: "10m" + use_regex: True ''' RETURN = ''' @@ -139,13 +162,13 @@ returned: success type: list of dictionaries sample: [ - { path="/var/tmp/test1", - mode=0644, - ..., - checksum=16fac7be61a6e4591a33ef4b729c5c3302307523 + { path: "/var/tmp/test1", + mode: "0644", + "...": "...", + checksum: 16fac7be61a6e4591a33ef4b729c5c3302307523 }, - { path="/var/tmp/test2", - ... + { path: "/var/tmp/test2", + "...": "..." }, ] matched: @@ -257,7 +280,7 @@ def main(): paths = dict(required=True, aliases=['name','path'], type='list'), patterns = dict(default=['*'], type='list', aliases=['pattern']), contains = dict(default=None, type='str'), - file_type = dict(default="file", choices=['file', 'directory'], type='str'), + file_type = dict(default="file", choices=['file', 'directory', 'link', 'any'], type='str'), age = dict(default=None, type='str'), age_stamp = dict(default="mtime", choices=['atime','mtime','ctime'], type='str'), size = dict(default=None, type='str'), @@ -313,13 +336,17 @@ def main(): continue try: - st = os.stat(fsname) + st = os.lstat(fsname) except: msg+="%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname continue r = {'path': fsname} - if stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory': + if params['file_type'] == 'any': + if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']): + r.update(statinfo(st)) + filelist.append(r) + elif stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory': if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']): r.update(statinfo(st)) @@ -336,6 +363,11 @@ def main(): r['checksum'] = module.sha1(fsname) filelist.append(r) + elif 
stat.S_ISLNK(st.st_mode) and params['file_type'] == 'link': + if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']): + r.update(statinfo(st)) + filelist.append(r) + if not params['recurse']: break else: @@ -346,5 +378,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/files/ini_file.py b/files/ini_file.py index 2dd021ad27c..f9f08ac5697 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -20,6 +20,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ini_file @@ -71,14 +75,23 @@ required: false default: "present" choices: [ "present", "absent" ] + no_extra_spaces: + description: + - do not insert spaces before and after '=' symbol + required: false + default: false + version_added: "2.1" + create: + required: false + choices: [ "yes", "no" ] + default: "yes" + description: + - If set to 'no', the module will fail if the file does not already exist. + By default it will create the file if it is missing. + version_added: "2.2" notes: - While it is possible to add an I(option) without specifying a I(value), this makes no sense. - - A section named C(default) cannot be added by the module, but if it exists, individual - options within the section can be updated. (This is a limitation of Python's I(ConfigParser).) - Either use M(template) to create a base INI file with a C([default]) section, or use - M(lineinfile) to add the missing line. 
-requirements: [ ConfigParser ] author: - "Jan-Piet Mens (@jpmens)" - "Ales Nosek (@noseka1)" @@ -86,57 +99,80 @@ EXAMPLES = ''' # Ensure "fav=lemonade is in section "[drinks]" in specified file -- ini_file: dest=/etc/conf section=drinks option=fav value=lemonade mode=0600 backup=yes +- ini_file: + dest: /etc/conf + section: drinks + option: fav + value: lemonade + mode: 0600 + backup: yes -- ini_file: dest=/etc/anotherconf - section=drinks - option=temperature - value=cold - backup=yes +- ini_file: + dest: /etc/anotherconf + section: drinks + option: temperature + value: cold + backup: yes ''' -import ConfigParser -import sys import os +import re # ============================================================== # match_opt def match_opt(option, line): option = re.escape(option) - return re.match('%s *=' % option, line) \ - or re.match('# *%s *=' % option, line) \ - or re.match('; *%s *=' % option, line) + return re.match(' *%s( |\t)*=' % option, line) \ + or re.match('# *%s( |\t)*=' % option, line) \ + or re.match('; *%s( |\t)*=' % option, line) # ============================================================== # match_active_opt def match_active_opt(option, line): option = re.escape(option) - return re.match('%s *=' % option, line) + return re.match(' *%s( |\t)*=' % option, line) # ============================================================== # do_ini -def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False): +def do_ini(module, filename, section=None, option=None, value=None, + state='present', backup=False, no_extra_spaces=False, create=False): + diff = {'before': '', + 'after': '', + 'before_header': '%s (content)' % filename, + 'after_header': '%s (content)' % filename} if not os.path.exists(filename): - try: - open(filename,'w').close() - except: - module.fail_json(msg="Destination file %s not writable" % filename) - ini_file = open(filename, 'r') - try: - ini_lines = ini_file.readlines() - # append a fake section line 
to simplify the logic - ini_lines.append('[') - finally: - ini_file.close() + if not create: + module.fail_json(rc=257, msg='Destination %s does not exist !' % filename) + destpath = os.path.dirname(filename) + if not os.path.exists(destpath) and not module.check_mode: + os.makedirs(destpath) + ini_lines = [] + else: + ini_file = open(filename, 'r') + try: + ini_lines = ini_file.readlines() + finally: + ini_file.close() + + if module._diff: + diff['before'] = ''.join(ini_lines) + + # append a fake section line to simplify the logic + ini_lines.append('[') within_section = not section section_start = 0 changed = False + msg = 'OK' + if no_extra_spaces: + assignment_format = '%s=%s\n' + else: + assignment_format = '%s = %s\n' for index, line in enumerate(ini_lines): if line.startswith('[%s]' % section): @@ -146,11 +182,17 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese if within_section: if state == 'present': # insert missing option line at the end of the section - ini_lines.insert(index, '%s = %s\n' % (option, value)) - changed = True + for i in range(index, 0, -1): + # search backwards for previous non-blank or non-comment line + if not re.match(r'^[ \t]*([#;].*)?$', ini_lines[i - 1]): + ini_lines.insert(i, assignment_format % (option, value)) + msg = 'option added' + changed = True + break elif state == 'absent' and not option: # remove the entire section del ini_lines[section_start:index] + msg = 'section removed' changed = True break else: @@ -158,11 +200,13 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese if state == 'present': # change the existing option line if match_opt(option, line): - newline = '%s = %s\n' % (option, value) + newline = assignment_format % (option, value) changed = ini_lines[index] != newline + if changed: + msg = 'option changed' ini_lines[index] = newline if changed: - # remove all possible option occurences from the rest of the section + # remove all possible option 
occurrences from the rest of the section index = index + 1 while index < len(ini_lines): line = ini_lines[index] @@ -173,11 +217,12 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese else: index = index + 1 break - else: - # comment out the existing option line + elif state == 'absent': + # delete the existing line if match_active_opt(option, line): - ini_lines[index] = '#%s' % ini_lines[index] + del ini_lines[index] changed = True + msg = 'option changed' break # remove the fake section line @@ -185,20 +230,24 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese if not within_section and option and state == 'present': ini_lines.append('[%s]\n' % section) - ini_lines.append('%s = %s\n' % (option, value)) + ini_lines.append(assignment_format % (option, value)) changed = True + msg = 'section and option added' + if module._diff: + diff['after'] = ''.join(ini_lines) + backup_file = None if changed and not module.check_mode: if backup: - module.backup_local(filename) + backup_file = module.backup_local(filename) ini_file = open(filename, 'w') try: ini_file.writelines(ini_lines) finally: ini_file.close() - return changed + return (changed, backup_file, diff, msg) # ============================================================== # main @@ -212,28 +261,35 @@ def main(): option = dict(required=False), value = dict(required=False), backup = dict(default='no', type='bool'), - state = dict(default='present', choices=['present', 'absent']) + state = dict(default='present', choices=['present', 'absent']), + no_extra_spaces = dict(required=False, default=False, type='bool'), + create=dict(default=True, type='bool') ), add_file_common_args = True, supports_check_mode = True ) - info = dict() - dest = os.path.expanduser(module.params['dest']) section = module.params['section'] option = module.params['option'] value = module.params['value'] state = module.params['state'] backup = module.params['backup'] + 
no_extra_spaces = module.params['no_extra_spaces'] + create = module.params['create'] + + (changed,backup_file,diff,msg) = do_ini(module, dest, section, option, value, state, backup, no_extra_spaces, create) - changed = do_ini(module, dest, section, option, value, state, backup) + if not module.check_mode and os.path.exists(dest): + file_args = module.load_file_common_arguments(module.params) + changed = module.set_fs_attributes_if_different(file_args, changed) - file_args = module.load_file_common_arguments(module.params) - changed = module.set_fs_attributes_if_different(file_args, changed) + results = { 'changed': changed, 'msg': msg, 'dest': dest, 'diff': diff } + if backup_file is not None: + results['backup_file'] = backup_file # Mission complete - module.exit_json(dest=dest, changed=changed, msg="OK") + module.exit_json(**results) # import module snippets from ansible.module_utils.basic import * diff --git a/files/lineinfile.py b/files/lineinfile.py index 134d70d4158..ba7f4a3cc16 100644 --- a/files/lineinfile.py +++ b/files/lineinfile.py @@ -19,10 +19,9 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -import re -import os -import pipes -import tempfile +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} DOCUMENTATION = """ --- @@ -39,8 +38,8 @@ - This module will search a file for a line, and ensure that it is present or absent. - This is primarily useful when you want to change a single line in a file only. See the M(replace) module if you want to change - multiple, similar lines; for other cases, see the M(copy) or - M(template) modules. + multiple, similar lines or check M(blockinfile) if you want to insert/update/remove a block of lines in a file. + For other cases, see the M(copy) or M(template) modules. version_added: "0.7" options: dest: @@ -89,7 +88,7 @@ description: - Used with C(state=present). 
If specified, the line will be inserted after the last match of specified regular expression. A special value is - available; C(EOF) for inserting the line at the end of the file. + available; C(EOF) for inserting the line at the end of the file. If specified regular expression has no matches, EOF will be used instead. May not be used with C(backrefs). choices: [ 'EOF', '*regex*' ] @@ -98,7 +97,7 @@ version_added: "1.1" description: - Used with C(state=present). If specified, the line will be inserted - before the last match of specified regular expression. A value is + before the last match of specified regular expression. A value is available; C(BOF) for inserting the line at the beginning of the file. If specified regular expression has no matches, the line will be inserted at the end of the file. May not be used with C(backrefs). @@ -125,33 +124,78 @@ """ EXAMPLES = r""" -- lineinfile: dest=/etc/selinux/config regexp=^SELINUX= line=SELINUX=enforcing - -- lineinfile: dest=/etc/sudoers state=absent regexp="^%wheel" - -- lineinfile: dest=/etc/hosts regexp='^127\.0\.0\.1' line='127.0.0.1 localhost' owner=root group=root mode=0644 - -- lineinfile: dest=/etc/httpd/conf/httpd.conf regexp="^Listen " insertafter="^#Listen " line="Listen 8080" - -- lineinfile: dest=/etc/services regexp="^# port for http" insertbefore="^www.*80/tcp" line="# port for http by default" +- lineinfile: + dest: /etc/selinux/config + regexp: '^SELINUX=' + line: 'SELINUX=enforcing' + +- lineinfile: + dest: /etc/sudoers + state: absent + regexp: '^%wheel' + +- lineinfile: + dest: /etc/hosts + regexp: '^127\.0\.0\.1' + line: '127.0.0.1 localhost' + owner: root + group: root + mode: 0644 + +- lineinfile: + dest: /etc/httpd/conf/httpd.conf + regexp: '^Listen ' + insertafter: '^#Listen ' + line: 'Listen 8080' + +- lineinfile: + dest: /etc/services + regexp: '^# port for http' + insertbefore: '^www.*80/tcp' + line: '# port for http by default' # Add a line to a file if it does not exist, without 
passing regexp -- lineinfile: dest=/tmp/testfile line="192.168.1.99 foo.lab.net foo" +- lineinfile: + dest: /tmp/testfile + line: '192.168.1.99 foo.lab.net foo' # Fully quoted because of the ': ' on the line. See the Gotchas in the YAML docs. -- lineinfile: "dest=/etc/sudoers state=present regexp='^%wheel' line='%wheel ALL=(ALL) NOPASSWD: ALL'" - -- lineinfile: dest=/opt/jboss-as/bin/standalone.conf regexp='^(.*)Xms(\d+)m(.*)$' line='\1Xms${xms}m\3' backrefs=yes +- lineinfile: " + dest: /etc/sudoers + state: present + regexp: '^%wheel' + line: '%wheel ALL=(ALL) NOPASSWD: ALL' + +- lineinfile: + dest: /opt/jboss-as/bin/standalone.conf + regexp: '^(.*)Xms(\d+)m(.*)$' + line: '\1Xms${xms}m\3' + backrefs: yes # Validate the sudoers file before saving -- lineinfile: dest=/etc/sudoers state=present regexp='^%ADMIN ALL\=' line='%ADMIN ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s' +- lineinfile: + dest: /etc/sudoers + state: present + regexp: '^%ADMIN ALL=' + line: '%ADMIN ALL=(ALL) NOPASSWD: ALL' + validate: 'visudo -cf %s' """ -def write_changes(module,lines,dest): +import re +import os +import tempfile + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import b +from ansible.module_utils._text import to_bytes, to_native + + +def write_changes(module, b_lines, dest): tmpfd, tmpfile = tempfile.mkstemp() - f = os.fdopen(tmpfd,'wb') - f.writelines(lines) + f = os.fdopen(tmpfd, 'wb') + f.writelines(b_lines) f.close() validate = module.params.get('validate', None) @@ -159,13 +203,16 @@ def write_changes(module,lines,dest): if validate: if "%s" not in validate: module.fail_json(msg="validate must contain %%s: %s" % (validate)) - (rc, out, err) = module.run_command(validate % tmpfile) + (rc, out, err) = module.run_command(to_bytes(validate % tmpfile, errors='surrogate_or_strict')) valid = rc == 0 if rc != 0: module.fail_json(msg='failed to validate: ' - 'rc:%s error:%s' % (rc,err)) + 'rc:%s error:%s' % (rc, err)) 
if valid: - module.atomic_move(tmpfile, os.path.realpath(dest)) + module.atomic_move(tmpfile, + to_native(os.path.realpath(to_bytes(dest, errors='surrogate_or_strict')), errors='surrogate_or_strict'), + unsafe_writes=module.params['unsafe_writes']) + def check_file_attrs(module, changed, message, diff): @@ -188,44 +235,46 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create, 'before_header': '%s (content)' % dest, 'after_header': '%s (content)' % dest} - if not os.path.exists(dest): + b_dest = to_bytes(dest, errors='surrogate_or_strict') + if not os.path.exists(b_dest): if not create: module.fail_json(rc=257, msg='Destination %s does not exist !' % dest) - destpath = os.path.dirname(dest) - if not os.path.exists(destpath) and not module.check_mode: - os.makedirs(destpath) - lines = [] + b_destpath = os.path.dirname(b_dest) + if not os.path.exists(b_destpath) and not module.check_mode: + os.makedirs(b_destpath) + b_lines = [] else: - f = open(dest, 'rb') - lines = f.readlines() + f = open(b_dest, 'rb') + b_lines = f.readlines() f.close() if module._diff: - diff['before'] = ''.join(lines) + diff['before'] = to_native(b('').join(b_lines)) if regexp is not None: - mre = re.compile(regexp) + bre_m = re.compile(to_bytes(regexp, errors='surrogate_or_strict')) if insertafter not in (None, 'BOF', 'EOF'): - insre = re.compile(insertafter) + bre_ins = re.compile(to_bytes(insertafter, errors='surrogate_or_strict')) elif insertbefore not in (None, 'BOF'): - insre = re.compile(insertbefore) + bre_ins = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict')) else: - insre = None + bre_ins = None # index[0] is the line num where regexp has been found # index[1] is the line num where insertafter/inserbefore has been found index = [-1, -1] m = None - for lineno, cur_line in enumerate(lines): + b_line = to_bytes(line, errors='surrogate_or_strict') + for lineno, b_cur_line in enumerate(b_lines): if regexp is not None: - match_found = 
mre.search(cur_line) + match_found = bre_m.search(b_cur_line) else: - match_found = line == cur_line.rstrip('\r\n') + match_found = b_line == b_cur_line.rstrip(b('\r\n')) if match_found: index[0] = lineno m = match_found - elif insre is not None and insre.search(cur_line): + elif bre_ins is not None and bre_ins.search(b_cur_line): if insertafter: # + 1 for the next line index[1] = lineno + 1 @@ -236,18 +285,19 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create, msg = '' changed = False # Regexp matched a line in the file + b_linesep = to_bytes(os.linesep, errors='surrogate_or_strict') if index[0] != -1: if backrefs: - new_line = m.expand(line) + b_new_line = m.expand(b_line) else: # Don't do backref expansion if not asked. - new_line = line + b_new_line = b_line - if not new_line.endswith(os.linesep): - new_line += os.linesep + if not b_new_line.endswith(b_linesep): + b_new_line += b_linesep - if lines[index[0]] != new_line: - lines[index[0]] = new_line + if b_lines[index[0]] != b_new_line: + b_lines[index[0]] = b_new_line msg = 'line replaced' changed = True elif backrefs: @@ -256,7 +306,7 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create, pass # Add it to the beginning of the file elif insertbefore == 'BOF' or insertafter == 'BOF': - lines.insert(0, line + os.linesep) + b_lines.insert(0, b_line + b_linesep) msg = 'line added' changed = True # Add it to the end of the file if requested or @@ -265,28 +315,28 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create, elif insertafter == 'EOF' or index[1] == -1: # If the file is not empty then ensure there's a newline before the added line - if len(lines)>0 and not (lines[-1].endswith('\n') or lines[-1].endswith('\r')): - lines.append(os.linesep) + if len(b_lines) > 0 and not b_lines[-1][-1:] in (b('\n'), b('\r')): + b_lines.append(b_linesep) - lines.append(line + os.linesep) + b_lines.append(b_line + b_linesep) msg = 'line added' changed = True 
# insert* matched, but not the regexp else: - lines.insert(index[1], line + os.linesep) + b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True if module._diff: - diff['after'] = ''.join(lines) + diff['after'] = to_native(b('').join(b_lines)) backupdest = "" if changed and not module.check_mode: - if backup and os.path.exists(dest): + if backup and os.path.exists(b_dest): backupdest = module.backup_local(dest) - write_changes(module, lines, dest) + write_changes(module, b_lines, dest) - if module.check_mode and not os.path.exists(dest): + if module.check_mode and not os.path.exists(b_dest): module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff) attr_diff = {} @@ -301,7 +351,8 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create, def absent(module, dest, regexp, line, backup): - if not os.path.exists(dest): + b_dest = to_bytes(dest, errors='surrogate_or_strict') + if not os.path.exists(b_dest): module.exit_json(changed=False, msg="file not present") msg = '' @@ -310,37 +361,38 @@ def absent(module, dest, regexp, line, backup): 'before_header': '%s (content)' % dest, 'after_header': '%s (content)' % dest} - f = open(dest, 'rb') - lines = f.readlines() + f = open(b_dest, 'rb') + b_lines = f.readlines() f.close() if module._diff: - diff['before'] = ''.join(lines) + diff['before'] = to_native(b('').join(b_lines)) if regexp is not None: - cre = re.compile(regexp) + bre_c = re.compile(to_bytes(regexp, errors='surrogate_or_strict')) found = [] - def matcher(cur_line): + b_line = to_bytes(line, errors='surrogate_or_strict') + def matcher(b_cur_line): if regexp is not None: - match_found = cre.search(cur_line) + match_found = bre_c.search(b_cur_line) else: - match_found = line == cur_line.rstrip('\r\n') + match_found = b_line == b_cur_line.rstrip(b('\r\n')) if match_found: - found.append(cur_line) + found.append(b_cur_line) return not match_found - lines = filter(matcher, lines) + b_lines = [l for l in 
b_lines if matcher(l)] changed = len(found) > 0 if module._diff: - diff['after'] = ''.join(lines) + diff['after'] = to_native(b('').join(b_lines)) backupdest = "" if changed and not module.check_mode: if backup: backupdest = module.backup_local(dest) - write_changes(module, lines, dest) + write_changes(module, b_lines, dest) if changed: msg = "%s line(s) removed" % len(found) @@ -359,7 +411,7 @@ def matcher(cur_line): def main(): module = AnsibleModule( argument_spec=dict( - dest=dict(required=True, aliases=['name', 'destfile']), + dest=dict(required=True, aliases=['name', 'destfile'], type='path'), state=dict(default='present', choices=['absent', 'present']), regexp=dict(default=None), line=dict(aliases=['value']), @@ -376,13 +428,13 @@ def main(): ) params = module.params - create = module.params['create'] - backup = module.params['backup'] - backrefs = module.params['backrefs'] - dest = os.path.expanduser(params['dest']) + create = params['create'] + backup = params['backup'] + backrefs = params['backrefs'] + dest = params['dest'] - - if os.path.isdir(dest): + b_dest = to_bytes(dest, errors='surrogate_or_strict') + if os.path.isdir(b_dest): module.fail_json(rc=256, msg='Destination %s is a directory !' 
% dest) if params['state'] == 'present': @@ -408,8 +460,5 @@ def main(): absent(module, dest, params['regexp'], params.get('line', None), backup) -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.splitter import * if __name__ == '__main__': main() diff --git a/files/replace.py b/files/replace.py index a48e49f8dec..85d6d91e1f7 100644 --- a/files/replace.py +++ b/files/replace.py @@ -22,6 +22,10 @@ import os import tempfile +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ --- module: replace @@ -77,11 +81,24 @@ """ EXAMPLES = r""" -- replace: dest=/etc/hosts regexp='(\s+)old\.host\.name(\s+.*)?$' replace='\1new.host.name\2' backup=yes - -- replace: dest=/home/jdoe/.ssh/known_hosts regexp='^old\.host\.name[^\n]*\n' owner=jdoe group=jdoe mode=644 - -- replace: dest=/etc/apache/ports regexp='^(NameVirtualHost|Listen)\s+80\s*$' replace='\1 127.0.0.1:8080' validate='/usr/sbin/apache2ctl -f %s -t' +- replace: + dest: /etc/hosts + regexp: '(\s+)old\.host\.name(\s+.*)?$' + replace: '\1new.host.name\2' + backup: yes + +- replace: + dest: /home/jdoe/.ssh/known_hosts + regexp: '^old\.host\.name[^\n]*\n' + owner: jdoe + group: jdoe + mode: 0644 + +- replace: + dest: /etc/apache/ports + regexp: '^(NameVirtualHost|Listen)\s+80\s*$' + replace: '\1 127.0.0.1:8080' + validate: '/usr/sbin/apache2ctl -f %s -t' """ def write_changes(module,contents,dest): @@ -102,7 +119,7 @@ def write_changes(module,contents,dest): module.fail_json(msg='failed to validate: ' 'rc:%s error:%s' % (rc,err)) if valid: - module.atomic_move(tmpfile, dest) + module.atomic_move(tmpfile, dest, unsafe_writes=module.params['unsafe_writes']) def check_file_attrs(module, changed, message): @@ -131,6 +148,7 @@ def main(): params = module.params dest = os.path.expanduser(params['dest']) + res_args = dict() if os.path.isdir(dest): module.fail_json(rc=256, msg='Destination %s is a directory !' 
% dest) @@ -148,19 +166,26 @@ def main(): if result[1] > 0 and contents != result[0]: msg = '%s replacements made' % result[1] changed = True + if module._diff: + res_args['diff'] = { + 'before_header': dest, + 'before': contents, + 'after_header': dest, + 'after': result[0], + } else: msg = '' changed = False if changed and not module.check_mode: if params['backup'] and os.path.exists(dest): - module.backup_local(dest) + res_args['backup_file'] = module.backup_local(dest) if params['follow'] and os.path.islink(dest): dest = os.path.realpath(dest) write_changes(module, result[0], dest) - msg, changed = check_file_attrs(module, changed, msg) - module.exit_json(changed=changed, msg=msg) + res_args['msg'], res_args['changed'] = check_file_attrs(module, changed, msg) + module.exit_json(**res_args) # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * diff --git a/files/stat.py b/files/stat.py index f14717ca103..8cccae1d685 100644 --- a/files/stat.py +++ b/files/stat.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: stat @@ -34,7 +38,8 @@ default: no get_md5: description: - - Whether to return the md5 sum of the file. Will return None if we're unable to use md5 (Common for FIPS-140 compliant systems) + - Whether to return the md5 sum of the file. Will return None if we're + unable to use md5 (Common for FIPS-140 compliant systems) required: false default: yes get_checksum: @@ -45,30 +50,42 @@ version_added: "1.8" checksum_algorithm: description: - - Algorithm to determine checksum of file. Will throw an error if the host is unable to use specified algorithm. + - Algorithm to determine checksum of file. Will throw an error if the + host is unable to use specified algorithm. 
required: false choices: [ 'sha1', 'sha224', 'sha256', 'sha384', 'sha512' ] default: sha1 aliases: [ 'checksum_algo', 'checksum' ] version_added: "2.0" - mime: + get_mime: description: - - Use file magic and return data about the nature of the file. this uses the 'file' utility found on most Linux/Unix systems. + - Use file magic and return data about the nature of the file. this uses + the 'file' utility found on most Linux/Unix systems. - This will add both `mime_type` and 'charset' fields to the return, if possible. + - In 2.3 this option changed from 'mime' to 'get_mime' and the default changed to 'Yes' required: false choices: [ Yes, No ] - default: No + default: Yes version_added: "2.1" - aliases: [ 'mime_type', 'mime-type' ] + aliases: [ 'mime', 'mime_type', 'mime-type' ] + get_attributes: + description: + - Get file attributes using lsattr tool if present. + required: false + default: True + version_added: "2.3" + aliases: [ 'attributes', 'attr' ] author: "Bruce Pennypacker (@bpennypacker)" ''' EXAMPLES = ''' # Obtain the stats of /etc/foo.conf, and check that the file still belongs # to 'root'. Fail otherwise. -- stat: path=/etc/foo.conf +- stat: + path: /etc/foo.conf register: st -- fail: msg="Whoops! file ownership has changed" +- fail: + msg: "Whoops! file ownership has changed" when: st.stat.pw_name != 'root' # Determine if a path exists and is a symlink. Note that if the path does @@ -76,35 +93,50 @@ # therefore, we must test whether it is defined. 
# Run this to understand the structure, the skipped ones do not pass the # check performed by 'when' -- stat: path=/path/to/something +- stat: + path: /path/to/something register: sym -- debug: msg="islnk isn't defined (path doesn't exist)" + +- debug: + msg: "islnk isn't defined (path doesn't exist)" when: sym.stat.islnk is not defined -- debug: msg="islnk is defined (path must exist)" + +- debug: + msg: "islnk is defined (path must exist)" when: sym.stat.islnk is defined -- debug: msg="Path exists and is a symlink" + +- debug: + msg: "Path exists and is a symlink" when: sym.stat.islnk is defined and sym.stat.islnk -- debug: msg="Path exists and isn't a symlink" + +- debug: + msg: "Path exists and isn't a symlink" when: sym.stat.islnk is defined and sym.stat.islnk == False # Determine if a path exists and is a directory. Note that we need to test # both that p.stat.isdir actually exists, and also that it's set to true. -- stat: path=/path/to/something +- stat: + path: /path/to/something register: p -- debug: msg="Path exists and is a directory" +- debug: + msg: "Path exists and is a directory" when: p.stat.isdir is defined and p.stat.isdir # Don't do md5 checksum -- stat: path=/path/to/myhugefile get_md5=no +- stat: + path: /path/to/myhugefile + get_md5: no # Use sha256 to calculate checksum -- stat: path=/path/to/something checksum_algorithm=sha256 +- stat: + path: /path/to/something + checksum_algorithm: sha256 ''' RETURN = ''' stat: - description: dictionary containing all the stat data + description: dictionary containing all the stat data, some platforms might add additional fields returned: success type: dictionary contains: @@ -169,7 +201,7 @@ type: int sample: 1003 size: - description: Size in bytes for a plain file, ammount of data for some special files + description: Size in bytes for a plain file, amount of data for some special files returned: success, path exists and user can read stats type: int sample: 203 @@ -265,15 +297,16 @@ sample: 
/home/foobar/21102015-1445431274-908472971 md5: description: md5 hash of the path - returned: success, path exists and user can read stats and path supports hashing and md5 is supported + returned: success, path exists and user can read stats and path + supports hashing and md5 is supported type: string sample: f88fa92d8cf2eeecf4c0a50ccc96d0c0 - checksum_algorithm: + checksum: description: hash of the path - returned: success, path exists, user can read stats, path supports hashing and supplied checksum algorithm is available + returned: success, path exists, user can read stats, path supports + hashing and supplied checksum algorithm is available type: string sample: 50ba294cdf28c0d5bcde25708df53346825a429f - aliases: ['checksum', 'checksum_algo'] pw_name: description: User name of owner returned: success, path exists and user can read stats and installed python supports it @@ -286,129 +319,218 @@ sample: www-data mime_type: description: file magic data or mime-type - returned: success, path exists and user can read stats and installed python supports it and the `mime` option was true, will return 'unknown' on error. + returned: success, path exists and user can read stats and + installed python supports it and the `mime` option was true, will + return 'unknown' on error. type: string sample: PDF document, version 1.2 charset: description: file character set or encoding - returned: success, path exists and user can read stats and installed python supports it and the `mime` option was true, will return 'unknown' on error. + returned: success, path exists and user can read stats and + installed python supports it and the `mime` option was true, will + return 'unknown' on error. 
type: string sample: us-ascii + readable: + description: Tells you if the invoking user has the right to read the path + returned: success, path exists and user can read the path + type: boolean + sample: False + version_added: 2.2 + writeable: + description: Tells you if the invoking user has the right to write the path + returned: success, path exists and user can write the path + type: boolean + sample: False + version_added: 2.2 + executable: + description: Tells you if the invoking user has the execute the path + returned: success, path exists and user can execute the path + type: boolean + sample: False + version_added: 2.2 + attributes: + description: list of file attributes + returned: success, path exists and user can execute the path + type: boolean + sample: [ immutable, extent ] + version_added: 2.3 ''' +import errno +import grp import os -import sys -from stat import * import pwd -import grp +import stat + +# import module snippets +from ansible.module_utils.basic import AnsibleModule, format_attributes +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils._text import to_bytes + +def format_output(module, path, st): + mode = st.st_mode + + # back to ansible + output = dict( + exists=True, + path=path, + mode="%04o" % stat.S_IMODE(mode), + isdir=stat.S_ISDIR(mode), + ischr=stat.S_ISCHR(mode), + isblk=stat.S_ISBLK(mode), + isreg=stat.S_ISREG(mode), + isfifo=stat.S_ISFIFO(mode), + islnk=stat.S_ISLNK(mode), + issock=stat.S_ISSOCK(mode), + uid=st.st_uid, + gid=st.st_gid, + size=st.st_size, + inode=st.st_ino, + dev=st.st_dev, + nlink=st.st_nlink, + atime=st.st_atime, + mtime=st.st_mtime, + ctime=st.st_ctime, + wusr=bool(mode & stat.S_IWUSR), + rusr=bool(mode & stat.S_IRUSR), + xusr=bool(mode & stat.S_IXUSR), + wgrp=bool(mode & stat.S_IWGRP), + rgrp=bool(mode & stat.S_IRGRP), + xgrp=bool(mode & stat.S_IXGRP), + woth=bool(mode & stat.S_IWOTH), + roth=bool(mode & stat.S_IROTH), + xoth=bool(mode & stat.S_IXOTH), + isuid=bool(mode 
& stat.S_ISUID), + isgid=bool(mode & stat.S_ISGID), + ) + + # Platform dependant flags: + for other in [ + # Some Linux + ('st_blocks','blocks'), + ('st_blksize', 'block_size'), + ('st_rdev','device_type'), + ('st_flags', 'flags'), + # Some Berkley based + ('st_gen', 'generation'), + ('st_birthtime', 'birthtime'), + # RISCOS + ('st_ftype', 'file_type'), + ('st_attrs', 'attrs'), + ('st_obtype', 'object_type'), + # OS X + ('st_rsize', 'real_size'), + ('st_creator', 'creator'), + ('st_type', 'file_type'), + ]: + if hasattr(st, other[0]): + output[other[1]] = getattr(st, other[0]) + + + return output + def main(): module = AnsibleModule( - argument_spec = dict( - path = dict(required=True, type='path'), - follow = dict(default='no', type='bool'), - get_md5 = dict(default='yes', type='bool'), - get_checksum = dict(default='yes', type='bool'), - checksum_algorithm = dict(default='sha1', type='str', choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512'], aliases=['checksum_algo', 'checksum']), - mime = dict(default=False, type='bool', aliases=['mime_type', 'mime-type']), + argument_spec=dict( + path=dict(required=True, type='path'), + follow=dict(default='no', type='bool'), + get_md5=dict(default='yes', type='bool'), + get_checksum=dict(default='yes', type='bool'), + get_mime=dict(default=True, type='bool', aliases=['mime', 'mime_type', 'mime-type']), + get_attributes=dict(default=True, type='bool', aliases=['attributes', 'attr']), + checksum_algorithm=dict(default='sha1', type='str', + choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512'], + aliases=['checksum_algo', 'checksum']), ), - supports_check_mode = True + supports_check_mode=True ) path = module.params.get('path') + b_path = to_bytes(path, errors='surrogate_or_strict') follow = module.params.get('follow') + get_mime = module.params.get('get_mime') + get_attr = module.params.get('get_attributes') get_md5 = module.params.get('get_md5') get_checksum = module.params.get('get_checksum') checksum_algorithm = 
module.params.get('checksum_algorithm') + # main stat data try: if follow: - st = os.stat(path) + st = os.stat(b_path) else: - st = os.lstat(path) - except OSError, e: + st = os.lstat(b_path) + except OSError: + e = get_exception() if e.errno == errno.ENOENT: - d = { 'exists' : False } - module.exit_json(changed=False, stat=d) + output = {'exists': False} + module.exit_json(changed=False, stat=output) - module.fail_json(msg = e.strerror) + module.fail_json(msg=e.strerror) - mode = st.st_mode + # process base results + output = format_output(module, path, st) - # back to ansible - d = { - 'exists' : True, - 'path' : path, - 'mode' : "%04o" % S_IMODE(mode), - 'isdir' : S_ISDIR(mode), - 'ischr' : S_ISCHR(mode), - 'isblk' : S_ISBLK(mode), - 'isreg' : S_ISREG(mode), - 'isfifo' : S_ISFIFO(mode), - 'islnk' : S_ISLNK(mode), - 'issock' : S_ISSOCK(mode), - 'uid' : st.st_uid, - 'gid' : st.st_gid, - 'size' : st.st_size, - 'inode' : st.st_ino, - 'dev' : st.st_dev, - 'nlink' : st.st_nlink, - 'atime' : st.st_atime, - 'mtime' : st.st_mtime, - 'ctime' : st.st_ctime, - 'wusr' : bool(mode & stat.S_IWUSR), - 'rusr' : bool(mode & stat.S_IRUSR), - 'xusr' : bool(mode & stat.S_IXUSR), - 'wgrp' : bool(mode & stat.S_IWGRP), - 'rgrp' : bool(mode & stat.S_IRGRP), - 'xgrp' : bool(mode & stat.S_IXGRP), - 'woth' : bool(mode & stat.S_IWOTH), - 'roth' : bool(mode & stat.S_IROTH), - 'xoth' : bool(mode & stat.S_IXOTH), - 'isuid' : bool(mode & stat.S_ISUID), - 'isgid' : bool(mode & stat.S_ISGID), - } - - if S_ISLNK(mode): - d['lnk_source'] = os.path.realpath(path) - - if S_ISREG(mode) and get_md5 and os.access(path,os.R_OK): - # Will fail on FIPS-140 compliant systems - try: - d['md5'] = module.md5(path) - except ValueError: - d['md5'] = None - - if S_ISREG(mode) and get_checksum and os.access(path,os.R_OK): - d['checksum'] = module.digest_from_file(path, checksum_algorithm) + # resolved permissions + for perm in [('readable', os.R_OK), ('writeable', os.W_OK), ('executable', os.X_OK)]: + 
output[perm[0]] = os.access(path, perm[1]) - try: - pw = pwd.getpwuid(st.st_uid) + # symlink info + if output.get('islnk'): + output['lnk_source'] = os.path.realpath(path) - d['pw_name'] = pw.pw_name + try: # user data + pw = pwd.getpwuid(st.st_uid) + output['pw_name'] = pw.pw_name + except: + pass + try: # group data grp_info = grp.getgrgid(st.st_gid) - d['gr_name'] = grp_info.gr_name + output['gr_name'] = grp_info.gr_name except: pass - if module.params.get('mime'): - d['mime_type'] = 'unknown' - d['charset'] = 'unknown' + # checksums + if output.get('isreg') and output.get('readable'): + if get_md5: + # Will fail on FIPS-140 compliant systems + try: + output['md5'] = module.md5(path) + except ValueError: + output['md5'] = None - filecmd = [module.get_bin_path('file', True),'-i', path] - try: - rc, out, err = module.run_command(filecmd) - if rc == 0: - mtype, chset = out.split(':')[1].split(';') - d['mime_type'] = mtype.strip() - d['charset'] = chset.split('=')[1].strip() - except: - pass + if get_checksum: + output['checksum'] = module.digest_from_file(path, checksum_algorithm) - module.exit_json(changed=False, stat=d) + # try to get mime data if requested + if get_mime: + output['mimetype'] = output['charset'] = 'unknown' + mimecmd = module.get_bin_path('file') + if mimecmd: + mimecmd = [mimecmd, '-i', path] + try: + rc, out, err = module.run_command(mimecmd) + if rc == 0: + mimetype, charset = out.split(':')[1].split(';') + output['mimetype'] = mimetype.strip() + output['charset'] = charset.split('=')[1].strip() + except: + pass -# import module snippets -from ansible.module_utils.basic import * + # try to get attr data + if get_attr: + output['version'] = None + output['attributes'] = [] + output['attr_flags'] = '' + out = module.get_file_attributes(path) + for x in ('version', 'attributes', 'attr_flags'): + if x in out: + output[x] = out[x] + + module.exit_json(changed=False, stat=output) -main() +if __name__ == '__main__': + main() diff --git 
a/files/synchronize.py b/files/synchronize.py index 262597729ab..4a7933adf1d 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -16,13 +16,17 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: synchronize version_added: "1.4" -short_description: Uses rsync to make synchronizing file paths in your playbooks quick and easy. +short_description: A wrapper around rsync to make common tasks in your playbooks quick and easy. description: - - C(synchronize) is a wrapper around the rsync command, meant to make common tasks with rsync easier. It is run and originates on the local host where Ansible is being run. Of course, you could just use the command action to call rsync yourself, but you also have to add a fair number of boilerplate options and host facts. You `still` may need to call rsync directly via C(command) or C(shell) depending on your use case. C(synchronize) does not provide access to the full power of rsync, but does make most invocations easier to follow. + - C(synchronize) is a wrapper around rsync to make common tasks in your playbooks quick and easy. It is run and originates on the local host where Ansible is being run. Of course, you could just use the C(command) action to call rsync yourself, but you also have to add a fair number of boilerplate options and host facts. C(synchronize) is not intended to provide access to the full power of rsync, but does make the most common invocations easier to implement. You `still` may need to call rsync directly via C(command) or C(shell) depending on your use case. options: src: description: @@ -35,7 +39,7 @@ dest_port: description: - Port number for ssh on the destination host. Prior to ansible 2.0, the ansible_ssh_port inventory var took precedence over this value. 
- default: Value of ansible_ssh_port for this host, remote_port config setting, or 22 if none of those are set + default: Value of ansible_ssh_port for this host, remote_port config setting, or the value from ssh client configuration if none of those are set version_added: "1.5" mode: description: @@ -72,7 +76,7 @@ version_added: "1.5" delete: description: - - Delete files that don't exist (after transfer, not before) in the C(src) path. This option requires C(recursive=yes). + - Delete files in C(dest) that don't exist (after transfer, not before) in the C(src) path. This option requires C(recursive=yes). choices: [ 'yes', 'no' ] default: 'no' required: false @@ -130,7 +134,7 @@ required: false rsync_timeout: description: - - Specify a --timeout for the rsync command in seconds. + - Specify a --timeout for the rsync command in seconds. default: 0 required: false set_remote_user: @@ -174,53 +178,108 @@ - Expect that dest=~/x will be ~/x even if using sudo. - Inspect the verbose output to validate the destination user/host/path are what was expected. - - To exclude files and directories from being synchronized, you may add + - To exclude files and directories from being synchronized, you may add C(.rsync-filter) files to the source directory. - + - rsync daemon must be up and running with correct permission when using + rsync protocol in source or destination path. + - The C(synchronize) module forces `--delay-updates` to avoid leaving a destination in a broken in-between state if the underlying rsync process encounters an error. Those synchronizing large numbers of files that are willing to trade safety for performance should call rsync directly. 
author: "Timothy Appnel (@tima)" ''' EXAMPLES = ''' # Synchronization of src on the control machine to dest on the remote hosts -synchronize: src=some/relative/path dest=/some/absolute/path +- synchronize: + src: some/relative/path + dest: /some/absolute/path + +# Synchronization using rsync protocol (push) +- synchronize: + src: some/relative/path/ + dest: rsync://somehost.com/path/ + +# Synchronization using rsync protocol (pull) +- synchronize: + mode: pull + src: rsync://somehost.com/path/ + dest: /some/absolute/path/ + +# Synchronization using rsync protocol on delegate host (push) +- synchronize: + src: /some/absolute/path/ + dest: rsync://somehost.com/path/ + delegate_to: delegate.host + +# Synchronization using rsync protocol on delegate host (pull) +- synchronize: + mode: pull + src: rsync://somehost.com/path/ + dest: /some/absolute/path/ + delegate_to: delegate.host # Synchronization without any --archive options enabled -synchronize: src=some/relative/path dest=/some/absolute/path archive=no +- synchronize: + src: some/relative/path + dest: /some/absolute/path + archive: no # Synchronization with --archive options enabled except for --recursive -synchronize: src=some/relative/path dest=/some/absolute/path recursive=no +- synchronize: + src: some/relative/path + dest: /some/absolute/path + recursive: no # Synchronization with --archive options enabled except for --times, with --checksum option enabled -synchronize: src=some/relative/path dest=/some/absolute/path checksum=yes times=no +- synchronize: + src: some/relative/path + dest: /some/absolute/path + checksum: yes + times: no # Synchronization without --archive options enabled except use --links -synchronize: src=some/relative/path dest=/some/absolute/path archive=no links=yes +- synchronize: + src: some/relative/path + dest: /some/absolute/path + archive: no + links: yes # Synchronization of two paths both on the control machine -local_action: synchronize src=some/relative/path 
dest=/some/absolute/path +- synchronize + src: some/relative/path + dest: /some/absolute/path + delegate_to: localhost # Synchronization of src on the inventory host to the dest on the localhost in pull mode -synchronize: mode=pull src=some/relative/path dest=/some/absolute/path +- synchronize: + mode: pull + src: some/relative/path + dest: /some/absolute/path # Synchronization of src on delegate host to dest on the current inventory host. -synchronize: +- synchronize: src: /first/absolute/path dest: /second/absolute/path -delegate_to: delegate.host + delegate_to: delegate.host # Synchronize two directories on one remote host. -synchronize: +- synchronize: src: /first/absolute/path dest: /second/absolute/path -delegate_to: "{{ inventory_hostname }}" + delegate_to: "{{ inventory_hostname }}" # Synchronize and delete files in dest on the remote host that are not found in src of localhost. -synchronize: src=some/relative/path dest=/some/absolute/path delete=yes +- synchronize: + src: some/relative/path + dest: /some/absolute/path + delete: yes + recursive: yes # Synchronize using an alternate rsync command # This specific command is granted su privileges on the destination -synchronize: src=some/relative/path dest=/some/absolute/path rsync_path="su -c rsync" +- synchronize: + src: some/relative/path + dest: /some/absolute/path + rsync_path: "su -c rsync" # Example .rsync-filter file in the source directory - var # exclude any path whose last part is 'var' @@ -228,9 +287,9 @@ + /var/conf # include /var/conf even though it was previously excluded # Synchronize passing in extra rsync options -synchronize: +- synchronize: src: /tmp/helloworld - dest: /var/www/helloword + dest: /var/www/helloworld rsync_opts: - "--no-motd" - "--exclude=.git" @@ -263,7 +322,7 @@ def main(): argument_spec = dict( src = dict(required=True), dest = dict(required=True), - dest_port = dict(default=22, type='int'), + dest_port = dict(default=None, type='int'), delete = dict(default='no', 
type='bool'), private_key = dict(default=None), rsync_path = dict(default=None), @@ -379,7 +438,7 @@ def main(): if private_key is None: private_key = '' else: - private_key = '-i '+ private_key + private_key = '-i '+ private_key ssh_opts = '-S none' @@ -389,10 +448,17 @@ def main(): if ssh_args: ssh_opts = '%s %s' % (ssh_opts, ssh_args) - if dest_port != 22: - cmd += " --rsh '%s %s %s -o Port=%s'" % (ssh, private_key, ssh_opts, dest_port) - else: - cmd += " --rsh '%s %s %s'" % (ssh, private_key, ssh_opts) # need ssh param + if source.startswith('"rsync://') and dest.startswith('"rsync://'): + module.fail_json(msg='either src or dest must be a localhost', rc=1) + + if not source.startswith('"rsync://') and not dest.startswith('"rsync://'): + # If the user specified a port value + # Note: The action plugin takes care of setting this to a port from + # inventory if the user didn't specify an explicit dest_port + if dest_port is not None: + cmd += " --rsh 'ssh %s %s -o Port=%s'" % (private_key, ssh_opts, dest_port) + else: + cmd += " --rsh 'ssh %s %s'" % (private_key, ssh_opts) if rsync_path: cmd = cmd + " --rsync-path=%s" % (rsync_path) @@ -408,9 +474,9 @@ def main(): # expand the paths if '@' not in source: - source = os.path.expanduser(source) + source = os.path.expanduser(source) if '@' not in dest: - dest = os.path.expanduser(dest) + dest = os.path.expanduser(dest) cmd = ' '.join([cmd, source, dest]) cmdstr = cmd @@ -421,7 +487,7 @@ def main(): changed = changed_marker in out out_clean=out.replace(changed_marker,'') out_lines=out_clean.split('\n') - while '' in out_lines: + while '' in out_lines: out_lines.remove('') if module._diff: diff = {'prepared': out_clean} @@ -435,5 +501,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() - +if __name__ == '__main__': + main() diff --git a/files/template.py b/files/template.py index 808aa13b4ca..c603ec8f428 100644 --- a/files/template.py +++ b/files/template.py @@ -15,6 +15,10 @@ # 
You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: template @@ -25,19 +29,19 @@ (U(http://jinja.pocoo.org/docs/)) - documentation on the template formatting can be found in the Template Designer Documentation (U(http://jinja.pocoo.org/docs/templates/)). - - "Six additional variables can be used in templates: C(ansible_managed) + - "Six additional variables can be used in templates: C(ansible_managed) (configurable via the C(defaults) section of C(ansible.cfg)) contains a string which can be used to describe the template name, host, modification time of the - template file and the owner uid, C(template_host) contains the node name of + template file and the owner uid, C(template_host) contains the node name of the template's machine, C(template_uid) the owner, C(template_path) the - absolute path of the template, C(template_fullpath) is the absolute path of the + absolute path of the template, C(template_fullpath) is the absolute path of the template, and C(template_run_date) is the date that the template was rendered. Note that including a string that uses a date in the template will result in the template being marked 'changed' each time." options: src: description: - - Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path. + - Path of a Jinja2 formatted template on the Ansible controller. This can be a relative or absolute path. required: true dest: description: @@ -60,6 +64,13 @@ default: "yes" notes: - "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)." + - "Also, you can override jinja2 settings by adding a special header to template file. + i.e. C(#jinja2:variable_start_string:'[%' , variable_end_string:'%]', trim_blocks: False) + which changes the variable interpolation markers to [% var %] instead of {{ var }}. 
+ This is the best way to prevent evaluation of things that look like, but should not be Jinja2. + raw/endraw in Jinja2 will not work as you expect because templates in Ansible are recursively evaluated." + + author: - Ansible Core Team - Michael DeHaan @@ -70,11 +81,24 @@ EXAMPLES = ''' # Example from Ansible Playbooks -- template: src=/mytemplates/foo.j2 dest=/etc/file.conf owner=bin group=wheel mode=0644 +- template: + src: /mytemplates/foo.j2 + dest: /etc/file.conf + owner: bin + group: wheel + mode: 0644 # The same example, but using symbolic modes equivalent to 0644 -- template: src=/mytemplates/foo.j2 dest=/etc/file.conf owner=bin group=wheel mode="u=rw,g=r,o=r" +- template: + src: /mytemplates/foo.j2 + dest: /etc/file.conf + owner: bin + group: wheel + mode: "u=rw,g=r,o=r" # Copy a new "sudoers" file into place, after passing validation with visudo -- template: src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s' +- template: + src: /mine/sudoers + dest: /etc/sudoers + validate: 'visudo -cf %s' ''' diff --git a/files/unarchive.py b/files/unarchive.py index e4bb230aef5..c919d1f0ae8 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -4,6 +4,7 @@ # (c) 2012, Michael DeHaan # (c) 2013, Dylan Martin # (c) 2015, Toshio Kuratomi +# (c) 2016, Dag Wieers # # This file is part of Ansible # @@ -20,6 +21,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: unarchive @@ -27,12 +32,12 @@ short_description: Unpacks an archive after (optionally) copying it from the local machine. extends_documentation_fragment: files description: - - The M(unarchive) module unpacks an archive. By default, it will copy the source file from the local system to the target before unpacking - set copy=no to unpack an archive which already exists on the target.. 
+ - The M(unarchive) module unpacks an archive. By default, it will copy the source file from the local system to the target before unpacking - set remote_src=yes to unpack an archive which already exists on the target.. options: src: description: - - If copy=yes (default), local path to archive file to copy to the target server; can be absolute or relative. If copy=no, path on the target server to existing archive file to unpack. - - If copy=no and src contains ://, the remote machine will download the file from the url first. (version_added 2.0) + - If remote_src=no (default), local path to archive file to copy to the target server; can be absolute or relative. If remote_src=yes, path on the target server to existing archive file to unpack. + - If remote_src=yes and src contains ://, the remote machine will download the file from the url first. (version_added 2.0) required: true default: null dest: @@ -43,6 +48,8 @@ copy: description: - "If true, the file is copied from local 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine." + - "This option has been deprecated in favor of C(remote_src)" + - "This option is mutually exclusive with C(remote_src)." required: false choices: [ "yes", "no" ] default: "yes" @@ -59,17 +66,50 @@ choices: [ "yes", "no" ] default: "no" version_added: "2.0" -author: "Dylan Martin (@pileofrogs)" + exclude: + description: + - List the directory and file entries that you would like to exclude from the unarchive action. + required: false + default: [] + version_added: "2.1" + keep_newer: + description: + - Do not replace existing files that are newer than files from the archive. + required: false + default: no + version_added: "2.1" + extra_opts: + description: + - Specify additional options by passing in an array. 
+ default: + required: false + version_added: "2.1" + remote_src: + description: + - "Set to C(yes) to indicate the archived file is already on the remote system and not local to the Ansible controller." + - "This option is mutually exclusive with C(copy)." + required: false + default: "no" + choices: ["yes", "no"] + version_added: "2.2" + validate_certs: + description: + - This only applies if using a https url as the source of the file. + - This should only set to C(no) used on personally controlled sites using self-signed cer + - Prior to 2.2 the code worked as if this was set to C(yes). + required: false + default: "yes" + choices: ["yes", "no"] + version_added: "2.2" +author: "Dag Wieers (@dagwieers)" todo: - - detect changed/unchanged for .zip files - - handle common unarchive args, like preserve owner/timestamp etc... + - re-implement tar support using native tarfile module + - re-implement zip support using native zipfile module notes: - - requires C(tar)/C(unzip) command on target host - - can handle I(gzip), I(bzip2) and I(xz) compressed as well as uncompressed tar files - - detects type of archive automatically - - uses tar's C(--diff arg) to calculate if changed or not. If this C(arg) is not + - requires C(gtar)/C(unzip) command on target host + - can handle I(.zip) files using C(unzip) as well as I(.tar), I(.tar.gz), I(.tar.bz2) and I(.tar.xz) files using C(gtar) + - uses gtar's C(--diff arg) to calculate if changed or not. If this C(arg) is not supported, it will always unpack the archive - - does not detect if a .zip file is different from destination - always unzips - existing files/directories in the destination which are not in the archive are not touched. 
This is the same behavior as a normal archive extraction - existing files/directories in the destination which are not in the archive @@ -78,179 +118,642 @@ EXAMPLES = ''' # Example from Ansible Playbooks -- unarchive: src=foo.tgz dest=/var/lib/foo +- unarchive: + src: foo.tgz + dest: /var/lib/foo # Unarchive a file that is already on the remote machine -- unarchive: src=/tmp/foo.zip dest=/usr/local/bin copy=no +- unarchive: + src: /tmp/foo.zip + dest: /usr/local/bin + remote_src: yes # Unarchive a file that needs to be downloaded (added in 2.0) -- unarchive: src=https://example.com/example.zip dest=/usr/local/bin copy=no +- unarchive: + src: "https://example.com/example.zip" + dest: /usr/local/bin + remote_src: yes ''' import re import os -from zipfile import ZipFile +import stat +import pwd +import grp +import datetime +import time +import binascii +import codecs +from zipfile import ZipFile, BadZipfile +from ansible.module_utils._text import to_text + +try: # python 3.3+ + from shlex import quote +except ImportError: # older python + from pipes import quote # String from tar that shows the tar contents are different from the # filesystem -DIFFERENCE_RE = re.compile(r': (.*) differs$') +OWNER_DIFF_RE = re.compile(r': Uid differs$') +GROUP_DIFF_RE = re.compile(r': Gid differs$') +MODE_DIFF_RE = re.compile(r': Mode differs$') +MOD_TIME_DIFF_RE = re.compile(r': Mod time differs$') +#NEWER_DIFF_RE = re.compile(r' is newer or same age.$') +EMPTY_FILE_RE = re.compile(r': : Warning: Cannot stat: No such file or directory$') +MISSING_FILE_RE = re.compile(r': Warning: Cannot stat: No such file or directory$') +ZIP_FILE_MODE_RE = re.compile(r'([r-][w-][SsTtx-]){3}') # When downloading an archive, how much of the archive to download before # saving to a tempfile (64k) BUFSIZE = 65536 +def crc32(path): + ''' Return a CRC32 checksum of a file ''' + return binascii.crc32(open(path).read()) & 0xffffffff + +def shell_escape(string): + ''' Quote meta-characters in the args for 
the unix shell ''' + return re.sub(r'([^A-Za-z0-9_])', r'\\\1', string) + class UnarchiveError(Exception): pass # class to handle .zip files class ZipArchive(object): - def __init__(self, src, dest, module): + def __init__(self, src, dest, file_args, module): self.src = src self.dest = dest + self.file_args = file_args + self.opts = module.params['extra_opts'] self.module = module + self.excludes = module.params['exclude'] + self.includes = [] self.cmd_path = self.module.get_bin_path('unzip') self._files_in_archive = [] + self._infodict = dict() + + def _permstr_to_octal(self, modestr, umask): + ''' Convert a Unix permission string (rw-r--r--) into a mode (0644) ''' + revstr = modestr[::-1] + mode = 0 + for j in range(0, 3): + for i in range(0, 3): + if revstr[i+3*j] in ['r', 'w', 'x', 's', 't']: + mode += 2**(i+3*j) + # The unzip utility does not support setting the stST bits +# if revstr[i+3*j] in ['s', 't', 'S', 'T' ]: +# mode += 2**(9+j) + return ( mode & ~umask ) + + def _legacy_file_list(self, force_refresh=False): + unzip_bin = self.module.get_bin_path('unzip') + if not unzip_bin: + raise UnarchiveError('Python Zipfile cannot read %s and unzip not found' % self.src) + + rc, out, err = self.module.run_command([unzip_bin, '-v', self.src]) + if rc: + raise UnarchiveError('Neither python zipfile nor unzip can read %s' % self.src) + + for line in out.splitlines()[3:-2]: + fields = line.split(None, 7) + self._files_in_archive.append(fields[7]) + self._infodict[fields[7]] = long(fields[6]) + + def _crc32(self, path): + if self._infodict: + return self._infodict[path] + + try: + archive = ZipFile(self.src) + except BadZipfile: + e = get_exception() + if e.args[0].lower().startswith('bad magic number'): + # Python2.4 can't handle zipfiles with > 64K files. 
Try using + # /usr/bin/unzip instead + self._legacy_file_list() + else: + raise + else: + try: + for item in archive.infolist(): + self._infodict[item.filename] = long(item.CRC) + except: + archive.close() + raise UnarchiveError('Unable to list files in the archive') + + return self._infodict[path] @property def files_in_archive(self, force_refresh=False): if self._files_in_archive and not force_refresh: return self._files_in_archive - archive = ZipFile(self.src) + self._files_in_archive = [] + try: + archive = ZipFile(self.src) + except BadZipfile: + e = get_exception() + if e.args[0].lower().startswith('bad magic number'): + # Python2.4 can't handle zipfiles with > 64K files. Try using + # /usr/bin/unzip instead + self._legacy_file_list(force_refresh) + else: + raise + else: + try: + for member in archive.namelist(): + if member not in self.excludes: + self._files_in_archive.append(to_native(member)) + except: + archive.close() + raise UnarchiveError('Unable to list files in the archive') + + archive.close() + return self._files_in_archive + + def is_unarchived(self): + cmd = [ self.cmd_path, '-ZT', '-s', self.src ] + if self.excludes: + cmd.extend([ ' -x ', ] + self.excludes) + rc, out, err = self.module.run_command(cmd) + + old_out = out + diff = '' + out = '' + if rc == 0: + unarchived = True + else: + unarchived = False + + # Get some information related to user/group ownership + umask = os.umask(0) + os.umask(umask) + + # Get current user and group information + groups = os.getgroups() + run_uid = os.getuid() + run_gid = os.getgid() try: - self._files_in_archive = archive.namelist() + run_owner = pwd.getpwuid(run_uid).pw_name except: - raise UnarchiveError('Unable to list files in the archive') + run_owner = run_uid + try: + run_group = grp.getgrgid(run_gid).gr_name + except: + run_group = run_gid - return self._files_in_archive + # Get future user ownership + fut_owner = fut_uid = None + if self.file_args['owner']: + try: + tpw = 
pwd.getpwname(self.file_args['owner']) + except: + try: + tpw = pwd.getpwuid(self.file_args['owner']) + except: + tpw = pwd.getpwuid(run_uid) + fut_owner = tpw.pw_name + fut_uid = tpw.pw_uid + else: + try: + fut_owner = run_owner + except: + pass + fut_uid = run_uid + + # Get future group ownership + fut_group = fut_gid = None + if self.file_args['group']: + try: + tgr = grp.getgrnam(self.file_args['group']) + except: + try: + tgr = grp.getgrgid(self.file_args['group']) + except: + tgr = grp.getgrgid(run_gid) + fut_group = tgr.gr_name + fut_gid = tgr.gr_gid + else: + try: + fut_group = run_group + except: + pass + fut_gid = run_gid + + for line in old_out.splitlines(): + change = False + + pcs = line.split(None, 7) + if len(pcs) != 8: + # Too few fields... probably a piece of the header or footer + continue + + # Check first and seventh field in order to skip header/footer + if len(pcs[0]) != 7 and len(pcs[0]) != 10: continue + if len(pcs[6]) != 15: continue + + # Possible entries: + # -rw-rws--- 1.9 unx 2802 t- defX 11-Aug-91 13:48 perms.2660 + # -rw-a-- 1.0 hpf 5358 Tl i4:3 4-Dec-91 11:33 longfilename.hpfs + # -r--ahs 1.1 fat 4096 b- i4:2 14-Jul-91 12:58 EA DATA. SF + # --w------- 1.0 mac 17357 bx i8:2 4-May-92 04:02 unzip.macr + if pcs[0][0] not in 'dl-?' 
or not frozenset(pcs[0][1:]).issubset('rwxstah-'): + continue + + ztype = pcs[0][0] + permstr = pcs[0][1:] + version = pcs[1] + ostype = pcs[2] + size = int(pcs[3]) + path = to_text(pcs[7], errors='surrogate_or_strict') + + # Skip excluded files + if path in self.excludes: + out += 'Path %s is excluded on request\n' % path + continue + + # Itemized change requires L for symlink + if path[-1] == '/': + if ztype != 'd': + err += 'Path %s incorrectly tagged as "%s", but is a directory.\n' % (path, ztype) + ftype = 'd' + elif ztype == 'l': + ftype = 'L' + elif ztype == '-': + ftype = 'f' + elif ztype == '?': + ftype = 'f' + + # Some files may be storing FAT permissions, not Unix permissions + if len(permstr) == 6: + if path[-1] == '/': + permstr = 'rwxrwxrwx' + elif permstr == 'rwx---': + permstr = 'rwxrwxrwx' + else: + permstr = 'rw-rw-rw-' + + # Test string conformity + if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr): + raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr) + + # DEBUG +# err += "%s%s %10d %s\n" % (ztype, permstr, size, path) + + dest = os.path.join(self.dest, path) + try: + st = os.lstat(dest) + except: + change = True + self.includes.append(path) + err += 'Path %s is missing\n' % path + diff += '>%s++++++.?? %s\n' % (ftype, path) + continue + + # Compare file types + if ftype == 'd' and not stat.S_ISDIR(st.st_mode): + change = True + self.includes.append(path) + err += 'File %s already exists, but not as a directory\n' % path + diff += 'c%s++++++.?? %s\n' % (ftype, path) + continue + + if ftype == 'f' and not stat.S_ISREG(st.st_mode): + change = True + unarchived = False + self.includes.append(path) + err += 'Directory %s already exists, but not as a regular file\n' % path + diff += 'c%s++++++.?? %s\n' % (ftype, path) + continue + + if ftype == 'L' and not stat.S_ISLNK(st.st_mode): + change = True + self.includes.append(path) + err += 'Directory %s already exists, but not as a symlink\n' % path + diff += 'c%s++++++.?? 
%s\n' % (ftype, path) + continue + + itemized = list('.%s.......??' % ftype) + + # Note: this timestamp calculation has a rounding error + # somewhere... unzip and this timestamp can be one second off + # When that happens, we report a change and re-unzip the file + dt_object = datetime.datetime(*(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6])) + timestamp = time.mktime(dt_object.timetuple()) + + # Compare file timestamps + if stat.S_ISREG(st.st_mode): + if self.module.params['keep_newer']: + if timestamp > st.st_mtime: + change = True + self.includes.append(path) + err += 'File %s is older, replacing file\n' % path + itemized[4] = 't' + elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime: + # Add to excluded files, ignore other changes + out += 'File %s is newer, excluding file\n' % path + self.excludes.append(path) + continue + else: + if timestamp != st.st_mtime: + change = True + self.includes.append(path) + err += 'File %s differs in mtime (%f vs %f)\n' % (path, timestamp, st.st_mtime) + itemized[4] = 't' + + # Compare file sizes + if stat.S_ISREG(st.st_mode) and size != st.st_size: + change = True + err += 'File %s differs in size (%d vs %d)\n' % (path, size, st.st_size) + itemized[3] = 's' + + # Compare file checksums + if stat.S_ISREG(st.st_mode): + crc = crc32(dest) + if crc != self._crc32(path): + change = True + err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\n' % (path, self._crc32(path), crc) + itemized[2] = 'c' + + # Compare file permissions + + # Do not handle permissions of symlinks + if ftype != 'L': + + # Use the new mode provided with the action, if there is one + if self.file_args['mode']: + if isinstance(self.file_args['mode'], int): + mode = self.file_args['mode'] + else: + try: + mode = int(self.file_args['mode'], 8) + except Exception: + e = get_exception() + self.module.fail_json(path=path, msg="mode %(mode)s must be in octal form" % self.file_args, details=str(e)) + # Only special files require no umask-handling + elif 
ztype == '?': + mode = self._permstr_to_octal(permstr, 0) + else: + mode = self._permstr_to_octal(permstr, umask) + + if mode != stat.S_IMODE(st.st_mode): + change = True + itemized[5] = 'p' + err += 'Path %s differs in permissions (%o vs %o)\n' % (path, mode, stat.S_IMODE(st.st_mode)) + + # Compare file user ownership + owner = uid = None + try: + owner = pwd.getpwuid(st.st_uid).pw_name + except: + uid = st.st_uid + + # If we are not root and requested owner is not our user, fail + if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid): + raise UnarchiveError('Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner)) + + if owner and owner != fut_owner: + change = True + err += 'Path %s is owned by user %s, not by user %s as expected\n' % (path, owner, fut_owner) + itemized[6] = 'o' + elif uid and uid != fut_uid: + change = True + err += 'Path %s is owned by uid %s, not by uid %s as expected\n' % (path, uid, fut_uid) + itemized[6] = 'o' + + # Compare file group ownership + group = gid = None + try: + group = grp.getgrgid(st.st_gid).gr_name + except: + gid = st.st_gid + + if run_uid != 0 and fut_gid not in groups: + raise UnarchiveError('Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner)) + + if group and group != fut_group: + change = True + err += 'Path %s is owned by group %s, not by group %s as expected\n' % (path, group, fut_group) + itemized[6] = 'g' + elif gid and gid != fut_gid: + change = True + err += 'Path %s is owned by gid %s, not by gid %s as expected\n' % (path, gid, fut_gid) + itemized[6] = 'g' - def is_unarchived(self, mode, owner, group): - return dict(unarchived=False) + # Register changed files and finalize diff output + if change: + if path not in self.includes: + self.includes.append(path) + diff += '%s %s\n' % (''.join(itemized), path) + + if self.includes: + unarchived = False + + # DEBUG +# out = old_out + out + + return dict(unarchived=unarchived, rc=rc, out=out, 
err=err, cmd=cmd, diff=diff) def unarchive(self): - cmd = '%s -o "%s" -d "%s"' % (self.cmd_path, self.src, self.dest) + cmd = [ self.cmd_path, '-o', self.src ] + if self.opts: + cmd.extend(self.opts) + # NOTE: Including (changed) files as arguments is problematic (limits on command line/arguments) +# if self.includes: + # NOTE: Command unzip has this strange behaviour where it expects quoted filenames to also be escaped +# cmd.extend(map(shell_escape, self.includes)) + if self.excludes: + cmd.extend([ '-x' ] + self.excludes) + cmd.extend([ '-d', self.dest ]) rc, out, err = self.module.run_command(cmd) return dict(cmd=cmd, rc=rc, out=out, err=err) def can_handle_archive(self): if not self.cmd_path: - return False - cmd = '%s -l "%s"' % (self.cmd_path, self.src) + return False, 'Command "unzip" not found.' + cmd = [ self.cmd_path, '-l', self.src ] rc, out, err = self.module.run_command(cmd) if rc == 0: - return True - return False + return True, None + return False, 'Command "%s" could not handle archive.' 
% self.cmd_path # class to handle gzipped tar files class TgzArchive(object): - def __init__(self, src, dest, module): + def __init__(self, src, dest, file_args, module): self.src = src self.dest = dest + self.file_args = file_args + self.opts = module.params['extra_opts'] self.module = module - # Prefer gtar (GNU tar) as it supports the compression options -zjJ + if self.module.check_mode: + self.module.exit_json(skipped=True, msg="remote module (%s) does not support check mode when using gtar" % self.module._name) + self.excludes = [ path.rstrip('/') for path in self.module.params['exclude']] + # Prefer gtar (GNU tar) as it supports the compression options -z, -j and -J self.cmd_path = self.module.get_bin_path('gtar', None) if not self.cmd_path: # Fallback to tar self.cmd_path = self.module.get_bin_path('tar') - self.zipflag = 'z' + self.zipflag = '-z' self._files_in_archive = [] + if self.cmd_path: + self.tar_type = self._get_tar_type() + else: + self.tar_type = None + + def _get_tar_type(self): + cmd = [self.cmd_path, '--version'] + (rc, out, err) = self.module.run_command(cmd) + tar_type = None + if out.startswith('bsdtar'): + tar_type = 'bsd' + elif out.startswith('tar') and 'GNU' in out: + tar_type = 'gnu' + return tar_type + @property def files_in_archive(self, force_refresh=False): if self._files_in_archive and not force_refresh: return self._files_in_archive - cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src) - rc, out, err = self.module.run_command(cmd) + cmd = [ self.cmd_path, '--list', '-C', self.dest ] + if self.zipflag: + cmd.append(self.zipflag) + if self.opts: + cmd.extend([ '--show-transformed-names' ] + self.opts) + if self.excludes: + cmd.extend([ '--exclude=' + quote(f) for f in self.excludes ]) + cmd.extend([ '-f', self.src ]) + rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')) if rc != 0: raise UnarchiveError('Unable to list files in the archive') for filename 
in out.splitlines(): - if filename: - self._files_in_archive.append(filename) + # Compensate for locale-related problems in gtar output (octal unicode representation) #11348 +# filename = filename.decode('string_escape') + filename = codecs.escape_decode(filename)[0] + if filename and filename not in self.excludes: + self._files_in_archive.append(to_native(filename)) return self._files_in_archive - def is_unarchived(self, mode, owner, group): - cmd = '%s -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src) - rc, out, err = self.module.run_command(cmd) - unarchived = (rc == 0) - if not unarchived: - # Check whether the differences are in something that we're - # setting anyway - - # What will be set - to_be_set = set() - for perm in (('Mode', mode), ('Gid', group), ('Uid', owner)): - if perm[1] is not None: - to_be_set.add(perm[0]) - - # What is different - changes = set() - if err: - # Assume changes if anything returned on stderr - # * Missing files are known to trigger this - return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd) - for line in out.splitlines(): - match = DIFFERENCE_RE.search(line) - if not match: - # Unknown tar output. 
Assume we have changes - return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd) - changes.add(match.groups()[0]) - - if changes and changes.issubset(to_be_set): - unarchived = True + def is_unarchived(self): + cmd = [ self.cmd_path, '--diff', '-C', self.dest ] + if self.zipflag: + cmd.append(self.zipflag) + if self.opts: + cmd.extend([ '--show-transformed-names' ] + self.opts) + if self.file_args['owner']: + cmd.append('--owner=' + quote(self.file_args['owner'])) + if self.file_args['group']: + cmd.append('--group=' + quote(self.file_args['group'])) + if self.module.params['keep_newer']: + cmd.append('--keep-newer-files') + if self.excludes: + cmd.extend([ '--exclude=' + quote(f) for f in self.excludes ]) + cmd.extend([ '-f', self.src ]) + rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')) + + # Check whether the differences are in something that we're + # setting anyway + + # What is different + unarchived = True + old_out = out + out = '' + run_uid = os.getuid() + # When unarchiving as a user, or when owner/group/mode is supplied --diff is insufficient + # Only way to be sure is to check request with what is on disk (as we do for zip) + # Leave this up to set_fs_attributes_if_different() instead of inducing a (false) change + for line in old_out.splitlines() + err.splitlines(): + # FIXME: Remove the bogus lines from error-output as well ! 
+ # Ignore bogus errors on empty filenames (when using --split-component) + if EMPTY_FILE_RE.search(line): + continue + if run_uid == 0 and not self.file_args['owner'] and OWNER_DIFF_RE.search(line): + out += line + '\n' + if run_uid == 0 and not self.file_args['group'] and GROUP_DIFF_RE.search(line): + out += line + '\n' + if not self.file_args['mode'] and MODE_DIFF_RE.search(line): + out += line + '\n' + if MOD_TIME_DIFF_RE.search(line): + out += line + '\n' + if MISSING_FILE_RE.search(line): + out += line + '\n' + if out: + unarchived = False return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd) def unarchive(self): - cmd = '%s -x%sf "%s"' % (self.cmd_path, self.zipflag, self.src) - rc, out, err = self.module.run_command(cmd, cwd=self.dest) + cmd = [ self.cmd_path, '--extract', '-C', self.dest ] + if self.zipflag: + cmd.append(self.zipflag) + if self.opts: + cmd.extend([ '--show-transformed-names' ] + self.opts) + if self.file_args['owner']: + cmd.append('--owner=' + quote(self.file_args['owner'])) + if self.file_args['group']: + cmd.append('--group=' + quote(self.file_args['group'])) + if self.module.params['keep_newer']: + cmd.append('--keep-newer-files') + if self.excludes: + cmd.extend([ '--exclude=' + quote(f) for f in self.excludes ]) + cmd.extend([ '-f', self.src ]) + rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')) return dict(cmd=cmd, rc=rc, out=out, err=err) def can_handle_archive(self): if not self.cmd_path: - return False + return False, 'Commands "gtar" and "tar" not found.' + + if self.tar_type != 'gnu': + return False, 'Command "%s" detected as tar type %s. GNU tar required.' % (self.cmd_path, self.tar_type) try: if self.files_in_archive: - return True + return True, None except UnarchiveError: - pass + return False, 'Command "%s" could not handle archive.' 
% self.cmd_path # Errors and no files in archive assume that we weren't able to # properly unarchive it - return False + return False, 'Command "%s" found no files in archive.' % self.cmd_path # class to handle tar files that aren't compressed class TarArchive(TgzArchive): - def __init__(self, src, dest, module): - super(TarArchive, self).__init__(src, dest, module) + def __init__(self, src, dest, file_args, module): + super(TarArchive, self).__init__(src, dest, file_args, module) + # argument to tar self.zipflag = '' # class to handle bzip2 compressed tar files class TarBzipArchive(TgzArchive): - def __init__(self, src, dest, module): - super(TarBzipArchive, self).__init__(src, dest, module) - self.zipflag = 'j' + def __init__(self, src, dest, file_args, module): + super(TarBzipArchive, self).__init__(src, dest, file_args, module) + self.zipflag = '-j' # class to handle xz compressed tar files class TarXzArchive(TgzArchive): - def __init__(self, src, dest, module): - super(TarXzArchive, self).__init__(src, dest, module) - self.zipflag = 'J' + def __init__(self, src, dest, file_args, module): + super(TarXzArchive, self).__init__(src, dest, file_args, module) + self.zipflag = '-J' # try handlers in order and return the one that works or bail if none work -def pick_handler(src, dest, module): - handlers = [TgzArchive, ZipArchive, TarArchive, TarBzipArchive, TarXzArchive] +def pick_handler(src, dest, file_args, module): + handlers = [ZipArchive, TgzArchive, TarArchive, TarBzipArchive, TarXzArchive] + reasons = set() for handler in handlers: - obj = handler(src, dest, module) - if obj.can_handle_archive(): + obj = handler(src, dest, file_args, module) + (can_handle, reason) = obj.can_handle_archive() + if can_handle: return obj - module.fail_json(msg='Failed to find handler for "%s". Make sure the required command to extract the file is installed.' % src) + reasons.add(reason) + reason_msg = ' '.join(reasons) + module.fail_json(msg='Failed to find handler for "%s". 
Make sure the required command to extract the file is installed. %s' % (src, reason_msg)) def main(): @@ -260,25 +763,33 @@ def main(): src = dict(required=True, type='path'), original_basename = dict(required=False, type='str'), # used to handle 'dest is a directory' via template, a slight hack dest = dict(required=True, type='path'), - copy = dict(default=True, type='bool'), + copy = dict(required=False, default=True, type='bool'), + remote_src = dict(required=False, default=False, type='bool'), creates = dict(required=False, type='path'), - list_files = dict(required=False, default=False, type='bool'), + list_files = dict(required=False, default=False, type='bool'), + keep_newer = dict(required=False, default=False, type='bool'), + exclude = dict(required=False, default=[], type='list'), + extra_opts = dict(required=False, default=[], type='list'), + validate_certs = dict(required=False, default=True, type='bool'), ), - add_file_common_args=True, + add_file_common_args = True, + mutually_exclusive = [("copy", "remote_src"),], + # check-mode only works for zip files, we cover that later + supports_check_mode = True, ) - src = module.params['src'] - dest = module.params['dest'] - copy = module.params['copy'] + src = os.path.expanduser(module.params['src']) + dest = os.path.expanduser(module.params['dest']) + copy = module.params['copy'] + remote_src = module.params['remote_src'] file_args = module.load_file_common_arguments(module.params) - # did tar file arrive? if not os.path.exists(src): - if copy: + if not remote_src and copy: module.fail_json(msg="Source '%s' failed to transfer" % src) # If copy=false, and src= contains ://, try and download the file to a temp directory. 
elif '://' in src: - tempdir = os.path.dirname(__file__) + tempdir = os.path.dirname(os.path.realpath(__file__)) package = os.path.join(tempdir, str(src.rsplit('/', 1)[1])) try: rsp, info = fetch_url(module, src) @@ -296,7 +807,8 @@ def main(): f.write(data) f.close() src = package - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="Failure downloading %s, %s" % (src, e)) else: module.fail_json(msg="Source '%s' does not exist" % src) @@ -307,21 +819,27 @@ def main(): try: if os.path.getsize(src) == 0: module.fail_json(msg="Invalid archive '%s', the file is 0 bytes" % src) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="Source '%s' not readable" % src) # is dest OK to receive tar file? if not os.path.isdir(dest): module.fail_json(msg="Destination '%s' is not a directory" % dest) - handler = pick_handler(src, dest, module) + handler = pick_handler(src, dest, file_args, module) res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src) # do we need to do unpack? - res_args['check_results'] = handler.is_unarchived(file_args['mode'], - file_args['owner'], file_args['group']) - if res_args['check_results']['unarchived']: + check_results = handler.is_unarchived() + + # DEBUG +# res_args['check_results'] = check_results + + if module.check_mode: + res_args['changed'] = not check_results['unarchived'] + elif check_results['unarchived']: res_args['changed'] = False else: # do the unpack @@ -330,17 +848,24 @@ def main(): if res_args['extract_results']['rc'] != 0: module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args) except IOError: - module.fail_json(msg="failed to unpack %s to %s" % (src, dest)) + module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args) else: res_args['changed'] = True - # do we need to change perms? 
- for filename in handler.files_in_archive: - file_args['path'] = os.path.join(dest, filename) - try: - res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed']) - except (IOError, OSError), e: - module.fail_json(msg="Unexpected error when accessing exploded file: %s" % str(e)) + # Get diff if required + if check_results.get('diff', False): + res_args['diff'] = { 'prepared': check_results['diff'] } + + # Run only if we found differences (idempotence) or diff was missing + if res_args.get('diff', True) and not module.check_mode: + # do we need to change perms? + for filename in handler.files_in_archive: + file_args['path'] = os.path.join(dest, filename) + try: + res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed']) + except (IOError, OSError): + e = get_exception() + module.fail_json(msg="Unexpected error when accessing exploded file: %s" % str(e), **res_args) if module.params['list_files']: res_args['files'] = handler.files_in_archive @@ -350,5 +875,7 @@ def main(): # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * +from ansible.module_utils._text import to_native + if __name__ == '__main__': main() diff --git a/files/xattr.py b/files/xattr.py index 519fb4d0f26..0ae74efbaa9 100644 --- a/files/xattr.py +++ b/files/xattr.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: xattr @@ -63,16 +67,25 @@ EXAMPLES = ''' # Obtain the extended attributes of /etc/foo.conf -- xattr: name=/etc/foo.conf +- xattr: + name: /etc/foo.conf # Sets the key 'foo' to value 'bar' -- xattr: path=/etc/foo.conf key=user.foo value=bar +- xattr: + path: /etc/foo.conf + key: user.foo + value: bar # Removes the key 'foo' -- xattr: name=/etc/foo.conf key=user.foo state=absent +- xattr: + name: /etc/foo.conf + key: user.foo + state: absent ''' import operator +import re +import os def get_xattr_keys(module,path,follow): cmd = [ module.get_bin_path('getfattr', True) ] @@ -124,7 +137,8 @@ def _run_xattr(module,cmd,check_rc=True): try: (rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="%s!" % e.strerror) #result = {'raw': out} @@ -201,7 +215,7 @@ def main(): module.exit_json(changed=changed, msg=msg, xattr=res) # import module snippets -from ansible.module_utils.basic import * - +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception if __name__ == '__main__': main() diff --git a/inventory/add_host.py b/inventory/add_host.py index ef01ed1051b..44ae5303cc3 100644 --- a/inventory/add_host.py +++ b/inventory/add_host.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: add_host @@ -37,20 +41,31 @@ notes: - This module bypasses the play host loop and only runs once for all the hosts in the play, if you need it to iterate use a with\_ directive. 
-author: +author: - "Ansible Core Team" - "Seth Vidal" ''' EXAMPLES = ''' # add host to group 'just_created' with variable foo=42 -- add_host: name={{ ip_from_ec2 }} groups=just_created foo=42 +- add_host: + name: "{{ ip_from_ec2 }}" + groups: just_created + foo: 42 # add a host with a non-standard port local to your machines -- add_host: name={{ new_ip }}:{{ new_port }} +- add_host: + name: "{{ new_ip }}:{{ new_port }}" + +# add a host alias that we reach through a tunnel (Ansible <= 1.9) +- add_host: + hostname: "{{ new_ip }}" + ansible_ssh_host: "{{ inventory_hostname }}" + ansible_ssh_port: "{{ new_port }}" -# add a host alias that we reach through a tunnel -- add_host: hostname={{ new_ip }} - ansible_ssh_host={{ inventory_hostname }} - ansible_ssh_port={{ new_port }} +# add a host alias that we reach through a tunnel (Ansible >= 2.0) +- add_host: + hostname: "{{ new_ip }}" + ansible_host: "{{ inventory_hostname }}" + ansible_port: "{{ new_port }}" ''' diff --git a/inventory/group_by.py b/inventory/group_by.py index 4bfd20206be..c7cb6a034ba 100644 --- a/inventory/group_by.py +++ b/inventory/group_by.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: group_by @@ -34,7 +38,10 @@ EXAMPLES = ''' # Create groups based on the machine architecture -- group_by: key=machine_{{ ansible_machine }} +- group_by: + key: machine_{{ ansible_machine }} + # Create groups like 'kvm-host' -- group_by: key=virt_{{ ansible_virtualization_type }}_{{ ansible_virtualization_role }} +- group_by: + key: virt_{{ ansible_virtualization_type }}_{{ ansible_virtualization_role }} ''' diff --git a/network/basics/get_url.py b/network/basics/get_url.py index e762a7c69e6..a15b78df4fe 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -25,6 +25,10 @@ import re import tempfile +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: get_url @@ -101,7 +105,7 @@ If you worry about portability, only the sha1 algorithm is available on all platforms and python versions. The third party hashlib library can be installed for access to additional algorithms. - Additionaly, if a checksum is passed to this parameter, and the file exist under + Additionally, if a checksum is passed to this parameter, and the file exist under the C(dest) location, the destination_checksum would be calculated, and if checksum equals destination_checksum, the file download would be skipped (unless C(force) is true). 
' @@ -162,31 +166,55 @@ required: false # informational: requirements for nodes requirements: [ ] +extends_documentation_fragment: + - files author: "Jan-Piet Mens (@jpmens)" ''' EXAMPLES=''' - name: download foo.conf - get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf mode=0440 + get_url: + url: http://example.com/path/file.conf + dest: /etc/foo.conf + mode: 0440 - name: download file and force basic auth - get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf force_basic_auth=yes + get_url: + url: http://example.com/path/file.conf + dest: /etc/foo.conf + force_basic_auth: yes - name: download file with custom HTTP headers - get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf headers='key:value,key:value' - -- name: download file with check - get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf checksum=sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c - get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf checksum=md5:66dffb5228a211e61d6d7ef4a86f5758 + get_url: + url: http://example.com/path/file.conf + dest: /etc/foo.conf + headers: 'key:value,key:value' + +- name: download file with check (sha256) + get_url: + url: http://example.com/path/file.conf + dest: /etc/foo.conf + checksum: sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c + +- name: download file with check (md5) + get_url: + url: http://example.com/path/file.conf + dest: /etc/foo.conf + checksum: md5:66dffb5228a211e61d6d7ef4a86f5758 + +- name: download file from a file path + get_url: + url: "file:///tmp/afile.txt" + dest: /tmp/afilecopy.txt ''' -import urlparse +from ansible.module_utils.six.moves.urllib.parse import urlsplit # ============================================================== # url handling def url_filename(url): - fn = os.path.basename(urlparse.urlsplit(url)[2]) + fn = os.path.basename(urlsplit(url)[2]) if fn == '': return 'index.html' return fn @@ -203,10 +231,14 @@ def 
url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, head if info['status'] == 304: module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', '')) - # create a temporary file and copy content to do checksum-based replacement - if info['status'] != 200: + # Exceptions in fetch_url may result in a status -1, the ensures a proper error to the user in all cases + if info['status'] == -1: + module.fail_json(msg=info['msg'], url=url, dest=dest) + + if info['status'] != 200 and not url.startswith('file:/') and not (url.startswith('ftp:/') and info.get('msg', '').startswith('OK')): module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest) + # create a temporary file and copy content to do checksum-based replacement if tmp_dest != '': # tmp_dest should be an existing dir tmp_dest_is_dir = os.path.isdir(tmp_dest) @@ -214,7 +246,7 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, head if os.path.exists(tmp_dest): module.fail_json(msg="%s is a file but should be a directory." % tmp_dest) else: - module.fail_json(msg="%s directoy does not exist." % tmp_dest) + module.fail_json(msg="%s directory does not exist." 
% tmp_dest) fd, tempname = tempfile.mkstemp(dir=tmp_dest) else: @@ -223,7 +255,8 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, head f = os.fdopen(fd, 'wb') try: shutil.copyfileobj(rsp, f) - except Exception, err: + except Exception: + err = get_exception() os.remove(tempname) module.fail_json(msg="failed to create temporary content file: %s" % str(err)) f.close() @@ -285,7 +318,7 @@ def main(): # Parse headers to dict if module.params['headers']: try: - headers = dict(item.split(':') for item in module.params['headers'].split(',')) + headers = dict(item.split(':', 1) for item in module.params['headers'].split(',')) except: module.fail_json(msg="The header parameter requires a key:value,key:value syntax to be properly parsed.") else: @@ -340,6 +373,11 @@ def main(): mtime = os.path.getmtime(dest) last_mod_time = datetime.datetime.utcfromtimestamp(mtime) + # If the checksum does not match we have to force the download + # because last_mod_time may be newer than on remote + if checksum_mismatch: + force = True + # download to tmpsrc tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest) @@ -389,7 +427,8 @@ def main(): if os.path.exists(dest): backup_file = module.backup_local(dest) shutil.copyfile(tmpsrc, dest) - except Exception, err: + except Exception: + err = get_exception() os.remove(tmpsrc) module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err))) changed = True diff --git a/network/basics/slurp.py b/network/basics/slurp.py index d017e849e64..f2ece413da8 100644 --- a/network/basics/slurp.py +++ b/network/basics/slurp.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: slurp @@ -35,6 +39,7 @@ default: null aliases: [] notes: + - This module returns an 'in memory' base64 encoded version of the file, take into account that this will require at least twice the RAM as the original file size. - "See also: M(fetch)" requirements: [] author: diff --git a/network/basics/uri.py b/network/basics/uri.py index f73802e8875..24257dc3566 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -20,15 +20,9 @@ # # see examples/playbooks/uri.yml -import cgi -import shutil -import tempfile -import datetime - -try: - import json -except ImportError: - import simplejson as json +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -63,7 +57,7 @@ body: description: - The body of the http request/response to the web service. If C(body_format) is set - to 'json' it will take an already formated JSON string or convert a data structure + to 'json' it will take an already formatted JSON string or convert a data structure into JSON. required: false default: null @@ -163,20 +157,24 @@ ''' EXAMPLES = ''' -# Check that you can connect (GET) to a page and it returns a status 200 -- uri: url=http://www.example.com +- name: Check that you can connect (GET) to a page and it returns a status 200 + uri: + url: http://www.example.com # Check that a page returns a status 200 and fail if the word AWESOME is not # in the page contents. 
-- action: uri url=http://www.example.com return_content=yes +- uri: + url: http://www.example.com + return_content: yes register: webpage -- action: fail +- name: Fail if AWESOME is not in the page content + fail: when: "'AWESOME' not in webpage.content" -# Create a JIRA issue -- uri: +- name: Create a JIRA issue + uri: url: https://your.jira.example.com/rest/api/2/issue/ method: POST user: your_username @@ -203,8 +201,8 @@ return_content: yes HEADER_Cookie: "{{login.set_cookie}}" -# Queue build of a project in Jenkins: -- uri: +- name: Queue build of a project in Jenkins + uri: url: "http://{{ jenkins.host }}/job/{{ jenkins.job }}/build?token={{ jenkins.token }}" method: GET user: "{{ jenkins.user }}" @@ -214,6 +212,23 @@ ''' +import cgi +import datetime +import os +import shutil +import tempfile + +try: + import json +except ImportError: + import simplejson as json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +import ansible.module_utils.six as six +from ansible.module_utils._text import to_text +from ansible.module_utils.urls import fetch_url, url_argument_spec + def write_file(module, url, dest, content): # create a tempfile with some test content @@ -221,7 +236,8 @@ def write_file(module, url, dest, content): f = open(tmpsrc, 'wb') try: f.write(content) - except Exception, err: + except Exception: + err = get_exception() os.remove(tmpsrc) module.fail_json(msg="failed to create temporary content file: %s" % str(err)) f.close() @@ -256,7 +272,8 @@ def write_file(module, url, dest, content): if checksum_src != checksum_dest: try: shutil.copyfile(tmpsrc, dest) - except Exception, err: + except Exception: + err = get_exception() os.remove(tmpsrc) module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err))) @@ -264,7 +281,7 @@ def write_file(module, url, dest, content): def url_filename(url): - fn = os.path.basename(urlparse.urlsplit(url)[2]) + fn = 
os.path.basename(six.moves.urllib.parse.urlsplit(url)[2]) if fn == '': return 'index.html' return fn @@ -279,7 +296,7 @@ def absolute_location(url, location): return location elif location.startswith('/'): - parts = urlparse.urlsplit(url) + parts = six.moves.urllib.parse.urlsplit(url) base = url.replace(parts[2], '') return '%s%s' % (base, location) @@ -326,13 +343,18 @@ def uri(module, url, dest, body, body_format, method, headers, socket_timeout): resp, info = fetch_url(module, url, data=body, headers=headers, method=method, timeout=socket_timeout) - r['redirected'] = redirected or info['url'] != url - r.update(redir_info) - r.update(info) + try: content = resp.read() except AttributeError: - content = '' + # there was no content, but the error read() + # may have been stored in the info as 'body' + content = info.pop('body', '') + + r['redirected'] = redirected or info['url'] != url + r.update(redir_info) + r.update(info) + return r, content, dest @@ -374,14 +396,14 @@ def main(): dict_headers = module.params['headers'] if body_format == 'json': - # Encode the body unless its a string, then assume it is preformatted JSON + # Encode the body unless its a string, then assume it is pre-formatted JSON if not isinstance(body, basestring): body = json.dumps(body) dict_headers['Content-Type'] = 'application/json' # Grab all the http headers. Need this hack since passing multi-values is # currently a bit ugly. (e.g. headers='{"Content-Type":"application/json"}') - for key, value in module.params.iteritems(): + for key, value in six.iteritems(module.params): if key.startswith("HEADER_"): skey = key.replace("HEADER_", "") dict_headers[skey] = value @@ -424,9 +446,11 @@ def main(): # Transmogrify the headers, replacing '-' with '_', since variables dont # work with dashes. + # In python3, the headers are title cased. Lowercase them to be + # compatible with the python2 behaviour. 
uresp = {} - for key, value in resp.iteritems(): - ukey = key.replace("-", "_") + for key, value in six.iteritems(resp): + ukey = key.replace("-", "_").lower() uresp[ukey] = value try: @@ -440,16 +464,15 @@ def main(): content_type, params = cgi.parse_header(uresp['content_type']) if 'charset' in params: content_encoding = params['charset'] - u_content = unicode(content, content_encoding, errors='replace') - if content_type.startswith('application/json') or \ - content_type.startswith('text/json'): + u_content = to_text(content, encoding=content_encoding) + if 'application/json' in content_type or 'text/json' in content_type: try: js = json.loads(u_content) uresp['json'] = js except: pass else: - u_content = unicode(content, content_encoding, errors='replace') + u_content = to_text(content, encoding=content_encoding) if resp['status'] not in status_code: uresp['msg'] = 'Status code was not %s: %s' % (status_code, uresp.get('msg', '')) @@ -460,9 +483,5 @@ def main(): module.exit_json(changed=changed, **uresp) -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - if __name__ == '__main__': main() diff --git a/network/cumulus/cl_bond.py b/network/cumulus/cl_bond.py index 89725810f1b..baf9b0fe844 100644 --- a/network/cumulus/cl_bond.py +++ b/network/cumulus/cl_bond.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cl_bond @@ -25,70 +29,79 @@ - Configures a bond interface on Cumulus Linux To configure a bridge port use the cl_bridge module. To configure any other type of interface use the cl_interface module. Follow the guidelines for bonding found in the - Cumulus User Guide at http://docs.cumulusnetworks.com + Cumulus User Guide at U(http://docs.cumulusnetworks.com). 
options: name: description: - - name of the interface + - Name of the interface. required: true alias_name: description: - - add a port description + - Description of the port. ipv4: description: - - list of IPv4 addresses to configure on the interface. - use X.X.X.X/YY syntax. + - List of IPv4 addresses to configure on the interface. + In the form I(X.X.X.X/YY). ipv6: description: - - list of IPv6 addresses to configure on the interface. - use X:X:X::X/YYY syntax + - List of IPv6 addresses to configure on the interface. + In the form I(X:X:X::X/YYY). addr_method: description: - - configures the port to use DHCP. - To enable this feature use the option 'dhcp' + - Configures the port to use DHCP. + To enable this feature use the option I(dhcp). choices: ['dhcp'] mtu: description: - - set MTU. Configure Jumbo Frame by setting MTU to 9000. + - Set MTU. Configure Jumbo Frame by setting MTU to I(9000). virtual_ip: description: - - define IPv4 virtual IP used by the Cumulus Linux VRR feature + - Define IPv4 virtual IP used by the Cumulus Linux VRR feature. virtual_mac: description: - - define Ethernet mac associated with Cumulus Linux VRR feature + - Define Ethernet mac associated with Cumulus Linux VRR feature. vids: description: - - in vlan aware mode, lists vlans defined under the interface + - In vlan-aware mode, lists VLANs defined under the interface. mstpctl_bpduguard: description: - - Enables BPDU Guard on a port in vlan-aware mode + - Enables BPDU Guard on a port in vlan-aware mode. + choices: + - true + - false mstpctl_portnetwork: description: - - Enables bridge assurance in vlan-aware mode + - Enables bridge assurance in vlan-aware mode. + choices: + - true + - false mstpctl_portadminedge: description: - - Enables admin edge port + - Enables admin edge port. + choices: + - true + - false clag_id: description: - - specify a unique clag_id for every dual connected bond on each + - Specify a unique clag_id for every dual connected bond on each peer switch. 
The value must be between 1 and 65535 and must be the same on both peer switches in order for the bond to be considered - dual-connected + dual-connected. pvid: description: - - in vlan aware mode, defines vlan that is the untagged vlan + - In vlan-aware mode, defines vlan that is the untagged vlan. miimon: description: - - mii link monitoring interval + - The mii link monitoring interval. default: 100 mode: description: - - bond mode. as of Cumulus Linux 2.5 only LACP bond mode is - supported + - The bond mode, as of Cumulus Linux 2.5 only LACP bond mode is + supported. default: '802.3ad' min_links: description: - - minimum number of links + - Minimum number of links. default: 1 lacp_bypass_allow: description: @@ -98,7 +111,7 @@ - Period for enabling LACP bypass. Max value is 900. lacp_bypass_priority: description: - - List of ports and priorities. Example "swp1=10, swp2=20" + - List of ports and priorities. Example I("swp1=10, swp2=20"). lacp_bypass_all_active: description: - Activate all interfaces for bypass. @@ -106,27 +119,27 @@ of using bypass_priority. lacp_rate: description: - - lacp rate + - The lacp rate. default: 1 slaves: description: - - bond members + - Bond members. required: True xmit_hash_policy: description: - - transmit load balancing algorithm. As of Cumulus Linux 2.5 only - layer3+4 policy is supported + - Transmit load balancing algorithm. As of Cumulus Linux 2.5 only + I(layer3+4) policy is supported. default: layer3+4 location: description: - - interface directory location + - Interface directory location. default: - - /etc/network/interfaces.d + - '/etc/network/interfaces.d' requirements: [ Alternate Debian network interface manager - \ ifupdown2 @ github.com/CumulusNetworks/ifupdown2 ] notes: - - because the module writes the interface directory location. 
Ensure that + - As this module writes the interface directory location, ensure that ``/etc/network/interfaces`` has a 'source /etc/network/interfaces.d/\*' or whatever path is mentioned in the ``location`` attribute. @@ -137,33 +150,39 @@ EXAMPLES = ''' # Options ['virtual_mac', 'virtual_ip'] are required together # configure a bond interface with IP address -cl_bond: name=bond0 slaves="swp4-5" ipv4=10.1.1.1/24 -notify: reload networking +- cl_bond: + name: bond0 + slaves: "swp4-5" + ipv4: 10.1.1.1/24 + notify: reload networking # configure bond as a dual-connected clag bond -cl_bond: name=bond1 slaves="swp1s0 swp2s0" clag_id=1 -notify: reload networking +- cl_bond: + name: bond1 + slaves: "swp1s0 swp2s0" + clag_id: 1 + notify: reload networking # define cl_bond once in tasks file -# then write inteface config in variables file +# then write interface config in variables file # with just the options you want. -cl_bond: - name: "{{ item.key }}" - slaves: "{{ item.value.slaves }}" - clag_id: "{{ item.value.clag_id|default(omit) }}" - ipv4: "{{ item.value.ipv4|default(omit) }}" - ipv6: "{{ item.value.ipv6|default(omit) }}" - alias_name: "{{ item.value.alias_name|default(omit) }}" - addr_method: "{{ item.value.addr_method|default(omit) }}" - mtu: "{{ item.value.mtu|default(omit) }}" - vids: "{{ item.value.vids|default(omit) }}" - virtual_ip: "{{ item.value.virtual_ip|default(omit) }}" - virtual_mac: "{{ item.value.virtual_mac|default(omit) }}" - mstpctl_portnetwork: "{{ item.value.mstpctl_portnetwork|default('no') }}" - mstpctl_portadminedge: "{{ item.value.mstpctl_portadminedge|default('no') }}" - mstpctl_bpduguard: "{{ item.value.mstpctl_bpduguard|default('no') }}" -with_dict: cl_bonds -notify: reload networking +- cl_bond: + name: "{{ item.key }}" + slaves: "{{ item.value.slaves }}" + clag_id: "{{ item.value.clag_id|default(omit) }}" + ipv4: "{{ item.value.ipv4|default(omit) }}" + ipv6: "{{ item.value.ipv6|default(omit) }}" + alias_name: "{{ 
item.value.alias_name|default(omit) }}" + addr_method: "{{ item.value.addr_method|default(omit) }}" + mtu: "{{ item.value.mtu|default(omit) }}" + vids: "{{ item.value.vids|default(omit) }}" + virtual_ip: "{{ item.value.virtual_ip|default(omit) }}" + virtual_mac: "{{ item.value.virtual_mac|default(omit) }}" + mstpctl_portnetwork: "{{ item.value.mstpctl_portnetwork|default('no') }}" + mstpctl_portadminedge: "{{ item.value.mstpctl_portadminedge|default('no') }}" + mstpctl_bpduguard: "{{ item.value.mstpctl_bpduguard|default('no') }}" + with_dict: "{{ cl_bonds }}" + notify: reload networking # In vars file # ============ diff --git a/network/cumulus/cl_bridge.py b/network/cumulus/cl_bridge.py index c0bf2d523a7..e2805307c28 100644 --- a/network/cumulus/cl_bridge.py +++ b/network/cumulus/cl_bridge.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cl_bridge @@ -25,70 +29,72 @@ - Configures a bridge interface on Cumulus Linux To configure a bond port use the cl_bond module. To configure any other type of interface use the cl_interface module. Follow the guidelines for bridging found in the - Cumulus User Guide at http://docs.cumulusnetworks.com + Cumulus User Guide at U(http://docs.cumulusnetworks.com) options: name: description: - - name of the interface + - Name of the interface. required: true alias_name: description: - - add a port description + - Description of the port. ipv4: description: - - list of IPv4 addresses to configure on the interface. - use X.X.X.X/YY syntax. + - List of IPv4 addresses to configure on the interface. + In the form I(X.X.X.X/YY). ipv6: description: - - list of IPv6 addresses to configure on the interface. - use X:X:X::X/YYY syntax + - List of IPv6 addresses to configure on the interface. + In the form I(X:X:X::X/YYY). 
addr_method: description: - - configures the port to use DHCP. - To enable this feature use the option 'dhcp' + - Configures the port to use DHCP. + To enable this feature use the option I(dhcp). choices: ['dhcp'] mtu: description: - - set MTU. Configure Jumbo Frame by setting MTU to 9000. + - Set MTU. Configure Jumbo Frame by setting MTU to I(9000). virtual_ip: description: - - define IPv4 virtual IP used by the Cumulus Linux VRR feature + - Define IPv4 virtual IP used by the Cumulus Linux VRR feature. virtual_mac: description: - - define Ethernet mac associated with Cumulus Linux VRR feature + - Define Ethernet mac associated with Cumulus Linux VRR feature. vids: description: - - in vlan aware mode, lists vlans defined under the interface + - In vlan-aware mode, lists VLANs defined under the interface. pvid: description: - - in vlan aware mode, defines vlan that is the untagged vlan + - In vlan-aware mode, defines vlan that is the untagged vlan. stp: description: - - enables spanning tree. As of Cumulus Linux 2.5 the default + - Enables spanning tree Protocol. As of Cumulus Linux 2.5 the default bridging mode, only per vlan RSTP or 802.1d is supported. For the vlan aware mode, only common instance STP is supported default: 'yes' + choices: ['yes', 'no'] ports: description: - - list of bridge members + - List of bridge members. required: True vlan_aware: description: - - enables vlan aware mode. + - Enables vlan-aware mode. + choices: ['yes', 'no'] mstpctl_treeprio: description: - - set spanning tree root priority. Must be a multiple of 4096 + - Set spanning tree root priority. Must be a multiple of 4096. location: description: - - interface directory location + - Interface directory location. default: - - /etc/network/interfaces.d + - '/etc/network/interfaces.d' requirements: [ Alternate Debian network interface manager ifupdown2 @ github.com/CumulusNetworks/ifupdown2 ] notes: - - because the module writes the interface directory location. 
Ensure that + - As this module writes the interface directory location, ensure that ``/etc/network/interfaces`` has a 'source /etc/network/interfaces.d/\*' or whatever path is mentioned in the ``location`` attribute. @@ -99,40 +105,47 @@ EXAMPLES = ''' # Options ['virtual_mac', 'virtual_ip'] are required together # configure a bridge vlan aware bridge. -cl_bridge: name=br0 ports='swp1-12' vlan_aware='yes' -notify: reload networking +- cl_bridge: + name: br0 + ports: 'swp1-12' + vlan_aware: 'yes' + notify: reload networking # configure bridge interface to define a default set of vlans -cl_bridge: name=bridge ports='swp1-12' vlan_aware='yes' vids='1-100' -notify: reload networking +- cl_bridge: + name: bridge + ports: 'swp1-12' + vlan_aware: 'yes' + vids: '1-100' + notify: reload networking # define cl_bridge once in tasks file -# then write inteface config in variables file +# then write interface config in variables file # with just the options you want. -cl_bridge: - name: "{{ item.key }}" - ports: "{{ item.value.ports }}" - vlan_aware: "{{ item.value.vlan_aware|default(omit) }}" - ipv4: "{{ item.value.ipv4|default(omit) }}" - ipv6: "{{ item.value.ipv6|default(omit) }}" - alias_name: "{{ item.value.alias_name|default(omit) }}" - addr_method: "{{ item.value.addr_method|default(omit) }}" - mtu: "{{ item.value.mtu|default(omit) }}" - vids: "{{ item.value.vids|default(omit) }}" - virtual_ip: "{{ item.value.virtual_ip|default(omit) }}" - virtual_mac: "{{ item.value.virtual_mac|default(omit) }}" - mstpctl_treeprio: "{{ item.value.mstpctl_treeprio|default(omit) }}" -with_dict: cl_bridges -notify: reload networking +- cl_bridge: + name: "{{ item.key }}" + ports: "{{ item.value.ports }}" + vlan_aware: "{{ item.value.vlan_aware|default(omit) }}" + ipv4: "{{ item.value.ipv4|default(omit) }}" + ipv6: "{{ item.value.ipv6|default(omit) }}" + alias_name: "{{ item.value.alias_name|default(omit) }}" + addr_method: "{{ item.value.addr_method|default(omit) }}" + mtu: "{{ 
item.value.mtu|default(omit) }}" + vids: "{{ item.value.vids|default(omit) }}" + virtual_ip: "{{ item.value.virtual_ip|default(omit) }}" + virtual_mac: "{{ item.value.virtual_mac|default(omit) }}" + mstpctl_treeprio: "{{ item.value.mstpctl_treeprio|default(omit) }}" + with_dict: "{{ cl_bridges }}" + notify: reload networking # In vars file # ============ cl_bridge: - br0: - alias_name: 'vlan aware bridge' - ports: ['swp1', 'swp3'] - vlan_aware: true - vids: ['1-100'] + br0: + alias_name: 'vlan aware bridge' + ports: ['swp1', 'swp3'] + vlan_aware: true + vids: ['1-100'] ''' RETURN = ''' diff --git a/network/cumulus/cl_img_install.py b/network/cumulus/cl_img_install.py index 79c0eaf0d80..26fe8857b6f 100644 --- a/network/cumulus/cl_img_install.py +++ b/network/cumulus/cl_img_install.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cl_img_install @@ -23,30 +27,33 @@ short_description: Install a different Cumulus Linux version. description: - install a different version of Cumulus Linux in the inactive slot. For - more details go the Image Management User Guide @ - http://docs.cumulusnetworks.com/ + more details go the Image Management User Guide at + U(http://docs.cumulusnetworks.com/). options: src: description: - - full path to the Cumulus Linux binary image. Can be a local path, + - The full path to the Cumulus Linux binary image. Can be a local path, http or https URL. If the code version is in the name of the file, the module will assume this is the version of code you wish to install. required: true version: description: - - inform the module of the exact version one is installing. This + - Inform the module of the exact version one is installing. This overrides the automatic check of version in the file name. 
For example, if the binary file name is called CumulusLinux-2.2.3.bin, and version is set to '2.5.0', then the module will assume it is installing '2.5.0' not '2.2.3'. If version is not included, then the module will assume '2.2.3' is the version to install. + default: None + required: false switch_slot: description: - Switch slots after installing the image. - To run the installed code, reboot the switch + To run the installed code, reboot the switch. choices: ['yes', 'no'] default: 'no' + required: false requirements: ["Cumulus Linux OS"] @@ -56,32 +63,40 @@ ## Download and install the image from a webserver. - - name: install image using using http url. Switch slots so the subsequent - will load the new version - cl_img_install: version=2.0.1 - src='http://10.1.1.1/CumulusLinux-2.0.1.bin' - switch_slot=yes +- name: Install image using http url. Switch slots so the subsequent will load the new version + cl_img_install: + version: 2.0.1 + src: http://10.1.1.1/CumulusLinux-2.0.1.bin + switch_slot: yes ## Copy the software from the ansible server to the switch. ## The module will get the code version from the filename ## The code will be installed in the alternate slot but the slot will not be primary ## A subsequent reload will not run the new code - - name: download cumulus linux to local system - get_url: src=ftp://cumuluslinux.bin dest=/root/CumulusLinux-2.0.1.bin +- name: Download cumulus linux to local system + get_url: + src: ftp://cumuluslinux.bin + dest: /root/CumulusLinux-2.0.1.bin - - name: install image from local filesystem. Get version from the filename - cl_img_install: src='/root/CumulusLinux-2.0.1.bin' +- name: Install image from local filesystem. Get version from the filename.
+ cl_img_install: + src: /root/CumulusLinux-2.0.1.bin ## If the image name has been changed from the original name, use the `version` option ## to inform the module exactly what code version is been installed - - name: download cumulus linux to local system - get_url: src=ftp://CumulusLinux-2.0.1.bin dest=/root/image.bin +- name: Download cumulus linux to local system + get_url: + src: ftp://CumulusLinux-2.0.1.bin + dest: /root/image.bin - - name: install image and switch slots. only reboot needed - cl_img_install: version=2.0.1 src=/root/image.bin switch_slot=yes' +- name: install image and switch slots. only reboot needed + cl_img_install: + version: 2.0.1 + src: /root/image.bin + switch_slot: yes ''' RETURN = ''' @@ -111,7 +126,8 @@ def check_url(module, url): def run_cl_cmd(module, cmd, check_rc=True): try: (rc, out, err) = module.run_command(cmd, check_rc=check_rc) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg=e.strerror) # trim last line as it is always empty ret = out.splitlines() diff --git a/network/cumulus/cl_interface.py b/network/cumulus/cl_interface.py index d8dd3bf9e83..c8262b0710e 100644 --- a/network/cumulus/cl_interface.py +++ b/network/cumulus/cl_interface.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cl_interface @@ -28,82 +32,83 @@ bond ports use the cl_bond module. When configuring bridge related features like the "vid" option, please follow the guidelines for configuring "vlan aware" bridging. For more details review the Layer2 - Interface Guide at http://docs.cumulusnetworks.com + Interface Guide at U(http://docs.cumulusnetworks.com) options: name: description: - - name of the interface + - Name of the interface. 
required: true alias_name: description: - - add a port description + - Description of the port. ipv4: description: - - list of IPv4 addresses to configure on the interface. - use X.X.X.X/YY syntax. + - List of IPv4 addresses to configure on the interface. + In the form I(X.X.X.X/YY). ipv6: description: - - list of IPv6 addresses to configure on the interface. - use X:X:X::X/YYY syntax + - List of IPv6 addresses to configure on the interface. + In the form I(X:X:X::X/YYY). addr_method: description: - - can be loopback for loopback interfaces or dhcp for dhcp - interfaces. + - Address method. + choices: + - loopback + - dhcp speed: description: - - set speed of the swp(front panel) or management(eth0) interface. - speed is in MB + - Set speed of the swp(front panel) or management(eth0) interface. + speed is in MB. mtu: description: - - set MTU. Configure Jumbo Frame by setting MTU to 9000. - + - Set MTU. Configure Jumbo Frame by setting MTU to I(9000). virtual_ip: description: - - define IPv4 virtual IP used by the Cumulus VRR feature + - Define IPv4 virtual IP used by the Cumulus Linux VRR feature. virtual_mac: description: - - define Ethernet mac associated with Cumulus VRR feature + - Define Ethernet mac associated with Cumulus Linux VRR feature. vids: description: - - in vlan aware mode, lists vlans defined under the interface + - In vlan-aware mode, lists VLANs defined under the interface. mstpctl_bpduguard: description: - - Enables BPDU Guard on a port in vlan-aware mode + - Enables BPDU Guard on a port in vlan-aware mode. mstpctl_portnetwork: description: - - Enables bridge assurance in vlan-aware mode + - Enables bridge assurance in vlan-aware mode. mstpctl_portadminedge: description: - - Enables admin edge port + - Enables admin edge port. clagd_enable: description: - Enables the clagd daemon. This command should only be applied to - the clag peerlink interface + the clag peerlink interface. 
clagd_priority: description: - Integer that changes the role the switch has in the clag domain. The lower priority switch will assume the primary role. The number - can be between 0 and 65535 + can be between 0 and 65535. clagd_peer_ip: description: - - IP address of the directly connected peer switch interface + - IP address of the directly connected peer switch interface. clagd_sys_mac: description: - Clagd system mac address. Recommended to use the range starting - with 44:38:39:ff. Needs to be the same between 2 Clag switches + with 44:38:39:ff. Needs to be the same between 2 Clag switches. pvid: description: - - in vlan aware mode, defines vlan that is the untagged vlan + - In vlan-aware mode, defines vlan that is the untagged vlan. location: description: - - interface directory location + - Interface directory location default: - - /etc/network/interfaces.d + - '/etc/network/interfaces.d' requirements: [ Alternate Debian network interface manager - \ ifupdown2 @ github.com/CumulusNetworks/ifupdown2 ] notes: - - because the module writes the interface directory location. Ensure that + - As this module writes the interface directory location, ensure that ``/etc/network/interfaces`` has a 'source /etc/network/interfaces.d/\*' or whatever path is mentioned in the ``location`` attribute. 
@@ -113,45 +118,55 @@ EXAMPLES = ''' # Options ['virtual_mac', 'virtual_ip'] are required together -# configure a front panel port with an IP -cl_interface: name=swp1 ipv4=10.1.1.1/24 -notify: reload networking - -# configure front panel to use DHCP -cl_interface: name=swp2 addr_family=dhcp -notify: reload networking - -# configure a SVI for vlan 100 interface with an IP -cl_interface: name=bridge.100 ipv4=10.1.1.1/24 -notify: reload networking - -# configure subinterface with an IP -cl_interface: name=bond0.100 alias_name='my bond' ipv4=10.1.1.1/24 -notify: reload networking +- name: Configure a front panel port with an IP + cl_interface: + name: swp1 + ipv4: 10.1.1.1/24 + notify: reload networking + +- name: Configure front panel to use DHCP + cl_interface: + name: swp2 + addr_family: dhcp + notify: reload networking + +- name: Configure a SVI for vlan 100 interface with an IP + cl_interface: + name: bridge.100 + ipv4: 10.1.1.1/24 + notify: reload networking + +- name: Configure subinterface with an IP + cl_interface: + name: bond0.100 + alias_name: 'my bond' + ipv4: 10.1.1.1/24 + notify: reload networking # define cl_interfaces once in tasks -# then write intefaces in variables file +# then write interfaces in variables file # with just the options you want. 
-cl_interface: - name: "{{ item.key }}" - ipv4: "{{ item.value.ipv4|default(omit) }}" - ipv6: "{{ item.value.ipv6|default(omit) }}" - alias_name: "{{ item.value.alias_name|default(omit) }}" - addr_method: "{{ item.value.addr_method|default(omit) }}" - speed: "{{ item.value.link_speed|default(omit) }}" - mtu: "{{ item.value.mtu|default(omit) }}" - clagd_enable: "{{ item.value.clagd_enable|default(omit) }}" - clagd_peer_ip: "{{ item.value.clagd_peer_ip|default(omit) }}" - clagd_sys_mac: "{{ item.value.clagd_sys_mac|default(omit) }}" - clagd_priority: "{{ item.value.clagd_priority|default(omit) }}" - vids: "{{ item.value.vids|default(omit) }}" - virtual_ip: "{{ item.value.virtual_ip|default(omit) }}" - virtual_mac: "{{ item.value.virtual_mac|default(omit) }}" - mstpctl_portnetwork: "{{ item.value.mstpctl_portnetwork|default('no') }}" - mstpctl_portadminedge: "{{ item.value.mstpctl_portadminedge|default('no') }}" - mstpctl_bpduguard: "{{ item.value.mstpctl_bpduguard|default('no') }}" -with_dict: cl_interfaces -notify: reload networking + - name: Create interfaces + cl_interface: + name: "{{ item.key }}" + ipv4: "{{ item.value.ipv4 | default(omit) }}" + ipv6: "{{ item.value.ipv6 | default(omit) }}" + alias_name: "{{ item.value.alias_name | default(omit) }}" + addr_method: "{{ item.value.addr_method | default(omit) }}" + speed: "{{ item.value.link_speed | default(omit) }}" + mtu: "{{ item.value.mtu | default(omit) }}" + clagd_enable: "{{ item.value.clagd_enable | default(omit) }}" + clagd_peer_ip: "{{ item.value.clagd_peer_ip | default(omit) }}" + clagd_sys_mac: "{{ item.value.clagd_sys_mac | default(omit) }}" + clagd_priority: "{{ item.value.clagd_priority | default(omit) }}" + vids: "{{ item.value.vids | default(omit) }}" + virtual_ip: "{{ item.value.virtual_ip | default(omit) }}" + virtual_mac: "{{ item.value.virtual_mac | default(omit) }}" + mstpctl_portnetwork: "{{ item.value.mstpctl_portnetwork | default('no') }}" + mstpctl_portadminedge: "{{ 
item.value.mstpctl_portadminedge | default('no') }}" + mstpctl_bpduguard: "{{ item.value.mstpctl_bpduguard | default('no') }}" + with_dict: "{{ cl_interfaces }}" + notify: reload networking # In vars file diff --git a/network/cumulus/cl_interface_policy.py b/network/cumulus/cl_interface_policy.py index 307c477ccfa..597f11a3fc3 100644 --- a/network/cumulus/cl_interface_policy.py +++ b/network/cumulus/cl_interface_policy.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cl_interface_policy @@ -29,17 +33,18 @@ the switch, but not found in the "allowed" list, this interface will be unconfigured. By default this is `/etc/network/interface.d` For more details go the Configuring Interfaces at - http://docs.cumulusnetworks.com + U(http://docs.cumulusnetworks.com). notes: - lo must be included in the allowed list. - eth0 must be in allowed list if out of band management is done options: allowed: description: - - list of ports to run initial run at 10G + - List of ports to run initial run at 10G. + required: true location: description: - - folder to store interface files. + - Directory to store interface files. default: '/etc/network/interfaces.d/' ''' @@ -72,7 +77,7 @@ def read_current_int_dir(module): module.custom_currentportlist = os.listdir(module.params.get('location')) -# take the allowed list and conver it to into a list +# take the allowed list and convert it into a list # of ports. def convert_allowed_list_to_port_range(module): allowedlist = module.params.get('allowed') diff --git a/network/cumulus/cl_license.py b/network/cumulus/cl_license.py index deafed5e49a..a0656e0abce 100644 --- a/network/cumulus/cl_license.py +++ b/network/cumulus/cl_license.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible.
If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cl_license @@ -24,14 +28,14 @@ description: - Installs a Cumulus Linux license. The module reports no change of status when a license is installed. - For more details go the Cumulus Linux License Documentation @ - http://docs.cumulusnetwork.com and the Licensing KB Site @ - https://support.cumulusnetworks.com/hc/en-us/sections/200507688 + For more details go the Cumulus Linux License Documentation at + U(http://docs.cumulusnetwork.com) and the Licensing KB Site at + U(https://support.cumulusnetworks.com/hc/en-us/sections/200507688) notes: - - to activate a license for the FIRST time, the switchd service must be + - To activate a license for the FIRST time, the switchd service must be restarted. This action is disruptive. The license renewal process occurs via the Cumulus Networks Customer Portal - - http://customers.cumulusnetworks.com. + U(http://customers.cumulusnetworks.com). - A non-EULA license is REQUIRED for automation. Manually install the license on a test switch, using the command "cl-license -i " to confirm the license is a Non-EULA license. @@ -39,45 +43,53 @@ options: src: description: - - full path to the license. Can be local path or http url + - The full path to the license. Can be local path or HTTP URL. + required: true force: description: - - force installation of a license. Typically not needed. + - Force installation of a license. Typically not needed. It is recommended to manually run this command via the ansible command. A reload of switchd is not required. Running the force option in a playbook will break the idempotent state machine of the module and cause the switchd notification to kick in all the time, causing a disruption. 
+ choices: + - yes + - no ''' EXAMPLES = ''' -Example playbook using the cl_license module to manage licenses on Cumulus Linux - ---- - - hosts: all - tasks: - - name: install license using http url - cl_license: src='http://10.1.1.1/license.txt' - notify: restart switchd - - - name: Triggers switchd to be restarted right away, before play, or role - is over. This is desired behaviour - meta: flush_handlers - - - name: configure interfaces - template: src=interfaces.j2 dest=/etc/network/interfaces - notify: restart networking - - handlers: - - name: restart switchd - service: name=switchd state=restarted - - name: restart networking - service: name=networking state=reloaded - ----- +# Example playbook using the cl_license module to manage licenses on Cumulus Linux + +- hosts: all + tasks: + - name: install license using http url + cl_license: + src: http://10.1.1.1/license.txt + notify: restart switchd + + - name: Triggers switchd to be restarted right away, before play, or role + is over. This is desired behaviour + meta: flush_handlers + + - name: Configure interfaces + template: + src: interfaces.j2 + dest: /etc/network/interfaces + notify: restart networking + + handlers: + - name: restart switchd + service: + name: switchd + state: restarted + - name: restart networking + service: + name: networking + state: reloaded # Force all switches to accept a new license. Typically not needed -ansible -m cl_license -a "src='http://10.1.1.1/new_lic' force=yes" -u root all +ansible -m cl_license -a "src='http://10.1.1.1/new_lic' force=yes" -u root all ---- diff --git a/network/cumulus/cl_ports.py b/network/cumulus/cl_ports.py index e632cdddac2..85b3ed94d02 100644 --- a/network/cumulus/cl_ports.py +++ b/network/cumulus/cl_ports.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cl_ports @@ -27,20 +31,20 @@ to not include ports that do not exist on the switch. Carefully read the original ports.conf file for any exceptions or limitations. For more details go the Configure Switch Port Attribute Documentation at - http://docs.cumulusnetworks.com + U(http://docs.cumulusnetworks.com). options: speed_10g: description: - - list of ports to run initial run at 10G + - List of ports to run initial run at 10G. speed_40g: description: - - list of ports to run initial run at 40G + - List of ports to run initial run at 40G. speed_4_by_10g: description: - - list of 40G ports that will be unganged to run as 4 10G ports. + - List of 40G ports that will be unganged to run as 4 10G ports. speed_40g_div_4: description: - - list of 10G ports that will be ganged to form a 40G port + - List of 10G ports that will be ganged to form a 40G port. ''' EXAMPLES = ''' Example playbook entries using the cl_ports module to manage the switch @@ -48,7 +52,9 @@ ## Unganged port config using simple args - name: configure ports.conf setup - cl_ports: speed_4_by_10g="swp1, swp32" speed_40g="swp2-31" + cl_ports: + speed_4_by_10g: "swp1, swp32" + speed_40g: "swp2-31" notify: restart switchd ## Unganged port configuration on certain ports using complex args @@ -84,7 +90,8 @@ def hash_existing_ports_conf(module): try: existing_ports_conf = open(PORTS_CONF).readlines() - except IOError, error_msg: + except IOError: + error_msg = get_exception() _msg = "Failed to open %s: %s" % (PORTS_CONF, error_msg) module.fail_json(msg=_msg) return # for testing only should return on module.fail_json @@ -143,7 +150,8 @@ def make_copy_of_orig_ports_conf(module): try: shutil.copyfile(PORTS_CONF, PORTS_CONF + '.orig') - except IOError, error_msg: + except IOError: + error_msg = get_exception() _msg = "Failed to save the original %s: %s" % (PORTS_CONF, error_msg) 
module.fail_json(msg=_msg) return # for testing only @@ -165,7 +173,8 @@ def write_to_ports_conf(module): temp.write(_str) temp.seek(0) shutil.copyfile(temp.name, PORTS_CONF) - except IOError, error_msg: + except IOError: + error_msg = get_exception() module.fail_json( msg="Failed to write to %s: %s" % (PORTS_CONF, error_msg)) finally: diff --git a/__init__.py b/network/dellos10/__init__.py similarity index 100% rename from __init__.py rename to network/dellos10/__init__.py diff --git a/network/dellos10/dellos10_command.py b/network/dellos10/dellos10_command.py new file mode 100644 index 00000000000..77e381c9117 --- /dev/null +++ b/network/dellos10/dellos10_command.py @@ -0,0 +1,217 @@ +#!/usr/bin/python +# +# (c) 2015 Peter Sprygada, +# +# Copyright (c) 2016 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: dellos10_command +version_added: "2.2" +author: "Senthil Kumar Ganesan (@skg-net)" +short_description: Run commands on remote devices running Dell OS10 +description: + - Sends arbitrary commands to a Dell OS10 node and returns the results + read from the device. This module includes an + argument that will cause the module to wait for a specific condition + before returning or timing out if the condition is not met. 
+ - This module does not support running commands in configuration mode. + Please use M(dellos10_config) to configure Dell OS10 devices. +extends_documentation_fragment: dellos10 +options: + commands: + description: + - List of commands to send to the remote dellos10 device over the + configured provider. The resulting output from the command + is returned. If the I(wait_for) argument is provided, the + module is not returned until the condition is satisfied or + the number of retries has expired. + required: true + wait_for: + description: + - List of conditions to evaluate against the output of the + command. The task will wait for each condition to be true + before moving forward. If the conditional is not true + within the configured number of I(retries), the task fails. + See examples. + required: false + default: null + retries: + description: + - Specifies the number of retries a command should be tried + before it is considered failed. The command is run on the + target device every retry and evaluated against the + I(wait_for) conditions. + required: false + default: 10 + interval: + description: + - Configures the interval in seconds to wait between retries + of the command. If the command does not pass the specified + conditions, the interval indicates how long to wait before + trying the command again. + required: false + default: 1 +""" + +EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node.
+vars: + cli: + host: "{{ inventory_hostname }}" + username: admin + password: admin + transport: cli + +tasks: + - name: run show version on remote devices + dellos10_command: + commands: show version + provider: "{{ cli }}" + + - name: run show version and check to see if output contains OS10 + dellos10_command: + commands: show version + wait_for: result[0] contains OS10 + provider: "{{ cli }}" + + - name: run multiple commands on remote nodes + dellos10_command: + commands: + - show version + - show interface + provider: "{{ cli }}" + + - name: run multiple commands and evaluate the output + dellos10_command: + commands: + - show version + - show interface + wait_for: + - result[0] contains OS10 + - result[1] contains Ethernet + provider: "{{ cli }}" +""" + +RETURN = """ +stdout: + description: The set of responses from the commands + returned: always + type: list + sample: ['...', '...'] + +stdout_lines: + description: The value of stdout split into a list + returned: always + type: list + sample: [['...', '...'], ['...'], ['...']] + +failed_conditions: + description: The list of conditionals that have failed + returned: failed + type: list + sample: ['...', '...'] + +warnings: + description: The list of warnings (if any) generated by module based on arguments + returned: always + type: list + sample: ['...', '...'] +""" + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcli import CommandRunner, FailedConditionsError +from ansible.module_utils.network import NetworkModule, NetworkError +import ansible.module_utils.dellos10 + +def to_lines(stdout): + for item in stdout: + if isinstance(item, basestring): + item = str(item).split('\n') + yield item + +def main(): + spec = dict( + commands=dict(type='list', required=True), + wait_for=dict(type='list'), + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + module = NetworkModule(argument_spec=spec, + connect_on_load=False, + 
supports_check_mode=True) + + commands = module.params['commands'] + conditionals = module.params['wait_for'] or list() + + warnings = list() + + runner = CommandRunner(module) + + for cmd in commands: + if module.check_mode and not cmd.startswith('show'): + warnings.append('only show commands are supported when using ' + 'check mode, not executing `%s`' % cmd) + else: + if cmd.startswith('conf'): + module.fail_json(msg='dellos10_command does not support running ' + 'config mode commands. Please use ' + 'dellos10_config instead') + runner.add_command(cmd) + + for item in conditionals: + runner.add_conditional(item) + + runner.retries = module.params['retries'] + runner.interval = module.params['interval'] + + try: + runner.run() + except FailedConditionsError: + exc = get_exception() + module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc)) + + result = dict(changed=False) + + result['stdout'] = list() + for cmd in commands: + try: + output = runner.get_command(cmd) + except ValueError: + output = 'command not executed due to check_mode, see warnings' + result['stdout'].append(output) + + + result['warnings'] = warnings + result['stdout_lines'] = list(to_lines(result['stdout'])) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/dellos10/dellos10_config.py b/network/dellos10/dellos10_config.py new file mode 100644 index 00000000000..469fa4ca0b3 --- /dev/null +++ b/network/dellos10/dellos10_config.py @@ -0,0 +1,287 @@ +#!/usr/bin/python +# +# (c) 2015 Peter Sprygada, +# +# Copyright (c) 2016 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: dellos10_config +version_added: "2.2" +author: "Senthil Kumar Ganesan (@skg-net)" +short_description: Manage Dell OS10 configuration sections +description: + - Dell OS10 configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides + an implementation for working with Dell OS10 configuration sections in + a deterministic way. +extends_documentation_fragment: dellos10 +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. This argument is mutually exclusive with I(src). + required: false + default: null + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + required: false + default: null + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root dir. This argument is mutually + exclusive with I(lines). 
+ required: false + default: null + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system. + required: false + default: null + after: + description: + - The ordered set of commands to append to the end of the command + stack if a change needs to be made. Just like with I(before) this + allows the playbook designer to append a set of commands to be + executed after the command set. + required: false + default: null + match: + description: + - Instructs the module on the way to perform the matching of + the set of commands against the current device config. If + match is set to I(line), commands are matched line by line. If + match is set to I(strict), command lines are matched with respect + to position. If match is set to I(exact), command lines + must be an equal match. Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. + required: false + default: line + choices: ['line', 'strict', 'exact', 'none'] + replace: + description: + - Instructs the module on the way to perform the configuration + on the device. If the replace argument is set to I(line) then + the modified lines are pushed to the device in configuration + mode. If the replace argument is set to I(block) then the entire + command block is pushed to the device in configuration mode if any + line is not correct. + required: false + default: line + choices: ['line', 'block'] + update: + description: + - The I(update) argument controls how the configuration statements + are processed on the remote device. Valid choices for the I(update) + argument are I(merge) and I(check). 
When the argument is set to + I(merge), the configuration changes are merged with the current + device running configuration. When the argument is set to I(check) + the configuration updates are determined but not actually configured + on the remote device. + required: false + default: merge + choices: ['merge', 'check'] + save: + description: + - The C(save) argument instructs the module to save the running- + config to the startup-config at the conclusion of the module + running. If check mode is specified, this argument is ignored. + required: false + default: no + choices: ['yes', 'no'] + config: + description: + - The C(config) argument allows the playbook designer to supply + the base configuration to be used to validate configuration + changes necessary. If this argument is provided, the module + will not download the running-config from the remote node. + required: false + default: null + backup: + description: + - This argument will cause the module to create a full backup of + the current C(running-config) from the remote device before any + changes are made. The backup file is written to the C(backup) + folder in the playbook root directory. If the directory does not + exist, it is created. 
+ required: false + default: no + choices: ['yes', 'no'] +""" + +EXAMPLES = """ +- dellos10_config: + lines: ['hostname {{ inventory_hostname }}'] + provider: "{{ cli }}" + +- dellos10_config: + lines: + - 10 permit ip host 1.1.1.1 any log + - 20 permit ip host 2.2.2.2 any log + - 30 permit ip host 3.3.3.3 any log + - 40 permit ip host 4.4.4.4 any log + - 50 permit ip host 5.5.5.5 any log + parents: ['ip access-list test'] + before: ['no ip access-list test'] + match: exact + provider: "{{ cli }}" + +- dellos10_config: + lines: + - 10 permit ip host 1.1.1.1 any log + - 20 permit ip host 2.2.2.2 any log + - 30 permit ip host 3.3.3.3 any log + - 40 permit ip host 4.4.4.4 any log + parents: ['ip access-list test'] + before: ['no ip access-list test'] + replace: block + provider: "{{ cli }}" + +""" + +RETURN = """ +updates: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['...', '...'] + +responses: + description: The set of responses from issuing the commands on the device + returned: when not check_mode + type: list + sample: ['...', '...'] + +saved: + description: Returns whether the configuration is saved to the startup + configuration or not. 
+ returned: when not check_mode + type: bool + sample: True + +""" +from ansible.module_utils.netcfg import NetworkConfig, dumps +from ansible.module_utils.network import NetworkModule +from ansible.module_utils.dellos10 import get_config, get_sublevel_config + +def get_candidate(module): + candidate = NetworkConfig(indent=1) + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) + return candidate + + +def main(): + + argument_spec = dict( + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + src=dict(type='path'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', + choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), + update=dict(choices=['merge', 'check'], default='merge'), + save=dict(type='bool', default=False), + config=dict(), + backup=dict(type='bool', default=False) + ) + + mutually_exclusive = [('lines', 'src')] + + module = NetworkModule(argument_spec=argument_spec, + connect_on_load=False, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + parents = module.params['parents'] or list() + + match = module.params['match'] + replace = module.params['replace'] + result = dict(changed=False, saved=False) + + candidate = get_candidate(module) + + if match != 'none': + config = get_config(module) + if parents: + contents = get_sublevel_config(config, module) + config = NetworkConfig(contents=contents, indent=1) + configobjs = candidate.difference(config, match=match, replace=replace) + + else: + configobjs = candidate.items + + if module.params['backup']: + result['__backup__'] = module.cli('show running-config')[0] + + commands = list() + if configobjs: + commands = dumps(configobjs, 'commands') + commands = commands.split('\n') + + if module.params['before']: + commands[:0] = 
module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + if not module.check_mode and module.params['update'] == 'merge': + response = module.config.load_config(commands) + result['responses'] = response + + if module.params['save']: + module.config.save_config() + result['saved'] = True + + result['changed'] = True + + result['updates'] = commands + + module.exit_json(**result) + +if __name__ == '__main__': + main() diff --git a/network/dellos10/dellos10_facts.py b/network/dellos10/dellos10_facts.py new file mode 100644 index 00000000000..e73785a0e15 --- /dev/null +++ b/network/dellos10/dellos10_facts.py @@ -0,0 +1,454 @@ +#!/usr/bin/python +# +# (c) 2015 Peter Sprygada, +# +# Copyright (c) 2016 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: dellos10_facts +version_added: "2.2" +author: "Senthil Kumar Ganesan (@skg-net)" +short_description: Collect facts from remote devices running Dell OS10 +description: + - Collects a base set of device facts from a remote device that + is running Dell OS10. This module prepends all of the + base network fact keys with C(ansible_net_). 
The facts + module will always collect a base set of facts from the device + and can enable or disable collection of additional facts. +extends_documentation_fragment: dellos10 +options: + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all, hardware, config, and interfaces. Can specify a list of + values to include a larger subset. Values can also be used + with an initial C(M(!)) to specify that a specific subset should + not be collected. + required: false + default: '!config' +""" + +EXAMPLES = """ +# Collect all facts from the device +- dellos10_facts: + gather_subset: all + +# Collect only the config and default facts +- dellos10_facts: + gather_subset: + - config + +# Do not collect hardware facts +- dellos10_facts: + gather_subset: + - "!hardware" +""" + +RETURN = """ +ansible_net_gather_subset: + description: The list of fact subsets collected from the device + returned: always + type: list + +# default +ansible_net_name: + description: The name of the OS which is running + returned: always + type: str +ansible_net_version: + description: The operating system version running on the remote device + returned: always + type: str +ansible_net_servicetag: + description: The service tag number of the remote device + returned: always + type: str +ansible_net_model: + description: The model name returned from the device + returned: always + type: str +ansible_net_hostname: + description: The configured hostname of the device + returned: always + type: str + +# hardware +ansible_net_cpu_arch: + description: Cpu Architecture of the remote device + returned: when hardware is configured + type: str +ansible_net_memfree_mb: + description: The available free memory on the remote device in Mb + returned: when hardware is configured + type: int +ansible_net_memtotal_mb: + description: The total memory on the remote device in Mb + returned: when hardware is 
configured + type: int + +# config +ansible_net_config: + description: The current active config from the device + returned: when config is configured + type: str + +# interfaces +ansible_net_all_ipv4_addresses: + description: All IPv4 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_all_ipv6_addresses: + description: All IPv6 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_interfaces: + description: A hash of all interfaces running on the system + returned: when interfaces is configured + type: dict +ansible_net_neighbors: + description: The list of LLDP neighbors from the remote device + returned: when interfaces is configured + type: dict +""" + +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcli import CommandRunner +from ansible.module_utils.network import NetworkModule +import ansible.module_utils.dellos10 + +try: + from lxml import etree as ET +except ImportError: + import xml.etree.ElementTree as ET + +class FactsBase(object): + + def __init__(self, runner): + self.runner = runner + self.facts = dict() + + self.commands() + + +class Default(FactsBase): + + def commands(self): + self.runner.add_command('show version | display-xml') + self.runner.add_command('show system | display-xml') + self.runner.add_command('show running-configuration | grep hostname') + + def populate(self): + + data = self.runner.get_command('show version | display-xml') + xml_data = ET.fromstring(data) + + self.facts['name'] = self.parse_name(xml_data) + self.facts['version'] = self.parse_version(xml_data) + + data = self.runner.get_command('show system | display-xml') + xml_data = ET.fromstring(data) + + self.facts['servicetag'] = self.parse_serialnum(xml_data) + self.facts['model'] = self.parse_model(xml_data) + + data = self.runner.get_command('show running-configuration | grep hostname') + self.facts['hostname'] = 
self.parse_hostname(data) + + def parse_name(self, data): + sw_name = data.find('./data/system-sw-state/sw-version/sw-name') + if sw_name is not None: + return sw_name.text + else: + return "" + + def parse_version(self, data): + sw_ver = data.find('./data/system-sw-state/sw-version/sw-version') + if sw_ver is not None: + return sw_ver.text + else: + return "" + + def parse_hostname(self, data): + match = re.search(r'hostname\s+(\S+)', data, re.M) + if match: + return match.group(1) + + def parse_model(self, data): + prod_name = data.find('./data/system/node/mfg-info/product-name') + if prod_name is not None: + return prod_name.text + else: + return "" + + def parse_serialnum(self, data): + svc_tag = data.find('./data/system/node/unit/mfg-info/service-tag') + if svc_tag is not None: + return svc_tag.text + else: + return "" + + +class Hardware(FactsBase): + + def commands(self): + self.runner.add_command('show processes memory | grep Total') + + def populate(self): + + data = self.runner.get_command('show version | display-xml') + xml_data = ET.fromstring(data) + + self.facts['cpu_arch'] = self.parse_cpu_arch(xml_data) + + data = self.runner.get_command('show processes memory | grep Total') + + match = self.parse_memory(data) + if match: + self.facts['memtotal_mb'] = int(match[0]) / 1024 + self.facts['memfree_mb'] = int(match[2]) / 1024 + + def parse_cpu_arch(self, data): + cpu_arch = data.find('./data/system-sw-state/sw-version/cpu-arch') + if cpu_arch is not None: + return cpu_arch.text + else: + return "" + + def parse_memory(self, data): + return re.findall(r'\:\s*(\d+)', data, re.M) + + +class Config(FactsBase): + + def commands(self): + self.runner.add_command('show running-config') + + def populate(self): + config = self.runner.get_command('show running-config') + self.facts['config'] = config + + +class Interfaces(FactsBase): + + def commands(self): + self.runner.add_command('show interface | display-xml') + + def populate(self): + 
self.facts['all_ipv4_addresses'] = list() + self.facts['all_ipv6_addresses'] = list() + + data = self.runner.get_command('show interface | display-xml') + + xml_data = ET.fromstring(data) + + self.facts['interfaces'] = self.populate_interfaces(xml_data) + self.facts['neighbors'] = self.populate_neighbors(xml_data) + + def populate_interfaces(self, interfaces): + int_facts = dict() + + for interface in interfaces.findall('./data/interfaces/interface'): + intf = dict() + name = self.parse_item(interface, 'name') + + intf['description'] = self.parse_item(interface, 'description') + intf['duplex'] = self.parse_item(interface, 'duplex') + intf['primary_ipv4'] = self.parse_primary_ipv4(interface) + intf['secondary_ipv4'] = self.parse_secondary_ipv4(interface) + intf['ipv6'] = self.parse_ipv6_address(interface) + intf['mtu'] = self.parse_item(interface, 'mtu') + intf['type'] = self.parse_item(interface, 'type') + + int_facts[name] = intf + + for interface in interfaces.findall('./data/interfaces-state/interface'): + name = self.parse_item(interface, 'name') + intf = int_facts[name] + intf['bandwidth'] = self.parse_item(interface, 'speed') + intf['adminstatus'] = self.parse_item(interface, 'admin-status') + intf['operstatus'] = self.parse_item(interface, 'oper-status') + intf['macaddress'] = self.parse_item(interface, 'phys-address') + + for interface in interfaces.findall('./data/ports/ports-state/port'): + name = self.parse_item(interface, 'name') + fanout = self.parse_item(interface, 'fanout-state') + mediatype = self.parse_item(interface, 'media-type') + + typ, sname = name.split('-eth') + + if fanout == "BREAKOUT_1x1": + name = "ethernet" + sname + intf = int_facts[name] + intf['mediatype'] = mediatype + else: + #TODO: Loop for the exact subport + for subport in xrange(1, 5): + name = "ethernet" + sname + ":" + str(subport) + intf = int_facts[name] + intf['mediatype'] = mediatype + + return int_facts + + def add_ip_address(self, address, family): + if family == 
'ipv4': + self.facts['all_ipv4_addresses'].append(address) + else: + self.facts['all_ipv6_addresses'].append(address) + + def parse_item(self, interface, item): + elem = interface.find(item) + if elem is not None: + return elem.text + else: + return "" + + def parse_primary_ipv4(self, interface): + ipv4 = interface.find('ipv4') + ip_address = "" + if ipv4 is not None: + prim_ipaddr = ipv4.find('./address/primary-addr') + if prim_ipaddr is not None: + ip_address = prim_ipaddr.text + self.add_ip_address(ip_address, 'ipv4') + + return ip_address + + def parse_secondary_ipv4(self, interface): + ipv4 = interface.find('ipv4') + ip_address = "" + if ipv4 is not None: + sec_ipaddr = ipv4.find('./address/secondary-addr') + if sec_ipaddr is not None: + ip_address = sec_ipaddr.text + self.add_ip_address(ip_address, 'ipv4') + + return ip_address + + def parse_ipv6_address(self, interface): + ipv6 = interface.find('ipv6') + ip_address = "" + if ipv6 is not None: + ipv6_addr = ipv6.find('./address/ipv6-address') + if ipv6_addr is not None: + ip_address = ipv6_addr.text + self.add_ip_address(ip_address, 'ipv6') + + return ip_address + + def populate_neighbors(self, interfaces): + lldp_facts = dict() + for interface in interfaces.findall('./data/interfaces-state/interface'): + name = interface.find('name').text + rem_sys_name = interface.find('./lldp-rem-neighbor-info/info/rem-system-name') + if rem_sys_name is not None: + lldp_facts[name] = list() + fact = dict() + fact['host'] = rem_sys_name.text + rem_sys_port = interface.find('./lldp-rem-neighbor-info/info/rem-lldp-port-id') + fact['port'] = rem_sys_port.text + lldp_facts[name].append(fact) + + return lldp_facts + +FACT_SUBSETS = dict( + default=Default, + hardware=Hardware, + interfaces=Interfaces, + config=Config, +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + + +def main(): + spec = dict( + gather_subset=dict(default=['!config'], type='list') + ) + + module = NetworkModule(argument_spec=spec, 
supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + runner = CommandRunner(module) + + instances = list() + for key in runable_subsets: + runs = FACT_SUBSETS[key](runner) + instances.append(runs) + + runner.run() + + try: + for inst in instances: + inst.populate() + facts.update(inst.facts) + except Exception: + module.exit_json(out=module.from_json(runner.items)) + + ansible_facts = dict() + for key, value in facts.iteritems(): + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + module.exit_json(ansible_facts=ansible_facts) + + +if __name__ == '__main__': + main() diff --git a/network/dellos6/__init__.py b/network/dellos6/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/network/dellos6/dellos6_command.py b/network/dellos6/dellos6_command.py new file mode 100644 index 00000000000..19efe7e083b --- /dev/null +++ b/network/dellos6/dellos6_command.py @@ -0,0 +1,216 @@ +#!/usr/bin/python +# +# (c) 2015 Peter Sprygada, +# +# Copyright (c) 2016 Dell Inc. 
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: dellos6_command +version_added: "2.2" +short_description: Run commands on remote devices running Dell OS6 +description: + - Sends arbitrary commands to a Dell OS6 node and returns the results + read from the device. The M(dellos6_command) module includes an + argument that will cause the module to wait for a specific condition + before returning or timing out if the condition is not met. + - This module does not support running commands in configuration mode. + Please use M(dellos6_config) to configure Dell OS6 devices. +extends_documentation_fragment: dellos6 +options: + commands: + description: + - List of commands to send to the remote dellos6 device over the + configured provider. The resulting output from the command + is returned. If the I(wait_for) argument is provided, the + module is not returned until the condition is satisfied or + the number of I(retries) as expired. + required: true + wait_for: + description: + - List of conditions to evaluate against the output of the + command. The task will wait for each condition to be true + before moving forward. If the conditional is not true + within the configured number of I(retries), the task fails. + See examples. 
+ required: false + default: null + retries: + description: + - Specifies the number of retries a command should be tried + before it is considered failed. The command is run on the + target device every retry and evaluated against the + I(wait_for) conditions. + required: false + default: 10 + interval: + description: + - Configures the interval in seconds to wait between retries + of the command. If the command does not pass the specified + conditions, the interval indicates how long to wait before + trying the command again. + required: false + default: 1 +""" + +EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + cli: + host: "{{ inventory_hostname }}" + username: admin + password: admin + transport: cli + +tasks: + - name: run show version on remote devices + dellos6_command: + commands: show version + provider "{{ cli }}" + + - name: run show version and check to see if output contains Dell + dellos6_command: + commands: show version + wait_for: result[0] contains Dell + provider "{{ cli }}" + + - name: run multiple commands on remote nodes + dellos6_command: + commands: + - show version + - show interfaces + provider "{{ cli }}" + + - name: run multiple commands and evaluate the output + dellos6_command: + commands: + - show version + - show interfaces + wait_for: + - result[0] contains Dell + - result[1] contains Access + provider "{{ cli }}" +""" + +RETURN = """ +stdout: + description: The set of responses from the commands + returned: always + type: list + sample: ['...', '...'] + +stdout_lines: + description: The value of stdout split into a list + returned: always + type: list + sample: [['...', '...'], ['...'], ['...']] + +failed_conditions: + description: The list of conditionals that have failed + returned: failed + type: list + sample: ['...', '...'] + +warnings: + description: The list of warnings (if any) generated by module based on arguments + returned: always + 
type: list + sample: ['...', '...'] +""" + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcli import CommandRunner, FailedConditionsError +from ansible.module_utils.network import NetworkModule, NetworkError +import ansible.module_utils.dellos6 + +def to_lines(stdout): + for item in stdout: + if isinstance(item, basestring): + item = str(item).split('\n') + yield item + + +def main(): + spec = dict( + commands=dict(type='list', required=True), + wait_for=dict(type='list'), + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + module = NetworkModule(argument_spec=spec, + connect_on_load=False, + supports_check_mode=True) + + commands = module.params['commands'] + conditionals = module.params['wait_for'] or list() + + warnings = list() + + runner = CommandRunner(module) + + for cmd in commands: + if module.check_mode and not cmd.startswith('show'): + warnings.append('only show commands are supported when using ' + 'check mode, not executing `%s`' % cmd) + else: + if cmd.startswith('conf'): + module.fail_json(msg='dellos6_command does not support running ' + 'config mode commands. 
Please use ' + 'dellos6_config instead') + runner.add_command(cmd) + + for item in conditionals: + runner.add_conditional(item) + + runner.retries = module.params['retries'] + runner.interval = module.params['interval'] + + try: + runner.run() + except FailedConditionsError: + exc = get_exception() + module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc)) + + result = dict(changed=False) + + result['stdout'] = list() + for cmd in commands: + try: + output = runner.get_command(cmd) + except ValueError: + output = 'command not executed due to check_mode, see warnings' + result['stdout'].append(output) + + result['warnings'] = warnings + result['stdout_lines'] = list(to_lines(result['stdout'])) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/dellos6/dellos6_config.py b/network/dellos6/dellos6_config.py new file mode 100644 index 00000000000..e2c4743319e --- /dev/null +++ b/network/dellos6/dellos6_config.py @@ -0,0 +1,285 @@ +#!/usr/bin/python +# +# (c) 2015 Peter Sprygada, +# +# Copyright (c) 2016 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: dellos6_config +version_added: "2.2" +author: "Abirami N(@abirami-n)" +short_description: Manage Dell OS6 configuration sections +description: + - Dell OS6 configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides + an implementation for working with Dell OS6 configuration sections in + a deterministic way. +extends_documentation_fragment: dellos6 +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. This argument is mutually exclusive with I(src). + required: false + default: null + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + required: false + default: null + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root dir. This argument is mutually + exclusive with I(lines). + required: false + default: null + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system. 
+ required: false + default: null + after: + description: + - The ordered set of commands to append to the end of the command + stack if a change needs to be made. Just like with I(before) this + allows the playbook designer to append a set of commands to be + executed after the command set. + required: false + default: null + match: + description: + - Instructs the module on the way to perform the matching of + the set of commands against the current device config. If + match is set to I(line), commands are matched line by line. If + match is set to I(strict), command lines are matched with respect + to position. If match is set to I(exact), command lines + must be an equal match. Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. + required: false + default: line + choices: ['line', 'strict', 'exact', 'none'] + replace: + description: + - Instructs the module on the way to perform the configuration + on the device. If the replace argument is set to I(line) then + the modified lines are pushed to the device in configuration + mode. If the replace argument is set to I(block) then the entire + command block is pushed to the device in configuration mode if any + line is not correct. + required: false + default: line + choices: ['line', 'block'] + update: + description: + - The I(update) argument controls how the configuration statements + are processed on the remote device. Valid choices for the I(update) + argument are I(merge) and I(check). When the argument is set to + I(merge), the configuration changes are merged with the current + device running configuration. When the argument is set to I(check) + the configuration updates are determined but not actually configured + on the remote device. 
+ required: false + default: merge + choices: ['merge', 'check'] + save: + description: + - The C(save) argument instructs the module to save the running- + config to the startup-config at the conclusion of the module + running. If check mode is specified, this argument is ignored. + required: false + default: no + choices: ['yes', 'no'] + config: + description: + - The C(config) argument allows the playbook designer to supply + the base configuration to be used to validate configuration + changes necessary. If this argument is provided, the module + will not download the running-config from the remote node. + required: false + default: null + backup: + description: + - This argument will cause the module to create a full backup of + the current C(running-config) from the remote device before any + changes are made. The backup file is written to the C(backup) + folder in the playbook root directory. If the directory does not + exist, it is created. + required: false + default: no + choices: ['yes', 'no'] +""" + +EXAMPLES = """ +- dellos6_config: + lines: ['hostname {{ inventory_hostname }}'] + provider: "{{ cli }}" + +- dellos6_config: + lines: + - 10 permit ip 1.1.1.1 any log + - 20 permit ip 2.2.2.2 any log + - 30 permit ip 3.3.3.3 any log + - 40 permit ip 4.4.4.4 any log + - 50 permit ip 5.5.5.5 any log + parents: ['ip access-list test'] + before: ['no ip access-list test'] + match: exact + provider: "{{ cli }}" + +- dellos6_config: + lines: + - 10 permit ip 1.1.1.1 any log + - 20 permit ip 2.2.2.2 any log + - 30 permit ip 3.3.3.3 any log + - 40 permit ip 4.4.4.4 any log + parents: ['ip access-list test'] + before: ['no ip access-list test'] + replace: block + provider: "{{ cli }}" + +""" + +RETURN = """ +updates: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['...', '...'] + +responses: + description: The set of responses from issuing the commands on the device + returned: when not 
check_mode + type: list + sample: ['...', '...'] + +saved: + description: Returns whether the configuration is saved to the startup + configuration or not. + returned: when not check_mode + type: bool + sample: True + +""" +from ansible.module_utils.netcfg import dumps +from ansible.module_utils.network import NetworkModule +from ansible.module_utils.dellos6 import get_config, get_sublevel_config, Dellos6NetworkConfig + + +def get_candidate(module): + candidate = Dellos6NetworkConfig(indent=0) + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) + return candidate + + +def main(): + + argument_spec = dict( + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + src=dict(type='path'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', + choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), + update=dict(choices=['merge', 'check'], default='merge'), + save=dict(type='bool', default=False), + config=dict(), + backup=dict(type='bool', default=False) + ) + + mutually_exclusive = [('lines', 'src')] + + module = NetworkModule(argument_spec=argument_spec, + connect_on_load=False, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + parents = module.params['parents'] or list() + + match = module.params['match'] + replace = module.params['replace'] + result = dict(changed=False, saved=False) + candidate = get_candidate(module) + + if match != 'none': + config = get_config(module) + if parents: + config = get_sublevel_config(config, module) + configobjs = candidate.difference(config, match=match, replace=replace) + else: + configobjs = candidate.items + + if module.params['backup']: + result['__backup__'] = module.cli('show running-config')[0] + + commands = list() + if configobjs: + commands = 
dumps(configobjs, 'commands') + commands = commands.split('\n') + + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + if not module.check_mode and module.params['update'] == 'merge': + response = module.config.load_config(commands) + result['responses'] = response + + if module.params['save']: + module.config.save_config() + result['saved'] = True + + result['changed'] = True + + result['updates'] = commands + + module.exit_json(**result) + +if __name__ == '__main__': + main() diff --git a/network/dellos6/dellos6_facts.py b/network/dellos6/dellos6_facts.py new file mode 100644 index 00000000000..bfb82fbc6f5 --- /dev/null +++ b/network/dellos6/dellos6_facts.py @@ -0,0 +1,445 @@ +#!/usr/bin/python +# +# (c) 2015 Peter Sprygada, +# +# Copyright (c) 2016 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: dellos6_facts +version_added: "2.2" +author: "Abirami N(@abirami-n)" +short_description: Collect facts from remote devices running Dell OS6 +description: + - Collects a base set of device facts from a remote device that + is running OS6. This module prepends all of the + base network fact keys with C(ansible_net_). 
The facts + module will always collect a base set of facts from the device + and can enable or disable collection of additional facts. +extends_documentation_fragment: dellos6 +options: + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all, hardware, config, and interfaces. Can specify a list of + values to include a larger subset. Values can also be used + with an initial C(M(!)) to specify that a specific subset should + not be collected. + required: false + default: '!config' +""" + +EXAMPLES = """ +# Collect all facts from the device +- dellos6_facts: + gather_subset: all + +# Collect only the config and default facts +- dellos6_facts: + gather_subset: + - config + +# Do not collect hardware facts +- dellos6_facts: + gather_subset: + - "!interfaces" +""" + +RETURN = """ +ansible_net_gather_subset: + description: The list of fact subsets collected from the device + returned: always + type: list + +# default +ansible_net_model: + description: The model name returned from the device + returned: always + type: str +ansible_net_serialnum: + description: The serial number of the remote device + returned: always + type: str +ansible_net_version: + description: The operating system version running on the remote device + returned: always + type: str +ansible_net_hostname: + description: The configured hostname of the device + returned: always + type: string +ansible_net_image: + description: The image file the device is running + returned: always + type: string + +# hardware +ansible_net_memfree_mb: + description: The available free memory on the remote device in Mb + returned: when hardware is configured + type: int +ansible_net_memtotal_mb: + description: The total memory on the remote device in Mb + returned: when hardware is configured + type: int + +# config +ansible_net_config: + description: The current active config from the device + returned: when 
config is configured + type: str + +# interfaces +ansible_net_interfaces: + description: A hash of all interfaces running on the system + returned: when interfaces is configured + type: dict +ansible_net_neighbors: + description: The list of LLDP neighbors from the remote device + returned: when interfaces is configured + type: dict + +""" +import re + +from ansible.module_utils.netcli import CommandRunner +from ansible.module_utils.network import NetworkModule +import ansible.module_utils.dellos6 + +class FactsBase(object): + + def __init__(self, runner): + self.runner = runner + self.facts = dict() + + self.commands() + +class Default(FactsBase): + + def commands(self): + self.runner.add_command('show version') + self.runner.add_command('show running-config | include hostname') + + def populate(self): + data = self.runner.get_command('show version') + self.facts['version'] = self.parse_version(data) + self.facts['serialnum'] = self.parse_serialnum(data) + self.facts['model'] = self.parse_model(data) + self.facts['image'] = self.parse_image(data) + hdata =self.runner.get_command('show running-config | include hostname') + self.facts['hostname'] = self.parse_hostname(hdata) + + def parse_version(self, data): + match = re.search(r'HW Version(.+)\s(\d+)', data) + if match: + return match.group(2) + + def parse_hostname(self, data): + match = re.search(r'\S+\s(\S+)', data, re.M) + if match: + return match.group(1) + + def parse_model(self, data): + match = re.search(r'System Model ID(.+)\s([A-Z0-9]*)\n', data, re.M) + if match: + return match.group(2) + + def parse_image(self, data): + match = re.search(r'Image File(.+)\s([A-Z0-9a-z_.]*)\n', data) + if match: + return match.group(2) + + def parse_serialnum(self, data): + match = re.search(r'Serial Number(.+)\s([A-Z0-9]*)\n', data) + if match: + return match.group(2) + + +class Hardware(FactsBase): + + def commands(self): + self.runner.add_command('show memory cpu') + + def populate(self): + + data = 
self.runner.get_command('show memory cpu') + match = re.findall('\s(\d+)\s', data) + if match: + self.facts['memtotal_mb'] = int(match[0]) / 1024 + self.facts['memfree_mb'] = int(match[1]) / 1024 + + +class Config(FactsBase): + + def commands(self): + self.runner.add_command('show running-config') + + def populate(self): + self.facts['config'] = self.runner.get_command('show running-config') + + +class Interfaces(FactsBase): + def commands(self): + self.runner.add_command('show interfaces') + self.runner.add_command('show interfaces status') + self.runner.add_command('show interfaces transceiver properties') + self.runner.add_command('show ip int') + self.runner.add_command('show lldp') + self.runner.add_command('show lldp remote-device all') + + def populate(self): + vlan_info = dict() + data = self.runner.get_command('show interfaces') + interfaces = self.parse_interfaces(data) + desc = self.runner.get_command('show interfaces status') + properties = self.runner.get_command('show interfaces transceiver properties') + vlan = self.runner.get_command('show ip int') + vlan_info = self.parse_vlan(vlan) + self.facts['interfaces'] = self.populate_interfaces(interfaces,desc,properties) + self.facts['interfaces'].update(vlan_info) + if 'LLDP is not enabled' not in self.runner.get_command('show lldp'): + neighbors = self.runner.get_command('show lldp remote-device all') + self.facts['neighbors'] = self.parse_neighbors(neighbors) + + def parse_vlan(self,vlan): + facts =dict() + vlan_info, vlan_info_next = vlan.split('---------- ----- --------------- --------------- -------') + for en in vlan_info_next.splitlines(): + if en == '': + continue + match = re.search('^(\S+)\s+(\S+)\s+(\S+)', en) + intf = match.group(1) + if intf not in facts: + facts[intf] = list() + fact = dict() + matc=re.search('^([\w+\s\d]*)\s+(\S+)\s+(\S+)',en) + fact['address'] = matc.group(2) + fact['masklen'] = matc.group(3) + facts[intf].append(fact) + return facts + + def populate_interfaces(self, 
interfaces, desc, properties): + facts = dict() + for key, value in interfaces.iteritems(): + intf = dict() + intf['description'] = self.parse_description(key,desc) + intf['macaddress'] = self.parse_macaddress(value) + intf['mtu'] = self.parse_mtu(value) + intf['bandwidth'] = self.parse_bandwidth(value) + intf['mediatype'] = self.parse_mediatype(key,properties) + intf['duplex'] = self.parse_duplex(value) + intf['lineprotocol'] = self.parse_lineprotocol(value) + intf['operstatus'] = self.parse_operstatus(value) + intf['type'] = self.parse_type(key,properties) + facts[key] = intf + return facts + + def parse_neighbors(self, neighbors): + facts = dict() + neighbor, neighbor_next = neighbors.split('--------- ------- ------------------- ----------------- -----------------') + for en in neighbor_next.splitlines(): + if en == '': + continue + intf = self.parse_lldp_intf(en.split()[0]) + if intf not in facts: + facts[intf] = list() + fact = dict() + fact['host'] = self.parse_lldp_host(en.split()[4]) + fact['port'] = self.parse_lldp_port(en.split()[3]) + facts[intf].append(fact) + + return facts + + def parse_interfaces(self, data): + parsed = dict() + for line in data.split('\n'): + if len(line) == 0: + continue + else: + match = re.match(r'Interface Name(.+)\s([A-Za-z0-9/]*)', line) + if match: + key = match.group(2) + parsed[key] = line + else: + parsed[key] += '\n%s' % line + return parsed + + def parse_description(self, key, desc): + desc, desc_next = desc.split('--------- --------------- ------ ------- ---- ------ ----- -- -------------------') + desc_val, desc_info = desc_next.split('Oob') + for en in desc_val.splitlines(): + if key in en: + match = re.search('^(\S+)\s+(\S+)', en) + if match.group(2) in ['Full','N/A']: + return "Null" + else: + return match.group(2) + + def parse_macaddress(self, data): + match = re.search(r'Burned MAC Address(.+)\s([A-Z0-9.]*)\n', data) + if match: + return match.group(2) + + def parse_mtu(self, data): + match = re.search(r'MTU 
Size(.+)\s(\d+)\n', data) + if match: + return int(match.group(2)) + + def parse_bandwidth(self, data): + match = re.search(r'Port Speed(.+)\s(\d+)\n', data) + if match: + return int(match.group(2)) + + def parse_duplex(self, data): + match = re.search(r'Port Mode\s([A-Za-z]*)(.+)\s([A-Za-z/]*)\n', data) + if match: + return match.group(3) + + def parse_mediatype(self, key, properties): + mediatype, mediatype_next = properties.split('--------- ------- --------------------- --------------------- --------------') + flag=1 + for en in mediatype_next.splitlines(): + if key in en: + flag=0 + match = re.search('^(\S+)\s+(\S+)\s+(\S+)',en) + if match: + strval = match.group(3) + return match.group(3) + if flag==1: + return "null" + + def parse_type(self, key, properties): + type_val, type_val_next = properties.split('--------- ------- --------------------- --------------------- --------------') + flag=1 + for en in type_val_next.splitlines(): + if key in en: + flag=0 + match = re.search('^(\S+)\s+(\S+)\s+(\S+)',en) + if match: + strval = match.group(2) + return match.group(2) + if flag==1: + return "null" + + def parse_lineprotocol(self, data): + match = re.search(r'Link Status.*\s(\S+)\s+(\S+)\n', data) + if match: + strval= match.group(2) + return strval.strip('/') + + def parse_operstatus(self, data): + match = re.search(r'Link Status.*\s(\S+)\s+(\S+)\n', data) + if match: + return match.group(1) + + def parse_lldp_intf(self, data): + match = re.search(r'^([A-Za-z0-9/]*)', data) + if match: + return match.group(1) + + def parse_lldp_host(self, data): + match = re.search(r'^([A-Za-z0-9]*)', data) + if match: + return match.group(1) + + def parse_lldp_port(self, data): + match = re.search(r'^([A-Za-z0-9/]*)', data) + if match: + return match.group(1) + + +FACT_SUBSETS = dict( + default=Default, + hardware=Hardware, + interfaces=Interfaces, + config=Config, +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + +def main(): + spec = dict( + 
gather_subset=dict(default=['!config'], type='list') + ) + module = NetworkModule(argument_spec=spec, supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + runner = CommandRunner(module) + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](runner)) + runner.run() + + try: + for inst in instances: + inst.populate() + facts.update(inst.facts) + except Exception: + module.exit_json(out=module.from_json(runner.items)) + + ansible_facts = dict() + for key, value in facts.iteritems(): + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + module.exit_json(ansible_facts=ansible_facts) + + +if __name__ == '__main__': + main() + diff --git a/network/dellos9/__init__.py b/network/dellos9/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/network/dellos9/dellos9_command.py b/network/dellos9/dellos9_command.py new file mode 100755 index 00000000000..fcd70f0c35e --- /dev/null +++ b/network/dellos9/dellos9_command.py @@ -0,0 +1,227 @@ +#!/usr/bin/python +# +# (c) 2015 Peter Sprygada, +# +# Copyright (c) 2016 Dell Inc. 
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: dellos9_command +version_added: "2.2" +author: "Dhivya P (@dhivyap)" +short_description: Run commands on remote devices running Dell OS9 +description: + - Sends arbitrary commands to a Dell OS9 node and returns the results + read from the device. This module includes an + argument that will cause the module to wait for a specific condition + before returning or timing out if the condition is not met. + - This module does not support running commands in configuration mode. + Please use M(dellos9_config) to configure Dell OS9 devices. +extends_documentation_fragment: dellos9 +options: + commands: + description: + - List of commands to send to the remote dellos9 device over the + configured provider. The resulting output from the command + is returned. If the I(wait_for) argument is provided, the + module is not returned until the condition is satisfied or + the number of retries has expired. + required: true + wait_for: + description: + - List of conditions to evaluate against the output of the + command. The task will wait for each condition to be true + before moving forward. If the conditional is not true + within the configured number of I(retries), the task fails. + See examples. 
+ required: false + default: null + retries: + description: + - Specifies the number of retries a command should be tried + before it is considered failed. The command is run on the + target device every retry and evaluated against the + I(wait_for) conditions. + required: false + default: 10 + interval: + description: + - Configures the interval in seconds to wait between retries + of the command. If the command does not pass the specified + conditions, the interval indicates how long to wait before + trying the command again. + required: false + default: 1 + +notes: + - This module requires Dell OS9 version 9.10.0.1P13 or above. + + - This module requires to increase the ssh connection rate limit. + Use the following command I(ip ssh connection-rate-limit 60) + to configure the same. This can be done via M(dnos_config) module + as well. + +""" + +EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + cli: + host: "{{ inventory_hostname }}" + username: admin + password: admin + transport: cli + +tasks: + - name: run show version on remote devices + dellos9_command: + commands: show version + provider: "{{ cli }}" + + - name: run show version and check to see if output contains OS9 + dellos9_command: + commands: show version + wait_for: result[0] contains OS9 + provider: "{{ cli }}" + + - name: run multiple commands on remote nodes + dellos9_command: + commands: + - show version + - show interfaces + provider: "{{ cli }}" + + - name: run multiple commands and evaluate the output + dellos9_command: + commands: + - show version + - show interfaces + wait_for: + - result[0] contains OS9 + - result[1] contains Loopback + provider: "{{ cli }}" +""" + +RETURN = """ +stdout: + description: The set of responses from the commands + returned: always + type: list + sample: ['...', '...'] + +stdout_lines: + description: The value of stdout split into a list + returned: always + type: list + sample: 
[['...', '...'], ['...'], ['...']] + +failed_conditions: + description: The list of conditionals that have failed + returned: failed + type: list + sample: ['...', '...'] + +warnings: + description: The list of warnings (if any) generated by module based on arguments + returned: always + type: list + sample: ['...', '...'] +""" + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcli import CommandRunner, FailedConditionsError +from ansible.module_utils.network import NetworkModule, NetworkError +import ansible.module_utils.dellos9 + + +def to_lines(stdout): + for item in stdout: + if isinstance(item, basestring): + item = str(item).split('\n') + yield item + + +def main(): + spec = dict( + commands=dict(type='list', required=True), + wait_for=dict(type='list'), + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + module = NetworkModule(argument_spec=spec, + connect_on_load=False, + supports_check_mode=True) + + commands = module.params['commands'] + conditionals = module.params['wait_for'] or list() + + warnings = list() + + runner = CommandRunner(module) + + for cmd in commands: + if module.check_mode and not cmd.startswith('show'): + warnings.append('only show commands are supported when using ' + 'check mode, not executing `%s`' % cmd) + else: + if cmd.startswith('conf'): + module.fail_json(msg='dellos9_command does not support running ' + 'config mode commands. 
Please use ' + 'dellos9_config instead') + runner.add_command(cmd) + + for item in conditionals: + runner.add_conditional(item) + + runner.retries = module.params['retries'] + runner.interval = module.params['interval'] + + try: + runner.run() + except FailedConditionsError: + exc = get_exception() + module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc)) + + result = dict(changed=False) + + result['stdout'] = list() + for cmd in commands: + try: + output = runner.get_command(cmd) + except ValueError: + output = 'command not executed due to check_mode, see warnings' + result['stdout'].append(output) + + result['warnings'] = warnings + result['stdout_lines'] = list(to_lines(result['stdout'])) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/dellos9/dellos9_config.py b/network/dellos9/dellos9_config.py new file mode 100755 index 00000000000..bee90ee1c3f --- /dev/null +++ b/network/dellos9/dellos9_config.py @@ -0,0 +1,297 @@ +#!/usr/bin/python +# +# (c) 2015 Peter Sprygada, +# +# Copyright (c) 2016 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: dellos9_config +version_added: "2.2" +author: "Dhivya P (@dhivyap)" +short_description: Manage Dell OS9 configuration sections +description: + - Dell OS9 configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides + an implementation for working with Dell OS9 configuration sections in + a deterministic way. +extends_documentation_fragment: dellos9 +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. This argument is mutually exclusive with I(src). + required: false + default: null + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + required: false + default: null + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root dir. This argument is mutually + exclusive with I(lines). + required: false + default: null + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system. 
+ required: false + default: null + after: + description: + - The ordered set of commands to append to the end of the command + stack if a change needs to be made. Just like with I(before) this + allows the playbook designer to append a set of commands to be + executed after the command set. + required: false + default: null + match: + description: + - Instructs the module on the way to perform the matching of + the set of commands against the current device config. If + match is set to I(line), commands are matched line by line. If + match is set to I(strict), command lines are matched with respect + to position. If match is set to I(exact), command lines + must be an equal match. Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. + required: false + default: line + choices: ['line', 'strict', 'exact', 'none'] + replace: + description: + - Instructs the module on the way to perform the configuration + on the device. If the replace argument is set to I(line) then + the modified lines are pushed to the device in configuration + mode. If the replace argument is set to I(block) then the entire + command block is pushed to the device in configuration mode if any + line is not correct. + required: false + default: line + choices: ['line', 'block'] + update: + description: + - The I(update) argument controls how the configuration statements + are processed on the remote device. Valid choices for the I(update) + argument are I(merge) and I(check). When the argument is set to + I(merge), the configuration changes are merged with the current + device running configuration. When the argument is set to I(check) + the configuration updates are determined but not actually configured + on the remote device. 
+ required: false + default: merge + choices: ['merge', 'check'] + save: + description: + - The C(save) argument instructs the module to save the running- + config to the startup-config at the conclusion of the module + running. If check mode is specified, this argument is ignored. + required: false + default: no + choices: ['yes', 'no'] + config: + description: + - The C(config) argument allows the playbook designer to supply + the base configuration to be used to validate configuration + changes necessary. If this argument is provided, the module + will not download the running-config from the remote node. + required: false + default: null + backup: + description: + - This argument will cause the module to create a full backup of + the current C(running-config) from the remote device before any + changes are made. The backup file is written to the C(backup) + folder in the playbook root directory. If the directory does not + exist, it is created. + required: false + default: no + choices: ['yes', 'no'] + +notes: + - This module requires Dell OS9 version 9.10.0.1P13 or above. + + - This module requires to increase the ssh connection rate limit. + Use the following command I(ip ssh connection-rate-limit 60) + to configure the same. This can be done via M(dnos_config) module + as well. 
+""" + +EXAMPLES = """ +- dellos9_config: + lines: ['hostname {{ inventory_hostname }}'] + provider: "{{ cli }}" + +- dellos9_config: + lines: + - 10 permit ip host 1.1.1.1 any log + - 20 permit ip host 2.2.2.2 any log + - 30 permit ip host 3.3.3.3 any log + - 40 permit ip host 4.4.4.4 any log + - 50 permit ip host 5.5.5.5 any log + parents: ['ip access-list extended test'] + before: ['no ip access-list extended test'] + match: exact + provider: "{{ cli }}" + +- dellos9_config: + lines: + - 10 permit ip host 1.1.1.1 any log + - 20 permit ip host 2.2.2.2 any log + - 30 permit ip host 3.3.3.3 any log + - 40 permit ip host 4.4.4.4 any log + parents: ['ip access-list extended test'] + before: ['no ip access-list extended test'] + replace: block + provider: "{{ cli }}" + +""" + +RETURN = """ +updates: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['...', '...'] + +responses: + description: The set of responses from issuing the commands on the device + returned: when not check_mode + type: list + sample: ['...', '...'] + +saved: + description: Returns whether the configuration is saved to the startup + configuration or not. 
+ returned: when not check_mode + type: bool + sample: True + +""" +from ansible.module_utils.netcfg import NetworkConfig, dumps +from ansible.module_utils.network import NetworkModule +from ansible.module_utils.dellos9 import get_config, get_sublevel_config + + +def get_candidate(module): + candidate = NetworkConfig(indent=1) + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) + return candidate + + +def main(): + + argument_spec = dict( + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + src=dict(type='path'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', + choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), + + update=dict(choices=['merge', 'check'], default='merge'), + save=dict(type='bool', default=False), + config=dict(), + backup=dict(type='bool', default=False) + ) + + mutually_exclusive = [('lines', 'src')] + + module = NetworkModule(argument_spec=argument_spec, + connect_on_load=False, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + parents = module.params['parents'] or list() + + match = module.params['match'] + replace = module.params['replace'] + result = dict(changed=False, saved=False) + + candidate = get_candidate(module) + + if match != 'none': + config = get_config(module) + if parents: + contents = get_sublevel_config(config, module) + config = NetworkConfig(contents=contents, indent=1) + configobjs = candidate.difference(config, match=match, replace=replace) + + else: + configobjs = candidate.items + + if module.params['backup']: + result['__backup__'] = module.cli('show running-config')[0] + + commands = list() + if configobjs: + commands = dumps(configobjs, 'commands') + commands = commands.split('\n') + + if module.params['before']: + commands[:0] = 
module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + if not module.check_mode and module.params['update'] == 'merge': + response = module.config.load_config(commands) + result['responses'] = response + + if module.params['save']: + module.config.save_config() + result['saved'] = True + + result['changed'] = True + + result['updates'] = commands + + module.exit_json(**result) + +if __name__ == '__main__': + main() diff --git a/network/dellos9/dellos9_facts.py b/network/dellos9/dellos9_facts.py new file mode 100644 index 00000000000..fe752ac373f --- /dev/null +++ b/network/dellos9/dellos9_facts.py @@ -0,0 +1,567 @@ +#!/usr/bin/python +# +# (c) 2015 Peter Sprygada, +# +# Copyright (c) 2016 Dell Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: dellos9_facts +version_added: "2.2" +author: "Dhivya P (@dhivyap)" +short_description: Collect facts from remote devices running Dell OS9 +description: + - Collects a base set of device facts from a remote device that + is running Dell OS9. This module prepends all of the + base network fact keys with C(ansible_net_). The facts + module will always collect a base set of facts from the device + and can enable or disable collection of additional facts. 
+extends_documentation_fragment: dellos9 +options: + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all, hardware, config, and interfaces. Can specify a list of + values to include a larger subset. Values can also be used + with an initial C(M(!)) to specify that a specific subset should + not be collected. + required: false + default: '!config' +notes: + - This module requires Dell OS9 version 9.10.0.1P13 or above. + + - This module requires to increase the ssh connection rate limit. + Use the following command I(ip ssh connection-rate-limit 60) + to configure the same. This can be done via M(dnos_config) module + as well. +""" + +EXAMPLES = """ +# Collect all facts from the device +- dellos9_facts: + gather_subset: all + +# Collect only the config and default facts +- dellos9_facts: + gather_subset: + - config + +# Do not collect hardware facts +- dellos9_facts: + gather_subset: + - "!hardware" +""" + +RETURN = """ +ansible_net_gather_subset: + description: The list of fact subsets collected from the device + returned: always + type: list + +# default +ansible_net_model: + description: The model name returned from the device + returned: always + type: str +ansible_net_serialnum: + description: The serial number of the remote device + returned: always + type: str +ansible_net_version: + description: The operating system version running on the remote device + returned: always + type: str +ansible_net_hostname: + description: The configured hostname of the device + returned: always + type: string +ansible_net_image: + description: The image file the device is running + returned: always + type: string + +# hardware +ansible_net_filesystems: + description: All file system names available on the device + returned: when hardware is configured + type: list +ansible_net_memfree_mb: + description: The available free memory on the remote device in Mb + 
returned: when hardware is configured + type: int +ansible_net_memtotal_mb: + description: The total memory on the remote device in Mb + returned: when hardware is configured + type: int + +# config +ansible_net_config: + description: The current active config from the device + returned: when config is configured + type: str + +# interfaces +ansible_net_all_ipv4_addresses: + description: All IPv4 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_all_ipv6_addresses: + description: All IPv6 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_interfaces: + description: A hash of all interfaces running on the system + returned: when interfaces is configured + type: dict +ansible_net_neighbors: + description: The list of LLDP neighbors from the remote device + returned: when interfaces is configured + type: dict +""" +import re +import itertools + +from ansible.module_utils.netcli import CommandRunner +from ansible.module_utils.network import NetworkModule +import ansible.module_utils.dellos9 + + +class FactsBase(object): + + def __init__(self, runner): + self.runner = runner + self.facts = dict() + + self.commands() + + +class Default(FactsBase): + + def commands(self): + self.runner.add_command('show version') + self.runner.add_command('show inventory') + self.runner.add_command('show running-config | grep hostname') + + def populate(self): + data = self.runner.get_command('show version') + self.facts['version'] = self.parse_version(data) + self.facts['model'] = self.parse_model(data) + self.facts['image'] = self.parse_image(data) + + data = self.runner.get_command('show inventory') + self.facts['serialnum'] = self.parse_serialnum(data) + + data = self.runner.get_command('show running-config | grep hostname') + self.facts['hostname'] = self.parse_hostname(data) + + def parse_version(self, data): + match = re.search(r'Software Version:\s*(.+)', data) + if match: + 
return match.group(1) + + def parse_hostname(self, data): + match = re.search(r'^hostname (.+)', data, re.M) + if match: + return match.group(1) + + def parse_model(self, data): + match = re.search(r'^System Type:\s*(.+)', data, re.M) + if match: + return match.group(1) + + def parse_image(self, data): + match = re.search(r'image file is "(.+)"', data) + if match: + return match.group(1) + + def parse_serialnum(self, data): + for line in data.split('\n'): + if line.startswith('*'): + match = re.search( + r'\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)', line, re.M) + if match: + return match.group(3) + + +class Hardware(FactsBase): + + def commands(self): + self.runner.add_command('show file-systems') + self.runner.add_command('show memory | except Processor') + + def populate(self): + data = self.runner.get_command('show file-systems') + self.facts['filesystems'] = self.parse_filesystems(data) + + data = self.runner.get_command('show memory | except Processor') + match = re.findall('\s(\d+)\s', data) + if match: + self.facts['memtotal_mb'] = int(match[0]) / 1024 + self.facts['memfree_mb'] = int(match[2]) / 1024 + + def parse_filesystems(self, data): + return re.findall(r'\s(\S+):$', data, re.M) + + +class Config(FactsBase): + + def commands(self): + self.runner.add_command('show running-config') + + def populate(self): + self.facts['config'] = self.runner.get_command('show running-config') + + +class Interfaces(FactsBase): + + def commands(self): + self.runner.add_command('show interfaces') + self.runner.add_command('show ipv6 interface') + self.runner.add_command('show lldp neighbors detail') + + def populate(self): + self.facts['all_ipv4_addresses'] = list() + self.facts['all_ipv6_addresses'] = list() + + data = self.runner.get_command('show interfaces') + interfaces = self.parse_interfaces(data) + + for key in interfaces.keys(): + if "ManagementEthernet" in key: + temp_parsed = interfaces[key] + del interfaces[key] + 
interfaces.update(self.parse_mgmt_interfaces(temp_parsed)) + + for key in interfaces.keys(): + if "Vlan" in key: + temp_parsed = interfaces[key] + del interfaces[key] + interfaces.update(self.parse_vlan_interfaces(temp_parsed)) + + self.facts['interfaces'] = self.populate_interfaces(interfaces) + + data = self.runner.get_command('show ipv6 interface') + if len(data) > 0: + data = self.parse_ipv6_interfaces(data) + self.populate_ipv6_interfaces(data) + + data = self.runner.get_command('show inventory') + if 'LLDP' in self.get_protocol_list(data): + neighbors = self.runner.get_command('show lldp neighbors detail') + self.facts['neighbors'] = self.parse_neighbors(neighbors) + + def get_protocol_list(self, data): + start = False + protocol_list = list() + for line in data.split('\n'): + match = re.search(r'Software Protocol Configured\s*', line) + if match: + start = True + continue + if start: + line = line.strip() + if line.isalnum(): + protocol_list.append(line) + return protocol_list + + def populate_interfaces(self, interfaces): + facts = dict() + for key, value in interfaces.iteritems(): + intf = dict() + intf['description'] = self.parse_description(value) + intf['macaddress'] = self.parse_macaddress(value) + ipv4 = self.parse_ipv4(value) + intf['ipv4'] = self.parse_ipv4(value) + if ipv4: + self.add_ip_address(ipv4['address'], 'ipv4') + + intf['mtu'] = self.parse_mtu(value) + intf['bandwidth'] = self.parse_bandwidth(value) + intf['mediatype'] = self.parse_mediatype(value) + intf['duplex'] = self.parse_duplex(value) + intf['lineprotocol'] = self.parse_lineprotocol(value) + intf['operstatus'] = self.parse_operstatus(value) + intf['type'] = self.parse_type(value) + + facts[key] = intf + return facts + + def populate_ipv6_interfaces(self, data): + for key, value in data.iteritems(): + self.facts['interfaces'][key]['ipv6'] = list() + addresses = re.findall(r'\s+(.+), subnet', value, re.M) + subnets = re.findall(r', subnet is (\S+)', value, re.M) + for addr, subnet in 
itertools.izip(addresses, subnets): + ipv6 = dict(address=addr.strip(), subnet=subnet.strip()) + self.add_ip_address(addr.strip(), 'ipv6') + self.facts['interfaces'][key]['ipv6'].append(ipv6) + + def add_ip_address(self, address, family): + if family == 'ipv4': + self.facts['all_ipv4_addresses'].append(address) + else: + self.facts['all_ipv6_addresses'].append(address) + + def parse_neighbors(self, neighbors): + facts = dict() + + for entry in neighbors.split( + '========================================================================'): + if entry == '': + continue + + intf = self.parse_lldp_intf(entry) + if intf not in facts: + facts[intf] = list() + fact = dict() + fact['host'] = self.parse_lldp_host(entry) + fact['port'] = self.parse_lldp_port(entry) + facts[intf].append(fact) + return facts + + def parse_interfaces(self, data): + parsed = dict() + newline_count = 0 + interface_start = True + + for line in data.split('\n'): + if interface_start: + newline_count = 0 + if len(line) == 0: + newline_count += 1 + if newline_count == 2: + interface_start = True + continue + else: + match = re.match(r'^(\S+) (\S+)', line) + if match and interface_start: + interface_start = False + key = match.group(0) + parsed[key] = line + else: + parsed[key] += '\n%s' % line + return parsed + + def parse_mgmt_interfaces(self, data): + parsed = dict() + interface_start = True + for line in data.split('\n'): + match = re.match(r'^(\S+) (\S+)', line) + if "Time since" in line: + interface_start = True + parsed[key] += '\n%s' % line + continue + elif match and interface_start: + interface_start = False + key = match.group(0) + parsed[key] = line + else: + parsed[key] += '\n%s' % line + return parsed + + def parse_vlan_interfaces(self, data): + parsed = dict() + interface_start = True + line_before_end = False + for line in data.split('\n'): + match = re.match(r'^(\S+) (\S+)', line) + match_endline = re.match(r'^\s*\d+ packets, \d+ bytes$', line) + + if "Output Statistics" in line: + 
line_before_end = True + parsed[key] += '\n%s' % line + elif match_endline and line_before_end: + line_before_end = False + interface_start = True + parsed[key] += '\n%s' % line + elif match and interface_start: + interface_start = False + key = match.group(0) + parsed[key] = line + else: + parsed[key] += '\n%s' % line + return parsed + + def parse_ipv6_interfaces(self, data): + parsed = dict() + for line in data.split('\n'): + if len(line) == 0: + continue + elif line[0] == ' ': + parsed[key] += '\n%s' % line + else: + match = re.match(r'^(\S+) (\S+)', line) + if match: + key = match.group(0) + parsed[key] = line + return parsed + + def parse_description(self, data): + match = re.search(r'Description: (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_macaddress(self, data): + match = re.search(r'address is (\S+)', data) + if match: + if match.group(1) != "not": + return match.group(1) + + def parse_ipv4(self, data): + match = re.search(r'Internet address is (\S+)', data) + if match: + if match.group(1) != "not": + addr, masklen = match.group(1).split('/') + return dict(address=addr, masklen=int(masklen)) + + def parse_mtu(self, data): + match = re.search(r'MTU (\d+)', data) + if match: + return int(match.group(1)) + + def parse_bandwidth(self, data): + match = re.search(r'LineSpeed (\d+)', data) + if match: + return int(match.group(1)) + + def parse_duplex(self, data): + match = re.search(r'(\w+) duplex', data, re.M) + if match: + return match.group(1) + + def parse_mediatype(self, data): + media = re.search(r'(.+) media present, (.+)', data, re.M) + if media: + match = re.search(r'type is (.+)$', media.group(0), re.M) + return match.group(1) + + def parse_type(self, data): + match = re.search(r'Hardware is (.+),', data, re.M) + if match: + return match.group(1) + + def parse_lineprotocol(self, data): + match = re.search(r'line protocol is (\w+[ ]?\w*)\(?.*\)?$', data, re.M) + if match: + return match.group(1) + + def parse_operstatus(self, 
data): + match = re.search(r'^(?:.+) is (.+),', data, re.M) + if match: + return match.group(1) + + def parse_lldp_intf(self, data): + match = re.search(r'^\sLocal Interface (\S+\s\S+)', data, re.M) + if match: + return match.group(1) + + def parse_lldp_host(self, data): + match = re.search(r'Remote System Name: (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_lldp_port(self, data): + match = re.search(r'Remote Port ID: (.+)$', data, re.M) + if match: + return match.group(1) + + +FACT_SUBSETS = dict( + default=Default, + hardware=Hardware, + interfaces=Interfaces, + config=Config, +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + + +def main(): + spec = dict( + gather_subset=dict(default=['!config'], type='list') + ) + + module = NetworkModule(argument_spec=spec, supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + runner = CommandRunner(module) + + instances = list() + for key in runable_subsets: + runs = FACT_SUBSETS[key](runner) + instances.append(runs) + + runner.run() + + try: + for inst in instances: + inst.populate() + facts.update(inst.facts) + except Exception: + module.exit_json(out=module.from_json(runner.items)) + + ansible_facts = dict() + for key, value in facts.iteritems(): + key = 'ansible_net_%s' % 
key + ansible_facts[key] = value + + module.exit_json(ansible_facts=ansible_facts) + + +if __name__ == '__main__': + main() diff --git a/network/eos/eos_template.py b/network/eos/_eos_template.py similarity index 77% rename from network/eos/eos_template.py rename to network/eos/_eos_template.py index 550d3bad616..35be114cf7f 100644 --- a/network/eos/eos_template.py +++ b/network/eos/_eos_template.py @@ -15,19 +15,24 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ --- module: eos_template version_added: "2.1" -author: "Peter sprygada (@privateip)" +author: "Peter Sprygada (@privateip)" short_description: Manage Arista EOS device configurations description: - Manages network device configurations over SSH or eAPI. This module - allows implementors to work with the device running-config. It + allows implementers to work with the device running-config. It provides a way to push a set of commands onto a network device - by evaluting the current running-config and only pushing configuration + by evaluating the current running-config and only pushing configuration commands that are not already configured. The config source can be a set of commands or a template. +deprecated: Deprecated in 2.2. Use eos_config instead extends_documentation_fragment: eos options: src: @@ -49,9 +54,9 @@ include_defaults: description: - By default when the M(eos_template) connects to the remote - device to retrieve the configuration it will issue the `show - running-config` command. If this option is set to True then - the issued command will be `show running-config all` + device to retrieve the configuration it will issue the C(show + running-config) command. If this option is set to True then + the issued command will be C(show running-config all). 
required: false default: false choices: ['yes', 'no'] @@ -69,8 +74,7 @@ - This argument will cause the provided configuration to be replaced on the destination node. The use of the replace argument will always cause the task to set changed to true and will implies - I(force) is true. This argument is only valid with I(transport) - is eapi. + C(force=true). This argument is only valid with C(transport=eapi). required: false default: false choices: ['yes', 'no'] @@ -81,23 +85,23 @@ against the contents of source. There are times when it is not desirable to have the task get the current running-config for every task in a playbook. The I(config) argument allows the - implementer to pass in the configuruation to use as the base - config for comparision. + implementer to pass in the configuration to use as the base + config for comparison. required: false default: null """ EXAMPLES = """ -- name: push a configuration onto the device +- name: Push a configuration onto the device eos_template: src: config.j2 -- name: forceable push a configuration onto the device +- name: Forceable push a configuration onto the device eos_template: src: config.j2 force: yes -- name: provide the base configuration for comparision +- name: Provide the base configuration for comparison eos_template: src: candidate_config.txt config: current_config.txt @@ -112,17 +116,22 @@ responses: description: The set of responses from issuing the commands on the device - retured: when not check_mode + returned: when not check_mode type: list sample: ['...', '...'] """ - import re +import ansible.module_utils.eos + +from ansible.module_utils.network import NetworkModule +from ansible.module_utils.netcfg import NetworkConfig, dumps + def get_config(module): config = module.params.get('config') + defaults = module.params['include_defaults'] if not config and not module.params['force']: - config = module.config + config = module.config.get_config(include_defaults=defaults) return config def 
filter_exit(commands): @@ -164,9 +173,9 @@ def main(): mutually_exclusive = [('config', 'backup'), ('config', 'force')] - module = get_module(argument_spec=argument_spec, - mutually_exclusive=mutually_exclusive, - supports_check_mode=True) + module = NetworkModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) replace = module.params['replace'] @@ -189,24 +198,21 @@ def main(): if not module.params['force']: commands = candidate.difference((running or list())) + commands = dumps(commands, 'commands').split('\n') + commands = [str(c) for c in commands if c] else: commands = str(candidate).split('\n') + commands = filter_exit(commands) if commands: - commands = filter_exit(commands) if not module.check_mode: - commands = [str(c).strip() for c in commands] - response = module.configure(commands, replace=replace) + response = module.config.load_config(commands, replace=replace, + commit=True) result['responses'] = response result['changed'] = True result['updates'] = commands module.exit_json(**result) -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -from ansible.module_utils.shell import * -from ansible.module_utils.netcfg import * -from ansible.module_utils.eos import * if __name__ == '__main__': main() diff --git a/network/eos/eos_command.py b/network/eos/eos_command.py index aab2d67a5dc..110cfb60cea 100644 --- a/network/eos/eos_command.py +++ b/network/eos/eos_command.py @@ -16,15 +16,19 @@ # along with Ansible. If not, see . 
# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = """ --- module: eos_command version_added: "2.1" -author: "Peter sprygada (@privateip)" -short_description: Run arbitrary command on EOS device +author: "Peter Sprygada (@privateip)" +short_description: Run arbitrary commands on an Arista EOS device description: - - Sends an aribtrary set of commands to and EOS node and returns the results - read from the device. The M(eos_command) modulule includes an + - Sends an arbitrary set of commands to an EOS node and returns the results + read from the device. This module includes an argument that will cause the module to wait for a specific condition before returning or timing out if the condition is not met. extends_documentation_fragment: eos @@ -33,11 +37,11 @@ description: - The commands to send to the remote EOS device over the configured provider. The resulting output from the command - is returned. If the I(waitfor) argument is provided, the + is returned. If the I(wait_for) argument is provided, the module is not returned until the condition is satisfied or - the number of retires as expired. + the number of I(retries) has been exceeded. required: true - waitfor: + wait_for: description: - Specifies what to evaluate from the output of the command and what conditionals to apply. This argument will cause @@ -46,12 +50,26 @@ by the configured retries, the task fails. See examples. required: false default: null + aliases: ['waitfor'] + version_added: "2.2" + match: + description: + - The I(match) argument is used in conjunction with the + I(wait_for) argument to specify the match policy. Valid + values are C(all) or C(any). If the value is set to C(all) + then all conditionals in the I(wait_for) must be satisfied. If + the value is set to C(any) then only one of the values must be + satisfied. 
+ required: false + default: all + choices: ['any', 'all'] + version_added: "2.2" retries: description: - - Specifies the number of retries a command should by tried + - Specifies the number of retries a command should be tried before it is considered failed. The command is run on the - target device every retry and evaluated against the waitfor - conditionals + target device every retry and evaluated against the I(wait_for) + conditionals. required: false default: 10 interval: @@ -65,29 +83,49 @@ """ EXAMPLES = """ -- eos_command: - commands: "{{ lookup('file', 'commands.txt') }}" +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + cli: + host: "{{ inventory_hostname }}" + username: admin + password: admin + transport: cli + +- name: run show version on remote devices + eos_command: + commands: show version + provider: "{{ cli }}" -- eos_command: +- name: run show version and check to see if output contains Arista + eos_command: + commands: show version + wait_for: result[0] contains Arista + provider: "{{ cli }}" + +- name: run multiple commands on remote nodes + eos_command: commands: - - show interface {{ item }} - with_items: interfaces + - show version + - show interfaces + provider: "{{ cli }}" -- eos_command: +- name: run multiple commands and evaluate the output + eos_command: commands: - show version - waitfor: - - "result[0] contains 4.15.0F" + - show interfaces + wait_for: + - result[0] contains Arista + - result[1] contains Loopback0 + provider: "{{ cli }}" -- eos_command: - commands: - - show version | json - - show interfaces | json - - show version - waitfor: - - "result[2] contains '4.15.0F'" - - "result[1].interfaces.Management1.interfaceAddress[0].primaryIp.maskLen eq 24" - - "result[0].modelName == 'vEOS'" +- name: run commands and specify the output format + eos_command: + commands: + - command: show version + output: json + provider: "{{ cli }}" """ RETURN = """ @@ -98,85 
+136,125 @@ sample: ['...', '...'] stdout_lines: - description: The value of stdout split into a list + description: the value of stdout split into a list returned: always type: list sample: [['...', '...'], ['...'], ['...']] failed_conditions: description: the conditionals that failed - retured: failed + returned: failed type: list sample: ['...', '...'] """ -import time -import shlex -import re +import ansible.module_utils.eos -INDEX_RE = re.compile(r'(\[\d+\])') +from ansible.module_utils.basic import get_exception +from ansible.module_utils.network import NetworkModule, NetworkError +from ansible.module_utils.netcli import CommandRunner +from ansible.module_utils.netcli import AddCommandError, AddConditionError +from ansible.module_utils.netcli import FailedConditionsError +from ansible.module_utils.netcli import FailedConditionalError +from ansible.module_utils.six import string_types -def iterlines(stdout): +VALID_KEYS = ['command', 'output', 'prompt', 'response'] + +def to_lines(stdout): for item in stdout: - if isinstance(item, basestring): + if isinstance(item, string_types): item = str(item).split('\n') yield item +def parse_commands(module): + for cmd in module.params['commands']: + if isinstance(cmd, string_types): + cmd = dict(command=cmd, output=None) + elif 'command' not in cmd: + module.fail_json(msg='command keyword argument is required') + elif cmd.get('output') not in [None, 'text', 'json']: + module.fail_json(msg='invalid output specified for command') + elif not set(cmd.keys()).issubset(VALID_KEYS): + module.fail_json(msg='unknown command keyword specified. 
Valid ' + 'values are %s' % ', '.join(VALID_KEYS)) + yield cmd + def main(): spec = dict( - commands=dict(type='list'), - waitfor=dict(type='list'), + # { command: , output: , prompt: , response: } + commands=dict(type='list', required=True), + + wait_for=dict(type='list', aliases=['waitfor']), + match=dict(default='all', choices=['all', 'any']), + retries=dict(default=10, type='int'), interval=dict(default=1, type='int') ) - module = get_module(argument_spec=spec, - supports_check_mode=True) + module = NetworkModule(argument_spec=spec, + supports_check_mode=True) - commands = module.params['commands'] + commands = list(parse_commands(module)) + conditionals = module.params['wait_for'] or list() - retries = module.params['retries'] - interval = module.params['interval'] + warnings = list() + + runner = CommandRunner(module) + + for cmd in commands: + if module.check_mode and not cmd['command'].startswith('show'): + warnings.append('only show commands are supported when using ' + 'check mode, not executing `%s`' % cmd['command']) + else: + if cmd['command'].startswith('conf'): + module.fail_json(msg='eos_command does not support running ' + 'config mode commands. 
Please use ' + 'eos_config instead') + try: + runner.add_command(**cmd) + except AddCommandError: + exc = get_exception() + warnings.append('duplicate command detected: %s' % cmd) try: - queue = set() - for entry in (module.params['waitfor'] or list()): - queue.add(Conditional(entry)) - except AttributeError, exc: - module.fail_json(msg=exc.message) + for item in conditionals: + runner.add_conditional(item) + except AddConditionError: + exc = get_exception() + module.fail_json(msg=str(exc), condition=exc.condition) - result = dict(changed=False) - while retries > 0: - response = module.execute(commands) - result['stdout'] = response + runner.retries = module.params['retries'] + runner.interval = module.params['interval'] + runner.match = module.params['match'] - for index, cmd in enumerate(commands): - if cmd.endswith('json'): - response[index] = module.from_json(response[index]) + try: + runner.run() + except FailedConditionsError: + exc = get_exception() + module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions) + except FailedConditionalError: + exc = get_exception() + module.fail_json(msg=str(exc), failed_conditional=exc.failed_conditional) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc), **exc.kwargs) - for item in list(queue): - if item(response): - queue.remove(item) + result = dict(changed=False, stdout=list()) - if not queue: - break + for cmd in commands: + try: + output = runner.get_command(cmd['command'], cmd.get('output')) + except ValueError: + output = 'command not executed due to check_mode, see warnings' + result['stdout'].append(output) - time.sleep(interval) - retries -= 1 - else: - failed_conditions = [item.raw for item in queue] - module.fail_json(msg='timeout waiting for value', failed_conditions=failed_conditions) + result['warnings'] = warnings + result['stdout_lines'] = list(to_lines(result['stdout'])) - result['stdout_lines'] = list(iterlines(result['stdout'])) - return 
module.exit_json(**result) + module.exit_json(**result) -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -from ansible.module_utils.shell import * -from ansible.module_utils.netcfg import * -from ansible.module_utils.eos import * if __name__ == '__main__': - main() + main() diff --git a/network/eos/eos_config.py b/network/eos/eos_config.py index 78b38aac34d..731b11efb90 100644 --- a/network/eos/eos_config.py +++ b/network/eos/eos_config.py @@ -15,6 +15,11 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = """ --- module: eos_config @@ -22,10 +27,10 @@ author: "Peter Sprygada (@privateip)" short_description: Manage Arista EOS configuration sections description: - - Arista EOS configurations use a simple block indent file sytanx - for segementing configuration into sections. This module provides + - Arista EOS configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides an implementation for working with eos configuration sections in - a deterministic way. This module works with either CLI or eapi + a deterministic way. This module works with either CLI or eAPI transports. extends_documentation_fragment: eos options: @@ -34,9 +39,10 @@ - The ordered set of commands that should be configured in the section. The commands must be the exact same commands as found in the device running-config. Be sure to note the configuration - command syntanx as some commands are automatically modified by the + command syntax as some commands are automatically modified by the device config parser. - required: true + required: false + default: null parents: description: - The ordered set of parents that uniquely identify the section @@ -45,19 +51,30 @@ level or global commands. 
required: false default: null + src: + description: + - The I(src) argument provides a path to the configuration file + to load into the remote system. The path can either be a full + system path to the configuration file if the value starts with / + or relative to the root of the implemented role or playbook. + This argument is mutually exclusive with the I(lines) and + I(parents) arguments. + required: false + default: null + version_added: "2.2" before: description: - The ordered set of commands to push on to the command stack if a change needs to be made. This allows the playbook designer the opportunity to perform configuration commands prior to pushing any changes without affecting how the set of commands are matched - against the system + against the system. required: false default: null after: description: - The ordered set of commands to append to the end of the command - stack if a changed needs to be made. Just like with I(before) this + stack if a change needs to be made. Just like with I(before) this allows the playbook designer to append a set of commands to be executed after the command set. required: false @@ -68,11 +85,13 @@ the set of commands against the current device config. If match is set to I(line), commands are matched line by line. If match is set to I(strict), command lines are matched with respect - to position. Finally if match is set to I(exact), command lines - must be an equal match. + to position. If match is set to I(exact), command lines + must be an equal match. Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. required: false default: line - choices: ['line', 'strict', 'exact'] + choices: ['line', 'strict', 'exact', 'none'] replace: description: - Instructs the module on the way to perform the configuration @@ -80,19 +99,33 @@ the modified lines are pushed to the device in configuration mode. 
If the replace argument is set to I(block) then the entire command block is pushed to the device in configuration mode if any - line is not correct + line is not correct. required: false default: line - choices: ['line', 'block'] + choices: ['line', 'block', 'config'] force: description: - The force argument instructs the module to not consider the current devices running-config. When set to true, this will cause the module to push the contents of I(src) into the device without first checking if already configured. + - Note this argument should be considered deprecated. To achieve + the equivalent, set the C(match=none) which is idempotent. This argument + will be removed in a future release. required: false default: false choices: ['yes', 'no'] + backup: + description: + - This argument will cause the module to create a full backup of + the current C(running-config) from the remote device before any + changes are made. The backup file is written to the C(backup) + folder in the playbook root directory. If the directory does not + exist, it is created. + required: false + default: no + choices: ['yes', 'no'] + version_added: "2.2" config: description: - The module, by default, will connect to the remote device and @@ -100,16 +133,46 @@ against the contents of source. There are times when it is not desirable to have the task get the current running-config for every task in a playbook. The I(config) argument allows the - implementer to pass in the configuruation to use as the base - config for comparision. + implementer to pass in the configuration to use as the base + config for comparison. required: false default: null + defaults: + description: + - The I(defaults) argument will influence how the running-config + is collected from the device. When the value is set to true, + the command used to collect the running-config is append with + the all keyword. 
When the value is set to false, the command + is issued without the all keyword + required: false + default: false + version_added: "2.2" + save: + description: + - The C(save) argument instructs the module to save the + running-config to startup-config. This operation is performed + after any changes are made to the current running config. If + no changes are made, the configuration is still saved to the + startup config. This option will always cause the module to + return changed. + required: false + default: false + version_added: "2.2" """ EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + cli: + host: "{{ inventory_hostname }}" + username: admin + password: admin + transport: cli + - eos_config: - lines: ['hostname {{ inventory_hostname }}'] - force: yes + lines: hostname {{ inventory_hostname }} + provider: "{{ cli }}" - eos_config: lines: @@ -118,9 +181,10 @@ - 30 permit ip 3.3.3.3/32 any log - 40 permit ip 4.4.4.4/32 any log - 50 permit ip 5.5.5.5/32 any log - parents: ['ip access-list test'] - before: ['no ip access-list test'] + parents: ip access-list test + before: no ip access-list test match: exact + provider: "{{ cli }}" - eos_config: lines: @@ -128,93 +192,163 @@ - 20 permit ip 2.2.2.2/32 any log - 30 permit ip 3.3.3.3/32 any log - 40 permit ip 4.4.4.4/32 any log - parents: ['ip access-list test'] - before: ['no ip access-list test'] + parents: ip access-list test + before: no ip access-list test replace: block + provider: "{{ cli }}" + +- name: load configuration from file + eos_config: + src: eos.cfg + provider: "{{ cli }}" """ RETURN = """ updates: description: The set of commands that will be pushed to the remote device - returned: always - type: list - sample: ['...', '...'] - -responses: - description: The set of responses from issuing the commands on the device - retured: always + returned: Only when lines is specified. 
type: list sample: ['...', '...'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: path + sample: /playbooks/ansible/backup/eos_config.2016-07-16@22:28:34 """ +import time + +from ansible.module_utils.netcfg import NetworkConfig, dumps +from ansible.module_utils.eos import NetworkModule, NetworkError +from ansible.module_utils.basic import get_exception + +def check_args(module, warnings): + if module.params['force']: + warnings.append('The force argument is deprecated, please use ' + 'match=none instead. This argument will be ' + 'removed in the future') -def get_config(module): - config = module.params['config'] or dict() - if not config and not module.params['force']: - config = module.config - return config + if not module.connection.supports_sessions(): + warnings.append('The current version of EOS on the remote device does ' + 'not support configuration sessions. The commit ' + 'argument will be ignored') + +def get_candidate(module): + candidate = NetworkConfig(indent=3) + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) + return candidate + +def get_config(module, defaults=False): + contents = module.params['config'] + if not contents: + defaults = module.params['defaults'] + contents = module.config.get_config(include_defaults=defaults) + return NetworkConfig(indent=3, contents=contents) + +def load_config(module, commands, result): + replace = module.params['replace'] == 'config' + commit = not module.check_mode + + diff = module.config.load_config(commands, replace=replace, commit=commit) + + if diff and module.connection.supports_sessions(): + result['diff'] = dict(prepared=diff) + result['changed'] = True + elif diff: + result['changed'] = True + +def run(module, result): + match = module.params['match'] + replace = module.params['replace'] + + candidate 
= get_candidate(module) + + if match != 'none' and replace != 'config': + config = get_config(module) + configobjs = candidate.difference(config, match=match, replace=replace) + else: + configobjs = candidate.items + + if configobjs: + commands = dumps(configobjs, 'commands').split('\n') + + if module.params['lines']: + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + result['updates'] = commands + + module.log('commands: %s' % commands) + load_config(module, commands, result) + + if module.params['save']: + if not module.check_mode: + module.config.save_config() + result['changed'] = True def main(): """ main entry point for module execution """ - argument_spec = dict( - lines=dict(aliases=['commands'], required=True, type='list'), + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), parents=dict(type='list'), + before=dict(type='list'), after=dict(type='list'), - match=dict(default='line', choices=['line', 'strict', 'exact']), - replace=dict(default='line', choices=['line', 'block']), - force=dict(default=False, type='bool'), - config=dict() - ) - module = get_module(argument_spec=argument_spec, - supports_check_mode=True) + match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block', 'config']), - lines = module.params['lines'] - parents = module.params['parents'] or list() + # this argument is deprecated in favor of setting match: none + # it will be removed in a future version + force=dict(default=False, type='bool'), - before = module.params['before'] - after = module.params['after'] + config=dict(), + defaults=dict(type='bool', default=False), - match = module.params['match'] - replace = module.params['replace'] + backup=dict(type='bool', default=False), + save=dict(default=False, type='bool'), + ) - if not module.params['force']: - contents = get_config(module) - config = 
NetworkConfig(contents=contents, indent=3) + mutually_exclusive = [('lines', 'src')] - candidate = NetworkConfig(indent=3) - candidate.add(lines, parents=parents) + required_if = [('match', 'strict', ['lines']), + ('match', 'exact', ['lines']), + ('replace', 'block', ['lines']), + ('replace', 'config', ['src'])] - commands = candidate.difference(config, path=parents, match=match, replace=replace) - else: - commands = parents - commands.extend(lines) + module = NetworkModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) - result = dict(changed=False) + if module.params['force'] is True: + module.params['match'] = 'none' - if commands: - if before: - commands[:0] = before + warnings = list() + check_args(module, warnings) - if after: - commands.extend(after) + result = dict(changed=False, warnings=warnings) - if not module.check_mode: - commands = [str(c).strip() for c in commands] - response = module.configure(commands) - result['responses'] = response - result['changed'] = True + if module.params['backup']: + result['__backup__'] = module.config.get_config() + + try: + run(module, result) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc), **exc.kwargs) - result['updates'] = commands module.exit_json(**result) -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -from ansible.module_utils.shell import * -from ansible.module_utils.netcfg import * -from ansible.module_utils.eos import * + if __name__ == '__main__': main() - diff --git a/network/eos/eos_eapi.py b/network/eos/eos_eapi.py index d81a26f9741..a5cfafabd69 100644 --- a/network/eos/eos_eapi.py +++ b/network/eos/eos_eapi.py @@ -17,263 +17,356 @@ # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = """ --- module: eos_eapi version_added: "2.1" -author: "Chris Houseknecht (@chouseknecht)" -short_description: Manage and 
configure EAPI. Requires EOS v4.12 or greater. +author: "Peter Sprygada (@privateip)" +short_description: Manage and configure Arista EOS eAPI. +requirements: + - "EOS v4.12 or greater" description: - - Use to enable or disable EAPI access, and set the port and state - of http, https, localHttp and unix-socket servers. - - When enabling EAPI access the default is to enable HTTP on port + - Use to enable or disable eAPI access, and set the port and state + of http, https, local_http and unix-socket servers. + - When enabling eAPI access the default is to enable HTTP on port 80, enable HTTPS on port 443, disable local HTTP, and disable Unix socket server. Use the options listed below to override the default configuration. - Requires EOS v4.12 or greater. extends_documentation_fragment: eos options: - state: - description: - - Set to started or stopped. A state of started will - enable EAPI access, and a state of stopped will - disable or shutdown all EAPI access. - choices: - - started - - stopped - requred: false - default: started - http_port: - description: - - Port on which the HTTP server will listen. - required: false - default: 80 - https_port: - description: - - Port on which the HTTPS server will listen. - required: false - default: 443 - local_http_port: - description: - - Port on which the local HTTP server will listen. - required: false - default: 8080 - http: - description: - - Enable HTTP server access. - required: false - default: true - aliases: - - enable_http - https: - description: - - Enable HTTPS server access. - required: false - default: true - aliases: - - enable_https - local_http: - description: - - Enable local HTTP server access. - required: false - default: false - aliases: - - enable_local_http - socket: - description: - - Enable Unix socket server access. 
- required: false - default: false - aliases: - - enable_socket + http: + description: + - The C(http) argument controls the operating state of the HTTP + transport protocol when eAPI is present in the running-config. + When the value is set to True, the HTTP protocol is enabled and + when the value is set to False, the HTTP protocol is disabled. + By default, when eAPI is first configured, the HTTP protocol is + disabled. + required: false + default: no + choices: ['yes', 'no'] + aliases: ['enable_http'] + http_port: + description: + - Configures the HTTP port that will listen for connections when + the HTTP transport protocol is enabled. This argument accepts + integer values in the valid range of 1 to 65535. + required: false + default: 80 + https: + description: + - The C(https) argument controls the operating state of the HTTPS + transport protocol when eAPI is present in the running-config. + When the value is set to True, the HTTPS protocol is enabled and + when the value is set to False, the HTTPS protocol is disabled. + By default, when eAPI is first configured, the HTTPS protocol is + enabled. + required: false + default: yes + choices: ['yes', 'no'] + aliases: ['enable_http'] + https_port: + description: + - Configures the HTTP port that will listen for connections when + the HTTP transport protocol is enabled. This argument accepts + integer values in the valid range of 1 to 65535. + required: false + default: 443 + local_http: + description: + - The C(local_http) argument controls the operating state of the + local HTTP transport protocol when eAPI is present in the + running-config. When the value is set to True, the HTTP protocol + is enabled and restricted to connections from localhost only. When + the value is set to False, the HTTP local protocol is disabled. 
+ - Note is value is independent of the C(http) argument + required: false + default: false + choices: ['yes', 'no'] + aliases: ['enable_local_http'] + local_http_port: + description: + - Configures the HTTP port that will listen for connections when + the HTTP transport protocol is enabled. This argument accepts + integer values in the valid range of 1 to 65535. + required: false + default: 8080 + socket: + description: + - The C(socket) argument controls the operating state of the UNIX + Domain Socket used to receive eAPI requests. When the value + of this argument is set to True, the UDS will listen for eAPI + requests. When the value is set to False, the UDS will not be + available to handle requests. By default when eAPI is first + configured, the UDS is disabled. + required: false + default: false + choices: ['yes', 'no'] + aliases: ['enable_socket'] + vrf: + description: + - The C(vrf) argument will configure eAPI to listen for connections + in the specified VRF. By default, eAPI transports will listen + for connections in the global table. This value requires the + VRF to already be created otherwise the task will fail. + required: false + default: default + version_added: "2.2" + config: + description: + - The module, by default, will connect to the remote device and + retrieve the current running-config to use as a base for comparing + against the contents of source. There are times when it is not + desirable to have the task get the current running-config for + every task in a playbook. The I(config) argument allows the + implementer to pass in the configuration to use as the base + config for comparison. + required: false + default: nul + version_added: "2.2" + state: + description: + - The C(state) argument controls the operational state of eAPI + on the remote device. When this argument is set to C(started), + eAPI is enabled to receive requests and when this argument is + C(stopped), eAPI is disabled and will not receive requests. 
+ required: false + default: started + choices: ['started', 'stopped'] """ EXAMPLES = """ - - name: Enable EAPI access with default configuration - eos_eapi: - state: started - provider: {{ provider }} - - - name: Enable EAPI with no HTTP, HTTPS at port 9443, local HTTP at port 80, and socket enabled - eos_eapi: - state: started - http: false - https_port: 9443 - local_http: yes - local_http_port: 80 - socket: yes - provider: {{ provider }} - - - name: Shutdown EAPI access - eos_eapi: - state: stopped - provider: {{ provider }} +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + cli: + host: "{{ inventory_hostname }}" + username: admin + password: admin + +- name: Enable eAPI access with default configuration + eos_eapi: + state: started + provider: "{{ cli }}" + +- name: Enable eAPI with no HTTP, HTTPS at port 9443, local HTTP at port 80, and socket enabled + eos_eapi: + state: started + http: false + https_port: 9443 + local_http: yes + local_http_port: 80 + socket: yes + provider: "{{ cli }}" + +- name: Shutdown eAPI access + eos_eapi: + state: stopped + provider: "{{ cli }}" """ RETURN = """ -changed: - description: - - Indicates if commands were sent to the device. - returned: always - type: boolean - sample: false - -commands: - description: - - Set of commands to be executed on remote device - returned: always - type: list - sample: [ - 'management api http-commands', - 'shutdown' - ] - -_config: - description: - - Configuration found on the device prior to executing any commands. 
- returned: always - type: object - sample: {...} +updates: + description: + - Set of commands to be executed on remote device + returned: always + type: list + sample: ['management api http-commands', 'shutdown'] +urls: + description: Hash of URL endpoints eAPI is listening on per interface + returned: when eAPI is started + type: dict + sample: {'Management1': ['http://172.26.10.1:80']} """ +import re +import time + +import ansible.module_utils.eos + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.network import NetworkModule, NetworkError +from ansible.module_utils.netcfg import NetworkConfig, dumps + +PRIVATE_KEYS_RE = re.compile('__.+__') + + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + +def get_instance(module): + try: + resp = module.cli('show management api http-commands', 'json') + return dict( + http=resp[0]['httpServer']['configured'], + http_port=resp[0]['httpServer']['port'], + https=resp[0]['httpsServer']['configured'], + https_port=resp[0]['httpsServer']['port'], + local_http=resp[0]['localHttpServer']['configured'], + local_http_port=resp[0]['localHttpServer']['port'], + socket=resp[0]['unixSocketServer']['configured'], + vrf=resp[0]['vrf'] + ) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc), **exc.kwargs) + +def started(module, instance, commands): + commands.append('no shutdown') + setters = set() + for key, value in module.argument_spec.iteritems(): + if module.params[key] is not None: + setter = value.get('setter') or 'set_%s' % key + if setter not in setters: + setters.add(setter) + invoke(setter, module, instance, commands) + +def stopped(module, instance, commands): + commands.append('shutdown') + +def set_protocol_http(module, instance, commands): + port = module.params['http_port'] + if not 1 <= port <= 65535: + module.fail_json(msg='http_port must be between 1 and 65535') + elif any((module.params['http'], 
instance['http'])): + commands.append('protocol http port %s' % port) + elif module.params['http'] is False: + commands.append('no protocol http') + +def set_protocol_https(module, instance, commands): + port = module.params['https_port'] + if not 1 <= port <= 65535: + module.fail_json(msg='https_port must be between 1 and 65535') + elif any((module.params['https'], instance['https'])): + commands.append('protocol https port %s' % port) + elif module.params['https'] is False: + commands.append('no protocol https') + +def set_local_http(module, instance, commands): + port = module.params['local_http_port'] + if not 1 <= port <= 65535: + module.fail_json(msg='local_http_port must be between 1 and 65535') + elif any((module.params['local_http'], instance['local_http'])): + commands.append('protocol http localhost port %s' % port) + elif module.params['local_http'] is False: + commands.append('no protocol http localhost port 8080') + +def set_socket(module, instance, commands): + if any((module.params['socket'], instance['socket'])): + commands.append('protocol unix-socket') + elif module.params['socket'] is False: + commands.append('no protocol unix-socket') +def set_vrf(module, instance, commands): + vrf = module.params['vrf'] + if vrf != 'default': + resp = module.cli(['show vrf']) + if vrf not in resp[0]: + module.fail_json(msg="vrf '%s' is not configured" % vrf) + commands.append('vrf %s' % vrf) + +def get_config(module): + contents = module.params['config'] + if not contents: + cmd = 'show running-config all section management api http-commands' + contents = module.cli([cmd]) + config = NetworkConfig(indent=3, contents=contents[0]) + return config + +def load_config(module, instance, commands, result): + commit = not module.check_mode + diff = module.config.load_config(commands, commit=commit) + if diff: + result['diff'] = dict(prepared=diff) + result['changed'] = True -def http_commands(protocol, port, enable, config): - - started_config = 
config['{0}Server'.format(protocol)] - commands = [] - changed = False - - if started_config.get('running'): - if not enable: - # turn off server - commands.append('no protocol {0}'.format(protocol)) - changed = True - elif started_config.get('port') != port: - # update the port - commands.append('protocol {0} port {1}'.format(protocol, port)) - changed = True - elif not started_config.get('runnng') and enable: - # turn on server - commands.append('protocol {0} port {1}'.format(protocol, port)) - changed = True - - return commands, changed - - -def execute_commands(module, commands): - - if not module.params.get('check_mode'): - module.configure(commands) +def load(module, instance, commands, result): + candidate = NetworkConfig(indent=3) + candidate.add(commands, parents=['management api http-commands']) + + config = get_config(module) + configobjs = candidate.difference(config) + + if configobjs: + commands = dumps(configobjs, 'commands').split('\n') + result['updates'] = commands + load_config(module, instance, commands, result) + +def clean_result(result): + # strip out any keys that have two leading and two trailing + # underscore characters + for key in result.keys(): + if PRIVATE_KEYS_RE.match(key): + del result[key] + +def collect_facts(module, result): + resp = module.cli(['show management api http-commands'], output='json') + facts = dict(eos_eapi_urls=dict()) + for each in resp[0]['urls']: + intf, url = each.split(' : ') + key = str(intf).strip() + if key not in facts['eos_eapi_urls']: + facts['eos_eapi_urls'][key] = list() + facts['eos_eapi_urls'][key].append(str(url).strip()) + result['ansible_facts'] = facts -def config_server(module): +def main(): + """ main entry point for module execution + """ - state = module.params.get('state') - local_http_port = module.params.get('local_http_port') - socket= module.params.get('socket') - local_http = module.params.get('local_http') - config = module.from_json(module.execute(['show management api http-commands 
| json'])[0]) - result = dict(changed=False, _config=config, commands=[]) - commands = [ - 'management api http-commands' - ] + argument_spec = dict( + http=dict(aliases=['enable_http'], default=False, type='bool', setter='set_protocol_http'), + http_port=dict(default=80, type='int', setter='set_protocol_http'), - if not config.get('enabled'): - if state == 'started': - # turn on eapi access - commands.append('no shutdown') - result['changed'] = True - else: - # state is stopped. nothing to do - return result + https=dict(aliases=['enable_https'], default=True, type='bool', setter='set_protocol_https'), + https_port=dict(default=443, type='int', setter='set_protocol_https'), - if config.get('enabled') and state == 'stopped': - # turn off eapi access and exit - commands.append('shutdown') - result['changed'] = True - result['commands'] = commands - execute_commands(module, commands) - return result - - # http and https - for protocol in ['http', 'https']: - cmds, chg = http_commands(protocol, module.params['{0}_port'.format(protocol)], - module.params['{0}'.format(protocol)], config) - if chg: - commands += cmds - result['changed'] = True - - # local HTTP - if config.get('localHttpServer').get('running'): - if not local_http: - # turn off local http server - commands.append('no protocol http localhost') - result['changed'] = True - elif config.get('localHttpServer').get('port') != local_http_port: - # update the local http port - commands.append('protocol http localhost port {0}'.format(local_http_port)) - result['changed'] = True - - if not config.get('localHttpServer').get('running') and local_http: - # turn on local http server - commands.append('protocol http localhost port {0}'.format(local_http_port)) - result['changed'] = True + local_http=dict(aliases=['enable_local_http'], default=False, type='bool', setter='set_local_http'), + local_http_port=dict(default=8080, type='int', setter='set_local_http'), - # socket server - if 
config.get('unixSocketServer').get('running') and not socket: - # turn off unix socket - commands.append('no protocol unix-socket') - result['changed'] = True + socket=dict(aliases=['enable_socket'], default=False, type='bool'), - if not config.get('unixSocketServer').get('running') and socket: - # turn on unix socket - commands.append('protocol unix-socket') - result['changed'] = True + vrf=dict(default='default'), - if len(commands) > 1: - # something requires change - execute_commands(module, commands) - result['commands'] = commands + config=dict(), - return result + # Only allow use of transport cli when configuring eAPI + transport=dict(default='cli', choices=['cli']), -def check_version(module): - config = module.from_json(module.execute(['show version | json'])[0]) - versions = config['version'].split('.') - if int(versions[0]) < 4 or int(versions[1]) < 12: - module.fail_json(msg="Device version {0} does not support eAPI. eAPI was introduced in EOS 4.12.") + state=dict(default='started', choices=['stopped', 'started']), + ) -def main(): - """ main entry point for module execution - """ + module = NetworkModule(argument_spec=argument_spec, + connect_on_load=False, + supports_check_mode=True) - argument_spec = dict( - state=dict(default='started', choices=['stopped','started']), - http_port=dict(default=80, type='int'), - https_port=dict(default=443, type='int'), - local_http_port=dict(default=8080, type='int'), - http=dict(aliases=['enable_http'], default=True, type='bool'), - https=dict(aliases=['enable_https'], default=True, type='bool'), - socket=dict(aliases=['enable_socket'], default=False, type='bool'), - local_http=dict(aliases=['enable_local_http'], default=False, type='bool'), + state = module.params['state'] - # Only allow use of transport cli when coniguring EAPI - transport=dict(required=True, choices=['cli']) - ) + result = dict(changed=False) - module = get_module(argument_spec=argument_spec, - supports_check_mode=True) + commands = list() + 
instance = get_instance(module) - check_version(module) + invoke(state, module, instance, commands) - result = config_server(module) + try: + load(module, instance, commands, result) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc), **exc.kwargs) - return module.exit_json(**result) + collect_facts(module, result) + clean_result(result) + module.exit_json(**result) -from ansible.module_utils.basic import * -from ansible.module_utils.shell import * -from ansible.module_utils.eos import * if __name__ == '__main__': main() diff --git a/network/eos/eos_facts.py b/network/eos/eos_facts.py new file mode 100644 index 00000000000..5e7cb2a7bee --- /dev/null +++ b/network/eos/eos_facts.py @@ -0,0 +1,388 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: eos_facts +version_added: "2.2" +author: "Peter Sprygada (@privateip)" +short_description: Collect facts from remote devices running Arista EOS +description: + - Collects a base set of device facts from a remote device that + is running eos. This module prepends all of the + base network fact keys with C(ansible_net_). The facts + module will always collect a base set of facts from the device + and can enable or disable collection of additional facts. 
+extends_documentation_fragment: eos +options: + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all, hardware, config, and interfaces. Can specify a list of + values to include a larger subset. Values can also be used + with an initial C(M(!)) to specify that a specific subset should + not be collected. + required: false + default: '!config' +""" + +EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + cli: + host: "{{ inventory_hostname }}" + username: admin + password: admin + transport: cli + +# Collect all facts from the device +- eos_facts: + gather_subset: all + provider: "{{ cli }}" + +# Collect only the config and default facts +- eos_facts: + gather_subset: + - config + provider: "{{ cli }}" + +# Do not collect hardware facts +- eos_facts: + gather_subset: + - "!hardware" + provider: "{{ cli }}" +""" + +RETURN = """ +ansible_net_gather_subset: + description: The list of fact subsets collected from the device + returned: always + type: list + +# default +ansible_net_model: + description: The model name returned from the device + returned: always + type: str +ansible_net_serialnum: + description: The serial number of the remote device + returned: always +ansible_net_version: + description: The operating system version running on the remote device + returned: always + type: str +ansible_net_hostname: + description: The configured hostname of the device + returned: always + type: str +ansible_net_image: + description: The image file the device is running + returned: always + type: str +ansible_net_fqdn: + description: The fully qualified domain name of the device + returned: always + type: str + +# hardware +ansible_net_filesystems: + description: All file system names available on the device + returned: when hardware is configured + type: list 
+ansible_net_memfree_mb: + description: The available free memory on the remote device in Mb + returned: when hardware is configured + type: int +ansible_net_memtotal_mb: + description: The total memory on the remote device in Mb + returned: when hardware is configured + type: int + +# config +ansible_net_config: + description: The current active config from the device + returned: when config is configured + type: str + +# interfaces +ansible_net_all_ipv4_addresses: + description: All IPv4 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_all_ipv6_addresses: + description: All IPv6 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_interfaces: + description: A hash of all interfaces running on the system + returned: when interfaces is configured + type: dict +ansible_net_neighbors: + description: The list of LLDP neighbors from the remote device + returned: when interfaces is configured + type: dict +""" +import re + +from ansible.module_utils.netcli import CommandRunner, AddCommandError +from ansible.module_utils.six import iteritems +from ansible.module_utils.eos import NetworkModule + + +def add_command(runner, command, output=None): + try: + runner.add_command(command, output) + except AddCommandError: + # AddCommandError is raised for any issue adding a command to + # the runner. 
Silently ignore the exception in this case + pass + + +class FactsBase(object): + + def __init__(self, runner): + self.runner = runner + self.facts = dict() + + self.load_commands() + + def load_commands(self): + raise NotImplementedError + + +class Default(FactsBase): + + SYSTEM_MAP = { + 'version': 'version', + 'serialNumber': 'serialnum', + 'modelName': 'model' + } + + def load_commands(self): + add_command(self.runner, 'show version', output='json') + add_command(self.runner, 'show hostname', output='json') + add_command(self.runner, 'bash timeout 5 cat /mnt/flash/boot-config') + + def populate(self): + data = self.runner.get_command('show version', 'json') + for key, value in iteritems(self.SYSTEM_MAP): + if key in data: + self.facts[value] = data[key] + + self.facts.update(self.runner.get_command('show hostname', 'json')) + self.facts.update(self.parse_image()) + + def parse_image(self): + data = self.runner.get_command('bash timeout 5 cat /mnt/flash/boot-config') + if isinstance(data, dict): + data = data['messages'][0] + match = re.search(r'SWI=(.+)$', data, re.M) + if match: + value = match.group(1) + else: + value = None + return dict(image=value) + +class Hardware(FactsBase): + + def load_commands(self): + add_command(self.runner, 'dir all-filesystems', output='text') + add_command(self.runner, 'show version', output='json') + + def populate(self): + self.facts.update(self.populate_filesystems()) + self.facts.update(self.populate_memory()) + + def populate_filesystems(self): + data = self.runner.get_command('dir all-filesystems', 'text') + fs = re.findall(r'^Directory of (.+)/', data, re.M) + return dict(filesystems=fs) + + def populate_memory(self): + values = self.runner.get_command('show version', 'json') + return dict( + memfree_mb=int(values['memFree']) / 1024, + memtotal_mb=int(values['memTotal']) / 1024 + ) + +class Config(FactsBase): + + def load_commands(self): + add_command(self.runner, 'show running-config', output='text') + + def 
populate(self): + self.facts['config'] = self.runner.get_command('show running-config') + + +class Interfaces(FactsBase): + + INTERFACE_MAP = { + 'description': 'description', + 'physicalAddress': 'macaddress', + 'mtu': 'mtu', + 'bandwidth': 'bandwidth', + 'duplex': 'duplex', + 'lineProtocolStatus': 'lineprotocol', + 'interfaceStatus': 'operstatus', + 'forwardingModel': 'type' + } + + def load_commands(self): + add_command(self.runner, 'show interfaces', output='json') + add_command(self.runner, 'show lldp neighbors', output='json') + + def populate(self): + self.facts['all_ipv4_addresses'] = list() + self.facts['all_ipv6_addresses'] = list() + + data = self.runner.get_command('show interfaces', 'json') + self.facts['interfaces'] = self.populate_interfaces(data) + + data = self.runner.get_command('show lldp neighbors', 'json') + self.facts['neighbors'] = self.populate_neighbors(data['lldpNeighbors']) + + def populate_interfaces(self, data): + facts = dict() + for key, value in iteritems(data['interfaces']): + intf = dict() + + for remote, local in iteritems(self.INTERFACE_MAP): + if remote in value: + intf[local] = value[remote] + + if 'interfaceAddress' in value: + intf['ipv4'] = dict() + for entry in value['interfaceAddress']: + intf['ipv4']['address'] = entry['primaryIp']['address'] + intf['ipv4']['masklen'] = entry['primaryIp']['maskLen'] + self.add_ip_address(entry['primaryIp']['address'], 'ipv4') + + if 'interfaceAddressIp6' in value: + intf['ipv6'] = dict() + for entry in value['interfaceAddressIp6']['globalUnicastIp6s']: + intf['ipv6']['address'] = entry['address'] + intf['ipv6']['subnet'] = entry['subnet'] + self.add_ip_address(entry['address'], 'ipv6') + + facts[key] = intf + + return facts + + def add_ip_address(self, address, family): + if family == 'ipv4': + self.facts['all_ipv4_addresses'].append(address) + else: + self.facts['all_ipv6_addresses'].append(address) + + def populate_neighbors(self, neighbors): + facts = dict() + for value in neighbors: + 
port = value['port'] + if port not in facts: + facts[port] = list() + lldp = dict() + lldp['host'] = value['neighborDevice'] + lldp['port'] = value['neighborPort'] + facts[port].append(lldp) + return facts + + +FACT_SUBSETS = dict( + default=Default, + hardware=Hardware, + interfaces=Interfaces, + config=Config +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + +def main(): + spec = dict( + gather_subset=dict(default=['!config'], type='list') + ) + + module = NetworkModule(argument_spec=spec, supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Subset must be one of [%s], got %s' % + (', '.join(VALID_SUBSETS), subset)) + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + runner = CommandRunner(module) + + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](runner)) + + runner.run() + + try: + for inst in instances: + inst.populate() + facts.update(inst.facts) + except Exception: + module.exit_json(out=module.from_json(runner.items)) + + ansible_facts = dict() + for key, value in iteritems(facts): + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + module.exit_json(ansible_facts=ansible_facts) + + +if __name__ == '__main__': + main() diff --git a/network/ios/ios_template.py b/network/ios/_ios_template.py similarity index 74% rename from 
network/ios/ios_template.py rename to network/ios/_ios_template.py index 4c7c39f10d3..62186320caa 100644 --- a/network/ios/ios_template.py +++ b/network/ios/_ios_template.py @@ -15,19 +15,25 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + + DOCUMENTATION = """ --- module: ios_template version_added: "2.1" -author: "Peter sprygada (@privateip)" +author: "Peter Sprygada (@privateip)" short_description: Manage Cisco IOS device configurations over SSH description: - - Manages network device configurations over SSH. This module - allows implementors to work with the device running-config. It + - Manages Cisco IOS network device configurations over SSH. This module + allows implementers to work with the device running-config. It provides a way to push a set of commands onto a network device - by evaluting the current running-config and only pushing configuration + by evaluating the current running-config and only pushing configuration commands that are not already configured. The config source can be a set of commands or a template. +deprecated: Deprecated in 2.2. Use ios_config instead extends_documentation_fragment: ios options: src: @@ -37,8 +43,7 @@ runtime. By default the task will first search for the source file in role or playbook root folder in templates unless a full path to the file is given. - required: false - default: null + required: true force: description: - The force argument instructs the module not to consider the @@ -47,18 +52,17 @@ without first checking if already configured. required: false default: false - choices: BOOLEANS + choices: [ "true", "false" ] include_defaults: description: - The module, by default, will collect the current device - running-config to use as a base for comparision to the commands + running-config to use as a base for comparison to the commands in I(src). 
Setting this value to true will cause the command issued to add any necessary flags to collect all defaults as well as the device configuration. If the destination device does not support such a flag, this argument is silently ignored. - required: false - default: false - choices: BOOLEANS + required: true + choices: [ "true", "false" ] backup: description: - When this argument is configured true, the module will backup @@ -67,7 +71,7 @@ the root of the playbook directory. required: false default: false - choices: BOOLEANS + choices: [ "true", "false" ] config: description: - The module, by default, will connect to the remote device and @@ -75,8 +79,8 @@ against the contents of source. There are times when it is not desirable to have the task get the current running-config for every task. The I(config) argument allows the implementer to - pass in the configuruation to use as the base config for - comparision. + pass in the configuration to use as the base config for + comparison. required: false default: null """ @@ -84,15 +88,21 @@ EXAMPLES = """ - name: push a configuration onto the device ios_template: + host: hostname + username: foo src: config.j2 - name: forceable push a configuration onto the device ios_template: + host: hostname + username: foo src: config.j2 force: yes -- name: provide the base configuration for comparision +- name: provide the base configuration for comparison ios_template: + host: hostname + username: foo src: candidate_config.txt config: current_config.txt """ @@ -106,15 +116,19 @@ responses: description: The set of responses from issuing the commands on the device - retured: when not check_mode + returned: when not check_mode type: list sample: ['...', '...'] """ +import ansible.module_utils.ios +from ansible.module_utils.netcfg import NetworkConfig, dumps +from ansible.module_utils.ios import NetworkModule def get_config(module): config = module.params['config'] or dict() + defaults = module.params['include_defaults'] if not config 
and not module.params['force']: - config = module.config + config = module.config.get_config(include_defaults=defaults) return config def main(): @@ -131,9 +145,9 @@ def main(): mutually_exclusive = [('config', 'backup'), ('config', 'force')] - module = get_module(argument_spec=argument_spec, - mutually_exclusive=mutually_exclusive, - supports_check_mode=True) + module = NetworkModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) result = dict(changed=False) @@ -142,17 +156,18 @@ def main(): contents = get_config(module) if contents: config = NetworkConfig(contents=contents, indent=1) - result['_backup'] = contents + result['_backup'] = str(contents) if not module.params['force']: commands = candidate.difference(config) + commands = dumps(commands, 'commands').split('\n') + commands = [str(c) for c in commands if c] else: commands = str(candidate).split('\n') if commands: if not module.check_mode: - commands = [str(c).strip() for c in commands] - response = module.configure(commands) + response = module.config(commands) result['responses'] = response result['changed'] = True @@ -160,10 +175,5 @@ def main(): module.exit_json(**result) -from ansible.module_utils.basic import * -from ansible.module_utils.shell import * -from ansible.module_utils.netcfg import * -from ansible.module_utils.ios import * if __name__ == '__main__': main() - diff --git a/network/ios/ios_command.py b/network/ios/ios_command.py index 81e06800005..01878856c00 100644 --- a/network/ios/ios_command.py +++ b/network/ios/ios_command.py @@ -16,42 +16,62 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = """ --- module: ios_command version_added: "2.1" -author: "Peter sprygada (@privateip)" -short_description: Run arbitrary commands on ios devices. 
+author: "Peter Sprygada (@privateip)" +short_description: Run commands on remote devices running Cisco IOS description: - Sends arbitrary commands to an ios node and returns the results - read from the device. The M(ios_command) module includes an + read from the device. This module includes an argument that will cause the module to wait for a specific condition before returning or timing out if the condition is not met. + - This module does not support running commands in configuration mode. + Please use M(ios_config) to configure IOS devices. extends_documentation_fragment: ios options: commands: description: - List of commands to send to the remote ios device over the configured provider. The resulting output from the command - is returned. If the I(waitfor) argument is provided, the + is returned. If the I(wait_for) argument is provided, the module is not returned until the condition is satisfied or - the number of retires as expired. + the number of retries has expired. required: true - waitfor: + wait_for: description: - List of conditions to evaluate against the output of the - command. The task will wait for a each condition to be true + command. The task will wait for each condition to be true before moving forward. If the conditional is not true within the configured number of retries, the task fails. See examples. required: false default: null + aliases: ['waitfor'] + version_added: "2.2" + match: + description: + - The I(match) argument is used in conjunction with the + I(wait_for) argument to specify the match policy. Valid + values are C(all) or C(any). If the value is set to C(all) + then all conditionals in the wait_for must be satisfied. If + the value is set to C(any) then only one of the values must be + satisfied. + required: false + default: all + choices: ['any', 'all'] + version_added: "2.2" retries: description: - Specifies the number of retries a command should by tried before it is considered failed. 
The command is run on the target device every retry and evaluated against the - waitfor conditions. + I(wait_for) conditions. required: false default: 10 interval: @@ -62,32 +82,51 @@ trying the command again. required: false default: 1 - """ EXAMPLES = """ - -- ios_command: - commands: - - show version - register: output - -- ios_command: - commands: - - show version - waitfor: - - "result[0] contains IOS" - -- ios_command: - commands: - - show version - - show interfaces - +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + cli: + host: "{{ inventory_hostname }}" + username: cisco + password: cisco + transport: cli + +tasks: + - name: run show version on remote devices + ios_command: + commands: show version + provider: "{{ cli }}" + + - name: run show version and check to see if output contains IOS + ios_command: + commands: show version + wait_for: result[0] contains IOS + provider: "{{ cli }}" + + - name: run multiple commands on remote nodes + ios_command: + commands: + - show version + - show interfaces + provider: "{{ cli }}" + + - name: run multiple commands and evaluate the output + ios_command: + commands: + - show version + - show interfaces + wait_for: + - result[0] contains IOS + - result[1] contains Loopback0 + provider: "{{ cli }}" """ RETURN = """ stdout: - description: the set of responses from the commands + description: The set of responses from the commands returned: always type: list sample: ['...', '...'] @@ -99,72 +138,104 @@ sample: [['...', '...'], ['...'], ['...']] failed_conditions: - description: the conditionals that failed - retured: failed + description: The list of conditionals that have failed + returned: failed type: list sample: ['...', '...'] """ +import ansible.module_utils.ios +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcli import CommandRunner +from ansible.module_utils.netcli import AddCommandError, 
FailedConditionsError +from ansible.module_utils.network import NetworkModule, NetworkError +from ansible.module_utils.six import string_types -import time -import shlex -import re +VALID_KEYS = ['command', 'prompt', 'response'] def to_lines(stdout): for item in stdout: - if isinstance(item, basestring): + if isinstance(item, string_types): item = str(item).split('\n') yield item +def parse_commands(module): + for cmd in module.params['commands']: + if isinstance(cmd, string_types): + cmd = dict(command=cmd, output=None) + elif 'command' not in cmd: + module.fail_json(msg='command keyword argument is required') + elif not set(cmd.keys()).issubset(VALID_KEYS): + module.fail_json(msg='unknown keyword specified') + yield cmd + def main(): spec = dict( - commands=dict(type='list'), - waitfor=dict(type='list'), + # { command: , prompt: , response: } + commands=dict(type='list', required=True), + + wait_for=dict(type='list', aliases=['waitfor']), + match=dict(default='all', choices=['all', 'any']), + retries=dict(default=10, type='int'), interval=dict(default=1, type='int') ) - module = get_module(argument_spec=spec, - supports_check_mode=True) + module = NetworkModule(argument_spec=spec, + connect_on_load=False, + supports_check_mode=True) - commands = module.params['commands'] + commands = list(parse_commands(module)) + conditionals = module.params['wait_for'] or list() - retries = module.params['retries'] - interval = module.params['interval'] + warnings = list() - try: - queue = set() - for entry in (module.params['waitfor'] or list()): - queue.add(Conditional(entry)) - except AttributeError, exc: - module.fail_json(msg=exc.message) + runner = CommandRunner(module) - result = dict(changed=False) + for cmd in commands: + if module.check_mode and not cmd['command'].startswith('show'): + warnings.append('only show commands are supported when using ' + 'check mode, not executing `%s`' % cmd['command']) + else: + if cmd['command'].startswith('conf'): + 
module.fail_json(msg='ios_command does not support running ' + 'config mode commands. Please use ' + 'ios_config instead') + try: + runner.add_command(**cmd) + except AddCommandError: + exc = get_exception() + warnings.append('duplicate command detected: %s' % cmd) - while retries > 0: - response = module.execute(commands) - result['stdout'] = response + for item in conditionals: + runner.add_conditional(item) - for item in list(queue): - if item(response): - queue.remove(item) + runner.retries = module.params['retries'] + runner.interval = module.params['interval'] + runner.match = module.params['match'] - if not queue: - break + try: + runner.run() + except FailedConditionsError: + exc = get_exception() + module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc)) + + result = dict(changed=False, stdout=list()) + + for cmd in commands: + try: + output = runner.get_command(cmd['command']) + except ValueError: + output = 'command not executed due to check_mode, see warnings' + result['stdout'].append(output) + + result['warnings'] = warnings + result['stdout_lines'] = list(to_lines(result['stdout'])) - time.sleep(interval) - retries -= 1 - else: - failed_conditions = [item.raw for item in queue] - module.fail_json(msg='timeout waiting for value', failed_conditions=failed_conditions) + module.exit_json(**result) - result['stdout_lines'] = list(to_lines(result['stdout'])) - return module.exit_json(**result) -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -from ansible.module_utils.shell import * -from ansible.module_utils.netcfg import * -from ansible.module_utils.ios import * if __name__ == '__main__': - main() - + main() diff --git a/network/ios/ios_config.py b/network/ios/ios_config.py index b07b07447b5..dd77449e5bc 100644 --- a/network/ios/ios_config.py +++ b/network/ios/ios_config.py @@ -16,6 +16,10 @@ # along with Ansible. If not, see . 
# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = """ --- module: ios_config @@ -23,8 +27,8 @@ author: "Peter Sprygada (@privateip)" short_description: Manage Cisco IOS configuration sections description: - - Cisco IOS configurations use a simple block indent file sytanx - for segementing configuration into sections. This module provides + - Cisco IOS configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides an implementation for working with IOS configuration sections in a deterministic way. extends_documentation_fragment: ios @@ -34,9 +38,11 @@ - The ordered set of commands that should be configured in the section. The commands must be the exact same commands as found in the device running-config. Be sure to note the configuration - command syntanx as some commands are automatically modified by the + command syntax as some commands are automatically modified by the device config parser. - required: true + required: false + default: null + aliases: ['commands'] parents: description: - The ordered set of parents that uniquely identify the section @@ -45,19 +51,29 @@ level or global commands. required: false default: null + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. This argument is mutually + exclusive with I(lines). + required: false + default: null + version_added: "2.2" before: description: - The ordered set of commands to push on to the command stack if a change needs to be made. This allows the playbook designer the opportunity to perform configuration commands prior to pushing any changes without affecting how the set of commands are matched - against the system + against the system. 
required: false default: null after: description: - The ordered set of commands to append to the end of the command - stack if a changed needs to be made. Just like with I(before) this + stack if a change needs to be made. Just like with I(before) this allows the playbook designer to append a set of commands to be executed after the command set. required: false @@ -68,11 +84,13 @@ the set of commands against the current device config. If match is set to I(line), commands are matched line by line. If match is set to I(strict), command lines are matched with respect - to position. Finally if match is set to I(exact), command lines - must be an equal match. + to position. If match is set to I(exact), command lines + must be an equal match. Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. required: false default: line - choices: ['line', 'strict', 'exact'] + choices: ['line', 'strict', 'exact', 'none'] replace: description: - Instructs the module on the way to perform the configuration @@ -80,146 +98,308 @@ the modified lines are pushed to the device in configuration mode. If the replace argument is set to I(block) then the entire command block is pushed to the device in configuration mode if any - line is not correct + line is not correct. required: false default: line choices: ['line', 'block'] + multiline_delimiter: + description: + - This arugment is used when pushing a multiline configuration + element to the IOS device. It specifies the character to use + as the delimiting character. This only applies to the + configuration action + required: false + default: "@" + version_added: "2.3" force: description: - The force argument instructs the module to not consider the current devices running-config. When set to true, this will cause the module to push the contents of I(src) into the device without first checking if already configured. 
+ - Note this argument should be considered deprecated. To achieve + the equivalent, set the C(match=none) which is idempotent. This argument + will be removed in a future release. required: false default: false + choices: ["true", "false"] + backup: + description: + - This argument will cause the module to create a full backup of + the current C(running-config) from the remote device before any + changes are made. The backup file is written to the C(backup) + folder in the playbook root directory. If the directory does not + exist, it is created. + required: false + default: no choices: ['yes', 'no'] + version_added: "2.2" config: description: - - The module, by default, will connect to the remote device and - retrieve the current running-config to use as a base for comparing - against the contents of source. There are times when it is not - desirable to have the task get the current running-config for - every task in a playbook. The I(config) argument allows the - implementer to pass in the configuruation to use as the base - config for comparision. + - The C(config) argument allows the playbook designer to supply + the base configuration to be used to validate configuration + changes necessary. If this argument is provided, the module + will not download the running-config from the remote node. required: false default: null + version_added: "2.2" + defaults: + description: + - This argument specifies whether or not to collect all defaults + when getting the remote device running config. When enabled, + the module will get the current config by issuing the command + C(show running-config all). + required: false + default: no + choices: ['yes', 'no'] + version_added: "2.2" + save: + description: + - The C(save) argument instructs the module to save the running- + config to the startup-config at the conclusion of the module + running. If check mode is specified, this argument is ignored. 
+ required: false + default: no + choices: ['yes', 'no'] + version_added: "2.2" """ EXAMPLES = """ -- ios_config: - lines: ['hostname {{ inventory_hostname }}'] - force: yes +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + cli: + host: "{{ inventory_hostname }}" + username: cisco + password: cisco + transport: cli + +- name: configure top level configuration + ios_config: + lines: hostname {{ inventory_hostname }} + provider: "{{ cli }}" -- ios_config: +- name: configure interface settings + ios_config: lines: - - 10 permit ip host 1.1.1.1 any log - - 20 permit ip host 2.2.2.2 any log - - 30 permit ip host 3.3.3.3 any log - - 40 permit ip host 4.4.4.4 any log - - 50 permit ip host 5.5.5.5 any log - parents: ['ip access-list extended test'] - before: ['no ip access-list extended test'] - match: exact + - description test interface + - ip address 172.31.1.1 255.255.255.0 + parents: interface Ethernet1 + provider: "{{ cli }}" -- ios_config: +- name: load new acl into device + ios_config: lines: - 10 permit ip host 1.1.1.1 any log - 20 permit ip host 2.2.2.2 any log - 30 permit ip host 3.3.3.3 any log - 40 permit ip host 4.4.4.4 any log - parents: ['ip access-list extended test'] - before: ['no ip access-list extended test'] - replace: block - -- ios_config: - commands: "{{lookup('file', 'datcenter1.txt'}}" - parents: ['ip access-list test'] - before: ['no ip access-list test'] - replace: block + - 50 permit ip host 5.5.5.5 any log + parents: ip access-list extended test + before: no ip access-list extended test + match: exact + provider: "{{ cli }}" """ RETURN = """ updates: description: The set of commands that will be pushed to the remote device - returned: always - type: list - sample: ['...', '...'] - -responses: - description: The set of responses from issuing the commands on the device - retured: when not check_mode + returned: Only when lines is specified. 
type: list sample: ['...', '...'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: path + sample: /playbooks/ansible/backup/ios_config.2016-07-16@22:28:34 """ +import re +import time + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.six import iteritems +from ansible.module_utils.ios import NetworkModule, NetworkError +from ansible.module_utils.netcfg import NetworkConfig, dumps +from ansible.module_utils.netcli import Command + + +def check_args(module, warnings): + if module.params['multiline_delimiter']: + if len(module.params['multiline_delimiter']) != 1: + module.fail_json(msg='multiline_delimiter value can only be a ' + 'single character') + if module.params['force']: + warnings.append('The force argument is deprecated, please use ' + 'match=none instead. This argument will be ' + 'removed in the future') + +def extract_banners(config): + banners = {} + banner_cmds = re.findall(r'^banner (\w+)', config, re.M) + for cmd in banner_cmds: + regex = r'banner %s \^C(.+?)(?=\^C)' % cmd + match = re.search(regex, config, re.S) + if match: + key = 'banner %s' % cmd + banners[key] = match.group(1).strip() + + for cmd in banner_cmds: + regex = r'banner %s \^C(.+?)(?=\^C)' % cmd + match = re.search(regex, config, re.S) + if match: + config = config.replace(str(match.group(1)), '') + + config = re.sub(r'banner \w+ \^C\^C', '!! 
banner removed', config) + return (config, banners) + +def diff_banners(want, have): + candidate = {} + for key, value in iteritems(want): + if value != have.get(key): + candidate[key] = value + return candidate + +def load_banners(module, banners): + delimiter = module.params['multiline_delimiter'] + for key, value in iteritems(banners): + key += ' %s' % delimiter + for cmd in ['config terminal', key, value, delimiter, 'end']: + cmd += '\r' + module.connection.shell.shell.sendall(cmd) + time.sleep(1) + module.connection.shell.receive() -def get_config(module): - config = module.params['config'] or dict() - if not config and not module.params['force']: - config = module.config - return config +def get_config(module, result): + contents = module.params['config'] + if not contents: + defaults = module.params['defaults'] + contents = module.config.get_config(include_defaults=defaults) + contents, banners = extract_banners(contents) + return NetworkConfig(indent=1, contents=contents), banners + +def get_candidate(module): + candidate = NetworkConfig(indent=1) + banners = {} + + if module.params['src']: + src, banners = extract_banners(module.params['src']) + candidate.load(src) + + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) + + return candidate, banners + +def run(module, result): + match = module.params['match'] + replace = module.params['replace'] + path = module.params['parents'] + + candidate, want_banners = get_candidate(module) + + if match != 'none': + config, have_banners = get_config(module, result) + path = module.params['parents'] + configobjs = candidate.difference(config, path=path,match=match, + replace=replace) + else: + configobjs = candidate.items + have_banners = {} + + banners = diff_banners(want_banners, have_banners) + + if configobjs or banners: + commands = dumps(configobjs, 'commands').split('\n') + + if module.params['lines']: + if module.params['before']: 
+ commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + result['updates'] = commands + result['banners'] = banners + + # send the configuration commands to the device and merge + # them with the current running config + if not module.check_mode: + if commands: + module.config(commands) + if banners: + load_banners(module, banners) + + result['changed'] = True + + if module.params['save']: + if not module.check_mode: + module.config.save_config() + result['changed'] = True def main(): + """ main entry point for module execution + """ argument_spec = dict( - lines=dict(aliases=['commands'], required=True, type='list'), + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), parents=dict(type='list'), + before=dict(type='list'), after=dict(type='list'), - match=dict(default='line', choices=['line', 'strict', 'exact']), + + match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), replace=dict(default='line', choices=['line', 'block']), - force=dict(default=False, type='bool'), - config=dict() - ) + multiline_delimiter=dict(default='@'), - module = get_module(argument_spec=argument_spec, - supports_check_mode=True) + # this argument is deprecated in favor of setting match: none + # it will be removed in a future version + force=dict(default=False, type='bool'), - lines = module.params['lines'] - parents = module.params['parents'] or list() + config=dict(), + defaults=dict(type='bool', default=False), - before = module.params['before'] - after = module.params['after'] + backup=dict(type='bool', default=False), + save=dict(default=False, type='bool'), + ) - match = module.params['match'] - replace = module.params['replace'] + mutually_exclusive = [('lines', 'src')] - if not module.params['force']: - contents = get_config(module) - config = NetworkConfig(contents=contents, indent=1) + required_if = [('match', 'strict', ['lines']), + ('match', 'exact', ['lines']), + ('replace', 
'block', ['lines'])] - candidate = NetworkConfig(indent=1) - candidate.add(lines, parents=parents) + module = NetworkModule(argument_spec=argument_spec, + connect_on_load=False, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) - commands = candidate.difference(config, path=parents, match=match, replace=replace) - else: - commands = parents - commands.extend(lines) + if module.params['force'] is True: + module.params['match'] = 'none' - result = dict(changed=False) + warnings = list() + check_args(module, warnings) - if commands: - if before: - commands[:0] = before + result = dict(changed=False, warnings=warnings) - if after: - commands.extend(after) + if module.params['backup']: + result['__backup__'] = module.config.get_config() - if not module.check_mode: - commands = [str(c).strip() for c in commands] - response = module.configure(commands) - result['responses'] = response - result['changed'] = True + try: + run(module, result) + except NetworkError: + exc = get_exception() + module.disconnect() + module.fail_json(msg=str(exc)) - result['updates'] = commands + module.disconnect() module.exit_json(**result) -from ansible.module_utils.basic import * -from ansible.module_utils.shell import * -from ansible.module_utils.netcfg import * -from ansible.module_utils.ios import * + if __name__ == '__main__': main() - diff --git a/network/ios/ios_facts.py b/network/ios/ios_facts.py new file mode 100644 index 00000000000..abe68ec69ce --- /dev/null +++ b/network/ios/ios_facts.py @@ -0,0 +1,461 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: ios_facts +version_added: "2.2" +author: "Peter Sprygada (@privateip)" +short_description: Collect facts from remote devices running IOS +description: + - Collects a base set of device facts from a remote device that + is running IOS. This module prepends all of the + base network fact keys with C(ansible_net_). The facts + module will always collect a base set of facts from the device + and can enable or disable collection of additional facts. +extends_documentation_fragment: ios +options: + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all, hardware, config, and interfaces. Can specify a list of + values to include a larger subset. Values can also be used + with an initial C(M(!)) to specify that a specific subset should + not be collected. + required: false + default: '!config' +""" + +EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. 
+vars: + cli: + host: "{{ inventory_hostname }}" + username: cisco + password: cisco + transport: cli + +# Collect all facts from the device +- ios_facts: + gather_subset: all + provider: "{{ cli }}" + +# Collect only the config and default facts +- ios_facts: + gather_subset: + - config + provider: "{{ cli }}" + +# Do not collect hardware facts +- ios_facts: + gather_subset: + - "!hardware" + provider: "{{ cli }}" +""" + +RETURN = """ +ansible_net_gather_subset: + description: The list of fact subsets collected from the device + returned: always + type: list + +# default +ansible_net_model: + description: The model name returned from the device + returned: always + type: str +ansible_net_serialnum: + description: The serial number of the remote device + returned: always + type: str +ansible_net_version: + description: The operating system version running on the remote device + returned: always + type: str +ansible_net_hostname: + description: The configured hostname of the device + returned: always + type: string +ansible_net_image: + description: The image file the device is running + returned: always + type: string + +# hardware +ansible_net_filesystems: + description: All file system names available on the device + returned: when hardware is configured + type: list +ansible_net_memfree_mb: + description: The available free memory on the remote device in Mb + returned: when hardware is configured + type: int +ansible_net_memtotal_mb: + description: The total memory on the remote device in Mb + returned: when hardware is configured + type: int + +# config +ansible_net_config: + description: The current active config from the device + returned: when config is configured + type: str + +# interfaces +ansible_net_all_ipv4_addresses: + description: All IPv4 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_all_ipv6_addresses: + description: All IPv6 addresses configured on the device + returned: when interfaces is 
configured + type: list +ansible_net_interfaces: + description: A hash of all interfaces running on the system + returned: when interfaces is configured + type: dict +ansible_net_neighbors: + description: The list of LLDP neighbors from the remote device + returned: when interfaces is configured + type: dict +""" +import re +import itertools + +import ansible.module_utils.ios +from ansible.module_utils.network import NetworkModule +from ansible.module_utils.six import iteritems +from ansible.module_utils.six.moves import zip + + +class FactsBase(object): + + def __init__(self, module): + self.module = module + self.facts = dict() + self.failed_commands = list() + + def run(self, cmd): + try: + return self.module.cli(cmd)[0] + except: + self.failed_commands.append(cmd) + + +class Default(FactsBase): + + def populate(self): + data = self.run('show version') + if data: + self.facts['version'] = self.parse_version(data) + self.facts['serialnum'] = self.parse_serialnum(data) + self.facts['model'] = self.parse_model(data) + self.facts['image'] = self.parse_image(data) + self.facts['hostname'] = self.parse_hostname(data) + + def parse_version(self, data): + match = re.search(r'Version (\S+),', data) + if match: + return match.group(1) + + def parse_hostname(self, data): + match = re.search(r'^(.+) uptime', data, re.M) + if match: + return match.group(1) + + def parse_model(self, data): + match = re.search(r'^Cisco (.+) \(revision', data, re.M) + if match: + return match.group(1) + + def parse_image(self, data): + match = re.search(r'image file is "(.+)"', data) + if match: + return match.group(1) + + def parse_serialnum(self, data): + match = re.search(r'board ID (\S+)', data) + if match: + return match.group(1) + + +class Hardware(FactsBase): + + def populate(self): + data = self.run('dir | include Directory') + if data: + self.facts['filesystems'] = self.parse_filesystems(data) + + data = self.run('show memory statistics | include Processor') + if data: + match = 
re.findall(r'\s(\d+)\s', data) + if match: + self.facts['memtotal_mb'] = int(match[0]) / 1024 + self.facts['memfree_mb'] = int(match[1]) / 1024 + + def parse_filesystems(self, data): + return re.findall(r'^Directory of (\S+)/', data, re.M) + + +class Config(FactsBase): + + def populate(self): + data = self.run('show running-config') + if data: + self.facts['config'] = data + + +class Interfaces(FactsBase): + + def populate(self): + self.facts['all_ipv4_addresses'] = list() + self.facts['all_ipv6_addresses'] = list() + + data = self.run('show interfaces') + if data: + interfaces = self.parse_interfaces(data) + self.facts['interfaces'] = self.populate_interfaces(interfaces) + + data = self.run('show ipv6 interface') + if data: + data = self.parse_interfaces(data) + self.populate_ipv6_interfaces(data) + + data = self.run('show lldp') + if 'LLDP is not enabled' not in data: + neighbors = self.run('show lldp neighbors detail') + if neighbors: + self.facts['neighbors'] = self.parse_neighbors(neighbors) + + def populate_interfaces(self, interfaces): + facts = dict() + for key, value in iteritems(interfaces): + intf = dict() + intf['description'] = self.parse_description(value) + intf['macaddress'] = self.parse_macaddress(value) + + ipv4 = self.parse_ipv4(value) + intf['ipv4'] = self.parse_ipv4(value) + if ipv4: + self.add_ip_address(ipv4['address'], 'ipv4') + + intf['mtu'] = self.parse_mtu(value) + intf['bandwidth'] = self.parse_bandwidth(value) + intf['mediatype'] = self.parse_mediatype(value) + intf['duplex'] = self.parse_duplex(value) + intf['lineprotocol'] = self.parse_lineprotocol(value) + intf['operstatus'] = self.parse_operstatus(value) + intf['type'] = self.parse_type(value) + + facts[key] = intf + return facts + + def populate_ipv6_interfaces(self, data): + for key, value in iteritems(data): + self.facts['interfaces'][key]['ipv6'] = list() + addresses = re.findall(r'\s+(.+), subnet', value, re.M) + subnets = re.findall(r', subnet is (.+)$', value, re.M) + for 
addr, subnet in zip(addresses, subnets): + ipv6 = dict(address=addr.strip(), subnet=subnet.strip()) + self.add_ip_address(addr.strip(), 'ipv6') + self.facts['interfaces'][key]['ipv6'].append(ipv6) + + def add_ip_address(self, address, family): + if family == 'ipv4': + self.facts['all_ipv4_addresses'].append(address) + else: + self.facts['all_ipv6_addresses'].append(address) + + def parse_neighbors(self, neighbors): + facts = dict() + for entry in neighbors.split('------------------------------------------------'): + if entry == '': + continue + intf = self.parse_lldp_intf(entry) + if intf not in facts: + facts[intf] = list() + fact = dict() + fact['host'] = self.parse_lldp_host(entry) + fact['port'] = self.parse_lldp_port(entry) + facts[intf].append(fact) + return facts + + def parse_interfaces(self, data): + parsed = dict() + key = '' + for line in data.split('\n'): + if len(line) == 0: + continue + elif line[0] == ' ': + parsed[key] += '\n%s' % line + else: + match = re.match(r'^(\S+)', line) + if match: + key = match.group(1) + parsed[key] = line + return parsed + + def parse_description(self, data): + match = re.search(r'Description: (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_macaddress(self, data): + match = re.search(r'address is (\S+)', data) + if match: + return match.group(1) + + def parse_ipv4(self, data): + match = re.search(r'Internet address is (\S+)', data) + if match: + addr, masklen = match.group(1).split('/') + return dict(address=addr, masklen=int(masklen)) + + def parse_mtu(self, data): + match = re.search(r'MTU (\d+)', data) + if match: + return int(match.group(1)) + + def parse_bandwidth(self, data): + match = re.search(r'BW (\d+)', data) + if match: + return int(match.group(1)) + + def parse_duplex(self, data): + match = re.search(r'(\w+) Duplex', data, re.M) + if match: + return match.group(1) + + def parse_mediatype(self, data): + match = re.search(r'media type is (.+)$', data, re.M) + if match: + return 
match.group(1) + + def parse_type(self, data): + match = re.search(r'Hardware is (.+),', data, re.M) + if match: + return match.group(1) + + def parse_lineprotocol(self, data): + match = re.search(r'line protocol is (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_operstatus(self, data): + match = re.search(r'^(?:.+) is (.+),', data, re.M) + if match: + return match.group(1) + + def parse_lldp_intf(self, data): + match = re.search(r'^Local Intf: (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_lldp_host(self, data): + match = re.search(r'System Name: (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_lldp_port(self, data): + match = re.search(r'Port id: (.+)$', data, re.M) + if match: + return match.group(1) + + +FACT_SUBSETS = dict( + default=Default, + hardware=Hardware, + interfaces=Interfaces, + config=Config, +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + +def main(): + spec = dict( + gather_subset=dict(default=['!config'], type='list') + ) + + module = NetworkModule(argument_spec=spec, supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](module)) + + failed_commands = list() + + try: 
+ for inst in instances: + inst.populate() + failed_commands.extend(inst.failed_commands) + facts.update(inst.facts) + except Exception: + exc = get_exception() + module.fail_json(msg=str(exc)) + + ansible_facts = dict() + for key, value in iteritems(facts): + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + module.exit_json(ansible_facts=ansible_facts, failed_commands=failed_commands) + + +if __name__ == '__main__': + main() diff --git a/network/iosxr/iosxr_template.py b/network/iosxr/_iosxr_template.py similarity index 76% rename from network/iosxr/iosxr_template.py rename to network/iosxr/_iosxr_template.py index 97b5086d2a5..315e693c3f2 100644 --- a/network/iosxr/iosxr_template.py +++ b/network/iosxr/_iosxr_template.py @@ -15,20 +15,26 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + + DOCUMENTATION = """ --- module: iosxr_template version_added: "2.1" -author: "Peter sprygada (@privateip)" +author: "Peter Sprygada (@privateip)" short_description: Manage Cisco IOSXR device configurations over SSH description: - Manages network device configurations over SSH. This module - allows implementors to work with the device running-config. It + allows implementers to work with the device running-config. It provides a way to push a set of commands onto a network device - by evaluting the current running-config and only pushing configuration + by evaluating the current running-config and only pushing configuration commands that are not already configured. The config source can be a set of commands or a template. -extends_documentation_fragment: ios +deprecated: Deprecated in 2.2. Use iosxr_config instead +extends_documentation_fragment: iosxr options: src: description: @@ -47,7 +53,7 @@ without first checking if already configured. 
required: false default: false - choices: BOOLEANS + choices: [ "true", "false" ] backup: description: - When this argument is configured true, the module will backup @@ -56,7 +62,7 @@ the root of the playbook directory. required: false default: false - choices: BOOLEANS + choices: [ "true", "false" ] config: description: - The module, by default, will connect to the remote device and @@ -64,8 +70,8 @@ against the contents of source. There are times when it is not desirable to have the task get the current running-config for every task. The I(config) argument allows the implementer to - pass in the configuruation to use as the base config for - comparision. + pass in the configuration to use as the base config for + comparison. required: false default: null """ @@ -73,20 +79,18 @@ EXAMPLES = """ - name: push a configuration onto the device - net_config: + iosxr_template: src: config.j2 - name: forceable push a configuration onto the device - net_config: + iosxr_template: src: config.j2 force: yes -- name: provide the base configuration for comparision - net_config: +- name: provide the base configuration for comparison + iosxr_template: src: candidate_config.txt config: current_config.txt - - """ RETURN = """ @@ -98,15 +102,17 @@ responses: description: The set of responses from issuing the commands on the device - retured: when not check_mode + returned: when not check_mode type: list sample: ['...', '...'] """ +from ansible.module_utils.netcfg import NetworkConfig, dumps +from ansible.module_utils.iosxr import NetworkModule def get_config(module): config = module.params['config'] or dict() if not config and not module.params['force']: - config = module.config + config = module.config.get_config() return config def main(): @@ -122,9 +128,9 @@ def main(): mutually_exclusive = [('config', 'backup'), ('config', 'force')] - module = get_module(argument_spec=argument_spec, - mutually_exclusive=mutually_exclusive, - supports_check_mode=True) + module = 
NetworkModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) result = dict(changed=False) @@ -133,17 +139,18 @@ def main(): contents = get_config(module) if contents: config = NetworkConfig(contents=contents, indent=1) - result['_backup'] = contents + result['_backup'] = str(contents) if not module.params['force']: commands = candidate.difference(config) + commands = dumps(commands, 'commands').split('\n') + commands = [str(c) for c in commands if c] else: commands = str(candidate).split('\n') if commands: if not module.check_mode: - commands = [str(c).strip() for c in commands] - response = module.configure(commands) + response = module.config(commands) result['responses'] = response result['changed'] = True @@ -151,10 +158,5 @@ def main(): module.exit_json(**result) -from ansible.module_utils.basic import * -from ansible.module_utils.shell import * -from ansible.module_utils.netcfg import * -from ansible.module_utils.iosxr import * if __name__ == '__main__': main() - diff --git a/network/iosxr/iosxr_command.py b/network/iosxr/iosxr_command.py index 88a05e9280c..e266700bbf8 100644 --- a/network/iosxr/iosxr_command.py +++ b/network/iosxr/iosxr_command.py @@ -16,42 +16,62 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = """ --- module: iosxr_command version_added: "2.1" author: "Peter Sprygada (@privateip)" -short_description: Run arbitrary commands on ios devices. +short_description: Run commands on remote devices running Cisco iosxr description: - - Sends arbitrary commands to an IOSXR node and returns the results - read from the device. The M(iosxr_command) module includes an + - Sends arbitrary commands to an iosxr node and returns the results + read from the device. 
This module includes an argument that will cause the module to wait for a specific condition before returning or timing out if the condition is not met. + - This module does not support running commands in configuration mode. + Please use M(iosxr_config) to configure iosxr devices. extends_documentation_fragment: iosxr options: commands: description: - - List of commands to send to the remote ios device over the + - List of commands to send to the remote iosxr device over the configured provider. The resulting output from the command - is returned. If the I(waitfor) argument is provided, the + is returned. If the I(wait_for) argument is provided, the module is not returned until the condition is satisfied or - the number of retires as expired. + the number of retries has expired. required: true - waitfor: + wait_for: description: - List of conditions to evaluate against the output of the - command. The task will wait for a each condition to be true + command. The task will wait for each condition to be true before moving forward. If the conditional is not true within the configured number of retries, the task fails. See examples. required: false default: null + aliases: ['waitfor'] + version_added: "2.2" + match: + description: + - The I(match) argument is used in conjunction with the + I(wait_for) argument to specify the match policy. Valid + values are C(all) or C(any). If the value is set to C(all) + then all conditionals in the wait_for must be satisfied. If + the value is set to C(any) then only one of the values must be + satisfied. + required: false + default: all + choices: ['any', 'all'] + version_added: "2.2" retries: description: - Specifies the number of retries a command should by tried before it is considered failed. The command is run on the target device every retry and evaluated against the - waitfor conditions. + I(wait_for) conditions. 
required: false default: 10 interval: @@ -65,31 +85,47 @@ """ EXAMPLES = """ - -- iosxr_command: - commands: - - show version - register: output - -- iosxr_command: - commands: - - show version - waitfor: - - "result[0] contains 6.0.0" - -- iosxr_command: - commands: - - show version - - show interfaces - waitfor: - - "result[1] contains MgmtEth0/0/CPU0/0" - - "result[0] contains 6.0.0" - +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + cli: + host: "{{ inventory_hostname }}" + username: root + password: root + +tasks: + - name: run show version on remote devices + iosxr_command: + commands: show version + provider: "{{ cli }}" + + - name: run show version and check to see if output contains iosxr + iosxr_command: + commands: show version + wait_for: result[0] contains IOS-XR + provider: "{{ cli }}" + + - name: run multiple commands on remote nodes + iosxr_command: + commands: + - show version + - show interfaces + provider: "{{ cli }}" + + - name: run multiple commands and evaluate the output + iosxr_command: + commands: + - show version + - show interfaces + wait_for: + - result[0] contains IOS-XR + - result[1] contains Loopback0 + provider: "{{ cli }}" """ RETURN = """ stdout: - description: the set of responses from the commands + description: The set of responses from the commands returned: always type: list sample: ['...', '...'] @@ -101,77 +137,107 @@ sample: [['...', '...'], ['...'], ['...']] failed_conditions: - description: the conditionals that failed - retured: failed + description: The list of conditionals that have failed + returned: failed type: list sample: ['...', '...'] """ +import ansible.module_utils.iosxr +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcli import CommandRunner +from ansible.module_utils.netcli import AddCommandError, FailedConditionsError +from ansible.module_utils.network import NetworkModule, NetworkError +from 
ansible.module_utils.six import string_types -import time -import shlex -import re +VALID_KEYS = ['command', 'output', 'prompt', 'response'] -INDEX_RE = re.compile(r'(\[\d+\])') - -def iterlines(stdout): +def to_lines(stdout): for item in stdout: - if isinstance(item, basestring): + if isinstance(item, string_types): item = str(item).split('\n') yield item +def parse_commands(module): + for cmd in module.params['commands']: + if isinstance(cmd, string_types): + cmd = dict(command=cmd, output=None) + elif 'command' not in cmd: + module.fail_json(msg='command keyword argument is required') + elif cmd.get('output') not in [None, 'text']: + module.fail_json(msg='invalid output specified for command') + elif not set(cmd.keys()).issubset(VALID_KEYS): + module.fail_json(msg='unknown command keyword specified. Valid ' + 'values are %s' % ', '.join(VALID_KEYS)) + yield cmd def main(): spec = dict( - commands=dict(type='list'), - waitfor=dict(type='list'), + # { command: , output: , prompt: , response: } + commands=dict(type='list', required=True), + + wait_for=dict(type='list', aliases=['waitfor']), + match=dict(default='all', choices=['all', 'any']), + retries=dict(default=10, type='int'), interval=dict(default=1, type='int') ) - module = get_module(argument_spec=spec, - supports_check_mode=True) + module = NetworkModule(argument_spec=spec, + connect_on_load=False, + supports_check_mode=True) - commands = module.params['commands'] + commands = list(parse_commands(module)) + conditionals = module.params['wait_for'] or list() - retries = module.params['retries'] - interval = module.params['interval'] + warnings = list() - try: - queue = set() - for entry in (module.params['waitfor'] or list()): - queue.add(Conditional(entry)) - except AttributeError, exc: - module.fail_json(msg=exc.message) + runner = CommandRunner(module) + for cmd in commands: + if module.check_mode and not cmd['command'].startswith('show'): + warnings.append('only show commands are supported when using ' 
+ 'check mode, not executing `%s`' % cmd['command']) + else: + if cmd['command'].startswith('conf'): + module.fail_json(msg='iosxr_command does not support running ' + 'config mode commands. Please use ' + 'iosxr_config instead') + try: + runner.add_command(**cmd) + except AddCommandError: + exc = get_exception() + warnings.append('duplicate command detected: %s' % cmd) - result = dict(changed=False) + for item in conditionals: + runner.add_conditional(item) - while retries > 0: - response = module.execute(commands) - result['stdout'] = response + runner.retries = module.params['retries'] + runner.interval = module.params['interval'] + runner.match = module.params['match'] - for item in list(queue): - if item(response): - queue.remove(item) + try: + runner.run() + except FailedConditionsError: + exc = get_exception() + module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc)) - if not queue: - break + result = dict(changed=False, stdout=list()) - time.sleep(interval) - retries -= 1 - else: - failed_conditions = [item.raw for item in queue] - module.fail_json(msg='timeout waiting for value', failed_conditions=failed_conditions) + for cmd in commands: + try: + output = runner.get_command(cmd['command']) + except ValueError: + output = 'command not executed due to check_mode, see warnings' + result['stdout'].append(output) - result['stdout_lines'] = list(iterlines(result['stdout'])) - return module.exit_json(**result) + result['warnings'] = warnings + result['stdout_lines'] = list(to_lines(result['stdout'])) + module.exit_json(**result) -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -from ansible.module_utils.shell import * -from ansible.module_utils.netcfg import * -from ansible.module_utils.iosxr import * -if __name__ == '__main__': - main() +if __name__ == '__main__': + main() diff --git a/network/iosxr/iosxr_config.py 
b/network/iosxr/iosxr_config.py index d1474fc0c11..d686f83111a 100644 --- a/network/iosxr/iosxr_config.py +++ b/network/iosxr/iosxr_config.py @@ -16,6 +16,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = """ --- module: iosxr_config @@ -23,21 +27,22 @@ author: "Peter Sprygada (@privateip)" short_description: Manage Cisco IOS XR configuration sections description: - - Cisco IOS XR configurations use a simple block indent file sytanx - for segementing configuration into sections. This module provides + - Cisco IOS XR configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides an implementation for working with IOS XR configuration sections in - a deterministic way. This module works with either CLI or NXAPI - transports. -extends_documentation_fragment: ios + a deterministic way. +extends_documentation_fragment: iosxr options: lines: description: - The ordered set of commands that should be configured in the section. The commands must be the exact same commands as found in the device running-config. Be sure to note the configuration - command syntanx as some commands are automatically modified by the + command syntax as some commands are automatically modified by the device config parser. - required: true + required: false + default: null + aliases: ['commands'] parents: description: - The ordered set of parents that uniquely identify the section @@ -46,19 +51,29 @@ level or global commands. required: false default: null + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. This argument is mutually + exclusive with I(lines). 
+ required: false + default: null + version_added: "2.2" before: description: - The ordered set of commands to push on to the command stack if a change needs to be made. This allows the playbook designer the opportunity to perform configuration commands prior to pushing any changes without affecting how the set of commands are matched - against the system + against the system. required: false default: null after: description: - The ordered set of commands to append to the end of the command - stack if a changed needs to be made. Just like with I(before) this + stack if a change needs to be made. Just like with I(before) this allows the playbook designer to append a set of commands to be executed after the command set. required: false @@ -69,11 +84,13 @@ the set of commands against the current device config. If match is set to I(line), commands are matched line by line. If match is set to I(strict), command lines are matched with respect - to position. Finally if match is set to I(exact), command lines - must be an equal match. + to position. If match is set to I(exact), command lines + must be an equal match. Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. required: false default: line - choices: ['line', 'strict', 'exact'] + choices: ['line', 'strict', 'exact', 'none'] replace: description: - Instructs the module on the way to perform the configuration @@ -81,19 +98,23 @@ the modified lines are pushed to the device in configuration mode. If the replace argument is set to I(block) then the entire command block is pushed to the device in configuration mode if any - line is not correct + line is not correct. required: false default: line - choices: ['line', 'block'] + choices: ['line', 'block', 'config'] force: description: - The force argument instructs the module to not consider the current devices running-config. 
When set to true, this will cause the module to push the contents of I(src) into the device without first checking if already configured. + - Note this argument should be considered deprecated. To achieve + the equivalent, set the C(match=none) which is idempotent. This argument + will be removed in a future release. required: false default: false - choices: BOOLEANS + choices: [ "yes", "no" ] + version_added: "2.2" config: description: - The module, by default, will connect to the remote device and @@ -101,114 +122,202 @@ against the contents of source. There are times when it is not desirable to have the task get the current running-config for every task in a playbook. The I(config) argument allows the - implementer to pass in the configuruation to use as the base - config for comparision. + implementer to pass in the configuration to use as the base + config for comparison. required: false default: null + backup: + description: + - This argument will cause the module to create a full backup of + the current C(running-config) from the remote device before any + changes are made. The backup file is written to the C(backup) + folder in the playbook root directory. If the directory does not + exist, it is created. + required: false + default: no + choices: ['yes', 'no'] + version_added: "2.2" + comment: + description: + - Allows a commit description to be specified to be included + when the configuration is committed. If the configuration is + not changed or committed, this argument is ignored. + required: false + default: 'configured by iosxr_config' + version_added: "2.2" """ EXAMPLES = """ -- iosxr_config: - lines: ['hostname {{ inventory_hostname }}'] - force: yes +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. 
+vars: + cli: + host: "{{ inventory_hostname }}" + username: cisco + password: cisco + transport: cli -- iosxr_config: - lines: - - description configured by ansible - - ipv4 address 10.0.0.25 255.255.255.0 - parents: ['interface GigabitEthernet0/0/0/0'] +- name: configure top level configuration + iosxr_config: + lines: hostname {{ inventory_hostname }} + provider: "{{ cli }}" -- iosxr_config: - commands: "{{lookup('file', 'datcenter1.txt'}}" - parents: ['ipv4 access-list test'] - before: ['no ip access-listv4 test'] - replace: block +- name: configure interface settings + iosxr_config: + lines: + - description test interface + - ip address 172.31.1.1 255.255.255.0 + parents: interface GigabitEthernet0/0/0/0 + provider: "{{ cli }}" +- name: load a config from disk and replace the current config + iosxr_config: + src: config.cfg + update: replace + backup: yes + provider: "{{ cli }}" """ RETURN = """ updates: description: The set of commands that will be pushed to the remote device - returned: always - type: list - sample: ['...', '...'] - -responses: - description: The set of responses from issuing the commands on the device - retured: always + returned: Only when lines is specified. 
type: list sample: ['...', '...'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: path + sample: /playbooks/ansible/backup/iosxr01.2016-07-16@22:28:34 """ +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, dumps +from ansible.module_utils.iosxr import NetworkModule, NetworkError -def get_config(module): - config = module.params['config'] or dict() - if not config and not module.params['force']: - config = module.config - return config +DEFAULT_COMMIT_COMMENT = 'configured by iosxr_config' -def main(): +def check_args(module, warnings): + if module.params['comment']: + if len(module.params['comment']) > 60: + module.fail_json(msg='comment argument cannot be more than 60 characters') + if module.params['force']: + warnings.append('The force argument is deprecated, please use ' + 'match=none instead. This argument will be ' + 'removed in the future') + + +def get_config(module, result): + contents = module.params['config'] + if not contents: + contents = module.config.get_config() + return NetworkConfig(indent=1, contents=contents) + +def get_candidate(module): + candidate = NetworkConfig(indent=1) + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) + return candidate + +def load_config(module, commands, result): + replace = module.params['replace'] == 'config' + comment = module.params['comment'] + commit = not module.check_mode + + diff = module.config.load_config(commands, replace=replace, commit=commit, + comment=comment) + + if diff: + result['diff'] = dict(prepared=diff) + result['changed'] = True + +def run(module, result): + match = module.params['match'] + replace = module.params['replace'] + path = module.params['parents'] + + candidate = get_candidate(module) + + if match != 'none' and replace != 
'config': + config = get_config(module, result) + configobjs = candidate.difference(config, path=path, match=match, + replace=replace) + else: + configobjs = candidate.items + + if configobjs: + commands = dumps(configobjs, 'commands').split('\n') + + if module.params['lines']: + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + result['updates'] = commands + + load_config(module, commands, result) + +def main(): + """main entry point for module execution + """ argument_spec = dict( - lines=dict(aliases=['commands'], required=True, type='list'), + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), parents=dict(type='list'), + before=dict(type='list'), after=dict(type='list'), - match=dict(default='line', choices=['line', 'strict', 'exact']), - replace=dict(default='line', choices=['line', 'block']), - force=dict(default=False, type='bool'), - config=dict() - ) - - module = get_module(argument_spec=argument_spec, - supports_check_mode=True) - lines = module.params['lines'] - parents = module.params['parents'] or list() + match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block', 'config']), - before = module.params['before'] - after = module.params['after'] + # this argument is deprecated in favor of setting match: none + # it will be removed in a future version + force=dict(default=False, type='bool'), - match = module.params['match'] - replace = module.params['replace'] + config=dict(), + backup=dict(type='bool', default=False), + comment=dict(default=DEFAULT_COMMIT_COMMENT), + ) - contents = get_config(module) - config = module.parse_config(contents) + mutually_exclusive = [('lines', 'src')] - if not module.params['force']: - contents = get_config(module) - config = NetworkConfig(contents=contents, indent=1) + required_if = [('match', 'strict', ['lines']), + ('match', 'exact', 
['lines']), + ('replace', 'block', ['lines']), + ('replace', 'config', ['src'])] - candidate = NetworkConfig(indent=1) - candidate.add(lines, parents=parents) + module = NetworkModule(argument_spec=argument_spec, + connect_on_load=False, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) - commands = candidate.difference(config, path=parents, match=match, replace=replace) - else: - commands = parents - commands.extend(lines) + if module.params['force'] is True: + module.params['match'] = 'none' - result = dict(changed=False) + warnings = list() + check_args(module, warnings) - if commands: - if before: - commands[:0] = before + result = dict(changed=False, warnings=warnings) - if after: - commands.extend(after) + if module.params['backup']: + result['__backup__'] = module.config.get_config() - if not module.check_mode: - commands = [str(c).strip() for c in commands] - response = module.configure(commands) - result['responses'] = response - result['changed'] = True + try: + run(module, result) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc), **exc.kwargs) - result['updates'] = commands module.exit_json(**result) -from ansible.module_utils.basic import * -from ansible.module_utils.shell import * -from ansible.module_utils.netcfg import * -from ansible.module_utils.iosxr import * if __name__ == '__main__': main() - diff --git a/network/iosxr/iosxr_facts.py b/network/iosxr/iosxr_facts.py new file mode 100644 index 00000000000..9f93d06125c --- /dev/null +++ b/network/iosxr/iosxr_facts.py @@ -0,0 +1,439 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: iosxr_facts +version_added: "2.2" +author: "Peter Sprygada (@privateip)" +short_description: Collect facts from remote devices running IOS-XR +description: + - Collects a base set of device facts from a remote device that + is running iosxr. This module prepends all of the + base network fact keys with C(ansible_net_). The facts + module will always collect a base set of facts from the device + and can enable or disable collection of additional facts. +extends_documentation_fragment: iosxr +options: + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all, hardware, config, and interfaces. Can specify a list of + values to include a larger subset. Values can also be used + with an initial C(M(!)) to specify that a specific subset should + not be collected. 
+ required: false + default: '!config' +""" + +EXAMPLES = """ +# Collect all facts from the device +- iosxr_facts: + gather_subset: all + +# Collect only the config and default facts +- iosxr_facts: + gather_subset: + - config + +# Do not collect hardware facts +- iosxr_facts: + gather_subset: + - "!hardware" +""" + +RETURN = """ +ansible_net_gather_subset: + description: The list of fact subsets collected from the device + returned: always + type: list + +# default +ansible_net_version: + description: The operating system version running on the remote device + returned: always + type: str +ansible_net_hostname: + description: The configured hostname of the device + returned: always + type: string +ansible_net_image: + description: The image file the device is running + returned: always + type: string + +# hardware +ansible_net_filesystems: + description: All file system names available on the device + returned: when hardware is configured + type: list +ansible_net_memfree_mb: + description: The available free memory on the remote device in Mb + returned: when hardware is configured + type: int +ansible_net_memtotal_mb: + description: The total memory on the remote device in Mb + returned: when hardware is configured + type: int + +# config +ansible_net_config: + description: The current active config from the device + returned: when config is configured + type: str + +# interfaces +ansible_net_all_ipv4_addresses: + description: All IPv4 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_all_ipv6_addresses: + description: All IPv6 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_interfaces: + description: A hash of all interfaces running on the system + returned: when interfaces is configured + type: dict +ansible_net_neighbors: + description: The list of LLDP neighbors from the remote device + returned: when interfaces is configured + type: dict +""" +import 
re + +import ansible.module_utils.iosxr +from ansible.module_utils.netcli import CommandRunner, AddCommandError +from ansible.module_utils.network import NetworkModule +from ansible.module_utils.six import iteritems +from ansible.module_utils.six.moves import zip + + +def add_command(runner, command): + try: + runner.add_command(command) + except AddCommandError: + # AddCommandError is raised for any issue adding a command to + # the runner. Silently ignore the exception in this case + pass + +class FactsBase(object): + + def __init__(self, runner): + self.runner = runner + self.facts = dict() + + self.commands() + + def commands(self): + raise NotImplementedError + +class Default(FactsBase): + + def commands(self): + add_command(self.runner, 'show version brief') + + def populate(self): + data = self.runner.get_command('show version brief') + + self.facts['version'] = self.parse_version(data) + self.facts['image'] = self.parse_image(data) + self.facts['hostname'] = self.parse_hostname(data) + + def parse_version(self, data): + match = re.search(r'Version (\S+)$', data, re.M) + if match: + return match.group(1) + + def parse_hostname(self, data): + match = re.search(r'^(.+) uptime', data, re.M) + if match: + return match.group(1) + + def parse_image(self, data): + match = re.search(r'image file is "(.+)"', data) + if match: + return match.group(1) + + +class Hardware(FactsBase): + + def commands(self): + add_command(self.runner, 'dir /all | include Directory') + add_command(self.runner, 'show memory summary') + + def populate(self): + data = self.runner.get_command('dir /all | include Directory') + self.facts['filesystems'] = self.parse_filesystems(data) + + data = self.runner.get_command('show memory summary') + match = re.search(r'Physical Memory (\d+)M total \((\d+)', data) + if match: + self.facts['memtotal_mb'] = int(match[0]) + self.facts['memfree_mb'] = int(match[1]) + + def parse_filesystems(self, data): + return re.findall(r'^Directory of (\S+)', data, 
re.M) + + +class Config(FactsBase): + + def commands(self): + add_command(self.runner, 'show running-config') + + def populate(self): + self.facts['config'] = self.runner.get_command('show running-config') + + +class Interfaces(FactsBase): + + def commands(self): + add_command(self.runner, 'show interfaces') + add_command(self.runner, 'show ipv6 interface') + add_command(self.runner, 'show lldp') + add_command(self.runner, 'show lldp neighbors detail') + + def populate(self): + self.facts['all_ipv4_addresses'] = list() + self.facts['all_ipv6_addresses'] = list() + + data = self.runner.get_command('show interfaces') + interfaces = self.parse_interfaces(data) + self.facts['interfaces'] = self.populate_interfaces(interfaces) + + data = self.runner.get_command('show ipv6 interface') + if len(data) > 0: + data = self.parse_interfaces(data) + self.populate_ipv6_interfaces(data) + + if 'LLDP is not enabled' not in self.runner.get_command('show lldp'): + neighbors = self.runner.get_command('show lldp neighbors detail') + self.facts['neighbors'] = self.parse_neighbors(neighbors) + + def populate_interfaces(self, interfaces): + facts = dict() + for key, value in iteritems(interfaces): + intf = dict() + intf['description'] = self.parse_description(value) + intf['macaddress'] = self.parse_macaddress(value) + + ipv4 = self.parse_ipv4(value) + intf['ipv4'] = self.parse_ipv4(value) + if ipv4: + self.add_ip_address(ipv4['address'], 'ipv4') + + intf['mtu'] = self.parse_mtu(value) + intf['bandwidth'] = self.parse_bandwidth(value) + intf['duplex'] = self.parse_duplex(value) + intf['lineprotocol'] = self.parse_lineprotocol(value) + intf['operstatus'] = self.parse_operstatus(value) + intf['type'] = self.parse_type(value) + + facts[key] = intf + return facts + + def populate_ipv6_interfaces(self, data): + for key, value in iteritems(data): + self.facts['interfaces'][key]['ipv6'] = list() + addresses = re.findall(r'\s+(.+), subnet', value, re.M) + subnets = re.findall(r', subnet is 
(.+)$', value, re.M) + for addr, subnet in zip(addresses, subnets): + ipv6 = dict(address=addr.strip(), subnet=subnet.strip()) + self.add_ip_address(addr.strip(), 'ipv6') + self.facts['interfaces'][key]['ipv6'].append(ipv6) + + def add_ip_address(self, address, family): + if family == 'ipv4': + self.facts['all_ipv4_addresses'].append(address) + else: + self.facts['all_ipv6_addresses'].append(address) + + def parse_neighbors(self, neighbors): + facts = dict() + nbors = neighbors.split('------------------------------------------------') + for entry in nbors[1:]: + if entry == '': + continue + intf = self.parse_lldp_intf(entry) + if intf not in facts: + facts[intf] = list() + fact = dict() + fact['host'] = self.parse_lldp_host(entry) + fact['port'] = self.parse_lldp_port(entry) + facts[intf].append(fact) + return facts + + def parse_interfaces(self, data): + parsed = dict() + key = '' + for line in data.split('\n'): + if len(line) == 0: + continue + elif line[0] == ' ': + parsed[key] += '\n%s' % line + else: + match = re.match(r'^(\S+)', line) + if match: + key = match.group(1) + parsed[key] = line + return parsed + + def parse_description(self, data): + match = re.search(r'Description: (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_macaddress(self, data): + match = re.search(r'address is (\S+)', data) + if match: + return match.group(1) + + def parse_ipv4(self, data): + match = re.search(r'Internet address is (\S+)/(\d+)', data) + if match: + addr = match.group(1) + masklen = int(match.group(2)) + return dict(address=addr, masklen=masklen) + + def parse_mtu(self, data): + match = re.search(r'MTU (\d+)', data) + if match: + return int(match.group(1)) + + def parse_bandwidth(self, data): + match = re.search(r'BW (\d+)', data) + if match: + return int(match.group(1)) + + def parse_duplex(self, data): + match = re.search(r'(\w+) Duplex', data, re.M) + if match: + return match.group(1) + + def parse_type(self, data): + match = re.search(r'Hardware 
is (.+),', data, re.M) + if match: + return match.group(1) + + def parse_lineprotocol(self, data): + match = re.search(r'line protocol is (.+)\s+?$', data, re.M) + if match: + return match.group(1) + + def parse_operstatus(self, data): + match = re.search(r'^(?:.+) is (.+),', data, re.M) + if match: + return match.group(1) + + def parse_lldp_intf(self, data): + match = re.search(r'^Local Interface: (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_lldp_host(self, data): + match = re.search(r'System Name: (.+)$', data, re.M) + if match: + return match.group(1) + + def parse_lldp_port(self, data): + match = re.search(r'Port id: (.+)$', data, re.M) + if match: + return match.group(1) + + +FACT_SUBSETS = dict( + default=Default, + hardware=Hardware, + interfaces=Interfaces, + config=Config, +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + +def main(): + spec = dict( + gather_subset=dict(default=['!config'], type='list') + ) + + module = NetworkModule(argument_spec=spec, supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + runner = CommandRunner(module) + + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](runner)) + + runner.run() + + try: + for inst in instances: + inst.populate() + 
facts.update(inst.facts) + except Exception: + module.exit_json(out=module.from_json(runner.items)) + + ansible_facts = dict() + for key, value in iteritems(facts): + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + module.exit_json(ansible_facts=ansible_facts) + + +if __name__ == '__main__': + main() diff --git a/network/junos/_junos_template.py b/network/junos/_junos_template.py new file mode 100644 index 00000000000..bd0ecf371ea --- /dev/null +++ b/network/junos/_junos_template.py @@ -0,0 +1,169 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + + +DOCUMENTATION = """ +--- +module: junos_template +version_added: "2.1" +author: "Peter Sprygada (@privateip)" +short_description: Manage configuration on remote devices running Junos +description: + - This module will load a candidate configuration + from a template file onto a remote device running Junos. The + module will return the differences in configuration if the diff + option is specified on the Ansible command line +deprecated: Deprecated in 2.2. Use junos_config instead +extends_documentation_fragment: junos +options: + src: + description: + - The path to the config source. The source can be either a + file with config or a template that will be merged during + runtime. 
By default the task will search for the source + file in role or playbook root folder in templates directory. + required: true + default: null + backup: + description: + - When this argument is configured true, the module will backup + the configuration from the node prior to making any changes. + The backup file will be written to backup_{{ hostname }} in + the root of the playbook directory. + required: false + default: false + choices: ["true", "false"] + confirm: + description: + - The C(confirm) argument will configure a time out value for + the commit to be confirmed before it is automatically + rolled back. If the C(confirm) argument is set to False, this + argument is silently ignored. If the value for this argument + is set to 0, the commit is confirmed immediately. + required: false + default: 0 + comment: + description: + - The C(comment) argument specifies a text string to be used + when committing the configuration. If the C(confirm) argument + is set to False, this argument is silently ignored. + required: false + default: configured by junos_template + action: + description: + - The C(action) argument specifies how the module will apply changes. + required: false + default: merge + choices: ['merge', 'overwrite', 'replace'] + version_added: "2.2" + config_format: + description: + - The C(format) argument specifies the format of the configuration + template specified in C(src). If the format argument is not + specified, the module will attempt to infer the configuration + format based of file extension. Files that end in I(xml) will set + the format to xml. Files that end in I(set) will set the format + to set and all other files will default the format to text. 
+ required: false + default: null + choices: ['text', 'xml', 'set'] +requirements: + - junos-eznc +notes: + - This module requires the netconf system service be enabled on + the remote device being managed +""" + +EXAMPLES = """ +- junos_template: + src: config.j2 + comment: update system config + +- name: replace config hierarchy + src: config.j2 + action: replace + +- name: overwrite the config + src: config.j2 + action: overwrite +""" +import ansible.module_utils.junos + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.network import NetworkModule, NetworkError + +DEFAULT_COMMENT = 'configured by junos_template' + +def main(): + + argument_spec = dict( + src=dict(required=True, type='path'), + confirm=dict(default=0, type='int'), + comment=dict(default=DEFAULT_COMMENT), + action=dict(default='merge', choices=['merge', 'overwrite', 'replace']), + config_format=dict(choices=['text', 'set', 'xml']), + backup=dict(default=False, type='bool'), + transport=dict(default='netconf', choices=['netconf']) + ) + + module = NetworkModule(argument_spec=argument_spec, + supports_check_mode=True) + + comment = module.params['comment'] + confirm = module.params['confirm'] + commit = not module.check_mode + + replace = False + overwrite = False + + action = module.params['action'] + if action == 'overwrite': + overwrite = True + elif action == 'replace': + replace = True + + src = module.params['src'] + fmt = module.params['config_format'] + + if action == 'overwrite' and fmt == 'set': + module.fail_json(msg="overwrite cannot be used when format is " + "set per junos-pyez documentation") + + results = dict(changed=False) + results['_backup'] = unicode(module.config.get_config()).strip() + + try: + diff = module.config.load_config(src, commit=commit, replace=replace, + confirm=confirm, comment=comment, config_format=fmt) + + if diff: + results['changed'] = True + results['diff'] = dict(prepared=diff) + except NetworkError: + exc = get_exception() + 
module.fail_json(msg=str(exc), **exc.kwargs) + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/junos/junos_command.py b/network/junos/junos_command.py index ad1e977541b..c54061834a0 100644 --- a/network/junos/junos_command.py +++ b/network/junos/junos_command.py @@ -16,30 +16,43 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = """ --- module: junos_command version_added: "2.1" -author: "Peter sprygada (@privateip)" -short_description: Execute arbitrary commands on Juniper JUNOS devices +author: "Peter Sprygada (@privateip)" +short_description: Execute arbitrary commands on a remote device running Junos description: - - Netork devices running Juniper JUNOS can execute a variety of commands - to display both configuration and state data. This module will take - an ordered set of commands and execute them in JUNOS and return the - command results. It also supports adding conditionals the - argument set to force the module to meet a set of criteria before - proceeding. + - Network devices running the Junos operating system provide a command + driven interface both over CLI and RPC. This module provides an + interface to execute commands using these functions and return the + results to the Ansible playbook. In addition, this + module can specify a set of conditionals to be evaluated against the + returned output, only returning control to the playbook once the + entire set of conditionals has been met. extends_documentation_fragment: junos options: commands: description: - - The commands to send to the remote JUNOS device over the - configured provider. The resulting output from the command - is returned. If the I(waitfor) argument is provided, the + - The C(commands) to send to the remote device over the Netconf + transport. The resulting output from the command + is returned. 
If the I(wait_for) argument is provided, the module is not returned until the condition is satisfied or - the number of retires as expired. - required: true - waitfor: + the number of I(retries) has been exceeded. + required: false + default: null + rpcs: + description: + - The C(rpcs) argument accepts a list of RPCs to be executed + over a netconf session and the results from the RPC execution + is return to the playbook via the modules results dictionary. + required: false + default: null + wait_for: description: - Specifies what to evaluate from the output of the command and what conditionals to apply. This argument will cause @@ -48,12 +61,26 @@ by the configured retries, the task fails. See examples. required: false default: null + aliases: ['waitfor'] + version_added: "2.2" + match: + description: + - The I(match) argument is used in conjunction with the + I(wait_for) argument to specify the match policy. Valid + values are C(all) or C(any). If the value is set to C(all) + then all conditionals in the I(wait_for) must be satisfied. If + the value is set to C(any) then only one of the values must be + satisfied. + required: false + default: all + choices: ['any', 'all'] + version_added: "2.2" retries: description: - Specifies the number of retries a command should by tried before it is considered failed. The command is run on the - target device every retry and evaluated against the waitfor - conditionals + target device every retry and evaluated against the I(waitfor) + conditionals. required: false default: 10 interval: @@ -64,24 +91,50 @@ trying the command again. required: false default: 1 + format: + description: + - Configures the encoding scheme to use when serializing output + from the device. This handles how to properly understand the + output and apply the conditionals path to the result set. 
+ required: false + default: 'xml' + choices: ['xml', 'text'] +requirements: + - junos-eznc +notes: + - This module requires the netconf system service be enabled on + the remote device being managed """ EXAMPLES = """ -# the required set of connection arguments have been purposely left off -# the examples for brevity +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + netconf: + host: "{{ inventory_hostname }}" + username: ansible + password: Ansible - name: run a set of commands junos_command: commands: ['show version', 'show ip route'] + provider: "{{ netconf }}" - name: run a command with a conditional applied to the second command junos_command: commands: - show version - - show interfaces fxp0 | display json + - show interfaces fxp0 waitfor: - - "result[1].interface-information[0].physical-interface[0].name[0].data - eq fxp0" + - "result[1].interface-information.physical-interface.name eq fxp0" + provider: "{{ netconf }}" + +- name: collect interface information using rpc + junos_command: + rpcs: + - "get_interface_information interface=em0 media=True" + - "get_interface_information interface=fxp0 media=True" + provider: "{{ netconf }}" """ RETURN = """ @@ -99,175 +152,153 @@ failed_conditionals: description: the conditionals that failed - retured: failed + returned: failed type: list sample: ['...', '...'] """ -import time -import shlex -import re -import json -import itertools +import ansible.module_utils.junos +from ansible.module_utils.basic import get_exception +from ansible.module_utils.network import NetworkModule, NetworkError +from ansible.module_utils.netcli import CommandRunner +from ansible.module_utils.netcli import AddCommandError, FailedConditionsError +from ansible.module_utils.netcli import FailedConditionalError, AddConditionError +from ansible.module_utils.junos import xml_to_json +from ansible.module_utils.six import string_types -INDEX_RE = re.compile(r'(\[\d+\])') 
+VALID_KEYS = { + 'cli': frozenset(['command', 'output', 'prompt', 'response']), + 'rpc': frozenset(['command', 'output']) +} -class Conditional(object): - OPERATORS = { - 'eq': ['eq', '=='], - 'neq': ['neq', 'ne', '!='], - 'gt': ['gt', '>'], - 'ge': ['ge', '>='], - 'lt': ['lt', '<'], - 'le': ['le', '<='], - 'contains': ['contains', 'in'] - } +def to_lines(stdout): + for item in stdout: + if isinstance(item, string_types): + item = str(item).split('\n') + yield item - def __init__(self, conditional): - self.raw = conditional +def parse(module, command_type): + if command_type == 'cli': + items = module.params['commands'] + elif command_type == 'rpc': + items = module.params['rpcs'] - key, op, val = shlex.split(conditional) - self.key = key - self.func = self.func(op) - self.value = self._cast_value(val) + parsed = list() + for item in (items or list()): + if isinstance(item, string_types): + item = dict(command=item, output=None) + elif 'command' not in item: + module.fail_json(msg='command keyword argument is required') + elif item.get('output') not in [None, 'text', 'xml']: + module.fail_json(msg='invalid output specified for command' + 'Supported values are `text` or `xml`') + elif not set(item.keys()).issubset(VALID_KEYS[command_type]): + module.fail_json(msg='unknown command keyword specified. 
Valid ' + 'values are %s' % ', '.join(VALID_KEYS[command_type])) - def __call__(self, data): - try: - value = self.get_value(dict(result=data)) - return self.func(value) - except Exception: - raise - - def _cast_value(self, value): - if value in BOOLEANS_TRUE: - return True - elif value in BOOLEANS_FALSE: - return False - elif re.match(r'^\d+\.d+$', value): - return float(value) - elif re.match(r'^\d+$', value): - return int(value) - else: - return unicode(value) - - def func(self, oper): - for func, operators in self.OPERATORS.items(): - if oper in operators: - return getattr(self, func) - raise AttributeError('unknown operator: %s' % oper) - - def get_value(self, result): - for key in self.key.split('.'): - match = re.match(r'^(.+)\[(\d+)\]', key) - if match: - key, index = match.groups() - result = result[key][int(index)] - else: - result = result.get(key) - return result - - def number(self, value): - if '.' in str(value): - return float(value) - else: - return int(value) + if not item['output']: + item['output'] = module.params['display'] - def eq(self, value): - return value == self.value + item['command_type'] = command_type - def neq(self, value): - return value != self.value + # show configuration [options] will return as text + if item['command'].startswith('show configuration'): + item['output'] = 'text' - def gt(self, value): - return self.number(value) > self.value + parsed.append(item) - def ge(self, value): - return self.number(value) >= self.value + return parsed - def lt(self, value): - return self.number(value) < self.value - def le(self, value): - return self.number(value) <= self.value +def main(): + """main entry point for Ansible module + """ - def contains(self, value): - return self.value in value + spec = dict( + commands=dict(type='list'), + rpcs=dict(type='list'), -def parse_response(module, responses): - commands = module.params['commands'] - result = dict(stdout_json=list(), stdout=list(), stdout_lines=list()) - for cmd, resp in 
itertools.izip(commands, responses): - result['stdout'].append(resp) - if cmd.endswith('json'): - result['stdout_json'].append(module.jsonify(resp)) - else: - result['stdout_lines'].append(resp.split('\n')) - return result + display=dict(default='xml', choices=['text', 'xml'], + aliases=['format', 'output']), -def to_lines(stdout): - for item in stdout: - if isinstance(item, basestring): - item = str(item).split('\n') - yield item + wait_for=dict(type='list', aliases=['waitfor']), + match=dict(default='all', choices=['all', 'any']), -def main(): - spec = dict( - commands=dict(type='list'), - waitfor=dict(type='list'), retries=dict(default=10, type='int'), - interval=dict(default=1, type='int') + interval=dict(default=1, type='int'), + + transport=dict(default='netconf', choices=['netconf']) ) - module = get_module(argument_spec=spec, - supports_check_mode=True) + mutually_exclusive = [('commands', 'rpcs')] + module = NetworkModule(argument_spec=spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) - commands = module.params['commands'] + commands = list() + for key in VALID_KEYS.keys(): + commands.extend(list(parse(module, key))) - retries = module.params['retries'] - interval = module.params['interval'] + conditionals = module.params['wait_for'] or list() - try: - queue = set() - for entry in (module.params['waitfor'] or list()): - queue.add(Conditional(entry)) - except AttributeError, exc: - module.fail_json(msg=exc.message) + warnings = list() - result = dict(changed=False) + runner = CommandRunner(module) - while retries > 0: - try: - response = module.execute(commands) - result['stdout'] = response - except ShellError: - module.fail_json(msg='failed to run commands') - - for index, cmd in enumerate(commands): - if cmd.endswith('json'): - response[index] = json.loads(response[index]) + for cmd in commands: + if module.check_mode and not cmd['command'].startswith('show'): + warnings.append('only show commands are supported when using ' + 
'check mode, not executing `%s`' % cmd['command']) + else: + if cmd['command'].startswith('co'): + module.fail_json(msg='junos_command does not support running ' + 'config mode commands. Please use ' + 'junos_config instead') + try: + runner.add_command(**cmd) + except AddCommandError: + exc = get_exception() + warnings.append('duplicate command detected: %s' % cmd) - for item in list(queue): - if item(response): - queue.remove(item) + try: + for item in conditionals: + runner.add_conditional(item) + except (ValueError, AddConditionError): + exc = get_exception() + module.fail_json(msg=str(exc), condition=exc.condition) - if not queue: - break + runner.retries = module.params['retries'] + runner.interval = module.params['interval'] + runner.match = module.params['match'] - time.sleep(interval) - retries -= 1 - else: - failed_conditions = [item.raw for item in queue] - module.fail_json(msg='timeout waiting for value', failed_conditions=failed_conditions) + try: + runner.run() + except FailedConditionsError: + exc = get_exception() + module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions) + except FailedConditionalError: + exc = get_exception() + module.fail_json(msg=str(exc), failed_conditional=exc.failed_conditional) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc)) + + result = dict(changed=False, stdout=list()) + + for cmd in commands: + try: + output = runner.get_command(cmd['command'], cmd.get('output')) + except ValueError: + output = 'command not executed due to check_mode, see warnings' + result['stdout'].append(output) + result['warnings'] = warnings result['stdout_lines'] = list(to_lines(result['stdout'])) - return module.exit_json(**result) + module.exit_json(**result) -from ansible.module_utils.basic import * -from ansible.module_utils.shell import * -from ansible.module_utils.junos import * -if __name__ == '__main__': - main() +if __name__ == '__main__': + main() diff --git a/network/junos/junos_config.py 
b/network/junos/junos_config.py index 54486276428..a1212e974c3 100644 --- a/network/junos/junos_config.py +++ b/network/junos/junos_config.py @@ -16,163 +16,332 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = """ --- module: junos_config version_added: "2.1" -author: "Peter sprygada (@privateip)" -short_description: Manage Juniper JUNOS configuration sections +author: "Peter Sprygada (@privateip)" +short_description: Manage configuration on devices running Juniper JUNOS description: - - This module provides an implementation for configuring Juniper - JUNOS devices. The configuration statements must start with either - `set` or `delete` and are compared against the current device - configuration and only changes are pushed to the device. + - This module provides an implementation for working with the active + configuration running on Juniper JUNOS devices. It provides a set + of arguments for loading configuration, performing rollback operations + and zeroing the active configuration on the device. extends_documentation_fragment: junos options: lines: description: - - The ordered set of commands that should be configured in the - section. The commands must be the exact same commands as found - in the device config. Be sure to note the configuration - command syntanx as some commands are automatically modified by the - device config parser. - required: true - before: + - This argument takes a list of C(set) or C(delete) configuration + lines to push into the remote device. Each line must start with + either C(set) or C(delete). This argument is mutually exclusive + with the I(src) argument. + required: false + default: null + src: description: - - The ordered set of commands to push on to the command stack if - a change needs to be made. 
This allows the playbook designer - the opportunity to perform configuration commands prior to pushing - any changes without affecting how the set of commands are matched - against the system + - The I(src) argument provides a path to the configuration file + to load into the remote system. The path can either be a full + system path to the configuration file if the value starts with / + or relative to the root of the implemented role or playbook. + This argument is mutually exclusive with the I(lines) argument. required: false default: null - after: + version_added: "2.2" + src_format: description: - - The ordered set of commands to append to the end of the command - stack if a changed needs to be made. Just like with I(before) this - allows the playbook designer to append a set of commands to be - executed after the command set. + - The I(src_format) argument specifies the format of the configuration + found int I(src). If the I(src_format) argument is not provided, + the module will attempt to determine the format of the configuration + file specified in I(src). required: false default: null - force: + choices: ['xml', 'set', 'text', 'json'] + version_added: "2.2" + rollback: description: - - The force argument instructs the module to not consider the - current device config. When set to true, this will cause the - module to push the contents of I(src) into the device without - first checking if already configured. + - The C(rollback) argument instructs the module to rollback the + current configuration to the identifier specified in the + argument. If the specified rollback identifier does not + exist on the remote device, the module will fail. To rollback + to the most recent commit, set the C(rollback) argument to 0. 
required: false - default: false - choices: [ "true", "false" ] - config: + default: null + zeroize: description: - - The module, by default, will connect to the remote device and - retrieve the current config to use as a base for comparing - against the contents of source. There are times when it is not - desirable to have the task get the current running-config for - every task in a playbook. The I(config) argument allows the - implementer to pass in the configuruation to use as the base - config for comparision. + - The C(zeroize) argument is used to completely sanitize the + remote device configuration back to initial defaults. This + argument will effectively remove all current configuration + statements on the remote device. required: false default: null + confirm: + description: + - The C(confirm) argument will configure a time out value for + the commit to be confirmed before it is automatically + rolled back. If the C(confirm) argument is set to False, this + argument is silently ignored. If the value for this argument + is set to 0, the commit is confirmed immediately. + required: false + default: 0 + comment: + description: + - The C(comment) argument specifies a text string to be used + when committing the configuration. If the C(confirm) argument + is set to False, this argument is silently ignored. + required: false + default: configured by junos_config + replace: + description: + - The C(replace) argument will instruct the remote device to + replace the current configuration hierarchy with the one specified + in the corresponding hierarchy of the source configuration loaded + from this module. + - Note this argument should be considered deprecated. To achieve + the equivalent, set the I(update) argument to C(replace). This argument + will be removed in a future release. 
+ required: false + choices: ['yes', 'no'] + default: false + backup: + description: + - This argument will cause the module to create a full backup of + the current C(running-config) from the remote device before any + changes are made. The backup file is written to the C(backup) + folder in the playbook root directory. If the directory does not + exist, it is created. + required: false + default: no + choices: ['yes', 'no'] + version_added: "2.2" +requirements: + - junos-eznc +notes: + - This module requires the netconf system service be enabled on + the remote device being managed. """ EXAMPLES = """ -- junos_config: - lines: ['set system host-name {{ inventory_hostname }}'] +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + netconf: + host: "{{ inventory_hostname }}" + username: ansible + password: Ansible + +- name: load configure file into device + junos_config: + src: srx.cfg + comment: update config + provider: "{{ netconf }}" + +- name: rollback the configuration to id 10 + junos_config: + rollback: 10 + provider: "{{ netconf }}" + +- name: zero out the current configuration + junos_config: + zeroize: yes + provider: "{{ netconf }}" + +- name: confirm a previous commit + junos_config: + provider: "{{ netconf }}" """ RETURN = """ -updates: - description: The set of commands that will be pushed to the remote device - returned: always - type: list - sample: ['...', '...'] - -responses: - description: The set of responses from issuing the commands on the device - returned: always - type: list - sample: ['...', '...'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: path + sample: /playbooks/ansible/backup/config.2016-07-16@22:28:34 """ -import re -import itertools - -def get_config(module): - config = module.params['config'] or dict() - if not config and not module.params['force']: - config = module.config - return config - -def 
to_lines(config): - lines = list() - for item in config: - if item.raw.endswith(';'): - line = [p.text for p in item.parents] - line.append(item.text) - lines.append(' '.join(line)) - return lines +import json -def main(): +from xml.etree import ElementTree - argument_spec = dict( - lines=dict(aliases=['commands'], required=True, type='list'), - before=dict(type='list'), - after=dict(type='list'), - force=dict(default=False, type='bool'), - config=dict() - ) +import ansible.module_utils.junos - module = get_module(argument_spec=argument_spec, - supports_check_mode=True) +from ansible.module_utils.basic import get_exception +from ansible.module_utils.network import NetworkModule, NetworkError +from ansible.module_utils.netcfg import NetworkConfig - lines = module.params['lines'] - before = module.params['before'] - after = module.params['after'] +DEFAULT_COMMENT = 'configured by junos_config' - contents = get_config(module) - parsed = module.parse_config(contents) - config = to_lines(parsed) - result = dict(changed=False) +def guess_format(config): + try: + json.loads(config) + return 'json' + except ValueError: + pass + + try: + ElementTree.fromstring(config) + return 'xml' + except ElementTree.ParseError: + pass + + if config.startswith('set') or config.startswith('delete'): + return 'set' - candidate = list() - for line in lines: - parts = line.split() - action = parts[0] - cfgline = ' '.join(parts[1:]) - - if action not in ['set', 'delete']: - module.fail_json(msg='line must start with either `set` or `delete`') - elif action == 'set' and cfgline not in config: - candidate.append(line) - elif action == 'delete' and not config: - candidate.append(line) - elif action == 'delete': - regexp = re.compile(r'^%s$' % cfgline) - for cfg in config: - if regexp.match(cfg): - candidate.append(line) + return 'text' + +def config_to_commands(config): + set_format = config.startswith('set') or config.startswith('delete') + candidate = NetworkConfig(indent=4, contents=config, 
device_os='junos') + if not set_format: + candidate = [c.line for c in candidate.items] + commands = list() + # this filters out less specific lines + for item in candidate: + for index, entry in enumerate(commands): + if item.startswith(entry): + del commands[index] break + commands.append(item) + + else: + commands = str(candidate).split('\n') + + return commands + +def diff_commands(commands, config): + config = [unicode(c).replace("'", '') for c in config] + + updates = list() + visited = set() + + for item in commands: + if len(item) > 0: + if not item.startswith('set') and not item.startswith('delete'): + raise ValueError('line must start with either `set` or `delete`') + + elif item.startswith('set') and item[4:] not in config: + updates.append(item) - if candidate: - if before: - candidate[:0] = before + elif item.startswith('delete'): + for entry in config: + if entry.startswith(item[7:]) and item not in visited: + updates.append(item) + visited.add(item) - if after: - candidate.extend(after) + return updates - if not module.check_mode: - response = module.configure(candidate) - result['responses'] = response +def load_config(module, result): + candidate = module.params['lines'] or module.params['src'] + if isinstance(candidate, basestring): + candidate = candidate.split('\n') + + kwargs = dict() + kwargs['comment'] = module.params['comment'] + kwargs['confirm'] = module.params['confirm'] + kwargs['replace'] = module.params['replace'] + kwargs['commit'] = not module.check_mode + + if module.params['src']: + config_format = module.params['src_format'] or guess_format(str(candidate)) + elif module.params['lines']: + config_format = 'set' + kwargs['config_format'] = config_format + + # this is done to filter out `delete ...` statements which map to + # nothing in the config as that will cause an exception to be raised + if config_format == 'set': + config = module.config.get_config() + config = config_to_commands(config) + candidate = diff_commands(candidate, 
config) + + diff = module.config.load_config(candidate, **kwargs) + + if diff: result['changed'] = True + result['diff'] = dict(prepared=diff) + +def rollback_config(module, result): + rollback = module.params['rollback'] + + kwargs = dict(comment=module.param['comment'], + commit=not module.check_mode) + + diff = module.connection.rollback_config(rollback, **kwargs) + + if diff: + result['changed'] = True + result['diff'] = dict(prepared=diff) + +def zeroize_config(module, result): + if not module.check_mode: + module.cli.run_commands('request system zeroize') + result['changed'] = True + +def confirm_config(module, result): + checkonly = module.check_mode + result['changed'] = module.connection.confirm_commit(checkonly) + +def run(module, result): + if module.params['rollback']: + return rollback_config(module, result) + elif module.params['zeroize']: + return zeroize_config(module, result) + elif not any((module.params['src'], module.params['lines'])): + return confirm_config(module, result) + else: + return load_config(module, result) + + +def main(): + """ main entry point for module execution + """ + argument_spec = dict( + lines=dict(type='list'), + + src=dict(type='path'), + src_format=dict(choices=['xml', 'text', 'set', 'json']), + + # update operations + replace=dict(default=False, type='bool'), + confirm=dict(default=0, type='int'), + comment=dict(default=DEFAULT_COMMENT), + + # config operations + backup=dict(type='bool', default=False), + rollback=dict(type='int'), + zeroize=dict(default=False, type='bool'), + + transport=dict(default='netconf', choices=['netconf']) + ) + + mutually_exclusive = [('lines', 'rollback'), ('lines', 'zeroize'), + ('rollback', 'zeroize'), ('lines', 'src'), + ('src', 'zeroize'), ('src', 'rollback')] + + required_if = [('replace', True, ['src'])] + + module = NetworkModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) + + result = 
dict(changed=False) + + if module.params['backup']: + result['__backup__'] = module.config.get_config() + + try: + run(module, result) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc), **exc.kwargs) + + module.exit_json(**result) - result['updates'] = candidate - return module.exit_json(**result) -from ansible.module_utils.basic import * -from ansible.module_utils.shell import * -from ansible.module_utils.netcfg import * -from ansible.module_utils.junos import * if __name__ == '__main__': main() diff --git a/network/junos/junos_facts.py b/network/junos/junos_facts.py new file mode 100644 index 00000000000..383eb90bcf2 --- /dev/null +++ b/network/junos/junos_facts.py @@ -0,0 +1,134 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: junos_facts +version_added: "2.1" +author: "Peter Sprygada (@privateip)" +short_description: Collect facts from remote device running Junos +description: + - Collects fact information from a remote device running the Junos + operating system. By default, the module will collect basic fact + information from the device to be included with the hostvars. + Additional fact information can be collected based on the + configured set of arguments. 
+extends_documentation_fragment: junos +options: + config: + description: + - The C(config) argument instructs the fact module to collect + the configuration from the remote device. The configuration + is then included in return facts. By default, the configuration + is returned as text. The C(config_format) can be used to return + different Junos configuration formats. + required: false + default: null + config_format: + description: + - The C(config_format) argument is used to specify the desired + format of the configuration file. Devices support three + configuration file formats. By default, the configuration + from the device is returned as text. The other option xml. + If the xml option is chosen, the configuration file is + returned as both xml and json. + required: false + default: text + choices: ['xml', 'text'] +requirements: + - junos-eznc +notes: + - This module requires the netconf system service be enabled on + the remote device being managed +""" + +EXAMPLES = """ +# the required set of connection arguments have been purposely left off +# the examples for brevity + +- name: collect default set of facts + junos_facts: + +- name: collect default set of facts and configuration + junos_facts: + config: yes + +- name: collect default set of facts and configuration in text format + junos_facts: + config: yes + config_format: text + +- name: collect default set of facts and configuration in XML and JSON format + junos_facts: + config: yes + config_format: xml +""" + +RETURN = """ +ansible_facts: + description: Returns the facts collect from the device + returned: always + type: dict +""" +import ansible.module_utils.junos + +from ansible.module_utils.network import NetworkModule +from ansible.module_utils.junos import xml_to_string, xml_to_json + +def main(): + """ Main entry point for AnsibleModule + """ + spec = dict( + config=dict(type='bool'), + config_format=dict(default='text', choices=['xml', 'text']), + transport=dict(default='netconf', 
choices=['netconf']) + ) + + module = NetworkModule(argument_spec=spec, + supports_check_mode=True) + + result = dict(changed=False) + + facts = module.connection.get_facts() + + if '2RE' in facts: + facts['has_2RE'] = facts['2RE'] + del facts['2RE'] + + facts['version_info'] = dict(facts['version_info']) + + if module.params['config'] is True: + config_format = module.params['config_format'] + resp_config = module.config.get_config(config_format=config_format) + + if config_format in ['text']: + facts['config'] = resp_config + elif config_format == "xml": + facts['config'] = xml_to_string(resp_config) + facts['config_json'] = xml_to_json(resp_config) + + result['ansible_facts'] = facts + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/junos/junos_netconf.py b/network/junos/junos_netconf.py new file mode 100644 index 00000000000..6f47daa8db0 --- /dev/null +++ b/network/junos/junos_netconf.py @@ -0,0 +1,152 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: junos_netconf +version_added: "2.1" +author: "Peter Sprygada (@privateip)" +short_description: Configures the Junos Netconf system service +description: + - This module provides an abstraction that enables and configures + the netconf system service running on Junos devices. This module + can be used to easily enable the Netconf API. Netconf provides + a programmatic interface for working with configuration and state + resources as defined in RFC 6242. +extends_documentation_fragment: junos +options: + netconf_port: + description: + - This argument specifies the port the netconf service should + listen on for SSH connections. The default port as defined + in RFC 6242 is 830. + required: false + default: 830 + aliases: ['listens_on'] + version_added: "2.2" + state: + description: + - Specifies the state of the M(junos_netconf) resource on + the remote device. If the I(state) argument is set to + I(present) the netconf service will be configured. If the + I(state) argument is set to I(absent) the netconf service + will be removed from the configuration. + required: false + default: present + choices: ['present', 'absent'] +""" + +EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. 
+vars: + cli: + host: "{{ inventory_hostname }}" + username: ansible + password: Ansible + transport: cli + +- name: enable netconf service on port 830 + junos_netconf: + listens_on: 830 + state: present + provider: "{{ cli }}" + +- name: disable netconf service + junos_netconf: + state: absent + provider: "{{ cli }}" +""" + +RETURN = """ +commands: + description: Returns the command sent to the remote device + returned: when changed is True + type: str + sample: 'set system services netconf ssh port 830' +""" +import re + +import ansible.module_utils.junos + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.network import NetworkModule, NetworkError + +def parse_port(config): + match = re.search(r'port (\d+)', config) + if match: + return int(match.group(1)) + +def get_instance(module): + cmd = 'show configuration system services netconf' + cfg = module.cli(cmd)[0] + result = dict(state='absent') + if cfg: + result = dict(state='present') + result['port'] = parse_port(cfg) + return result + +def main(): + """main entry point for module execution + """ + + argument_spec = dict( + netconf_port=dict(type='int', default=830, aliases=['listens_on']), + state=dict(default='present', choices=['present', 'absent']), + transport=dict(default='cli', choices=['cli']) + ) + + module = NetworkModule(argument_spec=argument_spec, + supports_check_mode=True) + + state = module.params['state'] + port = module.params['netconf_port'] + + result = dict(changed=False) + + instance = get_instance(module) + + if state == 'present' and instance.get('state') == 'absent': + commands = 'set system services netconf ssh port %s' % port + elif state == 'present' and port != instance.get('port'): + commands = 'set system services netconf ssh port %s' % port + elif state == 'absent' and instance.get('state') == 'present': + commands = 'delete system services netconf' + else: + commands = None + + if commands: + if not module.check_mode: + try: + comment = 
'configuration updated by junos_netconf' + module.config(commands, comment=comment) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc), **exc.kwargs) + result['changed'] = True + result['commands'] = commands + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/junos/junos_package.py b/network/junos/junos_package.py new file mode 100644 index 00000000000..c457be8228c --- /dev/null +++ b/network/junos/junos_package.py @@ -0,0 +1,160 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: junos_package +version_added: "2.1" +author: "Peter Sprygada (@privateip)" +short_description: Installs packages on remote devices running Junos +description: + - This module can install new and updated packages on remote + devices running Junos. The module will compare the specified + package with the one running on the remote device and install + the specified version if there is a mismatch +extends_documentation_fragment: junos +options: + src: + description: + - The I(src) argument specifies the path to the source package to be + installed on the remote device in the advent of a version mismatch. 
+ The I(src) argument can be either a localized path or a full + path to the package file to install. + required: true + default: null + aliases: ['package'] + version: + description: + - The I(version) argument can be used to explicitly specify the + version of the package that should be installed on the remote + device. If the I(version) argument is not specified, then + the version is extracts from the I(src) filename. + required: false + default: null + reboot: + description: + - In order for a package to take effect, the remote device must be + restarted. When enabled, this argument will instruct the module + to reboot the device once the updated package has been installed. + If disabled or the remote package does not need to be changed, + the device will not be started. + required: true + default: true + choices: ['true', 'false'] + no_copy: + description: + - The I(no_copy) argument is responsible for instructing the remote + device on where to install the package from. When enabled, the + package is transferred to the remote device prior to installing. + required: false + default: false + choices: ['true', 'false'] + force: + description: + - The I(force) argument instructs the module to bypass the package + version check and install the packaged identified in I(src) on + the remote device. 
+ required: true + default: false + choices: ['true', 'false'] +requirements: + - junos-eznc +notes: + - This module requires the netconf system service be enabled on + the remote device being managed +""" + +EXAMPLES = """ +# the required set of connection arguments have been purposely left off +# the examples for brevity + +- name: install local package on remote device + junos_package: + src: junos-vsrx-12.1X46-D10.2-domestic.tgz + +- name: install local package on remote device without rebooting + junos_package: + src: junos-vsrx-12.1X46-D10.2-domestic.tgz + reboot: no +""" +import ansible.module_utils.junos + +from ansible.module_utils.network import NetworkModule + +try: + from jnpr.junos.utils.sw import SW + HAS_SW = True +except ImportError: + HAS_SW = False + +def install_package(module): + junos = SW(module.connection.device) + package = module.params['src'] + no_copy = module.params['no_copy'] + + progress_log = lambda x, y: module.log(y) + + module.log('installing package') + result = junos.install(package, progress=progress_log, no_copy=no_copy) + + if not result: + module.fail_json(msg='Unable to install package on device') + + if module.params['reboot']: + module.log('rebooting system') + junos.reboot() + + +def main(): + spec = dict( + src=dict(type='path', required=True, aliases=['package']), + version=dict(), + reboot=dict(type='bool', default=True), + no_copy=dict(default=False, type='bool'), + force=dict(type='bool', default=False), + transport=dict(default='netconf', choices=['netconf']) + ) + + module = NetworkModule(argument_spec=spec, + supports_check_mode=True) + + if not HAS_SW: + module.fail_json(msg='Missing jnpr.junos.utils.sw module') + + result = dict(changed=False) + + do_upgrade = module.params['force'] or False + if not module.params['force']: + has_ver = module.connection.get_facts().get('version') + wants_ver = module.params['version'] + do_upgrade = has_ver != wants_ver + + if do_upgrade: + if not module.check_mode: + 
install_package(module) + result['changed'] = True + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/junos/junos_template.py b/network/junos/junos_template.py deleted file mode 100644 index 6b1bdbb6e9f..00000000000 --- a/network/junos/junos_template.py +++ /dev/null @@ -1,180 +0,0 @@ -#!/usr/bin/python -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# -DOCUMENTATION = """ ---- -module: junos_template -version_added: "2.1" -author: "Peter sprygada (@privateip)" -short_description: Manage Juniper JUNOS device configurations -description: - - Manages network device configurations over SSH. This module - allows implementors to work with the device configuration. It - provides a way to push a set of commands onto a network device - by evaluting the current configuration and only pushing - commands that are not already configured. -extends_documentation_fragment: junos -options: - src: - description: - - The path to the config source. The source can be either a - file with config or a template that will be merged during - runtime. By default the task will search for the source - file in role or playbook root folder in templates directory. - required: false - default: null - force: - description: - - The force argument instructs the module to not consider the - current devices configuration. 
When set to true, this will - cause the module to push the contents of I(src) into the device - without first checking if already configured. - required: false - default: false - choices: [ "true", "false" ] - backup: - description: - - When this argument is configured true, the module will backup - the configuration from the node prior to making any changes. - The backup file will be written to backup_{{ hostname }} in - the root of the playbook directory. - required: false - default: false - choices: [ "true", "false" ] - config: - description: - - The module, by default, will connect to the remote device and - retrieve the current configuration to use as a base for comparing - against the contents of source. There are times when it is not - desirable to have the task get the current configuration for - every task in a playbook. The I(config) argument allows the - implementer to pass in the configuruation to use as the base - config for comparision. - required: false - default: null -""" - -EXAMPLES = """ - -- name: push a configuration onto the device - junos_template: - src: config.j2 - -- name: forceable push a configuration onto the device - junos_template: - src: config.j2 - force: yes - -- name: provide the base configuration for comparision - junos_template: - src: candidate_config.txt - config: current_config.txt - -""" - -RETURN = """ - -commands: - description: The set of commands that will be pushed to the remote device - returned: always - type: list - sample: [...] 
- -""" - -def compare(this, other): - parents = [item.text for item in this.parents] - for entry in other: - if this == entry: - return None - return this - -def expand(obj, action='set'): - cmd = [action] - cmd.extend([p.text for p in obj.parents]) - cmd.append(obj.text) - return ' '.join(cmd) - -def flatten(data, obj): - for k, v in data.items(): - obj.append(k) - flatten(v, obj) - return obj - -def to_lines(config): - lines = list() - for item in config: - if item.raw.endswith(';'): - line = [p.text for p in item.parents] - line.append(item.text) - lines.append(' '.join(line)) - return lines - -def get_config(module): - config = module.params['config'] or list() - if not config and not module.params['force']: - config = module.config - return config - -def main(): - """ main entry point for module execution - """ - - argument_spec = dict( - src=dict(), - force=dict(default=False, type='bool'), - backup=dict(default=False, type='bool'), - config=dict(), - ) - - mutually_exclusive = [('config', 'backup'), ('config', 'force')] - - module = get_module(argument_spec=argument_spec, - mutually_exclusive=mutually_exclusive, - supports_check_mode=True) - - result = dict(changed=False) - - parsed = module.parse_config(module.params['src']) - commands = to_lines(parsed) - - contents = get_config(module) - result['_backup'] = module.config - - parsed = module.parse_config(contents) - config = to_lines(parsed) - - candidate = list() - for item in commands: - if item not in config: - candidate.append('set %s' % item) - - if candidate: - if not module.check_mode: - module.configure(candidate) - result['changed'] = True - - result['updates'] = candidate - return module.exit_json(**result) - - -from ansible.module_utils.basic import * -from ansible.module_utils.shell import * -from ansible.module_utils.netcfg import * -from ansible.module_utils.junos import * -if __name__ == '__main__': - main() diff --git a/network/netvisor/__init__.py b/network/netvisor/__init__.py new file 
mode 100644 index 00000000000..e69de29bb2d diff --git a/network/netvisor/pn_cluster.py b/network/netvisor/pn_cluster.py new file mode 100644 index 00000000000..de02198e2d9 --- /dev/null +++ b/network/netvisor/pn_cluster.py @@ -0,0 +1,316 @@ +#!/usr/bin/python +""" PN CLI cluster-create/cluster-delete """ + +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +import shlex + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: pn_cluster +author: "Pluribus Networks (@amitsi)" +version_added: "2.2" +version: 1.0 +short_description: CLI command to create/delete a cluster. +description: + - Execute cluster-create or cluster-delete command. + - A cluster allows two switches to cooperate in high-availability (HA) + deployments. The nodes that form the cluster must be members of the same + fabric. Clusters are typically used in conjunction with a virtual link + aggregation group (VLAG) that allows links physically connected to two + separate switches appear as a single trunk to a third device. The third + device can be a switch,server, or any Ethernet device. +options: + pn_cliusername: + description: + - Provide login username if user is not root. + required: False + pn_clipassword: + description: + - Provide login password if user is not root. 
+ required: False + pn_cliswitch: + description: + - Target switch to run the cli on. + required: False + state: + description: + - Specify action to perform. Use 'present' to create cluster and 'absent' + to delete cluster. + required: true + choices: ['present', 'absent'] + pn_name: + description: + - Specify the name of the cluster. + required: true + pn_cluster_node1: + description: + - Specify the name of the first switch in the cluster. + - Required for 'cluster-create'. + pn_cluster_node2: + description: + - Specify the name of the second switch in the cluster. + - Required for 'cluster-create'. + pn_validate: + description: + - Validate the inter-switch links and state of switches in the cluster. + choices: ['validate', 'no-validate'] +""" + +EXAMPLES = """ +- name: create spine cluster + pn_cluster: + state: 'present' + pn_name: 'spine-cluster' + pn_cluster_node1: 'spine01' + pn_cluster_node2: 'spine02' + pn_validate: validate + pn_quiet: True + +- name: delete spine cluster + pn_cluster: + state: 'absent' + pn_name: 'spine-cluster' + pn_quiet: True +""" + +RETURN = """ +command: + description: The CLI command run on the target node(s). +stdout: + description: The set of responses from the cluster command. + returned: always + type: list +stderr: + description: The set of error responses from the cluster command. + returned: on error + type: list +changed: + description: Indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +NAME_EXISTS = None +NODE1_EXISTS = None +NODE2_EXISTS = None + + +def pn_cli(module): + """ + This method is to generate the cli portion to launch the Netvisor cli. + It parses the username, password, switch parameters from module. 
+    :param module: The Ansible module to fetch username, password and switch
+    :return: returns the cli string for further processing
+    """
+    username = module.params['pn_cliusername']
+    password = module.params['pn_clipassword']
+    cliswitch = module.params['pn_cliswitch']
+
+    if username and password:
+        cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
+    else:
+        cli = '/usr/bin/cli --quiet '
+
+    if cliswitch == 'local':
+        cli += ' switch-local '
+    else:
+        cli += ' switch ' + cliswitch
+    return cli
+
+
+def check_cli(module, cli):
+    """
+    This method checks for idempotency using the cluster-show command.
+    If a cluster with given name exists, return NAME_EXISTS as True else False.
+    If the given cluster-node-1 is already a part of another cluster, return
+    NODE1_EXISTS as True else False.
+    If the given cluster-node-2 is already a part of another cluster, return
+    NODE2_EXISTS as True else False.
+    :param module: The Ansible module to fetch input parameters
+    :param cli: The CLI string
+    :return Global Booleans: NAME_EXISTS, NODE1_EXISTS, NODE2_EXISTS
+    """
+    name = module.params['pn_name']
+    node1 = module.params['pn_cluster_node1']
+    node2 = module.params['pn_cluster_node2']
+
+    show = cli + ' cluster-show format name,cluster-node-1,cluster-node-2 '
+    show = shlex.split(show)
+    out = module.run_command(show)[1]
+
+    out = out.split()
+    # Global flags
+    global NAME_EXISTS, NODE1_EXISTS, NODE2_EXISTS
+
+    if name in out:
+        NAME_EXISTS = True
+    else:
+        NAME_EXISTS = False
+    if node1 in out:
+        NODE1_EXISTS = True
+    else:
+        NODE1_EXISTS = False  # fix: was NODE2_EXISTS, leaving NODE1_EXISTS stale (None)
+    if node2 in out:
+        NODE2_EXISTS = True
+    else:
+        NODE2_EXISTS = False
+
+
+def run_cli(module, cli):
+    """
+    This method executes the cli command on the target node(s) and returns the
+    output. The module then exits based on the output.
+    :param cli: the complete cli string to be executed on the target node(s).
+ :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + command = get_command_from_state(state) + + cmd = shlex.split(cli) + + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + if result != 0: + module.exit_json( + command=print_cli, + stderr=err.strip(), + msg="%s operation failed" % command, + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + stdout=out.strip(), + msg="%s operation completed" % command, + changed=True + ) + + else: + module.exit_json( + command=print_cli, + msg="%s operation completed" % command, + changed=True + ) + + +def get_command_from_state(state): + """ + This method gets appropriate command name for the state specified. It + returns the command name for the specified state. + :param state: The state for which the respective command name is required. 
+ """ + command = None + if state == 'present': + command = 'cluster-create' + if state == 'absent': + command = 'cluster-delete' + return command + + +def main(): + """ This section is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=False, type='str'), + pn_clipassword=dict(required=False, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str', default='local'), + state =dict(required=True, type='str', + choices=['present', 'absent']), + pn_name=dict(required=True, type='str'), + pn_cluster_node1=dict(type='str'), + pn_cluster_node2=dict(type='str'), + pn_validate=dict(type='bool') + ), + required_if=( + ["state", "present", + ["pn_name", "pn_cluster_node1", "pn_cluster_node2"]], + ["state", "absent", ["pn_name"]] + ) + ) + + # Accessing the parameters + state = module.params['state'] + name = module.params['pn_name'] + cluster_node1 = module.params['pn_cluster_node1'] + cluster_node2 = module.params['pn_cluster_node2'] + validate = module.params['pn_validate'] + + command = get_command_from_state(state) + + # Building the CLI command string + cli = pn_cli(module) + + if command == 'cluster-create': + + check_cli(module, cli) + + if NAME_EXISTS is True: + module.exit_json( + skipped=True, + msg='Cluster with name %s already exists' % name + ) + if NODE1_EXISTS is True: + module.exit_json( + skipped=True, + msg='Node %s already part of a cluster' % cluster_node1 + ) + if NODE2_EXISTS is True: + module.exit_json( + skipped=True, + msg='Node %s already part of a cluster' % cluster_node2 + ) + + cli += ' %s name %s ' % (command, name) + cli += 'cluster-node-1 %s cluster-node-2 %s ' % (cluster_node1, + cluster_node2) + if validate is True: + cli += ' validate ' + if validate is False: + cli += ' no-validate ' + + if command == 'cluster-delete': + + check_cli(module, cli) + + if NAME_EXISTS is False: + module.exit_json( + skipped=True, + msg='Cluster with name %s does not exist' % name + ) + cli 
+= ' %s name %s ' % (command, name) + + run_cli(module, cli) + +# AnsibleModule boilerplate +from ansible.module_utils.basic import AnsibleModule + +if __name__ == '__main__': + main() diff --git a/network/netvisor/pn_ospf.py b/network/netvisor/pn_ospf.py new file mode 100644 index 00000000000..7c4cbd1400f --- /dev/null +++ b/network/netvisor/pn_ospf.py @@ -0,0 +1,293 @@ +#!/usr/bin/python +""" PN-CLI vrouter-ospf-add/remove """ + +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +import shlex + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: pn_ospf +author: "Pluribus Networks (@amitsi)" +version_added: "2.2" +version: 1.0 +short_description: CLI command to add/remove ospf protocol to a vRouter. +description: + - Execute vrouter-ospf-add, vrouter-ospf-remove command. + - This command adds/removes Open Shortest Path First(OSPF) routing + protocol to a virtual router(vRouter) service. +options: + pn_cliusername: + description: + - Provide login username if user is not root. + required: False + pn_clipassword: + description: + - Provide login password if user is not root. + required: False + pn_cliswitch: + description: + - Target switch to run the CLI on. + required: False + state: + description: + - Assert the state of the ospf. Use 'present' to add ospf + and 'absent' to remove ospf. 
+ required: True + default: present + choices: ['present', 'absent'] + pn_vrouter_name: + description: + - Specify the name of the vRouter. + required: True + pn_network_ip: + description: + - Specify the network IP (IPv4 or IPv6) address. + required: True + pn_ospf_area: + description: + - Stub area number for the configuration. Required for vrouter-ospf-add. +""" + +EXAMPLES = """ +- name: "Add OSPF to vrouter" + pn_ospf: + state: present + pn_vrouter_name: name-string + pn_network_ip: 192.168.11.2/24 + pn_ospf_area: 1.0.0.0 + +- name: "Remove OSPF from vrouter" + pn_ospf: + state: absent + pn_vrouter_name: name-string +""" + +RETURN = """ +command: + description: The CLI command run on the target node(s). +stdout: + description: The set of responses from the ospf command. + returned: always + type: list +stderr: + description: The set of error responses from the ospf command. + returned: on error + type: list +changed: + description: Indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + + +VROUTER_EXISTS = None +NETWORK_EXISTS = None + + +def pn_cli(module): + """ + This method is to generate the cli portion to launch the Netvisor cli. + It parses the username, password, switch parameters from module. + :param module: The Ansible module to fetch username, password and switch + :return: returns the cli string for further processing + """ + username = module.params['pn_cliusername'] + password = module.params['pn_clipassword'] + cliswitch = module.params['pn_cliswitch'] + + if username and password: + cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password) + else: + cli = '/usr/bin/cli --quiet ' + + if cliswitch == 'local': + cli += ' switch-local ' + else: + cli += ' switch ' + cliswitch + return cli + + +def check_cli(module, cli): + """ + This method checks if vRouter exists on the target node. + This method also checks for idempotency using the vrouter-ospf-show command. 
+ If the given vRouter exists, return VROUTER_EXISTS as True else False. + If an OSPF network with the given ip exists on the given vRouter, + return NETWORK_EXISTS as True else False. + + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Global Booleans: VROUTER_EXISTS, NETWORK_EXISTS + """ + vrouter_name = module.params['pn_vrouter_name'] + network_ip = module.params['pn_network_ip'] + # Global flags + global VROUTER_EXISTS, NETWORK_EXISTS + + # Check for vRouter + check_vrouter = cli + ' vrouter-show format name no-show-headers ' + check_vrouter = shlex.split(check_vrouter) + out = module.run_command(check_vrouter)[1] + out = out.split() + + if vrouter_name in out: + VROUTER_EXISTS = True + else: + VROUTER_EXISTS = False + + # Check for OSPF networks + show = cli + ' vrouter-ospf-show vrouter-name %s ' % vrouter_name + show += 'format network no-show-headers' + show = shlex.split(show) + out = module.run_command(show)[1] + out = out.split() + + if network_ip in out: + NETWORK_EXISTS = True + else: + NETWORK_EXISTS = False + + +def run_cli(module, cli): + """ + This method executes the cli command on the target node(s) and returns the + output. The module then exits based on the output. + :param cli: the complete cli string to be executed on the target node(s). 
+ :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + command = get_command_from_state(state) + cmd = shlex.split(cli) + + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + if result != 0: + module.exit_json( + command=print_cli, + stderr=err.strip(), + msg="%s operation failed" % command, + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + stdout=out.strip(), + msg="%s operation completed" % command, + changed=True + ) + + else: + module.exit_json( + command=print_cli, + msg="%s operation completed" % command, + changed=True + ) + + +def get_command_from_state(state): + """ + This method gets appropriate command name for the state specified. It + returns the command name for the specified state. + :param state: The state for which the respective command name is required. + """ + command = None + if state == 'present': + command = 'vrouter-ospf-add' + if state == 'absent': + command = 'vrouter-ospf-remove' + return command + + +def main(): + """ This section is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=False, type='str'), + pn_clipassword=dict(required=False, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str', default='local'), + state=dict(type='str', default='present', choices=['present', + 'absent']), + pn_vrouter_name=dict(required=True, type='str'), + pn_network_ip=dict(required=True, type='str'), + pn_ospf_area=dict(type='str') + ), + required_if=( + ['state', 'present', + ['pn_network_ip', 'pn_ospf_area']], + ['state', 'absent', ['pn_network_ip']] + ) + ) + + # Accessing the arguments + state = module.params['state'] + vrouter_name = module.params['pn_vrouter_name'] + network_ip = module.params['pn_network_ip'] + ospf_area = module.params['pn_ospf_area'] + + command = get_command_from_state(state) + + # 
Building the CLI command string
+    cli = pn_cli(module)
+    check_cli(module, cli)
+
+    if state == 'present':
+        if VROUTER_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg='vRouter %s does not exist' % vrouter_name
+            )
+        if NETWORK_EXISTS is True:
+            module.exit_json(
+                skipped=True,
+                msg=('OSPF with network ip %s already exists on %s'
+                     % (network_ip, vrouter_name))
+            )
+        cli += (' %s vrouter-name %s network %s ospf-area %s'
+                % (command, vrouter_name, network_ip, ospf_area))
+
+    if state == 'absent':
+        if VROUTER_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg='vRouter %s does not exist' % vrouter_name
+            )
+        if NETWORK_EXISTS is False:
+            module.exit_json(
+                skipped=True,
+                msg=('OSPF with network ip %s does not exist on %s'  # fix: message wrongly said 'already exists'
+                     % (network_ip, vrouter_name))
+            )
+        cli += (' %s vrouter-name %s network %s'
+                % (command, vrouter_name, network_ip))
+
+    run_cli(module, cli)
+# AnsibleModule boilerplate
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+    main()
diff --git a/network/netvisor/pn_ospfarea.py b/network/netvisor/pn_ospfarea.py
new file mode 100644
index 00000000000..d34b145d28a
--- /dev/null
+++ b/network/netvisor/pn_ospfarea.py
@@ -0,0 +1,220 @@
+#!/usr/bin/python
+""" PN-CLI vrouter-ospf-add/remove """
+
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+# + + +import shlex + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: pn_ospfarea +author: "Pluribus Networks (@amitsi)" +version_added: "2.2" +version: 1.0 +short_description: CLI command to add/remove ospf area to/from a vrouter. +description: + - Execute vrouter-ospf-add, vrouter-ospf-remove command. + - This command adds/removes Open Shortest Path First(OSPF) area to/from + a virtual router(vRouter) service. +options: + pn_cliusername: + description: + - Login username. + required: true + pn_clipassword: + description: + - Login password. + required: true + pn_cliswitch: + description: + - Target switch(es) to run the CLI on. + required: False + state: + description: + - State the action to perform. Use 'present' to add ospf-area, 'absent' + to remove ospf-area and 'update' to modify ospf-area. + required: true + choices: ['present', 'absent', 'update'] + pn_vrouter_name: + description: + - Specify the name of the vRouter. + required: true + pn_ospf_area: + description: + - Specify the OSPF area number. + required: true + pn_stub_type: + description: + - Specify the OSPF stub type. + choices: ['none', 'stub', 'stub-no-summary', 'nssa', 'nssa-no-summary'] + pn_prefix_listin: + description: + - OSPF prefix list for filtering incoming packets. + pn_prefix_listout: + description: + - OSPF prefix list for filtering outgoing packets. + pn_quiet: + description: + - Enable/disable system information. + required: false + default: true +""" + +EXAMPLES = """ +- name: "Add OSPF area to vrouter" + pn_ospfarea: + state: present + pn_cliusername: admin + pn_clipassword: admin + pn_ospf_area: 1.0.0.0 + pn_stub_type: stub + +- name: "Remove OSPF from vrouter" + pn_ospf: + state: absent + pn_cliusername: admin + pn_clipassword: admin + pn_vrouter_name: name-string + pn_ospf_area: 1.0.0.0 +""" + +RETURN = """ +command: + description: The CLI command run on the target node(s). 
+stdout: + description: The set of responses from the ospf command. + returned: always + type: list +stderr: + description: The set of error responses from the ospf command. + returned: on error + type: list +changed: + description: Indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + + +def get_command_from_state(state): + """ + This method gets appropriate command name for the state specified. It + returns the command name for the specified state. + :param state: The state for which the respective command name is required. + """ + command = None + if state == 'present': + command = 'vrouter-ospf-area-add' + if state == 'absent': + command = 'vrouter-ospf-area-remove' + if state == 'update': + command = 'vrouter-ospf-area-modify' + return command + + +def main(): + """ This section is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=True, type='str'), + pn_clipassword=dict(required=True, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str'), + state =dict(required=True, type='str', + choices=['present', 'absent', 'update']), + pn_vrouter_name=dict(required=True, type='str'), + pn_ospf_area=dict(required=True, type='str'), + pn_stub_type=dict(type='str', choices=['none', 'stub', 'nssa', + 'stub-no-summary', + 'nssa-no-summary']), + pn_prefix_listin=dict(type='str'), + pn_prefix_listout=dict(type='str'), + pn_quiet=dict(type='bool', default='True') + ) + ) + + # Accessing the arguments + cliusername = module.params['pn_cliusername'] + clipassword = module.params['pn_clipassword'] + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + vrouter_name = module.params['pn_vrouter_name'] + ospf_area = module.params['pn_ospf_area'] + stub_type = module.params['pn_stub_type'] + prefix_listin = module.params['pn_prefix_listin'] + prefix_listout = module.params['pn_prefix_listout'] + quiet = module.params['pn_quiet'] + + command = 
get_command_from_state(state) + + # Building the CLI command string + cli = '/usr/bin/cli' + + if quiet is True: + cli += ' --quiet ' + + cli += ' --user %s:%s ' % (cliusername, clipassword) + + if cliswitch: + if cliswitch == 'local': + cli += ' switch-local ' + else: + cli += ' switch ' + cliswitch + + cli += ' %s vrouter-name %s area %s ' % (command, vrouter_name, ospf_area) + + if stub_type: + cli += ' stub-type ' + stub_type + + if prefix_listin: + cli += ' prefix-list-in ' + prefix_listin + + if prefix_listout: + cli += ' prefix-list-out ' + prefix_listout + + # Run the CLI command + ospfcommand = shlex.split(cli) + + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(ospfcommand) + + # Response in JSON format + if result != 0: + module.exit_json( + command=cli, + stderr=err.rstrip("\r\n"), + changed=False + ) + + else: + module.exit_json( + command=cli, + stdout=out.rstrip("\r\n"), + changed=True + ) + +# AnsibleModule boilerplate +from ansible.module_utils.basic import AnsibleModule + +if __name__ == '__main__': + main() diff --git a/network/netvisor/pn_show.py b/network/netvisor/pn_show.py new file mode 100644 index 00000000000..ff22667a434 --- /dev/null +++ b/network/netvisor/pn_show.py @@ -0,0 +1,198 @@ +#!/usr/bin/python +""" PN CLI show commands """ + +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + + +import shlex + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: pn_show +author: "Pluribus Networks (@amitsi)" +version_added: "2.2" +version: 1.0 +short_description: Run show commands on nvOS device. +description: + - Execute show command in the nodes and returns the results + read from the device. +options: + pn_cliusername: + description: + - Provide login username if user is not root. + required: False + pn_clipassword: + description: + - Provide login password if user is not root. + required: False + pn_cliswitch: + description: + - Target switch(es) to run the cli on. + required: False + pn_command: + description: + - The C(pn_command) takes a CLI show command as value. + required: true + pn_parameters: + description: + - Display output using a specific parameter. Use 'all' to display possible + output. List of comma separated parameters. + pn_options: + description: + - Specify formatting options. +""" + +EXAMPLES = """ +- name: run the vlan-show command + pn_show: + pn_command: 'vlan-show' + pn_parameters: id,scope,ports + pn_options: 'layout vertical' + +- name: run the vlag-show command + pn_show: + pn_command: 'vlag-show' + pn_parameters: 'id,name,cluster,mode' + pn_options: 'no-show-headers' + +- name: run the cluster-show command + pn_show: + pn_command: 'cluster-show' +""" + +RETURN = """ +command: + description: The CLI command run on the target node(s). +stdout: + description: The set of responses from the show command. + returned: always + type: list +stderr: + description: The set of error responses from the show command. + returned: on error + type: list +changed: + description: Indicates whether the CLI caused any change on the target. + returned: always(False) + type: bool +""" + + +def pn_cli(module): + """ + This method is to generate the cli portion to launch the Netvisor cli. + It parses the username, password, switch parameters from module. 
+ :param module: The Ansible module to fetch username, password and switch + :return: returns the cli string for further processing + """ + username = module.params['pn_cliusername'] + password = module.params['pn_clipassword'] + cliswitch = module.params['pn_cliswitch'] + + if username and password: + cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password) + else: + cli = '/usr/bin/cli --quiet ' + + if cliswitch: + if cliswitch == 'local': + cli += ' switch-local ' + else: + cli += ' switch ' + cliswitch + return cli + + +def run_cli(module, cli): + """ + This method executes the cli command on the target node(s) and returns the + output. The module then exits based on the output. + :param cli: the complete cli string to be executed on the target node(s). + :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + command = module.params['pn_command'] + cmd = shlex.split(cli) + + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + if result != 0: + module.exit_json( + command=print_cli, + msg='%s: ' % command, + stderr=err.strip(), + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + msg='%s: ' % command, + stdout=out.strip(), + changed=False + ) + + else: + module.exit_json( + command=cli, + msg='%s: Nothing to display!!!' 
% command, + changed=False + ) + + +def main(): + """ This section is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=True, type='str'), + pn_clipassword=dict(required=True, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str'), + pn_command=dict(required=True, type='str'), + pn_parameters=dict(default='all', type='str'), + pn_options=dict(type='str') + ) + ) + + # Accessing the arguments + command = module.params['pn_command'] + parameters = module.params['pn_parameters'] + options = module.params['pn_options'] + + # Building the CLI command string + cli = pn_cli(module) + + cli += ' %s format %s ' % (command, parameters) + + if options: + cli += options + + run_cli(module, cli) + +# AnsibleModule boilerplate +from ansible.module_utils.basic import AnsibleModule + +if __name__ == '__main__': + main() diff --git a/network/netvisor/pn_trunk.py b/network/netvisor/pn_trunk.py new file mode 100644 index 00000000000..da3c568f203 --- /dev/null +++ b/network/netvisor/pn_trunk.py @@ -0,0 +1,450 @@ +#!/usr/bin/python +""" PN CLI trunk-create/trunk-delete/trunk-modify """ + +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + + +import shlex + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: pn_trunk +author: "Pluribus Networks (@amitsi)" +version_added: "2.2" +version: 1.0 +short_description: CLI command to create/delete/modify a trunk. +description: + - Execute trunk-create or trunk-delete command. + - Trunks can be used to aggregate network links at Layer 2 on the local + switch. Use this command to create a new trunk. +options: + pn_cliusername: + description: + - Provide login username if user is not root. + required: False + pn_clipassword: + description: + - Provide login password if user is not root. + required: False + pn_cliswitch: + description: + - Target switch(es) to run the cli on. + required: False + state: + description: + - State the action to perform. Use 'present' to create trunk, + 'absent' to delete trunk and 'update' to modify trunk. + required: True + choices: ['present', 'absent', 'update'] + pn_name: + description: + - Specify the name for the trunk configuration. + required: true + pn_ports: + description: + - Specify the port number(s) for the link(s) to aggregate into the trunk. + - Required for trunk-create. + pn_speed: + description: + - Specify the port speed or disable the port. + choices: ['disable', '10m', '100m', '1g', '2.5g', '10g', '40g'] + pn_egress_rate_limit: + description: + - Specify an egress port data rate limit for the configuration. + pn_jumbo: + description: + - Specify if the port can receive jumbo frames. + pn_lacp_mode: + description: + - Specify the LACP mode for the configuration. + choices: ['off', 'passive', 'active'] + pn_lacp_priority: + description: + - Specify the LACP priority. This is a number between 1 and 65535 with a + default value of 32768. + pn_lacp_timeout: + description: + - Specify the LACP time out as slow (30 seconds) or fast (4seconds). + The default value is slow. 
+ choices: ['slow', 'fast'] + pn_lacp_fallback: + description: + - Specify the LACP fallback mode as bundles or individual. + choices: ['bundle', 'individual'] + pn_lacp_fallback_timeout: + description: + - Specify the LACP fallback timeout in seconds. The range is between 30 + and 60 seconds with a default value of 50 seconds. + pn_edge_switch: + description: + - Specify if the switch is an edge switch. + pn_pause: + description: + - Specify if pause frames are sent. + pn_description: + description: + - Specify a description for the trunk configuration. + pn_loopback: + description: + - Specify loopback if you want to use loopback. + pn_mirror_receive: + description: + - Specify if the configuration receives mirrored traffic. + pn_unknown_ucast_level: + description: + - Specify an unkown unicast level in percent. The default value is 100%. + pn_unknown_mcast_level: + description: + - Specify an unkown multicast level in percent. The default value is 100%. + pn_broadcast_level: + description: + - Specify a broadcast level in percent. The default value is 100%. + pn_port_macaddr: + description: + - Specify the MAC address of the port. + pn_loopvlans: + description: + - Specify a list of looping vlans. + pn_routing: + description: + - Specify if the port participates in routing on the network. + pn_host: + description: + - Host facing port control setting. +""" + +EXAMPLES = """ +- name: create trunk + pn_trunk: + state: 'present' + pn_name: 'spine-to-leaf' + pn_ports: '11,12,13,14' + +- name: delete trunk + pn_trunk: + state: 'absent' + pn_name: 'spine-to-leaf' +""" + +RETURN = """ +command: + description: The CLI command run on the target node(s). +stdout: + description: The set of responses from the trunk command. + returned: always + type: list +stderr: + description: The set of error responses from the trunk command. + returned: on error + type: list +changed: + description: Indicates whether the CLI caused changes on the target. 
+ returned: always + type: bool +""" +TRUNK_EXISTS = None + + +def pn_cli(module): + """ + This method is to generate the cli portion to launch the Netvisor cli. + It parses the username, password, switch parameters from module. + :param module: The Ansible module to fetch username, password and switch + :return: returns the cli string for further processing + """ + username = module.params['pn_cliusername'] + password = module.params['pn_clipassword'] + cliswitch = module.params['pn_cliswitch'] + + if username and password: + cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password) + else: + cli = '/usr/bin/cli --quiet ' + + if cliswitch == 'local': + cli += ' switch-local ' + else: + cli += ' switch ' + cliswitch + return cli + + +def check_cli(module, cli): + """ + This method checks for idempotency using the trunk-show command. + If a trunk with given name exists, return TRUNK_EXISTS as True else False. + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Global Booleans: TRUNK_EXISTS + """ + name = module.params['pn_name'] + + show = cli + ' trunk-show format switch,name no-show-headers' + show = shlex.split(show) + out = module.run_command(show)[1] + + out = out.split() + # Global flags + global TRUNK_EXISTS + if name in out: + TRUNK_EXISTS = True + else: + TRUNK_EXISTS = False + + +def run_cli(module, cli): + """ + This method executes the cli command on the target node(s) and returns the + output. The module then exits based on the output. + :param cli: the complete cli string to be executed on the target node(s). 
+ :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + command = get_command_from_state(state) + + cmd = shlex.split(cli) + + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + if result != 0: + module.exit_json( + command=print_cli, + stderr=err.strip(), + msg="%s operation failed" % command, + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + stdout=out.strip(), + msg="%s operation completed" % command, + changed=True + ) + + else: + module.exit_json( + command=print_cli, + msg="%s operation completed" % command, + changed=True + ) + + +def get_command_from_state(state): + """ + This method gets appropriate command name for the state specified. It + returns the command name for the specified state. + :param state: The state for which the respective command name is required. 
+ """ + command = None + if state == 'present': + command = 'trunk-create' + if state == 'absent': + command = 'trunk-delete' + if state == 'update': + command = 'trunk-modify' + return command + + +def main(): + """ This portion is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=False, type='str'), + pn_clipassword=dict(required=False, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str', default='local'), + state=dict(required=True, type='str', + choices=['present', 'absent', 'update']), + pn_name=dict(required=True, type='str'), + pn_ports=dict(type='str'), + pn_speed=dict(type='str', + choices=['disable', '10m', '100m', '1g', '2.5g', + '10g', '40g']), + pn_egress_rate_limit=dict(type='str'), + pn_jumbo=dict(type='bool'), + pn_lacp_mode=dict(type='str', choices=[ + 'off', 'passive', 'active']), + pn_lacp_priority=dict(type='int'), + pn_lacp_timeout=dict(type='str'), + pn_lacp_fallback=dict(type='str', choices=[ + 'bundle', 'individual']), + pn_lacp_fallback_timeout=dict(type='str'), + pn_edge_switch=dict(type='bool'), + pn_pause=dict(type='bool'), + pn_description=dict(type='str'), + pn_loopback=dict(type='bool'), + pn_mirror_receive=dict(type='bool'), + pn_unknown_ucast_level=dict(type='str'), + pn_unknown_mcast_level=dict(type='str'), + pn_broadcast_level=dict(type='str'), + pn_port_macaddr=dict(type='str'), + pn_loopvlans=dict(type='str'), + pn_routing=dict(type='bool'), + pn_host=dict(type='bool') + ), + required_if=( + ["state", "present", ["pn_name", "pn_ports"]], + ["state", "absent", ["pn_name"]], + ["state", "update", ["pn_name"]] + ) + ) + + # Accessing the arguments + state = module.params['state'] + name = module.params['pn_name'] + ports = module.params['pn_ports'] + speed = module.params['pn_speed'] + egress_rate_limit = module.params['pn_egress_rate_limit'] + jumbo = module.params['pn_jumbo'] + lacp_mode = module.params['pn_lacp_mode'] + lacp_priority = 
module.params['pn_lacp_priority'] + lacp_timeout = module.params['pn_lacp_timeout'] + lacp_fallback = module.params['pn_lacp_fallback'] + lacp_fallback_timeout = module.params['pn_lacp_fallback_timeout'] + edge_switch = module.params['pn_edge_switch'] + pause = module.params['pn_pause'] + description = module.params['pn_description'] + loopback = module.params['pn_loopback'] + mirror_receive = module.params['pn_mirror_receive'] + unknown_ucast_level = module.params['pn_unknown_ucast_level'] + unknown_mcast_level = module.params['pn_unknown_mcast_level'] + broadcast_level = module.params['pn_broadcast_level'] + port_macaddr = module.params['pn_port_macaddr'] + loopvlans = module.params['pn_loopvlans'] + routing = module.params['pn_routing'] + host = module.params['pn_host'] + + command = get_command_from_state(state) + + # Building the CLI command string + cli = pn_cli(module) + + if command == 'trunk-delete': + + check_cli(module, cli) + if TRUNK_EXISTS is False: + module.exit_json( + skipped=True, + msg='Trunk with name %s does not exist' % name + ) + cli += ' %s name %s ' % (command, name) + + else: + if command == 'trunk-create': + check_cli(module, cli) + if TRUNK_EXISTS is True: + module.exit_json( + skipped=True, + msg='Trunk with name %s already exists' % name + ) + cli += ' %s name %s ' % (command, name) + + # Appending options + if ports: + cli += ' ports ' + ports + + if speed: + cli += ' speed ' + speed + + if egress_rate_limit: + cli += ' egress-rate-limit ' + egress_rate_limit + + if jumbo is True: + cli += ' jumbo ' + if jumbo is False: + cli += ' no-jumbo ' + + if lacp_mode: + cli += ' lacp-mode ' + lacp_mode + + if lacp_priority: + cli += ' lacp-priority ' + lacp_priority + + if lacp_timeout: + cli += ' lacp-timeout ' + lacp_timeout + + if lacp_fallback: + cli += ' lacp-fallback ' + lacp_fallback + + if lacp_fallback_timeout: + cli += ' lacp-fallback-timeout ' + lacp_fallback_timeout + + if edge_switch is True: + cli += ' edge-switch ' + if 
edge_switch is False: + cli += ' no-edge-switch ' + + if pause is True: + cli += ' pause ' + if pause is False: + cli += ' no-pause ' + + if description: + cli += ' description ' + description + + if loopback is True: + cli += ' loopback ' + if loopback is False: + cli += ' no-loopback ' + + if mirror_receive is True: + cli += ' mirror-receive-only ' + if mirror_receive is False: + cli += ' no-mirror-receive-only ' + + if unknown_ucast_level: + cli += ' unknown-ucast-level ' + unknown_ucast_level + + if unknown_mcast_level: + cli += ' unknown-mcast-level ' + unknown_mcast_level + + if broadcast_level: + cli += ' broadcast-level ' + broadcast_level + + if port_macaddr: + cli += ' port-mac-address ' + port_macaddr + + if loopvlans: + cli += ' loopvlans ' + loopvlans + + if routing is True: + cli += ' routing ' + if routing is False: + cli += ' no-routing ' + + if host is True: + cli += ' host-enable ' + if host is False: + cli += ' host-disable ' + + run_cli(module, cli) + +# Ansible boiler-plate +from ansible.module_utils.basic import AnsibleModule + +if __name__ == '__main__': + main() diff --git a/network/netvisor/pn_vlag.py b/network/netvisor/pn_vlag.py new file mode 100644 index 00000000000..66b9ce05960 --- /dev/null +++ b/network/netvisor/pn_vlag.py @@ -0,0 +1,346 @@ +#!/usr/bin/python +""" PN CLI vlag-create/vlag-delete/vlag-modify """ + +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. 
If not, see . +# + + +import shlex + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: pn_vlag +author: "Pluribus Networks (@amitsi)" +version_added: "2.2" +version: 1.0 +short_description: CLI command to create/delete/modify vlag. +description: + - Execute vlag-create/vlag-delete/vlag-modify command. + - A virtual link aggregation group (VLAG) allows links that are physically + connected to two different Pluribus Networks devices to appear as a single + trunk to a third device. The third device can be a switch, server, or any + Ethernet device. A VLAG can provide Layer 2 multipathing, which allows you + to create redundancy by increasing bandwidth, enabling multiple parallel + paths between nodes and loadbalancing traffic where alternative paths exist. +options: + pn_cliusername: + description: + - Provide login username if user is not root. + required: False + pn_clipassword: + description: + - Provide login password if user is not root. + required: False + pn_cliswitch: + description: + - Target switch(es) to run this command on. + state: + description: + - State the action to perform. Use 'present' to create vlag, + 'absent' to delete vlag and 'update' to modify vlag. + required: True + choices: ['present', 'absent', 'update'] + pn_name: + description: + - The C(pn_name) takes a valid name for vlag configuration. + required: true + pn_port: + description: + - Specify the local VLAG port. + - Required for vlag-create. + pn_peer_port: + description: + - Specify the peer VLAG port. + - Required for vlag-create. + pn_mode: + description: + - Specify the mode for the VLAG. Active-standby indicates one side is + active and the other side is in standby mode. Active-active indicates + that both sides of the vlag are up by default. + choices: ['active-active', 'active-standby'] + pn_peer_switch: + description: + - Specify the fabric-name of the peer switch. 
+ pn_failover_action: + description: + - Specify the failover action as move or ignore. + choices: ['move', 'ignore'] + pn_lacp_mode: + description: + - Specify the LACP mode. + choices: ['off', 'passive', 'active'] + pn_lacp_timeout: + description: + - Specify the LACP timeout as slow(30 seconds) or fast(4 seconds). + choices: ['slow', 'fast'] + pn_lacp_fallback: + description: + - Specify the LACP fallback mode as bundles or individual. + choices: ['bundle', 'individual'] + pn_lacp_fallback_timeout: + description: + - Specify the LACP fallback timeout in seconds. The range is between 30 + and 60 seconds with a default value of 50 seconds. +""" + +EXAMPLES = """ +- name: create a VLAG + pn_vlag: + state: 'present' + pn_name: spine-to-leaf + pn_port: 'spine01-to-leaf' + pn_peer_port: 'spine02-to-leaf' + pn_peer_switch: spine02 + pn_mode: 'active-active' + +- name: delete VLAGs + pn_vlag: + state: 'absent' + pn_name: spine-to-leaf +""" + +RETURN = """ +command: + description: The CLI command run on the target node(s). +stdout: + description: The set of responses from the vlag command. + returned: always + type: list +stderr: + description: The set of error responses from the vlag command. + returned: on error + type: list +changed: + description: Indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +VLAG_EXISTS = None + + +def pn_cli(module): + """ + This method is to generate the cli portion to launch the Netvisor cli. + It parses the username, password, switch parameters from module. 
+ :param module: The Ansible module to fetch username, password and switch + :return: returns the cli string for further processing + """ + username = module.params['pn_cliusername'] + password = module.params['pn_clipassword'] + cliswitch = module.params['pn_cliswitch'] + + if username and password: + cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password) + else: + cli = '/usr/bin/cli --quiet ' + + if cliswitch == 'local': + cli += ' switch-local ' + else: + cli += ' switch ' + cliswitch + return cli + + +def check_cli(module, cli): + """ + This method checks for idempotency using the vlag-show command. + If a vlag with given vlag exists, return VLAG_EXISTS as True else False. + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Global Booleans: VLAG_EXISTS + """ + name = module.params['pn_name'] + + show = cli + ' vlag-show format name no-show-headers' + show = shlex.split(show) + out = module.run_command(show)[1] + + out = out.split() + # Global flags + global VLAG_EXISTS + if name in out: + VLAG_EXISTS = True + else: + VLAG_EXISTS = False + + +def run_cli(module, cli): + """ + This method executes the cli command on the target node(s) and returns the + output. The module then exits based on the output. + :param cli: the complete cli string to be executed on the target node(s). 
+ :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + command = get_command_from_state(state) + + cmd = shlex.split(cli) + + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + if result != 0: + module.exit_json( + command=print_cli, + stderr=err.strip(), + msg="%s operation failed" % command, + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + stdout=out.strip(), + msg="%s operation completed" % command, + changed=True + ) + + else: + module.exit_json( + command=print_cli, + msg="%s operation completed" % command, + changed=True + ) + + +def get_command_from_state(state): + """ + This method gets appropriate command name for the state specified. It + returns the command name for the specified state. + :param state: The state for which the respective command name is required. 
+ """ + command = None + if state == 'present': + command = 'vlag-create' + if state == 'absent': + command = 'vlag-delete' + if state == 'update': + command = 'vlag-modify' + return command + + +def main(): + """ This section is for argument parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=False, type='str'), + pn_clipassword=dict(required=False, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str', default='local'), + state =dict(required=True, type='str', + choices=['present', 'absent', 'update']), + pn_name=dict(required=True, type='str'), + pn_port=dict(type='str'), + pn_peer_port=dict(type='str'), + pn_mode=dict(type='str', choices=[ + 'active-standby', 'active-active']), + pn_peer_switch=dict(type='str'), + pn_failover_action=dict(type='str', choices=['move', 'ignore']), + pn_lacp_mode=dict(type='str', choices=[ + 'off', 'passive', 'active']), + pn_lacp_timeout=dict(type='str', choices=['slow', 'fast']), + pn_lacp_fallback=dict(type='str', choices=[ + 'individual', 'bundled']), + pn_lacp_fallback_timeout=dict(type='str') + ), + required_if=( + ["state", "present", ["pn_name", "pn_port", "pn_peer_port", + "pn_peer_switch"]], + ["state", "absent", ["pn_name"]], + ["state", "update", ["pn_name"]] + ) + ) + + # Argument accessing + state = module.params['state'] + name = module.params['pn_name'] + port = module.params['pn_port'] + peer_port = module.params['pn_peer_port'] + mode = module.params['pn_mode'] + peer_switch = module.params['pn_peer_switch'] + failover_action = module.params['pn_failover_action'] + lacp_mode = module.params['pn_lacp_mode'] + lacp_timeout = module.params['pn_lacp_timeout'] + lacp_fallback = module.params['pn_lacp_fallback'] + lacp_fallback_timeout = module.params['pn_lacp_fallback_timeout'] + + command = get_command_from_state(state) + + # Building the CLI command string + cli = pn_cli(module) + + if command == 'vlag-delete': + + check_cli(module, cli) + if VLAG_EXISTS is 
False: + module.exit_json( + skipped=True, + msg='VLAG with name %s does not exist' % name + ) + cli += ' %s name %s ' % (command, name) + + else: + + if command == 'vlag-create': + check_cli(module, cli) + if VLAG_EXISTS is True: + module.exit_json( + skipped=True, + msg='VLAG with name %s already exists' % name + ) + cli += ' %s name %s ' % (command, name) + + if port: + cli += ' port %s peer-port %s ' % (port, peer_port) + + if mode: + cli += ' mode ' + mode + + if peer_switch: + cli += ' peer-switch ' + peer_switch + + if failover_action: + cli += ' failover-' + failover_action + '-L2 ' + + if lacp_mode: + cli += ' lacp-mode ' + lacp_mode + + if lacp_timeout: + cli += ' lacp-timeout ' + lacp_timeout + + if lacp_fallback: + cli += ' lacp-fallback ' + lacp_fallback + + if lacp_fallback_timeout: + cli += ' lacp-fallback-timeout ' + lacp_fallback_timeout + + run_cli(module, cli) + +# AnsibleModule boilerplate +from ansible.module_utils.basic import AnsibleModule + +if __name__ == '__main__': + main() diff --git a/network/netvisor/pn_vlan.py b/network/netvisor/pn_vlan.py new file mode 100644 index 00000000000..c79e4ba5e3e --- /dev/null +++ b/network/netvisor/pn_vlan.py @@ -0,0 +1,311 @@ +#!/usr/bin/python +""" PN CLI vlan-create/vlan-delete """ + +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + + +import shlex + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: pn_vlan +author: "Pluribus Networks (@amitsi)" +version_added: "2.2" +version: 1.0 +short_description: CLI command to create/delete a VLAN. +description: + - Execute vlan-create or vlan-delete command. + - VLANs are used to isolate network traffic at Layer 2.The VLAN identifiers + 0 and 4095 are reserved and cannot be used per the IEEE 802.1Q standard. + The range of configurable VLAN identifiers is 2 through 4092. +options: + pn_cliusername: + description: + - Provide login username if user is not root. + required: False + pn_clipassword: + description: + - Provide login password if user is not root. + required: False + pn_cliswitch: + description: + - Target switch(es) to run the cli on. + required: False + state: + description: + - State the action to perform. Use 'present' to create vlan and + 'absent' to delete vlan. + required: True + choices: ['present', 'absent'] + pn_vlanid: + description: + - Specify a VLAN identifier for the VLAN. This is a value between + 2 and 4092. + required: True + pn_scope: + description: + - Specify a scope for the VLAN. + - Required for vlan-create. + choices: ['fabric', 'local'] + pn_description: + description: + - Specify a description for the VLAN. + pn_stats: + description: + - Specify if you want to collect statistics for a VLAN. Statistic + collection is enabled by default. + pn_ports: + description: + - Specifies the switch network data port number, list of ports, or range + of ports. Port numbers must ne in the range of 1 to 64. + pn_untagged_ports: + description: + - Specifies the ports that should have untagged packets mapped to the + VLAN. Untagged packets are packets that do not contain IEEE 802.1Q VLAN + tags. 
+""" + +EXAMPLES = """ +- name: create a VLAN + pn_vlan: + state: 'present' + pn_vlanid: 1854 + pn_scope: fabric + +- name: delete VLANs + pn_vlan: + state: 'absent' + pn_vlanid: 1854 +""" + +RETURN = """ +command: + description: The CLI command run on the target node(s). +stdout: + description: The set of responses from the vlan command. + returned: always + type: list +stderr: + description: The set of error responses from the vlan command. + returned: on error + type: list +changed: + description: Indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +VLAN_EXISTS = None +MAX_VLAN_ID = 4092 +MIN_VLAN_ID = 2 + + +def pn_cli(module): + """ + This method is to generate the cli portion to launch the Netvisor cli. + It parses the username, password, switch parameters from module. + :param module: The Ansible module to fetch username, password and switch + :return: returns the cli string for further processing + """ + username = module.params['pn_cliusername'] + password = module.params['pn_clipassword'] + cliswitch = module.params['pn_cliswitch'] + + if username and password: + cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password) + else: + cli = '/usr/bin/cli --quiet ' + + if cliswitch == 'local': + cli += ' switch-local ' + else: + cli += ' switch ' + cliswitch + return cli + + +def check_cli(module, cli): + """ + This method checks for idempotency using the vlan-show command. + If a vlan with given vlan id exists, return VLAN_EXISTS as True else False. 
+ :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Global Booleans: VLAN_EXISTS + """ + vlanid = module.params['pn_vlanid'] + + show = cli + \ + ' vlan-show id %s format id,scope no-show-headers' % str(vlanid) + show = shlex.split(show) + out = module.run_command(show)[1] + + out = out.split() + # Global flags + global VLAN_EXISTS + if str(vlanid) in out: + VLAN_EXISTS = True + else: + VLAN_EXISTS = False + + +def run_cli(module, cli): + """ + This method executes the cli command on the target node(s) and returns the + output. The module then exits based on the output. + :param cli: the complete cli string to be executed on the target node(s). + :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + state= module.params['state'] + command = get_command_from_state(state) + + cmd = shlex.split(cli) + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + + if result != 0: + module.exit_json( + command=print_cli, + stderr=err.strip(), + msg="%s operation failed" % command, + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + stdout=out.strip(), + msg="%s operation completed" % command, + changed=True + ) + + else: + module.exit_json( + command=print_cli, + msg="%s operation completed" % command, + changed=True + ) + + +def get_command_from_state(state): + """ + This method gets appropriate command name for the state specified. It + returns the command name for the specified state. + :param state: The state for which the respective command name is required. 
+ """ + command = None + if state == 'present': + command = 'vlan-create' + if state == 'absent': + command = 'vlan-delete' + return command + + +def main(): + """ This section is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=False, type='str'), + pn_clipassword=dict(required=False, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str', default='local'), + state =dict(required=True, type='str', + choices=['present', 'absent']), + pn_vlanid=dict(required=True, type='int'), + pn_scope=dict(type='str', choices=['fabric', 'local']), + pn_description=dict(type='str'), + pn_stats=dict(type='bool'), + pn_ports=dict(type='str'), + pn_untagged_ports=dict(type='str') + ), + required_if=( + ["state", "present", ["pn_vlanid", "pn_scope"]], + ["state", "absent", ["pn_vlanid"]] + ) + ) + + # Accessing the arguments + state = module.params['state'] + vlanid = module.params['pn_vlanid'] + scope = module.params['pn_scope'] + description = module.params['pn_description'] + stats = module.params['pn_stats'] + ports = module.params['pn_ports'] + untagged_ports = module.params['pn_untagged_ports'] + + command = get_command_from_state(state) + + # Building the CLI command string + cli = pn_cli(module) + + if not MIN_VLAN_ID <= vlanid <= MAX_VLAN_ID: + module.exit_json( + msg="VLAN id must be between 2 and 4092", + changed=False + ) + + if command == 'vlan-create': + + check_cli(module, cli) + if VLAN_EXISTS is True: + module.exit_json( + skipped=True, + msg='VLAN with id %s already exists' % str(vlanid) + ) + + cli += ' %s id %s scope %s ' % (command, str(vlanid), scope) + + if description: + cli += ' description ' + description + + if stats is True: + cli += ' stats ' + if stats is False: + cli += ' no-stats ' + + if ports: + cli += ' ports ' + ports + + if untagged_ports: + cli += ' untagged-ports ' + untagged_ports + + if command == 'vlan-delete': + + check_cli(module, cli) + if VLAN_EXISTS is False: + 
module.exit_json( + skipped=True, + msg='VLAN with id %s does not exist' % str(vlanid) + ) + + cli += ' %s id %s ' % (command, str(vlanid)) + + run_cli(module, cli) + +# AnsibleModule boilerplate +from ansible.module_utils.basic import AnsibleModule + +if __name__ == '__main__': + main() diff --git a/network/netvisor/pn_vrouter.py b/network/netvisor/pn_vrouter.py new file mode 100644 index 00000000000..bb0cadc76d2 --- /dev/null +++ b/network/netvisor/pn_vrouter.py @@ -0,0 +1,416 @@ +#!/usr/bin/python +""" PN CLI vrouter-create/vrouter-delete/vrouter-modify """ + +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +import shlex + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: pn_vrouter +author: "Pluribus Networks (@amitsi)" +version_added: "2.2" +version: 1 +short_description: CLI command to create/delete/modify a vrouter. +description: + - Execute vrouter-create, vrouter-delete, vrouter-modify command. + - Each fabric, cluster, standalone switch, or virtual network (VNET) can + provide its tenants with a virtual router (vRouter) service that forwards + traffic between networks and implements Layer 3 protocols. + - C(vrouter-create) creates a new vRouter service. + - C(vrouter-delete) deletes a vRouter service. + - C(vrouter-modify) modifies a vRouter service. 
+options: + pn_cliusername: + description: + - Provide login username if user is not root. + required: False + pn_clipassword: + description: + - Provide login password if user is not root. + required: False + pn_cliswitch: + description: + - Target switch(es) to run the CLI on. + required: False + state: + description: + - State the action to perform. Use 'present' to create vrouter, + 'absent' to delete vrouter and 'update' to modify vrouter. + required: True + choices: ['present', 'absent', 'update'] + pn_name: + description: + - Specify the name of the vRouter. + required: true + pn_vnet: + description: + - Specify the name of the VNET. + - Required for vrouter-create. + pn_service_type: + description: + - Specify if the vRouter is a dedicated or shared VNET service. + choices: ['dedicated', 'shared'] + pn_service_state: + description: + - Specify to enable or disable vRouter service. + choices: ['enable', 'disable'] + pn_router_type: + description: + - Specify if the vRouter uses software or hardware. + - Note that if you specify hardware as router type, you cannot assign IP + addresses using DHCP. You must specify a static IP address. + choices: ['hardware', 'software'] + pn_hw_vrrp_id: + description: + - Specifies the VRRP ID for a hardware vrouter. + pn_router_id: + description: + - Specify the vRouter IP address. + pn_bgp_as: + description: + - Specify the Autonomous System Number(ASN) if the vRouter runs Border + Gateway Protocol(BGP). + pn_bgp_redistribute: + description: + - Specify how BGP routes are redistributed. + choices: ['static', 'connected', 'rip', 'ospf'] + pn_bgp_max_paths: + description: + - Specify the maximum number of paths for BGP. This is a number between + 1 and 255 or 0 to unset. + pn_bgp_options: + description: + - Specify other BGP options as a whitespaces separated string within + single quotes ''. + pn_rip_redistribute: + description: + - Specify how RIP routes are redistributed. 
+ choices: ['static', 'connected', 'ospf', 'bgp'] + pn_ospf_redistribute: + description: + - Specify how OSPF routes are redistributed. + choices: ['static', 'connected', 'bgp', 'rip'] + pn_ospf_options: + description: + - Specify other OSPF options as a whitespaces separated string within + single quotes ''. +""" + +EXAMPLES = """ +- name: create vrouter + pn_vrouter: + state: 'present' + pn_name: 'ansible-vrouter' + pn_vnet: 'ansible-fab-global' + pn_router_id: 208.74.182.1 + +- name: delete vrouter + pn_vrouter: + state: 'absent' + pn_name: 'ansible-vrouter' +""" + +RETURN = """ +command: + description: The CLI command run on the target node(s). +stdout: + description: The set of responses from the vrouter command. + returned: always + type: list +stderr: + description: The set of error responses from the vrouter command. + returned: on error + type: list +changed: + description: Indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + +VROUTER_EXISTS = None +VROUTER_NAME_EXISTS = None + + +def pn_cli(module): + """ + This method is to generate the cli portion to launch the Netvisor cli. + It parses the username, password, switch parameters from module. + :param module: The Ansible module to fetch username, password and switch + :return: returns the cli string for further processing + """ + username = module.params['pn_cliusername'] + password = module.params['pn_clipassword'] + cliswitch = module.params['pn_cliswitch'] + + if username and password: + cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password) + else: + cli = '/usr/bin/cli --quiet ' + + if cliswitch == 'local': + cli += ' switch-local ' + else: + cli += ' switch ' + cliswitch + return cli + + +def check_cli(module, cli): + """ + This method checks for idempotency using the vlan-show command. + A switch can have only one vRouter configuration. + If a vRouter already exists on the given switch, return VROUTER_EXISTS as + True else False. 
+ If a vRouter with the given name exists(on a different switch), return + VROUTER_NAME_EXISTS as True else False. + + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Global Booleans: VROUTER_EXISTS, VROUTER_NAME_EXISTS + """ + name = module.params['pn_name'] + # Global flags + global VROUTER_EXISTS, VROUTER_NAME_EXISTS + + # Get the name of the local switch + location = cli + ' switch-setup-show format switch-name' + location = shlex.split(location) + out = module.run_command(location)[1] + location = out.split()[1] + + # Check for any vRouters on the switch + check_vrouter = cli + ' vrouter-show location %s ' % location + check_vrouter += 'format name no-show-headers' + check_vrouter = shlex.split(check_vrouter) + out = module.run_command(check_vrouter)[1] + + if out: + VROUTER_EXISTS = True + else: + VROUTER_EXISTS = False + + # Check for any vRouters with the given name + show = cli + ' vrouter-show format name no-show-headers ' + show = shlex.split(show) + out = module.run_command(show)[1] + out = out.split() + + if name in out: + VROUTER_NAME_EXISTS = True + else: + VROUTER_NAME_EXISTS = False + + +def run_cli(module, cli): + """ + This method executes the cli command on the target node(s) and returns the + output. The module then exits based on the output. + :param cli: the complete cli string to be executed on the target node(s). 
+ :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + command = get_command_from_state(state) + + cmd = shlex.split(cli) + + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + if result != 0: + module.exit_json( + command=print_cli, + stderr=err.strip(), + msg="%s operation failed" % command, + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + stdout=out.strip(), + msg="%s operation completed" % command, + changed=True + ) + + else: + module.exit_json( + command=print_cli, + msg="%s operation completed" % command, + changed=True + ) + + +def get_command_from_state(state): + """ + This method gets appropriate command name for the state specified. It + returns the command name for the specified state. + :param state: The state for which the respective command name is required. 
+ """ + command = None + if state == 'present': + command = 'vrouter-create' + if state == 'absent': + command = 'vrouter-delete' + if state == 'update': + command = 'vrouter-modify' + return command + + +def main(): + """ This section is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=False, type='str'), + pn_clipassword=dict(required=False, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str', default='local'), + state =dict(required=True, type='str', + choices=['present', 'absent', 'update']), + pn_name=dict(required=True, type='str'), + pn_vnet=dict(type='str'), + pn_service_type=dict(type='str', choices=['dedicated', 'shared']), + pn_service_state=dict(type='str', choices=['enable', 'disable']), + pn_router_type=dict(type='str', choices=['hardware', 'software']), + pn_hw_vrrp_id=dict(type='int'), + pn_router_id=dict(type='str'), + pn_bgp_as=dict(type='int'), + pn_bgp_redistribute=dict(type='str', choices=['static', 'connected', + 'rip', 'ospf']), + pn_bgp_max_paths=dict(type='int'), + pn_bgp_options=dict(type='str'), + pn_rip_redistribute=dict(type='str', choices=['static', 'connected', + 'bgp', 'ospf']), + pn_ospf_redistribute=dict(type='str', choices=['static', 'connected', + 'bgp', 'rip']), + pn_ospf_options=dict(type='str'), + pn_vrrp_track_port=dict(type='str') + ), + required_if=( + ["state", "present", ["pn_name", "pn_vnet"]], + ["state", "absent", ["pn_name"]], + ["state", "update", ["pn_name"]] + ) + ) + + # Accessing the arguments + state = module.params['state'] + name = module.params['pn_name'] + vnet = module.params['pn_vnet'] + service_type = module.params['pn_service_type'] + service_state = module.params['pn_service_state'] + router_type = module.params['pn_router_type'] + hw_vrrp_id = module.params['pn_hw_vrrp_id'] + router_id = module.params['pn_router_id'] + bgp_as = module.params['pn_bgp_as'] + bgp_redistribute = module.params['pn_bgp_redistribute'] + bgp_max_paths 
= module.params['pn_bgp_max_paths'] + bgp_options = module.params['pn_bgp_options'] + rip_redistribute = module.params['pn_rip_redistribute'] + ospf_redistribute = module.params['pn_ospf_redistribute'] + ospf_options = module.params['pn_ospf_options'] + vrrp_track_port = module.params['pn_vrrp_track_port'] + + command = get_command_from_state(state) + + # Building the CLI command string + cli = pn_cli(module) + + if command == 'vrouter-delete': + check_cli(module, cli) + if VROUTER_NAME_EXISTS is False: + module.exit_json( + skipped=True, + msg='vRouter with name %s does not exist' % name + ) + cli += ' %s name %s ' % (command, name) + + else: + + if command == 'vrouter-create': + check_cli(module, cli) + if VROUTER_EXISTS is True: + module.exit_json( + skipped=True, + msg='Maximum number of vRouters has been reached on this ' + 'switch' + ) + if VROUTER_NAME_EXISTS is True: + module.exit_json( + skipped=True, + msg='vRouter with name %s already exists' % name + ) + cli += ' %s name %s ' % (command, name) + + if vnet: + cli += ' vnet ' + vnet + + if service_type: + cli += ' %s-vnet-service ' % service_type + + if service_state: + cli += ' ' + service_state + + if router_type: + cli += ' router-type ' + router_type + + if hw_vrrp_id: + cli += ' hw-vrrp-id ' + str(hw_vrrp_id) + + if router_id: + cli += ' router-id ' + router_id + + if bgp_as: + cli += ' bgp-as ' + str(bgp_as) + + if bgp_redistribute: + cli += ' bgp-redistribute ' + bgp_redistribute + + if bgp_max_paths: + cli += ' bgp-max-paths ' + str(bgp_max_paths) + + if bgp_options: + cli += ' %s ' % bgp_options + + if rip_redistribute: + cli += ' rip-redistribute ' + rip_redistribute + + if ospf_redistribute: + cli += ' ospf-redistribute ' + ospf_redistribute + + if ospf_options: + cli += ' %s ' % ospf_options + + if vrrp_track_port: + cli += ' vrrp-track-port ' + vrrp_track_port + + run_cli(module, cli) + +# AnsibleModule boilerplate +from ansible.module_utils.basic import AnsibleModule + +if __name__ == 
'__main__': + main() diff --git a/network/netvisor/pn_vrouterbgp.py b/network/netvisor/pn_vrouterbgp.py new file mode 100644 index 00000000000..e53403da070 --- /dev/null +++ b/network/netvisor/pn_vrouterbgp.py @@ -0,0 +1,474 @@ +#!/usr/bin/python +""" PN-CLI vrouter-bgp-add/vrouter-bgp-remove/vrouter-bgp-modify """ + +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +import shlex + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: pn_vrouterbgp +author: "Pluribus Networks (@amitsi)" +version_added: "2.2" +version: 1.0 +short_description: CLI command to add/remove/modify vrouter-bgp. +description: + - Execute vrouter-bgp-add, vrouter-bgp-remove, vrouter-bgp-modify command. + - Each fabric, cluster, standalone switch, or virtual network (VNET) can + provide its tenants with a vRouter service that forwards traffic between + networks and implements Layer 4 protocols. +options: + pn_cliusername: + description: + - Provide login username if user is not root. + required: False + pn_clipassword: + description: + - Provide login password if user is not root. + required: False + pn_cliswitch: + description: + - Target switch(es) to run the cli on. + required: False + state: + description: + - State the action to perform. Use 'present' to add bgp, + 'absent' to remove bgp and 'update' to modify bgp. 
+ required: True + choices: ['present', 'absent', 'update'] + pn_vrouter_name: + description: + - Specify a name for the vRouter service. + required: True + pn_neighbor: + description: + - Specify a neighbor IP address to use for BGP. + - Required for vrouter-bgp-add. + pn_remote_as: + description: + - Specify the remote Autonomous System(AS) number. This value is between + 1 and 4294967295. + - Required for vrouter-bgp-add. + pn_next_hop_self: + description: + - Specify if the next-hop is the same router or not. + pn_password: + description: + - Specify a password, if desired. + pn_ebgp: + description: + - Specify a value for external BGP to accept or attempt BGP connections + to external peers, not directly connected, on the network. This is a + value between 1 and 255. + pn_prefix_listin: + description: + - Specify the prefix list to filter traffic inbound. + pn_prefix_listout: + description: + - Specify the prefix list to filter traffic outbound. + pn_route_reflector: + description: + - Specify if a route reflector client is used. + pn_override_capability: + description: + - Specify if you want to override capability. + pn_soft_reconfig: + description: + - Specify if you want a soft reconfiguration of inbound traffic. + pn_max_prefix: + description: + - Specify the maximum number of prefixes. + pn_max_prefix_warn: + description: + - Specify if you want a warning message when the maximum number of + prefixes is exceeded. + pn_bfd: + description: + - Specify if you want BFD protocol support for fault detection. + pn_multiprotocol: + description: + - Specify a multi-protocol for BGP. + choices: ['ipv4-unicast', 'ipv6-unicast'] + pn_weight: + description: + - Specify a default weight value between 0 and 65535 for the neighbor + routes. + pn_default_originate: + description: + - Specify if you want announce default routes to the neighbor or not. + pn_keepalive: + description: + - Specify BGP neighbor keepalive interval in seconds. 
+ pn_holdtime: + description: + - Specify BGP neighbor holdtime in seconds. + pn_route_mapin: + description: + - Specify inbound route map for neighbor. + pn_route_mapout: + description: + - Specify outbound route map for neighbor. +""" + +EXAMPLES = """ +- name: add vrouter-bgp + pn_vrouterbgp: + state: 'present' + pn_vrouter_name: 'ansible-vrouter' + pn_neighbor: 104.104.104.1 + pn_remote_as: 1800 + +- name: remove vrouter-bgp + pn_vrouterbgp: + state: 'absent' + pn_name: 'ansible-vrouter' +""" + +RETURN = """ +command: + description: The CLI command run on the target node(s). +stdout: + description: The set of responses from the vrouterbpg command. + returned: always + type: list +stderr: + description: The set of error responses from the vrouterbgp command. + returned: on error + type: list +changed: + description: Indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + + +VROUTER_EXISTS = None +NEIGHBOR_EXISTS = None + + +def pn_cli(module): + """ + This method is to generate the cli portion to launch the Netvisor cli. + It parses the username, password, switch parameters from module. + :param module: The Ansible module to fetch username, password and switch + :return: returns the cli string for further processing + """ + username = module.params['pn_cliusername'] + password = module.params['pn_clipassword'] + cliswitch = module.params['pn_cliswitch'] + + if username and password: + cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password) + else: + cli = '/usr/bin/cli --quiet ' + + if cliswitch == 'local': + cli += ' switch-local ' + else: + cli += ' switch ' + cliswitch + return cli + + +def check_cli(module, cli): + """ + This method checks if vRouter exists on the target node. + This method also checks for idempotency using the vrouter-bgp-show command. + If the given vRouter exists, return VROUTER_EXISTS as True else False. 
+ If a BGP neighbor with the given ip exists on the given vRouter, + return NEIGHBOR_EXISTS as True else False. + + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Global Booleans: VROUTER_EXISTS, NEIGHBOR_EXISTS + """ + vrouter_name = module.params['pn_vrouter_name'] + neighbor = module.params['pn_neighbor'] + # Global flags + global VROUTER_EXISTS, NEIGHBOR_EXISTS + + # Check for vRouter + check_vrouter = cli + ' vrouter-show format name no-show-headers ' + check_vrouter = shlex.split(check_vrouter) + out = module.run_command(check_vrouter)[1] + out = out.split() + + if vrouter_name in out: + VROUTER_EXISTS = True + else: + VROUTER_EXISTS = False + + # Check for BGP neighbors + show = cli + ' vrouter-bgp-show vrouter-name %s ' % vrouter_name + show += 'format neighbor no-show-headers' + show = shlex.split(show) + out = module.run_command(show)[1] + out = out.split() + + if neighbor in out: + NEIGHBOR_EXISTS = True + else: + NEIGHBOR_EXISTS = False + + +def run_cli(module, cli): + """ + This method executes the cli command on the target node(s) and returns the + output. The module then exits based on the output. + :param cli: the complete cli string to be executed on the target node(s). 
+ :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + command = get_command_from_state(state) + + cmd = shlex.split(cli) + + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + if result != 0: + module.exit_json( + command=print_cli, + stderr=err.strip(), + msg="%s operation failed" % command, + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + stdout=out.strip(), + msg="%s operation completed" % command, + changed=True + ) + + else: + module.exit_json( + command=print_cli, + msg="%s operation completed" % command, + changed=True + ) + + +def get_command_from_state(state): + """ + This method gets appropriate command name for the state specified. It + returns the command name for the specified state. + :param state: The state for which the respective command name is required. 
+ """ + command = None + if state == 'present': + command = 'vrouter-bgp-add' + if state == 'absent': + command = 'vrouter-bgp-remove' + if state == 'update': + command = 'vrouter-bgp-modify' + return command + + +def main(): + """ This portion is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=False, type='str'), + pn_clipassword=dict(required=False, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str', default='local'), + state=dict(required=True, type='str', + choices=['present', 'absent', 'update']), + pn_vrouter_name=dict(required=True, type='str'), + pn_neighbor=dict(type='str'), + pn_remote_as=dict(type='str'), + pn_next_hop_self=dict(type='bool'), + pn_password=dict(type='str', no_log=True), + pn_ebgp=dict(type='int'), + pn_prefix_listin=dict(type='str'), + pn_prefix_listout=dict(type='str'), + pn_route_reflector=dict(type='bool'), + pn_override_capability=dict(type='bool'), + pn_soft_reconfig=dict(type='bool'), + pn_max_prefix=dict(type='int'), + pn_max_prefix_warn=dict(type='bool'), + pn_bfd=dict(type='bool'), + pn_multiprotocol=dict(type='str', + choices=['ipv4-unicast', 'ipv6-unicast']), + pn_weight=dict(type='int'), + pn_default_originate=dict(type='bool'), + pn_keepalive=dict(type='str'), + pn_holdtime=dict(type='str'), + pn_route_mapin=dict(type='str'), + pn_route_mapout=dict(type='str') + ), + required_if=( + ["state", "present", + ["pn_vrouter_name", "pn_neighbor", "pn_remote_as"]], + ["state", "absent", + ["pn_vrouter_name", "pn_neighbor"]], + ["state", "update", + ["pn_vrouter_name", "pn_neighbor"]] + ) + ) + + # Accessing the arguments + state= module.params['state'] + vrouter_name = module.params['pn_vrouter_name'] + neighbor = module.params['pn_neighbor'] + remote_as = module.params['pn_remote_as'] + next_hop_self = module.params['pn_next_hop_self'] + password = module.params['pn_password'] + ebgp = module.params['pn_ebgp'] + prefix_listin = 
module.params['pn_prefix_listin'] + prefix_listout = module.params['pn_prefix_listout'] + route_reflector = module.params['pn_route_reflector'] + override_capability = module.params['pn_override_capability'] + soft_reconfig = module.params['pn_soft_reconfig'] + max_prefix = module.params['pn_max_prefix'] + max_prefix_warn = module.params['pn_max_prefix_warn'] + bfd = module.params['pn_bfd'] + multiprotocol = module.params['pn_multiprotocol'] + weight = module.params['pn_weight'] + default_originate = module.params['pn_default_originate'] + keepalive = module.params['pn_keepalive'] + holdtime = module.params['pn_holdtime'] + route_mapin = module.params['pn_route_mapin'] + route_mapout = module.params['pn_route_mapout'] + + # Building the CLI command string + cli = pn_cli(module) + + command = get_command_from_state(state) + if command == 'vrouter-bgp-remove': + check_cli(module, cli) + if VROUTER_EXISTS is False: + module.exit_json( + skipped=True, + msg='vRouter %s does not exist' % vrouter_name + ) + if NEIGHBOR_EXISTS is False: + module.exit_json( + skipped=True, + msg=('BGP neighbor with IP %s does not exist on %s' + % (neighbor, vrouter_name)) + ) + cli += (' %s vrouter-name %s neighbor %s ' + % (command, vrouter_name, neighbor)) + + else: + + if command == 'vrouter-bgp-add': + check_cli(module, cli) + if VROUTER_EXISTS is False: + module.exit_json( + skipped=True, + msg='vRouter %s does not exist' % vrouter_name + ) + if NEIGHBOR_EXISTS is True: + module.exit_json( + skipped=True, + msg=('BGP neighbor with IP %s already exists on %s' + % (neighbor, vrouter_name)) + ) + + cli += (' %s vrouter-name %s neighbor %s ' + % (command, vrouter_name, neighbor)) + + if remote_as: + cli += ' remote-as ' + str(remote_as) + + if next_hop_self is True: + cli += ' next-hop-self ' + if next_hop_self is False: + cli += ' no-next-hop-self ' + + if password: + cli += ' password ' + password + + if ebgp: + cli += ' ebgp-multihop ' + str(ebgp) + + if prefix_listin: + cli += ' 
prefix-list-in ' + prefix_listin + + if prefix_listout: + cli += ' prefix-list-out ' + prefix_listout + + if route_reflector is True: + cli += ' route-reflector-client ' + if route_reflector is False: + cli += ' no-route-reflector-client ' + + if override_capability is True: + cli += ' override-capability ' + if override_capability is False: + cli += ' no-override-capability ' + + if soft_reconfig is True: + cli += ' soft-reconfig-inbound ' + if soft_reconfig is False: + cli += ' no-soft-reconfig-inbound ' + + if max_prefix: + cli += ' max-prefix ' + str(max_prefix) + + if max_prefix_warn is True: + cli += ' max-prefix-warn-only ' + if max_prefix_warn is False: + cli += ' no-max-prefix-warn-only ' + + if bfd is True: + cli += ' bfd ' + if bfd is False: + cli += ' no-bfd ' + + if multiprotocol: + cli += ' multi-protocol ' + multiprotocol + + if weight: + cli += ' weight ' + str(weight) + + if default_originate is True: + cli += ' default-originate ' + if default_originate is False: + cli += ' no-default-originate ' + + if keepalive: + cli += ' neighbor-keepalive-interval ' + keepalive + + if holdtime: + cli += ' neighbor-holdtime ' + holdtime + + if route_mapin: + cli += ' route-map-in ' + route_mapin + + if route_mapout: + cli += ' route-map-out ' + route_mapout + + run_cli(module, cli) +# Ansible boiler-plate +from ansible.module_utils.basic import AnsibleModule + +if __name__ == '__main__': + main() diff --git a/network/netvisor/pn_vrouterif.py b/network/netvisor/pn_vrouterif.py new file mode 100644 index 00000000000..4c5df6b7fd4 --- /dev/null +++ b/network/netvisor/pn_vrouterif.py @@ -0,0 +1,483 @@ +#!/usr/bin/python +""" PN-CLI vrouter-interface-add/remove/modify """ + +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +import shlex + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: pn_vrouterif +author: "Pluribus Networks (@amitsi)" +version_added: "2.2" +version: 1.0 +short_description: CLI command to add/remove/modify vrouter-interface. +description: + - Execute vrouter-interface-add, vrouter-interface-remove, + vrouter-interface-modify command. + - You configure interfaces to vRouter services on a fabric, cluster, + standalone switch or virtual network(VNET). +options: + pn_cliusername: + description: + - Provide login username if user is not root. + required: False + pn_clipassword: + description: + - Provide login password if user is not root. + required: False + pn_cliswitch: + description: + - Target switch to run the cli on. + required: False + state: + description: + - State the action to perform. Use 'present' to add vrouter interface, + 'absent' to remove vrouter interface and 'update' to modify vrouter + interface. + required: True + choices: ['present', 'absent', 'update'] + pn_vrouter_name: + description: + - Specify the name of the vRouter interface. + required: True + pn_vlan: + description: + - Specify the VLAN identifier. This is a value between 1 and 4092. + pn_interface_ip: + description: + - Specify the IP address of the interface in x.x.x.x/n format. + pn_assignment: + description: + - Specify the DHCP method for IP address assignment. + choices: ['none', 'dhcp', 'dhcpv6', 'autov6'] + pn_vxlan: + description: + - Specify the VXLAN identifier. This is a value between 1 and 16777215. 
+ pn_interface: + description: + - Specify if the interface is management, data or span interface. + choices: ['mgmt', 'data', 'span'] + pn_alias: + description: + - Specify an alias for the interface. + pn_exclusive: + description: + - Specify if the interface is exclusive to the configuration. Exclusive + means that other configurations cannot use the interface. Exclusive is + specified when you configure the interface as span interface and allows + higher throughput through the interface. + pn_nic_enable: + description: + - Specify if the NIC is enabled or not + pn_vrrp_id: + description: + - Specify the ID for the VRRP interface. The IDs on both vRouters must be + the same IS number. + pn_vrrp_priority: + description: + - Specify the priority for the VRRP interface. This is a value between + 1 (lowest) and 255 (highest). + pn_vrrp_adv_int: + description: + - Specify a VRRP advertisement interval in milliseconds. The range is + from 30 to 40950 with a default value of 1000. + pn_l3port: + description: + - Specify a Layer 3 port for the interface. + pn_secondary_macs: + description: + - Specify a secondary MAC address for the interface. + pn_nic_str: + description: + - Specify the type of NIC. Used for vrouter-interface remove/modify. +""" + +EXAMPLES = """ +- name: Add vrouter-interface + pn_vrouterif: + pn_cliusername: admin + pn_clipassword: admin + state: 'present' + pn_vrouter_name: 'ansible-vrouter' + pn_interface_ip: 101.101.101.2/24 + pn_vlan: 101 + +- name: Add VRRP.. 
+ pn_vrouterif: + pn_cliusername: admin + pn_clipassword: admin + state: 'present' + pn_vrouter_name: 'ansible-vrouter' + pn_interface_ip: 101.101.101.2/24 + pn_vrrp_ip: 101.101.101.1/24 + pn_vrrp_priority: 100 + pn_vlan: 101 + +- name: Remove vrouter-interface + pn_vrouterif: + pn_cliusername: admin + pn_clipassword: admin + state: 'absent' + pn_vrouter_name: 'ansible-vrouter' + pn_interface_ip: 101.101.101.2/24 +""" + +RETURN = """ +vrouterifcmd: + description: The CLI command run on the target node(s). +stdout/msg: + description: The set of responses from the vrouterif command. + returned: on success + type: list +stderr/msg: + description: The set of error responses from the vrouterif command. + returned: on error + type: str +changed: + description: Indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + + +VROUTER_EXISTS = None +INTERFACE_EXISTS = None +NIC_EXISTS = None +VRRP_EXISTS = None + + +def pn_cli(module): + """ + This method is to generate the cli portion to launch the Netvisor cli. + It parses the username, password, switch parameters from module. + :param module: The Ansible module to fetch username, password and switch + :return: returns the cli string for further processing + """ + username = module.params['pn_cliusername'] + password = module.params['pn_clipassword'] + cliswitch = module.params['pn_cliswitch'] + + if username and password: + cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password) + else: + cli = '/usr/bin/cli --quiet ' + + if cliswitch == 'local': + cli += ' switch-local ' + else: + cli += ' switch ' + cliswitch + return cli + + +def check_cli(module, cli): + """ + This method checks if vRouter exists on the target node. + This method also checks for idempotency using the vrouter-interface-show + command. + If the given vRouter exists, return VROUTER_EXISTS as True else False. 
+ + If an interface with the given ip exists on the given vRouter, + return INTERFACE_EXISTS as True else False. This is required for + vrouter-interface-add. + + If nic_str exists on the given vRouter, return NIC_EXISTS as True else + False. This is required for vrouter-interface-remove. + + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Global Booleans: VROUTER_EXISTS, INTERFACE_EXISTS, NIC_EXISTS + """ + vrouter_name = module.params['pn_vrouter_name'] + interface_ip = module.params['pn_interface_ip'] + nic_str = module.params['pn_nic_str'] + + # Global flags + global VROUTER_EXISTS, INTERFACE_EXISTS, NIC_EXISTS + + # Check for vRouter + check_vrouter = cli + ' vrouter-show format name no-show-headers ' + check_vrouter = shlex.split(check_vrouter) + out = module.run_command(check_vrouter)[1] + out = out.split() + + if vrouter_name in out: + VROUTER_EXISTS = True + else: + VROUTER_EXISTS = False + + if interface_ip: + # Check for interface and VRRP and fetch nic for VRRP + show = cli + ' vrouter-interface-show vrouter-name %s ' % vrouter_name + show += 'ip %s format ip,nic no-show-headers' % interface_ip + show = shlex.split(show) + out = module.run_command(show)[1] + if out: + INTERFACE_EXISTS = True + else: + INTERFACE_EXISTS = False + + if nic_str: + # Check for nic + show = cli + ' vrouter-interface-show vrouter-name %s ' % vrouter_name + show += ' format nic no-show-headers' + show = shlex.split(show) + out = module.run_command(show)[1] + if nic_str in out: + NIC_EXISTS = True + else: + NIC_EXISTS = False + + +def get_nic(module, cli): + """ + This module checks if VRRP interface can be added. If No, return VRRP_EXISTS + as True. + If Yes, fetch the nic string from the primary interface and return nic and + VRRP_EXISTS as False. 
+ :param module: + :param cli: + :return: nic, Global Boolean: VRRP_EXISTS + """ + vrouter_name = module.params['pn_vrouter_name'] + interface_ip = module.params['pn_interface_ip'] + + global VRRP_EXISTS + + # Check for interface and VRRP and fetch nic for VRRP + show = cli + ' vrouter-interface-show vrouter-name %s ' % vrouter_name + show += 'ip %s format ip,nic no-show-headers' % interface_ip + show = shlex.split(show) + out = module.run_command(show)[1] + out = out.split() + + if len(out) > 3: + VRRP_EXISTS = True + return None + else: + nic = out[2] + VRRP_EXISTS = False + return nic + + +def run_cli(module, cli): + """ + This method executes the cli command on the target node(s) and returns the + output. The module then exits based on the output. + :param cli: the complete cli string to be executed on the target node(s). + :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + command = get_command_from_state(state) + + cmd = shlex.split(cli) + + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + if result != 0: + module.exit_json( + command=print_cli, + stderr=err.strip(), + msg="%s operation failed" % command, + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + stdout=out.strip(), + msg="%s operation completed" % command, + changed=True + ) + + else: + module.exit_json( + command=print_cli, + msg="%s operation completed" % command, + changed=True + ) + + +def get_command_from_state(state): + """ + This method gets appropriate command name for the state specified. It + returns the command name for the specified state. + :param state: The state for which the respective command name is required. 
+ """ + command = None + if state == 'present': + command = 'vrouter-interface-add' + if state == 'absent': + command = 'vrouter-interface-remove' + if state == 'update': + command = 'vrouter-interface-modify' + return command + + +def main(): + """ This portion is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=False, type='str'), + pn_clipassword=dict(required=False, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str', default='local'), + state =dict(required=True, type='str', + choices=['present', 'absent']), + pn_vrouter_name=dict(required=True, type='str'), + pn_vlan=dict(type='int'), + pn_interface_ip=dict(required=True, type='str'), + pn_assignment=dict(type='str', + choices=['none', 'dhcp', 'dhcpv6', 'autov6']), + pn_vxlan=dict(type='int'), + pn_interface=dict(type='str', choices=['mgmt', 'data', 'span']), + pn_alias=dict(type='str'), + pn_exclusive=dict(type='bool'), + pn_nic_enable=dict(type='bool'), + pn_vrrp_id=dict(type='int'), + pn_vrrp_priority=dict(type='int'), + pn_vrrp_adv_int=dict(type='str'), + pn_l3port=dict(type='str'), + pn_secondary_macs=dict(type='str'), + pn_nic_str=dict(type='str') + ), + required_if=( + ["state", "present", + ["pn_vrouter_name", "pn_interface_ip"]], + ["state", "absent", + ["pn_vrouter_name", "pn_nic_str"]] + ), + ) + + # Accessing the arguments + state = module.params['state'] + vrouter_name = module.params['pn_vrouter_name'] + vlan = module.params['pn_vlan'] + interface_ip = module.params['pn_interface_ip'] + assignment = module.params['pn_assignment'] + vxlan = module.params['pn_vxlan'] + interface = module.params['pn_interface'] + alias = module.params['pn_alias'] + exclusive = module.params['pn_exclusive'] + nic_enable = module.params['pn_nic_enable'] + vrrp_id = module.params['pn_vrrp_id'] + vrrp_priority = module.params['pn_vrrp_priority'] + vrrp_adv_int = module.params['pn_vrrp_adv_int'] + l3port = module.params['pn_l3port'] + 
secondary_macs = module.params['pn_secondary_macs'] + nic_str = module.params['pn_nic_str'] + + command = get_command_from_state(state) + + # Building the CLI command string + cli = pn_cli(module) + + check_cli(module, cli) + if command == 'vrouter-interface-add': + if VROUTER_EXISTS is False: + module.exit_json( + skipped=True, + msg='vRouter %s does not exist' % vrouter_name + ) + + if vrrp_id: + vrrp_primary = get_nic(module, cli) + if VRRP_EXISTS is True: + module.exit_json( + skipped=True, + msg=('VRRP interface on %s already exists. Check ' + 'the IP addresses' % vrouter_name) + ) + cli += ' %s vrouter-name %s ' % (command, vrouter_name) + cli += (' ip %s vrrp-primary %s vrrp-id %s ' + % (interface_ip, vrrp_primary, str(vrrp_id))) + if vrrp_priority: + cli += ' vrrp-priority %s ' % str(vrrp_priority) + if vrrp_adv_int: + cli += ' vrrp-adv-int %s ' % vrrp_adv_int + + else: + if INTERFACE_EXISTS is True: + module.exit_json( + skipped=True, + msg=('vRouter interface on %s already exists. 
Check the ' + 'IP addresses' % vrouter_name) + ) + cli += ' %s vrouter-name %s ' % (command, vrouter_name) + cli += ' ip %s ' % interface_ip + + if vlan: + cli += ' vlan ' + str(vlan) + + if l3port: + cli += ' l3-port ' + l3port + + if assignment: + cli += ' assignment ' + assignment + + if vxlan: + cli += ' vxlan ' + str(vxlan) + + if interface: + cli += ' if ' + interface + + if alias: + cli += ' alias-on ' + alias + + if exclusive is True: + cli += ' exclusive ' + if exclusive is False: + cli += ' no-exclusive ' + + if nic_enable is True: + cli += ' nic-enable ' + if nic_enable is False: + cli += ' nic-disable ' + + if secondary_macs: + cli += ' secondary-macs ' + secondary_macs + + if command == 'vrouter-interface-remove': + if VROUTER_EXISTS is False: + module.exit_json( + skipped=True, + msg='vRouter %s does not exist' % vrouter_name + ) + if NIC_EXISTS is False: + module.exit_json( + skipped=True, + msg='vRouter interface with nic %s does not exist' % nic_str + ) + cli += ' %s vrouter-name %s nic %s ' % (command, vrouter_name, nic_str) + + run_cli(module, cli) +# Ansible boiler-plate +from ansible.module_utils.basic import AnsibleModule + +if __name__ == '__main__': + main() diff --git a/network/netvisor/pn_vrouterlbif.py b/network/netvisor/pn_vrouterlbif.py new file mode 100644 index 00000000000..e6fc928a205 --- /dev/null +++ b/network/netvisor/pn_vrouterlbif.py @@ -0,0 +1,328 @@ +#!/usr/bin/python +""" PN CLI vrouter-loopback-interface-add/remove """ + +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +import shlex + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: pn_vrouterlbif +author: "Pluribus Networks (@amitsi)" +version_added: "2.2" +version: 1.0 +short_description: CLI command to add/remove vrouter-loopback-interface. +description: + - Execute vrouter-loopback-interface-add, vrouter-loopback-interface-remove + commands. + - Each fabric, cluster, standalone switch, or virtual network (VNET) can + provide its tenants with a virtual router (vRouter) service that forwards + traffic between networks and implements Layer 3 protocols. +options: + pn_cliusername: + description: + - Provide login username if user is not root. + required: False + pn_clipassword: + description: + - Provide login password if user is not root. + required: False + pn_cliswitch: + description: + - Target switch(es) to run the cli on. + required: False + state: + description: + - State the action to perform. Use 'present' to add vrouter loopback + interface and 'absent' to remove vrouter loopback interface. + required: True + choices: ['present', 'absent'] + pn_vrouter_name: + description: + - Specify the name of the vRouter. + required: True + pn_index: + description: + - Specify the interface index from 1 to 255. + pn_interface_ip: + description: + - Specify the IP address. + required: True +""" + +EXAMPLES = """ +- name: add vrouter-loopback-interface + pn_vrouterlbif: + state: 'present' + pn_vrouter_name: 'ansible-vrouter' + pn_interface_ip: '104.104.104.1' + +- name: remove vrouter-loopback-interface + pn_vrouterlbif: + state: 'absent' + pn_vrouter_name: 'ansible-vrouter' + pn_interface_ip: '104.104.104.1' +""" + +RETURN = """ +command: + description: The CLI command run on the target node(s). 
+stdout: + description: The set of responses from the vrouterlb command. + returned: always + type: list +stderr: + description: The set of error responses from the vrouterlb command. + returned: on error + type: list +changed: + description: Indicates whether the CLI caused changes on the target. + returned: always + type: bool +""" + + +VROUTER_EXISTS = None +LB_INTERFACE_EXISTS = None +# Index range +MIN_INDEX = 1 +MAX_INDEX = 255 + + +def pn_cli(module): + """ + This method is to generate the cli portion to launch the Netvisor cli. + It parses the username, password, switch parameters from module. + :param module: The Ansible module to fetch username, password and switch + :return: returns the cli string for further processing + """ + username = module.params['pn_cliusername'] + password = module.params['pn_clipassword'] + cliswitch = module.params['pn_cliswitch'] + + if username and password: + cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password) + else: + cli = '/usr/bin/cli --quiet ' + + if cliswitch == 'local': + cli += ' switch-local ' + else: + cli += ' switch ' + cliswitch + return cli + + +def check_cli(module, cli): + """ + This method checks if vRouter exists on the target node. + This method also checks for idempotency using the + vrouter-loopback-interface-show command. + If the given vRouter exists, return VROUTER_EXISTS as True else False. + If a loopback interface with the given ip exists on the given vRouter, + return LB_INTERFACE_EXISTS as True else False. 
+ + :param module: The Ansible module to fetch input parameters + :param cli: The CLI string + :return Global Booleans: VROUTER_EXISTS, LB_INTERFACE_EXISTS + """ + vrouter_name = module.params['pn_vrouter_name'] + interface_ip = module.params['pn_interface_ip'] + + # Global flags + global VROUTER_EXISTS, LB_INTERFACE_EXISTS + + # Check for vRouter + check_vrouter = cli + ' vrouter-show format name no-show-headers ' + check_vrouter = shlex.split(check_vrouter) + out = module.run_command(check_vrouter)[1] + out = out.split() + + if vrouter_name in out: + VROUTER_EXISTS = True + else: + VROUTER_EXISTS = False + + # Check for loopback interface + show = (cli + ' vrouter-loopback-interface-show vrouter-name %s format ip ' + 'no-show-headers' % vrouter_name) + show = shlex.split(show) + out = module.run_command(show)[1] + out = out.split() + + if interface_ip in out: + LB_INTERFACE_EXISTS = True + else: + LB_INTERFACE_EXISTS = False + + +def run_cli(module, cli): + """ + This method executes the cli command on the target node(s) and returns the + output. The module then exits based on the output. + :param cli: the complete cli string to be executed on the target node(s). 
+ :param module: The Ansible module to fetch command + """ + cliswitch = module.params['pn_cliswitch'] + state = module.params['state'] + command = get_command_from_state(state) + + cmd = shlex.split(cli) + + # 'out' contains the output + # 'err' contains the error messages + result, out, err = module.run_command(cmd) + + print_cli = cli.split(cliswitch)[1] + + # Response in JSON format + if result != 0: + module.exit_json( + command=print_cli, + stderr=err.strip(), + msg="%s operation failed" % command, + changed=False + ) + + if out: + module.exit_json( + command=print_cli, + stdout=out.strip(), + msg="%s operation completed" % command, + changed=True + ) + + else: + module.exit_json( + command=print_cli, + msg="%s operation completed" % command, + changed=True + ) + + +def get_command_from_state(state): + """ + This method gets appropriate command name for the state specified. It + returns the command name for the specified state. + :param state: The state for which the respective command name is required. 
+ """ + command = None + if state == 'present': + command = 'vrouter-loopback-interface-add' + if state == 'absent': + command = 'vrouter-loopback-interface-remove' + return command + + +def main(): + """ This portion is for arguments parsing """ + module = AnsibleModule( + argument_spec=dict( + pn_cliusername=dict(required=False, type='str'), + pn_clipassword=dict(required=False, type='str', no_log=True), + pn_cliswitch=dict(required=False, type='str', default='local'), + state =dict(required=True, type='str', + choices=['present', 'absent']), + pn_vrouter_name=dict(required=True, type='str'), + pn_interface_ip=dict(type='str'), + pn_index=dict(type='int') + ), + required_if=( + ["state", "present", + ["pn_vrouter_name", "pn_interface_ip"]], + ["state", "absent", + ["pn_vrouter_name", "pn_interface_ip"]] + ) + ) + + # Accessing the arguments + state = module.params['state'] + vrouter_name = module.params['pn_vrouter_name'] + interface_ip = module.params['pn_interface_ip'] + index = module.params['pn_index'] + + command = get_command_from_state(state) + + # Building the CLI command string + cli = pn_cli(module) + + if index: + if not MIN_INDEX <= index <= MAX_INDEX: + module.exit_json( + msg="Index must be between 1 and 255", + changed=False + ) + index = str(index) + + if command == 'vrouter-loopback-interface-remove': + check_cli(module, cli) + if VROUTER_EXISTS is False: + module.exit_json( + skipped=True, + msg='vRouter %s does not exist' % vrouter_name + ) + if LB_INTERFACE_EXISTS is False: + module.exit_json( + skipped=True, + msg=('Loopback interface with IP %s does not exist on %s' + % (interface_ip, vrouter_name)) + ) + if not index: + # To remove loopback interface, we need the index. + # If index is not specified, get the Loopback interface index + # using the given interface ip. 
+ get_index = cli + get_index += (' vrouter-loopback-interface-show vrouter-name %s ip ' + '%s ' % (vrouter_name, interface_ip)) + get_index += 'format index no-show-headers' + + get_index = shlex.split(get_index) + out = module.run_command(get_index)[1] + index = out.split()[1] + + cli += ' %s vrouter-name %s index %s' % (command, vrouter_name, index) + + if command == 'vrouter-loopback-interface-add': + check_cli(module, cli) + if VROUTER_EXISTS is False: + module.exit_json( + skipped=True, + msg=('vRouter %s does not exist' % vrouter_name) + ) + if LB_INTERFACE_EXISTS is True: + module.exit_json( + skipped=True, + msg=('Loopback interface with IP %s already exists on %s' + % (interface_ip, vrouter_name)) + ) + cli += (' %s vrouter-name %s ip %s' + % (command, vrouter_name, interface_ip)) + if index: + cli += ' index %s ' % index + + run_cli(module, cli) + +# Ansible boiler-plate +from ansible.module_utils.basic import AnsibleModule + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_template.py b/network/nxos/_nxos_template.py similarity index 74% rename from network/nxos/nxos_template.py rename to network/nxos/_nxos_template.py index 5e90bf36ba8..a5b976aa4ce 100644 --- a/network/nxos/nxos_template.py +++ b/network/nxos/_nxos_template.py @@ -15,6 +15,11 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + + DOCUMENTATION = """ --- module: nxos_template @@ -23,11 +28,12 @@ short_description: Manage Cisco NXOS device configurations description: - Manages network device configurations over SSH or NXAPI. This module - allows implementors to work with the device running-config. It + allows implementers to work with the device running-config. 
It provides a way to push a set of commands onto a network device - by evaluting the current running-config and only pushing configuration + by evaluating the current running-config and only pushing configuration commands that are not already configured. The config source can be a set of commands or a template. +deprecated: Deprecated in 2.2. Use nxos_config instead extends_documentation_fragment: nxos options: src: @@ -46,17 +52,17 @@ without first checking if already configured. required: false default: false - choices: BOOLEANS + choices: [ "true", "false" ] include_defaults: description: - The module, by default, will collect the current device - running-config to use as a base for comparision to the commands + running-config to use as a base for comparisons to the commands in I(src). Setting this value to true will cause the module - to issue the command `show running-config all` to include all + to issue the command C(show running-config all) to include all device settings. required: false default: false - choices: BOOLEANS + choices: [ "true", "false" ] backup: description: - When this argument is configured true, the module will backup @@ -65,7 +71,7 @@ the root of the playbook directory. required: false default: false - choices: BOOLEANS + choices: [ "true", "false" ] config: description: - The module, by default, will connect to the remote device and @@ -73,8 +79,8 @@ against the contents of source. There are times when it is not desirable to have the task get the current running-config for every task in a playbook. The I(config) argument allows the - implementer to pass in the configuruation to use as the base - config for comparision. + implementer to pass in the configuration to use as the base + config for comparison. 
required: false default: null """ @@ -89,7 +95,7 @@ src: config.j2 force: yes -- name: provide the base configuration for comparision +- name: provide the base configuration for comparison nxos_template: src: candidate_config.txt config: current_config.txt @@ -104,32 +110,37 @@ responses: description: The set of responses from issuing the commands on the device - retured: when not check_mode + returned: when not check_mode type: list sample: ['...', '...'] """ +import ansible.module_utils.nxos +from ansible.module_utils.netcfg import NetworkConfig, dumps +from ansible.module_utils.network import NetworkModule def get_config(module): config = module.params['config'] or dict() if not config and not module.params['force']: - config = module.config + config = module.config.get_config() return config def main(): + """ main entry point for module execution + """ argument_spec = dict( src=dict(), force=dict(default=False, type='bool'), - include_defaults=dict(default=False, type='bool'), + include_defaults=dict(default=True, type='bool'), backup=dict(default=False, type='bool'), - config=dict() + config=dict(), ) mutually_exclusive = [('config', 'backup'), ('config', 'force')] - module = get_module(argument_spec=argument_spec, - mutually_exclusive=mutually_exclusive, - supports_check_mode=True) + module = NetworkModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) result = dict(changed=False) @@ -138,27 +149,24 @@ def main(): contents = get_config(module) if contents: config = NetworkConfig(contents=contents, indent=2) - result['_backup'] = contents + result['_backup'] = str(contents) if not module.params['force']: commands = candidate.difference(config) + commands = dumps(commands, 'commands').split('\n') + commands = [str(c) for c in commands if c] else: commands = str(candidate).split('\n') if commands: if not module.check_mode: - commands = [str(c).strip() for c in commands] - response = module.configure(commands) + 
response = module.config(commands) result['responses'] = response result['changed'] = True result['updates'] = commands module.exit_json(**result) -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -from ansible.module_utils.shell import * -from ansible.module_utils.netcfg import * -from ansible.module_utils.nxos import * + if __name__ == '__main__': main() diff --git a/network/nxos/nxos_aaa_server.py b/network/nxos/nxos_aaa_server.py new file mode 100644 index 00000000000..6b4f52ae4e3 --- /dev/null +++ b/network/nxos/nxos_aaa_server.py @@ -0,0 +1,573 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- + +module: nxos_aaa_server +version_added: "2.2" +short_description: Manages AAA server global configuration. +description: + - Manages AAA server global configuration +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) +notes: + - The server_type parameter is always required. + - If encrypt_type is not supplied, the global AAA server key will be + stored as encrypted (type 7). + - Changes to the global AAA server key with encrypt_type=0 + are not idempotent. 
+ - If global AAA server key is not found, it's shown as "unknown" + - state=default will set the supplied parameters to their default values. + The parameters that you want to default must also be set to default. + If global_key=default, the global key will be removed. +options: + server_type: + description: + - The server type is either radius or tacacs. + required: true + choices: ['radius', 'tacacs'] + global_key: + description: + - Global AAA shared secret. + required: false + default: null + encrypt_type: + description: + - The state of encryption applied to the entered global key. + O clear text, 7 encrypted. Type-6 encryption is not supported. + required: false + default: null + choices: ['0', '7'] + deadtime: + description: + - Duration for which a non-reachable AAA server is skipped, + in minutes. Range is 1-1440. Device default is 0. + required: false + default: null + server_timeout: + description: + - Global AAA server timeout period, in seconds. Range is 1-60. + Device default is 5. + required: false + default: null + directed_request: + description: + - Enables direct authentication requests to AAA server. + Device default is disabled. + required: false + default: null + choices: ['enabled', 'disabled'] + state: + description: + - Manage the state of the resource. 
+ required: true + default: present + choices: ['present','default'] +''' + +EXAMPLES = ''' +# Radius Server Basic settings + - name: "Radius Server Basic settings" + nxos_aaa_server: + server_type: radius + server_timeout: 9 + deadtime: 20 + directed_request: enabled + host: inventory_hostname }} + username: un }} + password: pwd }} + +# Tacacs Server Basic settings + - name: "Tacacs Server Basic settings" + nxos_aaa_server: + server_type: tacacs + server_timeout: 8 + deadtime: 19 + directed_request: disabled + host: inventory_hostname }} + username: un }} + password: pwd }} + +# Setting Global Key + - name: "AAA Server Global Key" + nxos_aaa_server: + server_type: radius + global_key: test_key + host: inventory_hostname }} + username: un }} + password: pwd }} +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"deadtime": "22", "directed_request": "enabled", + "server_type": "radius", "server_timeout": "11"} +existing: + description: + - k/v pairs of existing aaa server + type: dict + sample: {"deadtime": "0", "directed_request": "disabled", + "global_key": "unknown", "server_timeout": "5"} +end_state: + description: k/v pairs of aaa params after module execution + returned: always + type: dict + sample: {"deadtime": "22", "directed_request": "enabled", + "global_key": "unknown", "server_timeout": "11"} +state: + description: state as sent in from the playbook + returned: always + type: string + sample: "present" +updates: + description: command sent to the device + returned: always + type: list + sample: ["radius-server deadtime 22", "radius-server timeout 11", + "radius-server directed-request"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import 
NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in 
to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 
'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + cmds = [command] + if module.params['transport'] == 'cli': + body = execute_show(cmds, module) + elif module.params['transport'] == 'nxapi': + body = execute_show(cmds, module, command_type=command_type) + return body + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + + +def get_aaa_server_info(server_type, module): + aaa_server_info = {} + server_command = 'show {0}-server'.format(server_type) + request_command = 'show {0}-server directed-request'.format(server_type) + global_key_command = 'show run | sec {0}'.format(server_type) + aaa_regex = '.*{0}-server\skey\s\d\s+(?P\S+).*'.format(server_type) + + server_body = execute_show_command( + server_command, module, command_type='cli_show_ascii')[0] + + split_server = server_body.splitlines() + + for line in split_server: + if line.startswith('timeout'): + aaa_server_info['server_timeout'] = line.split(':')[1] + + elif line.startswith('deadtime'): + aaa_server_info['deadtime'] = line.split(':')[1] + + request_body = execute_show_command( + request_command, 
module, command_type='cli_show_ascii')[0] + aaa_server_info['directed_request'] = request_body.replace('\n', '') + + key_body = execute_show_command( + global_key_command, module, command_type='cli_show_ascii')[0] + + try: + match_global_key = re.match(aaa_regex, key_body, re.DOTALL) + group_key = match_global_key.groupdict() + aaa_server_info['global_key'] = group_key["key"].replace('\"', '') + except (AttributeError, TypeError): + aaa_server_info['global_key'] = 'unknown' + + return aaa_server_info + + +def set_aaa_server_global_key(encrypt_type, key, server_type): + if not encrypt_type: + encrypt_type = '' + return '{0}-server key {1} {2}'.format( + server_type, encrypt_type, key) + + +def config_aaa_server(params, server_type): + cmds = [] + + deadtime = params.get('deadtime') + server_timeout = params.get('server_timeout') + directed_request = params.get('directed_request') + encrypt_type = params.get('encrypt_type', '7') + global_key = params.get('global_key') + + if deadtime is not None: + cmds.append('{0}-server deadtime {1}'.format(server_type, deadtime)) + + if server_timeout is not None: + cmds.append('{0}-server timeout {1}'.format(server_type, server_timeout)) + + if directed_request is not None: + if directed_request == 'enabled': + cmds.append('{0}-server directed-request'.format(server_type)) + elif directed_request == 'disabled': + cmds.append('no {0}-server directed-request'.format(server_type)) + + if global_key is not None: + cmds.append('{0}-server key {1} {2}'.format(server_type, encrypt_type, + global_key)) + + return cmds + + +def default_aaa_server(existing, params, server_type): + cmds = [] + + deadtime = params.get('deadtime') + server_timeout = params.get('server_timeout') + directed_request = params.get('directed_request') + global_key = params.get('global_key') + existing_key = existing.get('global_key') + + if deadtime is not None: + cmds.append('no {0}-server deadtime 1'.format(server_type)) + + if server_timeout is not None: + 
cmds.append('no {0}-server timeout 1'.format(server_type)) + + if directed_request is not None: + cmds.append('no {0}-server directed-request'.format(server_type)) + + if global_key is not None and existing_key is not None: + cmds.append('no {0}-server key 7 {1}'.format(server_type, existing_key)) + + return cmds + + +def main(): + argument_spec = dict( + server_type=dict(type='str', + choices=['radius', 'tacacs'], required=True), + global_key=dict(type='str'), + encrypt_type=dict(type='str', choices=['0', '7']), + deadtime=dict(type='str'), + server_timeout=dict(type='str'), + directed_request=dict(type='str', + choices=['enabled', 'disabled', 'default']), + state=dict(choices=['default', 'present'], default='present'), + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + server_type = module.params['server_type'] + global_key = module.params['global_key'] + encrypt_type = module.params['encrypt_type'] + deadtime = module.params['deadtime'] + server_timeout = module.params['server_timeout'] + directed_request = module.params['directed_request'] + state = module.params['state'] + + if encrypt_type and not global_key: + module.fail_json(msg='encrypt_type must be used with global_key.') + + args = dict(server_type=server_type, global_key=global_key, + encrypt_type=encrypt_type, deadtime=deadtime, + server_timeout=server_timeout, directed_request=directed_request) + + changed = False + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + + existing = get_aaa_server_info(server_type, module) + end_state = existing + + commands = [] + if state == 'present': + if deadtime: + try: + if int(deadtime) < 0 or int(deadtime) > 1440: + raise ValueError + except ValueError: + module.fail_json( + msg='deadtime must be an integer between 0 and 1440') + + if server_timeout: + try: + if int(server_timeout) < 1 or int(server_timeout) > 60: + raise ValueError + except ValueError: + module.fail_json( + msg='server_timeout 
must be an integer between 1 and 60') + + delta = dict(set(proposed.iteritems()).difference( + existing.iteritems())) + if delta: + command = config_aaa_server(delta, server_type) + if command: + commands.append(command) + + elif state == 'default': + for key, value in proposed.iteritems(): + if key != 'server_type' and value != 'default': + module.fail_json( + msg='Parameters must be set to "default"' + 'when state=default') + command = default_aaa_server(existing, proposed, server_type) + if command: + commands.append(command) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + end_state = get_aaa_server_info(server_type, module) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['updates'] = cmds + results['changed'] = changed + results['end_state'] = end_state + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_aaa_server_host.py b/network/nxos/nxos_aaa_server_host.py new file mode 100644 index 00000000000..aef2af144f2 --- /dev/null +++ b/network/nxos/nxos_aaa_server_host.py @@ -0,0 +1,582 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_aaa_server_host +version_added: "2.2" +short_description: Manages AAA server host-specific configuration. +description: + - Manages AAA server host-specific configuration. +extends_documentation_fragment: nxos +author: Jason Edelman (@jedelman8) +notes: + - Changes to the AAA server host key (shared secret) are not idempotent. + - C(state=absent) removes the whole host configuration. +options: + server_type: + description: + - The server type is either radius or tacacs. + required: true + choices: ['radius', 'tacacs'] + address: + description: + - Address or name of the radius or tacacs host. + required: true + key: + description: + - Shared secret for the specified host. + required: false + default: null + encrypt_type: + description: + - The state of encryption applied to the entered key. + 0 for clear text, 7 for encrypted. Type-6 encryption is + not supported. + required: false + default: null + choices: ['0', '7'] + host_timeout: + description: + - Timeout period for specified host, in seconds. Range is 1-60. + required: false + default: null + auth_port: + description: + - Alternate UDP port for RADIUS authentication. + required: false + default: null + acct_port: + description: + - Alternate UDP port for RADIUS accounting. + required: false + default: null + tacacs_port: + description: + - Alternate TCP port TACACS Server. + required: false + default: null + state: + description: + - Manage the state of the resource. 
+ required: false + default: present + choices: ['present','absent'] +''' +EXAMPLES = ''' +# Radius Server Host Basic settings + - name: "Radius Server Host Basic settings" + nxos_aaa_server_host: + state: present + server_type: radius + address: 1.2.3.4 + acct_port: 2084 + host_timeout: 10 + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + +# Radius Server Host Key Configuration + - name: "Radius Server Host Key Configuration" + nxos_aaa_server_host: + state: present + server_type: radius + address: 1.2.3.4 + key: hello + encrypt_type: 7 + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + +# TACACS Server Host Configuration + - name: "Tacacs Server Host Configuration" + nxos_aaa_server_host: + state: present + server_type: tacacs + tacacs_port: 89 + host_timeout: 10 + address: 5.6.7.8 + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"address": "1.2.3.4", "auth_port": "2084", + "host_timeout": "10", "server_type": "radius"} +existing: + description: + - k/v pairs of existing configuration + type: dict + sample: {} +end_state: + description: k/v pairs of configuration after module execution + returned: always + type: dict + sample: {"address": "1.2.3.4", "auth_port": "2084", + "host_timeout": "10", "server_type": "radius"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["radius-server host 1.2.3.4 auth-port 2084 timeout 10"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + + +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from 
ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + 
if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. 
This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when issuing commands containing 'show run'. + """ + if 'xml' in response[0] or response[0] == '\n': + body = [] + elif 'show run' in command: + body = response + else: + try: + if isinstance(response[0], str): + body = [json.loads(response[0])] + else: + body = response + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'show run' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def flatten_list(command_lists): + flat_command_list = 
[] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def _match_dict(match_list, key_map): + no_blanks = [] + match_dict = {} + + for match_set in match_list: + match_set = tuple(v for v in match_set if v) + no_blanks.append(match_set) + + for info in no_blanks: + words = info[0].strip().split() + length = len(words) + alt_key = key_map.get(words[0]) + first = alt_key or words[0] + last = words[length - 1] + match_dict[first] = last.replace('\"', '') + + return match_dict + + +def get_aaa_host_info(module, server_type, address): + aaa_host_info = {} + command = 'show run | inc {0}-server.host.{1}'.format(server_type, address) + + body = execute_show_command(command, module, command_type='cli_show_ascii') + + if body: + try: + pattern = ('(acct-port \d+)|(timeout \d+)|(auth-port \d+)|' + '(key 7 "\w+")|( port \d+)') + raw_match = re.findall(pattern, body[0]) + aaa_host_info = _match_dict(raw_match, {'acct-port': 'acct_port', + 'auth-port': 'auth_port', + 'port': 'tacacs_port', + 'timeout': 'host_timeout'}) + if aaa_host_info: + aaa_host_info['server_type'] = server_type + aaa_host_info['address'] = address + except TypeError: + return {} + else: + return {} + + return aaa_host_info + + +def config_aaa_host(server_type, address, params, clear=False): + cmds = [] + + if clear: + cmds.append('no {0}-server host {1}'.format(server_type, address)) + + cmd_str = '{0}-server host {1}'.format(server_type, address) + + key = params.get('key') + enc_type = params.get('encrypt_type', '') + host_timeout = params.get('host_timeout') + auth_port = params.get('auth_port') + acct_port = params.get('acct_port') + port = params.get('tacacs_port') + + if auth_port: + cmd_str += ' auth-port {0}'.format(auth_port) + if acct_port: + cmd_str += ' acct-port {0}'.format(acct_port) + if port: + cmd_str += ' port {0}'.format(port) + if host_timeout: + cmd_str += ' 
timeout {0}'.format(host_timeout) + if key: + cmds.append('{0}-server host {1} key {2} {3}'.format(server_type, + address, + enc_type, key)) + + cmds.append(cmd_str) + return cmds + + +def main(): + argument_spec = dict( + server_type=dict(choices=['radius', 'tacacs'], required=True), + address=dict(type='str', required=True), + key=dict(type='str'), + encrypt_type=dict(type='str', choices=['0', '7']), + host_timeout=dict(type='str'), + auth_port=dict(type='str'), + acct_port=dict(type='str'), + tacacs_port=dict(type='str'), + state=dict(choices=['absent', 'present'], default='present'), + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + server_type = module.params['server_type'] + address = module.params['address'] + key = module.params['key'] + encrypt_type = module.params['encrypt_type'] + host_timeout = module.params['host_timeout'] + auth_port = module.params['auth_port'] + acct_port = module.params['acct_port'] + tacacs_port = module.params['tacacs_port'] + state = module.params['state'] + + args = dict(server_type=server_type, address=address, key=key, + encrypt_type=encrypt_type, host_timeout=host_timeout, + auth_port=auth_port, acct_port=acct_port, + tacacs_port=tacacs_port) + + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + changed = False + + if encrypt_type and not key: + module.fail_json(msg='encrypt_type must be used with key') + + if tacacs_port and server_type != 'tacacs': + module.fail_json( + msg='tacacs_port can only be used with server_type=tacacs') + + if (auth_port or acct_port) and server_type != 'radius': + module.fail_json(msg='auth_port and acct_port can only be used' + 'when server_type=radius') + + + existing = get_aaa_host_info(module, server_type, address) + end_state = existing + + commands = [] + if state == 'present': + host_timeout = proposed.get('host_timeout') + if host_timeout: + try: + if int(host_timeout) < 1 or int(host_timeout) > 60: + raise ValueError + 
except ValueError: + module.fail_json( + msg='host_timeout must be an integer between 1 and 60') + + delta = dict( + set(proposed.iteritems()).difference(existing.iteritems())) + if delta: + union = existing.copy() + union.update(delta) + command = config_aaa_host(server_type, address, union) + if command: + commands.append(command) + + elif state == 'absent': + intersect = dict( + set(proposed.iteritems()).intersection(existing.iteritems())) + if intersect.get('address') and intersect.get('server_type'): + command = 'no {0}-server host {1}'.format( + intersect.get('server_type'), intersect.get('address')) + commands.append(command) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + end_state = get_aaa_host_info(module, server_type, address) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['updates'] = cmds + results['changed'] = changed + results['end_state'] = end_state + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_acl.py b/network/nxos/nxos_acl.py new file mode 100644 index 00000000000..63762f97615 --- /dev/null +++ b/network/nxos/nxos_acl.py @@ -0,0 +1,816 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_acl +version_added: "2.2" +short_description: Manages access list entries for ACLs. +description: + - Manages access list entries for ACLs. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +notes: + - C(state=absent) removes the ACE if it exists. + - C(state=delete_acl) deletes the ACL if it exists. + - For idempotency, use port numbers for the src/dest port + params like I(src_port1) and names for the well defined protocols + for the I(proto) param. + - Although this module is idempotent in that if the ace as presented in + the task is identical to the one on the switch, no changes will be made. + If there is any difference, what is in Ansible will be pushed (configured + options will be overridden). This is to improve security, but at the + same time remember an ACE is removed, then re-added, so if there is a + change, the new ACE will be exactly what parameters you are sending to + the module. +options: + seq: + description: + - Sequence number of the entry (ACE). + required: false + default: null + name: + description: + - Case sensitive name of the access list (ACL). + required: true + action: + description: + - Action of the ACE. + required: false + default: null + choices: ['permit', 'deny', 'remark'] + remark: + description: + - If action is set to remark, this is the description. + required: false + default: null + proto: + description: + - Port number or protocol (as supported by the switch). + required: false + default: null + src: + description: + - Source ip and mask using IP/MASK notation and + supports keyword 'any'. + required: false + default: null + src_port_op: + description: + - Source port operands such as eq, neq, gt, lt, range. 
+ required: false + default: null + choices: ['any', 'eq', 'gt', 'lt', 'neq', 'range'] + src_port1: + description: + - Port/protocol and also first (lower) port when using range + operand. + required: false + default: null + src_port2: + description: + - Second (end) port when using range operand. + required: false + default: null + dest: + description: + - Destination ip and mask using IP/MASK notation and supports the + keyword 'any'. + required: false + default: null + dest_port_op: + description: + - Destination port operands such as eq, neq, gt, lt, range. + required: false + default: null + choices: ['any', 'eq', 'gt', 'lt', 'neq', 'range'] + dest_port1: + description: + - Port/protocol and also first (lower) port when using range + operand. + required: false + default: null + dest_port2: + description: + - Second (end) port when using range operand. + required: false + default: null + log: + description: + - Log matches against this entry. + required: false + default: null + choices: ['enable'] + urg: + description: + - Match on the URG bit. + required: false + default: null + choices: ['enable'] + ack: + description: + - Match on the ACK bit. + required: false + default: null + choices: ['enable'] + psh: + description: + - Match on the PSH bit. + required: false + default: null + choices: ['enable'] + rst: + description: + - Match on the RST bit. + required: false + default: null + choices: ['enable'] + syn: + description: + - Match on the SYN bit. + required: false + default: null + choices: ['enable'] + fin: + description: + - Match on the FIN bit. + required: false + default: null + choices: ['enable'] + established: + description: + - Match established connections. + required: false + default: null + choices: ['enable'] + fragments: + description: + - Check non-initial fragments. + required: false + default: null + choices: ['enable'] + time-range: + description: + - Name of time-range to apply. 
+ required: false + default: null + precedence: + description: + - Match packets with given precedence. + required: false + default: null + choices: ['critical', 'flash', 'flash-override', 'immediate', + 'internet', 'network', 'priority', 'routine'] + dscp: + description: + - Match packets with given dscp value. + required: false + default: null + choices: ['af11', 'af12', 'af13', 'af21', 'af22', 'af23','af31','af32', + 'af33', 'af41', 'af42', 'af43', 'cs1', 'cs2', 'cs3', 'cs4', + 'cs5', 'cs6', 'cs7', 'default', 'ef'] + state: + description: + - Specify desired state of the resource. + required: false + default: present + choices: ['present','absent','delete_acl'] +''' + +EXAMPLES = ''' + +# configure ACL ANSIBLE +- nxos_acl: + name: ANSIBLE + seq: 10 + action: permit + proto: tcp + src: 1.1.1.1/24 + dest: any + state: present + provider: "{{ nxos_provider }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module. + returned: always + type: dict + sample: {"action": "permit", "dest": "any", "name": "ANSIBLE", + "proto": "tcp", "seq": "10", "src": "1.1.1.1/24"} +existing: + description: k/v pairs of existing ACL entries. + returned: always + type: dict + sample: {} +end_state: + description: k/v pairs of ACL entries after module execution. 
+ returned: always + type: dict + sample: {"action": "permit", "dest": "any", "name": "ANSIBLE", + "proto": "tcp", "seq": "10", "src": "1.1.1.1/24"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["ip access-list ANSIBLE", "10 permit tcp 1.1.1.1/24 any"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import collections +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config 
command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON 
CODE + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, we assume if '^' is found in response, + it is an invalid command. + """ + if 'xml' in response[0]: + body = [] + elif '^' in response[0]: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def get_acl(module, acl_name, seq_number): + command = 'show ip 
access-list' + new_acl = [] + saveme = {} + seqs = [] + acl_body = {} + + body = execute_show_command(command, module)[0] + all_acl_body = body['TABLE_ip_ipv6_mac']['ROW_ip_ipv6_mac'] + + for acl in all_acl_body: + if acl.get('acl_name') == acl_name: + acl_body = acl + + try: + acl_entries = acl_body['TABLE_seqno']['ROW_seqno'] + acl_name = acl_body.get('acl_name') + except KeyError: # could be raised if no ACEs are configured for an ACL + return saveme, [{'acl': 'no_entries'}], seqs + + if isinstance(acl_entries, dict): + acl_entries = [acl_entries] + + for each in acl_entries: + temp = collections.OrderedDict() + keep = {} + temp['name'] = acl_name + temp['seq'] = str(each.get('seqno')) + temp['options'] = {} + remark = each.get('remark') + if remark: + temp['remark'] = remark + temp['action'] = 'remark' + else: + temp['action'] = each.get('permitdeny') + temp['proto'] = each.get('proto', each.get('proto_str', each.get('ip'))) + temp['src'] = each.get('src_any', each.get('src_ip_prefix')) + temp['src_port_op'] = each.get('src_port_op') + temp['src_port1'] = each.get('src_port1_num') + temp['src_port2'] = each.get('src_port2_num') + temp['dest'] = each.get('dest_any', each.get('dest_ip_prefix')) + temp['dest_port_op'] = each.get('dest_port_op') + temp['dest_port1'] = each.get('dest_port1_num') + temp['dest_port2'] = each.get('dest_port2_num') + + options = collections.OrderedDict() + options['log'] = each.get('log') + options['urg'] = each.get('urg') + options['ack'] = each.get('ack') + options['psh'] = each.get('psh') + options['rst'] = each.get('rst') + options['syn'] = each.get('syn') + options['fin'] = each.get('fin') + options['established'] = each.get('established') + options['dscp'] = each.get('dscp_str') + options['precedence'] = each.get('precedence_str') + options['fragments'] = each.get('fragments') + options['time_range'] = each.get('timerange') + + options_no_null = {} + for key, value in options.iteritems(): + if value is not None: + 
options_no_null[key] = value + + keep['options'] = options_no_null + + for key, value in temp.iteritems(): + if value: + keep[key] = value + # ensure options is always in the dict + if keep.get('options', 'DNE') == 'DNE': + keep['options'] = {} + + if keep.get('seq') == seq_number: + saveme = dict(keep) + + seqs.append(str(keep.get('seq'))) + new_acl.append(keep) + + return saveme, new_acl, seqs + + +def _acl_operand(operand, srcp1, sprcp2): + sub_entry = ' ' + operand + + if operand == 'range': + sub_entry += ' ' + srcp1 + ' ' + sprcp2 + else: + sub_entry += ' ' + srcp1 + + return sub_entry + + +def config_core_acl(proposed): + seq = proposed.get('seq') + action = proposed.get('action') + remark = proposed.get('remark') + proto = proposed.get('proto') + src = proposed.get('src') + src_port_op = proposed.get('src_port_op') + src_port1 = proposed.get('src_port1') + src_port2 = proposed.get('src_port2') + + dest = proposed.get('dest') + dest_port_op = proposed.get('dest_port_op') + dest_port1 = proposed.get('dest_port1') + dest_port2 = proposed.get('dest_port2') + + ace_start_entries = [action, proto, src] + if not remark: + ace = seq + ' ' + ' '.join(ace_start_entries) + if src_port_op: + ace += _acl_operand(src_port_op, src_port1, src_port2) + ace += ' ' + dest + if dest_port_op: + ace += _acl_operand(dest_port_op, dest_port1, dest_port2) + else: + ace = seq + ' remark ' + remark + + return ace + + +def config_acl_options(options): + ENABLE_ONLY = ['psh', 'urg', 'log', 'ack', 'syn', + 'established', 'rst', 'fin', 'fragments', + 'log'] + + OTHER = ['dscp', 'precedence', 'time-range'] + # packet-length is the only option not currently supported + + if options.get('time_range'): + options['time-range'] = options.get('time_range') + options.pop('time_range') + + command = '' + for option, value in options.iteritems(): + if option in ENABLE_ONLY: + if value == 'enable': + command += ' ' + option + elif option in OTHER: + command += ' ' + option + ' ' + value + if 
command: + command = command.strip() + return command + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def main(): + argument_spec = dict( + seq=dict(required=False, type='str'), + name=dict(required=True, type='str'), + action=dict(required=False, choices=['remark', 'permit', 'deny']), + remark=dict(required=False, type='str'), + proto=dict(required=False, type='str'), + src=dict(required=False, type='str'), + src_port_op=dict(required=False), + src_port1=dict(required=False, type='str'), + src_port2=dict(required=False, type='str'), + dest=dict(required=False, type='str'), + dest_port_op=dict(required=False), + dest_port1=dict(required=False, type='str'), + dest_port2=dict(required=False, type='str'), + log=dict(required=False, choices=['enable']), + urg=dict(required=False, choices=['enable']), + ack=dict(required=False, choices=['enable']), + psh=dict(required=False, choices=['enable']), + rst=dict(required=False, choices=['enable']), + syn=dict(required=False, choices=['enable']), + fragments=dict(required=False, choices=['enable']), + fin=dict(required=False, choices=['enable']), + established=dict(required=False, choices=['enable']), + time_range=dict(required=False), + precedence=dict(required=False, choices=['critical', 'flash', + 'flash-override', + 'immediate', 
'internet', + 'network', 'priority', + 'routine']), + dscp=dict(required=False, choices=['af11', 'af12', 'af13', 'af21', + 'af22', 'af23', 'af31', 'af32', + 'af33', 'af41', 'af42', 'af43', + 'cs1', 'cs2', 'cs3', 'cs4', + 'cs5', 'cs6', 'cs7', 'default', + 'ef']), + state=dict(choices=['absent', 'present', 'delete_acl'], + default='present'), + protocol=dict(choices=['http', 'https'], default='http'), + host=dict(required=True), + username=dict(type='str'), + password=dict(no_log=True, type='str'), + include_defaults=dict(default=False), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + state = module.params['state'] + action = module.params['action'] + remark = module.params['remark'] + dscp = module.params['dscp'] + precedence = module.params['precedence'] + seq = module.params['seq'] + name = module.params['name'] + seq = module.params['seq'] + + if action == 'remark' and not remark: + module.fail_json(msg='when state is action, remark param is also ' + 'required') + + REQUIRED = ['seq', 'name', 'action', 'proto', 'src', 'dest'] + ABSENT = ['name', 'seq'] + if state == 'present': + if action and remark and seq: + pass + else: + for each in REQUIRED: + if module.params[each] is None: + module.fail_json(msg="req'd params when state is present:", + params=REQUIRED) + elif state == 'absent': + for each in ABSENT: + if module.params[each] is None: + module.fail_json(msg='require params when state is absent', + params=ABSENT) + elif state == 'delete_acl': + if module.params['name'] is None: + module.fail_json(msg="param name req'd when state is delete_acl") + + if dscp and precedence: + module.fail_json(msg='only one of the params dscp/precedence ' + 'are allowed') + + OPTIONS_NAMES = ['log', 'urg', 'ack', 'psh', 'rst', 'syn', 'fin', + 'established', 'dscp', 'precedence', 'fragments', + 'time_range'] + + CORE = ['seq', 'name', 'action', 'proto', 'src', 'src_port_op', + 
'src_port1', 'src_port2', 'dest', 'dest_port_op', + 'dest_port1', 'dest_port2', 'remark'] + + proposed_core = dict((param, value) for (param, value) in + module.params.iteritems() + if param in CORE and value is not None) + + proposed_options = dict((param, value) for (param, value) in + module.params.iteritems() + if param in OPTIONS_NAMES and value is not None) + proposed = {} + proposed.update(proposed_core) + proposed.update(proposed_options) + + existing_options = {} + + # getting existing existing_core=dict, acl=list, seq=list + existing_core, acl, seqs = get_acl(module, name, seq) + if existing_core: + existing_options = existing_core.get('options') + existing_core.pop('options') + + end_state = acl + commands = [] + changed = False + delta_core = {} + delta_options = {} + + if not existing_core.get('remark'): + delta_core = dict( + set(proposed_core.iteritems()).difference( + existing_core.iteritems()) + ) + delta_options = dict( + set(proposed_options.iteritems()).difference( + existing_options.iteritems()) + ) + + if state == 'present': + if delta_core or delta_options: + if existing_core: # if the ace exists already + commands.append(['no {0}'.format(seq)]) + if delta_options: + myacl_str = config_core_acl(proposed_core) + myacl_str += ' ' + config_acl_options(proposed_options) + else: + myacl_str = config_core_acl(proposed_core) + command = [myacl_str] + commands.append(command) + elif state == 'absent': + if existing_core: + commands.append(['no {0}'.format(seq)]) + elif state == 'delete_acl': + if acl[0].get('acl') != 'no_entries': + commands.append(['no ip access-list {0}'.format(name)]) + + results = {} + cmds = [] + if commands: + preface = [] + if state in ['present', 'absent']: + preface = ['ip access-list {0}'.format(name)] + commands.insert(0, preface) + + cmds = flatten_list(commands) + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + execute_config_command(cmds, module) + changed = True + new_existing_core, 
end_state, seqs = get_acl(module, name, seq) + if 'configure' in cmds: + cmds.pop(0) + + results['proposed'] = proposed + results['existing'] = existing_core + results['changed'] = changed + results['updates'] = cmds + results['end_state'] = end_state + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_acl_interface.py b/network/nxos/nxos_acl_interface.py new file mode 100644 index 00000000000..e9dbcb3fe07 --- /dev/null +++ b/network/nxos/nxos_acl_interface.py @@ -0,0 +1,543 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_acl_interface +version_added: "2.2" +short_description: Manages applying ACLs to interfaces. +description: + - Manages applying ACLs to interfaces. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +options: + name: + description: + - Case sensitive name of the access list (ACL). + required: true + interface: + description: + - Full name of interface, e.g. I(Ethernet1/1). + required: true + direction: + description: + - Direction ACL to be applied in on the interface. + required: true + choices: ['ingress', 'egress'] + state: + description: + - Specify desired state of the resource. 
+ required: false + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +- name: apply egress acl to ethernet1/41 + nxos_acl_interface: + name: ANSIBLE + interface: ethernet1/41 + direction: egress + state: present + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"direction": "egress", "interface": "ethernet1/41", + "name": "ANSIBLE"} +existing: + description: k/v pairs of existing ACL applied to the interface + type: dict + sample: {} +end_state: + description: k/v pairs of interface ACL after module execution + returned: always + type: dict + sample: {"direction": "egress", "interface": "ethernet1/41", + "name": "ANSIBLE"} +acl_applied_to: + description: list of interfaces the ACL is applied to + returned: always + type: list + sample: [{"acl_type": "Router ACL", "direction": "egress", + "interface": "Ethernet1/41", "name": "ANSIBLE"}] +updates: + description: commands sent to the device + returned: always + type: list + sample: ["interface ethernet1/41", "ip access-group ANSIBLE out"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import collections +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for 
child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, 
include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, we assume if '^' is found in response, + it is an invalid command. 
+ """ + if 'xml' in response[0]: + body = [] + elif '^' in response[0] or 'summary' in command: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'summary' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def get_acl_interface(module, acl): + command = 'show ip access-list summary' + name_regex = '.*IPV4\s+ACL\s+(?P\S+).*' + interface_regex = ('.*\s+(?P\w+(\d+)?\/?(\d+)?)\s-\s' + '(?P\w+)\s+\W(?P\w+\s\w+)\W.*') + acl_list = [] + + body = execute_show_command(command, module, command_type='cli_show_ascii') + body_split = body[0].split('Active on interfaces:') + + for each_acl in body_split: + intf_list = [] + temp = {} + try: + match_name = re.match(name_regex, 
each_acl, re.DOTALL) + name_dict = match_name.groupdict() + name = name_dict['name'] + except AttributeError: + name = '' + + temp['interfaces'] = [] + for line in each_acl.split('\n'): + intf_temp = {} + try: + match_interface = re.match(interface_regex, line, re.DOTALL) + interface_dict = match_interface.groupdict() + interface = interface_dict['interface'] + direction = interface_dict['direction'] + acl_type = interface_dict['acl_type'] + except AttributeError: + interface = '' + direction = '' + acl_type = '' + + if interface: + intf_temp['interface'] = interface + if acl_type: + intf_temp['acl_type'] = acl_type + if direction: + intf_temp['direction'] = direction + if intf_temp: + temp['interfaces'].append(intf_temp) + if name: + temp['name'] = name + + if temp: + acl_list.append(temp) + + existing_no_null = [] + for each in acl_list: + if each.get('name') == acl: + interfaces = each.get('interfaces') + for interface in interfaces: + new_temp = {} + new_temp['name'] = acl + new_temp.update(interface) + existing_no_null.append(new_temp) + return existing_no_null + + +def other_existing_acl(get_existing, interface, direction): + # now we'll just get the interface in question + # needs to be a list since same acl could be applied in both dirs + acls_interface = [] + if get_existing: + for each in get_existing: + if each.get('interface').lower() == interface: + acls_interface.append(each) + else: + acls_interface = [] + + if acls_interface: + this = {} + for each in acls_interface: + if each.get('direction') == direction: + this = each + else: + acls_interface = [] + this = {} + + return acls_interface, this + + +def apply_acl(proposed): + commands = [] + + commands.append('interface ' + proposed.get('interface')) + direction = proposed.get('direction') + if direction == 'egress': + cmd = 'ip access-group {0} {1}'.format(proposed.get('name'), 'out') + elif direction == 'ingress': + cmd = 'ip access-group {0} {1}'.format(proposed.get('name'), 'in') + 
commands.append(cmd) + + return commands + + +def remove_acl(proposed): + commands = [] + + commands.append('interface ' + proposed.get('interface')) + direction = proposed.get('direction') + if direction == 'egress': + cmd = 'no ip access-group {0} {1}'.format(proposed.get('name'), 'out') + elif direction == 'ingress': + cmd = 'no ip access-group {0} {1}'.format(proposed.get('name'), 'in') + commands.append(cmd) + + return commands + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def main(): + argument_spec = dict( + name=dict(required=False, type='str'), + interface=dict(required=True), + direction=dict(required=True, choices=['egress', 'ingress']), + state=dict(choices=['absent', 'present'], + default='present'), + include_defaults=dict(default=True), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + state = module.params['state'] + name = module.params['name'] + interface = module.params['interface'].lower() + direction = module.params['direction'].lower() + + proposed = dict(name=name, interface=interface, direction=direction) + + # includes all interfaces the ACL is applied to (list) + get_existing = get_acl_interface(module, name) + + # interface_acls = includes entries of 
this ACL on the interface (list) + # this_dir_acl_intf = dict - not null if it already exists + interfaces_acls, existing = other_existing_acl( + get_existing, interface, direction) + + end_state = existing + end_state_acls = get_existing + changed = False + + cmds = [] + commands = [] + if state == 'present': + if not existing: + command = apply_acl(proposed) + if command: + commands.append(command) + + elif state == 'absent': + if existing: + command = remove_acl(proposed) + if command: + commands.append(command) + + if commands: + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + execute_config_command(cmds, module) + changed = True + end_state_acls = get_acl_interface(module, name) + interfaces_acls, this_dir_acl_intf = other_existing_acl( + end_state_acls, interface, direction) + end_state = this_dir_acl_intf + if 'configure' in cmds: + cmds.pop(0) + else: + cmds = [] + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['updates'] = cmds + results['changed'] = changed + results['end_state'] = end_state + results['acl_applied_to'] = end_state_acls + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_bgp.py b/network/nxos/nxos_bgp.py new file mode 100644 index 00000000000..a6306fe74cc --- /dev/null +++ b/network/nxos/nxos_bgp.py @@ -0,0 +1,1012 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_bgp +version_added: "2.2" +short_description: Manages BGP configuration. +description: + - Manages BGP configurations on NX-OS switches. +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +extends_documentation_fragment: nxos +notes: + - C(state=absent) removes the whole BGP ASN configuration when + C(vrf=default) or the whole VRF instance within the BGP process when + using a different VRF. + - Default when supported restores params default value. + - Configuring global parmas is only permitted if C(vrf=default). +options: + asn: + description: + - BGP autonomous system number. Valid values are String, + Integer in ASPLAIN or ASDOT notation. + required: true + vrf: + description: + - Name of the VRF. The name 'default' is a valid VRF representing + the global BGP. + required: false + default: null + bestpath_always_compare_med: + description: + - Enable/Disable MED comparison on paths from different + autonomous systems. + required: false + choices: ['true','false'] + default: null + bestpath_aspath_multipath_relax: + description: + - Enable/Disable load sharing across the providers with + different (but equal-length) AS paths. + required: false + choices: ['true','false'] + default: null + bestpath_compare_routerid: + description: + - Enable/Disable comparison of router IDs for identical eBGP paths. + required: false + choices: ['true','false'] + default: null + bestpath_cost_community_ignore: + description: + - Enable/Disable Ignores the cost community for BGP best-path + calculations. 
+ required: false + choices: ['true','false'] + default: null + bestpath_med_confed: + description: + - Enable/Disable enforcement of bestpath to do a MED comparison + only between paths originated within a confederation. + required: false + choices: ['true','false'] + default: null + bestpath_med_missing_as_worst: + description: + - Enable/Disable assigns the value of infinity to received + routes that do not carry the MED attribute, making these routes + the least desirable. + required: false + choices: ['true','false'] + default: null + bestpath_med_non_deterministic: + description: + - Enable/Disable deterministic selection of the best MED pat + from among the paths from the same autonomous system. + required: false + choices: ['true','false'] + default: null + cluster_id: + description: + - Route Reflector Cluster-ID. + required: false + default: null + confederation_id: + description: + - Routing domain confederation AS. + required: false + default: null + confederation_peers: + description: + - AS confederation parameters. + required: false + default: null + disable_policy_batching: + description: + - Enable/Disable the batching evaluation of prefix advertisement + to all peers. + required: false + choices: ['true','false'] + default: null + disable_policy_batching_ipv4_prefix_list: + description: + - Enable/Disable the batching evaluation of prefix advertisements + to all peers with prefix list. + required: false + default: null + disable_policy_batching_ipv6_prefix_list: + description: + - Enable/Disable the batching evaluation of prefix advertisements + to all peers with prefix list. + required: false + enforce_first_as: + description: + - Enable/Disable enforces the neighbor autonomous system to be + the first AS number listed in the AS path attribute for eBGP. + On NX-OS, this property is only supported in the + global BGP context. 
+ required: false + choices: ['true','false'] + default: null + event_history_cli: + description: + - Enable/Disable cli event history buffer. + required: false + choices: ['size_small', 'size_medium', 'size_large', 'size_disable', 'default'] + default: null + event_history_detail: + description: + - Enable/Disable detail event history buffer. + required: false + choices: ['size_small', 'size_medium', 'size_large', 'size_disable', 'default'] + default: null + event_history_events: + description: + - Enable/Disable event history buffer. + required: false + choices: ['size_small', 'size_medium', 'size_large', 'size_disable', 'default'] + default: null + event_history_periodic: + description: + - Enable/Disable periodic event history buffer. + required: false + choices: ['size_small', 'size_medium', 'size_large', 'size_disable', 'default'] + fast_external_fallover: + description: + - Enable/Disable immediately reset the session if the link to a + directly connected BGP peer goes down. Only supported in the + global BGP context. + required: false + choices: ['true','false'] + default: null + flush_routes: + description: + - Enable/Disable flush routes in RIB upon controlled restart. + On NX-OS, this property is only supported in the global + BGP context. + required: false + choices: ['true','false'] + default: null + graceful_restart: + description: + - Enable/Disable graceful restart. + required: false + choices: ['true','false'] + default: null + graceful_restart_helper: + description: + - Enable/Disable graceful restart helper mode. + required: false + choices: ['true','false'] + default: null + graceful_restart_timers_restart: + description: + - Set maximum time for a restart sent to the BGP peer. + required: false + choices: ['true','false'] + default: null + graceful_restart_timers_stalepath_time: + description: + - Set maximum time that BGP keeps the stale routes from the + restarting BGP peer. 
+ choices: ['true','false'] + default: null + isolate: + description: + - Enable/Disable isolate this router from BGP perspective. + required: false + choices: ['true','false'] + default: null + local_as: + description: + - Local AS number to be used within a VRF instance. + required: false + default: null + log_neighbor_changes: + description: + - Enable/Disable message logging for neighbor up/down event. + required: false + choices: ['true','false'] + default: null + maxas_limit: + description: + - Specify Maximum number of AS numbers allowed in the AS-path + attribute. Valid values are between 1 and 512. + required: false + default: null + neighbor_down_fib_accelerate: + description: + - Enable/Disable handle BGP neighbor down event, due to + various reasons. + required: false + choices: ['true','false'] + default: null + reconnect_interval: + description: + - The BGP reconnection interval for dropped sessions. + Valid values are between 1 and 60. + required: false + default: null + router_id: + description: + - Router Identifier (ID) of the BGP router VRF instance. + required: false + default: null + shutdown: + description: + - Administratively shutdown the BGP protocol. + required: false + choices: ['true','false'] + default: null + suppress_fib_pending: + description: + - Enable/Disable advertise only routes programmed in hardware + to peers. + required: false + choices: ['true','false'] + default: null + timer_bestpath_limit: + description: + - Specify timeout for the first best path after a restart, + in seconds. + required: false + default: null + timer_bestpath_limit_always: + description: + - Enable/Disable update-delay-always option. + required: false + choices: ['true','false'] + default: null + timer_bgp_hold: + description: + - Set BGP hold timer. + required: false + default: null + timer_bgp_keepalive: + description: + - Set BGP keepalive timer. 
+ required: false + default: null + state: + description: + - Determines whether the config should be present or not + on the device. + required: false + default: present + choices: ['present','absent'] +''' + + +EXAMPLES = ''' +- name: Configure a simple ASN + nxos_bgp: + asn: 65535 + vrf: test + router_id: 1.1.1.1 + state: present + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"asn": "65535", "router_id": "1.1.1.1", "vrf": "test"} +existing: + description: k/v pairs of existing BGP configuration + returned: verbose mode + type: dict + sample: {"asn": "65535", "bestpath_always_compare_med": false, + "bestpath_aspath_multipath_relax": false, + "bestpath_compare_neighborid": false, + "bestpath_compare_routerid": false, + "bestpath_cost_community_ignore": false, + "bestpath_med_confed": false, + "bestpath_med_missing_as_worst": false, + "bestpath_med_non_deterministic": false, "cluster_id": "", + "confederation_id": "", "confederation_peers": "", + "graceful_restart": true, "graceful_restart_helper": false, + "graceful_restart_timers_restart": "120", + "graceful_restart_timers_stalepath_time": "300", "local_as": "", + "log_neighbor_changes": false, "maxas_limit": "", + "neighbor_down_fib_accelerate": false, "reconnect_interval": "60", + "router_id": "11.11.11.11", "suppress_fib_pending": false, + "timer_bestpath_limit": "", "timer_bgp_hold": "180", + "timer_bgp_keepalive": "60", "vrf": "test"} +end_state: + description: k/v pairs of BGP configuration after module execution + returned: verbose mode + type: dict + sample: {"asn": "65535", "bestpath_always_compare_med": false, + "bestpath_aspath_multipath_relax": false, + "bestpath_compare_neighborid": false, + "bestpath_compare_routerid": false, + "bestpath_cost_community_ignore": false, + "bestpath_med_confed": false, + 
"bestpath_med_missing_as_worst": false, + "bestpath_med_non_deterministic": false, "cluster_id": "", + "confederation_id": "", "confederation_peers": "", + "graceful_restart": true, "graceful_restart_helper": false, + "graceful_restart_timers_restart": "120", + "graceful_restart_timers_stalepath_time": "300", "local_as": "", + "log_neighbor_changes": false, "maxas_limit": "", + "neighbor_down_fib_accelerate": false, "reconnect_interval": "60", + "router_id": "1.1.1.1", "suppress_fib_pending": false, + "timer_bestpath_limit": "", "timer_bgp_hold": "180", + "timer_bgp_keepalive": "60", "vrf": "test"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["router bgp 65535", "vrf test", "router-id 1.1.1.1"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +# COMMON CODE FOR MIGRATION +import re + +import ansible.module_utils.nxos +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.network import NetworkModule +from ansible.module_utils.shell import ShellError + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def 
get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = 
dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +WARNINGS = [] +BOOL_PARAMS = [ + 'bestpath_always_compare_med', + 'bestpath_aspath_multipath_relax', + 'bestpath_compare_neighborid', + 'bestpath_compare_routerid', + 'bestpath_cost_community_ignore', + 'bestpath_med_confed', + 'bestpath_med_missing_as_worst', + 'bestpath_med_non_deterministic', + 'disable_policy_batching', + 'enforce_first_as', + 'fast_external_fallover', + 'flush_routes', + 'graceful_restart', + 'graceful_restart_helper', + 'isolate', + 'log_neighbor_changes', + 'neighbor_down_fib_accelerate', + 'shutdown', + 'suppress_fib_pending' +] +GLOBAL_PARAMS = [ + 'disable_policy_batching', + 'disable_policy_batching_ipv4_prefix_list', + 'disable_policy_batching_ipv6_prefix_list', + 'enforce_first_as', + 'event_history_cli', + 'event_history_detail', + 'event_history_events', + 'event_history_periodic', + 'fast_external_fallover', + 'flush_routes', + 'isolate', + 'shutdown' +] +PARAM_TO_DEFAULT_KEYMAP = { + 'timer_bgp_keepalive': '60', + 'timer_bgp_hold': '180', + 'graceful_restart': True, + 'graceful_restart_timers_restart': '120', + 'graceful_restart_timers_stalepath_time': '300', + 'reconnect_interval': '60', + 'suppress_fib_pending': True, + 'fast_external_fallover': True, + 'enforce_first_as': True, + 'event_history_periodic': True, + 'event_history_cli': True, + 'event_history_events': True +} +PARAM_TO_COMMAND_KEYMAP = { + 'asn': 'router bgp', + 'bestpath_always_compare_med': 'bestpath always-compare-med', + 'bestpath_aspath_multipath_relax': 'bestpath as-path multipath-relax', + 'bestpath_compare_neighborid': 'bestpath compare-neighborid', + 
'bestpath_compare_routerid': 'bestpath compare-routerid', + 'bestpath_cost_community_ignore': 'bestpath cost-community ignore', + 'bestpath_med_confed': 'bestpath med confed', + 'bestpath_med_missing_as_worst': 'bestpath med missing-as-worst', + 'bestpath_med_non_deterministic': 'bestpath med non-deterministic', + 'cluster_id': 'cluster-id', + 'confederation_id': 'confederation identifier', + 'confederation_peers': 'confederation peers', + 'disable_policy_batching': 'disable-policy-batching', + 'disable_policy_batching_ipv4_prefix_list': 'disable-policy-batching ipv4 prefix-list', + 'disable_policy_batching_ipv6_prefix_list': 'disable-policy-batching ipv6 prefix-list', + 'enforce_first_as': 'enforce-first-as', + 'event_history_cli': 'event-history cli', + 'event_history_detail': 'event-history detail', + 'event_history_events': 'event-history events', + 'event_history_periodic': 'event-history periodic', + 'fast_external_fallover': 'fast-external-fallover', + 'flush_routes': 'flush-routes', + 'graceful_restart': 'graceful-restart', + 'graceful_restart_helper': 'graceful-restart-helper', + 'graceful_restart_timers_restart': 'graceful-restart restart-time', + 'graceful_restart_timers_stalepath_time': 'graceful-restart stalepath-time', + 'isolate': 'isolate', + 'local_as': 'local-as', + 'log_neighbor_changes': 'log-neighbor-changes', + 'maxas_limit': 'maxas-limit', + 'neighbor_down_fib_accelerate': 'neighbor-down fib-accelerate', + 'reconnect_interval': 'reconnect-interval', + 'router_id': 'router-id', + 'shutdown': 'shutdown', + 'suppress_fib_pending': 'suppress-fib-pending', + 'timer_bestpath_limit': 'timers bestpath-limit', + 'timer_bgp_hold': 'timers bgp', + 'timer_bgp_keepalive': 'timers bgp', + 'vrf': 'vrf' +} + + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + +def get_custom_value(config, arg): + if arg.startswith('event_history'): + REGEX_SIZE = re.compile(r'(?:{0} 
size\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = False + + if 'no {0}'.format(PARAM_TO_COMMAND_KEYMAP[arg]) in config: + pass + elif PARAM_TO_COMMAND_KEYMAP[arg] in config: + try: + value = REGEX_SIZE.search(config).group('value') + except AttributeError: + if REGEX.search(config): + value = True + + elif arg == 'enforce_first_as' or arg == 'fast_external_fallover': + REGEX = re.compile(r'no\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = True + try: + if REGEX.search(config): + value = False + except TypeError: + value = True + + elif arg == 'confederation_peers': + REGEX = re.compile(r'(?:confederation peers\s)(?P<value>.*)$', re.M) + value = '' + if 'confederation peers' in config: + value = REGEX.search(config).group('value').split() + + elif arg == 'timer_bgp_keepalive': + REGEX = re.compile(r'(?:timers bgp\s)(?P<value>.*)$', re.M) + value = '' + if 'timers bgp' in config: + parsed = REGEX.search(config).group('value').split() + value = parsed[0] + + elif arg == 'timer_bgp_hold': + REGEX = re.compile(r'(?:timers bgp\s)(?P<value>.*)$', re.M) + value = '' + if 'timers bgp' in config: + parsed = REGEX.search(config).group('value').split() + if len(parsed) == 2: + value = parsed[1] + + return value + + +def get_value(arg, config): + custom = [ + 'event_history_cli', + 'event_history_events', + 'event_history_periodic', + 'event_history_detail', + 'confederation_peers', + 'timer_bgp_hold', + 'timer_bgp_keepalive', + 'enforce_first_as', + 'fast_external_fallover' + ] + + if arg in custom: + value = get_custom_value(config, arg) + elif arg in BOOL_PARAMS: + REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = False + try: + if REGEX.search(config): + value = True + except TypeError: + value = False + else: + REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = '' + if PARAM_TO_COMMAND_KEYMAP[arg] in
config: + value = REGEX.search(config).group('value') + return value + + +def get_existing(module, args): + existing = {} + netcfg = get_config(module) + + try: + asn_regex = '.*router\sbgp\s(?P<existing_asn>\d+).*' + match_asn = re.match(asn_regex, str(netcfg), re.DOTALL) + existing_asn_group = match_asn.groupdict() + existing_asn = existing_asn_group['existing_asn'] + except AttributeError: + existing_asn = '' + + if existing_asn: + bgp_parent = 'router bgp {0}'.format(existing_asn) + if module.params['vrf'] != 'default': + parents = [bgp_parent, 'vrf {0}'.format(module.params['vrf'])] + else: + parents = [bgp_parent] + + config = netcfg.get_section(parents) + if config: + for arg in args: + if arg != 'asn': + if module.params['vrf'] != 'default': + if arg not in GLOBAL_PARAMS: + existing[arg] = get_value(arg, config) + else: + existing[arg] = get_value(arg, config) + + existing['asn'] = existing_asn + if module.params['vrf'] == 'default': + existing['vrf'] = 'default' + else: + if (module.params['state'] == 'present' and + module.params['vrf'] != 'default'): + msg = ("VRF {0} doesn't exist. ".format(module.params['vrf'])) + WARNINGS.append(msg) + else: + if (module.params['state'] == 'present' and + module.params['vrf'] != 'default'): + msg = ("VRF {0} doesn't exist.
".format(module.params['vrf'])) + WARNINGS.append(msg) + + return existing + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = value + else: + new_dict[new_key] = value + return new_dict + + +def state_present(module, existing, proposed, candidate): + commands = list() + proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) + existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) + + for key, value in proposed_commands.iteritems(): + if value is True: + commands.append(key) + elif value is False: + commands.append('no {0}'.format(key)) + elif value == 'default': + if key in PARAM_TO_DEFAULT_KEYMAP: + commands.append('{0} {1}'.format(key, PARAM_TO_DEFAULT_KEYMAP[key])) + elif existing_commands.get(key): + existing_value = existing_commands.get(key) + if key == 'confederation peers': + commands.append('no {0} {1}'.format(key, ' '.join(existing_value))) + else: + commands.append('no {0} {1}'.format(key, existing_value)) + else: + if key == 'confederation peers': + existing_confederation_peers = existing.get('confederation_peers') + + if existing_confederation_peers: + if not isinstance(existing_confederation_peers, list): + existing_confederation_peers = [existing_confederation_peers] + else: + existing_confederation_peers = [] + + values = value.split() + for each_value in values: + if each_value not in existing_confederation_peers: + existing_confederation_peers.append(each_value) + peer_string = ' '.join(existing_confederation_peers) + commands.append('{0} {1}'.format(key, peer_string)) + elif key.startswith('timers bgp'): + command = 'timers bgp {0} {1}'.format( + proposed['timer_bgp_keepalive'], + proposed['timer_bgp_hold']) + if command not in commands: + commands.append(command) + else: + if value.startswith('size'): + value = value.replace('_', ' ') + command = '{0} {1}'.format(key, value) + 
commands.append(command) + + if commands: + commands = fix_commands(commands) + parents = ['router bgp {0}'.format(module.params['asn'])] + if module.params['vrf'] != 'default': + parents.append('vrf {0}'.format(module.params['vrf'])) + candidate.add(commands, parents=parents) + else: + if len(proposed.keys()) == 0: + if module.params['vrf'] != 'default': + commands.append('vrf {0}'.format(module.params['vrf'])) + parents = ['router bgp {0}'.format(module.params['asn'])] + else: + commands.append('router bgp {0}'.format(module.params['asn'])) + parents = [] + candidate.add(commands, parents=parents) + + +def state_absent(module, existing, proposed, candidate): + commands = [] + parents = [] + if module.params['vrf'] == 'default': + commands.append('no router bgp {0}'.format(module.params['asn'])) + else: + if existing.get('vrf') == module.params['vrf']: + commands.append('no vrf {0}'.format(module.params['vrf'])) + parents = ['router bgp {0}'.format(module.params['asn'])] + + candidate.add(commands, parents=parents) + + +def fix_commands(commands): + local_as_command = '' + confederation_id_command = '' + confederation_peers_command = '' + + for command in commands: + if 'local-as' in command: + local_as_command = command + elif 'confederation identifier' in command: + confederation_id_command = command + elif 'confederation peers' in command: + confederation_peers_command = command + + if local_as_command and confederation_id_command: + commands.pop(commands.index(local_as_command)) + commands.pop(commands.index(confederation_id_command)) + commands.append(local_as_command) + commands.append(confederation_id_command) + + elif confederation_peers_command and confederation_id_command: + commands.pop(commands.index(confederation_peers_command)) + commands.pop(commands.index(confederation_id_command)) + commands.append(confederation_id_command) + commands.append(confederation_peers_command) + + return commands + + +def main(): + argument_spec = dict( + 
asn=dict(required=True, type='str'), + vrf=dict(required=False, type='str', default='default'), + bestpath_always_compare_med=dict(required=False, type='bool'), + bestpath_aspath_multipath_relax=dict(required=False, type='bool'), + bestpath_compare_neighborid=dict(required=False, type='bool'), + bestpath_compare_routerid=dict(required=False, type='bool'), + bestpath_cost_community_ignore=dict(required=False, type='bool'), + bestpath_med_confed=dict(required=False, type='bool'), + bestpath_med_missing_as_worst=dict(required=False, type='bool'), + bestpath_med_non_deterministic=dict(required=False, type='bool'), + cluster_id=dict(required=False, type='str'), + confederation_id=dict(required=False, type='str'), + confederation_peers=dict(required=False, type='str'), + disable_policy_batching=dict(required=False, type='bool'), + disable_policy_batching_ipv4_prefix_list=dict(required=False, type='str'), + disable_policy_batching_ipv6_prefix_list=dict(required=False, type='str'), + enforce_first_as=dict(required=False, type='bool'), + event_history_cli=dict(required=False, choices=['true', 'false', 'default', 'size_small', 'size_medium', 'size_large', 'size_disable']), + event_history_detail=dict(required=False, choices=['true', 'false', 'default', 'size_small', 'size_medium', 'size_large', 'size_disable']), + event_history_events=dict(required=False, choices=['true', 'false', 'default', 'size_small', 'size_medium', 'size_large', 'size_disable']), + event_history_periodic=dict(required=False, choices=['true', 'false', 'default', 'size_small', 'size_medium', 'size_large', 'size_disable']), + fast_external_fallover=dict(required=False, type='bool'), + flush_routes=dict(required=False, type='bool'), + graceful_restart=dict(required=False, type='bool'), + graceful_restart_helper=dict(required=False, type='bool'), + graceful_restart_timers_restart=dict(required=False, type='str'), + graceful_restart_timers_stalepath_time=dict(required=False, type='str'), +
isolate=dict(required=False, type='bool'), + local_as=dict(required=False, type='str'), + log_neighbor_changes=dict(required=False, type='bool'), + maxas_limit=dict(required=False, type='str'), + neighbor_down_fib_accelerate=dict(required=False, type='bool'), + reconnect_interval=dict(required=False, type='str'), + router_id=dict(required=False, type='str'), + shutdown=dict(required=False, type='bool'), + suppress_fib_pending=dict(required=False, type='bool'), + timer_bestpath_limit=dict(required=False, type='str'), + timer_bgp_hold=dict(required=False, type='str'), + timer_bgp_keepalive=dict(required=False, type='str'), + state=dict(choices=['present', 'absent'], default='present', + required=False), + include_defaults=dict(default=True), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + required_together=[['timer_bgp_hold', + 'timer_bgp_keepalive']], + supports_check_mode=True) + + state = module.params['state'] + args = [ + "asn", + "bestpath_always_compare_med", + "bestpath_aspath_multipath_relax", + "bestpath_compare_neighborid", + "bestpath_compare_routerid", + "bestpath_cost_community_ignore", + "bestpath_med_confed", + "bestpath_med_missing_as_worst", + "bestpath_med_non_deterministic", + "cluster_id", + "confederation_id", + "confederation_peers", + "disable_policy_batching", + "disable_policy_batching_ipv4_prefix_list", + "disable_policy_batching_ipv6_prefix_list", + "enforce_first_as", + "event_history_cli", + "event_history_detail", + "event_history_events", + "event_history_periodic", + "fast_external_fallover", + "flush_routes", + "graceful_restart", + "graceful_restart_helper", + "graceful_restart_timers_restart", + "graceful_restart_timers_stalepath_time", + "isolate", + "local_as", + "log_neighbor_changes", + "maxas_limit", + "neighbor_down_fib_accelerate", + "reconnect_interval", + "router_id", + "shutdown", + "suppress_fib_pending", + "timer_bestpath_limit", + 
"timer_bgp_hold", + "timer_bgp_keepalive", + "vrf" + ] + + if module.params['vrf'] != 'default': + for param, inserted_value in module.params.iteritems(): + if param in GLOBAL_PARAMS and inserted_value: + module.fail_json(msg='Global params can be modified only' + ' under "default" VRF.', + vrf=module.params['vrf'], + global_param=param) + + existing = invoke('get_existing', module, args) + + if existing.get('asn'): + if (existing.get('asn') != module.params['asn'] and + state == 'present'): + module.fail_json(msg='Another BGP ASN already exists.', + proposed_asn=module.params['asn'], + existing_asn=existing.get('asn')) + + end_state = existing + proposed_args = dict((k, v) for k, v in module.params.iteritems() + if v is not None and k in args) + proposed = {} + for key, value in proposed_args.iteritems(): + if key != 'asn' and key != 'vrf': + if str(value).lower() == 'default': + value = PARAM_TO_DEFAULT_KEYMAP.get(key) + if value is None: + value = 'default' + if existing.get(key) or (not existing.get(key) and value): + proposed[key] = value + + result = {} + if (state == 'present' or (state == 'absent' and + existing.get('asn') == module.params['asn'])): + candidate = CustomNetworkConfig(indent=3) + invoke('state_%s' % state, module, existing, proposed, candidate) + + try: + response = load_config(module, candidate) + result.update(response) + except ShellError: + exc = get_exception() + module.fail_json(msg=str(exc)) + else: + result['updates'] = [] + + result['connected'] = module.connected + if module._verbosity > 0: + end_state = invoke('get_existing', module, args) + result['end_state'] = end_state + result['existing'] = existing + result['proposed'] = proposed_args + + if WARNINGS: + result['warnings'] = WARNINGS + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_bgp_af.py b/network/nxos/nxos_bgp_af.py new file mode 100644 index 00000000000..3b804d51a3c --- /dev/null +++ b/network/nxos/nxos_bgp_af.py @@ 
-0,0 +1,1110 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_bgp_af +version_added: "2.2" +short_description: Manages BGP Address-family configuration. +description: + - Manages BGP Address-family configurations on NX-OS switches. +author: Gabriele Gerbino (@GGabriele) +extends_documentation_fragment: nxos +notes: + - C(state=absent) removes the whole BGP ASN configuration + - Default, where supported, restores params default value. +options: + asn: + description: + - BGP autonomous system number. Valid values are String, + Integer in ASPLAIN or ASDOT notation. + required: true + vrf: + description: + - Name of the VRF. The name 'default' is a valid VRF representing + the global bgp. + required: true + afi: + description: + - Address Family Identifier. + required: true + choices: ['ipv4','ipv6', 'vpnv4', 'vpnv6', 'l2vpn'] + safi: + description: + - Sub Address Family Identifier. + required: true + choices: ['unicast','multicast', 'evpn'] + additional_paths_install: + description: + - Install a backup path into the forwarding table and provide + prefix independent convergence (PIC) in case of a PE-CE link + failure.
+ required: false + choices: ['true','false'] + default: null + additional_paths_receive: + description: + - Enables the receive capability of additional paths for all of + the neighbors under this address family for which the capability + has not been disabled. + required: false + choices: ['true','false'] + default: null + additional_paths_selection: + description: + - Configures the capability of selecting additional paths for + a prefix. Valid values are a string defining the name of + the route-map. + required: false + default: null + additional_paths_send: + description: + - Enables the send capability of additional paths for all of + the neighbors under this address family for which the capability + has not been disabled. + required: false + choices: ['true','false'] + default: null + advertise_l2vpn_evpn: + description: + - Advertise evpn routes. + required: false + choices: ['true','false'] + default: null + client_to_client: + description: + - Configure client-to-client route reflection. + required: false + choices: ['true','false'] + default: null + dampen_igp_metric: + description: + - Specify dampen value for IGP metric-related changes, in seconds. + Valid values are integer and keyword 'default'. + required: false + default: null + dampening_state: + description: + - Enable/disable route-flap dampening. + required: false + choices: ['true','false'] + default: null + dampening_half_time: + description: + - Specify decay half-life in minutes for route-flap dampening. + Valid values are integer and keyword 'default'. + required: false + default: null + dampening_max_suppress_time: + description: + - Specify max suppress time for route-flap dampening stable route. + Valid values are integer and keyword 'default'. + required: false + default: null + dampening_reuse_time: + description: + - Specify route reuse time for route-flap dampening. + Valid values are integer and keyword 'default'. 
+ required: false + dampening_routemap: + description: + - Specify route-map for route-flap dampening. Valid values are a + string defining the name of the route-map. + required: false + default: null + dampening_suppress_time: + description: + - Specify route suppress time for route-flap dampening. + Valid values are integer and keyword 'default'. + required: false + default: null + default_information_originate: + description: + - Default information originate. + required: false + choices: ['true','false'] + default: null + default_metric: + description: + - Sets default metrics for routes redistributed into BGP. + Valid values are Integer or keyword 'default' + required: false + default: null + distance_ebgp: + description: + - Sets the administrative distance for eBGP routes. + Valid values are Integer or keyword 'default'. + required: false + default: null + distance_ibgp: + description: + - Sets the administrative distance for iBGP routes. + Valid values are Integer or keyword 'default'. + required: false + default: null + distance_local: + description: + - Sets the administrative distance for local BGP routes. + Valid values are Integer or keyword 'default'. + required: false + default: null + inject_map: + description: + - An array of route-map names which will specify prefixes to + inject. Each array entry must first specify the inject-map name, + secondly an exist-map name, and optionally the copy-attributes + keyword which indicates that attributes should be copied from + the aggregate. For example [['lax_inject_map', 'lax_exist_map'], + ['nyc_inject_map', 'nyc_exist_map', 'copy-attributes'], + ['fsd_inject_map', 'fsd_exist_map']]. + required: false + default: null + maximum_paths: + description: + - Configures the maximum number of equal-cost paths for + load sharing. Valid value is an integer in the range 1-64. + default: null + maximum_paths_ibgp: + description: + - Configures the maximum number of ibgp equal-cost paths for + load sharing. 
Valid value is an integer in the range 1-64. + required: false + default: null + networks: + description: + - Networks to configure. Valid value is a list of network + prefixes to advertise. The list must be in the form of an array. + Each entry in the array must include a prefix address and an + optional route-map. For example [['10.0.0.0/16', 'routemap_LA'], + ['192.168.1.1', 'Chicago'], + ['192.168.2.0/24'], + ['192.168.3.0/24', 'routemap_NYC']]. + required: false + default: null + next_hop_route_map: + description: + - Configure a route-map for valid nexthops. Valid values are a + string defining the name of the route-map. + required: false + default: null + redistribute: + description: + - A list of redistribute directives. Multiple redistribute entries + are allowed. The list must be in the form of a nested array. + the first entry of each array defines the source-protocol to + redistribute from; the second entry defines a route-map name. + A route-map is highly advised but may be optional on some + platforms, in which case it may be omitted from the array list. + For example [['direct', 'rm_direct'], ['lisp', 'rm_lisp']]. + required: false + default: null + suppress_inactive: + description: + - Advertises only active routes to peers. + required: false + choices: ['true','false'] + default: null + table_map: + description: + - Apply table-map to filter routes downloaded into URIB. + Valid values are a string. + required: false + default: null + table_map_filter: + description: + - Filters routes rejected by the route-map and does not download + them to the RIB. + required: false + choices: ['true','false'] + default: null + state: + description: + - Determines whether the config should be present or not + on the device.
+ required: false + default: present + choices: ['present','absent'] +''' +EXAMPLES = ''' +# configure a simple address-family +- nxos_bgp_af: + asn: 65535 + vrf: TESTING + afi: ipv4 + safi: unicast + advertise_l2vpn_evpn: true + state: present +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"advertise_l2vpn_evpn": true, "afi": "ipv4", + "asn": "65535", "safi": "unicast", "vrf": "TESTING"} +existing: + description: k/v pairs of existing BGP AF configuration + returned: verbose mode + type: dict + sample: {} +end_state: + description: k/v pairs of BGP AF configuration after module execution + returned: verbose mode + type: dict + sample: {"additional_paths_install": false, + "additional_paths_receive": false, + "additional_paths_selection": "", + "additional_paths_send": false, + "advertise_l2vpn_evpn": true, "afi": "ipv4", + "asn": "65535", "client_to_client": true, + "dampen_igp_metric": "600", "dampening_half_time": "", + "dampening_max_suppress_time": "", "dampening_reuse_time": "", + "dampening_routemap": "", "dampening_state": false, + "dampening_suppress_time": "", + "default_information_originate": false, "default_metric": "", + "distance_ebgp": "20", "distance_ibgp": "200", + "distance_local": "220", "inject_map": [], "maximum_paths": "1", + "maximum_paths_ibgp": "1", "networks": [], + "next_hop_route_map": "", "redistribute": [], "safi": "unicast", + "suppress_inactive": false, "table_map": "", + "table_map_filter": false, "vrf": "TESTING"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["router bgp 65535", "vrf TESTING", + "address-family ipv4 unicast", "advertise l2vpn evpn"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +# COMMON CODE FOR MIGRATION +import re + +import ansible.module_utils.nxos +from ansible.module_utils.basic import 
get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.network import NetworkModule +from ansible.module_utils.shell import ShellError + + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # 
check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +WARNINGS = [] +BOOL_PARAMS = [ + 'additional_paths_install', + 'additional_paths_receive', + 'additional_paths_send', + 'advertise_l2vpn_evpn', + 'client_to_client', + 'dampening_state', + 'default_information_originate', + 'suppress_inactive', +] +PARAM_TO_DEFAULT_KEYMAP = { + 'maximum_paths': '1', + 'maximum_paths_ibgp': '1', + 'client_to_client': True, + 'distance_ebgp': '20', + 'distance_ibgp': '200', + 'distance_local': '220', + 'dampen_igp_metric': '600' +} +PARAM_TO_COMMAND_KEYMAP = { + 'asn': 'router bgp', + 'afi': 'address-family', + 'safi': 'address-family', + 'additional_paths_install': 
'additional-paths install backup', + 'additional_paths_receive': 'additional-paths receive', + 'additional_paths_selection': 'additional-paths selection route-map', + 'additional_paths_send': 'additional-paths send', + 'advertise_l2vpn_evpn': 'advertise l2vpn evpn', + 'client_to_client': 'client-to-client reflection', + 'dampen_igp_metric': 'dampen-igp-metric', + 'dampening_state': 'dampening', + 'dampening_half_time': 'dampening', + 'dampening_max_suppress_time': 'dampening', + 'dampening_reuse_time': 'dampening', + 'dampening_routemap': 'dampening route-map', + 'dampening_suppress_time': 'dampening', + 'default_information_originate': 'default-information originate', + 'default_metric': 'default-metric', + 'distance_ebgp': 'distance', + 'distance_ibgp': 'distance', + 'distance_local': 'distance', + 'inject_map': 'inject-map', + 'maximum_paths': 'maximum-paths', + 'maximum_paths_ibgp': 'maximum-paths ibgp', + 'networks': 'network', + 'redistribute': 'redistribute', + 'next_hop_route_map': 'nexthop route-map', + 'suppress_inactive': 'suppress-inactive', + 'table_map': 'table-map', + 'table_map_filter': 'table-map-filter', + 'vrf': 'vrf' +} +DAMPENING_PARAMS = [ + 'dampening_half_time', + 'dampening_suppress_time', + 'dampening_reuse_time', + 'dampening_max_suppress_time' + ] + + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + +def get_custom_list_value(config, arg, module): + value_list = [] + splitted_config = config.splitlines() + if arg == 'inject_map': + REGEX_INJECT = ('.*inject-map\s(?P\S+)' + '\sexist-map\s(?P\S+)-*') + + for line in splitted_config: + value = [] + inject_group = {} + try: + match_inject = re.match(REGEX_INJECT, line, re.DOTALL) + inject_group = match_inject.groupdict() + inject_map = inject_group['inject_map'] + exist_map = inject_group['exist_map'] + value.append(inject_map) + value.append(exist_map) + except AttributeError: + value = [] + + if value: + copy_attributes = False 
+ inject_map_command = ('inject-map {0} exist-map {1} ' + 'copy-attributes'.format( + inject_group['inject_map'], + inject_group['exist_map'])) + + REGEX = re.compile(r'\s+{0}\s*$'.format( + inject_map_command), re.M) + try: + if REGEX.search(config): + copy_attributes = True + except TypeError: + copy_attributes = False + + if copy_attributes: + value.append('copy_attributes') + value_list.append(value) + + elif arg == 'networks': + REGEX_NETWORK = re.compile(r'(?:network\s)(?P.*)$') + + for line in splitted_config: + value = [] + network_group = {} + if 'network' in line: + value = REGEX_NETWORK.search(line).group('value').split() + + if value: + if len(value) == 3: + value.pop(1) + value_list.append(value) + + elif arg == 'redistribute': + RED_REGEX = re.compile(r'(?:{0}\s)(?P.*)$'.format( + PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + for line in splitted_config: + value = [] + redistribute_group = {} + if 'redistribute' in line: + value = RED_REGEX.search(line).group('value').split() + if value: + if len(value) == 3: + value.pop(1) + elif len(value) == 4: + value = ['{0} {1}'.format( + value[0], value[1]), value[3]] + value_list.append(value) + return value_list + + +def get_custom_string_value(config, arg, module): + value = '' + if arg.startswith('distance'): + REGEX_DISTANCE = ('.*distance\s(?P\w+)\s(?P\w+)' + '\s(?P\w+)') + try: + match_distance = re.match(REGEX_DISTANCE, config, re.DOTALL) + distance_group = match_distance.groupdict() + except AttributeError: + distance_group = {} + + if distance_group: + if arg == 'distance_ebgp': + value = distance_group['d_ebgp'] + elif arg == 'distance_ibgp': + value = distance_group['d_ibgp'] + elif arg == 'distance_local': + value = distance_group['d_local'] + + elif arg.startswith('dampening'): + REGEX = re.compile(r'(?:{0}\s)(?P.*)$'.format( + PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + if arg == 'dampen_igp_metric' or arg == 'dampening_routemap': + value = '' + if PARAM_TO_COMMAND_KEYMAP[arg] in config: + value = 
REGEX.search(config).group('value') + else: + REGEX_DAMPENING = ('.*dampening\s(?P\w+)\s(?P\w+)' + '\s(?P\w+)\s(?P\w+)') + try: + match_dampening = re.match(REGEX_DAMPENING, config, re.DOTALL) + dampening_group = match_dampening.groupdict() + except AttributeError: + dampening_group = {} + + if dampening_group: + if arg == 'dampening_half_time': + value = dampening_group['half'] + elif arg == 'dampening_reuse_time': + value = dampening_group['reuse'] + elif arg == 'dampening_suppress_time': + value = dampening_group['suppress'] + elif arg == 'dampening_max_suppress_time': + value = dampening_group['max_suppress'] + + elif arg == 'table_map_filter': + TMF_REGEX = re.compile(r'\s+table-map.*filter$', re.M) + value = False + try: + if TMF_REGEX.search(config): + value = True + except TypeError: + value = False + elif arg == 'table_map': + TM_REGEX = re.compile(r'(?:table-map\s)(?P\S+)(\sfilter)?$', re.M) + value = '' + if PARAM_TO_COMMAND_KEYMAP[arg] in config: + value = TM_REGEX.search(config).group('value') + return value + + +def get_value(arg, config, module): + custom = [ + 'inject_map', + 'networks', + 'redistribute' + ] + + if arg in BOOL_PARAMS: + REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = False + try: + if REGEX.search(config): + value = True + except TypeError: + value = False + + elif arg in custom: + value = get_custom_list_value(config, arg, module) + + elif (arg.startswith('distance') or arg.startswith('dampening') or + arg.startswith('table_map')): + value = get_custom_string_value(config, arg, module) + + else: + REGEX = re.compile(r'(?:{0}\s)(?P.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = '' + if PARAM_TO_COMMAND_KEYMAP[arg] in config: + value = REGEX.search(config).group('value') + return value + + +def get_existing(module, args): + existing = {} + netcfg = get_config(module) + + try: + asn_regex = '.*router\sbgp\s(?P\d+).*' + match_asn = re.match(asn_regex, str(netcfg), re.DOTALL) + 
existing_asn_group = match_asn.groupdict() + existing_asn = existing_asn_group['existing_asn'] + except AttributeError: + existing_asn = '' + + if existing_asn: + parents = ["router bgp {0}".format(existing_asn)] + if module.params['vrf'] != 'default': + parents.append('vrf {0}'.format(module.params['vrf'])) + + parents.append('address-family {0} {1}'.format(module.params['afi'], + module.params['safi'])) + config = netcfg.get_section(parents) + + if config: + for arg in args: + if arg not in ['asn', 'afi', 'safi', 'vrf']: + existing[arg] = get_value(arg, config, module) + + existing['asn'] = existing_asn + existing['afi'] = module.params['afi'] + existing['safi'] = module.params['safi'] + existing['vrf'] = module.params['vrf'] + else: + WARNINGS.append("The BGP process {0} didn't exist but the task" + " just created it.".format(module.params['asn'])) + + return existing + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = value + else: + new_dict[new_key] = value + return new_dict + + +def fix_proposed(module, proposed, existing): + commands = list() + command = '' + fixed_proposed = {} + for key, value in proposed.iteritems(): + if key in DAMPENING_PARAMS: + if value != 'default': + command = 'dampening {0} {1} {2} {3}'.format( + proposed.get('dampening_half_time'), + proposed.get('dampening_reuse_time'), + proposed.get('dampening_suppress_time'), + proposed.get('dampening_max_suppress_time')) + else: + if existing.get(key): + command = ('no dampening {0} {1} {2} {3}'.format( + existing['dampening_half_time'], + existing['dampening_reuse_time'], + existing['dampening_suppress_time'], + existing['dampening_max_suppress_time'])) + if 'default' in command: + command = '' + elif key.startswith('distance'): + command = 'distance {0} {1} {2}'.format( + proposed.get('distance_ebgp'), + proposed.get('distance_ibgp'), + 
proposed.get('distance_local')) + else: + fixed_proposed[key] = value + + if command: + if command not in commands: + commands.append(command) + + return fixed_proposed, commands + + +def default_existing(existing_value, key, value): + commands = [] + if key == 'network': + for network in existing_value: + if len(network) == 2: + commands.append('no network {0} route-map {1}'.format( + network[0], network[1])) + elif len(network) == 1: + commands.append('no network {0}'.format( + network[0])) + + elif key == 'inject-map': + for maps in existing_value: + if len(maps) == 2: + commands.append('no inject-map {0} exist-map {1}'.format( + maps[0], maps[1])) + elif len(maps) == 3: + commands.append('no inject-map {0} exist-map {1} ' + 'copy-attributes'.format( + maps[0], maps[1])) + else: + commands.append('no {0} {1}'.format(key, existing_value)) + return commands + + +def get_network_command(existing, key, value): + commands = [] + existing_networks = existing.get('networks', []) + for inet in value: + if not isinstance(inet, list): + inet = [inet] + if inet not in existing_networks: + if len(inet) == 1: + command = '{0} {1}'.format(key, inet[0]) + elif len(inet) == 2: + command = '{0} {1} route-map {2}'.format(key, + inet[0], inet[1]) + commands.append(command) + return commands + + +def get_inject_map_command(existing, key, value): + commands = [] + existing_maps = existing.get('inject_map', []) + for maps in value: + if not isinstance(maps, list): + maps = [maps] + if maps not in existing_maps: + if len(maps) == 2: + command = ('inject-map {0} exist-map {1}'.format( + maps[0], maps[1])) + elif len(maps) == 3: + command = ('inject-map {0} exist-map {1} ' + 'copy-attributes'.format(maps[0], + maps[1])) + commands.append(command) + return commands + + +def get_redistribute_command(existing, key, value): + commands = [] + for rule in value: + if rule[1] == 'default': + existing_rule = existing.get('redistribute', []) + for each_rule in existing_rule: + if rule[0] in 
each_rule: + command = 'no {0} {1} route-map {2}'.format( + key, each_rule[0], each_rule[1]) + commands.append(command) + else: + command = '{0} {1} route-map {2}'.format(key, rule[0], rule[1]) + commands.append(command) + return commands + + +def get_table_map_command(module, existing, key, value): + commands = [] + if key == 'table-map': + if value != 'default': + command = '{0} {1}'.format(key, module.params['table_map']) + if (module.params['table_map_filter'] is not None and + module.params['table_map_filter'] != 'default'): + command += ' filter' + commands.append(command) + else: + if existing.get('table_map'): + command = 'no {0} {1}'.format(key, existing.get('table_map')) + commands.append(command) + return commands + + +def get_default_table_map_filter(existing): + commands = [] + existing_table_map_filter = existing.get('table_map_filter') + if existing_table_map_filter: + existing_table_map = existing.get('table_map') + if existing_table_map: + command = 'table-map {0}'.format(existing_table_map) + commands.append(command) + return commands + + +def state_present(module, existing, proposed, candidate): + fixed_proposed, commands = fix_proposed(module, proposed, existing) + proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, fixed_proposed) + existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) + for key, value in proposed_commands.iteritems(): + if key == 'address-family': + addr_family_command = "address-family {0} {1}".format( + module.params['afi'], module.params['safi']) + if addr_family_command not in commands: + commands.append(addr_family_command) + + elif key.startswith('table-map'): + table_map_commands = get_table_map_command(module, existing, key, value) + if table_map_commands: + commands.extend(table_map_commands) + + elif value is True: + commands.append(key) + + elif value is False: + commands.append('no {0}'.format(key)) + + elif value == 'default': + if key in PARAM_TO_DEFAULT_KEYMAP: + commands.append('{0} 
{1}'.format(key, PARAM_TO_DEFAULT_KEYMAP[key])) + + elif existing_commands.get(key): + if key == 'table-map-filter': + default_tmf_command = get_default_table_map_filter(existing) + + if default_tmf_command: + commands.extend(default_tmf_command) + else: + existing_value = existing_commands.get(key) + default_command = default_existing(existing_value, key, value) + if default_command: + commands.extend(default_command) + else: + if key == 'network': + network_commands = get_network_command(existing, key, value) + if network_commands: + commands.extend(network_commands) + + elif key == 'inject-map': + inject_map_commands = get_inject_map_command(existing, key, value) + if inject_map_commands: + commands.extend(inject_map_commands) + + elif key == 'redistribute': + redistribute_commands = get_redistribute_command(existing, key, value) + if redistribute_commands: + commands.extend(redistribute_commands) + + else: + command = '{0} {1}'.format(key, value) + commands.append(command) + + if commands: + parents = ["router bgp {0}".format(module.params['asn'])] + if module.params['vrf'] != 'default': + parents.append('vrf {0}'.format(module.params['vrf'])) + + if len(commands) == 1: + candidate.add(commands, parents=parents) + elif len(commands) > 1: + parents.append('address-family {0} {1}'.format(module.params['afi'], + module.params['safi'])) + if addr_family_command in commands: + commands.remove(addr_family_command) + candidate.add(commands, parents=parents) + + +def state_absent(module, existing, proposed, candidate): + commands = [] + parents = ["router bgp {0}".format(module.params['asn'])] + if module.params['vrf'] != 'default': + parents.append('vrf {0}'.format(module.params['vrf'])) + + commands.append('no address-family {0} {1}'.format( + module.params['afi'], module.params['safi'])) + candidate.add(commands, parents=parents) + + +def main(): + argument_spec = dict( + asn=dict(required=True, type='str'), + vrf=dict(required=False, type='str', default='default'), 
+ safi=dict(required=True, type='str', choices=['unicast','multicast', 'evpn']), + afi=dict(required=True, type='str', choices=['ipv4','ipv6', 'vpnv4', 'vpnv6', 'l2vpn']), + additional_paths_install=dict(required=False, type='bool'), + additional_paths_receive=dict(required=False, type='bool'), + additional_paths_selection=dict(required=False, type='str'), + additional_paths_send=dict(required=False, type='bool'), + advertise_l2vpn_evpn=dict(required=False, type='bool'), + client_to_client=dict(required=False, type='bool'), + dampen_igp_metric=dict(required=False, type='str'), + dampening_state=dict(required=False, type='bool'), + dampening_half_time=dict(required=False, type='str'), + dampening_max_suppress_time=dict(required=False, type='str'), + dampening_reuse_time=dict(required=False, type='str'), + dampening_routemap=dict(required=False, type='str'), + dampening_suppress_time=dict(required=False, type='str'), + default_information_originate=dict(required=False, type='bool'), + default_metric=dict(required=False, type='str'), + distance_ebgp=dict(required=False, type='str'), + distance_ibgp=dict(required=False, type='str'), + distance_local=dict(required=False, type='str'), + inject_map=dict(required=False, type='list'), + maximum_paths=dict(required=False, type='str'), + maximum_paths_ibgp=dict(required=False, type='str'), + networks=dict(required=False, type='list'), + next_hop_route_map=dict(required=False, type='str'), + redistribute=dict(required=False, type='list'), + suppress_inactive=dict(required=False, type='bool'), + table_map=dict(required=False, type='str'), + table_map_filter=dict(required=False, type='bool'), + state=dict(choices=['present', 'absent'], default='present', + required=False), + include_defaults=dict(default=True), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + required_together=[DAMPENING_PARAMS, + ['distance_ibgp', + 'distance_ebgp', + 'distance_local']], + 
supports_check_mode=True) + + state = module.params['state'] + if module.params['dampening_routemap']: + for param in DAMPENING_PARAMS: + if module.params[param]: + module.fail_json(msg='dampening_routemap cannot be used with' + ' the {0} param'.format(param)) + + if module.params['advertise_l2vpn_evpn']: + if module.params['vrf'] == 'default': + module.fail_json(msg='It is not possible to advertise L2VPN ' + 'EVPN in the default VRF. Please specify ' + 'another one.', vrf=module.params['vrf']) + + if module.params['table_map_filter'] and not module.params['table_map']: + module.fail_json(msg='table_map param is needed when using' + ' table_map_filter filter.') + + args = [ + "additional_paths_install", + "additional_paths_receive", + "additional_paths_selection", + "additional_paths_send", + "advertise_l2vpn_evpn", + "afi", + "asn", + "client_to_client", + "dampen_igp_metric", + "dampening_half_time", + "dampening_max_suppress_time", + "dampening_reuse_time", + "dampening_suppress_time", + "dampening_routemap", + "dampening_state", + "default_information_originate", + "default_metric", + "distance_ebgp", + "distance_ibgp", + "distance_local", + "inject_map", + "maximum_paths", + "maximum_paths_ibgp", + "networks", + "next_hop_route_map", + "redistribute", + "safi", + "suppress_inactive", + "table_map", + "table_map_filter", + "vrf" + ] + + existing = invoke('get_existing', module, args) + + if existing.get('asn'): + if (existing.get('asn') != module.params['asn'] and + state == 'present'): + module.fail_json(msg='Another BGP ASN already exists.', + proposed_asn=module.params['asn'], + existing_asn=existing.get('asn')) + + end_state = existing + proposed_args = dict((k, v) for k, v in module.params.iteritems() + if v is not None and k in args) + + if proposed_args.get('networks'): + if proposed_args['networks'][0] == 'default': + proposed_args['networks'] = 'default' + if proposed_args.get('inject_map'): + if proposed_args['inject_map'][0] == 'default': + 
proposed_args['inject_map'] = 'default' + + proposed = {} + for key, value in proposed_args.iteritems(): + if key not in ['asn', 'vrf']: + if str(value).lower() == 'default': + value = PARAM_TO_DEFAULT_KEYMAP.get(key) + if value is None: + value = 'default' + if existing.get(key) or (not existing.get(key) and value): + proposed[key] = value + + result = {} + if state == 'present' or (state == 'absent' and existing): + candidate = CustomNetworkConfig(indent=3) + invoke('state_%s' % state, module, existing, proposed, candidate) + + try: + response = load_config(module, candidate) + result.update(response) + except ShellError: + exc = get_exception() + module.fail_json(msg=str(exc)) + else: + result['updates'] = [] + + result['connected'] = module.connected + if module._verbosity > 0: + end_state = invoke('get_existing', module, args) + result['end_state'] = end_state + result['existing'] = existing + result['proposed'] = proposed_args + + if WARNINGS: + result['warnings'] = WARNINGS + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_bgp_neighbor.py b/network/nxos/nxos_bgp_neighbor.py new file mode 100644 index 00000000000..f0cc6145819 --- /dev/null +++ b/network/nxos/nxos_bgp_neighbor.py @@ -0,0 +1,769 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_bgp_neighbor +version_added: "2.2" +short_description: Manages BGP neighbors configurations. +description: + - Manages BGP neighbors configurations on NX-OS switches. +author: Gabriele Gerbino (@GGabriele) +extends_documentation_fragment: nxos +notes: + - C(state=absent) removes the whole BGP neighbor configuration. + - Default, where supported, restores params default value. +options: + asn: + description: + - BGP autonomous system number. Valid values are string, + Integer in ASPLAIN or ASDOT notation. + required: true + vrf: + description: + - Name of the VRF. The name 'default' is a valid VRF representing + the global bgp. + required: false + default: default + neighbor: + description: + - Neighbor Identifier. Valid values are string. Neighbors may use + IPv4 or IPv6 notation, with or without prefix length. + required: true + description: + description: + - Description of the neighbor. + required: false + default: null + connected_check: + description: + - Configure whether or not to check for directly connected peer. + required: false + choices: ['true', 'false'] + default: null + capability_negotiation: + description: + - Configure whether or not to negotiate capability with + this neighbor. + required: false + choices: ['true', 'false'] + default: null + dynamic_capability: + description: + - Configure whether or not to enable dynamic capability. + required: false + choices: ['true', 'false'] + default: null + ebgp_multihop: + description: + - Specify multihop TTL for a remote peer. Valid values are + integers between 2 and 255, or keyword 'default' to disable + this property. + required: false + default: null + local_as: + description: + - Specify the local-as number for the eBGP neighbor. + Valid values are String or Integer in ASPLAIN or ASDOT notation, + or 'default', which means not to configure it. 
+ required: false + default: null + log_neighbor_changes: + description: + - Specify whether or not to enable log messages for neighbor + up/down event. + required: false + choices: ['enable', 'disable', 'inherit'] + default: null + low_memory_exempt: + description: + - Specify whether or not to shut down this neighbor under + memory pressure. + required: false + choices: ['true', 'false'] + default: null + maximum_peers: + description: + - Specify Maximum number of peers for this neighbor prefix + Valid values are between 1 and 1000, or 'default', which does + not impose the limit. + required: false + default: null + pwd: + description: + - Specify the password for neighbor. Valid value is string. + required: false + default: null + pwd_type: + description: + - Specify the encryption type the password will use. Valid values + are '3des' or 'cisco_type_7' encryption. + required: false + choices: ['3des', 'cisco_type_7'] + default: null + remote_as: + description: + - Specify Autonomous System Number of the neighbor. + Valid values are String or Integer in ASPLAIN or ASDOT notation, + or 'default', which means not to configure it. + required: false + default: null + remove_private_as: + description: + - Specify the config to remove private AS number from outbound + updates. Valid values are 'enable' to enable this config, + 'disable' to disable this config, 'all' to remove all + private AS number, or 'replace-as', to replace the private + AS number. + required: false + choices: ['enable', 'disable', 'all', 'replace-as'] + default: null + shutdown: + description: + - Configure to administratively shutdown this neighbor. + required: false + choices: ['true','false'] + default: null + suppress_4_byte_as: + description: + - Configure to suppress 4-byte AS Capability. + required: false + choices: ['true','false'] + default: null + timers_keepalive: + description: + - Specify keepalive timer value. 
Valid values are integers + between 0 and 3600 in terms of seconds, or 'default', + which is 60. + required: false + default: null + timers_holdtime: + description: + - Specify holdtime timer value. Valid values are integers between + 0 and 3600 in terms of seconds, or 'default', which is 180. + required: false + default: null + transport_passive_only: + description: + - Specify whether or not to only allow passive connection setup. + Valid values are 'true', 'false', and 'default', which defaults + to 'false'. This property can only be configured when the + neighbor is in 'ip' address format without prefix length. + This property and the transport_passive_mode property are + mutually exclusive. + required: false + choices: ['true','false'] + default: null + update_source: + description: + - Specify source interface of BGP session and updates. + required: false + default: null + state: + description: + - Determines whether the config should be present or not + on the device. + required: false + default: present + choices: ['present','absent'] +''' +EXAMPLES = ''' +# create a new neighbor +- nxos_bgp_neighbor: + asn: 65535 + neighbor: 3.3.3.3 + local_as: 20 + remote_as: 30 + description: "just a description" + update_source: Ethernet1/3 + shutdown: default + state: present + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"asn": "65535", "description": "just a description", + "local_as": "20", "neighbor": "3.3.3.3", + "remote_as": "30", "shutdown": "default", + "update_source": "Ethernet1/3", "vrf": "default"} +existing: + description: k/v pairs of existing BGP neighbor configuration + returned: verbose mode + type: dict + sample: {} +end_state: + description: k/v pairs of BGP neighbor configuration after module execution + returned: verbose mode + type: dict + sample: {"asn": "65535", 
"capability_negotiation": false, + "connected_check": false, "description": "just a description", + "dynamic_capability": true, "ebgp_multihop": "", + "local_as": "20", "log_neighbor_changes": "", + "low_memory_exempt": false, "maximum_peers": "", + "neighbor": "3.3.3.3", "pwd": "", + "pwd_type": "", "remote_as": "30", + "remove_private_as": "disable", "shutdown": false, + "suppress_4_byte_as": false, "timers_holdtime": "180", + "timers_keepalive": "60", "transport_passive_only": false, + "update_source": "Ethernet1/3", "vrf": "default"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["router bgp 65535", "neighbor 3.3.3.3", + "remote-as 30", "update-source Ethernet1/3", + "description just a description", "local-as 20"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +# COMMON CODE FOR MIGRATION +import re + +import ansible.module_utils.nxos +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.network import NetworkModule +from ansible.module_utils.shell import ShellError + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + 
return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + 
result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +WARNINGS = [] +BOOL_PARAMS = [ + 'capability_negotiation', + 'shutdown', + 'connected_check', + 'dynamic_capability', + 'low_memory_exempt', + 'suppress_4_byte_as', + 'transport_passive_only' +] +PARAM_TO_COMMAND_KEYMAP = { + 'asn': 'router bgp', + 'capability_negotiation': 'dont-capability-negotiate', + 'connected_check': 'disable-connected-check', + 'description': 'description', + 'dynamic_capability': 'dynamic-capability', + 'ebgp_multihop': 'ebgp-multihop', + 'local_as': 'local-as', + 'log_neighbor_changes': 'log-neighbor-changes', + 'low_memory_exempt': 'low-memory exempt', + 'maximum_peers': 'maximum-peers', + 'neighbor': 'neighbor', + 'pwd': 'password', + 'pwd_type': 'password-type', + 'remote_as': 'remote-as', + 'remove_private_as': 'remove-private-as', + 'shutdown': 'shutdown', + 'suppress_4_byte_as': 'capability suppress 4-byte-as', + 'timers_keepalive': 'timers-keepalive', + 'timers_holdtime': 'timers-holdtime', + 'transport_passive_only': 'transport connection-mode passive', + 'update_source': 'update-source', + 'vrf': 'vrf' +} +PARAM_TO_DEFAULT_KEYMAP = { + 'shutdown': False, + 'dynamic_capability': True, + 'timers_keepalive': 60, + 'timers_holdtime': 180 +} + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + +def get_value(arg, config, module): + if arg in BOOL_PARAMS: + REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = False + try: + if REGEX.search(config): + value = True + except TypeError: + value = False + + else: + REGEX = 
re.compile(r'(?:{0}\s)(?P.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = '' + if PARAM_TO_COMMAND_KEYMAP[arg] in config: + value = REGEX.search(config).group('value') + return value + + +def get_custom_value(arg, config, module): + value = '' + splitted_config = config.splitlines() + + if arg == 'log_neighbor_changes': + for line in splitted_config: + if 'log-neighbor-changes' in line: + if 'disable' in line: + value = 'disable' + else: + value = 'enable' + + elif arg == 'pwd': + for line in splitted_config: + if 'password' in line: + splitted_line = line.split() + value = splitted_line[2] + + elif arg == 'pwd_type': + for line in splitted_config: + if 'password' in line: + splitted_line = line.split() + value = splitted_line[1] + + elif arg == 'remove_private_as': + value = 'disable' + for line in splitted_config: + if 'remove-private-as' in line: + splitted_line = line.split() + if len(splitted_line) == 1: + value = 'enable' + elif len(splitted_line) == 2: + value = splitted_line[1] + + elif arg == 'timers_keepalive': + REGEX = re.compile(r'(?:timers\s)(?P.*)$', re.M) + value = '' + if 'timers' in config: + parsed = REGEX.search(config).group('value').split() + value = parsed[0] + + elif arg == 'timers_holdtime': + REGEX = re.compile(r'(?:timers\s)(?P.*)$', re.M) + value = '' + if 'timers' in config: + parsed = REGEX.search(config).group('value').split() + if len(parsed) == 2: + value = parsed[1] + + return value + + +def get_existing(module, args): + existing = {} + netcfg = get_config(module) + custom = [ + 'log_neighbor_changes', + 'pwd', + 'pwd_type', + 'remove_private_as', + 'timers_holdtime', + 'timers_keepalive' + ] + try: + asn_regex = '.*router\sbgp\s(?P\d+).*' + match_asn = re.match(asn_regex, str(netcfg), re.DOTALL) + existing_asn_group = match_asn.groupdict() + existing_asn = existing_asn_group['existing_asn'] + except AttributeError: + existing_asn = '' + + if existing_asn: + parents = ["router bgp {0}".format(existing_asn)] + if 
module.params['vrf'] != 'default': + parents.append('vrf {0}'.format(module.params['vrf'])) + + parents.append('neighbor {0}'.format(module.params['neighbor'])) + config = netcfg.get_section(parents) + + if config: + for arg in args: + if arg not in ['asn', 'vrf', 'neighbor']: + if arg in custom: + existing[arg] = get_custom_value(arg, config, module) + else: + existing[arg] = get_value(arg, config, module) + + existing['asn'] = existing_asn + existing['neighbor'] = module.params['neighbor'] + existing['vrf'] = module.params['vrf'] + else: + WARNINGS.append("The BGP process didn't exist but the task" + " just created it.") + return existing + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = value + else: + new_dict[new_key] = value + return new_dict + + +def state_present(module, existing, proposed, candidate): + commands = list() + proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) + existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) + + for key, value in proposed_commands.iteritems(): + if value is True: + commands.append(key) + + elif value is False: + commands.append('no {0}'.format(key)) + + elif value == 'default': + if existing_commands.get(key): + existing_value = existing_commands.get(key) + commands.append('no {0} {1}'.format(key, existing_value)) + else: + if key == 'log-neighbor-changes': + if value == 'enable': + commands.append('{0}'.format(key)) + elif value == 'disable': + commands.append('{0} {1}'.format(key, value)) + elif value == 'inherit': + if existing_commands.get(key): + commands.append('no {0}'.format(key)) + elif key == 'password': + pwd_type = module.params['pwd_type'] + if pwd_type == '3des': + pwd_type = 3 + else: + pwd_type = 7 + command = '{0} {1} {2}'.format(key, pwd_type, value) + if command not in commands: + commands.append(command) + elif key == 
'remove-private-as': + if value == 'enable': + command = '{0}'.format(key) + commands.append(command) + elif value == 'disable': + if existing_commands.get(key) != 'disable': + command = 'no {0}'.format(key) + commands.append(command) + else: + command = '{0} {1}'.format(key, value) + commands.append(command) + elif key.startswith('timers'): + command = 'timers {0} {1}'.format( + proposed_commands['timers-keepalive'], + proposed_commands['timers-holdtime']) + if command not in commands: + commands.append(command) + else: + command = '{0} {1}'.format(key, value) + commands.append(command) + + if commands: + parents = ["router bgp {0}".format(module.params['asn'])] + if module.params['vrf'] != 'default': + parents.append('vrf {0}'.format(module.params['vrf'])) + + parents.append('neighbor {0}'.format(module.params['neighbor'])) + + # make sure that local-as is the last command in the list. + local_as_command = 'local-as {0}'.format(module.params['local_as']) + if local_as_command in commands: + commands.remove(local_as_command) + commands.append(local_as_command) + candidate.add(commands, parents=parents) + + +def state_absent(module, existing, proposed, candidate): + commands = [] + parents = ["router bgp {0}".format(module.params['asn'])] + if module.params['vrf'] != 'default': + parents.append('vrf {0}'.format(module.params['vrf'])) + + commands.append('no neighbor {0}'.format(module.params['neighbor'])) + candidate.add(commands, parents=parents) + + +def main(): + argument_spec = dict( + asn=dict(required=True, type='str'), + vrf=dict(required=False, type='str', default='default'), + neighbor=dict(required=True, type='str'), + description=dict(required=False, type='str'), + capability_negotiation=dict(required=False, type='bool'), + connected_check=dict(required=False, type='bool'), + dynamic_capability=dict(required=False, type='bool'), + ebgp_multihop=dict(required=False, type='str'), + local_as=dict(required=False, type='str'), + 
log_neighbor_changes=dict(required=False, type='str', choices=['enable', 'disable', 'inherit']), + low_memory_exempt=dict(required=False, type='bool'), + maximum_peers=dict(required=False, type='str'), + pwd=dict(required=False, type='str'), + pwd_type=dict(required=False, type='str', choices=['cleartext', '3des', 'cisco_type_7', 'default']), + remote_as=dict(required=False, type='str'), + remove_private_as=dict(required=False, type='str', choices=['enable', 'disable', 'all', 'replace-as']), + shutdown=dict(required=False, type='str'), + suppress_4_byte_as=dict(required=False, type='bool'), + timers_keepalive=dict(required=False, type='str'), + timers_holdtime=dict(required=False, type='str'), + transport_passive_only=dict(required=False, type='bool'), + update_source=dict(required=False, type='str'), + m_facts=dict(required=False, default=False, type='bool'), + state=dict(choices=['present', 'absent'], default='present', + required=False), + include_defaults=dict(default=True), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + required_together=[['timer_bgp_hold', + 'timer_bgp_keepalive']], + supports_check_mode=True) + + state = module.params['state'] + if module.params['pwd_type'] == 'default': + module.params['pwd_type'] = '0' + + args = [ + 'asn', + 'capability_negotiation', + 'connected_check', + 'description', + 'dynamic_capability', + 'ebgp_multihop', + 'local_as', + 'log_neighbor_changes', + 'low_memory_exempt', + 'maximum_peers', + 'neighbor', + 'pwd', + 'pwd_type', + 'remote_as', + 'remove_private_as', + 'shutdown', + 'suppress_4_byte_as', + 'timers_keepalive', + 'timers_holdtime', + 'transport_passive_only', + 'update_source', + 'vrf' + ] + + existing = invoke('get_existing', module, args) + if existing.get('asn'): + if (existing.get('asn') != module.params['asn'] and + state == 'present'): + module.fail_json(msg='Another BGP ASN already exists.', + 
proposed_asn=module.params['asn'], + existing_asn=existing.get('asn')) + + end_state = existing + proposed_args = dict((k, v) for k, v in module.params.iteritems() + if v is not None and k in args) + + proposed = {} + for key, value in proposed_args.iteritems(): + if key not in ['asn', 'vrf', 'neighbor', 'pwd_type']: + if str(value).lower() == 'default': + value = PARAM_TO_DEFAULT_KEYMAP.get(key) + if value is None: + value = 'default' + if existing.get(key) or (not existing.get(key) and value): + proposed[key] = value + + result = {} + if state == 'present' or (state == 'absent' and existing): + candidate = CustomNetworkConfig(indent=3) + invoke('state_%s' % state, module, existing, proposed, candidate) + + try: + response = load_config(module, candidate) + result.update(response) + except ShellError: + exc = get_exception() + module.fail_json(msg=str(exc)) + else: + result['updates'] = [] + + result['connected'] = module.connected + if module._verbosity > 0: + end_state = invoke('get_existing', module, args) + result['end_state'] = end_state + result['existing'] = existing + result['proposed'] = proposed_args + + if WARNINGS: + result['warnings'] = WARNINGS + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_bgp_neighbor_af.py b/network/nxos/nxos_bgp_neighbor_af.py new file mode 100644 index 00000000000..e7d9ea6481e --- /dev/null +++ b/network/nxos/nxos_bgp_neighbor_af.py @@ -0,0 +1,1122 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_bgp_neighbor_af +version_added: "2.2" +short_description: Manages BGP address-family's neighbors configuration. +description: + - Manages BGP address-family's neighbors configurations on NX-OS switches. +author: Gabriele Gerbino (@GGabriele) +extends_documentation_fragment: nxos +notes: + - C(state=absent) removes the whole BGP address-family's + neighbor configuration. + - Default, when supported, removes properties + - In order to default maximum-prefix configuration, only + C(max_prefix_limit=default) is needed. +options: + asn: + description: + - BGP autonomous system number. Valid values are String, + Integer in ASPLAIN or ASDOT notation. + required: true + vrf: + description: + - Name of the VRF. The name 'default' is a valid VRF representing + the global bgp. + required: false + default: default + neighbor: + description: + - Neighbor Identifier. Valid values are string. Neighbors may use + IPv4 or IPv6 notation, with or without prefix length. + required: true + afi: + description: + - Address Family Identifier. + required: true + choices: ['ipv4','ipv6', 'vpnv4', 'vpnv6', 'l2vpn'] + safi: + description: + - Sub Address Family Identifier. + required: true + choices: ['unicast','multicast', 'evpn'] + additional_paths_receive: + description: + - Valid values are enable for basic command enablement; disable + for disabling the command at the neighbor af level + (it adds the disable keyword to the basic command); and inherit + to remove the command at this level (the command value is + inherited from a higher BGP layer). 
+ required: false + choices: ['enable','disable', 'inherit'] + default: null + additional_paths_send: + description: + - Valid values are enable for basic command enablement; disable + for disabling the command at the neighbor af level + (it adds the disable keyword to the basic command); and inherit + to remove the command at this level (the command value is + inherited from a higher BGP layer). + required: false + choices: ['enable','disable', 'inherit'] + default: null + advertise_map_exist: + description: + - Conditional route advertisement. This property requires two + route maps, an advertise-map and an exist-map. Valid values are + an array specifying both the advertise-map name and the exist-map + name, or simply 'default' e.g. ['my_advertise_map', + 'my_exist_map']. This command is mutually exclusive with the + advertise_map_non_exist property. + required: false + default: null + advertise_map_non_exist: + description: + - Conditional route advertisement. This property requires two + route maps, an advertise-map and an exist-map. Valid values are + an array specifying both the advertise-map name and the + non-exist-map name, or simply 'default' e.g. + ['my_advertise_map', 'my_non_exist_map']. This command is mutually + exclusive with the advertise_map_exist property. + required: false + default: null + allowas_in: + description: + - Activate allowas-in property + required: false + default: null + allowas_in_max: + description: + - Optional max-occurrences value for allowas_in. Valid values are + an integer value or 'default'. Can be used independently or in + conjunction with allowas_in. + required: false + default: null + as_override: + description: + - Activate the as-override feature. + required: false + choices: ['true', 'false'] + default: null + default_originate: + description: + - Activate the default-originate feature. 
+ required: false + choices: ['true', 'false'] + default: null + default_originate_route_map: + description: + - Optional route-map for the default_originate property. Can be + used independently or in conjunction with C(default_originate). + Valid values are a string defining a route-map name, + or 'default'. + required: false + default: null + filter_list_in: + description: + - Valid values are a string defining a filter-list name, + or 'default'. + required: false + default: null + filter_list_out: + description: + - Valid values are a string defining a filter-list name, + or 'default'. + required: false + default: null + max_prefix_limit: + description: + - maximum-prefix limit value. Valid values are an integer value + or 'default'. + required: false + default: null + max_prefix_interval: + description: + - Optional restart interval. Valid values are an integer. + Requires max_prefix_limit. + required: false + default: null + max_prefix_threshold: + description: + - Optional threshold percentage at which to generate a warning. + Valid values are an integer value. + Requires max_prefix_limit. + required: false + default: null + max_prefix_warning: + description: + - Optional warning-only keyword. Requires max_prefix_limit. + required: false + choices: ['true','false'] + default: null + next_hop_self: + description: + - Activate the next-hop-self feature. + required: false + choices: ['true','false'] + default: null + next_hop_third_party: + description: + - Activate the next-hop-third-party feature. + required: false + choices: ['true','false'] + default: null + prefix_list_in: + description: + - Valid values are a string defining a prefix-list name, + or 'default'. + required: false + default: null + prefix_list_out: + description: + - Valid values are a string defining a prefix-list name, + or 'default'. + required: false + default: null + route_map_in: + description: + - Valid values are a string defining a route-map name, + or 'default'. 
+ required: false + default: null + route_map_out: + description: + - Valid values are a string defining a route-map name, + or 'default'. + required: false + default: null + route_reflector_client: + description: + - Router reflector client. + required: false + choices: ['true','false'] + default: null + send_community: + description: + - send-community attribute. + required: false + choices: ['none', 'both', 'extended', 'standard', 'default'] + default: null + soft_reconfiguration_in: + description: + - Valid values are 'enable' for basic command enablement; 'always' + to add the always keyword to the basic command; and 'inherit' to + remove the command at this level (the command value is inherited + from a higher BGP layer). + required: false + choices: ['enable','always','inherit'] + default: null + soo: + description: + - Site-of-origin. Valid values are a string defining a VPN + extcommunity or 'default'. + required: false + default: null + suppress_inactive: + description: + - suppress-inactive feature. + required: false + choices: ['true','false','default'] + default: null + unsuppress_map: + description: + - unsuppress-map. Valid values are a string defining a route-map + name or 'default'. + required: false + default: null + weight: + description: + - Weight value. Valid values are an integer value or 'default'. + required: false + default: null + state: + description: + - Determines whether the config should be present or not + on the device. 
+ required: false + default: present + choices: ['present','absent'] +''' +EXAMPLES = ''' +configure RR client +- nxos_bgp_neighbor_af: + asn: 65535 + neighbor: '3.3.3.3' + afi: ipv4 + safi: unicast + route_reflector_client: true + state: present + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"afi": "ipv4", "asn": "65535", + "neighbor": "3.3.3.3", "route_reflector_client": true, + "safi": "unicast", "vrf": "default"} +existing: + description: k/v pairs of existing configuration + returned: verbose mode + type: dict + sample: {} +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: {"additional_paths_receive": "inherit", + "additional_paths_send": "inherit", + "advertise_map_exist": [], "advertise_map_non_exist": [], + "afi": "ipv4", "allowas_in": false, + "allowas_in_max": "", "as_override": false, + "asn": "65535", "default_originate": false, + "default_originate_route_map": "", "filter_list_in": "", + "filter_list_out": "", "max_prefix_interval": "", + "max_prefix_limit": "", "max_prefix_threshold": "", + "max_prefix_warning": "", "neighbor": "3.3.3.3", + "next_hop_self": false, "next_hop_third_party": true, + "prefix_list_in": "", "prefix_list_out": "", + "route_map_in": "", "route_map_out": "", + "route_reflector_client": true, "safi": "unicast", + "send_community": "", + "soft_reconfiguration_in": "inherit", "soo": "", + "suppress_inactive": false, "unsuppress_map": "", + "vrf": "default", "weight": ""} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["router bgp 65535", "neighbor 3.3.3.3", + "address-family ipv4 unicast", "route-reflector-client"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' 
+ + +# COMMON CODE FOR MIGRATION +import re + +import ansible.module_utils.nxos +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.network import NetworkModule +from ansible.module_utils.shell import ShellError + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + 
ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +WARNINGS = [] +BOOL_PARAMS = [ + 'allowas_in', + 'as_override', + 'default_originate', + 'next_hop_self', + 'next_hop_third_party', + 'route_reflector_client', + 'suppress_inactive' +] +PARAM_TO_COMMAND_KEYMAP = { + 'afi': 'address-family', + 'asn': 'router bgp', + 'neighbor': 'neighbor', + 'additional_paths_receive': 'capability additional-paths receive', + 'additional_paths_send': 'capability additional-paths send', + 'advertise_map_exist': 'advertise-map exist', + 
'advertise_map_non_exist': 'advertise-map non-exist', + 'allowas_in': 'allowas-in', + 'allowas_in_max': 'allowas-in max', + 'as_override': 'as-override', + 'default_originate': 'default-originate', + 'default_originate_route_map': 'default-originate route-map', + 'filter_list_in': 'filter-list in', + 'filter_list_out': 'filter-list out', + 'max_prefix_limit': 'maximum-prefix', + 'max_prefix_interval': 'maximum-prefix options', + 'max_prefix_threshold': 'maximum-prefix options', + 'max_prefix_warning': 'maximum-prefix options', + 'next_hop_self': 'next-hop-self', + 'next_hop_third_party': 'next-hop-third-party', + 'prefix_list_in': 'prefix-list in', + 'prefix_list_out': 'prefix-list out', + 'route_map_in': 'route-map in', + 'route_map_out': 'route-map out', + 'route_reflector_client': 'route-reflector-client', + 'safi': 'address-family', + 'send_community': 'send-community', + 'soft_reconfiguration_in': 'soft-reconfiguration inbound', + 'soo': 'soo', + 'suppress_inactive': 'suppress-inactive', + 'unsuppress_map': 'unsuppress-map', + 'weight': 'weight', + 'vrf': 'vrf' +} +PARAM_TO_DEFAULT_KEYMAP = {} + + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + +def get_value(arg, config, module): + if arg in BOOL_PARAMS: + REGEX = re.compile(r'\s+{0}\s*'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = False + try: + if REGEX.search(config): + value = True + except TypeError: + value = False + else: + REGEX = re.compile(r'(?:{0}\s)(?P.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = '' + if PARAM_TO_COMMAND_KEYMAP[arg] in config: + value = REGEX.search(config).group('value') + return value + + +def in_out_param(arg, config, module): + value = '' + for line in config: + if PARAM_TO_COMMAND_KEYMAP[arg].split()[0] in line: + splitted_line = line.split() + if splitted_line[-1] == PARAM_TO_COMMAND_KEYMAP[arg].split()[1]: + value = splitted_line[1] + return value + + +def get_custom_value(arg, config, 
module): + splitted_config = config.splitlines() + value = '' + + if (arg.startswith('filter_list') or arg.startswith('prefix_list') or + arg.startswith('route_map')): + value = in_out_param(arg, splitted_config, module) + elif arg == 'send_community': + for line in splitted_config: + if PARAM_TO_COMMAND_KEYMAP[arg] in line: + splitted_line = line.split() + if len(splitted_line) == 1: + value = 'none' + else: + value = splitted_line[1] + elif arg == 'additional_paths_receive': + value = 'inherit' + for line in splitted_config: + if PARAM_TO_COMMAND_KEYMAP[arg] in line: + if 'disable' in line: + value = 'disable' + else: + value = 'enable' + elif arg == 'additional_paths_send': + value = 'inherit' + for line in splitted_config: + if PARAM_TO_COMMAND_KEYMAP[arg] in line: + if 'disable' in line: + value = 'disable' + else: + value = 'enable' + elif arg == 'advertise_map_exist': + value = [] + for line in splitted_config: + if 'advertise-map' in line and 'exist-map' in line: + splitted_line = line.split() + value = [splitted_line[1], splitted_line[3]] + elif arg == 'advertise_map_non_exist': + value = [] + for line in splitted_config: + if 'advertise-map' in line and 'non-exist-map' in line: + splitted_line = line.split() + value = [splitted_line[1], splitted_line[3]] + elif arg == 'allowas_in_max': + for line in splitted_config: + if 'allowas-in' in line: + splitted_line = line.split() + if len(splitted_line) == 2: + value = splitted_line[-1] + elif arg.startswith('max_prefix'): + for line in splitted_config: + if 'maximum-prefix' in line: + splitted_line = line.split() + if arg == 'max_prefix_limit': + value = splitted_line[1] + elif arg == 'max_prefix_interval' and 'restart' in line: + value = splitted_line[-1] + elif arg == 'max_prefix_threshold' and len(splitted_line) > 2: + try: + int(splitted_line[2]) + value = splitted_line[2] + except ValueError: + value = '' + elif arg == 'max_prefix_warning': + if 'warning-only' in line: + value = True + else: + value = 
False + elif arg == 'soft_reconfiguration_in': + value = 'inherit' + for line in splitted_config: + if PARAM_TO_COMMAND_KEYMAP[arg] in line: + if 'always' in line: + value = 'always' + else: + value = 'enable' + elif arg == 'next_hop_third_party': + PRESENT_REGEX = re.compile(r'\s+{0}\s*'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + ABSENT_REGEX = re.compile(r'\s+no\s+{0}\s*'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = False + try: + if ABSENT_REGEX.search(config): + value = False + elif PRESENT_REGEX.search(config): + value = True + except TypeError: + value = False + + return value + + +def get_existing(module, args): + existing = {} + netcfg = get_config(module) + + custom = [ + 'allowas_in_max', + 'send_community', + 'additional_paths_send', + 'additional_paths_receive', + 'advertise_map_exist', + 'advertise_map_non_exist', + 'filter_list_in', + 'filter_list_out', + 'max_prefix_limit', + 'max_prefix_interval', + 'max_prefix_threshold', + 'max_prefix_warning', + 'next_hop_third_party', + 'prefix_list_in', + 'prefix_list_out', + 'route_map_in', + 'route_map_out', + 'soft_reconfiguration_in' + ] + try: + asn_regex = '.*router\sbgp\s(?P\d+).*' + match_asn = re.match(asn_regex, str(netcfg), re.DOTALL) + existing_asn_group = match_asn.groupdict() + existing_asn = existing_asn_group['existing_asn'] + except AttributeError: + existing_asn = '' + + if existing_asn: + parents = ["router bgp {0}".format(existing_asn)] + if module.params['vrf'] != 'default': + parents.append('vrf {0}'.format(module.params['vrf'])) + + parents.append('neighbor {0}'.format(module.params['neighbor'])) + parents.append('address-family {0} {1}'.format( + module.params['afi'], module.params['safi'])) + config = netcfg.get_section(parents) + + if config: + for arg in args: + if arg not in ['asn', 'vrf', 'neighbor', 'afi', 'safi']: + if arg in custom: + existing[arg] = get_custom_value(arg, config, module) + else: + existing[arg] = get_value(arg, config, module) + + existing['asn'] = 
existing_asn + existing['neighbor'] = module.params['neighbor'] + existing['vrf'] = module.params['vrf'] + existing['afi'] = module.params['afi'] + existing['safi'] = module.params['safi'] + else: + WARNINGS.append("The BGP process didn't exist but the task" + " just created it.") + + return existing + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = value + else: + new_dict[new_key] = value + return new_dict + + +def get_address_family_command(key, value, module): + command = "address-family {0} {1}".format( + module.params['afi'], module.params['safi']) + return command + + +def get_capability_additional_paths_receive_command(key, value, module): + command = '' + if value == 'enable': + command = key + elif value == 'disable': + command = '{0} {1}'.format(key, value) + return command + + +def get_capability_additional_paths_send_command(key, value, module): + command = '' + if value == 'enable': + command = key + elif value == 'disable': + command = '{0} {1}'.format(key, value) + return command + + +def get_advertise_map_exist_command(key, value, module): + command = 'advertise-map {0} exist-map {1}'.format( + value[0], value[1]) + return command + + +def get_advertise_map_non_exist_command(key, value, module): + command = 'advertise-map {0} non-exist-map {1}'.format( + value[0], value[1]) + return command + + +def get_allowas_in_max_command(key, value, module): + command = 'allowas-in {0}'.format(value) + return command + + +def get_filter_list_in_command(key, value, module): + command = 'filter-list {0} in'.format(value) + return command + + +def get_filter_list_out_command(key, value, module): + command = 'filter-list {0} out'.format(value) + return command + + +def get_prefix_list_in_command(key, value, module): + command = 'prefix-list {0} in'.format(value) + return command + + +def get_prefix_list_out_command(key, 
value, module): + command = 'prefix-list {0} out'.format(value) + return command + + +def get_route_map_in_command(key, value, module): + command = 'route-map {0} in'.format(value) + return command + + +def get_route_map_out_command(key, value, module): + command = 'route-map {0} out'.format(value) + return command + + +def get_maximum_prefix_command(key, value, module): + return get_maximum_prefix_options_command(key, value, module) + + +def get_maximum_prefix_options_command(key, value, module): + command = 'maximum-prefix {0}'.format(module.params['max_prefix_limit']) + if module.params['max_prefix_threshold']: + command += ' {0}'.format(module.params['max_prefix_threshold']) + if module.params['max_prefix_interval']: + command += ' restart {0}'.format(module.params['max_prefix_interval']) + elif module.params['max_prefix_warning']: + command += ' warning-only' + return command + + +def get_soft_reconfiguration_inbound_command(key, value, module): + command = '' + if value == 'enable': + command = key + elif value == 'always': + command = '{0} {1}'.format(key, value) + return command + + +def get_default_command(key, value, existing_commands): + command = '' + if key == 'send-community' and existing_commands.get(key) == 'none': + command = 'no {0}'.format(key) + + elif existing_commands.get(key): + existing_value = existing_commands.get(key) + if value == 'inherit': + if existing_value != 'inherit': + command = 'no {0}'.format(key) + else: + if key == 'advertise-map exist': + command = 'no advertise-map {0} exist-map {1}'.format( + existing_value[0], existing_value[1]) + elif key == 'advertise-map non-exist': + command = 'no advertise-map {0} non-exist-map {1}'.format( + existing_value[0], existing_value[1]) + elif key == 'filter-list in': + command = 'no filter-list {0} in'.format(existing_value) + elif key == 'filter-list out': + command = 'no filter-list {0} out'.format(existing_value) + elif key == 'prefix-list in': + command = 'no prefix-list {0} 
in'.format(existing_value) + elif key == 'prefix-list out': + command = 'no prefix-list {0} out'.format(existing_value) + elif key == 'route-map in': + command = 'no route-map {0} in'.format(existing_value) + elif key == 'route-map out': + command = 'no route-map {0} out'.format(existing_value) + elif key.startswith('maximum-prefix'): + command = 'no maximum-prefix {0}'.format( + existing_commands.get('maximum-prefix')) + elif key == 'allowas-in max': + command = ['no allowas-in {0}'.format(existing_value)] + command.append('allowas-in') + else: + command = 'no {0} {1}'.format(key, existing_value) + else: + if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS: + command = 'no {0}'.format(key) + return command + + +def fix_proposed(module, proposed): + allowas_in = proposed.get('allowas_in') + allowas_in_max = proposed.get('allowas_in_max') + + if allowas_in is False and allowas_in_max: + proposed.pop('allowas_in_max') + elif allowas_in and allowas_in_max: + proposed.pop('allowas_in') + + return proposed + + +def state_present(module, existing, proposed, candidate): + commands = list() + + proposed = fix_proposed(module, proposed) + + proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) + existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) + + custom = [ + 'address-family', + 'capability additional-paths receive', + 'capability additional-paths send', + 'advertise-map exist', + 'advertise-map non-exist', + 'allowas-in max', + 'filter-list in', + 'filter-list out', + 'maximum-prefix', + 'maximum-prefix options', + 'prefix-list in', + 'prefix-list out', + 'route-map in', + 'route-map out', + 'soft-reconfiguration inbound' + ] + for key, value in proposed_commands.iteritems(): + if key == 'send-community' and value == 'none': + commands.append('{0}'.format(key)) + + elif value is True and key != 'maximum-prefix options': + commands.append(key) + + elif value is False and key != 'maximum-prefix options': + commands.append('no 
{0}'.format(key)) + + elif value == 'default' or value == 'inherit': + command = get_default_command(key, value, existing_commands) + + if isinstance(command, str): + if command and command not in commands: + commands.append(command) + elif isinstance(command, list): + for cmd in command: + if cmd not in commands: + commands.append(cmd) + + elif key in custom: + fixed_key = key.replace(' ', '_').replace('-', '_') + command = invoke('get_%s_command' % fixed_key, key, value, module) + if command and command not in commands: + commands.append(command) + else: + command = '{0} {1}'.format(key, value) + commands.append(command) + + if commands: + parents = ["router bgp {0}".format(module.params['asn'])] + if module.params['vrf'] != 'default': + parents.append('vrf {0}'.format(module.params['vrf'])) + + parents.append('neighbor {0}'.format(module.params['neighbor'])) + + if len(commands) == 1: + candidate.add(commands, parents=parents) + elif len(commands) > 1: + af_command = 'address-family {0} {1}'.format( + module.params['afi'], module.params['safi']) + if af_command in commands: + commands.remove(af_command) + parents.append('address-family {0} {1}'.format( + module.params['afi'], module.params['safi'])) + candidate.add(commands, parents=parents) + + +def state_absent(module, existing, proposed, candidate): + commands = [] + parents = ["router bgp {0}".format(module.params['asn'])] + if module.params['vrf'] != 'default': + parents.append('vrf {0}'.format(module.params['vrf'])) + + parents.append('neighbor {0}'.format(module.params['neighbor'])) + commands.append('no address-family {0} {1}'.format( + module.params['afi'], module.params['safi'])) + candidate.add(commands, parents=parents) + + +def main(): + argument_spec = dict( + asn=dict(required=True, type='str'), + vrf=dict(required=False, type='str', default='default'), + neighbor=dict(required=True, type='str'), + afi=dict(required=True, type='str'), + safi=dict(required=True, type='str'), + 
additional_paths_receive=dict(required=False, type='str', + choices=['enable', 'disable', 'inherit']), + additional_paths_send=dict(required=False, type='str', + choices=['enable', 'disable', 'inherit']), + advertise_map_exist=dict(required=False, type='list'), + advertise_map_non_exist=dict(required=False, type='list'), + allowas_in=dict(required=False, type='bool'), + allowas_in_max=dict(required=False, type='str'), + as_override=dict(required=False, type='bool'), + default_originate=dict(required=False, type='bool'), + default_originate_route_map=dict(required=False, type='str'), + filter_list_in=dict(required=False, type='str'), + filter_list_out=dict(required=False, type='str'), + max_prefix_limit=dict(required=False, type='str'), + max_prefix_interval=dict(required=False, type='str'), + max_prefix_threshold=dict(required=False, type='str'), + max_prefix_warning=dict(required=False, type='bool'), + next_hop_self=dict(required=False, type='bool'), + next_hop_third_party=dict(required=False, type='bool'), + prefix_list_in=dict(required=False, type='str'), + prefix_list_out=dict(required=False, type='str'), + route_map_in=dict(required=False, type='str'), + route_map_out=dict(required=False, type='str'), + route_reflector_client=dict(required=False, type='bool'), + send_community=dict(required=False, choices=['none', + 'both', + 'extended', + 'standard', + 'default']), + soft_reconfiguration_in=dict(required=False, type='str', + choices=['enable', 'always', 'inherit']), + soo=dict(required=False, type='str'), + suppress_inactive=dict(required=False, type='bool'), + unsuppress_map=dict(required=False, type='str'), + weight=dict(required=False, type='str'), + state=dict(choices=['present', 'absent'], default='present', + required=False), + include_defaults=dict(default=True), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + mutually_exclusive=[['advertise_map_exist', + 
'advertise_map_non_exist']], + supports_check_mode=True) + + state = module.params['state'] + if ((module.params['max_prefix_interval'] or + module.params['max_prefix_warning'] or + module.params['max_prefix_threshold']) and + not module.params['max_prefix_limit']): + module.fail_json(msg='max_prefix_limit is required when using ' + 'max_prefix_warning, max_prefix_limit or ' + 'max_prefix_threshold.') + if module.params['vrf'] == 'default' and module.params['soo']: + module.fail_json(msg='SOO is only allowed in non-default VRF') + + args = [ + 'afi', + 'asn', + 'neighbor', + 'additional_paths_receive', + 'additional_paths_send', + 'advertise_map_exist', + 'advertise_map_non_exist', + 'allowas_in', + 'allowas_in_max', + 'as_override', + 'default_originate', + 'default_originate_route_map', + 'filter_list_in', + 'filter_list_out', + 'max_prefix_limit', + 'max_prefix_interval', + 'max_prefix_threshold', + 'max_prefix_warning', + 'next_hop_self', + 'next_hop_third_party', + 'prefix_list_in', + 'prefix_list_out', + 'route_map_in', + 'route_map_out', + 'soft_reconfiguration_in', + 'soo', + 'suppress_inactive', + 'unsuppress_map', + 'weight', + 'route_reflector_client', + 'safi', + 'send_community', + 'vrf' + ] + + existing = invoke('get_existing', module, args) + if existing.get('asn'): + if (existing.get('asn') != module.params['asn'] and + state == 'present'): + module.fail_json(msg='Another BGP ASN already exists.', + proposed_asn=module.params['asn'], + existing_asn=existing.get('asn')) + + if module.params['advertise_map_exist'] == ['default']: + module.params['advertise_map_exist'] = 'default' + if module.params['advertise_map_non_exist'] == ['default']: + module.params['advertise_map_non_exist'] = 'default' + + end_state = existing + proposed_args = dict((k, v) for k, v in module.params.iteritems() + if v is not None and k in args) + + proposed = {} + for key, value in proposed_args.iteritems(): + if key not in ['asn', 'vrf', 'neighbor']: + if not 
isinstance(value, list): + if str(value).lower() == 'true': + value = True + elif str(value).lower() == 'false': + value = False + elif str(value).lower() == 'default': + value = PARAM_TO_DEFAULT_KEYMAP.get(key) + if value is None: + if key in BOOL_PARAMS: + value = False + else: + value = 'default' + if existing.get(key) or (not existing.get(key) and value): + proposed[key] = value + + result = {} + if state == 'present' or (state == 'absent' and existing): + candidate = CustomNetworkConfig(indent=3) + invoke('state_%s' % state, module, existing, proposed, candidate) + + try: + response = load_config(module, candidate) + result.update(response) + except ShellError: + exc = get_exception() + module.fail_json(msg=str(exc)) + else: + result['updates'] = [] + + result['connected'] = module.connected + if module._verbosity > 0: + end_state = invoke('get_existing', module, args) + result['end_state'] = end_state + result['existing'] = existing + result['proposed'] = proposed_args + + if WARNINGS: + result['warnings'] = WARNINGS + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_command.py b/network/nxos/nxos_command.py index 21a6b56628c..b90034a09e6 100644 --- a/network/nxos/nxos_command.py +++ b/network/nxos/nxos_command.py @@ -16,6 +16,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = """ --- module: nxos_command @@ -23,8 +27,8 @@ author: "Peter Sprygada (@privateip)" short_description: Run arbitrary command on Cisco NXOS devices description: - - Sends an aribtrary command to an NXOS node and returns the results - read from the device. The M(nxos_command) modulule includes an + - Sends an arbitrary command to an NXOS node and returns the results + read from the device. 
This module includes an argument that will cause the module to wait for a specific condition before returning or timing out if the condition is not met. extends_documentation_fragment: nxos @@ -33,11 +37,17 @@ description: - The commands to send to the remote NXOS device over the configured provider. The resulting output from the command - is returned. If the I(waitfor) argument is provided, the + is returned. If the I(wait_for) argument is provided, the module is not returned until the condition is satisfied or the number of retires as expired. + - The I(commands) argument also accepts an alternative form + that allows for complex values that specify the command + to run and the output format to return. This can be done + on a command by command basis. The complex argument supports + the keywords C(command) and C(output) where C(command) is the + command to run and C(output) is one of 'text' or 'json'. required: true - waitfor: + wait_for: description: - Specifies what to evaluate from the output of the command and what conditionals to apply. This argument will cause @@ -46,12 +56,25 @@ by the configured retries, the task fails. See examples. required: false default: null + aliases: ['waitfor'] + version_added: "2.2" + match: + description: + - The I(match) argument is used in conjunction with the + I(wait_for) argument to specify the match policy. Valid + values are C(all) or C(any). If the value is set to C(all) + then all conditionals in the I(wait_for) must be satisfied. If + the value is set to C(any) then only one of the values must be + satisfied. + required: false + default: all + version_added: "2.2" retries: description: - Specifies the number of retries a command should by tried before it is considered failed. The command is run on the - target device every retry and evaluated against the waitfor - conditionals + target device every retry and evaluated against the I(wait_for) + conditionals. 
required: false default: 10 interval: @@ -65,33 +88,49 @@ """ EXAMPLES = """ -- nxos_command: - commands: ["show version"] +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + cli: + host: "{{ inventory_hostname }}" + username: admin + password: admin + transport: cli -- nxos_command: - commands: "{{ lookup('file', 'commands.txt') }}" +- name: run show version on remote devices + nxos_command: + commands: show version + provider: "{{ cli }}" -- nxos_command: - commands: - - "show interface {{ item }}" - with_items: interfaces +- name: run show version and check to see if output contains Cisco + nxos_command: + commands: show version + wait_for: result[0] contains Cisco + provider: "{{ cli }}" +- name: run multiple commands on remote nodes + nxos_command: + commands: + - show version + - show interfaces + provider: "{{ cli }}" -- nxos_command: +- name: run multiple commands and evaluate the output + nxos_command: commands: - show version - waitfor: - - "result[0] contains 7.2(0)D1(1)" + - show interfaces + wait_for: + - result[0] contains Cisco + - result[1] contains loopback0 + provider: "{{ cli }}" -- nxos_command: - commands: - - show version | json - - show interface Ethernet2/1 | json - - show version - waitfor: - - "result[1].TABLE_interface.ROW_interface.state eq up" - - "result[2] contains 'version 7.2(0)D1(1)'" - - "result[0].sys_ver_str == 7.2(0)D1(1)" +- name: run commands and specify the output format + nxos_command: + commands: + - command: show version + output: json + provider: "{{ cli }}" """ RETURN = """ @@ -109,92 +148,114 @@ failed_conditions: description: the conditionals that failed - retured: failed + returned: failed type: list sample: ['...', '...'] """ +import ansible.module_utils.nxos -import time -import shlex -import re +from ansible.module_utils.basic import get_exception +from ansible.module_utils.network import NetworkModule, NetworkError +from 
ansible.module_utils.netcli import CommandRunner +from ansible.module_utils.netcli import FailedConditionsError +from ansible.module_utils.netcli import FailedConditionalError +from ansible.module_utils.netcli import AddCommandError, AddConditionError -INDEX_RE = re.compile(r'(\[\d+\])') +VALID_KEYS = ['command', 'output', 'prompt', 'response'] -def iterlines(stdout): +def to_lines(stdout): for item in stdout: if isinstance(item, basestring): item = str(item).split('\n') yield item +def parse_commands(module): + for cmd in module.params['commands']: + if isinstance(cmd, basestring): + cmd = dict(command=cmd, output=None) + elif 'command' not in cmd: + module.fail_json(msg='command keyword argument is required') + elif cmd.get('output') not in [None, 'text', 'json']: + module.fail_json(msg='invalid output specified for command') + elif not set(cmd.keys()).issubset(VALID_KEYS): + module.fail_json(msg='unknown keyword specified') + yield cmd + def main(): spec = dict( - commands=dict(type='list'), - waitfor=dict(type='list'), + # { command: , output: , prompt: , response: } + commands=dict(type='list', required=True), + + wait_for=dict(type='list', aliases=['waitfor']), + match=dict(default='all', choices=['any', 'all']), + retries=dict(default=10, type='int'), interval=dict(default=1, type='int') ) - module = get_module(argument_spec=spec, - supports_check_mode=True) + module = NetworkModule(argument_spec=spec, + supports_check_mode=True) + + commands = list(parse_commands(module)) + conditionals = module.params['wait_for'] or list() + warnings = list() - commands = module.params['commands'] + runner = CommandRunner(module) - retries = module.params['retries'] - interval = module.params['interval'] + for cmd in commands: + if module.check_mode and not cmd['command'].startswith('show'): + warnings.append('only show commands are supported when using ' + 'check mode, not executing `%s`' % cmd['command']) + else: + if cmd['command'].startswith('conf'): + 
module.fail_json(msg='nxos_command does not support running ' + 'config mode commands. Please use ' + 'nxos_config instead') + try: + runner.add_command(**cmd) + except AddCommandError: + exc = get_exception() + warnings.append('duplicate command detected: %s' % cmd) try: - queue = set() - for entry in (module.params['waitfor'] or list()): - queue.add(Conditional(entry)) - except AttributeError, exc: - module.fail_json(msg=exc.message) + for item in conditionals: + runner.add_conditional(item) + except AddConditionError: + exc = get_exception() + module.fail_json(msg=str(exc), condition=exc.condition) - result = dict(changed=False, result=list()) + runner.retries = module.params['retries'] + runner.interval = module.params['interval'] + runner.match = module.params['match'] - kwargs = dict() - if module.params['transport'] == 'nxapi': - kwargs['command_type'] = 'cli_show' + try: + runner.run() + except FailedConditionsError: + exc = get_exception() + module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions) + except FailedConditionalError: + exc = get_exception() + module.fail_json(msg=str(exc), failed_conditional=exc.failed_conditional) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc), **exc.kwargs) - while retries > 0: + result = dict(changed=False) + + result['stdout'] = list() + for cmd in commands: try: - response = module.execute(commands, **kwargs) - result['stdout'] = response - except ShellError, exc: - module.fail_json(msg='failed to run commands', exc=exc.message, - command=exc.command) - - for index, cmd in enumerate(commands): - if cmd.endswith('json'): - try: - response[index] = module.from_json(response[index]) - except ValueError, exc: - module.fail_json(msg='failed to parse json response', - exc_message=str(exc), response=response[index], - cmd=cmd, response_dict=response) - - for item in list(queue): - if item(response): - queue.remove(item) - - if not queue: - break - - time.sleep(interval) - retries -= 
1 - else: - failed_conditions = [item.raw for item in queue] - module.fail_json(msg='timeout waiting for value', failed_conditions=failed_conditions) - - result['stdout_lines'] = list(iterlines(result['stdout'])) - return module.exit_json(**result) - - -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -from ansible.module_utils.shell import * -from ansible.module_utils.netcfg import * -from ansible.module_utils.nxos import * -if __name__ == '__main__': - main() + output = runner.get_command(cmd['command'], cmd.get('output')) + except ValueError: + output = 'command not executed due to check_mode, see warnings' + result['stdout'].append(output) + result['warnings'] = warnings + result['stdout_lines'] = list(to_lines(result['stdout'])) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_config.py b/network/nxos/nxos_config.py index ec578eaaa17..2355ec37d89 100644 --- a/network/nxos/nxos_config.py +++ b/network/nxos/nxos_config.py @@ -16,6 +16,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = """ --- module: nxos_config @@ -23,8 +27,8 @@ author: "Peter Sprygada (@privateip)" short_description: Manage Cisco NXOS configuration sections description: - - Cisco NXOS configurations use a simple block indent file sytanx - for segementing configuration into sections. This module provides + - Cisco NXOS configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides an implementation for working with NXOS configuration sections in a deterministic way. This module works with either CLI or NXAPI transports. @@ -35,9 +39,10 @@ - The ordered set of commands that should be configured in the section. The commands must be the exact same commands as found in the device running-config. 
Be sure to note the configuration - command syntanx as some commands are automatically modified by the + command syntax as some commands are automatically modified by the device config parser. - required: true + required: false + default: null parents: description: - The ordered set of parents that uniquely identify the section @@ -46,19 +51,30 @@ level or global commands. required: false default: null + src: + description: + - The I(src) argument provides a path to the configuration file + to load into the remote system. The path can either be a full + system path to the configuration file if the value starts with / + or relative to the root of the implemented role or playbook. + This argument is mutually exclusive with the I(lines) and + I(parents) arguments. + required: false + default: null + version_added: "2.2" before: description: - The ordered set of commands to push on to the command stack if a change needs to be made. This allows the playbook designer the opportunity to perform configuration commands prior to pushing any changes without affecting how the set of commands are matched - against the system + against the system. required: false default: null after: description: - The ordered set of commands to append to the end of the command - stack if a changed needs to be made. Just like with I(before) this + stack if a change needs to be made. Just like with I(before) this allows the playbook designer to append a set of commands to be executed after the command set. required: false @@ -69,11 +85,13 @@ the set of commands against the current device config. If match is set to I(line), commands are matched line by line. If match is set to I(strict), command lines are matched with respect - to position. Finally if match is set to I(exact), command lines - must be an equal match. + to position. If match is set to I(exact), command lines + must be an equal match. 
Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. required: false default: line - choices: ['line', 'strict', 'exact'] + choices: ['line', 'strict', 'exact', 'none'] replace: description: - Instructs the module on the way to perform the configuration @@ -81,7 +99,7 @@ the modified lines are pushed to the device in configuration mode. If the replace argument is set to I(block) then the entire command block is pushed to the device in configuration mode if any - line is not correct + line is not correct. required: false default: line choices: ['line', 'block'] @@ -91,9 +109,23 @@ current devices running-config. When set to true, this will cause the module to push the contents of I(src) into the device without first checking if already configured. + - Note this argument should be considered deprecated. To achieve + the equivalent, set the C(match=none) which is idempotent. This argument + will be removed in a future release. required: false default: false - choices: BOOLEANS + choices: [ "true", "false" ] + backup: + description: + - This argument will cause the module to create a full backup of + the current C(running-config) from the remote device before any + changes are made. The backup file is written to the C(backup) + folder in the playbook root directory. If the directory does not + exist, it is created. + required: false + default: no + choices: ['yes', 'no'] + version_added: "2.2" config: description: - The module, by default, will connect to the remote device and @@ -101,16 +133,48 @@ against the contents of source. There are times when it is not desirable to have the task get the current running-config for every task in a playbook. The I(config) argument allows the - implementer to pass in the configuruation to use as the base - config for comparision. + implementer to pass in the configuration to use as the base + config for comparison. 
required: false default: null + defaults: + description: + - The I(defaults) argument will influence how the running-config + is collected from the device. When the value is set to true, + the command used to collect the running-config is append with + the all keyword. When the value is set to false, the command + is issued without the all keyword + required: false + default: false + version_added: "2.2" + save: + description: + - The C(save) argument instructs the module to save the + running-config to startup-config. This operation is performed + after any changes are made to the current running config. If + no changes are made, the configuration is still saved to the + startup config. This option will always cause the module to + return changed. + required: false + default: false + version_added: "2.2" """ EXAMPLES = """ -- nxos_config: - lines: ['hostname {{ inventory_hostname }}'] - force: yes +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. 
+vars: + cli: + host: "{{ inventory_hostname }}" + username: admin + password: admin + transport: cli + +- name: configure top level configuration and save it + nxos_config: + lines: hostname {{ inventory_hostname }} + save: yes + provider: "{{ cli }}" - nxos_config: lines: @@ -119,9 +183,10 @@ - 30 permit ip 3.3.3.3/32 any log - 40 permit ip 4.4.4.4/32 any log - 50 permit ip 5.5.5.5/32 any log - parents: ['ip access-list test'] - before: ['no ip access-list test'] + parents: ip access-list test + before: no ip access-list test match: exact + provider: "{{ cli }}" - nxos_config: lines: @@ -129,102 +194,146 @@ - 20 permit ip 2.2.2.2/32 any log - 30 permit ip 3.3.3.3/32 any log - 40 permit ip 4.4.4.4/32 any log - parents: ['ip access-list test'] - before: ['no ip access-list test'] - replace: block - -- nxos_config: - lines: "{{lookup('file', 'datcenter1.txt'}}" - parents: ['ip access-list test'] - before: ['no ip access-list test'] + parents: ip access-list test + before: no ip access-list test replace: block - + provider: "{{ cli }}" """ RETURN = """ updates: description: The set of commands that will be pushed to the remote device - returned: always - type: list - sample: ['...', '...'] - -responses: - description: The set of responses from issuing the commands on the device - retured: when not check_mode + returned: Only when lines is specified. type: list sample: ['...', '...'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: path + sample: /playbooks/ansible/backup/nxos_config.2016-07-16@22:28:34 """ +import ansible.module_utils.nxos +from ansible.module_utils.basic import get_exception +from ansible.module_utils.network import NetworkModule, NetworkError +from ansible.module_utils.netcfg import NetworkConfig, dumps + +def check_args(module, warnings): + if module.params['force']: + warnings.append('The force argument is deprecated, please use ' + 'match=none instead. 
This argument will be ' + 'removed in the future') + +def get_candidate(module): + candidate = NetworkConfig(indent=2) + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) + return candidate + def get_config(module): - config = module.params['config'] or dict() - if not config and not module.params['force']: - config = module.config - return config + contents = module.params['config'] + if not contents: + defaults = module.params['defaults'] + contents = module.config.get_config(include_defaults=defaults) + return NetworkConfig(indent=2, contents=contents) + +def run(module, result): + match = module.params['match'] + replace = module.params['replace'] + candidate = get_candidate(module) + + if match != 'none': + config = get_config(module) + path = module.params['parents'] + configobjs = candidate.difference(config, path=path, match=match, + replace=replace) + else: + configobjs = candidate.items + + if configobjs: + commands = dumps(configobjs, 'commands').split('\n') + + if module.params['lines']: + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + result['updates'] = commands + + if not module.check_mode: + module.config.load_config(commands) + + result['changed'] = True + + if module.params['save']: + if not module.check_mode: + module.config.save_config() + result['changed'] = True def main(): + """ main entry point for module execution + """ argument_spec = dict( - lines=dict(aliases=['commands'], required=True, type='list'), + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), parents=dict(type='list'), + before=dict(type='list'), after=dict(type='list'), - match=dict(default='line', choices=['line', 'strict', 'exact']), - replace=dict(default='line', choices=['line', 'block']), - 
force=dict(default=False, type='bool'), - config=dict() - ) - module = get_module(argument_spec=argument_spec, - supports_check_mode=True) + match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), - lines = module.params['lines'] - parents = module.params['parents'] or list() + # this argument is deprecated in favor of setting match: none + # it will be removed in a future version + force=dict(default=False, type='bool'), - before = module.params['before'] - after = module.params['after'] + config=dict(), + defaults=dict(type='bool', default=False), - match = module.params['match'] - replace = module.params['replace'] + backup=dict(type='bool', default=False), + save=dict(type='bool', default=False), + ) - contents = get_config(module) - config = module.parse_config(contents) + mutually_exclusive = [('lines', 'src')] - if not module.params['force']: - contents = get_config(module) - config = NetworkConfig(contents=contents, indent=2) + required_if = [('match', 'strict', ['lines']), + ('match', 'exact', ['lines']), + ('replace', 'block', ['lines'])] - candidate = NetworkConfig(indent=2) - candidate.add(lines, parents=parents) + module = NetworkModule(argument_spec=argument_spec, + connect_on_load=False, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) - commands = candidate.difference(config, path=parents, match=match, replace=replace) - else: - commands = parents - commands.extend(lines) + if module.params['force'] is True: + module.params['match'] = 'none' - result = dict(changed=False) + warnings = list() + check_args(module, warnings) - if commands: - if before: - commands[:0] = before + result = dict(changed=False, warnings=warnings) - if after: - commands.extend(after) + if module.params['backup']: + result['__backup__'] = module.config.get_config() - if not module.check_mode: - commands = [str(c).strip() for c in commands] - response = 
module.configure(commands) - result['responses'] = response - result['changed'] = True + try: + run(module, result) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc), **exc.kwargs) - result['updates'] = commands module.exit_json(**result) -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -from ansible.module_utils.shell import * -from ansible.module_utils.netcfg import * -from ansible.module_utils.nxos import * + if __name__ == '__main__': main() - diff --git a/network/nxos/nxos_evpn_global.py b/network/nxos/nxos_evpn_global.py new file mode 100644 index 00000000000..375269e0709 --- /dev/null +++ b/network/nxos/nxos_evpn_global.py @@ -0,0 +1,321 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_evpn_global +version_added: "2.2" +short_description: Handles the EVPN control plane for VXLAN. +description: + - Handles the EVPN control plane for VXLAN. +author: Gabriele Gerbino (@GGabriele) +extends_documentation_fragment: nxos +options: + nv_overlay_evpn: + description: + - EVPN control plane. 
+ required: true + choices: ['true', 'false'] +''' +EXAMPLES = ''' +- nxos_evpn_global: + nv_overlay_evpn: true + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"nv_overlay_evpn": true} +existing: + description: k/v pairs of existing configuration + returned: verbose mode + type: dict + sample: {"nv_overlay_evpn": false} +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: {"nv_overlay_evpn": true} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["nv overlay evpn"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + + +# COMMON CODE FOR MIGRATION +import re + +import ansible.module_utils.nxos +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.network import NetworkModule +from ansible.module_utils.shell import ShellError + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + 
+ def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = 
dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +PARAM_TO_COMMAND_KEYMAP = { + 'nv_overlay_evpn': 'nv overlay evpn', +} + + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + +def get_value(arg, config, module): + REGEX = re.compile(r'(?:{0}\s)(?P.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = False + if REGEX.search(config): + value = True + return value + + +def get_existing(module): + existing = {} + config = str(get_config(module)) + + existing['nv_overlay_evpn'] = get_value('nv_overlay_evpn', config, module) + return existing + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = value + else: + new_dict[new_key] = value + return new_dict + + +def get_commands(module, existing, proposed, candidate): + commands = list() + proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) + existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) + + for key, value in proposed_commands.iteritems(): + if value is True: + commands.append(key) + elif value is False: + commands.append('no {0}'.format(key)) + + if commands: + candidate.add(commands, parents=[]) + + +def main(): + argument_spec = dict( + nv_overlay_evpn=dict(required=True, type='bool'), + include_defaults=dict(default=True), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + existing = invoke('get_existing', module) + end_state = existing + 
proposed = dict(nv_overlay_evpn=module.params['nv_overlay_evpn']) + + result = {} + candidate = CustomNetworkConfig(indent=3) + invoke('get_commands', module, existing, proposed, candidate) + + if proposed != existing: + try: + response = load_config(module, candidate) + result.update(response) + except ShellError: + exc = get_exception() + module.fail_json(msg=str(exc)) + else: + result['updates'] = [] + + result['connected'] = module.connected + if module._verbosity > 0: + end_state = invoke('get_existing', module) + result['end_state'] = end_state + result['existing'] = existing + result['proposed'] = proposed + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_evpn_vni.py b/network/nxos/nxos_evpn_vni.py new file mode 100644 index 00000000000..5d20addd63a --- /dev/null +++ b/network/nxos/nxos_evpn_vni.py @@ -0,0 +1,492 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_evpn_vni +version_added: "2.2" +short_description: Manages Cisco EVPN VXLAN Network Identifier (VNI). +description: + - Manages Cisco Ethernet Virtual Private Network (EVPN) VXLAN Network + Identifier (VNI) configurations of a Nexus device. 
+author: Gabriele Gerbino (@GGabriele) +extends_documentation_fragment: nxos +notes: + - default, where supported, restores params default value. + - RD override is not permitted. You should set it to the default values + first and then reconfigure it. + - C(route_target_both), C(route_target_import) and + C(route_target_export) valid values are a list of extended communities, + (i.e. ['1.2.3.4:5', '33:55']) or the keywords 'auto' or 'default'. + - The C(route_target_both) property is discouraged due to the inconsistent + behavior of the property across Nexus platforms and image versions. + For this reason it is recommended to use explicit C(route_target_export) + and C(route_target_import) properties instead of C(route_target_both). + - RD valid values are a string in one of the route-distinguisher formats, + the keyword 'auto', or the keyword 'default'. +options: + vni: + description: + - The EVPN VXLAN Network Identifier. + required: true + default: null + route_distinguisher: + description: + - The VPN Route Distinguisher (RD). The RD is combined with + the IPv4 or IPv6 prefix learned by the PE router to create a + globally unique address. + required: true + default: null + route_target_both: + description: + - Enables/Disables route-target settings for both import and + export target communities using a single property. + required: false + default: null + route_target_import: + description: + - Sets the route-target 'import' extended communities. + required: false + default: null + route_target_export: + description: + - Sets the route-target 'export' extended communities. + required: false + default: null + state: + description: + - Determines whether the config should be present or not + on the device. 
+ required: false + default: present + choices: ['present','absent'] +''' +EXAMPLES = ''' +- nxos_evpn_vni: + vni: 6000 + route_distinguisher: "60:10" + route_target_import: + - "5000:10" + - "4100:100" + route_target_export: auto + route_target_both: default + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"route_target_import": ["5000:10", "4100:100", + "5001:10"],"vni": "6000"} +existing: + description: k/v pairs of existing EVPN VNI configuration + returned: verbose mode + type: dict + sample: {"route_distinguisher": "70:10", "route_target_both": [], + "route_target_export": [], "route_target_import": [ + "4100:100", "5000:10"], "vni": "6000"} +end_state: + description: k/v pairs of EVPN VNI configuration after module execution + returned: verbose mode + type: dict + sample: {"route_distinguisher": "70:10", "route_target_both": [], + "route_target_export": [], "route_target_import": [ + "4100:100", "5000:10", "5001:10"], "vni": "6000"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["evpn", "vni 6000 l2", "route-target import 5001:10"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +# COMMON CODE FOR MIGRATION +import re + +import ansible.module_utils.nxos +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError +from ansible.module_utils.network import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) 
+ for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, 
include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +PARAM_TO_COMMAND_KEYMAP = { + 'vni': 'vni', + 'route_target_both': 'route-target both', + 'route_target_import': 'route-target import', + 'route_target_export': 'route-target export', + 'route_distinguisher': 'rd' +} +WARNINGS = [] + +import time + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + +def get_value(arg, config, module): + REGEX = re.compile(r'(?:{0}\s)(?P.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = '' + if PARAM_TO_COMMAND_KEYMAP[arg] in config: + value = REGEX.search(config).group('value') + return value + + +def get_route_target_value(arg, config, module): + splitted_config = config.splitlines() + value_list = [] + REGEX = re.compile(r'(?:{0}\s)(?P.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + + for line in splitted_config: + value = '' + if PARAM_TO_COMMAND_KEYMAP[arg] in line.strip(): + value = REGEX.search(line).group('value') + value_list.append(value) + return value_list + + +def get_existing(module, args): + existing = {} + netcfg = get_config(module) + parents = 
['evpn', 'vni {0} l2'.format(module.params['vni'])] + config = netcfg.get_section(parents) + + if config: + for arg in args: + if arg != 'vni': + if arg == 'route_distinguisher': + existing[arg] = get_value(arg, config, module) + else: + existing[arg] = get_route_target_value(arg, config, module) + + existing_fix = dict((k, v) for k, v in existing.iteritems() if v) + if existing_fix: + existing['vni'] = module.params['vni'] + else: + existing = existing_fix + + return existing + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = value + else: + new_dict[new_key] = value + return new_dict + + +def state_present(module, existing, proposed): + commands = list() + parents = list() + proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) + existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) + + for key, value in proposed_commands.iteritems(): + if key.startswith('route-target'): + if value == ['default']: + existing_value = existing_commands.get(key) + + if existing_value: + for target in existing_value: + commands.append('no {0} {1}'.format(key, target)) + else: + if not isinstance(value, list): + value = [value] + for target in value: + if existing: + if target not in existing.get(key.replace('-', '_').replace(' ', '_')): + commands.append('{0} {1}'.format(key, target)) + else: + commands.append('{0} {1}'.format(key, target)) + else: + if value == 'default': + existing_value = existing_commands.get(key) + if existing_value: + commands.append('no {0} {1}'.format(key, existing_value)) + else: + command = '{0} {1}'.format(key, value) + commands.append(command) + + if commands: + parents = ['evpn', 'vni {0} l2'.format(module.params['vni'])] + + return commands, parents + + +def state_absent(module, existing, proposed): + commands = ['no vni {0} l2'.format(module.params['vni'])] + parents = ['evpn'] + 
return commands, parents + + +def execute_config(module, candidate): + result = {} + try: + response = load_config(module, candidate) + result.update(response) + except ShellError: + exc = get_exception() + module.fail_json(msg=str(exc)) + return result + + +def main(): + argument_spec = dict( + vni=dict(required=True, type='str'), + route_distinguisher=dict(required=False, type='str'), + route_target_both=dict(required=False, type='list'), + route_target_import=dict(required=False, type='list'), + route_target_export=dict(required=False, type='list'), + state=dict(choices=['present', 'absent'], default='present', + required=False), + include_defaults=dict(default=True), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + state = module.params['state'] + args = [ + 'vni', + 'route_distinguisher', + 'route_target_both', + 'route_target_import', + 'route_target_export' + ] + + existing = invoke('get_existing', module, args) + end_state = existing + proposed_args = dict((k, v) for k, v in module.params.iteritems() + if v is not None and k in args) + + proposed = {} + for key, value in proposed_args.iteritems(): + if key != 'vni': + if value == 'true': + value = True + elif value == 'false': + value = False + if existing.get(key) or (not existing.get(key) and value): + proposed[key] = value + result = {} + if state == 'present' or (state == 'absent' and existing): + candidate = CustomNetworkConfig(indent=3) + commands, parents = invoke('state_%s' % state, module, existing, + proposed) + if commands: + if (existing.get('route_distinguisher') and + proposed.get('route_distinguisher')): + if (existing['route_distinguisher'] != proposed[ + 'route_distinguisher'] and + proposed['route_distinguisher'] != 'default'): + WARNINGS.append('EVPN RD {0} was automatically removed. 
' + 'It is highly recommended to use a task ' + '(with default as value) to explicitly ' + 'unconfigure it.'.format( + existing['route_distinguisher'])) + remove_commands = ['no rd {0}'.format( + existing['route_distinguisher'])] + + candidate.add(remove_commands, parents=parents) + result = execute_config(module, candidate) + time.sleep(30) + + candidate = CustomNetworkConfig(indent=3) + candidate.add(commands, parents=parents) + result = execute_config(module, candidate) + else: + result['updates'] = [] + + result['connected'] = module.connected + if module._verbosity > 0: + end_state = invoke('get_existing', module, args) + result['end_state'] = end_state + result['existing'] = existing + result['proposed'] = proposed_args + + if WARNINGS: + result['warnings'] = WARNINGS + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_facts.py b/network/nxos/nxos_facts.py new file mode 100644 index 00000000000..f5ec1238ee3 --- /dev/null +++ b/network/nxos/nxos_facts.py @@ -0,0 +1,535 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: nxos_facts +version_added: "2.1" +short_description: Gets facts about NX-OS switches +description: + - Collects facts from Cisco Nexus devices running the NX-OS operating + system. Fact collection is supported over both Cli and Nxapi + transports. This module prepends all of the base network fact keys + with C(ansible_net_). The facts module will always collect a + base set of facts from the device and can enable or disable + collection of additional facts. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +options: + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all, hardware, config, legacy, and interfaces. Can specify a + list of values to include a larger subset. Values can also be used + with an initial C(M(!)) to specify that a specific subset should + not be collected. + required: false + default: '!config' + version_added: "2.2" +""" + +EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. 
+vars: + cli: + host: "{{ inventory_hostname }}" + username: admin + password: admin + transport: cli + +- nxos_facts: + gather_subset: all + +# Collect only the config and default facts +- nxos_facts: + gather_subset: + - config + +# Do not collect hardware facts +- nxos_facts: + gather_subset: + - "!hardware" +""" + +RETURN = """ +ansible_net_gather_subset: + description: The list of fact subsets collected from the device + returned: always + type: list + +# default +ansible_net_model: + description: The model name returned from the device + returned: always + type: str +ansible_net_serialnum: + description: The serial number of the remote device + returned: always + type: str +ansible_net_version: + description: The operating system version running on the remote device + returned: always + type: str +ansible_net_hostname: + description: The configured hostname of the device + returned: always + type: string +ansible_net_image: + description: The image file the device is running + returned: always + type: string + +# hardware +ansible_net_filesystems: + description: All file system names available on the device + returned: when hardware is configured + type: list +ansible_net_memfree_mb: + description: The available free memory on the remote device in Mb + returned: when hardware is configured + type: int +ansible_net_memtotal_mb: + description: The total memory on the remote device in Mb + returned: when hardware is configured + type: int + +# config +ansible_net_config: + description: The current active config from the device + returned: when config is configured + type: str + +# interfaces +ansible_net_all_ipv4_addresses: + description: All IPv4 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_all_ipv6_addresses: + description: All IPv6 addresses configured on the device + returned: when interfaces is configured + type: list +ansible_net_interfaces: + description: A hash of all interfaces running on the 
system + returned: when interfaces is configured + type: dict +ansible_net_neighbors: + description: The list of LLDP neighbors from the remote device + returned: when interfaces is configured + type: dict + +# legacy (pre Ansible 2.2) +fan_info: + description: A hash of facts about fans in the remote device + returned: when legacy is configured + type: dict +hostname: + description: The configured hostname of the remote device + returned: when legacy is configured + type: dict +interfaces_list: + description: The list of interface names on the remote device + returned: when legacy is configured + type: dict +kickstart: + description: The software version used to boot the system + returned: when legacy is configured + type: str +module: + description: A hash of facts about the modules in a remote device + returned: when legacy is configured + type: dict +platform: + description: The hardware platform reported by the remote device + returned: when legacy is configured + type: str +power_supply_info: + description: A hash of facts about the power supplies in the remote device + returned: when legacy is configured + type: str +vlan_list: + description: The list of VLAN IDs configured on the remote device + returned: when legacy is configured + type: list +""" +import re + +import ansible.module_utils.nxos +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcli import CommandRunner, AddCommandError +from ansible.module_utils.network import NetworkModule, NetworkError +from ansible.module_utils.six import iteritems + + +def add_command(runner, command, output=None): + try: + runner.add_command(command, output) + except AddCommandError: + # AddCommandError is raised for any issue adding a command to + # the runner. 
Silently ignore the exception in this case + pass + +class FactsBase(object): + + def __init__(self, module, runner): + self.module = module + self.runner = runner + self.facts = dict() + self.commands() + + def commands(self): + raise NotImplementedError + + def transform_dict(self, data, keymap): + transform = dict() + for key, fact in keymap: + if key in data: + transform[fact] = data[key] + return transform + + def transform_iterable(self, iterable, keymap): + for item in iterable: + yield self.transform_dict(item, keymap) + + +class Default(FactsBase): + + VERSION_MAP = frozenset([ + ('sys_ver_str', 'version'), + ('proc_board_id', 'serialnum'), + ('chassis_id', 'model'), + ('isan_file_name', 'image'), + ('host_name', 'hostname') + ]) + + def commands(self): + add_command(self.runner, 'show version', output='json') + + def populate(self): + data = self.runner.get_command('show version', output='json') + self.facts.update(self.transform_dict(data, self.VERSION_MAP)) + +class Config(FactsBase): + + def commands(self): + add_command(self.runner, 'show running-config') + + def populate(self): + self.facts['config'] = self.runner.get_command('show running-config') + + +class Hardware(FactsBase): + + def commands(self): + add_command(self.runner, 'dir', output='text') + add_command(self.runner, 'show system resources', output='json') + + def populate(self): + data = self.runner.get_command('dir', 'text') + self.facts['filesystems'] = self.parse_filesystems(data) + + data = self.runner.get_command('show system resources', output='json') + self.facts['memtotal_mb'] = int(data['memory_usage_total']) / 1024 + self.facts['memfree_mb'] = int(data['memory_usage_free']) / 1024 + + def parse_filesystems(self, data): + return re.findall(r'^Usage for (\S+)//', data, re.M) + + +class Interfaces(FactsBase): + + INTERFACE_MAP = frozenset([ + ('state', 'state'), + ('desc', 'description'), + ('eth_bw', 'bandwidth'), + ('eth_duplex', 'duplex'), + ('eth_speed', 'speed'), + 
('eth_mode', 'mode'), + ('eth_hw_addr', 'macaddress'), + ('eth_mtu', 'mtu'), + ('eth_hw_desc', 'type') + ]) + + INTERFACE_IPV4_MAP = frozenset([ + ('eth_ip_addr', 'address'), + ('eth_ip_mask', 'masklen') + ]) + + INTERFACE_IPV6_MAP = frozenset([ + ('addr', 'address'), + ('prefix', 'subnet') + ]) + + def commands(self): + add_command(self.runner, 'show interface', output='json') + + try: + self.module.cli('show ipv6 interface', 'json') + add_command(self.runner, 'show ipv6 interface', output='json') + self.ipv6 = True + except NetworkError: + self.ipv6 = False + + try: + self.module.cli(['show lldp neighbors']) + add_command(self.runner, 'show lldp neighbors', output='json') + self.lldp_enabled = True + except NetworkError: + self.lldp_enabled = False + + def populate(self): + self.facts['all_ipv4_addresses'] = list() + self.facts['all_ipv6_addresses'] = list() + + data = self.runner.get_command('show interface', 'json') + self.facts['interfaces'] = self.populate_interfaces(data) + + if self.ipv6: + data = self.runner.get_command('show ipv6 interface', 'json') + if data: + self.parse_ipv6_interfaces(data) + + if self.lldp_enabled: + data = self.runner.get_command('show lldp neighbors', 'json') + self.facts['neighbors'] = self.populate_neighbors(data) + + def populate_interfaces(self, data): + interfaces = dict() + for item in data['TABLE_interface']['ROW_interface']: + name = item['interface'] + + intf = dict() + intf.update(self.transform_dict(item, self.INTERFACE_MAP)) + + if 'eth_ip_addr' in item: + intf['ipv4'] = self.transform_dict(item, self.INTERFACE_IPV4_MAP) + self.facts['all_ipv4_addresses'].append(item['eth_ip_addr']) + + interfaces[name] = intf + + return interfaces + + def populate_neighbors(self, data): + data = data['TABLE_nbor'] + if isinstance(data, dict): + data = [data] + + objects = dict() + for item in data: + local_intf = item['ROW_nbor']['l_port_id'] + if local_intf not in objects: + objects[local_intf] = list() + nbor = dict() + nbor['port'] 
= item['ROW_nbor']['port_id'] + nbor['host'] = item['ROW_nbor']['chassis_id'] + objects[local_intf].append(nbor) + return objects + + def parse_ipv6_interfaces(self, data): + data = data['TABLE_intf'] + if isinstance(data, dict): + data = [data] + for item in data: + name = item['ROW_intf']['intf-name'] + intf = self.facts['interfaces'][name] + intf['ipv6'] = self.transform_dict(item, self.INTERFACE_IPV6_MAP) + self.facts['all_ipv6_addresses'].append(item['ROW_intf']['addr']) + +class Legacy(FactsBase): + # facts from nxos_facts 2.1 + + VERSION_MAP = frozenset([ + ('host_name', '_hostname'), + ('kickstart_ver_str', '_os'), + ('chassis_id', '_platform') + ]) + + MODULE_MAP = frozenset([ + ('model', 'model'), + ('modtype', 'type'), + ('ports', 'ports'), + ('status', 'status') + ]) + + FAN_MAP = frozenset([ + ('fanname', 'name'), + ('fanmodel', 'model'), + ('fanhwver', 'hw_ver'), + ('fandir', 'direction'), + ('fanstatus', 'status') + ]) + + POWERSUP_MAP = frozenset([ + ('psmodel', 'model'), + ('psnum', 'number'), + ('ps_status', 'status'), + ('actual_out', 'actual_output'), + ('actual_in', 'actual_in'), + ('total_capa', 'total_capacity') + ]) + + def commands(self): + add_command(self.runner, 'show version', output='json') + add_command(self.runner, 'show module', output='json') + add_command(self.runner, 'show environment', output='json') + add_command(self.runner, 'show interface', output='json') + add_command(self.runner, 'show vlan brief', output='json') + + def populate(self): + data = self.runner.get_command('show version', 'json') + self.facts.update(self.transform_dict(data, self.VERSION_MAP)) + + data = self.runner.get_command('show interface', 'json') + self.facts['_interfaces_list'] = self.parse_interfaces(data) + + data = self.runner.get_command('show vlan brief', 'json') + self.facts['_vlan_list'] = self.parse_vlans(data) + + data = self.runner.get_command('show module', 'json') + self.facts['_module'] = self.parse_module(data) + + data = 
self.runner.get_command('show environment', 'json') + self.facts['_fan_info'] = self.parse_fan_info(data) + self.facts['_power_supply_info'] = self.parse_power_supply_info(data) + + def parse_interfaces(self, data): + objects = list() + for item in data['TABLE_interface']['ROW_interface']: + objects.append(item['interface']) + return objects + + def parse_vlans(self, data): + objects = list() + data = data['TABLE_vlanbriefxbrief']['ROW_vlanbriefxbrief'] + if isinstance(data, dict): + objects.append(data['vlanshowbr-vlanid-utf']) + elif isinstance(data, list): + for item in data: + objects.append(item['vlanshowbr-vlanid-utf']) + return objects + + def parse_module(self, data): + data = data['TABLE_modinfo']['ROW_modinfo'] + objects = list(self.transform_iterable(data, self.MODULE_MAP)) + return objects + + def parse_fan_info(self, data): + data = data['fandetails']['TABLE_faninfo']['ROW_faninfo'] + objects = list(self.transform_iterable(data, self.FAN_MAP)) + return objects + + def parse_power_supply_info(self, data): + data = data['powersup']['TABLE_psinfo']['ROW_psinfo'] + objects = list(self.transform_iterable(data, self.POWERSUP_MAP)) + return objects + + +FACT_SUBSETS = dict( + default=Default, + legacy=Legacy, + hardware=Hardware, + interfaces=Interfaces, + config=Config, +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + +def main(): + spec = dict( + gather_subset=dict(default=['!config'], type='list') + ) + + module = NetworkModule(argument_spec=spec, supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + 
exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + runner = CommandRunner(module) + + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](module, runner)) + + try: + runner.run() + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc), **exc.kwargs) + + try: + for inst in instances: + inst.populate() + facts.update(inst.facts) + except Exception: + raise + module.exit_json(out=module.from_json(runner.items)) + + ansible_facts = dict() + for key, value in iteritems(facts): + # this is to maintain capability with nxos_facts 2.1 + if key.startswith('_'): + ansible_facts[key[1:]] = value + else: + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + module.exit_json(ansible_facts=ansible_facts) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_feature.py b/network/nxos/nxos_feature.py new file mode 100644 index 00000000000..2a532dd5994 --- /dev/null +++ b/network/nxos/nxos_feature.py @@ -0,0 +1,501 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_feature +version_added: "2.1" +short_description: Manage features in NX-OS switches. +description: + - Offers ability to enable and disable features in NX-OS. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +options: + feature: + description: + - Name of feature. + required: true + state: + description: + - Desired state of the feature. + required: false + default: 'enabled' + choices: ['enabled','disabled'] +''' + +EXAMPLES = ''' +- name: Ensure lacp is enabled + nxos_feature: + feature: lacp + state: enabled + host: "{{ inventory_hostname }}" + +- name: Ensure ospf is disabled + nxos_feature: + feature: ospf + state: disabled + host: "{{ inventory_hostname }}" + +- name: Ensure vpc is enabled + nxos_feature: + feature: vpc + state: enabled + host: "{{ inventory_hostname }}" + +''' + +RETURN = ''' +proposed: + description: proposed feature state + returned: always + type: dict + sample: {"state": "disabled"} +existing: + description: existing state of feature + returned: always + type: dict + sample: {"state": "enabled"} +end_state: + description: feature state after executing module + returned: always + type: dict + sample: {"state": "disabled"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["no feature eigrp"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +feature: + description: the feature that has been examined + returned: always + type: string + sample: "vpc" +''' + +import json +import collections + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from 
ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + 
if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. 
This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. + """ + if 'xml' in response[0]: + body = [] + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = str(value) + else: + new_dict[new_key] = value + return new_dict + + +def get_available_features(feature, 
module): + available_features = {} + feature_regex = '(?P\S+)\s+\d+\s+(?P.*)' + command = 'show feature' + + body = execute_show_command(command, module, command_type='cli_show_ascii') + split_body = body[0].splitlines() + + for line in split_body: + try: + match_feature = re.match(feature_regex, line, re.DOTALL) + feature_group = match_feature.groupdict() + feature = feature_group['feature'] + state = feature_group['state'] + except AttributeError: + feature = '' + state = '' + + if feature and state: + if 'enabled' in state: + state = 'enabled' + + if feature not in available_features: + available_features[feature] = state + else: + if (available_features[feature] == 'disabled' and + state == 'enabled'): + available_features[feature] = state + + return available_features + + + +def get_commands(proposed, existing, state, module): + feature = validate_feature(module, mode='config') + commands = [] + feature_check = proposed == existing + if not feature_check: + if state == 'enabled': + command = 'feature {0}'.format(feature) + commands.append(command) + elif state == 'disabled': + command = "no feature {0}".format(feature) + commands.append(command) + return commands + + +def validate_feature(module, mode='show'): + '''Some features may need to be mapped due to inconsistency + between how they appear from "show feature" output and + how they are configured''' + + feature = module.params['feature'] + + feature_to_be_mapped = { + 'show': { + 'nv overlay': 'nve', + 'vn-segment-vlan-based': 'vnseg_vlan', + 'hsrp': 'hsrp_engine', + 'fabric multicast': 'fabric_mcast', + 'scp-server': 'scpServer', + 'sftp-server': 'sftpServer', + 'sla responder': 'sla_responder', + 'sla sender': 'sla_sender', + 'ssh': 'sshServer', + 'tacacs+': 'tacacs', + 'telnet': 'telnetServer', + 'ethernet-link-oam': 'elo', + 'port-security': 'eth_port_sec' + }, + 'config': + { + 'nve': 'nv overlay', + 'vnseg_vlan': 'vn-segment-vlan-based', + 'hsrp_engine': 'hsrp', + 'fabric_mcast': 'fabric 
multicast', + 'scpServer': 'scp-server', + 'sftpServer': 'sftp-server', + 'sla_sender': 'sla sender', + 'sla_responder': 'sla responder', + 'sshServer': 'ssh', + 'tacacs': 'tacacs+', + 'telnetServer': 'telnet', + 'elo': 'ethernet-link-oam', + 'eth_port_sec': 'port-security' + } + } + + if feature in feature_to_be_mapped[mode]: + feature = feature_to_be_mapped[mode][feature] + + return feature + + +def main(): + argument_spec = dict( + feature=dict(type='str', required=True), + state=dict(choices=['enabled', 'disabled'], default='enabled', + required=False), + include_defaults=dict(default=False), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + feature = validate_feature(module) + state = module.params['state'].lower() + + available_features = get_available_features(feature, module) + if feature not in available_features: + module.fail_json( + msg='Invalid feature name.', + features_currently_supported=available_features, + invalid_feature=feature) + else: + existstate = available_features[feature] + + existing = dict(state=existstate) + proposed = dict(state=state) + changed = False + end_state = existing + + cmds = get_commands(proposed, existing, state, module) + + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + execute_config_command(cmds, module) + changed = True + updated_features = get_available_features(feature, module) + existstate = updated_features[feature] + end_state = dict(state=existstate) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = cmds + results['changed'] = changed + results['feature'] = module.params['feature'] + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_file_copy.py b/network/nxos/nxos_file_copy.py new file 
mode 100644 index 00000000000..0885c4575e2 --- /dev/null +++ b/network/nxos/nxos_file_copy.py @@ -0,0 +1,416 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_file_copy +version_added: "2.2" +short_description: Copy a file to a remote NXOS device over SCP. +description: + - Copy a file to the flash (or bootflash) remote network device + on NXOS devices. +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +extends_documentation_fragment: nxos +notes: + - The feature must be enabled with feature scp-server. + - If the file is already present (md5 sums match), no transfer will + take place. + - Check mode will tell you if the file would be copied. +options: + local_file: + description: + - Path to local file. Local directory must exist. + required: true + remote_file: + description: + - Remote file path of the copy. Remote directories must exist. + If omitted, the name of the local file will be used. + required: false + default: null + file_system: + description: + - The remote file system of the device. If omitted, + devices that support a file_system parameter will use + their default values. 
+ required: false + default: null +''' + +EXAMPLES = ''' +- nxos_file_copy: + local_file: "./test_file.txt" + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +transfer_status: + description: Whether a file was transferred. "No Transfer" or "Sent". + returned: success + type: string + sample: 'Sent' +local_file: + description: The path of the local file. + returned: success + type: string + sample: '/path/to/local/file' +remote_file: + description: The path of the remote file. + returned: success + type: string + sample: '/path/to/remote/file' +''' + + +import os +from scp import SCPClient +import paramiko +import time + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not 
exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + 
try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + cmds = [command] + body = execute_show(cmds, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def remote_file_exists(module, dst, file_system='bootflash:'): + command = 'dir {0}/{1}'.format(file_system, dst) + body = execute_show_command(command, module, command_type='cli_show_ascii') + if 'No such file' in body[0]: + return False + return True + + +def verify_remote_file_exists(module, dst, file_system='bootflash:'): + command = 'dir {0}/{1}'.format(file_system, dst) + body = execute_show_command(command, module, command_type='cli_show_ascii') + if 'No such file' in body[0]: + return 0 + return body[0].split()[0].strip() + + +def local_file_exists(module): + return os.path.isfile(module.params['local_file']) + + +def 
get_flash_size(module): + command = 'dir {}'.format(module.params['file_system']) + body = execute_show_command(command, module, command_type='cli_show_ascii') + + match = re.search(r'(\d+) bytes free', body[0]) + bytes_free = match.group(1) + + return int(bytes_free) + + +def enough_space(module): + flash_size = get_flash_size(module) + file_size = os.path.getsize(module.params['local_file']) + if file_size > flash_size: + return False + + return True + + +def transfer_file(module, dest): + file_size = os.path.getsize(module.params['local_file']) + + if not local_file_exists(module): + module.fail_json(msg='Could not transfer file. Local file doesn\'t exist.') + + if not enough_space(module): + module.fail_json(msg='Could not transfer file. Not enough space on device.') + + hostname = module.params['host'] + username = module.params['username'] + password = module.params['password'] + + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh.connect( + hostname=hostname, + username=username, + password=password) + + full_remote_path = '{}{}'.format(module.params['file_system'], dest) + scp = SCPClient(ssh.get_transport()) + try: + scp.put(module.params['local_file'], full_remote_path) + except: + time.sleep(10) + temp_size = verify_remote_file_exists( + module, dest, file_system=module.params['file_system']) + if int(temp_size) == int(file_size): + pass + else: + module.fail_json(msg='Could not transfer file. There was an error ' + 'during transfer. 
Please make sure remote ' + 'permissions are set.', temp_size=temp_size, + file_size=file_size) + scp.close() + return True + + +def main(): + argument_spec = dict( + local_file=dict(required=True), + remote_file=dict(required=False), + file_system=dict(required=False, default='bootflash:'), + include_defaults=dict(default=True), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + local_file = module.params['local_file'] + remote_file = module.params['remote_file'] + file_system = module.params['file_system'] + + changed = False + transfer_status = 'No Transfer' + + if not os.path.isfile(local_file): + module.fail_json(msg="Local file {} not found".format(local_file)) + + dest = remote_file or os.path.basename(local_file) + remote_exists = remote_file_exists(module, dest, file_system=file_system) + + if not remote_exists: + changed = True + file_exists = False + else: + file_exists = True + + if not module.check_mode and not file_exists: + try: + transfer_file(module, dest) + transfer_status = 'Sent' + except ShellError: + clie = get_exception() + module.fail_json(msg=str(clie)) + + if remote_file is None: + remote_file = os.path.basename(local_file) + + module.exit_json(changed=changed, + transfer_status=transfer_status, + local_file=local_file, + remote_file=remote_file, + file_system=file_system) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_gir.py b/network/nxos/nxos_gir.py new file mode 100644 index 00000000000..f72f7d79a07 --- /dev/null +++ b/network/nxos/nxos_gir.py @@ -0,0 +1,511 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_gir +version_added: "2.2" +short_description: Trigger a graceful removal or insertion (GIR) of the switch. +description: + - Trigger a graceful removal or insertion (GIR) of the switch. +extends_documentation_fragment: nxos +author: + - Gabriele Gerbino (@GGabriele) +notes: + - C(state) has effect only in combination with + C(system_mode_maintenance_timeout) or + C(system_mode_maintenance_on_reload_reset_reason). + - Using C(system_mode_maintenance) and + C(system_mode_maintenance_dont_generate_profile) would make the module + fail, but the system mode will be triggered anyway. +options: + system_mode_maintenance: + description: + - When C(system_mode_maintenance=true) it puts all enabled + protocols in maintenance mode (using the isolate command). + When C(system_mode_maintenance=false) it puts all enabled + protocols in normal mode (using the no isolate command). + required: false + default: null + choices: ['true','false'] + system_mode_maintenance_dont_generate_profile: + description: + - When C(system_mode_maintenance_dont_generate_profile=true) it + prevents the dynamic searching of enabled protocols and executes + commands configured in a maintenance-mode profile. + Use this option if you want the system to use a maintenance-mode + profile that you have created. + When C(system_mode_maintenance_dont_generate_profile=false) it + prevents the dynamic searching of enabled protocols and executes + commands configured in a normal-mode profile. 
Use this option if + you want the system to use a normal-mode profile that + you have created. + required: false + default: null + choices: ['true','false'] + system_mode_maintenance_timeout: + description: + - Keeps the switch in maintenance mode for a specified + number of minutes. Range is 5-65535. + required: false + default: null + system_mode_maintenance_shutdown: + description: + - Shuts down all protocols, vPC domains, and interfaces except + the management interface (using the shutdown command). + This option is disruptive while C(system_mode_maintenance) + (which uses the isolate command) is not. + required: false + default: null + choices: ['true','false'] + system_mode_maintenance_on_reload_reset_reason: + description: + - Boots the switch into maintenance mode automatically in the + event of a specified system crash. + required: false + default: null + choices: ['hw_error','svc_failure','kern_failure','wdog_timeout', + 'fatal_error','lc_failure','match_any','manual_reload'] + state: + description: + - Specify desired state of the resource. 
+ required: true + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +# Trigger system maintenance mode +- nxos_gir: + system_mode_maintenance: true + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +# Trigger system normal mode +- nxos_gir: + system_mode_maintenance: false + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +# Configure on-reload reset-reason for maintenance mode +- nxos_gir: + system_mode_maintenance_on_reload_reset_reason: manual_reload + state: present + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +# Add on-reload reset-reason for maintenance mode +- nxos_gir: + system_mode_maintenance_on_reload_reset_reason: hw_error + state: present + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +# Remove on-reload reset-reason for maintenance mode +- nxos_gir: + system_mode_maintenance_on_reload_reset_reason: manual_reload + state: absent + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +# Set timeout for maintenance mode +- nxos_gir: + system_mode_maintenance_timeout: 30 + state: present + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +# Remove timeout for maintenance mode +- nxos_gir: + system_mode_maintenance_timeout: 30 + state: absent + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + +RETURN = ''' +final_system_mode: + description: describe the last system mode + returned: verbose mode + type: string + sample: normal +updates: + description: commands sent to the device + returned: verbose mode + type: list + sample: ["terminal dont-ask", "system mode maintenance timeout 10"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception 
+from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + 
ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + 
response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show_ascii'): + cmds = [command] + if module.params['transport'] == 'cli': + body = execute_show(cmds, module) + elif module.params['transport'] == 'nxapi': + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_system_mode(module): + command = 'show system mode' + body = execute_show_command(command, module)[0] + if 'normal' in body.lower(): + mode = 'normal' + else: + mode = 'maintenance' + return mode + + +def get_maintenance_timeout(module): + command = 'show maintenance timeout' + body = execute_show_command(command, module)[0] + timeout = body.split()[4] + return timeout + + +def get_reset_reasons(module): + command = 'show maintenance on-reload reset-reasons' + body = execute_show_command(command, module)[0] + return body + + +def get_commands(module, state, mode): + commands = list() + system_mode = '' + if module.params['system_mode_maintenance'] is True and mode == 'normal': + commands.append('system mode maintenance') + elif (module.params['system_mode_maintenance'] is False and + mode == 'maintenance'): + commands.append('no system mode maintenance') + + elif (module.params[ + 
'system_mode_maintenance_dont_generate_profile'] is True and + mode == 'normal'): + commands.append('system mode maintenance dont-generate-profile') + elif (module.params[ + 'system_mode_maintenance_dont_generate_profile'] is False and + mode == 'maintenance'): + commands.append('no system mode maintenance dont-generate-profile') + + elif module.params['system_mode_maintenance_timeout']: + timeout = get_maintenance_timeout(module) + if (state == 'present' and + timeout != module.params['system_mode_maintenance_timeout']): + commands.append('system mode maintenance timeout {0}'.format( + module.params['system_mode_maintenance_timeout'])) + elif (state == 'absent' and + timeout == module.params['system_mode_maintenance_timeout']): + commands.append('no system mode maintenance timeout {0}'.format( + module.params['system_mode_maintenance_timeout'])) + + elif module.params['system_mode_maintenance_shutdown'] is True: + commands.append('system mode maintenance shutdown') + + elif module.params['system_mode_maintenance_on_reload_reset_reason']: + reset_reasons = get_reset_reasons(module) + if (state == 'present' and + module.params[ + 'system_mode_maintenance_on_reload_reset_reason'].lower() not + in reset_reasons.lower()): + commands.append('system mode maintenance on-reload ' + 'reset-reason {0}'.format( + module.params[ + 'system_mode_maintenance_on_reload_reset_reason'])) + elif (state == 'absent' and + module.params[ + 'system_mode_maintenance_on_reload_reset_reason'].lower() in + reset_reasons.lower()): + commands.append('no system mode maintenance on-reload ' + 'reset-reason {0}'.format( + module.params[ + 'system_mode_maintenance_on_reload_reset_reason'])) + + if commands: + commands.insert(0, 'terminal dont-ask') + return commands + + +def main(): + argument_spec = dict( + system_mode_maintenance=dict(required=False, type='bool'), + system_mode_maintenance_dont_generate_profile=dict(required=False, + type='bool'), + 
system_mode_maintenance_timeout=dict(required=False, type='str'), + system_mode_maintenance_shutdown=dict(required=False, type='bool'), + system_mode_maintenance_on_reload_reset_reason=dict(required=False, + choices=['hw_error','svc_failure','kern_failure', + 'wdog_timeout','fatal_error','lc_failure', + 'match_any','manual_reload']), + state=dict(choices=['absent', 'present', 'default'], + default='present', required=False) + ) + module = get_network_module(argument_spec=argument_spec, + mutually_exclusive=[[ + 'system_mode_maintenance', + 'system_mode_maintenance_dont_generate_profile', + 'system_mode_maintenance_timeout', + 'system_mode_maintenance_shutdown', + 'system_mode_maintenance_on_reload_reset_reason' + ]], + required_one_of=[[ + 'system_mode_maintenance', + 'system_mode_maintenance_dont_generate_profile', + 'system_mode_maintenance_timeout', + 'system_mode_maintenance_shutdown', + 'system_mode_maintenance_on_reload_reset_reason' + ]], + supports_check_mode=True) + + state = module.params['state'] + mode = get_system_mode(module) + commands = get_commands(module, state, mode) + changed = False + if commands: + if module.check_mode: + module.exit_json(changed=True, commands=commands) + else: + execute_config_command(commands, module) + changed = True + + result = {} + result['connected'] = module.connected + result['changed'] = changed + if module._verbosity > 0: + final_system_mode = get_system_mode(module) + result['final_system_mode'] = final_system_mode + result['updates'] = commands + + module.exit_json(**result) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/network/nxos/nxos_gir_profile_management.py b/network/nxos/nxos_gir_profile_management.py new file mode 100644 index 00000000000..99c29d6253d --- /dev/null +++ b/network/nxos/nxos_gir_profile_management.py @@ -0,0 +1,383 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the 
terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_gir_profile +version_added: "2.2" +short_description: Create a maintenance-mode or normal-mode profile for GIR. +description: + - Manage a maintenance-mode or normal-mode profile with configuration + commands that can be applied during graceful removal + or graceful insertion. +extends_documentation_fragment: nxos +author: + - Gabriele Gerbino (@GGabriele) +notes: + - This module is not idempotent when C(state=present). + - C(state=absent) removes the whole profile. +options: + commands: + description: + - List of commands to be included into the profile. + required: false + default: null + mode: + description: + - Configure the profile as Maintenance or Normal mode. + required: true + choices: ['maintenance', 'normal'] + state: + description: + - Specify desired state of the resource. + required: false + default: present + choices: ['present','absent'] + include_defaults: + description: + - Specify to retrieve or not the complete running configuration + for module operations. + required: false + default: false + choices: ['true','false'] + config: + description: + - Specify the configuration string to be used for module operations. 
+ required: false + default: null +''' + +EXAMPLES = ''' +# Create a maintenance-mode profile +- nxos_gir_profile: + mode: maintenance + commands: + - router eigrp 11 + - isolate + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +# Remove the maintenance-mode profile +- nxos_gir_profile: + mode: maintenance + state: absent + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + +RETURN = ''' +proposed: + description: list of commands passed into module. + returned: verbose mode + type: list + sample: ["router eigrp 11", "isolate"] +existing: + description: list of existing profile commands. + returned: verbose mode + type: list + sample: ["router bgp 65535","isolate","router eigrp 10","isolate", + "diagnostic bootup level complete"] +end_state: + description: list of profile entries after module execution. + returned: verbose mode + type: list + sample: ["router bgp 65535","isolate","router eigrp 10","isolate", + "diagnostic bootup level complete","router eigrp 11", "isolate"] +updates: + description: commands sent to the device + returned: always + type: list + sample: ["configure maintenance profile maintenance-mode", + "router eigrp 11","isolate"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + 
S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def 
get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def get_existing(module): + existing = [] + netcfg = get_config(module) + + if module.params['mode'] == 'maintenance': + parents = ['configure maintenance profile maintenance-mode'] + else: + parents = ['configure maintenance profile normal-mode'] + + config = netcfg.get_section(parents) + if config: + existing = config.splitlines() + existing = [cmd.strip() for cmd in existing] + existing.pop(0) + + return existing + + +def state_present(module, existing, commands): + cmds = list() + cmds.extend(commands) + if module.params['mode'] == 'maintenance': + cmds.insert(0, 'configure maintenance profile maintenance-mode') + else: + cmds.insert(0, 'configure maintenance profile normal-mode') + + return cmds + + +def state_absent(module, existing, commands): + if module.params['mode'] == 'maintenance': + cmds = ['no configure maintenance profile maintenance-mode'] + else: + cmds = ['no configure maintenance profile normal-mode'] + return cmds + + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + 
+def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def main(): + argument_spec = dict( + commands=dict(required=False, type='list'), + mode=dict(required=True, choices=['maintenance', 'normal']), + state=dict(choices=['absent', 'present'], + default='present'), + include_defaults=dict(default=False), + config=dict() + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + state = module.params['state'] + commands = module.params['commands'] or [] + + if state == 'absent' and commands: + module.fail_json(msg='when state is absent, no command can be used.') + + existing = invoke('get_existing', module) + end_state = existing + changed = False + + result = {} + cmds = [] + if state == 'present' or (state == 'absent' and existing): + cmds = invoke('state_%s' % state, module, existing, commands) + + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + execute_config_command(cmds, module) + changed = True + end_state = invoke('get_existing', module) + + result['connected'] = module.connected + result['changed'] = changed + if module._verbosity > 0: + end_state = invoke('get_existing', module) + result['end_state'] = end_state + result['existing'] = existing + result['proposed'] = commands + result['updates'] = cmds + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_hsrp.py b/network/nxos/nxos_hsrp.py new file mode 100644 index 00000000000..9e9e7e3542a --- /dev/null +++ b/network/nxos/nxos_hsrp.py @@ -0,0 
+1,721 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_hsrp +version_added: "2.2" +short_description: Manages HSRP configuration on NX-OS switches. +description: + - Manages HSRP configuration on NX-OS switches. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +notes: + - HSRP feature needs to be enabled first on the system. + - SVIs must exist before using this module. + - Interface must be a L3 port before using this module. + - HSRP cannot be configured on loopback interfaces. + - MD5 authentication is only possible with HSRPv2 while it is ignored if + HSRPv1 is used instead, while it will not raise any error. Here we allow + MD5 authentication only with HSRPv2 in order to enforce better practice. +options: + group: + description: + - HSRP group number. + required: true + interface: + description: + - Full name of interface that is being managed for HSRP. + required: true + version: + description: + - HSRP version. + required: false + default: 2 + choices: ['1','2'] + priority: + description: + - HSRP priority. + required: false + default: null + vip: + description: + - HSRP virtual IP address. 
+ required: false + default: null + auth_string: + description: + - Authentication string. + required: false + default: null + auth_type: + description: + - Authentication type. + required: false + default: null + choices: ['text','md5'] + state: + description: + - Specify desired state of the resource. + required: false + choices: ['present','absent'] + default: 'present' +''' + +EXAMPLES = ''' +- name: Ensure HSRP is configured with following params on a SVI + nxos_hsrp: + group: 10 + vip: 10.1.1.1 + priority: 150 + interface: vlan10 + preempt: enabled + host: 68.170.147.165 + +- name: Ensure HSRP is configured with following params on a SVI + nxos_hsrp: + group: 10 + vip: 10.1.1.1 + priority: 150 + interface: vlan10 + preempt: enabled + host: 68.170.147.165 + auth_type: text + auth_string: CISCO + +- name: Remove HSRP config for given interface, group, and VIP + nxos_hsrp: + group: 10 + interface: vlan10 + vip: 10.1.1.1 + host: 68.170.147.165 + state: absent +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"group": "30", "version": "2", "vip": "10.30.1.1"} +existing: + description: k/v pairs of existing hsrp info on the interface + type: dict + sample: {} +end_state: + description: k/v pairs of hsrp after module execution + returned: always + type: dict + sample: {"auth_string": "cisco", "auth_type": "text", + "group": "30", "interface": "vlan10", "preempt": "disabled", + "priority": "100", "version": "2", "vip": "10.30.1.1"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["interface vlan10", "hsrp version 2", "hsrp 30", "ip 10.30.1.1"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import json + +# COMMON CODE FOR MIGRATION + +import ansible.module_utils.nxos +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg 
import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError +from ansible.module_utils.network import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in 
ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +def execute_config_command(commands, module): + try: + output = module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + output = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + return output + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. 
This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when issuing commands containing 'show run'. + """ + if 'xml' in response[0]: + body = [] + elif 'show run' in command: + body = response + else: + try: + response = response[0].replace(command + '\n\n', '').strip() + body = [json.loads(response)] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = 
key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = str(value) + else: + new_dict[new_key] = value + return new_dict + + +def get_interface_type(interface): + if interface.upper().startswith('ET'): + return 'ethernet' + elif interface.upper().startswith('VL'): + return 'svi' + elif interface.upper().startswith('LO'): + return 'loopback' + elif interface.upper().startswith('MG'): + return 'management' + elif interface.upper().startswith('MA'): + return 'management' + elif interface.upper().startswith('PO'): + return 'portchannel' + else: + return 'unknown' + + +def get_interface_mode(interface, intf_type, module): + command = 'show interface {0}'.format(interface) + interface = {} + mode = 'unknown' + + if intf_type in ['ethernet', 'portchannel']: + body = execute_show_command(command, module)[0] + interface_table = body['TABLE_interface']['ROW_interface'] + mode = str(interface_table.get('eth_mode', 'layer3')) + if mode == 'access' or mode == 'trunk': + mode = 'layer2' + elif intf_type == 'svi': + mode = 'layer3' + return mode + + +def get_hsrp_groups_on_interfaces(device, module): + command = 'show hsrp all' + body = execute_show_command(command, module) + hsrp = {} + + try: + get_data = body[0]['TABLE_grp_detail']['ROW_grp_detail'] + except (KeyError, AttributeError): + return {} + + for entry in get_data: + interface = str(entry['sh_if_index'].lower()) + value = hsrp.get(interface, 'new') + if value == 'new': + hsrp[interface] = [] + group = str(entry['sh_group_num']) + hsrp[interface].append(group) + + return hsrp + + +def get_hsrp_group(group, interface, module): + command = 'show hsrp group {0}'.format(group) + body = execute_show_command(command, module) + hsrp = {} + + hsrp_key = { + 'sh_if_index': 'interface', + 'sh_group_num': 'group', + 'sh_group_version': 'version', + 'sh_cfg_prio': 'priority', + 'sh_preempt': 'preempt', + 'sh_vip': 'vip', + 'sh_authentication_type': 'auth_type', + 'sh_authentication_data': 
'auth_string' + } + + try: + hsrp_table = body[0]['TABLE_grp_detail']['ROW_grp_detail'] + except (AttributeError, IndexError, TypeError): + return {} + + if isinstance(hsrp_table, dict): + hsrp_table = [hsrp_table] + + for hsrp_group in hsrp_table: + parsed_hsrp = apply_key_map(hsrp_key, hsrp_group) + + parsed_hsrp['interface'] = parsed_hsrp['interface'].lower() + + if parsed_hsrp['version'] == 'v1': + parsed_hsrp['version'] = '1' + elif parsed_hsrp['version'] == 'v2': + parsed_hsrp['version'] = '2' + + if parsed_hsrp['interface'] == interface: + return parsed_hsrp + + return hsrp + + +def get_commands_remove_hsrp(group, interface): + commands = [] + commands.append('interface {0}'.format(interface)) + commands.append('no hsrp {0}'.format(group)) + return commands + + +def get_commands_config_hsrp(delta, interface, args): + commands = [] + + config_args = { + 'group': 'hsrp {group}', + 'priority': 'priority {priority}', + 'preempt': '{preempt}', + 'vip': 'ip {vip}' + } + + preempt = delta.get('preempt', None) + group = delta.get('group', None) + if preempt: + if preempt == 'enabled': + delta['preempt'] = 'preempt' + elif preempt == 'disabled': + delta['preempt'] = 'no preempt' + + for key, value in delta.iteritems(): + command = config_args.get(key, 'DNE').format(**delta) + if command and command != 'DNE': + if key == 'group': + commands.insert(0, command) + else: + commands.append(command) + command = None + + auth_type = delta.get('auth_type', None) + auth_string = delta.get('auth_string', None) + if auth_type or auth_string: + if not auth_type: + auth_type = args['auth_type'] + elif not auth_string: + auth_string = args['auth_string'] + if auth_type == 'md5': + command = 'authentication md5 key-string {0}'.format(auth_string) + commands.append(command) + elif auth_type == 'text': + command = 'authentication text {0}'.format(auth_string) + commands.append(command) + + if commands and not group: + commands.insert(0, 'hsrp {0}'.format(args['group'])) + + version = 
delta.get('version', None) + if version: + if version == '2': + command = 'hsrp version 2' + elif version == '1': + command = 'hsrp version 1' + commands.insert(0, command) + commands.insert(0, 'interface {0}'.format(interface)) + + if commands: + if not commands[0].startswith('interface'): + commands.insert(0, 'interface {0}'.format(interface)) + + return commands + + +def is_default(interface, module): + command = 'show run interface {0}'.format(interface) + + try: + body = execute_show_command(command, module)[0] + if 'invalid' in body.lower(): + return 'DNE' + else: + raw_list = body.split('\n') + if raw_list[-1].startswith('interface'): + return True + else: + return False + except (KeyError): + return 'DNE' + + +def validate_config(body, vip, module): + new_body = ''.join(body) + if "invalid ip address" in new_body.lower(): + module.fail_json(msg="Invalid VIP. Possible duplicate IP address.", + vip=vip) + + +def validate_params(param, module): + value = module.params[param] + version = module.params['version'] + + if param == 'group': + try: + if (int(value) < 0 or int(value) > 255) and version == '1': + raise ValueError + elif int(value) < 0 or int(value) > 4095: + raise ValueError + except ValueError: + module.fail_json(msg="Warning! 'group' must be an integer between" + " 0 and 255 when version 1 and up to 4095 " + "when version 2.", group=value, + version=version) + elif param == 'priority': + try: + if (int(value) < 0 or int(value) > 255): + raise ValueError + except ValueError: + module.fail_json(msg="Warning! 
'priority' must be an integer " + "between 0 and 255", priority=value) + + +def main(): + argument_spec = dict( + group=dict(required=True, type='str'), + interface=dict(required=True), + version=dict(choices=['1', '2'], default='2', required=False), + priority=dict(type='str', required=False), + preempt=dict(type='str', choices=['disabled', 'enabled'], + required=False), + vip=dict(type='str', required=False), + auth_type=dict(choices=['text', 'md5'], required=False), + auth_string=dict(type='str', required=False), + state=dict(choices=['absent', 'present'], required=False, + default='present'), + include_defaults=dict(default=True), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + interface = module.params['interface'].lower() + group = module.params['group'] + version = module.params['version'] + state = module.params['state'] + priority = module.params['priority'] + preempt = module.params['preempt'] + vip = module.params['vip'] + auth_type = module.params['auth_type'] + auth_string = module.params['auth_string'] + + transport = module.params['transport'] + + if state == 'present' and not vip: + module.fail_json(msg='the "vip" param is required when state=present') + + for param in ['group', 'priority']: + if module.params[param] is not None: + validate_params(param, module) + + intf_type = get_interface_type(interface) + if (intf_type != 'ethernet' and transport == 'cli'): + if is_default(interface, module) == 'DNE': + module.fail_json(msg='That interface does not exist yet. 
Create ' + 'it first.', interface=interface) + if intf_type == 'loopback': + module.fail_json(msg="Loopback interfaces don't support HSRP.", + interface=interface) + + mode = get_interface_mode(interface, intf_type, module) + if mode == 'layer2': + module.fail_json(msg='That interface is a layer2 port.\nMake it ' + 'a layer 3 port first.', interface=interface) + + if auth_type or auth_string: + if not (auth_type and auth_string): + module.fail_json(msg='When using auth parameters, you need BOTH ' + 'auth_type AND auth_string.') + + args = dict(group=group, version=version, priority=priority, + preempt=preempt, vip=vip, auth_type=auth_type, + auth_string=auth_string) + + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + + existing = get_hsrp_group(group, interface, module) + + # This will enforce better practice with md5 and hsrp version. + if proposed.get('auth_type', None) == 'md5': + if proposed['version'] == '1': + module.fail_json(msg="It's recommended to use HSRP v2 " + "when auth_type=md5") + + elif not proposed.get('auth_type', None) and existing: + if (proposed['version'] == '1' and + existing['auth_type'] == 'md5'): + module.fail_json(msg="Existing auth_type is md5. 
It's recommended " + "to use HSRP v2 when using md5") + + changed = False + end_state = existing + commands = [] + if state == 'present': + delta = dict( + set(proposed.iteritems()).difference(existing.iteritems())) + if delta: + command = get_commands_config_hsrp(delta, interface, args) + commands.extend(command) + + elif state == 'absent': + if existing: + command = get_commands_remove_hsrp(group, interface) + commands.extend(command) + + if commands: + if module.check_mode: + module.exit_json(changed=True, commands=commands) + else: + body = execute_config_command(commands, module) + if transport == 'cli': + validate_config(body, vip, module) + changed = True + end_state = get_hsrp_group(group, interface, module) + if 'configure' in commands: + commands.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = commands + results['changed'] = changed + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_igmp.py b/network/nxos/nxos_igmp.py new file mode 100644 index 00000000000..3cd5a0dbd20 --- /dev/null +++ b/network/nxos/nxos_igmp.py @@ -0,0 +1,399 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_igmp +version_added: "2.2" +short_description: Manages IGMP global configuration. +description: + - Manages IGMP global configuration configuration settings. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +notes: + - When C(state=default), all supported params will be reset to a + default state. + - If restart is set to true with other params set, the restart will happen + last, i.e. after the configuration takes place. +options: + flush_routes: + description: + - Removes routes when the IGMP process is restarted. By default, + routes are not flushed. + required: false + default: null + choices: ['true', 'false'] + enforce_rtr_alert: + description: + - Enables or disables the enforce router alert option check for + IGMPv2 and IGMPv3 packets. + required: false + default: null + choices: ['true', 'false'] + restart: + description: + - Restarts the igmp process (using an exec config command). + required: false + default: null + choices: ['true', 'false'] + state: + description: + - Manages desired state of the resource. 
+ required: false + default: present + choices: ['present', 'default'] +''' +EXAMPLES = ''' +- name: Default igmp global params (all params except restart) + nxos_igmp: + state: default + host: "{{ inventory_hostname }}" + +- name: Ensure the following igmp global config exists on the device + nxos_igmp: + flush_routes: true + enforce_rtr_alert: true + host: "{{ inventory_hostname }}" + +- name: Restart the igmp process + nxos_igmp: + restart: true + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"enforce_rtr_alert": true, "flush_routes": true} +existing: + description: k/v pairs of existing IGMP configuration + returned: verbose mode + type: dict + sample: {"enforce_rtr_alert": true, "flush_routes": false} +end_state: + description: k/v pairs of IGMP configuration after module execution + returned: verbose mode + type: dict + sample: {"enforce_rtr_alert": true, "flush_routes": true} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["ip igmp flush-routes"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + 
self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + 
try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +PARAM_TO_COMMAND_KEYMAP = { + 'flush_routes': 'ip igmp flush-routes', + 'enforce_rtr_alert': 'ip igmp enforce-router-alert' +} + + +def get_value(arg, config): + REGEX = re.compile(r'{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = False + try: + if REGEX.search(config): + value = True + except TypeError: + value = False + return value + + +def get_existing(module, args): + existing = {} + config = str(get_config(module)) + + for arg in args: + existing[arg] = get_value(arg, config) + return existing + + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + +def get_commands(module, existing, proposed, candidate): + commands = list() + proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) + existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) + if module.params['state'] == 'default': + for key, value in proposed_commands.iteritems(): + if existing_commands.get(key): + commands.append('no {0}'.format(key)) + else: + for key, value in proposed_commands.iteritems(): + if value is True: + commands.append(key) + else: + if existing_commands.get(key): 
+ commands.append('no {0}'.format(key)) + + if module.params['restart']: + commands.append('restart igmp') + + if commands: + parents = [] + candidate.add(commands, parents=parents) + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = value + else: + new_dict[new_key] = value + return new_dict + + +def main(): + argument_spec = dict( + flush_routes=dict(type='bool'), + enforce_rtr_alert=dict(type='bool'), + restart=dict(type='bool', default=False), + state=dict(choices=['present', 'default'], default='present'), + include_defaults=dict(default=False), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + state = module.params['state'] + restart = module.params['restart'] + + if (state == 'default' and (module.params['flush_routes'] is not None or + module.params['enforce_rtr_alert'] is not None)): + module.fail_json(msg='When state=default other params have no effect.') + + args = [ + "flush_routes", + "enforce_rtr_alert", + ] + + existing = invoke('get_existing', module, args) + end_state = existing + + proposed = dict((k, v) for k, v in module.params.iteritems() + if v is not None and k in args) + + proposed_args = proposed.copy() + if state == 'default': + proposed_args = dict((k, False) for k in args) + + result = {} + if (state == 'present' or (state == 'default' and + True in existing.values()) or restart): + candidate = CustomNetworkConfig(indent=3) + invoke('get_commands', module, existing, proposed_args, candidate) + + try: + response = load_config(module, candidate) + result.update(response) + except ShellError: + exc = get_exception() + module.fail_json(msg=str(exc)) + else: + result['updates'] = [] + + if restart: + proposed['restart'] = restart + result['connected'] = module.connected + if module._verbosity > 0: + 
end_state = invoke('get_existing', module, args) + result['end_state'] = end_state + result['existing'] = existing + result['proposed'] = proposed + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_igmp_interface.py b/network/nxos/nxos_igmp_interface.py new file mode 100644 index 00000000000..d5e3226d346 --- /dev/null +++ b/network/nxos/nxos_igmp_interface.py @@ -0,0 +1,910 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_igmp_interface +version_added: "2.2" +short_description: Manages IGMP interface configuration. +description: + - Manages IGMP interface configuration settings. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +notes: + - When C(state=default), supported params will be reset to a default state. + These include C(version), C(startup_query_interval), + C(startup_query_count), C(robustness), C(querier_timeout), C(query_mrt), + C(query_interval), C(last_member_qrt), C(last_member_query_count), + C(group_timeout), C(report_llg), and C(immediate_leave). + - When C(state=absent), all configs for C(oif_prefix), C(oif_source), and + C(oif_routemap) will be removed. 
+ - PIM must be enabled to use this module. + - This module is for Layer 3 interfaces. + - Route-map check not performed (same as CLI) check when configuring + route-map with 'static-oif' + - If restart is set to true with other params set, the restart will happen + last, i.e. after the configuration takes place. +options: + interface: + description: + - The full interface name for IGMP configuration. + e.g. I(Ethernet1/2). + required: true + version: + description: + - IGMP version. It can be 2 or 3. + required: false + default: null + choices: ['2', '3'] + startup_query_interval: + description: + - Query interval used when the IGMP process starts up. + The range is from 1 to 18000. The default is 31. + required: false + default: null + startup_query_count: + description: + - Query count used when the IGMP process starts up. + The range is from 1 to 10. The default is 2. + required: false + default: null + robustness: + description: + - Sets the robustness variable. Values can range from 1 to 7. + The default is 2. + required: false + default: null + querier_timeout: + description: + - Sets the querier timeout that the software uses when deciding + to take over as the querier. Values can range from 1 to 65535 + seconds. The default is 255 seconds. + required: false + default: null + query_mrt: + description: + - Sets the response time advertised in IGMP queries. + Values can range from 1 to 25 seconds. The default is 10 seconds. + required: false + default: null + query_interval: + description: + - Sets the frequency at which the software sends IGMP host query + messages. Values can range from 1 to 18000 seconds. + he default is 125 seconds. + required: false + default: null + last_member_qrt: + description: + - Sets the query interval waited after sending membership reports + before the software deletes the group state. Values can range + from 1 to 25 seconds. The default is 1 second. 
+ required: false + default: null + last_member_query_count: + description: + - Sets the number of times that the software sends an IGMP query + in response to a host leave message. + Values can range from 1 to 5. The default is 2. + required: false + default: null + group_timeout: + description: + - Sets the group membership timeout for IGMPv2. + Values can range from 3 to 65,535 seconds. + The default is 260 seconds. + required: false + default: null + report_llg: + description: + - Configures report-link-local-groups. + Enables sending reports for groups in 224.0.0.0/24. + Reports are always sent for nonlink local groups. + By default, reports are not sent for link local groups. + required: false + choices: ['true', 'false'] + default: false + immediate_leave: + description: + - Enables the device to remove the group entry from the multicast + routing table immediately upon receiving a leave message for + the group. Use this command to minimize the leave latency of + IGMPv2 group memberships on a given IGMP interface because the + device does not send group-specific queries. + The default is disabled. + required: false + choices: ['true', 'false'] + default: false + oif_routemap: + description: + - Configure a routemap for static outgoing interface (OIF). + required: false + default: null + oif_prefix: + description: + - Configure a prefix for static outgoing interface (OIF). + required: false + default: null + oif_source: + description: + - Configure a source for static outgoing interface (OIF). + required: false + default: null + restart: + description: + - Restart IGMP. + required: false + choices: ['true', 'false'] + default: null + state: + description: + - Manages desired state of the resource. 
+ required: false + default: present + choices: ['present', 'default'] +''' +EXAMPLES = ''' +- nxos_igmp_interface: + interface: ethernet1/32 + startup_query_interval: 30 + state: present + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"asn": "65535", "router_id": "1.1.1.1", "vrf": "test"} +existing: + description: k/v pairs of existing BGP configuration + type: dict + sample: {"asn": "65535", "bestpath_always_compare_med": false, + "bestpath_aspath_multipath_relax": false, + "bestpath_compare_neighborid": false, + "bestpath_compare_routerid": false, + "bestpath_cost_community_ignore": false, + "bestpath_med_confed": false, + "bestpath_med_missing_as_worst": false, + "bestpath_med_non_deterministic": false, "cluster_id": "", + "confederation_id": "", "confederation_peers": "", + "graceful_restart": true, "graceful_restart_helper": false, + "graceful_restart_timers_restart": "120", + "graceful_restart_timers_stalepath_time": "300", "local_as": "", + "log_neighbor_changes": false, "maxas_limit": "", + "neighbor_down_fib_accelerate": false, "reconnect_interval": "60", + "router_id": "11.11.11.11", "suppress_fib_pending": false, + "timer_bestpath_limit": "", "timer_bgp_hold": "180", + "timer_bgp_keepalive": "60", "vrf": "test"} +end_state: + description: k/v pairs of BGP configuration after module execution + returned: always + type: dict + sample: {"asn": "65535", "bestpath_always_compare_med": false, + "bestpath_aspath_multipath_relax": false, + "bestpath_compare_neighborid": false, + "bestpath_compare_routerid": false, + "bestpath_cost_community_ignore": false, + "bestpath_med_confed": false, + "bestpath_med_missing_as_worst": false, + "bestpath_med_non_deterministic": false, "cluster_id": "", + "confederation_id": "", "confederation_peers": "", + "graceful_restart": true, "graceful_restart_helper": 
false, + "graceful_restart_timers_restart": "120", + "graceful_restart_timers_stalepath_time": "300", "local_as": "", + "log_neighbor_changes": false, "maxas_limit": "", + "neighbor_down_fib_accelerate": false, "reconnect_interval": "60", + "router_id": "1.1.1.1", "suppress_fib_pending": false, + "timer_bestpath_limit": "", "timer_bgp_hold": "180", + "timer_bgp_keepalive": "60", "vrf": "test"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["router bgp 65535", "vrf test", "router-id 1.1.1.1"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import json +import collections + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + 
raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + 
module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when issuing commands containing 'show run'. + """ + if 'xml' in response[0]: + body = [] + elif 'show run' in command: + body = response + else: + try: + response = response[0].replace(command + '\n\n', '').strip() + body = [json.loads(response)] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + command += ' | json' + 
cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def get_interface_mode(interface, intf_type, module): + command = 'show interface {0}'.format(interface) + interface = {} + mode = 'unknown' + + if intf_type in ['ethernet', 'portchannel']: + body = execute_show_command(command, module)[0] + interface_table = body['TABLE_interface']['ROW_interface'] + mode = str(interface_table.get('eth_mode', 'layer3')) + if mode == 'access' or mode == 'trunk': + mode = 'layer2' + elif intf_type == 'loopback' or intf_type == 'svi': + mode = 'layer3' + return mode + + +def get_interface_type(interface): + if interface.upper().startswith('ET'): + return 'ethernet' + elif interface.upper().startswith('VL'): + return 'svi' + elif interface.upper().startswith('LO'): + return 'loopback' + elif interface.upper().startswith('MG'): + return 'management' + elif interface.upper().startswith('MA'): + return 'management' + elif interface.upper().startswith('PO'): + return 'portchannel' + else: + return 'unknown' + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = value + else: + new_dict[new_key] = value + return new_dict + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def get_igmp_interface(module, interface): + command = 'show ip igmp interface {0}'.format(interface) + igmp = {} + + key_map = { + 'IGMPVersion': 'version', + 'ConfiguredStartupQueryInterval': 'startup_query_interval', + 'StartupQueryCount': 'startup_query_count', + 'RobustnessVariable': 
'robustness', + 'QuerierTimeout': 'querier_timeout', + 'ConfiguredMaxResponseTime': 'query_mrt', + 'ConfiguredQueryInterval': 'query_interval', + 'LastMemberMTR': 'last_member_qrt', + 'LastMemberQueryCount': 'last_member_query_count', + 'ConfiguredGroupTimeout': 'group_timeout' + } + + body = execute_show_command(command, module)[0] + + if body: + resource = body['TABLE_vrf']['ROW_vrf']['TABLE_if']['ROW_if'] + igmp = apply_key_map(key_map, resource) + report_llg = str(resource['ReportingForLinkLocal']) + if report_llg == 'true': + igmp['report_llg'] = True + elif report_llg == 'false': + igmp['report_llg'] = False + + immediate_leave = str(resource['ImmediateLeave']) # returns en or dis + if immediate_leave == 'en': + igmp['immediate_leave'] = True + elif immediate_leave == 'dis': + igmp['immediate_leave'] = False + + # the next block of code is used to retrieve anything with: + # ip igmp static-oif *** i.e.. could be route-map ROUTEMAP + # or PREFIX source , etc. + command = 'show run interface {0} | inc oif'.format(interface) + + body = execute_show_command( + command, module, command_type='cli_show_ascii')[0] + + staticoif = [] + if body: + split_body = body.split('\n') + route_map_regex = ('.*ip igmp static-oif route-map\s+' + '(?P\S+).*') + prefix_source_regex = ('.*ip igmp static-oif\s+(?P' + '((\d+.){3}\d+))(\ssource\s' + '(?P\S+))?.*') + + for line in split_body: + temp = {} + try: + match_route_map = re.match(route_map_regex, line, re.DOTALL) + route_map = match_route_map.groupdict()['route_map'] + except AttributeError: + route_map = '' + + try: + match_prefix_source = re.match( + prefix_source_regex, line, re.DOTALL) + prefix_source_group = match_prefix_source.groupdict() + prefix = prefix_source_group['prefix'] + source = prefix_source_group['source'] + except AttributeError: + prefix = '' + source = '' + + if route_map: + temp['route_map'] = route_map + if prefix: + temp['prefix'] = prefix + if source: + temp['source'] = source + if temp: + 
staticoif.append(temp) + + igmp['oif_routemap'] = None + igmp['oif_prefix_source'] = [] + + if staticoif: + if len(staticoif) == 1 and staticoif[0].get('route_map'): + igmp['oif_routemap'] = staticoif[0]['route_map'] + else: + igmp['oif_prefix_source'] = staticoif + + return igmp + + +def config_igmp_interface(delta, found_both, found_prefix): + CMDS = { + 'version': 'ip igmp version {0}', + 'startup_query_interval': 'ip igmp startup-query-interval {0}', + 'startup_query_count': 'ip igmp startup-query-count {0}', + 'robustness': 'ip igmp robustness-variable {0}', + 'querier_timeout': 'ip igmp querier-timeout {0}', + 'query_mrt': 'ip igmp query-max-response-time {0}', + 'query_interval': 'ip igmp query-interval {0}', + 'last_member_qrt': 'ip igmp last-member-query-response-time {0}', + 'last_member_query_count': 'ip igmp last-member-query-count {0}', + 'group_timeout': 'ip igmp group-timeout {0}', + 'report_llg': 'ip igmp report-link-local-groups', + 'immediate_leave': 'ip igmp immediate-leave', + 'oif_prefix_source': 'ip igmp static-oif {0} source {1} ', + 'oif_routemap': 'ip igmp static-oif route-map {0}', + 'oif_prefix': 'ip igmp static-oif {0}', + } + + commands = [] + command = None + + for key, value in delta.iteritems(): + if key == 'oif_source' or found_both or found_prefix: + pass + elif key == 'oif_prefix': + if delta.get('oif_source'): + command = CMDS.get('oif_prefix_source').format( + delta.get('oif_prefix'), delta.get('oif_source')) + else: + command = CMDS.get('oif_prefix').format( + delta.get('oif_prefix')) + elif value: + command = CMDS.get(key).format(value) + elif not value: + command = 'no {0}'.format(CMDS.get(key).format(value)) + + if command: + if command not in commands: + commands.append(command) + command = None + + return commands + + +def get_igmp_interface_defaults(): + version = '2' + startup_query_interval = '31' + startup_query_count = '2' + robustness = '2' + querier_timeout = '255' + query_mrt = '10' + query_interval = '125' + 
last_member_qrt = '1' + last_member_query_count = '2' + group_timeout = '260' + report_llg = False + immediate_leave = False + + args = dict(version=version, startup_query_interval=startup_query_interval, + startup_query_count=startup_query_count, robustness=robustness, + querier_timeout=querier_timeout, query_mrt=query_mrt, + query_interval=query_interval, last_member_qrt=last_member_qrt, + last_member_query_count=last_member_query_count, + group_timeout=group_timeout, report_llg=report_llg, + immediate_leave=immediate_leave) + + default = dict((param, value) for (param, value) in args.iteritems() + if value is not None) + + return default + + +def config_default_igmp_interface(existing, delta, found_both, found_prefix): + commands = [] + proposed = get_igmp_interface_defaults() + delta = dict(set(proposed.iteritems()).difference(existing.iteritems())) + if delta: + command = config_igmp_interface(delta, found_both, found_prefix) + + if command: + for each in command: + commands.append(each) + + return commands + + +def config_remove_oif(existing, existing_oif_prefix_source): + commands = [] + command = None + if existing.get('routemap'): + command = 'no ip igmp static-oif route-map {0}'.format( + existing.get('routemap')) + if existing_oif_prefix_source: + for each in existing_oif_prefix_source: + if each.get('prefix') and each.get('source'): + command = 'no ip igmp static-oif {0} source {1} '.format( + each.get('prefix'), each.get('source') + ) + elif each.get('prefix'): + command = 'no ip igmp static-oif {0}'.format( + each.get('prefix') + ) + if command: + commands.append(command) + command = None + + return commands + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + 
module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def main(): + argument_spec = dict( + interface=dict(required=True, type='str'), + version=dict(required=False, type='str'), + startup_query_interval=dict(required=False, type='str'), + startup_query_count=dict(required=False, type='str'), + robustness=dict(required=False, type='str'), + querier_timeout=dict(required=False, type='str'), + query_mrt=dict(required=False, type='str'), + query_interval=dict(required=False, type='str'), + last_member_qrt=dict(required=False, type='str'), + last_member_query_count=dict(required=False, type='str'), + group_timeout=dict(required=False, type='str'), + report_llg=dict(type='bool'), + immediate_leave=dict(type='bool'), + oif_routemap=dict(required=False, type='str'), + oif_prefix=dict(required=False, type='str'), + oif_source=dict(required=False, type='str'), + restart=dict(type='bool', default=False), + state=dict(choices=['present', 'absent', 'default'], + default='present'), + include_defaults=dict(default=True), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + state = module.params['state'] + interface = module.params['interface'] + oif_prefix = module.params['oif_prefix'] + oif_source = module.params['oif_source'] + oif_routemap = module.params['oif_routemap'] + + if oif_source: + if not oif_prefix: + module.fail_json(msg='oif_prefix required when setting oif_source') + + intf_type = get_interface_type(interface) + if get_interface_mode(interface, intf_type, module) == 'layer2': + module.fail_json(msg='this module only works on Layer 3 interfaces') + + if oif_prefix and oif_routemap: + module.fail_json(msg='cannot use oif_prefix AND oif_routemap.' 
+ ' select one.') + + existing = get_igmp_interface(module, interface) + existing_copy = existing.copy() + end_state = existing_copy + + if not existing.get('version'): + module.fail_json(msg='pim needs to be enabled on the interface') + + existing_oif_prefix_source = existing.get('oif_prefix_source') + # not json serializable + existing.pop('oif_prefix_source') + + if oif_routemap and existing_oif_prefix_source: + module.fail_json(msg='Delete static-oif configurations on this ' + 'interface if you want to use a routemap') + + if oif_prefix and existing.get('oif_routemap'): + module.fail_json(msg='Delete static-oif route-map configuration ' + 'on this interface if you want to config ' + 'static entries') + + args = [ + 'version', + 'startup_query_interval', + 'startup_query_count', + 'robustness', + 'querier_timeout', + 'query_mrt', + 'query_interval', + 'last_member_qrt', + 'last_member_query_count', + 'group_timeout', + 'report_llg', + 'immediate_leave', + 'oif_routemap', + 'oif_prefix', + 'oif_source' + ] + + changed = False + commands = [] + proposed = dict((k, v) for k, v in module.params.iteritems() + if v is not None and k in args) + + CANNOT_ABSENT = ['version', 'startup_query_interval', + 'startup_query_count', 'robustness', 'querier_timeout', + 'query_mrt', 'query_interval', 'last_member_qrt', + 'last_member_query_count', 'group_timeout', 'report_llg', + 'immediate_leave'] + + if state == 'absent': + for each in CANNOT_ABSENT: + if each in proposed: + module.fail_json(msg='only params: oif_prefix, oif_source, ' + 'oif_routemap can be used when ' + 'state=absent') + + # delta check for all params except oif_prefix and oif_source + delta = dict(set(proposed.iteritems()).difference(existing.iteritems())) + + # now check to see there is a delta for prefix and source command option + found_both = False + found_prefix = False + + if existing_oif_prefix_source: + if oif_prefix and oif_source: + for each in existing_oif_prefix_source: + if (oif_prefix == 
each.get('prefix') and + oif_source == each.get('source')): + found_both = True + if not found_both: + delta['prefix'] = oif_prefix + delta['source'] = oif_source + elif oif_prefix: + for each in existing_oif_prefix_source: + if oif_prefix == each.get('prefix') and not each.get('source'): + found_prefix = True + if not found_prefix: + delta['prefix'] = oif_prefix + + if state == 'present': + if delta: + command = config_igmp_interface(delta, found_both, found_prefix) + if command: + commands.append(command) + + elif state == 'default': + command = config_default_igmp_interface(existing, delta, + found_both, found_prefix) + if command: + commands.append(command) + elif state == 'absent': + command = None + if existing.get('oif_routemap') or existing_oif_prefix_source: + command = config_remove_oif(existing, existing_oif_prefix_source) + + if command: + commands.append(command) + + command = config_default_igmp_interface(existing, delta, + found_both, found_prefix) + if command: + commands.append(command) + + if module.params['restart']: + commands.append('restart igmp') + + cmds = [] + results = {} + if commands: + commands.insert(0, ['interface {0}'.format(interface)]) + cmds = flatten_list(commands) + + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + execute_config_command(cmds, module) + changed = True + end_state = get_igmp_interface(module, interface) + if 'configure' in cmds: + cmds.pop(0) + + results['proposed'] = proposed + results['existing'] = existing_copy + results['updates'] = cmds + results['changed'] = changed + results['end_state'] = end_state + + module.exit_json(**results) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/network/nxos/nxos_igmp_snooping.py b/network/nxos/nxos_igmp_snooping.py new file mode 100644 index 00000000000..7044adecd16 --- /dev/null +++ b/network/nxos/nxos_igmp_snooping.py @@ -0,0 +1,556 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is 
free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_igmp_snooping +version_added: "2.2" +short_description: Manages IGMP snooping global configuration. +description: + - Manages IGMP snooping global configuration. +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +extends_documentation_fragment: nxos +notes: + - When C(state=default), params will be reset to a default state. + - C(group_timeout) also accepts I(never) as an input. +options: + snooping: + description: + - Enables/disables IGMP snooping on the switch. + required: false + default: null + choices: ['true', 'false'] + group_timeout: + description: + - Group membership timeout value for all VLANs on the device. + Accepted values are integer in range 1-10080, I(never) and + I(default). + required: false + default: null + link_local_grp_supp: + description: + - Global link-local groups suppression. + required: false + default: null + choices: ['true', 'false'] + report_supp: + description: + - Global IGMPv1/IGMPv2 Report Suppression. + required: false + default: null + v3_report_supp: + description: + - Global IGMPv3 Report Suppression and Proxy Reporting. + required: false + default: null + choices: ['true', 'false'] + state: + description: + - Manage the state of the resource. 
+ required: false + default: present + choices: ['present','default'] +''' + +EXAMPLES = ''' +# ensure igmp snooping params supported in this module are in there default state +- nxos_igmp_snooping: + state: default + host: inventory_hostname }} + username: un }} + password: pwd }} + +# ensure following igmp snooping params are in the desired state +- nxos_igmp_snooping: + group_timeout: never + snooping: true + link_local_grp_supp: false + optimize_mcast_flood: false + report_supp: true + v3_report_supp: true + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"group_timeout": "50", "link_local_grp_supp": true, + "report_supp": false, "snooping": false, "v3_report_supp": false} +existing: + description: + - k/v pairs of existing configuration + type: dict + sample: {"group_timeout": "never", "link_local_grp_supp": false, + "report_supp": true, "snooping": true, "v3_report_supp": true} +end_state: + description: k/v pairs of configuration after module execution + returned: always + type: dict + sample: {"group_timeout": "50", "link_local_grp_supp": true, + "report_supp": false, "snooping": false, "v3_report_supp": false} +updates: + description: command sent to the device + returned: always + type: list + sample: ["ip igmp snooping link-local-groups-suppression", + "ip igmp snooping group-timeout 50", + "no ip igmp snooping report-suppression", + "no ip igmp snooping v3-report-suppression", + "no ip igmp snooping"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from 
ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + 
if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when issuing commands containing 'show run'. 
+ """ + if 'xml' in response[0]: + body = [] + elif 'show run' in command: + body = response + else: + try: + if isinstance(response[0], str): + response = response[0].replace(command + '\n\n', '').strip() + body = [json.loads(response[0])] + else: + body = response + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'show run' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + 
module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_group_timeout(config): + command = 'ip igmp snooping group-timeout' + REGEX = re.compile(r'(?:{0}\s)(?P.*)$'.format(command), re.M) + value = '' + if command in config: + value = REGEX.search(config).group('value') + return value + + +def get_snooping(config): + REGEX = re.compile(r'{0}$'.format('no ip igmp snooping'), re.M) + value = False + try: + if REGEX.search(config): + value = False + except TypeError: + value = True + return value + + +def get_igmp_snooping(module): + command = 'show run all | include igmp.snooping' + existing = {} + body = execute_show_command( + command, module, command_type='cli_show_ascii')[0] + + if body: + split_body = body.splitlines() + + if 'no ip igmp snooping' in split_body: + existing['snooping'] = False + else: + existing['snooping'] = True + + if 'no ip igmp snooping report-suppression' in split_body: + existing['report_supp'] = False + elif 'ip igmp snooping report-suppression' in split_body: + existing['report_supp'] = True + + if 'no ip igmp snooping link-local-groups-suppression' in split_body: + existing['link_local_grp_supp'] = False + elif 'ip igmp snooping link-local-groups-suppression' in split_body: + existing['link_local_grp_supp'] = True + + if 'ip igmp snooping v3-report-suppression' in split_body: + existing['v3_report_supp'] = True + else: + existing['v3_report_supp'] = False + + existing['group_timeout'] = get_group_timeout(body) + + return existing + + +def config_igmp_snooping(delta, existing, default=False): + CMDS = { + 'snooping': 'ip igmp snooping', + 'group_timeout': 'ip igmp snooping group-timeout {}', + 
'link_local_grp_supp': 'ip igmp snooping link-local-groups-suppression', + 'v3_report_supp': 'ip igmp snooping v3-report-suppression', + 'report_supp': 'ip igmp snooping report-suppression' + } + + commands = [] + command = None + for key, value in delta.iteritems(): + if value: + if default and key == 'group_timeout': + if existing.get(key): + command = 'no ' + CMDS.get(key).format(existing.get(key)) + else: + command = CMDS.get(key).format(value) + else: + command = 'no ' + CMDS.get(key).format(value) + + if command: + commands.append(command) + command = None + + return commands + + +def get_igmp_snooping_defaults(): + group_timeout = 'dummy' + report_supp = True + link_local_grp_supp = True + v3_report_supp = False + snooping = True + + args = dict(snooping=snooping, link_local_grp_supp=link_local_grp_supp, + report_supp=report_supp, v3_report_supp=v3_report_supp, + group_timeout=group_timeout) + + default = dict((param, value) for (param, value) in args.iteritems() + if value is not None) + + return default + + +def main(): + argument_spec = dict( + snooping=dict(required=False, type='bool'), + group_timeout=dict(required=False, type='str'), + link_local_grp_supp=dict(required=False, type='bool'), + report_supp=dict(required=False, type='bool'), + v3_report_supp=dict(required=False, type='bool'), + state=dict(choices=['present', 'default'], default='present'), + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + snooping = module.params['snooping'] + link_local_grp_supp = module.params['link_local_grp_supp'] + report_supp = module.params['report_supp'] + v3_report_supp = module.params['v3_report_supp'] + group_timeout = module.params['group_timeout'] + state = module.params['state'] + + args = dict(snooping=snooping, link_local_grp_supp=link_local_grp_supp, + report_supp=report_supp, v3_report_supp=v3_report_supp, + group_timeout=group_timeout) + + proposed = dict((param, value) for (param, value) in args.iteritems() + 
if value is not None) + + existing = get_igmp_snooping(module) + end_state = existing + changed = False + + commands = [] + if state == 'present': + delta = dict( + set(proposed.iteritems()).difference(existing.iteritems()) + ) + if delta: + command = config_igmp_snooping(delta, existing) + if command: + commands.append(command) + elif state == 'default': + proposed = get_igmp_snooping_defaults() + delta = dict( + set(proposed.iteritems()).difference(existing.iteritems()) + ) + if delta: + command = config_igmp_snooping(delta, existing, default=True) + if command: + commands.append(command) + + cmds = flatten_list(commands) + results = {} + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + end_state = get_igmp_snooping(module) + if 'configure' in cmds: + cmds.pop(0) + + results['proposed'] = proposed + results['existing'] = existing + results['updates'] = cmds + results['changed'] = changed + results['end_state'] = end_state + + module.exit_json(**results) + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_install_os.py b/network/nxos/nxos_install_os.py new file mode 100644 index 00000000000..aa0c0505437 --- /dev/null +++ b/network/nxos/nxos_install_os.py @@ -0,0 +1,420 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_install_os +short_description: Set boot options like boot image and kickstart image. +description: + - Install an operating system by setting the boot options like boot + image and kickstart image. +notes: + - The module will fail due to timeout issues, but the install will go on + anyway. Ansible's block and rescue can be leveraged to handle this kind + of failure and check actual module results. See EXAMPLE for more about + this. The first task on the rescue block is needed to make sure the + device has completed all checks and it started to reboot. The second + task is needed to wait for the device to come back up. The last two tasks + are used to verify the installation process was successful. + - Do not include full file paths, just the name of the file(s) stored on + the top level flash directory. + - You must know if your platform supports taking a kickstart image as a + parameter. If supplied but not supported, errors may occur. + - This module attempts to install the software immediately, + which may trigger a reboot. + - In check mode, the module tells you if the current boot images are set + to the desired images. +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbibo (@GGabriele) +version_added: 2.2 +options: + system_image_file: + description: + - Name of the system (or combined) image file on flash. + required: true + kickstart_image_file: + description: + - Name of the kickstart image file on flash. 
+ required: false + default: null +''' + +EXAMPLES = ''' +- block: + - name: Install OS + nxos_install_os: + system_image_file: nxos.7.0.3.I2.2d.bin + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + transport: nxapi + rescue: + - name: Wait for device to perform checks + wait_for: + port: 22 + state: stopped + timeout: 300 + delay: 60 + host: "{{ inventory_hostname }}" + - name: Wait for device to come back up + wait_for: + port: 22 + state: started + timeout: 300 + delay: 60 + host: "{{ inventory_hostname }}" + - name: Check installed OS + nxos_command: + commands: + - show version + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" + transport: nxapi + register: output + - assert: + that: + - output['stdout'][0]['kickstart_ver_str'] == '7.0(3)I4(1)' +''' + +RETURN = ''' +install_state: + returned: always + type: dictionary + sample: { + "kick": "n5000-uk9-kickstart.7.2.1.N1.1.bin", + "sys": "n5000-uk9.7.2.1.N1.1.bin", + "status": "This is the log of last installation.\n + Continuing with installation process, please wait.\n + The login will be disabled until the installation is completed.\n + Performing supervisor state verification. 
\n + SUCCESS\n + Supervisor non-disruptive upgrade successful.\n + Install has been successful.\n", + } +''' + + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = 
index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending 
{0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show_ascii'): + cmds = [command] + if module.params['transport'] == 'cli': + body = execute_show(cmds, module) + elif module.params['transport'] == 'nxapi': + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_boot_options(module): + """Get current boot variables + like system image and kickstart image. + Returns: + A dictionary, e.g. 
{ 'kick': router_kick.img, 'sys': 'router_sys.img'} + """ + command = 'show boot' + body = execute_show_command(command, module)[0] + boot_options_raw_text = body.split('Boot Variables on next reload')[1] + + if 'kickstart' in boot_options_raw_text: + kick_regex = r'kickstart variable = bootflash:/(\S+)' + sys_regex = r'system variable = bootflash:/(\S+)' + + kick = re.search(kick_regex, boot_options_raw_text).group(1) + sys = re.search(sys_regex, boot_options_raw_text).group(1) + retdict = dict(kick=kick, sys=sys) + else: + nxos_regex = r'NXOS variable = bootflash:/(\S+)' + nxos = re.search(nxos_regex, boot_options_raw_text).group(1) + retdict = dict(sys=nxos) + + command = 'show install all status' + retdict['status'] = execute_show_command(command, module)[0] + + return retdict + + +def already_set(current_boot_options, system_image_file, kickstart_image_file): + return current_boot_options.get('sys') == system_image_file \ + and current_boot_options.get('kick') == kickstart_image_file + + +def set_boot_options(module, image_name, kickstart=None): + """Set boot variables + like system image and kickstart image. + Args: + The main system image file name. + Keyword Args: many implementors may choose + to supply a kickstart parameter to specify a kickstart image. 
+ """ + commands = ['terminal dont-ask'] + if kickstart is None: + commands.append('install all nxos %s' % image_name) + else: + commands.append( + 'install all system %s kickstart %s' % (image_name, kickstart)) + execute_config_command(commands, module) + + +def main(): + argument_spec = dict( + system_image_file=dict(required=True), + kickstart_image_file=dict(required=False), + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + system_image_file = module.params['system_image_file'] + kickstart_image_file = module.params['kickstart_image_file'] + + if kickstart_image_file == 'null': + kickstart_image_file = None + + current_boot_options = get_boot_options(module) + changed = False + if not already_set(current_boot_options, + system_image_file, + kickstart_image_file): + changed = True + + if not module.check_mode and changed == True: + set_boot_options(module, + system_image_file, + kickstart=kickstart_image_file) + + if not already_set(install_state, + system_image_file, + kickstart_image_file): + module.fail_json(msg='Install not successful', + install_state=install_state) + else: + install_state = current_boot_options + + module.exit_json(changed=changed, install_state=install_state) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_interface.py b/network/nxos/nxos_interface.py new file mode 100644 index 00000000000..b65fb9d3109 --- /dev/null +++ b/network/nxos/nxos_interface.py @@ -0,0 +1,970 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_interface +version_added: "2.1" +short_description: Manages physical attributes of interfaces. +description: + - Manages physical attributes of interfaces of NX-OS switches. +author: Jason Edelman (@jedelman8) +notes: + - This module is also used to create logical interfaces such as + svis and loopbacks. + - Be cautious of platform specific idiosyncrasies. For example, + when you default a loopback interface, the admin state toggles + on certain versions of NX-OS. + - The M(nxos_overlay_global) C(anycast_gateway_mac) attribute must be + set before setting the C(fabric_forwarding_anycast_gateway) property. +options: + interface: + description: + - Full name of interface, i.e. Ethernet1/1, port-channel10. + required: true + default: null + interface_type: + description: + - Interface type to be unconfigured from the device. + required: false + default: null + choices: ['loopback', 'portchannel', 'svi', 'nve'] + version_added: "2.2" + admin_state: + description: + - Administrative state of the interface. + required: false + default: up + choices: ['up','down'] + description: + description: + - Interface description. + required: false + default: null + mode: + description: + - Manage Layer 2 or Layer 3 state of the interface. + required: false + default: null + choices: ['layer2','layer3'] + ip_forward: + description: + - Enable/Disable ip forward feature on SVIs. + required: false + default: null + choices: ['enable','disable'] + version_added: "2.2" + fabric_forwarding_anycast_gateway: + description: + - Associate SVI with anycast gateway under VLAN configuration mode. 
+ required: false + default: null + choices: ['true','false'] + version_added: "2.2" + state: + description: + - Specify desired state of the resource. + required: true + default: present + choices: ['present','absent','default'] +''' + +EXAMPLES = ''' +- name Ensure an interface is a Layer 3 port and that it has the proper description + nxos_interface: + interface: Ethernet1/1 + description: 'Configured by Ansible' + mode: layer3 + host: 68.170.147.165 + +- name Admin down an interface + nxos_interface: + interface: Ethernet2/1 + host: 68.170.147.165 + admin_state: down + +- name Remove all loopback interfaces + nxos_interface: + interface: loopback + state: absent + host: 68.170.147.165 + +- name Remove all logical interfaces + nxos_interface: + interface_type: "{{ item }} " + state: absent + host: "{{ inventory_hostname }}" + + with_items: + - loopback + - portchannel + - svi + - nve +- name Admin up all ethernet interfaces + nxos_interface: + interface: ethernet + host: 68.170.147.165 + admin_state: up + +- name Admin down ALL interfaces (physical and logical) + nxos_interface: + interface: all + host: 68.170.147.165 + admin_state: down +''' +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"admin_state": "down"} +existing: + description: k/v pairs of existing switchport + type: dict + sample: {"admin_state": "up", "description": "None", + "interface": "port-channel101", "mode": "layer2", + "type": "portchannel", "ip_forward": "enable"} +end_state: + description: k/v pairs of switchport after module execution + returned: always + type: dict or null + sample: {"admin_state": "down", "description": "None", + "interface": "port-channel101", "mode": "layer2", + "type": "portchannel", "ip_forward": "enable"} +updates: + description: command list sent to the device + returned: always + type: list + sample: ["interface port-channel101", "shutdown"] +changed: + description: check to see if a 
change was made on the device + returned: always + type: boolean + sample: true +''' + +import json + +# COMMON CODE FOR MIGRATION + +import ansible.module_utils.nxos +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.network import NetworkModule +from ansible.module_utils.shell import ShellError + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = 
p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def is_default_interface(interface, module): + """Checks to see if interface exists and if it is a default config + Args: + interface (str): full name of interface, i.e. vlan10, + Ethernet1/1, loopback10 + Returns: + True: if interface has default config + False: if it does not have a default config + DNE (str): if the interface does not exist - loopbacks, SVIs, etc. 
+ """ + command = 'show run interface ' + interface + + try: + body = execute_show_command(command, module, + command_type='cli_show_ascii')[0] + except IndexError: + body = '' + + if body: + raw_list = body.split('\n') + found = False + for line in raw_list: + if line.startswith('interface'): + found = True + if found and line and not line.startswith('interface'): + return False + return True + + else: + return 'DNE' + + +def get_interface_type(interface): + """Gets the type of interface + Args: + interface (str): full name of interface, i.e. Ethernet1/1, loopback10, + port-channel20, vlan20 + Returns: + type of interface: ethernet, svi, loopback, management, portchannel, + or unknown + """ + if interface.upper().startswith('ET'): + return 'ethernet' + elif interface.upper().startswith('VL'): + return 'svi' + elif interface.upper().startswith('LO'): + return 'loopback' + elif interface.upper().startswith('MG'): + return 'management' + elif interface.upper().startswith('MA'): + return 'management' + elif interface.upper().startswith('PO'): + return 'portchannel' + elif interface.upper().startswith('NV'): + return 'nve' + else: + return 'unknown' + + +def get_manual_interface_attributes(interface, module): + """Gets admin state and description of a SVI interface. Hack due to API. + Args: + interface (str): full name of SVI interface, i.e. 
vlan10 + Returns: + dictionary that has two k/v pairs: admin_state & description + if not an svi, returns None + """ + + if get_interface_type(interface) == 'svi': + command = 'show interface ' + interface + try: + body = execute_modified_show_for_cli_text(command, module)[0] + except (IndexError, ShellError): + return None + + command_list = body.split('\n') + desc = None + admin_state = 'up' + for each in command_list: + if 'Description:' in each: + line = each.split('Description:') + desc = line[1].strip().split('MTU')[0].strip() + elif 'Administratively down' in each: + admin_state = 'down' + + return dict(description=desc, admin_state=admin_state) + else: + return None + + +def get_interface(intf, module): + """Gets current config/state of interface + Args: + intf (string): full name of interface, i.e. Ethernet1/1, loopback10, + port-channel20, vlan20 + Returns: + dictionary that has relevant config/state data about the given + interface based on the type of interface it is + """ + base_key_map = { + 'interface': 'interface', + 'admin_state': 'admin_state', + 'desc': 'description', + } + mode_map = { + 'eth_mode': 'mode' + } + loop_map = { + 'state': 'admin_state' + } + svi_map = { + 'svi_admin_state': 'admin_state', + 'desc': 'description' + } + mode_value_map = { + "mode": { + "access": "layer2", + "trunk": "layer2", + "routed": "layer3", + "layer3": "layer3" + } + } + + key_map = {} + interface = {} + + command = 'show interface ' + intf + try: + body = execute_show_command(command, module)[0] + except IndexError: + body = [] + + if body: + interface_table = body['TABLE_interface']['ROW_interface'] + intf_type = get_interface_type(intf) + if intf_type in ['portchannel', 'ethernet']: + if not interface_table.get('eth_mode'): + interface_table['eth_mode'] = 'layer3' + + if intf_type == 'ethernet': + key_map.update(base_key_map) + key_map.update(mode_map) + temp_dict = apply_key_map(key_map, interface_table) + temp_dict = apply_value_map(mode_value_map, 
temp_dict) + interface.update(temp_dict) + + elif intf_type == 'svi': + key_map.update(svi_map) + temp_dict = apply_key_map(key_map, interface_table) + interface.update(temp_dict) + attributes = get_manual_interface_attributes(intf, module) + interface['admin_state'] = str(attributes.get('admin_state', + 'nxapibug')) + interface['description'] = str(attributes.get('description', + 'nxapi_bug')) + command = 'show run interface ' + intf + body = execute_show_command(command, module, + command_type='cli_show_ascii')[0] + if 'ip forward' in body: + interface['ip_forward'] = 'enable' + else: + interface['ip_forward'] = 'disable' + if 'fabric forwarding mode anycast-gateway' in body: + interface['fabric_forwarding_anycast_gateway'] = True + else: + interface['fabric_forwarding_anycast_gateway'] = False + + elif intf_type == 'loopback': + key_map.update(base_key_map) + key_map.pop('admin_state') + key_map.update(loop_map) + temp_dict = apply_key_map(key_map, interface_table) + if not temp_dict.get('description'): + temp_dict['description'] = "None" + interface.update(temp_dict) + + elif intf_type == 'management': + key_map.update(base_key_map) + temp_dict = apply_key_map(key_map, interface_table) + interface.update(temp_dict) + + elif intf_type == 'portchannel': + key_map.update(base_key_map) + key_map.update(mode_map) + temp_dict = apply_key_map(key_map, interface_table) + temp_dict = apply_value_map(mode_value_map, temp_dict) + if not temp_dict.get('description'): + temp_dict['description'] = "None" + interface.update(temp_dict) + + elif intf_type == 'nve': + key_map.update(base_key_map) + temp_dict = apply_key_map(key_map, interface_table) + if not temp_dict.get('description'): + temp_dict['description'] = "None" + interface.update(temp_dict) + + interface['type'] = intf_type + + return interface + + +def get_intf_args(interface): + intf_type = get_interface_type(interface) + + arguments = ['admin_state', 'description'] + + if intf_type in ['ethernet', 'portchannel']: 
+ arguments.extend(['mode']) + if intf_type == 'svi': + arguments.extend(['ip_forward', 'fabric_forwarding_anycast_gateway']) + + return arguments + + +def get_interfaces_dict(module): + """Gets all active interfaces on a given switch + Returns: + dictionary with interface type (ethernet,svi,loop,portchannel) as the + keys. Each value is a list of interfaces of given interface (key) + type. + """ + command = 'show interface status' + try: + body = execute_show_command(command, module)[0] + except IndexError: + body = {} + + interfaces = { + 'ethernet': [], + 'svi': [], + 'loopback': [], + 'management': [], + 'portchannel': [], + 'nve': [], + 'unknown': [] + } + + interface_list = body.get('TABLE_interface')['ROW_interface'] + for index in interface_list: + intf = index ['interface'] + intf_type = get_interface_type(intf) + + interfaces[intf_type].append(intf) + + return interfaces + + +def normalize_interface(if_name): + """Return the normalized interface name + """ + def _get_number(if_name): + digits = '' + for char in if_name: + if char.isdigit() or char == '/': + digits += char + return digits + + if if_name.lower().startswith('et'): + if_type = 'Ethernet' + elif if_name.lower().startswith('vl'): + if_type = 'Vlan' + elif if_name.lower().startswith('lo'): + if_type = 'loopback' + elif if_name.lower().startswith('po'): + if_type = 'port-channel' + elif if_name.lower().startswith('nv'): + if_type = 'nve' + else: + if_type = None + + number_list = if_name.split(' ') + if len(number_list) == 2: + number = number_list[-1].strip() + else: + number = _get_number(if_name) + + if if_type: + proper_interface = if_type + number + else: + proper_interface = if_name + + return proper_interface + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = str(value) + else: + new_dict[new_key] = value + return new_dict + + +def 
apply_value_map(value_map, resource): + for key, value in value_map.items(): + resource[key] = value[resource.get(key)] + return resource + + +def get_interface_config_commands(interface, intf, existing): + """Generates list of commands to configure on device + Args: + interface (str): k/v pairs in the form of a set that should + be configured on the device + intf (str): full name of interface, i.e. Ethernet1/1 + Returns: + list: ordered list of commands to be sent to device + """ + + commands = [] + desc = interface.get('description') + if desc: + commands.append('description {0}'.format(desc)) + + mode = interface.get('mode') + if mode: + if mode == 'layer2': + command = 'switchport' + elif mode == 'layer3': + command = 'no switchport' + commands.append(command) + + admin_state = interface.get('admin_state') + if admin_state: + command = get_admin_state(interface, intf, admin_state) + commands.append(command) + + ip_forward = interface.get('ip_forward') + if ip_forward: + if ip_forward == 'enable': + commands.append('ip forward') + else: + commands.append('no ip forward') + + fabric_forwarding_anycast_gateway = interface.get( + 'fabric_forwarding_anycast_gateway') + if fabric_forwarding_anycast_gateway is not None: + if fabric_forwarding_anycast_gateway is True: + commands.append('fabric forwarding mode anycast-gateway') + elif fabric_forwarding_anycast_gateway is False: + commands.append('no fabric forwarding mode anycast-gateway') + + if commands: + commands.insert(0, 'interface ' + intf) + + return commands + + +def get_admin_state(interface, intf, admin_state): + if admin_state == 'up': + command = 'no shutdown' + elif admin_state == 'down': + command = 'shutdown' + return command + + +def get_proposed(existing, normalized_interface, args): + + # gets proper params that are allowed based on interface type + allowed_params = get_intf_args(normalized_interface) + + proposed = {} + + # retrieves proper interface params from args (user defined params) + for param 
in allowed_params: + temp = args.get(param) + if temp is not None: + proposed[param] = temp + + return proposed + + +def smart_existing(module, intf_type, normalized_interface): + + # 7K BUG MAY CAUSE THIS TO FAIL + + all_interfaces = get_interfaces_dict(module) + if normalized_interface in all_interfaces[intf_type]: + existing = get_interface(normalized_interface, module) + is_default = is_default_interface(normalized_interface, module) + else: + if intf_type == 'ethernet': + module.fail_json(msg='Invalid Ethernet interface provided.', + interface=normalized_interface) + elif intf_type in ['loopback', 'portchannel', 'svi', 'nve']: + existing = {} + is_default = 'DNE' + return existing, is_default + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. 
+ """ + if 'xml' in response[0]: + body = [] + elif 'show run' in command: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + + if module.params['transport'] == 'cli': + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def execute_modified_show_for_cli_text(command, module): + cmds = [command] + if module.params['transport'] == 'cli': + response = execute_show(cmds, module) + else: + response = execute_show(cmds, module, command_type='cli_show_ascii') + body = response + return body + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def 
get_interface_type_removed_cmds(interfaces): + commands = [] + + for interface in interfaces: + if interface != 'Vlan1': + commands.append('no interface {0}'.format(interface)) + + return commands + + +def main(): + + argument_spec = dict( + interface=dict(required=False,), + admin_state=dict(default='up', choices=['up', 'down'], required=False), + description=dict(required=False, default=None), + mode=dict(choices=['layer2', 'layer3'], required=False), + interface_type=dict(required=False, + choices=['loopback', 'portchannel', 'svi', 'nve']), + ip_forward=dict(required=False, choices=['enable', 'disable']), + fabric_forwarding_anycast_gateway=dict(required=False, type='bool'), + state=dict(choices=['absent', 'present', 'default'], + default='present', required=False), + include_defaults=dict(default=True), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + mutually_exclusive=[['interface', 'interface_type']], + supports_check_mode=True) + + interface = module.params['interface'] + interface_type = module.params['interface_type'] + admin_state = module.params['admin_state'] + description = module.params['description'] + mode = module.params['mode'] + ip_forward = module.params['ip_forward'] + fabric_forwarding_anycast_gateway = module.params['fabric_forwarding_anycast_gateway'] + state = module.params['state'] + + if interface: + interface = interface.lower() + intf_type = get_interface_type(interface) + normalized_interface = normalize_interface(interface) + + if normalized_interface == 'Vlan1' and state == 'absent': + module.fail_json(msg='ERROR: CANNOT REMOVE VLAN 1!') + + if intf_type == 'nve': + if description or mode: + module.fail_json(msg='description and mode params are not ' + 'supported in this module. 
Use ' + 'nxos_vxlan_vtep instead.') + if ((ip_forward or fabric_forwarding_anycast_gateway) and + intf_type != 'svi'): + module.fail_json(msg='The ip_forward and ' + 'fabric_forwarding_anycast_gateway features ' + ' are only available for SVIs.') + args = dict(interface=interface, admin_state=admin_state, + description=description, mode=mode, ip_forward=ip_forward, + fabric_forwarding_anycast_gateway=fabric_forwarding_anycast_gateway) + + if intf_type == 'unknown': + module.fail_json( + msg='unknown interface type found-1', + interface=interface) + + existing, is_default = smart_existing(module, intf_type, normalized_interface) + proposed = get_proposed(existing, normalized_interface, args) + else: + intf_type = normalized_interface = interface_type + proposed = dict(interface_type=interface_type) + + changed = False + commands = [] + if interface: + delta = dict() + + if state == 'absent': + if intf_type in ['svi', 'loopback', 'portchannel', 'nve']: + if is_default != 'DNE': + cmds = ['no interface {0}'.format(normalized_interface)] + commands.append(cmds) + elif intf_type in ['ethernet']: + if is_default is False: + cmds = ['default interface {0}'.format(normalized_interface)] + commands.append(cmds) + elif state == 'present': + if not existing: + cmds = get_interface_config_commands(proposed, + normalized_interface, + existing) + commands.append(cmds) + else: + delta = dict(set(proposed.iteritems()).difference( + existing.iteritems())) + if delta: + cmds = get_interface_config_commands(delta, + normalized_interface, + existing) + commands.append(cmds) + elif state == 'default': + if is_default is False: + cmds = ['default interface {0}'.format(normalized_interface)] + commands.append(cmds) + elif is_default == 'DNE': + module.exit_json(msg='interface you are trying to default does' + ' not exist') + elif interface_type: + if state == 'present': + module.fail_json(msg='The interface_type param can be used ' + 'only with state absent.') + + existing = 
get_interfaces_dict(module)[interface_type] + cmds = get_interface_type_removed_cmds(existing) + commands.append(cmds) + + cmds = flatten_list(commands) + end_state = existing + + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + execute_config_command(cmds, module) + changed = True + if module.params['interface']: + if delta.get('mode'): # or delta.get('admin_state'): + # if the mode changes from L2 to L3, the admin state + # seems to change after the API call, so adding a second API + # call to ensure it's in the desired state. + admin_state = delta.get('admin_state') or admin_state + c1 = 'interface {0}'.format(normalized_interface) + c2 = get_admin_state(delta, normalized_interface, admin_state) + cmds2 = [c1, c2] + execute_config_command(cmds2, module) + cmds.extend(cmds2) + end_state, is_default = smart_existing(module, intf_type, + normalized_interface) + else: + end_state = get_interfaces_dict(module)[interface_type] + cmds = [cmd for cmd in cmds if cmd != 'configure'] + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = cmds + results['changed'] = changed + + module.exit_json(**results) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/network/nxos/nxos_interface_ospf.py b/network/nxos/nxos_interface_ospf.py new file mode 100644 index 00000000000..9f63bf57a04 --- /dev/null +++ b/network/nxos/nxos_interface_ospf.py @@ -0,0 +1,681 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_interface_ospf +version_added: "2.2" +short_description: Manages configuration of an OSPF interface instance. +description: + - Manages configuration of an OSPF interface instance. +author: Gabriele Gerbino (@GGabriele) +extends_documentation_fragment: nxos +notes: + - Default, where supported, restores params default value. + - To remove an existing authentication configuration you should use + C(message_digest_key_id=default) plus all other options matching their + existing values. + - C(state=absent) removes the whole OSPF interface configuration. +options: + interface: + description: + - Name of this cisco_interface resource. Valid value is a string. + required: true + ospf: + description: + - Name of the ospf instance. + required: true + area: + description: + - Ospf area associated with this cisco_interface_ospf instance. + Valid values are a string, formatted as an IP address + (i.e. "0.0.0.0") or as an integer. + required: true + cost: + description: + - The cost associated with this cisco_interface_ospf instance. + required: false + default: null + hello_interval: + description: + - Time between sending successive hello packets. + Valid values are an integer or the keyword 'default'. + required: false + default: null + dead_interval: + description: + - Time interval an ospf neighbor waits for a hello + packet before tearing down adjacencies. Valid values are an + integer or the keyword 'default'. 
+ required: false + default: null + passive_interface: + description: + - Setting to true will prevent this interface from receiving + HELLO packets. Valid values are 'true' and 'false'. + required: false + choices: ['true','false'] + default: null + message_digest: + description: + - Enables or disables the usage of message digest authentication. + Valid values are 'true' and 'false'. + required: false + choices: ['true','false'] + default: null + message_digest_key_id: + description: + - Md5 authentication key-id associated with the ospf instance. + If this is present, message_digest_encryption_type, + message_digest_algorithm_type and message_digest_password are + mandatory. Valid value is an integer and 'default'. + required: false + default: null + message_digest_algorithm_type: + description: + - Algorithm used for authentication among neighboring routers + within an area. Valid values is 'md5'. + required: false + choices: ['md5'] + default: null + message_digest_encryption_type: + description: + - Specifies the scheme used for encrypting message_digest_password. + Valid values are '3des' or 'cisco_type_7' encryption. + required: false + choices: ['cisco_type_7','3des'] + default: null + message_digest_password: + description: + - Specifies the message_digest password. Valid value is a string. + required: false + default: null + state: + description: + - Determines whether the config should be present or not + on the device. 
def to_list(val):
    """Normalize ``val`` into a list.

    Lists and tuples are copied into a fresh list, ``None`` becomes an
    empty list, and any other single value is wrapped in a one-element
    list.
    """
    if val is None:
        return list()
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
def get_network_module(**kwargs):
    """Return a legacy ``get_module`` instance when that symbol exists,
    otherwise fall back to the newer ``NetworkModule`` implementation."""
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)

def get_config(module, include_defaults=False):
    """Fetch the device running-config (or the user-supplied ``config``
    module parameter) and wrap it in a ``CustomNetworkConfig`` parser."""
    config = module.params['config']
    if not config:
        try:
            config = module.get_config()
        except AttributeError:
            # Newer connection plugins expose the config via module.config.
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)

def load_config(module, candidate):
    """Diff ``candidate`` against the running config and push the missing
    commands to the device, honoring check mode and the ``save`` flag.

    Returns a dict with ``changed`` and, when commands were sent,
    ``updates`` (the list of CLI commands).
    """
    config = get_config(module)

    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            try:
                module.configure(commands)
            except AttributeError:
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = commands

    return result
# END OF COMMON CODE

# Parameters whose presence/absence in the config maps to True/False.
BOOL_PARAMS = [
    'passive_interface',
    'message_digest'
]
# Module parameter name -> NX-OS interface CLI command fragment.
PARAM_TO_COMMAND_KEYMAP = {
    'cost': 'ip ospf cost',
    'ospf': 'ip router ospf',
    'area': 'ip router ospf',
    'hello_interval': 'ip ospf hello-interval',
    'dead_interval': 'ip ospf dead-interval',
    'passive_interface': 'ip ospf passive-interface',
    'message_digest': 'ip ospf authentication message-digest',
    'message_digest_key_id': 'ip ospf message-digest-key',
    'message_digest_algorithm_type': 'ip ospf message-digest-key options',
    'message_digest_encryption_type': 'ip ospf message-digest-key options',
    'message_digest_password': 'ip ospf message-digest-key options',
}
# No parameter currently has a non-empty default on the device.
PARAM_TO_DEFAULT_KEYMAP = {
}


def invoke(name, *args, **kwargs):
    """Call the module-level function named ``name`` if it exists;
    return None otherwise."""
    func = globals().get(name)
    if func:
        return func(*args, **kwargs)


def get_custom_value(arg, config, module):
    """Extract the current value of ``arg`` from an interface ``config``
    block for parameters that need bespoke parsing (the ospf/area pair,
    the message-digest key fields, and passive-interface).

    Returns '' (or False for passive_interface) when not configured.
    """
    value = ''
    if arg == 'ospf':
        # 'ip router ospf <instance> area <area>' -> first token.
        REGEX = re.compile(r'(?:ip router ospf\s)(?P<value>.*)$', re.M)
        value = ''
        if 'ip router ospf' in config:
            parsed = REGEX.search(config).group('value').split()
            value = parsed[0]

    elif arg == 'area':
        # 'ip router ospf <instance> area <area>' -> third token.
        REGEX = re.compile(r'(?:ip router ospf\s)(?P<value>.*)$', re.M)
        value = ''
        if 'ip router ospf' in config:
            parsed = REGEX.search(config).group('value').split()
            value = parsed[2]

    elif arg.startswith('message_digest_'):
        # 'ip ospf message-digest-key <id> <algo> <enc> <password>'
        REGEX = re.compile(r'(?:ip ospf message-digest-key\s)(?P<value>.*)$', re.M)
        value = ''
        if 'ip ospf message-digest-key' in config:
            value_list = REGEX.search(config).group('value').split()
            if arg == 'message_digest_key_id':
                value = value_list[0]
            elif arg == 'message_digest_algorithm_type':
                value = value_list[1]
            elif arg == 'message_digest_encryption_type':
                value = value_list[2]
                # Device stores the encryption scheme as a digit.
                if value == '3':
                    value = '3des'
                elif value == '7':
                    value = 'cisco_type_7'
            elif arg == 'message_digest_password':
                value = value_list[3]

    elif arg == 'passive_interface':
        REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        NO_REGEX = re.compile(r'\s+no\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        value = False
        try:
            if NO_REGEX.search(config):
                value = False
            elif REGEX.search(config):
                value = True
        except TypeError:
            value = False

    return value


def get_value(arg, config, module):
    """Extract the current value of any supported ``arg`` from the
    interface ``config`` block, dispatching to get_custom_value() for the
    parameters that need special parsing and to a presence check for
    booleans."""
    custom = [
        'ospf',
        'area',
        'message_digest_key_id',
        'message_digest_algorithm_type',
        'message_digest_encryption_type',
        'message_digest_password',
        'passive_interface'
    ]

    if arg in custom:
        value = get_custom_value(arg, config, module)
    elif arg in BOOL_PARAMS:
        REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        value = False
        try:
            if REGEX.search(config):
                value = True
        except TypeError:
            value = False
    else:
        # Generic '<command> <value>' parameters.
        REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        value = ''
        if PARAM_TO_COMMAND_KEYMAP[arg] in config:
            value = REGEX.search(config).group('value')
    return value


def get_existing(module, args):
    """Read the interface section from the device config and return a dict
    of the current value for each parameter in ``args``.

    Values are only parsed when OSPF is configured on the interface.
    """
    existing = {}
    netcfg = get_config(module)
    parents = ['interface {0}'.format(module.params['interface'].capitalize())]
    config = netcfg.get_section(parents)
    if 'ospf' in config:
        for arg in args:
            if arg not in ['interface']:
                existing[arg] = get_value(arg, config, module)
    existing['interface'] = module.params['interface']
    return existing


def apply_key_map(key_map, table):
    """Return a copy of ``table`` with keys renamed through ``key_map``;
    keys that have no mapping are dropped."""
    new_dict = {}
    for key, value in table.items():
        new_key = key_map.get(key)
        if new_key:
            # The original branched on truthiness of value but assigned the
            # same thing in both arms; keep the net behavior.
            new_dict[new_key] = value
    return new_dict


def get_default_commands(existing, proposed, existing_commands, key, module):
    """Build the 'no ...' command(s) that return ``key`` to its default.

    For the md5 authentication key the full existing key specification has
    to be repeated after 'no', so it is reassembled from ``existing``.
    """
    commands = list()
    existing_value = existing_commands.get(key)
    if key.startswith('ip ospf message-digest-key'):
        check = False
        for param in ['message_digest_encryption_type',
                      'message_digest_algorithm_type',
                      'message_digest_password']:
            if existing[param] == proposed[param]:
                check = True
        if check:
            if existing['message_digest_encryption_type'] == '3des':
                encryption_type = '3'
            elif existing['message_digest_encryption_type'] == 'cisco_type_7':
                encryption_type = '7'
            # NOTE(review): encryption_type is unbound if the existing
            # encryption type is neither '3des' nor 'cisco_type_7' —
            # presumed unreachable for valid device output; confirm.
            command = 'no {0} {1} {2} {3} {4}'.format(
                key,
                existing['message_digest_key_id'],
                existing['message_digest_algorithm_type'],
                encryption_type,
                existing['message_digest_password'])
            commands.append(command)
    else:
        commands.append('no {0} {1}'.format(key, existing_value))
    return commands
proposed['message_digest_encryption_type'] == '3des': + encryption_type = '3' + elif proposed['message_digest_encryption_type'] == 'cisco_type_7': + encryption_type = '7' + command = '{0} {1} {2} {3} {4}'.format( + key, + proposed['message_digest_key_id'], + proposed['message_digest_algorithm_type'], + encryption_type, + proposed['message_digest_password']) + commands.append(command) + return commands + + +def state_present(module, existing, proposed, candidate): + commands = list() + proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) + existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) + + for key, value in proposed_commands.iteritems(): + if value is True: + commands.append(key) + elif value is False: + commands.append('no {0}'.format(key)) + elif value == 'default': + if existing_commands.get(key): + commands.extend(get_default_commands(existing, proposed, + existing_commands, key, + module)) + else: + if (key == 'ip router ospf' or + key.startswith('ip ospf message-digest-key')): + commands.extend(get_custom_command(commands, proposed, + key, module)) + else: + command = '{0} {1}'.format(key, value.lower()) + commands.append(command) + + if commands: + parents = ['interface {0}'.format(module.params['interface'].capitalize())] + candidate.add(commands, parents=parents) + + +def state_absent(module, existing, proposed, candidate): + commands = [] + parents = ['interface {0}'.format(module.params['interface'].capitalize())] + existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) + + for key, value in existing_commands.iteritems(): + if value: + if key.startswith('ip ospf message-digest-key'): + if 'options' not in key: + if existing['message_digest_encryption_type'] == '3des': + encryption_type = '3' + elif existing['message_digest_encryption_type'] == 'cisco_type_7': + encryption_type = '7' + command = 'no {0} {1} {2} {3} {4}'.format( + key, + existing['message_digest_key_id'], + 
def normalize_area(area, module):
    """Return the OSPF area in dotted-decimal notation.

    A plain integer (or integer string) is mapped into the 0.0.0.x form;
    an already-dotted value is passed through after a four-octet shape
    check. Malformed values fail the module.
    """
    try:
        return '0.0.0.{0}'.format(int(area))
    except ValueError:
        if len(area.split('.')) != 4:
            module.fail_json(msg='Incorrect Area ID format', area=area)
    return area
+ for param in ['message_digest_encryption_type', + 'message_digest_algorithm_type', + 'message_digest_password']: + if module.params[param] == 'default': + module.exit_json(msg='Use message_digest_key_id=default to remove' + ' an existing authentication configuration') + + state = module.params['state'] + args = [ + 'interface', + 'ospf', + 'area', + 'cost', + 'hello_interval', + 'dead_interval', + 'passive_interface', + 'message_digest', + 'message_digest_key_id', + 'message_digest_algorithm_type', + 'message_digest_encryption_type', + 'message_digest_password' + ] + + existing = invoke('get_existing', module, args) + end_state = existing + proposed_args = dict((k, v) for k, v in module.params.iteritems() + if v is not None and k in args) + + proposed = {} + for key, value in proposed_args.iteritems(): + if key != 'interface': + if str(value).lower() == 'true': + value = True + elif str(value).lower() == 'false': + value = False + elif str(value).lower() == 'default': + value = PARAM_TO_DEFAULT_KEYMAP.get(key) + if value is None: + value = 'default' + if existing.get(key) or (not existing.get(key) and value): + proposed[key] = value + + proposed['area'] = normalize_area(proposed['area'], module) + result = {} + if (state == 'present' or (state == 'absent' and + existing.get('ospf') == proposed['ospf'] and + existing.get('area') == proposed['area'])): + + candidate = CustomNetworkConfig(indent=3) + invoke('state_%s' % state, module, existing, proposed, candidate) + + try: + response = load_config(module, candidate) + result.update(response) + except ShellError: + exc = get_exception() + module.fail_json(msg=str(exc)) + else: + result['updates'] = [] + + result['connected'] = module.connected + if module._verbosity > 0: + end_state = invoke('get_existing', module, args) + result['end_state'] = end_state + result['existing'] = existing + result['proposed'] = proposed_args + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git 
a/network/nxos/nxos_ip_interface.py b/network/nxos/nxos_ip_interface.py new file mode 100644 index 00000000000..b860ea20b6f --- /dev/null +++ b/network/nxos/nxos_ip_interface.py @@ -0,0 +1,723 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_ip_interface +version_added: "2.1" +short_description: Manages L3 attributes for IPv4 and IPv6 interfaces. +description: + - Manages Layer 3 attributes for IPv4 and IPv6 interfaces. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +notes: + - Interface must already be a L3 port when using this module. + - Logical interfaces (po, loop, svi) must be created first. + - C(mask) must be inserted in decimal format (i.e. 24) for + both IPv6 and IPv4. + - A single interface can have multiple IPv6 configured. +options: + interface: + description: + - Full name of interface, i.e. Ethernet1/1, vlan10. + required: true + addr: + description: + - IPv4 or IPv6 Address. + required: false + default: null + mask: + description: + - Subnet mask for IPv4 or IPv6 Address in decimal format. + required: false + default: null + state: + description: + - Specify desired state of the resource. 
+ required: false + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +- name: Ensure ipv4 address is configured on Ethernet1/32 + nxos_ip_interface: + interface: Ethernet1/32 + transport: nxapi + version: v4 + state: present + addr: 20.20.20.20 + mask: 24 + +- name: Ensure ipv6 address is configured on Ethernet1/31 + nxos_ip_interface: + interface: Ethernet1/31 + transport: cli + version: v6 + state: present + addr: '2001::db8:800:200c:cccb' + mask: 64 +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"addr": "20.20.20.20", "interface": "ethernet1/32", "mask": "24"} +existing: + description: k/v pairs of existing IP attributes on the interface + type: dict + sample: {"addresses": [{"addr": "11.11.11.11", "mask": 17}], + "interface": "ethernet1/32", "prefix": "11.11.0.0", + "type": "ethernet", "vrf": "default"} +end_state: + description: k/v pairs of IP attributes after module execution + returned: always + type: dict + sample: {"addresses": [{"addr": "20.20.20.20", "mask": 24}], + "interface": "ethernet1/32", "prefix": "20.20.20.0", + "type": "ethernet", "vrf": "default"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["interface ethernet1/32", "ip address 20.20.20.20/24"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import json +import collections + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return 
list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def 
get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. 
def execute_show(cmds, module, command_type=None):
    """Run show commands through whichever transport API the ``module``
    object exposes, failing the module on shell errors."""
    output_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }

    def run_legacy():
        # Older module objects expose execute() directly.
        if command_type:
            return module.execute(cmds, command_type=command_type)
        return module.execute(cmds)

    def run_cli():
        # Newer module objects route through module.cli instead.
        if command_type:
            module.cli.add_commands(cmds, output=output_map.get(command_type))
        else:
            module.cli.add_commands(cmds, raw=True)
        return module.cli.run_commands()

    try:
        response = run_legacy()
    except ShellError:
        err = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(err))
    except AttributeError:
        try:
            response = run_cli()
        except ShellError:
            err = get_exception()
            module.fail_json(msg='Error sending {0}'.format(cmds),
                             error=str(err))
    return response


def execute_show_command(command, module, command_type='cli_show'):
    """Run one show command, appending '| json' for the cli transport and
    decoding the reply; nxapi replies are forwarded as-is."""
    transport = module.params['transport']
    if transport == 'cli':
        command += ' | json'
        response = execute_show([command], module)
        body = get_cli_body_ssh(command, response, module)
    elif transport == 'nxapi':
        body = execute_show([command], module, command_type=command_type)

    return body


def apply_key_map(key_map, table):
    """Return a copy of ``table`` with keys renamed through ``key_map``.

    Keys without a mapping are dropped; truthy values are stringified,
    falsy values are kept untouched.
    """
    renamed = {}
    for old_key, value in table.items():
        new_key = key_map.get(old_key)
        if not new_key:
            continue
        renamed[new_key] = str(value) if value else value
    return renamed
'svi' + elif interface.upper().startswith('LO'): + return 'loopback' + elif interface.upper().startswith('MG'): + return 'management' + elif interface.upper().startswith('MA'): + return 'management' + elif interface.upper().startswith('PO'): + return 'portchannel' + else: + return 'unknown' + + +def is_default(interface, module): + command = 'show run interface {0}'.format(interface) + + try: + body = execute_show_command(command, module)[0] + if 'invalid' in body.lower(): + return 'DNE' + else: + raw_list = body.split('\n') + if raw_list[-1].startswith('interface'): + return True + else: + return False + except (KeyError): + return 'DNE' + + +def get_interface_mode(interface, intf_type, module): + command = 'show interface {0}'.format(interface) + mode = 'unknown' + + if intf_type in ['ethernet', 'portchannel']: + body = execute_show_command(command, module)[0] + + if isinstance(body, str): + if 'invalid interface format' in body.lower(): + module.fail_json(msg='Invalid interface name. 
Please check ' + 'its format.', interface=interface) + + interface_table = body['TABLE_interface']['ROW_interface'] + mode = str(interface_table.get('eth_mode', 'layer3')) + if mode == 'access' or mode == 'trunk': + mode = 'layer2' + elif intf_type == 'svi': + mode = 'layer3' + return mode + + +def send_show_command(interface_name, version, module): + if version == 'v4': + command = 'show ip interface {0}'.format(interface_name) + elif version == 'v6': + command = 'show ipv6 interface {0}'.format(interface_name) + + if module.params['transport'] == 'nxapi' and version == 'v6': + body = execute_show_command(command, module, + command_type='cli_show_ascii') + else: + body = execute_show_command(command, module) + return body + + +def parse_structured_data(body, interface_name, version, module): + address_list = [] + + interface_key = { + 'subnet': 'prefix', + 'prefix': 'prefix' + } + + try: + interface_table = body[0]['TABLE_intf']['ROW_intf'] + try: + vrf_table = body[0]['TABLE_vrf']['ROW_vrf'] + vrf = vrf_table['vrf-name-out'] + except KeyError: + vrf = None + except (KeyError, AttributeError): + return {} + + interface = apply_key_map(interface_key, interface_table) + interface['interface'] = interface_name + interface['type'] = get_interface_type(interface_name) + interface['vrf'] = vrf + + if version == 'v4': + address = {} + address['addr'] = interface_table.get('prefix', None) + if address['addr'] is not None: + address['mask'] = str(interface_table.get('masklen', None)) + interface['addresses'] = [address] + prefix = "{0}/{1}".format(address['addr'], address['mask']) + address_list.append(prefix) + else: + interface['addresses'] = [] + + elif version == 'v6': + address_list = interface_table.get('addr', []) + interface['addresses'] = [] + + if address_list: + if not isinstance(address_list, list): + address_list = [address_list] + + for ipv6 in address_list: + address = {} + splitted_address = ipv6.split('/') + address['addr'] = splitted_address[0] + 
def parse_unstructured_data(body, interface_name, module):
    """Parse raw 'show ipv6 interface' text (nxapi transport) into an
    interface dict plus the list of configured IPv6 prefixes.

    A single interface can carry several IPv6 addresses, which only appear
    in the raw output, so the address block between the 'IPv6 address:'
    and 'IPv6 subnet:' marker lines is scanned manually.

    Returns (interface_dict, address_list).
    """
    interface = {}
    address_list = []
    vrf = None

    body = body[0]
    if "ipv6 is disabled" not in body.lower():
        splitted_body = body.split('\n')

        # Locate the span of address lines; the last line is deliberately
        # skipped, mirroring the original range(0, len - 1) scan.
        for index in range(0, len(splitted_body) - 1):
            if "IPv6 address:" in splitted_body[index]:
                first_reference_point = index + 1
            elif "IPv6 subnet:" in splitted_body[index]:
                last_reference_point = index

        # NOTE(review): raises NameError if the expected marker lines are
        # absent — presumed unreachable for real device output; confirm.
        prefix_line = splitted_body[last_reference_point]
        prefix = prefix_line.split('IPv6 subnet:')[1].strip()
        interface['prefix'] = prefix

        interface_list_table = splitted_body[
            first_reference_point:last_reference_point]

        for each_line in interface_list_table:
            address = each_line.strip().split(' ')[0]
            if address not in address_list:
                address_list.append(address)

        interface['addresses'] = []
        if address_list:
            for ipv6 in address_list:
                address = {}
                splitted_address = ipv6.split('/')
                address['addr'] = splitted_address[0]
                address['mask'] = splitted_address[1]
                interface['addresses'].append(address)

        try:
            # FIX: restore the named group — the regex was garbled to
            # '(?P\S+)' which is invalid; groupdict()["vrf"] requires
            # the group to be named 'vrf'.
            vrf_regex = r'.*VRF\s+(?P<vrf>\S+).*'
            match_vrf = re.match(vrf_regex, body, re.DOTALL)
            group_vrf = match_vrf.groupdict()
            vrf = group_vrf["vrf"]
        except AttributeError:
            # No VRF mentioned in the output.
            vrf = None

    else:
        # IPv6 has not been configured on this interface yet.
        interface['addresses'] = []

    interface['interface'] = interface_name
    interface['type'] = get_interface_type(interface_name)
    interface['vrf'] = vrf

    return interface, address_list
That's why we need to get raw output instead + # and manually parse it. + if module.params['transport'] == 'nxapi' and version == 'v6': + interface, address_list = parse_unstructured_data( + body, interface_name, module) + else: + interface, address_list = parse_structured_data( + body, interface_name, version, module) + + return interface, address_list + + +def get_remove_ip_config_commands(interface, addr, mask, version): + commands = [] + commands.append('interface {0}'.format(interface)) + if version == 'v4': + commands.append('no ip address') + else: + commands.append('no ipv6 address {0}/{1}'.format(addr, mask)) + + return commands + + +def get_config_ip_commands(delta, interface, existing, version): + commands = [] + delta = dict(delta) + + # loop used in the situation that just an IP address or just a + # mask is changing, not both. + for each in ['addr', 'mask']: + if each not in delta: + delta[each] = existing[each] + + if version == 'v4': + command = 'ip address {addr}/{mask}'.format(**delta) + else: + command = 'ipv6 address {addr}/{mask}'.format(**delta) + commands.append(command) + commands.insert(0, 'interface {0}'.format(interface)) + + return commands + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def validate_params(addr, interface, mask, version, state, intf_type, module): + if state == "present": + if addr is None or mask is None: + module.fail_json(msg="An IP address AND a mask must be provided " + "when state=present.") + elif state == "absent" and version == "v6": + if addr is None or mask is None: + module.fail_json(msg="IPv6 address and mask must be provided when " + "state=absent.") + + if (intf_type != "ethernet" and module.params["transport"] == "cli"): + if is_default(interface, module) == "DNE": + module.fail_json(msg="That interface does not 
exist yet. Create " + "it first.", interface=interface) + if mask is not None: + try: + if (int(mask) < 1 or int(mask) > 32) and version == "v4": + raise ValueError + elif int(mask) < 1 or int(mask) > 128: + raise ValueError + except ValueError: + module.fail_json(msg="Warning! 'mask' must be an integer between" + " 1 and 32 when version v4 and up to 128 " + "when version v6.", version=version, + mask=mask) + + +def main(): + argument_spec = dict( + interface=dict(required=True), + addr=dict(required=False), + version=dict(required=False, choices=['v4', 'v6'], + default='v4'), + mask=dict(type='str', required=False), + state=dict(required=False, default='present', + choices=['present', 'absent']), + include_defaults=dict(default=True), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + addr = module.params['addr'] + version = module.params['version'] + mask = module.params['mask'] + interface = module.params['interface'].lower() + state = module.params['state'] + + intf_type = get_interface_type(interface) + validate_params(addr, interface, mask, version, state, intf_type, module) + + mode = get_interface_mode(interface, intf_type, module) + if mode == 'layer2': + module.fail_json(msg='That interface is a layer2 port.\nMake it ' + 'a layer 3 port first.', interface=interface) + + existing, address_list = get_ip_interface(interface, version, module) + + args = dict(addr=addr, mask=mask, interface=interface) + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + commands = [] + changed = False + end_state = existing + + if state == 'absent' and existing['addresses']: + if version == 'v6': + for address in existing['addresses']: + if address['addr'] == addr and address['mask'] == mask: + command = get_remove_ip_config_commands(interface, addr, + mask, version) + commands.append(command) + + else: + command = get_remove_ip_config_commands(interface, 
addr, + mask, version) + commands.append(command) + + elif state == 'present': + if not existing['addresses']: + command = get_config_ip_commands(proposed, interface, + existing, version) + commands.append(command) + else: + prefix = "{0}/{1}".format(addr, mask) + if prefix not in address_list: + command = get_config_ip_commands(proposed, interface, + existing, version) + commands.append(command) + else: + for address in existing['addresses']: + if (address['addr'] == addr and + int(address['mask']) != int(mask)): + command = get_config_ip_commands(proposed, interface, + existing, version) + commands.append(command) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + execute_config_command(cmds, module) + changed = True + end_state, address_list = get_ip_interface(interface, version, + module) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = cmds + results['changed'] = changed + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_mtu.py b/network/nxos/nxos_mtu.py new file mode 100644 index 00000000000..48a92c2f941 --- /dev/null +++ b/network/nxos/nxos_mtu.py @@ -0,0 +1,598 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. 
If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_mtu +version_added: "2.2" +short_description: Manages MTU settings on Nexus switch. +description: + - Manages MTU settings on Nexus switch. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) +notes: + - Either C(sysmtu) param is required or C(interface) AND C(mtu) params are req'd. + - C(state=absent) unconfigures a given MTU if that value is currently present. +options: + interface: + description: + - Full name of interface, i.e. Ethernet1/1. + required: false + default: null + mtu: + description: + - MTU for a specific interface. + required: false + default: null + sysmtu: + description: + - System jumbo MTU. + required: false + default: null + state: + description: + - Specify desired state of the resource. + required: false + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +# Ensure system mtu is 9126 +- nxos_mtu: + sysmtu: 9216 + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + +# Config mtu on Eth1/1 (routed interface) +- nxos_mtu: + interface: Ethernet1/1 + mtu: 1600 + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + +# Config mtu on Eth1/3 (switched interface) +- nxos_mtu: + interface: Ethernet1/3 + mtu: 9216 + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + +# Unconfigure mtu on a given interface +- nxos_mtu: + interface: Ethernet1/3 + mtu: 9216 + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + state: absent +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"mtu": "1700"} +existing: + description: + - k/v pairs of existing mtu/sysmtu on the interface/system + type: dict + sample: {"mtu": "1600", "sysmtu": "9216"} +end_state: + description: 
k/v pairs of mtu/sysmtu values after module execution + returned: always + type: dict + sample: {"mtu": "1700", sysmtu": "9216"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["interface vlan10", "mtu 1700"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = 
ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_config_command(commands, module): + try: + 
module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when issuing commands containing 'show run'. + """ + if 'xml' in response[0] or response[0] == '\n': + body = [] + elif 'show run' in command: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return 
response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'show run' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def get_mtu(interface, module): + command = 'show interface {0}'.format(interface) + mtu = {} + + body = execute_show_command(command, module) + + try: + mtu_table = body[0]['TABLE_interface']['ROW_interface'] + mtu['mtu'] = str( + mtu_table.get('eth_mtu', + mtu_table.get('svi_mtu', 'unreadable_via_api'))) + mtu['sysmtu'] = get_system_mtu(module)['sysmtu'] + except KeyError: + mtu = {} + + return mtu + + +def get_system_mtu(module): + command = 'show run all | inc jumbomtu' + sysmtu = '' + + body = execute_show_command(command, module, command_type='cli_show_ascii') + + if body: + sysmtu = str(body[0].split(' ')[-1]) + try: + sysmtu = int(sysmtu) + except: + sysmtu = "" + + return dict(sysmtu=str(sysmtu)) + + +def get_commands_config_mtu(delta, interface): + CONFIG_ARGS = { + 'mtu': 'mtu {mtu}', + 'sysmtu': 'system jumbomtu {sysmtu}', + } + + commands = [] + for param, value in delta.iteritems(): + command = CONFIG_ARGS.get(param, 'DNE').format(**delta) + if command and command != 'DNE': + commands.append(command) + command = None + mtu_check = delta.get('mtu', None) + if mtu_check: + commands.insert(0, 'interface {0}'.format(interface)) + return commands + + +def get_commands_remove_mtu(delta, interface): + CONFIG_ARGS = { + 'mtu': 'no mtu {mtu}', + 'sysmtu': 'no system jumbomtu {sysmtu}', + } + 
commands = [] + for param, value in delta.iteritems(): + command = CONFIG_ARGS.get(param, 'DNE').format(**delta) + if command and command != 'DNE': + commands.append(command) + command = None + mtu_check = delta.get('mtu', None) + if mtu_check: + commands.insert(0, 'interface {0}'.format(interface)) + return commands + + +def get_interface_type(interface): + if interface.upper().startswith('ET'): + return 'ethernet' + elif interface.upper().startswith('VL'): + return 'svi' + elif interface.upper().startswith('LO'): + return 'loopback' + elif interface.upper().startswith('MG'): + return 'management' + elif interface.upper().startswith('MA'): + return 'management' + elif interface.upper().startswith('PO'): + return 'portchannel' + else: + return 'unknown' + + +def is_default(interface, module): + command = 'show run interface {0}'.format(interface) + + try: + body = execute_show_command( + command, module, command_type='cli_show_ascii')[0] + if body == 'DNE': + return 'DNE' + else: + raw_list = body.split('\n') + if raw_list[-1].startswith('interface'): + return True + else: + return False + except (KeyError): + return 'DNE' + + +def get_interface_mode(interface, intf_type, module): + command = 'show interface {0}'.format(interface) + mode = 'unknown' + interface_table = {} + body = execute_show_command(command, module) + + try: + interface_table = body[0]['TABLE_interface']['ROW_interface'] + except (KeyError, AttributeError, IndexError): + return mode + + if intf_type in ['ethernet', 'portchannel']: + mode = str(interface_table.get('eth_mode', 'layer3')) + if mode in ['access', 'trunk']: + mode = 'layer2' + elif mode == 'routed': + mode = 'layer3' + elif intf_type in ['loopback', 'svi']: + mode = 'layer3' + return mode + + +def main(): + argument_spec = dict( + mtu=dict(type='str'), + interface=dict(type='str'), + sysmtu=dict(type='str'), + state=dict(choices=['absent', 'present'], default='present'), + ) + module = get_network_module(argument_spec=argument_spec, + 
required_together=[['mtu', 'interface']], + supports_check_mode=True) + + interface = module.params['interface'] + mtu = module.params['mtu'] + sysmtu = module.params['sysmtu'] + state = module.params['state'] + + if sysmtu and (interface or mtu): + module.fail_json(msg='Proper usage-- either just use the sysmtu param ' + 'or use interface AND mtu params') + + if interface: + intf_type = get_interface_type(interface) + if intf_type != 'ethernet': + if is_default(interface, module) == 'DNE': + module.fail_json(msg='Invalid interface. It does not exist ' + 'on the switch.') + + existing = get_mtu(interface, module) + else: + existing = get_system_mtu(module) + + if interface and mtu: + if intf_type == 'loopback': + module.fail_json(msg='Cannot set MTU for loopback interface.') + mode = get_interface_mode(interface, intf_type, module) + if mode == 'layer2': + if intf_type in ['ethernet', 'portchannel']: + if mtu not in [existing['sysmtu'], '1500']: + module.fail_json(msg='MTU on L2 interfaces can only be set' + ' to the system default (1500) or ' + 'existing sysmtu value which is ' + ' {0}'.format(existing['sysmtu'])) + elif mode == 'layer3': + if intf_type in ['ethernet', 'portchannel', 'svi']: + if ((int(mtu) < 576 or int(mtu) > 9216) or + ((int(mtu) % 2) != 0)): + module.fail_json(msg='Invalid MTU for Layer 3 interface' + 'needs to be an even number between' + '576 and 9216') + if sysmtu: + if ((int(sysmtu) < 576 or int(sysmtu) > 9216 or + ((int(sysmtu) % 2) != 0))): + module.fail_json(msg='Invalid MTU- needs to be an even ' + 'number between 576 and 9216') + + args = dict(mtu=mtu, sysmtu=sysmtu) + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + delta = dict(set(proposed.iteritems()).difference(existing.iteritems())) + + changed = False + end_state = existing + commands = [] + + if state == 'present': + if delta: + command = get_commands_config_mtu(delta, interface) + commands.append(command) + + elif state == 'absent': + common = 
set(proposed.iteritems()).intersection(existing.iteritems()) + if common: + command = get_commands_remove_mtu(dict(common), interface) + commands.append(command) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + if interface: + end_state = get_mtu(interface, module) + else: + end_state = get_system_mtu(module) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = cmds + results['changed'] = changed + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_ntp.py b/network/nxos/nxos_ntp.py new file mode 100644 index 00000000000..4c6e406e6b9 --- /dev/null +++ b/network/nxos/nxos_ntp.py @@ -0,0 +1,635 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_ntp +version_added: "2.2" +short_description: Manages core NTP configuration. +description: + - Manages core NTP configuration. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) +options: + server: + description: + - Network address of NTP server. 
+ required: false + default: null + peer: + description: + - Network address of NTP peer. + required: false + default: null + key_id: + description: + - Authentication key identifier to use with + given NTP server or peer. + required: false + default: null + prefer: + description: + - Makes given NTP server or peer the preferred + NTP server or peer for the device. + required: false + default: null + choices: ['enabled', 'disabled'] + vrf_name: + description: + - Makes the device communicate with the given + NTP server or peer over a specific VRF. + required: false + default: null + source_addr: + description: + - Local source address from which NTP messages are sent. + required: false + default: null + source_int: + description: + - Local source interface from which NTP messages are sent. + Must be fully qualified interface name. + required: false + default: null + state: + description: + - Manage the state of the resource. + required: false + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +# Set NTP Server with parameters +- nxos_ntp: + server: 1.2.3.4 + key_id: 32 + prefer: enabled + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"address": "2.2.2.2", "key_id": "48", + "peer_type": "server", "prefer": "enabled", + "source": "3.3.3.3", "source_type": "source"} +existing: + description: + - k/v pairs of existing ntp server/peer + type: dict + sample: {"address": "2.2.2.2", "key_id": "32", + "peer_type": "server", "prefer": "enabled", + "source": "ethernet2/1", "source_type": "source-interface"} +end_state: + description: k/v pairs of ntp info after module execution + returned: always + type: dict + sample: {"address": "2.2.2.2", "key_id": "48", + "peer_type": "server", "prefer": "enabled", + "source": "3.3.3.3", "source_type": "source"} +updates: + description: command sent to 
the device + returned: always + type: list + sample: ["ntp server 2.2.2.2 prefer key 48", + "no ntp source-interface ethernet2/1", "ntp source 3.3.3.3"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for 
index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error 
sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when issuing commands containing 'show run'. + """ + if 'xml' in response[0] or response[0] == '\n': + body = [] + elif 'show run' in command: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if 
module.params['transport'] == 'cli': + if 'show run' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def get_ntp_source(module): + source_type = None + source = None + command = 'show run | inc ntp.source' + output = execute_show_command(command, module, command_type='cli_show_ascii') + + if output: + try: + if 'interface' in output[0]: + source_type = 'source-interface' + else: + source_type = 'source' + source = output[0].split()[2].lower() + except AttributeError: + source_type = None + source = None + + return source_type, source + + +def get_ntp_peer(module): + command = 'show run | inc ntp.(server|peer)' + ntp_peer_list = [] + ntp = execute_show_command( + command, module, command_type='cli_show_ascii')[0] + + ntp_regex = ( + ".*ntp\s(server\s(?P
\S+)|peer\s(?P\S+))" + "\s*((?Pprefer)\s*)?(use-vrf\s(?P\S+)\s*)?" + "(key\s(?P\d+))?.*" + ) + + split_ntp = ntp.splitlines() + for peer_line in split_ntp: + ntp_peer = {} + try: + peer_address = None + vrf_name = None + prefer = None + key_id = None + match_ntp = re.match(ntp_regex, peer_line, re.DOTALL) + group_ntp = match_ntp.groupdict() + + address = group_ntp["address"] + peer_address = group_ntp['peer_address'] + prefer = group_ntp['prefer'] + vrf_name = group_ntp['vrf_name'] + key_id = group_ntp['key_id'] + + if prefer is not None: + prefer = 'enabled' + else: + prefer = 'disabled' + + if address is not None: + peer_type = 'server' + elif peer_address is not None: + peer_type = 'peer' + address = peer_address + + args = dict(peer_type=peer_type, address=address, prefer=prefer, + vrf_name=vrf_name, key_id=key_id) + + ntp_peer = dict((k, v) for k, v in args.iteritems()) + ntp_peer_list.append(ntp_peer) + except AttributeError: + ntp_peer_list = [] + + return ntp_peer_list + + +def get_ntp_existing(address, peer_type, module): + peer_dict = {} + peer_server_list = [] + + peer_list = get_ntp_peer(module) + for peer in peer_list: + if peer['address'] == address: + peer_dict.update(peer) + else: + peer_server_list.append(peer) + + source_type, source = get_ntp_source(module) + + if (source_type is not None and source is not None): + peer_dict['source_type'] = source_type + peer_dict['source'] = source + + return (peer_dict, peer_server_list) + + +def set_ntp_server_peer(peer_type, address, prefer, key_id, vrf_name): + command_strings = [] + + if prefer: + command_strings.append(' prefer') + if key_id: + command_strings.append(' key {0}'.format(key_id)) + if vrf_name: + command_strings.append(' use-vrf {0}'.format(vrf_name)) + + command_strings.insert(0, 'ntp {0} {1}'.format(peer_type, address)) + + command = ''.join(command_strings) + + return command + + +def config_ntp(delta, existing): + address = delta.get('address', existing.get('address')) + peer_type = 
delta.get('peer_type', existing.get('peer_type')) + vrf_name = delta.get('vrf_name', existing.get('vrf_name')) + key_id = delta.get('key_id', existing.get('key_id')) + prefer = delta.get('prefer', existing.get('prefer')) + + source_type = delta.get('source_type') + source = delta.get('source') + + if prefer: + if prefer == 'enabled': + prefer = True + elif prefer == 'disabled': + prefer = False + + if source: + source_type = delta.get('source_type', existing.get('source_type')) + + ntp_cmds = [] + if peer_type: + ntp_cmds.append(set_ntp_server_peer( + peer_type, address, prefer, key_id, vrf_name)) + if source: + existing_source_type = existing.get('source_type') + existing_source = existing.get('source') + if existing_source_type and source_type != existing_source_type: + ntp_cmds.append('no ntp {0} {1}'.format(existing_source_type, existing_source)) + ntp_cmds.append('ntp {0} {1}'.format(source_type, source)) + + return ntp_cmds + + +def main(): + argument_spec = dict( + server=dict(type='str'), + peer=dict(type='str'), + key_id=dict(type='str'), + prefer=dict(type='str', choices=['enabled', 'disabled']), + vrf_name=dict(type='str'), + source_addr=dict(type='str'), + source_int=dict(type='str'), + state=dict(choices=['absent', 'present'], default='present'), + ) + module = get_network_module(argument_spec=argument_spec, + mutually_exclusive=[ + ['server','peer'], + ['source_addr','source_int']], + supports_check_mode=True) + + server = module.params['server'] or None + peer = module.params['peer'] or None + key_id = module.params['key_id'] + prefer = module.params['prefer'] + vrf_name = module.params['vrf_name'] + source_addr = module.params['source_addr'] + source_int = module.params['source_int'] + state = module.params['state'] + if source_int is not None: + source_int = source_int.lower() + + if server: + peer_type = 'server' + address = server + elif peer: + peer_type = 'peer' + address = peer + else: + peer_type = None + address = None + + source_type = None 
+ source = None + if source_addr: + source_type = 'source' + source = source_addr + elif source_int: + source_type = 'source-interface' + source = source_int + + if key_id or vrf_name or prefer: + if not server and not peer: + module.fail_json( + msg='Please supply the server or peer parameter') + + args = dict(peer_type=peer_type, address=address, key_id=key_id, + prefer=prefer, vrf_name=vrf_name, source_type=source_type, + source=source) + + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + + existing, peer_server_list = get_ntp_existing(address, peer_type, module) + + end_state = existing + changed = False + commands = [] + + if state == 'present': + delta = dict(set(proposed.iteritems()).difference(existing.iteritems())) + if delta: + command = config_ntp(delta, existing) + if command: + commands.append(command) + + elif state == 'absent': + if existing.get('peer_type') and existing.get('address'): + command = 'no ntp {0} {1}'.format( + existing['peer_type'], existing['address']) + if command: + commands.append([command]) + + existing_source_type = existing.get('source_type') + existing_source = existing.get('source') + proposed_source_type = proposed.get('source_type') + proposed_source = proposed.get('source') + + if proposed_source_type: + if proposed_source_type == existing_source_type: + if proposed_source == existing_source: + command = 'no ntp {0} {1}'.format( + existing_source_type, existing_source) + if command: + commands.append([command]) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + end_state = get_ntp_existing(address, peer_type, module)[0] + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['updates'] = cmds + results['changed'] = changed + results['end_state'] = end_state + results['peer_server_list'] = 
peer_server_list + + module.exit_json(**results) + + +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_ntp_auth.py b/network/nxos/nxos_ntp_auth.py new file mode 100644 index 00000000000..25071d6b893 --- /dev/null +++ b/network/nxos/nxos_ntp_auth.py @@ -0,0 +1,572 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- + +module: nxos_ntp_auth +version_added: "2.2" +short_description: Manages NTP authentication. +description: + - Manages NTP authentication. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) +notes: + - If C(state=absent), the module will attempt to remove the given key configuration. + If a matching key configuration isn't found on the device, the module will fail. + - If C(state=absent) and C(authentication=on), authentication will be turned off. + - If C(state=absent) and C(authentication=off), authentication will be turned on. +options: + key_id: + description: + - Authentication key identifier (numeric). + required: true + md5string: + description: + - MD5 String. + required: true + default: null + auth_type: + description: + - Whether the given md5string is in cleartext or + has been encrypted. 
If in cleartext, the device + will encrypt it before storing it. + required: false + default: text + choices: ['text', 'encrypt'] + trusted_key: + description: + - Whether the given key is required to be supplied by a time source + for the device to synchronize to the time source. + required: false + default: false + choices: ['true', 'false'] + authentication: + description: + - Turns NTP authentication on or off. + required: false + default: null + choices: ['on', 'off'] + state: + description: + - Manage the state of the resource. + required: false + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +# Basic NTP authentication configuration +- nxos_ntp_auth: + key_id: 32 + md5string: hello + auth_type: text + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"auth_type": "text", "authentication": "off", + "key_id": "32", "md5string": "helloWorld", + "trusted_key": "true"} +existing: + description: + - k/v pairs of existing ntp authentication + type: dict + sample: {"authentication": "off", "trusted_key": "false"} +end_state: + description: k/v pairs of ntp authentication after module execution + returned: always + type: dict + sample: {"authentication": "off", "key_id": "32", + "md5string": "kapqgWjwdg", "trusted_key": "true"} +state: + description: state as sent in from the playbook + returned: always + type: string + sample: "present" +updates: + description: command sent to the device + returned: always + type: list + sample: ["ntp authentication-key 32 md5 helloWorld 0", "ntp trusted-key 32"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + + +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import 
NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in 
to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. 
This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when issuing commands containing 'show run'. + """ + if 'xml' in response[0]: + body = [] + elif 'show run' in command: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'show run' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + 
flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def get_ntp_auth(module): + command = 'show ntp authentication-status' + + body = execute_show_command(command, module)[0] + ntp_auth_str = body['authentication'] + + if 'enabled' in ntp_auth_str: + ntp_auth = True + else: + ntp_auth = False + + return ntp_auth + + +def get_ntp_trusted_key(module): + trusted_key_list = [] + command = 'show run | inc ntp.trusted-key' + + trusted_key_str = execute_show_command( + command, module, command_type='cli_show_ascii')[0] + if trusted_key_str: + trusted_keys = trusted_key_str.splitlines() + + else: + trusted_keys = [] + + for line in trusted_keys: + if line: + trusted_key_list.append(str(line.split()[2])) + + return trusted_key_list + + +def get_ntp_auth_key(key_id, module): + authentication_key = {} + command = 'show run | inc ntp.authentication-key.{0}'.format(key_id) + auth_regex = (".*ntp\sauthentication-key\s(?P\d+)\s" + "md5\s(?P\S+).*") + + body = execute_show_command(command, module, command_type='cli_show_ascii') + + try: + match_authentication = re.match(auth_regex, body[0], re.DOTALL) + group_authentication = match_authentication.groupdict() + key_id = group_authentication["key_id"] + md5string = group_authentication['md5string'] + authentication_key['key_id'] = key_id + authentication_key['md5string'] = md5string + except (AttributeError, TypeError): + authentication_key = {} + + return authentication_key + + +def get_ntp_auth_info(key_id, module): + auth_info = get_ntp_auth_key(key_id, module) + trusted_key_list = get_ntp_trusted_key(module) + auth_power = get_ntp_auth(module) + + if key_id in trusted_key_list: + auth_info['trusted_key'] = 'true' + else: + auth_info['trusted_key'] = 'false' + + if auth_power: + auth_info['authentication'] = 'on' + else: + auth_info['authentication'] = 'off' + + return auth_info + + +def auth_type_to_num(auth_type): + if auth_type == 'encrypt' : + return '7' + else: + 
return '0' + + +def set_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication): + ntp_auth_cmds = [] + auth_type_num = auth_type_to_num(auth_type) + ntp_auth_cmds.append( + 'ntp authentication-key {0} md5 {1} {2}'.format( + key_id, md5string, auth_type_num)) + + if trusted_key == 'true': + ntp_auth_cmds.append( + 'ntp trusted-key {0}'.format(key_id)) + elif trusted_key == 'false': + ntp_auth_cmds.append( + 'no ntp trusted-key {0}'.format(key_id)) + + if authentication == 'on': + ntp_auth_cmds.append( + 'ntp authenticate') + elif authentication == 'off': + ntp_auth_cmds.append( + 'no ntp authenticate') + + return ntp_auth_cmds + + +def remove_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication): + auth_remove_cmds = [] + auth_type_num = auth_type_to_num(auth_type) + auth_remove_cmds.append( + 'no ntp authentication-key {0} md5 {1} {2}'.format( + key_id, md5string, auth_type_num)) + + if authentication == 'on': + auth_remove_cmds.append( + 'no ntp authenticate') + elif authentication == 'off': + auth_remove_cmds.append( + 'ntp authenticate') + + return auth_remove_cmds + + +def main(): + argument_spec = dict( + key_id=dict(required=True, type='str'), + md5string=dict(required=True, type='str'), + auth_type=dict(choices=['text', 'encrypt'], default='text'), + trusted_key=dict(choices=['true', 'false'], default='false'), + authentication=dict(choices=['on', 'off']), + state=dict(choices=['absent', 'present'], default='present'), + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + key_id = module.params['key_id'] + md5string = module.params['md5string'] + auth_type = module.params['auth_type'] + trusted_key = module.params['trusted_key'] + authentication = module.params['authentication'] + state = module.params['state'] + + args = dict(key_id=key_id, md5string=md5string, + auth_type=auth_type, trusted_key=trusted_key, + authentication=authentication) + + changed = False + proposed = dict((k, 
v) for k, v in args.iteritems() if v is not None) + + existing = get_ntp_auth_info(key_id, module) + end_state = existing + + delta = dict(set(proposed.iteritems()).difference(existing.iteritems())) + + commands = [] + if state == 'present': + if delta: + command = set_ntp_auth_key( + key_id, md5string, auth_type, trusted_key, delta.get('authentication')) + if command: + commands.append(command) + elif state == 'absent': + if existing: + auth_toggle = None + if authentication == existing.get('authentication'): + auth_toggle = authentication + command = remove_ntp_auth_key( + key_id, md5string, auth_type, trusted_key, auth_toggle) + if command: + commands.append(command) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + try: + execute_config_command(cmds, module) + except ShellError: + clie = get_exception() + module.fail_json(msg=str(clie) + ": " + cmds) + end_state = get_ntp_auth_info(key_id, module) + delta = dict(set(end_state.iteritems()).difference(existing.iteritems())) + if delta or (len(existing) != len(end_state)): + changed = True + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['updates'] = cmds + results['changed'] = changed + results['end_state'] = end_state + + module.exit_json(**results) + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_ntp_options.py b/network/nxos/nxos_ntp_options.py new file mode 100644 index 00000000000..010e67c8866 --- /dev/null +++ b/network/nxos/nxos_ntp_options.py @@ -0,0 +1,519 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- + +module: nxos_ntp_options +version_added: "2.2" +short_description: Manages NTP options. +description: + - Manages NTP options, e.g. authoritative server and logging. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) +notes: + - At least one of C(master) or C(logging) params must be supplied. + - When C(state=absent), boolean parameters are flipped, + e.g. C(master=true) will disable the authoritative server. + - When C(state=absent) and C(master=true), the stratum will be removed as well. + - When C(state=absent) and C(master=false), the stratum will be configured + to its default value, 8. +options: + master: + description: + - Sets whether the device is an authoritative NTP server. + required: false + default: null + choices: ['true','false'] + stratum: + description: + - If C(master=true), an optional stratum can be supplied (1-15). + The device default is 8. + required: false + default: null + logging: + description: + - Sets whether NTP logging is enabled on the device. + required: false + default: null + choices: ['true','false'] + state: + description: + - Manage the state of the resource. 
+ required: false + default: present + choices: ['present','absent'] +''' +EXAMPLES = ''' +# Basic NTP options configuration +- nxos_ntp_options: + master: true + stratum: 12 + logging: false + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"logging": false, "master": true, "stratum": "11"} +existing: + description: + - k/v pairs of existing ntp options + type: dict + sample: {"logging": true, "master": true, "stratum": "8"} +end_state: + description: k/v pairs of ntp options after module execution + returned: always + type: dict + sample: {"logging": false, "master": true, "stratum": "11"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["no ntp logging", "ntp master 11"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + + +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return 
'\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, 
candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when issuing commands containing 'show run'. 
+ """ + if 'xml' in response[0] or response[0] == '\n': + body = [] + elif 'show run' in command: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'show run' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = str(value) + else: + new_dict[new_key] = value + 
return new_dict + + +def get_ntp_master(module): + command = 'show run | inc ntp.master' + master_string = execute_show_command(command, module, command_type='cli_show_ascii') + + if master_string: + if master_string[0]: + master = True + else: + master = False + else: + master = False + + if master is True: + stratum = str(master_string[0].split()[2]) + else: + stratum = None + + return master, stratum + + +def get_ntp_log(module): + command = 'show ntp logging' + body = execute_show_command(command, module)[0] + + logging_string = body['loggingstatus'] + if 'enabled' in logging_string: + ntp_log = True + else: + ntp_log = False + + return ntp_log + + +def get_ntp_options(module): + existing = {} + existing['logging'] = get_ntp_log(module) + existing['master'], existing['stratum'] = get_ntp_master(module) + + return existing + + +def config_ntp_options(delta, flip=False): + master = delta.get('master') + stratum = delta.get('stratum') + log = delta.get('logging') + ntp_cmds = [] + + if flip: + log = not log + master = not master + + if log is not None: + if log is True: + ntp_cmds.append('ntp logging') + elif log is False: + ntp_cmds.append('no ntp logging') + if master is not None: + if master is True: + if not stratum: + stratum = '' + ntp_cmds.append('ntp master {0}'.format(stratum)) + elif master is False: + ntp_cmds.append('no ntp master') + + return ntp_cmds + + +def main(): + argument_spec = dict( + master=dict(required=False, type='bool'), + stratum=dict(type='str'), + logging=dict(required=False, type='bool'), + state=dict(choices=['absent', 'present'], default='present'), + ) + module = get_network_module(argument_spec=argument_spec, + required_one_of=[['master', 'logging']], + supports_check_mode=True) + + master = module.params['master'] + stratum = module.params['stratum'] + logging = module.params['logging'] + state = module.params['state'] + + if stratum: + if master is None: + module.fail_json(msg='The master param must be supplied when ' + 
'stratum is supplied') + try: + stratum_int = int(stratum) + if stratum_int < 1 or stratum_int > 15: + raise ValueError + except ValueError: + module.fail_json(msg='Stratum must be an integer between 1 and 15') + + existing = get_ntp_options(module) + end_state = existing + + args = dict(master=master, stratum=stratum, logging=logging) + + changed = False + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + + if master is False: + proposed['stratum'] = None + stratum = None + + delta = dict(set(proposed.iteritems()).difference(existing.iteritems())) + delta_stratum = delta.get('stratum') + + if delta_stratum: + delta['master'] = True + + commands = [] + if state == 'present': + if delta: + command = config_ntp_options(delta) + if command: + commands.append(command) + elif state == 'absent': + if existing: + isection = dict(set(proposed.iteritems()).intersection( + existing.iteritems())) + command = config_ntp_options(isection, flip=True) + if command: + commands.append(command) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + end_state = get_ntp_options(module) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['updates'] = cmds + results['changed'] = changed + results['end_state'] = end_state + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_nxapi.py b/network/nxos/nxos_nxapi.py index fb480ac304f..5317869f5aa 100644 --- a/network/nxos/nxos_nxapi.py +++ b/network/nxos/nxos_nxapi.py @@ -17,213 +17,255 @@ # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = """ --- -module: nxos_nxapi +module: nxos_nxapi version_added: "2.1" -author: "Chris Houseknecht (@chouseknecht)" +author: "Peter Sprygada (@privateip)" short_description: 
Manage NXAPI configuration on an NXOS device. description: - - Use to enable or disable NXAPI access, set the port and state - of http and https servers, and enable or disable the sandbox. - - When enabling NXAPI access the default is to enable HTTP on port - 80, enable HTTPS on port 443, and enable the web based UI sandbox. - Use the options below to override the default configuration. + - Configures the NXAPI feature on devices running Cisco NXOS. The + NXAPI feature is absent from the configuration by default. Since + this module manages the NXAPI feature it only supports the use + of the C(Cli) transport. extends_documentation_fragment: nxos options: - state: - description: - - Set to started or stopped. A state of started will - enable NXAPI access, and a state of stopped will - disable or shutdown all NXAPI access. - choices: - - started - - stopped - requred: false - default: started - http_port: - description: - - Port on which the HTTP server will listen. - required: false - default: 80 - https_port: - description: - - Port on which the HTTPS server will listen. - required: false - default: 443 - http: - description: - - Enable/disable HTTP server. - required: false - default: true - aliases: - - enable_http - https: - description: - - Enable/disable HTTPS server. - required: false - default: true - aliases: - - enable_https - sandbox: - description: - - Enable/disable NXAPI web based UI for entering commands. - required: false - default: true - aliases: - - enable_sandbox + http_port: + description: + - Configure the port with which the HTTP server will listen on + for requests. By default, NXAPI will bind the HTTP service + to the standard HTTP port 80. This argument accepts valid + port values in the range of 1 to 65535. + required: false + default: 80 + http: + description: + - Controls the operating state of the HTTP protocol as one of the + underlying transports for NXAPI. 
By default, NXAPI will enable + the HTTP transport when the feature is first configured. To + disable the use of the HTTP transport, set the value of this + argument to False. + required: false + default: yes + choices: ['yes', 'no'] + aliases: ['enable_http'] + https_port: + description: + - Configure the port with which the HTTPS server will listen on + for requests. By default, NXAPI will bind the HTTPS service + to the standard HTTPS port 443. This argument accepts valid + port values in the range of 1 to 65535. + required: false + default: 443 + https: + description: + - Controls the operating state of the HTTPS protocol as one of the + underlying transports for NXAPI. By default, NXAPI will disable + the HTTPS transport when the feature is first configured. To + enable the use of the HTTPS transport, set the value of this + argument to True. + required: false + default: no + choices: ['yes', 'no'] + aliases: ['enable_https'] + sandbox: + description: + - The NXAPI feature provides a web base UI for developers for + entering commands. This feature is initially disabled when + the NXAPI feature is configured for the first time. When the + C(sandbox) argument is set to True, the developer sandbox URL + will accept requests and when the value is set to False, the + sandbox URL is unavailable. + required: false + default: no + choices: ['yes', 'no'] + aliases: ['enable_sandbox'] + config: + description: + - The C(config) argument provides an optional argument to + specify the device running-config to used as the basis for + configuring the remote system. The C(config) argument accepts + a string value that represents the device configuration. + required: false + default: null + version_added: "2.2" + state: + description: + - The C(state) argument controls whether or not the NXAPI + feature is configured on the remote device. When the value + is C(present) the NXAPI feature configuration is present in + the device running-config. 
When the values is C(absent) the + feature configuration is removed from the running-config. + choices: ['present', 'absent'] + required: false + default: present """ EXAMPLES = """ - - name: Enable NXAPI access with default configuration - nxos_nxapi: - provider: {{ provider }} - - - name: Enable NXAPI with no HTTP, HTTPS at port 9443 and sandbox disabled - nxos_nxapi: - enable_http: false - https_port: 9443 - enable_sandbox: no - provider: {{ provider }} - - - name: shutdown NXAPI access - nxos_nxapi: - state: stopped - provider: {{ provider }} +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + cli: + host: "{{ inventory_hostname }}" + username: admin + password: admin + +- name: Enable NXAPI access with default configuration + nxos_nxapi: + provider: "{{ cli }}" + +- name: Enable NXAPI with no HTTP, HTTPS at port 9443 and sandbox disabled + nxos_nxapi: + enable_http: false + https_port: 9443 + https: yes + enable_sandbox: no + provider: "{{ cli }}" + +- name: remove NXAPI configuration + nxos_nxapi: + state: absent + provider: "{{ cli }}" """ RETURN = """ -changed: - description: - - Indicates if commands were sent to the device. - returned: always - type: boolean - sample: false - -commands: - description: - - Set of commands to be executed on remote device. If run in check mode, - commands will not be executed. - returned: always - type: list - sample: [ - 'nxapi feature', - 'nxapi http port 8080' - ] - -_config: - description: - - Configuration found on the device prior ro any commands being executed. 
- returned: always - type: object - sample: {...} +updates: + description: + - Returns the list of commands that need to be pushed into the remote + device to satisfy the arguments + returned: always + type: list + sample: ['no feature nxapi'] """ +import re +import time + +from ansible.module_utils.netcfg import NetworkConfig, dumps +from ansible.module_utils.nxos import NetworkModule, NetworkError +from ansible.module_utils.basic import get_exception + +PRIVATE_KEYS_RE = re.compile('__.+__') + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + +def get_instance(module): + instance = dict(state='absent') + try: + resp = module.cli('show nxapi', 'json') + except NetworkError: + return instance + + instance['state'] = 'present' + + instance['http'] = 'http_port' in resp[0] + instance['http_port'] = resp[0].get('http_port') or 80 + instance['https'] = 'https_port' in resp[0] + instance['https_port'] = resp[0].get('https_port') or 443 -def http_commands(protocol, port, enable, config): - port_config = config.get('{0}_port'.format(protocol), None) - changed = False - commands = [] - if port_config is None and enable: - # enable - changed = True - commands.append('nxapi {0} port {1}'.format(protocol, port)) - elif port_config is not None: - if not enable: - # disable - commands.append('no nxapi {0}'.format(protocol)) - changed = True - elif port_config != port: - # update port - commands.append('nxapi {0} port {1}'.format(protocol, port)) - changed = True - return commands, changed - - -def execute_commands(module, commands): - if not module.params.get('check_mode'): - module.configure(commands) - -def get_nxapi_state(module): - features = module.execute(['show feature | grep nxapi'])[0] - if re.search('disabled', features) is None: - return 'started' - return 'stopped' - - -def config_server(module): - - nxapi_state = get_nxapi_state(module) - - config = dict() - if nxapi_state == 'started': - config = 
module.from_json(module.execute(['show nxapi | json'])[0]) - - state = module.params.get('state') - result = dict(changed=False, _config=config, commands=[]) - commands = [] - - if config.get('nxapi_status', 'Disabled') == 'Disabled': - if state == 'started': - # enable nxapi and get the new default config - commands.append('feature nxapi') - result['_config'] = dict() - result['changed'] = True - if module.params.get('check_mode'): - # make an assumption about default state - config['http_port'] = 80 - config['sandbox_status'] = 'Disabled' - else: - # get the default config - execute_commands(module, commands) - config = module.from_json(module.execute(['show nxapi | json'])[0]) - else: - # nxapi already disabled - return result - elif config.get('nxapi_status', 'Disabled') == 'Enabled' and state == 'stopped': - # disable nxapi and exit + instance['sandbox'] = resp[0]['sandbox_status'] + + return instance + +def present(module, instance, commands): + commands.append('feature nxapi') + setters = set() + for key, value in module.argument_spec.iteritems(): + setter = value.get('setter') or 'set_%s' % key + if setter not in setters: + setters.add(setter) + if module.params[key] is not None: + invoke(setter, module, instance, commands) + +def absent(module, instance, commands): + if instance['state'] != 'absent': commands.append('no feature nxapi') + +def set_http(module, instance, commands): + port = module.params['http_port'] + if not 0 <= port <= 65535: + module.fail_json(msg='http_port must be between 1 and 65535') + elif module.params['http'] is True: + commands.append('nxapi http port %s' % port) + elif module.params['http'] is False: + commands.append('no nxapi http') + +def set_https(module, instance, commands): + port = module.params['https_port'] + if not 0 <= port <= 65535: + module.fail_json(msg='https_port must be between 1 and 65535') + elif module.params['https'] is True: + commands.append('nxapi https port %s' % port) + elif module.params['https'] is 
False: + commands.append('no nxapi https') + +def set_sandbox(module, instance, commands): + if module.params['sandbox'] is True: + commands.append('nxapi sandbox') + elif module.params['sandbox'] is False: + commands.append('no nxapi sandbox') + +def get_config(module): + contents = module.params['config'] + if not contents: + try: + contents = module.cli(['show running-config nxapi all'])[0] + except NetworkError: + contents = None + config = NetworkConfig(indent=2) + if contents: + config.load(contents) + return config + +def load_checkpoint(module, result): + try: + checkpoint = result['__checkpoint__'] + module.cli(['rollback running-config checkpoint %s' % checkpoint, + 'no checkpoint %s' % checkpoint], output='text') + except KeyError: + module.fail_json(msg='unable to rollback, checkpoint not found') + except NetworkError: + exc = get_exception() + msg = 'unable to rollback configuration' + module.fail_json(msg=msg, checkpoint=checkpoint, **exc.kwargs) + +def load_config(module, commands, result): + # create a config checkpoint + checkpoint = 'ansible_%s' % int(time.time()) + module.cli(['checkpoint %s' % checkpoint], output='text') + result['__checkpoint__'] = checkpoint + + # load the config into the device + module.config.load_config(commands) + + # load was successfully, remove the config checkpoint + module.cli(['no checkpoint %s' % checkpoint]) + +def load(module, commands, result): + candidate = NetworkConfig(indent=2, contents='\n'.join(commands)) + config = get_config(module) + configobjs = candidate.difference(config) + + if configobjs: + commands = dumps(configobjs, 'commands').split('\n') + result['updates'] = commands + if not module.check_mode: + load_config(module, commands, result) result['changed'] = True - result['commands'] = commands - execute_commands(module, commands) - return result - - # configure http and https - for protocol in ['http', 'https']: - cmds, chg = http_commands(protocol, module.params['{0}_port'.format(protocol)], - 
module.params[protocol], config) - if chg: - commands += cmds - result['changed'] = True - - # configure sandbox - config_sandbox = config.get('sandbox_status', None) - enable_sandbox = module.params.get('sandbox') - - if config_sandbox is None: - # there is no prior state, so we must set one - result['changed'] = True - if enable_sandbox: - commands.append('nxapi sandbox') - else: - commands.append('no nxapi sandbox') - else: - # there is a prior state, so be idempotent - if config_sandbox == 'Enabled' and not enable_sandbox: - # turn off sandbox - commands.append('no nxapi sandbox') - result['changed'] = True - elif config_sandbox == 'Disabled' and enable_sandbox: - # turn on sandbox - commands.append('nxapi sandbox') - result['changed'] = True - - if len(commands) > 0: - # something requires change - result['commands'] = commands - execute_commands(module, commands) - - return result + +def clean_result(result): + # strip out any keys that have two leading and two trailing + # underscore characters + for key in result.keys(): + if PRIVATE_KEYS_RE.match(key): + del result[key] def main(): @@ -231,28 +273,58 @@ def main(): """ argument_spec = dict( - state=dict(default='started', choices=['started','stopped']), - http_port=dict(default=80, type='int'), - https_port=dict(default=443, type='int'), - http=dict(aliases=['enable_http'], default=True, type='bool'), - https=dict(aliases=['enable_https'], default=True, type='bool'), - sandbox=dict(aliases=['enable_sandbox'], default=True, type='bool'), - - # Only allow configuration of NXAPI using cli transpsort - transport=dict(required=True, choices=['cli']) + http=dict(aliases=['enable_http'], default=True, type='bool', setter='set_http'), + http_port=dict(default=80, type='int', setter='set_http'), + + https=dict(aliases=['enable_https'], default=False, type='bool', setter='set_https'), + https_port=dict(default=443, type='int', setter='set_https'), + + sandbox=dict(aliases=['enable_sandbox'], default=False, 
type='bool'), + + # Only allow configuration of NXAPI using cli transport + transport=dict(required=True, choices=['cli']), + + config=dict(), + + # Support for started and stopped is for backwards capability only and + # will be removed in a future version + state=dict(default='present', choices=['started', 'stopped', 'present', 'absent']) ) - module = get_module(argument_spec=argument_spec, - supports_check_mode=True) + module = NetworkModule(argument_spec=argument_spec, + connect_on_load=False, + supports_check_mode=True) + + state = module.params['state'] + + warnings = list() + + result = dict(changed=False, warnings=warnings) - result = config_server(module) + if state == 'started': + state = 'present' + warnings.append('state=started is deprecated and will be removed in a ' + 'a future release. Please use state=present instead') + elif state == 'stopped': + state = 'absent' + warnings.append('state=stopped is deprecated and will be removed in a ' + 'a future release. Please use state=absent instead') - return module.exit_json(**result) + commands = list() + instance = get_instance(module) + + invoke(state, module, instance, commands) + + try: + load(module, commands, result) + except (ValueError, NetworkError): + load_checkpoint(module, result) + exc = get_exception() + module.fail_json(msg=str(exc), **exc.kwargs) + + clean_result(result) + module.exit_json(**result) -from ansible.module_utils.basic import * -from ansible.module_utils.shell import * -from ansible.module_utils.nxos import * if __name__ == '__main__': main() - diff --git a/network/nxos/nxos_ospf.py b/network/nxos/nxos_ospf.py new file mode 100644 index 00000000000..024b5f0ba24 --- /dev/null +++ b/network/nxos/nxos_ospf.py @@ -0,0 +1,347 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the 
License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_ospf +version_added: "2.2" +short_description: Manages configuration of an ospf instance. +description: + - Manages configuration of an ospf instance. +author: Gabriele Gerbino (@GGabriele) +extends_documentation_fragment: nxos +options: + ospf: + description: + - Name of the ospf instance. + required: true + state: + description: + - Determines whether the config should be present or not + on the device. + required: false + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +- nxos_ospf: + ospf: 1 + state: present + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"ospf": "1"} +existing: + description: k/v pairs of existing configuration + returned: verbose mode + type: dict + sample: {"ospf": ["2"]} +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: {"ospf": ["1", "2"]} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["router ospf 1"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + + +# COMMON CODE FOR MIGRATION +import re + +import ansible.module_utils.nxos +from ansible.module_utils.basic import get_exception +from 
ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.network import NetworkModule +from ansible.module_utils.shell import ShellError + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already 
exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +PARAM_TO_COMMAND_KEYMAP = { + 'ospf': 'router ospf' +} + + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + +def get_value(config, module): + splitted_config = config.splitlines() + value_list = [] + REGEX = '^router ospf\s(?P\S+).*' + for line in splitted_config: + value = '' + if 'router ospf' in line: + try: + match_ospf = re.match(REGEX, line, re.DOTALL) + ospf_group = match_ospf.groupdict() + value = ospf_group['ospf'] + except AttributeError: + value = '' + if value: + value_list.append(value) + + return value_list + + +def get_existing(module): + existing = {} + config 
= str(get_config(module)) + + value = get_value(config, module) + if value: + existing['ospf'] = value + return existing + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = value + else: + new_dict[new_key] = value + return new_dict + + +def state_present(module, proposed, candidate): + commands = ['router ospf {0}'.format(proposed['ospf'])] + candidate.add(commands, parents=[]) + + +def state_absent(module, proposed, candidate): + commands = ['no router ospf {0}'.format(proposed['ospf'])] + candidate.add(commands, parents=[]) + + +def main(): + argument_spec = dict( + ospf=dict(required=True, type='str'), + state=dict(choices=['present', 'absent'], default='present', + required=False), + include_defaults=dict(default=True), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + state = module.params['state'] + ospf = str(module.params['ospf']) + + existing = invoke('get_existing', module) + end_state = existing + proposed = dict(ospf=ospf) + + if not existing: + existing_list = [] + else: + existing_list = existing['ospf'] + + result = {} + if (state == 'present' or (state == 'absent' and ospf in existing_list)): + candidate = CustomNetworkConfig(indent=3) + invoke('state_%s' % state, module, proposed, candidate) + + try: + response = load_config(module, candidate) + result.update(response) + except ShellError: + exc = get_exception() + module.fail_json(msg=str(exc)) + else: + result['updates'] = [] + + result['connected'] = module.connected + if module._verbosity > 0: + end_state = invoke('get_existing', module) + result['end_state'] = end_state + result['existing'] = existing + result['proposed'] = proposed + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_ospf_vrf.py 
b/network/nxos/nxos_ospf_vrf.py new file mode 100644 index 00000000000..35e0d398f04 --- /dev/null +++ b/network/nxos/nxos_ospf_vrf.py @@ -0,0 +1,593 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_ospf_vrf +version_added: "2.2" +short_description: Manages a VRF for an OSPF router. +description: + - Manages a VRF for an OSPF router. +author: Gabriele Gerbino (@GGabriele) +extends_documentation_fragment: nxos +notes: + - Value I(default) restores params default value, if any. + Otherwise it removes the existing param configuration. +options: + vrf: + description: + - Name of the resource instance. Valid value is a string. + The name 'default' is a valid VRF representing the global OSPF. + required: false + default: default + ospf: + description: + - Name of the OSPF instance. + required: true + default: null + router_id: + description: + - Router Identifier (ID) of the OSPF router VRF instance. + required: false + default: null + default_metric: + description: + - Specify the default Metric value. Valid values are an integer + or the keyword 'default'. + required: false + default: null + log_adjacency: + description: + - Controls the level of log messages generated whenever a + neighbor changes state. 
Valid values are 'log', 'detail', + and 'default'. + required: false + choices: ['log','detail','default'] + default: null + timer_throttle_lsa_start: + description: + - Specify the start interval for rate-limiting Link-State + Advertisement (LSA) generation. Valid values are an integer, + in milliseconds, or the keyword 'default'. + required: false + default: null + timer_throttle_lsa_hold: + description: + - Specify the hold interval for rate-limiting Link-State + Advertisement (LSA) generation. Valid values are an integer, + in milliseconds, or the keyword 'default'. + required: false + default: null + timer_throttle_lsa_max: + description: + - Specify the max interval for rate-limiting Link-State + Advertisement (LSA) generation. Valid values are an integer, + in milliseconds, or the keyword 'default'. + required: false + default: null + timer_throttle_spf_start: + description: + - Specify initial Shortest Path First (SPF) schedule delay. + Valid values are an integer, in milliseconds, or + the keyword 'default'. + required: false + default: null + timer_throttle_spf_hold: + description: + - Specify minimum hold time between Shortest Path First (SPF) + calculations. Valid values are an integer, in milliseconds, + or the keyword 'default'. + required: false + default: null + timer_throttle_spf_max: + description: + - Specify the maximum wait time between Shortest Path First (SPF) + calculations. Valid values are an integer, in milliseconds, + or the keyword 'default'. + required: false + default: null + auto_cost: + description: + - Specifies the reference bandwidth used to assign OSPF cost. + Valid values are an integer, in Mbps, or the keyword 'default'. 
+ required: false + default: null +''' + +EXAMPLES = ''' +- nxos_ospf_vrf: + ospf: 1 + timer_throttle_spf_start: 50 + timer_throttle_spf_hold: 1000 + timer_throttle_spf_max: 2000 + timer_throttle_lsa_start: 60 + timer_throttle_lsa_hold: 1100 + timer_throttle_lsa_max: 3000 + vrf: test + state: present + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"ospf": "1", "timer_throttle_lsa_hold": "1100", + "timer_throttle_lsa_max": "3000", "timer_throttle_lsa_start": "60", + "timer_throttle_spf_hold": "1000", + "timer_throttle_spf_max": "2000", "timer_throttle_spf_start": "50", + "vrf": "test"} +existing: + description: k/v pairs of existing configuration + returned: verbose mode + type: dict + sample: {"auto_cost": "40000", "default_metric": "", "log_adjacency": "", + "ospf": "1", "router_id": "", "timer_throttle_lsa_hold": "5000", + "timer_throttle_lsa_max": "5000", "timer_throttle_lsa_start": "0", + "timer_throttle_spf_hold": "1000", + "timer_throttle_spf_max": "5000", + "timer_throttle_spf_start": "200", "vrf": "test"} +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: {"auto_cost": "40000", "default_metric": "", "log_adjacency": "", + "ospf": "1", "router_id": "", "timer_throttle_lsa_hold": "1100", + "timer_throttle_lsa_max": "3000", "timer_throttle_lsa_start": "60", + "timer_throttle_spf_hold": "1000", + "timer_throttle_spf_max": "2000", "timer_throttle_spf_start": "50", + "vrf": "test"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["router ospf 1", "vrf test", "timers throttle lsa 60 1100 3000", + "timers throttle spf 50 1000 2000"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +# COMMON CODE 
FOR MIGRATION +import re + +import ansible.module_utils.nxos +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.network import NetworkModule +from ansible.module_utils.shell import ShellError + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + 
self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +PARAM_TO_COMMAND_KEYMAP = { + 'router_id': 'router-id', + 'default_metric': 'default-metric', + 'log_adjacency': 'log-adjacency-changes', + 'timer_throttle_lsa_start': 'timers throttle lsa', + 'timer_throttle_lsa_max': 'timers throttle lsa', + 'timer_throttle_lsa_hold': 'timers throttle lsa', + 'timer_throttle_spf_max': 'timers throttle spf', + 'timer_throttle_spf_start': 'timers throttle spf', + 'timer_throttle_spf_hold': 'timers throttle spf', + 'auto_cost': 'auto-cost reference-bandwidth' +} 
+PARAM_TO_DEFAULT_KEYMAP = { + 'timer_throttle_lsa_start': '0', + 'timer_throttle_lsa_max': '5000', + 'timer_throttle_lsa_hold': '5000', + 'timer_throttle_spf_start': '200', + 'timer_throttle_spf_max': '5000', + 'timer_throttle_spf_hold': '1000', + 'auto_cost': '40000' +} + + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + +def get_value(arg, config, module): + REGEX = re.compile(r'(?:{0}\s)(?P.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = '' + + if PARAM_TO_COMMAND_KEYMAP[arg] in config: + if arg == 'log_adjacency': + if 'log-adjacency-changes detail' in config: + value = 'detail' + else: + value = 'log' + else: + value_list = REGEX.search(config).group('value').split() + if 'hold' in arg: + value = value_list[1] + elif 'max' in arg: + value = value_list[2] + elif 'auto' in arg: + if 'Gbps' in value_list: + value = str(int(value_list[0]) * 1000) + else: + value = value_list[0] + else: + value = value_list[0] + return value + + +def get_existing(module, args): + existing = {} + netcfg = get_config(module) + parents = ['router ospf {0}'.format(module.params['ospf'])] + + if module.params['vrf'] != 'default': + parents.append('vrf {0}'.format(module.params['vrf'])) + + config = netcfg.get_section(parents) + if config: + if module.params['vrf'] == 'default': + splitted_config = config.splitlines() + vrf_index = False + for index in range(0, len(splitted_config) - 1): + if 'vrf' in splitted_config[index].strip(): + vrf_index = index + break + if vrf_index: + config = '\n'.join(splitted_config[0:vrf_index]) + + for arg in args: + if arg not in ['ospf', 'vrf']: + existing[arg] = get_value(arg, config, module) + + existing['vrf'] = module.params['vrf'] + existing['ospf'] = module.params['ospf'] + + return existing + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + 
new_dict[new_key] = value + else: + new_dict[new_key] = value + return new_dict + + +def state_present(module, existing, proposed, candidate): + commands = list() + proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) + existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) + + for key, value in proposed_commands.iteritems(): + if value is True: + commands.append(key) + + elif value is False: + commands.append('no {0}'.format(key)) + + elif value == 'default': + if existing_commands.get(key): + existing_value = existing_commands.get(key) + commands.append('no {0} {1}'.format(key, existing_value)) + else: + if key == 'timers throttle lsa': + command = '{0} {1} {2} {3}'.format( + key, + proposed['timer_throttle_lsa_start'], + proposed['timer_throttle_lsa_hold'], + proposed['timer_throttle_lsa_max']) + elif key == 'timers throttle spf': + command = '{0} {1} {2} {3}'.format( + key, + proposed['timer_throttle_spf_start'], + proposed['timer_throttle_spf_hold'], + proposed['timer_throttle_spf_max']) + elif key == 'log-adjacency-changes': + if value == 'log': + command = key + elif value == 'detail': + command = '{0} {1}'.format(key, value) + elif key == 'auto-cost reference-bandwidth': + if len(value) < 5: + command = '{0} {1} Mbps'.format(key, value) + else: + value = str(int(value) / 1000) + command = '{0} {1} Gbps'.format(key, value) + else: + command = '{0} {1}'.format(key, value.lower()) + + if command not in commands: + commands.append(command) + + if commands: + parents = ['router ospf {0}'.format(module.params['ospf'])] + if module.params['vrf'] != 'default': + parents.append('vrf {0}'.format(module.params['vrf'])) + + candidate.add(commands, parents=parents) + + +def state_absent(module, existing, proposed, candidate): + commands = [] + parents = ['router ospf {0}'.format(module.params['ospf'])] + if module.params['vrf'] == 'default': + existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) + for key, value in 
existing_commands.iteritems(): + if value: + if key == 'timers throttle lsa': + command = 'no {0} {1} {2} {3}'.format( + key, + existing['timer_throttle_lsa_start'], + existing['timer_throttle_lsa_hold'], + existing['timer_throttle_lsa_max']) + elif key == 'timers throttle spf': + command = 'no {0} {1} {2} {3}'.format( + key, + existing['timer_throttle_spf_start'], + existing['timer_throttle_spf_hold'], + existing['timer_throttle_spf_max']) + else: + existing_value = existing_commands.get(key) + command = 'no {0} {1}'.format(key, existing_value) + + if command not in commands: + commands.append(command) + else: + commands = ['no vrf {0}'.format(module.params['vrf'])] + candidate.add(commands, parents=parents) + + +def main(): + argument_spec = dict( + vrf=dict(required=False, type='str', default='default'), + ospf=dict(required=True, type='str'), + router_id=dict(required=False, type='str'), + default_metric=dict(required=False, type='str'), + log_adjacency=dict(required=False, type='str', + choices=['log', 'detail', 'default']), + timer_throttle_lsa_start=dict(required=False, type='str'), + timer_throttle_lsa_hold=dict(required=False, type='str'), + timer_throttle_lsa_max=dict(required=False, type='str'), + timer_throttle_spf_start=dict(required=False, type='str'), + timer_throttle_spf_hold=dict(required=False, type='str'), + timer_throttle_spf_max=dict(required=False, type='str'), + auto_cost=dict(required=False, type='str'), + state=dict(choices=['present', 'absent'], default='present', + required=False), + include_defaults=dict(default=True), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + state = module.params['state'] + args = [ + 'vrf', + 'ospf', + 'router_id', + 'default_metric', + 'log_adjacency', + 'timer_throttle_lsa_start', + 'timer_throttle_lsa_hold', + 'timer_throttle_lsa_max', + 'timer_throttle_spf_start', + 'timer_throttle_spf_hold', + 
'timer_throttle_spf_max', + 'auto_cost' + ] + + existing = invoke('get_existing', module, args) + end_state = existing + proposed_args = dict((k, v) for k, v in module.params.iteritems() + if v is not None and k in args) + + proposed = {} + for key, value in proposed_args.iteritems(): + if key != 'interface': + if str(value).lower() == 'true': + value = True + elif str(value).lower() == 'false': + value = False + elif str(value).lower() == 'default': + value = PARAM_TO_DEFAULT_KEYMAP.get(key) + if value is None: + value = 'default' + if existing.get(key) or (not existing.get(key) and value): + proposed[key] = value + + result = {} + if state == 'present' or (state == 'absent' and existing): + candidate = CustomNetworkConfig(indent=3) + invoke('state_%s' % state, module, existing, proposed, candidate) + + try: + response = load_config(module, candidate) + result.update(response) + except ShellError: + exc = get_exception() + module.fail_json(msg=str(exc)) + else: + result['updates'] = [] + + result['connected'] = module.connected + if module._verbosity > 0: + end_state = invoke('get_existing', module, args) + result['end_state'] = end_state + result['existing'] = existing + result['proposed'] = proposed_args + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_overlay_global.py b/network/nxos/nxos_overlay_global.py new file mode 100644 index 00000000000..5b7d89c4b29 --- /dev/null +++ b/network/nxos/nxos_overlay_global.py @@ -0,0 +1,413 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_overlay_global +version_added: "2.2" +short_description: Configures anycast gateway MAC of the switch. +description: + - Configures anycast gateway MAC of the switch. +author: Gabriele Gerbino (@GGabriele) +extends_documentation_fragment: nxos +notes: + - Default restores params default value + - Supported MAC address format are "E.E.E", "EE-EE-EE-EE-EE-EE", + "EE:EE:EE:EE:EE:EE" and "EEEE.EEEE.EEEE" +options: + anycast_gateway_mac: + description: + - Anycast gateway mac of the switch. + required: true + default: null +''' + +EXAMPLES = ''' +- nxos_overlay_global: + anycast_gateway_mac: "b.b.b" + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"asn": "65535", "router_id": "1.1.1.1", "vrf": "test"} +existing: + description: k/v pairs of existing BGP configuration + returned: verbose mode + type: dict + sample: {"asn": "65535", "bestpath_always_compare_med": false, + "bestpath_aspath_multipath_relax": false, + "bestpath_compare_neighborid": false, + "bestpath_compare_routerid": false, + "bestpath_cost_community_ignore": false, + "bestpath_med_confed": false, + "bestpath_med_missing_as_worst": false, + "bestpath_med_non_deterministic": false, "cluster_id": "", + "confederation_id": "", "confederation_peers": "", + "graceful_restart": true, "graceful_restart_helper": false, + "graceful_restart_timers_restart": "120", 
+ "graceful_restart_timers_stalepath_time": "300", "local_as": "", + "log_neighbor_changes": false, "maxas_limit": "", + "neighbor_down_fib_accelerate": false, "reconnect_interval": "60", + "router_id": "11.11.11.11", "suppress_fib_pending": false, + "timer_bestpath_limit": "", "timer_bgp_hold": "180", + "timer_bgp_keepalive": "60", "vrf": "test"} +end_state: + description: k/v pairs of BGP configuration after module execution + returned: verbose mode + type: dict + sample: {"asn": "65535", "bestpath_always_compare_med": false, + "bestpath_aspath_multipath_relax": false, + "bestpath_compare_neighborid": false, + "bestpath_compare_routerid": false, + "bestpath_cost_community_ignore": false, + "bestpath_med_confed": false, + "bestpath_med_missing_as_worst": false, + "bestpath_med_non_deterministic": false, "cluster_id": "", + "confederation_id": "", "confederation_peers": "", + "graceful_restart": true, "graceful_restart_helper": false, + "graceful_restart_timers_restart": "120", + "graceful_restart_timers_stalepath_time": "300", "local_as": "", + "log_neighbor_changes": false, "maxas_limit": "", + "neighbor_down_fib_accelerate": false, "reconnect_interval": "60", + "router_id": "1.1.1.1", "suppress_fib_pending": false, + "timer_bestpath_limit": "", "timer_bgp_hold": "180", + "timer_bgp_keepalive": "60", "vrf": "test"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["router bgp 65535", "vrf test", "router-id 1.1.1.1"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if 
isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + 
item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +PARAM_TO_COMMAND_KEYMAP = { + 'anycast_gateway_mac': 'fabric forwarding anycast-gateway-mac', +} + + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + +def get_value(arg, config, module): + REGEX = re.compile(r'(?:{0}\s)(?P.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = '' + if PARAM_TO_COMMAND_KEYMAP[arg] in config: + value = REGEX.search(config).group('value') + return value + + +def get_existing(module, args): + existing = {} + config = str(get_config(module)) + + for arg in args: + existing[arg] = get_value(arg, config, module) + return existing + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = value + else: + 
new_dict[new_key] = value + return new_dict + + +def get_commands(module, existing, proposed, candidate): + commands = list() + proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) + existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) + + for key, value in proposed_commands.iteritems(): + if value == 'default': + existing_value = existing_commands.get(key) + if existing_value: + commands.append('no {0} {1}'.format(key, existing_value)) + else: + if 'anycast-gateway-mac' in key: + value = normalize_mac(value, module) + command = '{0} {1}'.format(key, value) + commands.append(command) + + if commands: + candidate.add(commands, parents=[]) + + +def normalize_mac(proposed_mac, module): + try: + if '-' in proposed_mac: + splitted_mac = proposed_mac.split('-') + if len(splitted_mac) != 6: + raise ValueError + + for octect in splitted_mac: + if len(octect) != 2: + raise ValueError + + elif '.' in proposed_mac: + splitted_mac = [] + splitted_dot_mac = proposed_mac.split('.') + if len(splitted_dot_mac) != 3: + raise ValueError + + for octect in splitted_dot_mac: + if len(octect) > 4: + raise ValueError + else: + octect_len = len(octect) + padding = 4 - octect_len + splitted_mac.append(octect.zfill(padding+1)) + + elif ':' in proposed_mac: + splitted_mac = proposed_mac.split(':') + if len(splitted_mac) != 6: + raise ValueError + + for octect in splitted_mac: + if len(octect) != 2: + raise ValueError + else: + raise ValueError + except ValueError: + module.fail_json(msg='Invalid MAC address format', + proposed_mac=proposed_mac) + + joined_mac = ''.join(splitted_mac) + mac = [joined_mac[i:i+4] for i in range(0, len(joined_mac), 4)] + return '.'.join(mac).upper() + + +def main(): + argument_spec = dict( + anycast_gateway_mac=dict(required=True, type='str'), + m_facts=dict(required=False, default=False, type='bool'), + include_defaults=dict(default=True), + config=dict(), + save=dict(type='bool', default=False) + ) + module = 
get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + args = [ + 'anycast_gateway_mac' + ] + + existing = invoke('get_existing', module, args) + end_state = existing + proposed = dict((k, v) for k, v in module.params.iteritems() + if v is not None and k in args) + + result = {} + candidate = CustomNetworkConfig(indent=3) + invoke('get_commands', module, existing, proposed, candidate) + + try: + response = load_config(module, candidate) + result.update(response) + except ShellError: + exc = get_exception() + module.fail_json(msg=str(exc)) + + result['connected'] = module.connected + if module._verbosity > 0: + end_state = invoke('get_existing', module, args) + result['end_state'] = end_state + result['existing'] = existing + result['proposed'] = proposed + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_pim.py b/network/nxos/nxos_pim.py new file mode 100644 index 00000000000..659686ceaa2 --- /dev/null +++ b/network/nxos/nxos_pim.py @@ -0,0 +1,336 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_pim +version_added: "2.2" +short_description: Manages configuration of a PIM instance. 
+description: + - Manages configuration of a Protocol Independent Multicast (PIM) instance. +author: Gabriele Gerbino (@GGabriele) +extends_documentation_fragment: nxos +options: + ssm_range: + description: + - Configure group ranges for Source Specific Multicast (SSM). + Valid values are multicast addresses or the keyword 'none'. + required: true +''' +EXAMPLES = ''' +- nxos_pim: + ssm_range: "232.0.0.0/8" + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"ssm_range": "232.0.0.0/8"} +existing: + description: k/v pairs of existing PIM configuration + returned: verbose mode + type: dict + sample: {"ssm_range": none} +end_state: + description: k/v pairs of BGP configuration after module execution + returned: verbose mode + type: dict + sample: {"ssm_range": "232.0.0.0/8"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["ip pim ssm range 232.0.0.0/8"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item 
in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = 
module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +PARAM_TO_COMMAND_KEYMAP = { + 'ssm_range': 'ip pim ssm range' +} +PARAM_TO_DEFAULT_KEYMAP = {} +WARNINGS = [] + + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + +def get_value(arg, config, module): + REGEX = re.compile(r'(?:{0}\s)(?P.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = '' + if PARAM_TO_COMMAND_KEYMAP[arg] in config: + value = REGEX.search(config).group('value') + return value + + +def get_existing(module, args): + existing = {} + config = str(get_config(module)) + for arg in args: + existing[arg] = get_value(arg, config, module) + return existing + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = value + else: + new_dict[new_key] = value + return new_dict + + +def get_commands(module, existing, proposed, candidate): + commands = list() + proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) + existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) + + for key, value in proposed_commands.iteritems(): + command = '{0} {1}'.format(key, value) + 
commands.append(command) + + if commands: + candidate.add(commands, parents=[]) + + +def main(): + argument_spec = dict( + ssm_range=dict(required=True, type='str'), + m_facts=dict(required=False, default=False, type='bool'), + include_defaults=dict(default=False), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + splitted_ssm_range = module.params['ssm_range'].split('.') + if len(splitted_ssm_range) != 4 and module.params['ssm_range'] != 'none': + module.fail_json(msg="Valid ssm_range values are multicast addresses " + "or the keyword 'none'.") + + args = [ + 'ssm_range' + ] + + existing = invoke('get_existing', module, args) + end_state = existing + proposed = dict((k, v) for k, v in module.params.iteritems() + if v is not None and k in args) + + result = {} + candidate = CustomNetworkConfig(indent=3) + invoke('get_commands', module, existing, proposed, candidate) + + try: + response = load_config(module, candidate) + result.update(response) + except ShellError: + exc = get_exception() + module.fail_json(msg=str(exc)) + + result['connected'] = module.connected + if module._verbosity > 0: + end_state = invoke('get_existing', module, args) + result['end_state'] = end_state + result['existing'] = existing + result['proposed'] = proposed + + if WARNINGS: + result['warnings'] = WARNINGS + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_pim_interface.py b/network/nxos/nxos_pim_interface.py new file mode 100644 index 00000000000..119785d47ed --- /dev/null +++ b/network/nxos/nxos_pim_interface.py @@ -0,0 +1,932 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_pim_interface +version_added: "2.2" +short_description: Manages PIM interface configuration. +description: + - Manages PIM interface configuration settings. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) +notes: + - When C(state=default), supported params will be reset to a default state. + These include C(dr_prio), C(hello_auth_key), C(hello_interval), C(jp_policy_out), + C(jp_policy_in), C(jp_type_in), C(jp_type_out), C(border), C(neighbor_policy), + C(neighbor_type). + - The C(hello_auth_key) param is not idempotent. + - C(hello_auth_key) only supports clear text passwords. + - When C(state=absent), pim interface configuration will be set to defaults and pim-sm + will be disabled on the interface. + - PIM must be enabled on the device to use this module. + - This module is for Layer 3 interfaces. +options: + interface: + description: + - Full name of the interface such as Ethernet1/33. + required: true + sparse: + description: + - Enable/disable sparse-mode on the interface. + required: false + default: true + choices: ['true', 'false'] + hello_auth_key: + description: + - Authentication for hellos on this interface. + required: false + default: null + hello_interval: + description: + - Hello interval in milliseconds for this interface. + required: false + default: null + choices: ['true', 'false'] + jp_policy_out: + description: + - Policy for join-prune messages (outbound). 
+ required: true + default: null + jp_policy_in: + description: + - Policy for join-prune messages (inbound). + required: false + default: null + jp_type_out: + description: + - Type of policy mapped to C(jp_policy_out). + required: false + default: null + choices: ['prefix', 'routemap'] + jp_type_in: + description: + - Type of policy mapped to C(jp_policy_in). + required: false + default: null + choices: ['prefix', 'routemap'] + border: + description: + - Configures interface to be a boundary of a PIM domain. + required: false + default: null + choices: ['true', 'false'] + neighbor_policy: + description: + - Configures a neighbor policy for filtering adjacencies. + required: false + default: null + neighbor_type: + description: + - Type of policy mapped to neighbor_policy. + required: false + default: null + choices: ['prefix', 'routemap'] + state: + description: + - Manages desired state of the resource. + required: false + default: present + choices: ['present', 'default'] +''' +EXAMPLES = ''' +# ensure PIM is not running on the interface +- nxos_pim_interface: + interface: eth1/33 + state: absent + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + +# ensure the interface has pim-sm enabled with the appropriate priority and hello interval +- nxos_pim_interface: + interface: eth1/33 + dr_prio: 10 + hello_interval: 40 + state: present + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + +# ensure join-prune policies exist +- nxos_pim_interface: + interface: eth1/33 + jp_policy_in: JPIN + jp_policy_out: JPOUT + jp_type_in: routemap + jp_type_out: routemap + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + +# ensure defaults are in place +- nxos_pim_interface: + interface: eth1/33 + state: default + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + 
returned: always + type: dict + sample: {"interface": "eth1/33", "neighbor_policy": "test", + "neighbor_type": "routemap", "sparse": true} +existing: + description: + - k/v pairs of existing configuration + type: dict + sample: {"border": false, "dr_prio": "1", "hello_interval": "30000", + "isauth": false, "jp_bidir": false, "jp_policy_in": "JPIN", + "jp_policy_out": "1", "jp_type_in": "routemap", + "jp_type_out": null, "neighbor_policy": "test1", + "neighbor_type": "prefix", "sparse": true} +end_state: + description: k/v pairs of configuration after module execution + returned: always + type: dict + sample: {"border": false, "dr_prio": "1", "hello_interval": "30000", + "isauth": false, "jp_bidir": false, "jp_policy_in": "JPIN", + "jp_policy_out": "1", "jp_type_in": "routemap", + "jp_type_out": null, "neighbor_policy": "test", + "neighbor_type": "routemap", "sparse": true} +updates: + description: command sent to the device + returned: always + type: list + sample: ["interface eth1/33", "ip pim neighbor-policy test", + "ip pim neighbor-policy test"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + + +import json +import time + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, 
path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + 
defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module, text=False): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when issuing commands containing 'show run'. 
+ """ + if 'xml' in response[0] or response[0] == '\n' or '^' in response[0]: + body = [] + elif 'show run' in command or text: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show', text=False): + if module.params['transport'] == 'cli': + if 'show run' not in command and text is False: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module, text=text) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def local_existing(gexisting): + jp_bidir = False + isauth = False + if gexisting: + jp_bidir = gexisting.get('jp_bidir') + isauth = gexisting.get('isauth') + if jp_bidir 
and isauth: + gexisting.pop('jp_bidir') + gexisting.pop('isauth') + gexisting['sparse'] = True + + return gexisting, jp_bidir, isauth + + +def get_interface_type(interface): + if interface.upper().startswith('ET'): + return 'ethernet' + elif interface.upper().startswith('VL'): + return 'svi' + elif interface.upper().startswith('LO'): + return 'loopback' + elif interface.upper().startswith('MG'): + return 'management' + elif interface.upper().startswith('MA'): + return 'management' + elif interface.upper().startswith('PO'): + return 'portchannel' + else: + return 'unknown' + + +def get_interface_mode(interface, intf_type, module): + command = 'show interface {0}'.format(interface) + mode = 'unknown' + interface_table = {} + body = execute_show_command(command, module) + + try: + interface_table = body[0]['TABLE_interface']['ROW_interface'] + except (KeyError, AttributeError, IndexError): + return mode + + if intf_type in ['ethernet', 'portchannel']: + mode = str(interface_table.get('eth_mode', 'layer3')) + if mode in ['access', 'trunk']: + mode = 'layer2' + elif mode == 'routed': + mode = 'layer3' + elif intf_type in ['loopback', 'svi']: + mode = 'layer3' + return mode + + +def get_pim_interface(module, interface): + pim_interface = {} + command = 'show ip pim interface {0}'.format(interface) + + body = execute_show_command(command, module, + command_type='cli_show_ascii', text=True) + + if body: + if 'not running' not in body[0]: + body = execute_show_command(command, module) + + try: + get_data = body[0]['TABLE_iod']['ROW_iod'] + + if isinstance(get_data.get('dr-priority'), unicode) or \ + isinstance(get_data.get('dr-priority'), str): + pim_interface['dr_prio'] = get_data.get('dr-priority') + else: + pim_interface['dr_prio'] = get_data.get('dr-priority')[0] + + hello_interval = get_data.get('hello-interval-sec') + if hello_interval: + hello_interval_msec = int(get_data.get('hello-interval-sec'))*1000 + pim_interface['hello_interval'] = str(hello_interval_msec) + 
border = get_data.get('is-border') + + if border == 'true': + pim_interface['border'] = True + elif border == 'false': + pim_interface['border'] = False + + isauth = get_data.get('isauth-config') + if isauth == 'true': + pim_interface['isauth'] = True + elif isauth == 'false': + pim_interface['isauth'] = False + + pim_interface['neighbor_policy'] = get_data.get('nbr-policy-name') + if pim_interface['neighbor_policy'] == 'none configured': + pim_interface['neighbor_policy'] = None + + jp_in_policy = get_data.get('jp-in-policy-name') + pim_interface['jp_policy_in'] = jp_in_policy + if jp_in_policy == 'none configured': + pim_interface['jp_policy_in'] = None + + if isinstance(get_data.get('jp-out-policy-name'), unicode) or \ + isinstance(get_data.get('jp-out-policy-name'), str): + pim_interface['jp_policy_out'] = get_data.get('jp-out-policy-name') + else: + pim_interface['jp_policy_out'] = get_data.get( + 'jp-out-policy-name')[0] + + if pim_interface['jp_policy_out'] == 'none configured': + pim_interface['jp_policy_out'] = None + + except (KeyError, AttributeError, TypeError, IndexError): + return {} + + command = 'show run interface {0}'.format(interface) + + body = execute_show_command(command, module, command_type='cli_show_ascii') + + jp_configs = [] + neigh = None + if body: + all_lines = body[0].splitlines() + + for each in all_lines: + if 'jp-policy' in each: + jp_configs.append(str(each.strip())) + elif 'neighbor-policy' in each: + neigh = str(each) + + pim_interface['neighbor_type'] = None + neigh_type = None + if neigh: + if 'prefix-list' in neigh: + neigh_type = 'prefix' + else: + neigh_type = 'routemap' + pim_interface['neighbor_type'] = neigh_type + + len_existing = len(jp_configs) + list_of_prefix_type = len([x for x in jp_configs if 'prefix-list' in x]) + jp_type_in = None + jp_type_out = None + jp_bidir = False + if len_existing == 1: + # determine type + last_word = jp_configs[0].split(' ')[-1] + if last_word == 'in': + if list_of_prefix_type: + 
jp_type_in = 'prefix' + else: + jp_type_in = 'routemap' + elif last_word == 'out': + if list_of_prefix_type: + jp_type_out = 'prefix' + else: + jp_type_out = 'routemap' + else: + jp_bidir = True + if list_of_prefix_type: + jp_type_in = 'prefix' + jp_type_out = 'routemap' + else: + jp_type_in = 'routemap' + jp_type_out = 'routemap' + else: + for each in jp_configs: + last_word = each.split(' ')[-1] + if last_word == 'in': + if 'prefix-list' in each: + jp_type_in = 'prefix' + else: + jp_type_in = 'routemap' + elif last_word == 'out': + if 'prefix-list' in each: + jp_type_out = 'prefix' + else: + jp_type_out = 'routemap' + + pim_interface['jp_type_in'] = jp_type_in + pim_interface['jp_type_out'] = jp_type_out + pim_interface['jp_bidir'] = jp_bidir + + return pim_interface + + +def fix_delta(delta, existing): + if delta.get('sparse') is False and existing.get('sparse') is None: + delta.pop('sparse') + return delta + + +def config_pim_interface(delta, existing, jp_bidir, isauth): + command = None + commands = [] + + delta = fix_delta(delta, existing) + + CMDS = { + 'sparse': 'ip pim sparse-mode', + 'dr_prio': 'ip pim dr-priority {0}', + 'hello_interval': 'ip pim hello-interval {0}', + 'hello_auth_key': 'ip pim hello-authentication ah-md5 {0}', + 'border': 'ip pim border', + 'jp_policy_out': 'ip pim jp-policy prefix-list {0} out', + 'jp_policy_in': 'ip pim jp-policy prefix-list {0} in', + 'jp_type_in': '', + 'jp_type_out': '', + 'neighbor_policy': 'ip pim neighbor-policy prefix-list {0}', + 'neighbor_type': '' + } + + if jp_bidir: + if delta.get('jp_policy_in') or delta.get('jp_policy_out'): + if existing.get('jp_type_in') == 'prefix': + command = 'no ip pim jp-policy prefix-list {0}'.format( + existing.get('jp_policy_in') + ) + else: + command = 'no ip pim jp-policy {0}'.format( + existing.get('jp_policy_in') + ) + if command: + commands.append(command) + + for k, v in delta.iteritems(): + if k in ['dr_prio', 'hello_interval', 'hello_auth_key', 'border', + 'sparse']: + 
if v: + command = CMDS.get(k).format(v) + elif k == 'hello_auth_key': + if isauth: + command = 'no ip pim hello-authentication ah-md5' + else: + command = 'no ' + CMDS.get(k).format(v) + + if command: + commands.append(command) + elif k in ['neighbor_policy', 'jp_policy_in', 'jp_policy_out', + 'neighbor_type']: + if k in ['neighbor_policy', 'neighbor_type']: + temp = delta.get('neighbor_policy') or existing.get( + 'neighbor_policy') + if delta.get('neighbor_type') == 'prefix': + command = CMDS.get(k).format(temp) + elif delta.get('neighbor_type') == 'routemap': + command = 'ip pim neighbor-policy {0}'.format(temp) + elif existing.get('neighbor_type') == 'prefix': + command = CMDS.get(k).format(temp) + elif existing.get('neighbor_type') == 'routemap': + command = 'ip pim neighbor-policy {0}'.format(temp) + elif k in ['jp_policy_in', 'jp_type_in']: + temp = delta.get('jp_policy_in') or existing.get( + 'jp_policy_in') + if delta.get('jp_type_in') == 'prefix': + command = CMDS.get(k).format(temp) + elif delta.get('jp_type_in') == 'routemap': + command = 'ip pim jp-policy {0} in'.format(temp) + elif existing.get('jp_type_in') == 'prefix': + command = CMDS.get(k).format(temp) + elif existing.get('jp_type_in') == 'routemap': + command = 'ip pim jp-policy {0} in'.format(temp) + elif k in ['jp_policy_out', 'jp_type_out']: + temp = delta.get('jp_policy_out') or existing.get( + 'jp_policy_out') + if delta.get('jp_type_out') == 'prefix': + command = CMDS.get(k).format(temp) + elif delta.get('jp_type_out') == 'routemap': + command = 'ip pim jp-policy {0} out'.format(temp) + elif existing.get('jp_type_out') == 'prefix': + command = CMDS.get(k).format(temp) + elif existing.get('jp_type_out') == 'routemap': + command = 'ip pim jp-policy {0} out'.format(temp) + if command: + commands.append(command) + command = None + + return commands + + +def get_pim_interface_defaults(): + dr_prio = '1' + border = False + hello_interval = '30000' + hello_auth_key = False + + args = 
dict(dr_prio=dr_prio, border=border, + hello_interval=hello_interval, + hello_auth_key=hello_auth_key) + + default = dict((param, value) for (param, value) in args.iteritems() + if value is not None) + + return default + + +def default_pim_interface_policies(existing, jp_bidir): + commands = [] + + if jp_bidir: + if existing.get('jp_policy_in') or existing.get('jp_policy_out'): + if existing.get('jp_type_in') == 'prefix': + command = 'no ip pim jp-policy prefix-list {0}'.format( + existing.get('jp_policy_in') + ) + if command: + commands.append(command) + + elif not jp_bidir: + command = None + for k, v in existing.iteritems(): + if k == 'jp_policy_in': + if existing.get('jp_policy_in'): + if existing.get('jp_type_in') == 'prefix': + command = 'no ip pim jp-policy prefix-list {0} in'.format( + existing.get('jp_policy_in') + ) + else: + command = 'no ip pim jp-policy {0} in'.format( + existing.get('jp_policy_in') + ) + elif k == 'jp_policy_out': + if existing.get('jp_policy_out'): + if existing.get('jp_type_out') == 'prefix': + command = 'no ip pim jp-policy prefix-list {0} out'.format( + existing.get('jp_policy_out') + ) + else: + command = 'no ip pim jp-policy {0} out'.format( + existing.get('jp_policy_out') + ) + if command: + commands.append(command) + command = None + + if existing.get('neighbor_policy'): + command = 'no ip pim neighbor-policy' + commands.append(command) + + return commands + + +def config_pim_interface_defaults(existing, jp_bidir, isauth): + command = [] + + # returns a dict + defaults = get_pim_interface_defaults() + delta = dict(set(defaults.iteritems()).difference( + existing.iteritems())) + if delta: + # returns a list + command = config_pim_interface(delta, existing, + jp_bidir, isauth) + comm = default_pim_interface_policies(existing, jp_bidir) + if comm: + for each in comm: + command.append(each) + + return command + + +def main(): + argument_spec=dict( + interface=dict(required=True), + sparse=dict(type='bool', default=True), + 
dr_prio=dict(type='str'), + hello_auth_key=dict(type='str'), + hello_interval=dict(type='int'), + jp_policy_out=dict(type='str'), + jp_policy_in=dict(type='str'), + jp_type_out=dict(choices=['prefix', 'routemap']), + jp_type_in=dict(choices=['prefix', 'routemap']), + border=dict(type='bool'), + neighbor_policy=dict(type='str'), + neighbor_type=dict(choices=['prefix', 'routemap']), + state=dict(choices=['present', 'absent', 'default'], + default='present'), + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + state = module.params['state'] + + sparse = module.params['sparse'] + interface = module.params['interface'] + jp_type_in = module.params['jp_type_in'] + jp_type_out = module.params['jp_type_out'] + jp_policy_in = module.params['jp_policy_in'] + jp_policy_out = module.params['jp_policy_out'] + neighbor_policy = module.params['neighbor_policy'] + neighbor_type = module.params['neighbor_type'] + hello_interval = module.params['hello_interval'] + + intf_type = get_interface_type(interface) + if get_interface_mode(interface, intf_type, module) == 'layer2': + module.fail_json(msg='this module only works on Layer 3 interfaces.') + + if jp_policy_in: + if not jp_type_in: + module.fail_json(msg='jp_type_in required when using jp_policy_in.') + if jp_policy_out: + if not jp_type_out: + module.fail_json(msg='jp_type_out required when using ' + ' jp_policy_out.') + if neighbor_policy: + if not neighbor_type: + module.fail_json(msg='neighbor_type required when using ' + 'neighbor_policy.') + + get_existing = get_pim_interface(module, interface) + existing, jp_bidir, isauth = local_existing(get_existing) + end_state = existing + changed = False + + commands = [] + + args = [ + 'interface', + 'sparse', + 'dr_prio', + 'hello_auth_key', + 'hello_interval', + 'jp_policy_out', + 'jp_type_out', + 'jp_type_in', + 'jp_policy_in', + 'border', + 'neighbor_type', + 'neighbor_policy' + ] + proposed = dict((k, v) for k, v in 
module.params.iteritems() + if v is not None and k in args) + + ''' + CANNOT_ABSENT = ['dr_prio', 'hello_interval', + 'hello_auth_key', 'jp_policy_out', 'jp_policy_in', + 'jp_type_out', 'jp_type_in', 'border', 'neighbor_type', + 'neighbor_policy'] + ''' + + if hello_interval: + proposed['hello_interval'] = str(proposed['hello_interval'] * 1000) + + delta = dict(set(proposed.iteritems()).difference(existing.iteritems())) + + if state == 'present': + if delta: + command = config_pim_interface(delta, existing, jp_bidir, isauth) + if command: + commands.append(command) + elif state == 'default': + defaults = config_pim_interface_defaults(existing, jp_bidir, isauth) + if defaults: + commands.append(defaults) + + elif state == 'absent': + if existing.get('sparse') == True: + delta['sparse'] = False + # defaults is a list of commands + defaults = config_pim_interface_defaults(existing, jp_bidir, isauth) + if defaults: + commands.append(defaults) + + command = config_pim_interface(delta, existing, jp_bidir, isauth) + commands.append(command) + + if commands: + commands.insert(0, ['interface {0}'.format(interface)]) + + cmds = flatten_list(commands) + results = {} + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + time.sleep(1) + get_existing = get_pim_interface(module, interface) + end_state, jp_bidir, isauth = local_existing(get_existing) + if 'configure' in cmds: + cmds.pop(0) + + results['proposed'] = proposed + results['existing'] = existing + results['updates'] = cmds + results['changed'] = changed + results['end_state'] = end_state + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_pim_rp_address.py b/network/nxos/nxos_pim_rp_address.py new file mode 100644 index 00000000000..c53747e02de --- /dev/null +++ b/network/nxos/nxos_pim_rp_address.py @@ -0,0 +1,416 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# 
Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_pim_rp_address +version_added: "2.2" +short_description: Manages configuration of a PIM static RP address instance. +description: + - Manages configuration of a Protocol Independent Multicast (PIM) static + rendezvous point (RP) address instance. +author: Gabriele Gerbino (@GGabriele) +extends_documentation_fragment: nxos +notes: + - C(state=absent) removes the whole rp-address configuration, if existing. +options: + rp_address: + description: + - Configures a Protocol Independent Multicast (PIM) static + rendezvous point (RP) address. Valid values are + unicast addresses. + required: true + group_list: + description: + - Group range for static RP. Valid values are multicast addresses. + required: false + default: null + prefix_list: + description: + - Prefix list policy for static RP. Valid values are prefix-list + policy names. + required: false + default: null + route_map: + description: + - Route map policy for static RP. Valid values are route-map + policy names. + required: false + default: null + bidir: + description: + - Group range is treated in PIM bidirectional mode. 
+ required: false + choices: ['true','false'] + default: null +''' +EXAMPLES = ''' +- nxos_pim_rp_address: + rp_address: "10.1.1.20" + state: present + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"rp_address": "10.1.1.21"} +existing: + description: list of existing pim rp-address configuration entries + returned: verbose mode + type: list + sample: [] +end_state: + description: pim rp-address configuration entries after module execution + returned: verbose mode + type: list + sample: [{"bidir": false, "group_list": "224.0.0.0/4", + "rp_address": "10.1.1.21"}] +updates: + description: commands sent to the device + returned: always + type: list + sample: ["router bgp 65535", "vrf test", "router-id 1.1.1.1"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + + + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw 
for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config 
= get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +BOOL_PARAMS = ['bidir'] +PARAM_TO_COMMAND_KEYMAP = { + 'rp_address': 'ip pim rp-address' +} +PARAM_TO_DEFAULT_KEYMAP = {} +WARNINGS = [] + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + +def get_value(config, module): + value_list = [] + splitted_config = config.splitlines() + for line in splitted_config: + tmp = {} + if 'ip pim rp-address' in line: + splitted_line = line.split() + tmp['rp_address'] = splitted_line[3] + if len(splitted_line) > 5: + value = splitted_line[5] + if splitted_line[4] == 'route-map': + tmp['route_map'] = value + elif splitted_line[4] == 'prefix-list': + tmp['prefix_list'] = value + elif splitted_line[4] == 'group-list': + tmp['group_list'] = value + if 'bidir' in line: + tmp['bidir'] = True + else: + tmp['bidir'] = False + value_list.append(tmp) + return value_list + + +def get_existing(module, args): + existing = {} + config = str(get_config(module)) + existing = get_value(config, module) + return existing + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = value + else: + new_dict[new_key] = value + return new_dict + + +def state_present(module, existing, proposed, candidate): + command = 'ip pim rp-address {0}'.format(module.params['rp_address']) + commands = build_command(proposed, 
command) + if commands: + candidate.add(commands, parents=[]) + + +def build_command(param_dict, command): + for param in ['group_list', 'prefix_list', 'route_map']: + if param_dict.get(param): + command += ' {0} {1}'.format( + param.replace('_', '-'), param_dict.get(param)) + if param_dict.get('bidir'): + command += ' bidir' + return [command] + + +def state_absent(module, existing, proposed, candidate): + commands = list() + for each in existing: + if each.get('rp_address') == proposed['rp_address']: + command = 'no ip pim rp-address {0}'.format(proposed['rp_address']) + if each.get('group_list'): + commands = build_command(each, command) + else: + commands = [command] + if commands: + candidate.add(commands, parents=[]) + + +def main(): + argument_spec = dict( + rp_address=dict(required=True, type='str'), + group_list=dict(required=False, type='str'), + prefix_list=dict(required=False, type='str'), + route_map=dict(required=False, type='str'), + bidir=dict(required=False, type='bool'), + state=dict(choices=['present', 'absent'], default='present', + required=False), + include_defaults=dict(default=False), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + mutually_exclusive=[['group_list', 'route_map'], + ['group_list', 'prefix_list'], + ['route_map', 'prefix_list']], + supports_check_mode=True) + + state = module.params['state'] + + args = [ + 'rp_address', + 'group_list', + 'prefix_list', + 'route_map', + 'bidir' + ] + + existing = invoke('get_existing', module, args) + end_state = existing + proposed_args = dict((k, v) for k, v in module.params.iteritems() + if v is not None and k in args) + + proposed = {} + for key, value in proposed_args.iteritems(): + if str(value).lower() == 'true': + value = True + elif str(value).lower() == 'false': + value = False + for each in existing: + if each.get(key) or (not each.get(key) and value): + proposed[key] = value + + result = {} + candidate = 
CustomNetworkConfig(indent=3) + invoke('state_%s' % state, module, existing, proposed, candidate) + + try: + response = load_config(module, candidate) + result.update(response) + except ShellError: + exc = get_exception() + module.fail_json(msg=str(exc)) + + result['connected'] = module.connected + if module._verbosity > 0: + end_state = invoke('get_existing', module, args) + result['end_state'] = end_state + result['existing'] = existing + result['proposed'] = proposed_args + + if WARNINGS: + result['warnings'] = WARNINGS + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_ping.py b/network/nxos/nxos_ping.py new file mode 100644 index 00000000000..a698b98ba8d --- /dev/null +++ b/network/nxos/nxos_ping.py @@ -0,0 +1,446 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_ping +version_added: "2.1" +short_description: Tests reachability using ping from Nexus switch. +description: + - Tests reachability using ping from switch to a remote destination. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +options: + dest: + description: + - IP address or hostname (resolvable by switch) of remote node. 
+ required: true + count: + description: + - Number of packets to send. + required: false + default: 2 + source: + description: + - Source IP Address. + required: false + default: null + vrf: + description: + - Outgoing VRF. + required: false + default: null +''' + +EXAMPLES = ''' +- name: Test reachability to 8.8.8.8 using mgmt vrf + nxos_ping: + dest: 8.8.8.8 + vrf: management + host: 68.170.147.165 + +- name: Test reachability to a few different public IPs using mgmt vrf + nxos_ping: + dest: "{{ item }}" + vrf: management + host: 68.170.147.165 + with_items: + - 8.8.8.8 + - 4.4.4.4 + - 198.6.1.4 +''' + +RETURN = ''' +action: + description: + - Show what action has been performed + returned: always + type: string + sample: "PING 8.8.8.8 (8.8.8.8): 56 data bytes" +updates: + description: Show the command sent + returned: always + type: list + sample: ["ping 8.8.8.8 count 2 vrf management"] +count: + description: Show amount of packets sent + returned: always + type: string + sample: "2" +dest: + description: Show the ping destination + returned: always + type: string + sample: "8.8.8.8" +rtt: + description: Show RTT stats + returned: always + type: dict + sample: {"avg": "6.264","max":"6.564", + "min": "5.978"} +packets_rx: + description: Packets successfully received + returned: always + type: string + sample: "2" +packets_tx: + description: Packets successfully transmitted + returned: always + type: string + sample: "2" +packet_loss: + description: Percentage of packets lost + returned: always + type: string + sample: "0.00%" +''' + +import json +import collections + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, 
tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = 
ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +def get_summary(results_list, reference_point): + summary_string = results_list[reference_point+1] + summary_list = summary_string.split(',') + pkts_tx = summary_list[0].split('packets')[0].strip() + pkts_rx = summary_list[1].split('packets')[0].strip() + pkt_loss = summary_list[2].split('packet')[0].strip() + summary = dict(packets_tx=pkts_tx, + packets_rx=pkts_rx, + packet_loss=pkt_loss) + + if 'bytes from' not in results_list[reference_point-2]: + ping_pass = False + else: + ping_pass = True + + return summary, ping_pass + + +def get_rtt(results_list, packet_loss, location): + if packet_loss != '100.00%': + rtt_string = results_list[location] + base = rtt_string.split('=')[1] + rtt_list = base.split('/') + min_rtt = rtt_list[0].lstrip() + avg_rtt = rtt_list[1] + max_rtt = rtt_list[2][:-3] + rtt = dict(min=min_rtt, avg=avg_rtt, max=max_rtt) + 
else: + rtt = dict(min=None, avg=None, max=None) + + return rtt + + +def get_statistics_summary_line(response_as_list): + for each in response_as_list: + if '---' in each: + index = response_as_list.index(each) + return index + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command_ping(command, module, command_type='cli_show_ascii'): + cmds = [command] + if module.params['transport'] == 'cli': + body = execute_show(cmds, module) + elif module.params['transport'] == 'nxapi': + body = execute_show(cmds, module, command_type=command_type) + return body + + +def get_ping_results(command, module, transport): + ping = execute_show_command_ping(command, module)[0] + + if not ping: + module.fail_json(msg="An unexpected error occurred. 
Check all params.", + command=command, destination=module.params['dest'], + vrf=module.params['vrf'], + source=module.params['source']) + + elif "can't bind to address" in ping: + module.fail_json(msg="Can't bind to source address.", command=command) + elif "bad context" in ping: + module.fail_json(msg="Wrong VRF name inserted.", command=command, + vrf=module.params['vrf']) + else: + splitted_ping = ping.split('\n') + reference_point = get_statistics_summary_line(splitted_ping) + summary, ping_pass = get_summary(splitted_ping, reference_point) + rtt = get_rtt(splitted_ping, summary['packet_loss'], reference_point+2) + + return (splitted_ping, summary, rtt, ping_pass) + + +def main(): + argument_spec = dict( + dest=dict(required=True), + count=dict(required=False, default=2), + vrf=dict(required=False), + source=dict(required=False), + state=dict(required=False, choices=['present', 'absent'], + default='present'), + include_defaults=dict(default=False), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + destination = module.params['dest'] + count = module.params['count'] + vrf = module.params['vrf'] + source = module.params['source'] + state = module.params['state'] + + if count: + try: + if int(count) < 1 or int(count) > 655350: + raise ValueError + except ValueError: + module.fail_json(msg="'count' must be an integer between 1 " + "and 655350.", count=count) + + OPTIONS = { + 'vrf': vrf, + 'count': count, + 'source': source + } + + ping_command = 'ping {0}'.format(destination) + for command, arg in OPTIONS.iteritems(): + if arg: + ping_command += ' {0} {1}'.format(command, arg) + + ping_results, summary, rtt, ping_pass = get_ping_results( + ping_command, module, module.params['transport']) + + packet_loss = summary['packet_loss'] + packets_rx = summary['packets_rx'] + packets_tx = summary['packets_tx'] + + results = {} + results['updates'] = [ping_command] + 
results['action'] = ping_results[1] + results['dest'] = destination + results['count'] = count + results['packets_tx'] = packets_tx + results['packets_rx'] = packets_rx + results['packet_loss'] = packet_loss + results['rtt'] = rtt + results['state'] = module.params['state'] + + if ping_pass and state == 'absent': + module.fail_json(msg="Ping succeeded unexpectedly", results=results) + elif not ping_pass and state == 'present': + module.fail_json(msg="Ping failed unexpectedly", results=results) + else: + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_portchannel.py b/network/nxos/nxos_portchannel.py new file mode 100644 index 00000000000..acb0a61b4c4 --- /dev/null +++ b/network/nxos/nxos_portchannel.py @@ -0,0 +1,746 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_portchannel +version_added: "2.2" +short_description: Manages port-channel interfaces. +description: + - Manages port-channel specific configuration parameters. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +notes: + - C(state=absent) removes the portchannel config and interface if it + already exists. 
If members to be removed are not explicitly + passed, all existing members (if any), are removed. + - Members must be a list. + - LACP needs to be enabled first if active/passive modes are used. +options: + group: + description: + - Channel-group number for the port-channel. + required: true + mode: + description: + - Mode for the port-channel, i.e. on, active, passive. + required: false + default: on + choices: ['active','passive','on'] + min_links: + description: + - Min links required to keep portchannel up. + required: false + default: null + members: + description: + - List of interfaces that will be managed in a given portchannel. + required: false + default: null + force: + description: + - When true it forces port-channel members to match what is + declared in the members param. This can be used to remove + members. + required: false + choices: ['true', 'false'] + default: false + state: + description: + - Manage the state of the resource. + required: false + default: present + choices: ['present','absent'] +''' +EXAMPLES = ''' +# Ensure port-channel99 is created, add two members, and set to mode on +- nxos_portchannel: + group: 99 + members: ['Ethernet1/1','Ethernet1/2'] + mode: 'active' + state: present + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"group": "12", "members": ["Ethernet2/5", + "Ethernet2/6"], "mode": "on"} +existing: + description: + - k/v pairs of existing portchannel + type: dict + sample: {"group": "12", "members": ["Ethernet2/5", + "Ethernet2/6"], "members_detail": { + "Ethernet2/5": {"mode": "active", "status": "D"}, + "Ethernet2/6": {"mode": "active", "status": "D"}}, + "min_links": null, "mode": "active"} +end_state: + description: k/v pairs of portchannel info after module execution + returned: always + type: dict + sample: {"group": "12", "members": ["Ethernet2/5", 
+ "Ethernet2/6"], "members_detail": { + "Ethernet2/5": {"mode": "on", "status": "D"}, + "Ethernet2/6": {"mode": "on", "status": "D"}}, + "min_links": null, "mode": "on"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["interface Ethernet2/6", "no channel-group 12", + "interface Ethernet2/5", "no channel-group 12", + "interface Ethernet2/6", "channel-group 12 mode on", + "interface Ethernet2/5", "channel-group 12 mode on"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import collections +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return 
self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + 
module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE +WARNINGS = [] +PARAM_TO_COMMAND_KEYMAP = { + 'min_links': 'lacp min-links' +} + + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + +def get_value(arg, config, module): + REGEX = re.compile(r'(?:{0}\s)(?P.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = '' + if PARAM_TO_COMMAND_KEYMAP[arg] in config: + value = REGEX.search(config).group('value') + return value + + +def check_interface(module, netcfg): + config = str(netcfg) + REGEX = re.compile(r'\s+interface port-channel{0}*$'.format(module.params['group']), re.M) + value = False + try: + if REGEX.search(config): + value = True + except TypeError: + value = False + + return value + + +def get_custom_value(arg, config, module): + REGEX = re.compile(r'\s+member vni {0} associate-vrf\s*$'.format( + module.params['vni']), re.M) + value = False + try: + if REGEX.search(config): + value = True + except TypeError: + value = False + return value + + +def execute_config_command(commands, module): + try: + output = module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + output = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + return output + + +def get_cli_body_ssh(command, response, module): + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + 
command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'show port-channel summary' in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def get_portchannel_members(pchannel): + try: + members = pchannel['TABLE_member']['ROW_member'] + except KeyError: + members = [] + + return members + + +def get_portchannel_mode(interface, protocol, module, netcfg): + if protocol != 'LACP': + mode = 'on' + else: + netcfg = get_config(module) + parents = ['interface {0}'.format(interface.capitalize())] + body = netcfg.get_section(parents) + + mode_list = body.split('\n') + + for line in mode_list: + this_line = line.strip() + if this_line.startswith('channel-group'): + find = this_line + if 'mode' in find: + if 'passive' in find: + mode = 'passive' + elif 'active' in find: + mode = 'active' + + return mode + + +def get_portchannel(module, netcfg=None): + command = 'show port-channel summary' + portchannel = {} + portchannel_table = {} + 
members = [] + + body = execute_show_command(command, module) + + try: + pc_table = body[0]['TABLE_channel']['ROW_channel'] + + if isinstance(pc_table, dict): + pc_table = [pc_table] + + for pc in pc_table: + if pc['group'] == module.params['group']: + portchannel_table = pc + except (KeyError, AttributeError, TypeError, IndexError): + return {} + + if portchannel_table: + portchannel['group'] = portchannel_table['group'] + protocol = portchannel_table['prtcl'] + members_list = get_portchannel_members(portchannel_table) + + if isinstance(members_list, dict): + members_list = [members_list] + + member_dictionary = {} + for each_member in members_list: + interface = each_member['port'] + members.append(interface) + + pc_member = {} + pc_member['status'] = str(each_member['port-status']) + pc_member['mode'] = get_portchannel_mode(interface, + protocol, module, netcfg) + + member_dictionary[interface] = pc_member + portchannel['members'] = members + portchannel['members_detail'] = member_dictionary + + # Ensure each member have the same mode. 
+ modes = set() + for each, value in member_dictionary.iteritems(): + modes.update([value['mode']]) + if len(modes) == 1: + portchannel['mode'] = value['mode'] + else: + portchannel['mode'] = 'unknown' + return portchannel + + +def get_existing(module, args): + existing = {} + netcfg = get_config(module) + + interface_exist = check_interface(module, netcfg) + if interface_exist: + parents = ['interface port-channel{0}'.format(module.params['group'])] + config = netcfg.get_section(parents) + + if config: + existing['min_links'] = get_value('min_links', config, module) + existing.update(get_portchannel(module, netcfg=netcfg)) + + return existing, interface_exist + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = value + else: + new_dict[new_key] = value + return new_dict + + +def config_portchannel(proposed, mode, group): + commands = [] + config_args = { + 'mode': 'channel-group {group} mode {mode}', + 'min_links': 'lacp min-links {min_links}', + } + + for member in proposed.get('members', []): + commands.append('interface {0}'.format(member)) + commands.append(config_args.get('mode').format(group=group, mode=mode)) + + min_links = proposed.get('min_links', None) + if min_links: + command = 'interface port-channel {0}'.format(group) + commands.append(command) + commands.append(config_args.get('min_links').format( + min_links=min_links)) + + return commands + + +def get_commands_to_add_members(proposed, existing, module): + try: + proposed_members = proposed['members'] + except KeyError: + proposed_members = [] + + try: + existing_members = existing['members'] + except KeyError: + existing_members = [] + + members_to_add = list(set(proposed_members).difference(existing_members)) + + commands = [] + if members_to_add: + for member in members_to_add: + commands.append('interface {0}'.format(member)) + 
commands.append('channel-group {0} mode {1}'.format( + existing['group'], proposed['mode'])) + + return commands + + +def get_commands_to_remove_members(proposed, existing, module): + try: + proposed_members = proposed['members'] + except KeyError: + proposed_members = [] + + try: + existing_members = existing['members'] + except KeyError: + existing_members = [] + + members_to_remove = list(set(existing_members).difference(proposed_members)) + commands = [] + if members_to_remove: + for member in members_to_remove: + commands.append('interface {0}'.format(member)) + commands.append('no channel-group {0}'.format(existing['group'])) + + return commands + + +def get_commands_if_mode_change(proposed, existing, group, mode, module): + try: + proposed_members = proposed['members'] + except KeyError: + proposed_members = [] + + try: + existing_members = existing['members'] + except KeyError: + existing_members = [] + + try: + members_dict = existing['members_detail'] + except KeyError: + members_dict = {} + + members_to_remove = set(existing_members).difference(proposed_members) + members_with_mode_change = [] + if members_dict: + for interface, values in members_dict.iteritems(): + if (interface in proposed_members and + (interface not in members_to_remove)): + if values['mode'] != mode: + members_with_mode_change.append(interface) + + commands = [] + if members_with_mode_change: + for member in members_with_mode_change: + commands.append('interface {0}'.format(member)) + commands.append('no channel-group {0}'.format(group)) + + for member in members_with_mode_change: + commands.append('interface {0}'.format(member)) + commands.append('channel-group {0} mode {1}'.format(group, mode)) + + return commands + + +def get_commands_min_links(existing, proposed, group, min_links, module): + commands = [] + try: + if (existing['min_links'] is None or + (existing['min_links'] != proposed['min_links'])): + commands.append('interface port-channel{0}'.format(group)) + 
commands.append('lacp min-link {0}'.format(min_links)) + except KeyError: + commands.append('interface port-channel{0}'.format(group)) + commands.append('lacp min-link {0}'.format(min_links)) + return commands + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def main(): + argument_spec = dict( + group=dict(required=True, type='str'), + mode=dict(required=False, choices=['on', 'active', 'passive'], + default='on', type='str'), + min_links=dict(required=False, default=None, type='str'), + members=dict(required=False, default=None, type='list'), + force=dict(required=False, default='false', type='str', + choices=['true', 'false']), + state=dict(required=False, choices=['absent', 'present'], + default='present'), + include_defaults=dict(default=False), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + group = str(module.params['group']) + mode = module.params['mode'] + min_links = module.params['min_links'] + members = module.params['members'] + state = module.params['state'] + + if str(module.params['force']).lower() == 'true': + force = True + elif module.params['force'] == 'false': + force = False + + if ((min_links or mode) and + (not members and state == 'present')): + module.fail_json(msg='"members" is required when state=present and ' + '"min_links" or "mode" are provided') + + changed = False + args = [ + 'group', + 'members', + 'min_links', + 'mode' + ] + + existing, interface_exist = invoke('get_existing', module, args) + end_state = existing + proposed = dict((k, v) for k, v in module.params.iteritems() + if v is not None and k in args) + + result = {} + commands = [] + if state == 'absent': + if existing: + commands.append(['no interface port-channel{0}'.format(group)]) 
+ elif state == 'present': + if not interface_exist: + command = config_portchannel(proposed, mode, group) + commands.append(command) + commands.insert(0, 'interface port-channel{0}'.format(group)) + WARNINGS.append("The proposed port-channel interface did not " + "exist. It's recommended to use nxos_interface to " + "create all logical interfaces.") + + elif existing and interface_exist: + if force: + command = get_commands_to_remove_members(proposed, existing, module) + commands.append(command) + + command = get_commands_to_add_members(proposed, existing, module) + commands.append(command) + + mode_command = get_commands_if_mode_change(proposed, existing, + group, mode, module) + + commands.insert(0, mode_command) + + if min_links: + command = get_commands_min_links(existing, proposed, + group, min_links, module) + commands.append(command) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + output = execute_config_command(cmds, module) + changed = True + end_state, interface_exist = get_existing(module, args) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = cmds + results['changed'] = changed + + if WARNINGS: + results['warnings'] = WARNINGS + + module.exit_json(**results) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/network/nxos/nxos_reboot.py b/network/nxos/nxos_reboot.py new file mode 100644 index 00000000000..c479d10a806 --- /dev/null +++ b/network/nxos/nxos_reboot.py @@ -0,0 +1,301 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_reboot +version_added: 2.2 +short_description: Reboot a network device. +description: + - Reboot a network device. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +notes: + - The module will fail due to timeout issues, but the reboot will be + performed anyway. +options: + confirm: + description: + - Safeguard boolean. Set to true if you're sure you want to reboot. + required: false + default: false +''' + +EXAMPLES = ''' +- nxos_reboot: + confirm: true + host: "{{ inventory_hostname }}" + username: "{{ username }}" + password: "{{ password }}" +''' + +RETURN = ''' +rebooted: + description: Whether the device was instructed to reboot. 
+ returned: success + type: boolean + sample: true +''' + +import json +import collections + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + 
obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + + +def reboot(module): + disable_confirmation(module) + execute_show_command(['reload'], module, command_type='cli_show_ascii') + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = 
module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + body = execute_show(command, module) + elif module.params['transport'] == 'nxapi': + body = execute_show(command, module, command_type=command_type) + + return body + + +def disable_confirmation(module): + command = ['terminal dont-ask'] + body = execute_show_command(command, module, command_type='cli_show_ascii')[0] + + +def main(): + argument_spec = dict( + confirm=dict(required=True, type='bool'), + include_defaults=dict(default=False), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + confirm = module.params['confirm'] + if not confirm: + module.fail_json(msg='confirm must be set to true for this ' + 'module to work.') + + changed = False + rebooted = False + + reboot(module) + + changed = True + rebooted = True + + results = {} + results['changed'] = changed + results['rebooted'] = rebooted + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_rollback.py b/network/nxos/nxos_rollback.py new file mode 100644 index 00000000000..736fa25ccaa --- /dev/null +++ b/network/nxos/nxos_rollback.py @@ -0,0 +1,328 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# 
it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_rollback +version_added: "2.2" +short_description: Set a checkpoint or rollback to a checkpoint. +description: + - This module offers the ability to set a configuration checkpoint + file or rollback to a configuration checkpoint file on Cisco NXOS + switches. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +notes: + - Sometimes C(transport=nxapi) may cause a timeout error. +options: + checkpoint_file: + description: + - Name of checkpoint file to create. Mutually exclusive + with rollback_to. + required: false + default: null + rollback_to: + description: + - Name of checkpoint file to rollback to. Mutually exclusive + with checkpoint_file. + required: false + default: null +''' + +EXAMPLES = ''' +- nxos_rollback: + checkpoint_file: backup.cfg + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +- nxos_rollback: + rollback_to: backup.cfg + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +filename: + description: The filename of the checkpoint/rollback file. + returned: success + type: string + sample: 'backup.cfg' +status: + description: Which operation took place and whether it was successful. 
+ returned: success + type: string + sample: 'rollback executed' +''' + + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = 
ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_commands(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + 
error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def prepare_show_command(command, module): + if module.params['transport'] == 'cli': + execute_commands(command, module) + elif module.params['transport'] == 'nxapi': + execute_commands(command, module, command_type='cli_show_ascii') + + +def checkpoint(filename, module): + commands = ['terminal dont-ask', 'checkpoint file %s' % filename] + prepare_show_command(commands, module) + + +def rollback(filename, module): + commands = ['rollback running-config file %s' % filename] + try: + module.configure(commands) + except AttributeError: + try: + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def main(): + argument_spec = dict( + checkpoint_file=dict(required=False), + rollback_to=dict(required=False), + include_defaults=dict(default=True), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + mutually_exclusive=[['checkpoint_file', + 'rollback_to']], + supports_check_mode=False) + + checkpoint_file = module.params['checkpoint_file'] + rollback_to = module.params['rollback_to'] + + status = None + filename = None + changed = False + try: + if checkpoint_file: + checkpoint(checkpoint_file, module) + status = 'checkpoint file created' + elif rollback_to: + rollback(rollback_to, module) + status = 'rollback executed' + changed = True + filename = rollback_to or checkpoint_file + except 
ShellError: + clie = get_exception() + module.fail_json(msg=str(clie)) + + module.exit_json(changed=changed, status=status, filename=filename) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_smu.py b/network/nxos/nxos_smu.py new file mode 100644 index 00000000000..1404725ac38 --- /dev/null +++ b/network/nxos/nxos_smu.py @@ -0,0 +1,384 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_smu +version_added: "2.2" +short_description: Perform SMUs on Cisco NX-OS devices. +description: + - Perform software maintenance upgrades (SMUs) on Cisco NX-OS devices. +extends_documentation_fragment: nxos +author: Gabriele Gerbino (@GGabriele) +notes: + - The module can only activate and commit a package, + not remove or deactivate it. + - Use C(transport=nxapi) to avoid connection timeout +options: + pkg: + description: + - Name of the remote package. + required: true + file_system: + description: + - The remote file system of the device. If omitted, + devices that support a file_system parameter will use + their default values. 
+ required: false + default: null +''' + +EXAMPLES = ''' +- nxos_smu: + pkg: "nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm" + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +file_system: + description: The remote file system of the device. + returned: always + type: string + sample: "bootflash:" +pkg: + description: Name of the remote package + type: string + returned: always + sample: "nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm" +updates: + description: commands sent to the device + returned: always + type: list + sample: ["install add bootflash:nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm", + "install activate bootflash:nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm force", + "install commit bootflash:nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import time +import json +import collections + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, 
section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def 
load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + cmds = [command] + body = execute_show(cmds, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def remote_file_exists(module, dst, file_system='bootflash:'): + command = 'dir {0}/{1}'.format(file_system, dst) + body = execute_show_command(command, module, command_type='cli_show_ascii') + if 'No such file' in body[0]: + return False + return True + + +def 
execute_config_command(commands, module): + try: + output = module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + output = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + return output + + +def apply_patch(module, commands): + for command in commands: + response = execute_config_command([command], module) + time.sleep(5) + if 'failed' in response: + module.fail_json(msg="Operation failed!", response=response) + + +def get_commands(module, pkg, file_system): + commands = [] + splitted_pkg = pkg.split('.') + fixed_pkg = '.'.join(splitted_pkg[0:-1]) + + command = 'show install inactive' + inactive_body = execute_show_command(command, module, + command_type='cli_show_ascii') + command = 'show install active' + active_body = execute_show_command(command, module, + command_type='cli_show_ascii') + + if fixed_pkg not in inactive_body[0] and fixed_pkg not in active_body[0]: + commands.append('install add {0}{1}'.format(file_system, pkg)) + + if fixed_pkg not in active_body[0]: + commands.append('install activate {0}{1} force'.format( + file_system, pkg)) + command = 'show install committed' + install_body = execute_show_command(command, module, + command_type='cli_show_ascii') + if fixed_pkg not in install_body[0]: + commands.append('install commit {0}{1}'.format(file_system, pkg)) + + return commands + + +def main(): + argument_spec = dict( + pkg=dict(required=True), + file_system=dict(required=False, default='bootflash:'), + include_defaults=dict(default=False), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + pkg = 
module.params['pkg'] + file_system = module.params['file_system'] + changed = False + remote_exists = remote_file_exists(module, pkg, file_system=file_system) + + if not remote_exists: + module.fail_json(msg="The requested package doesn't exist " + "on the device") + + commands = get_commands(module, pkg, file_system) + if not module.check_mode and commands: + try: + apply_patch(module, commands) + changed = True + except ShellError: + e = get_exception() + module.fail_json(msg=str(e)) + + if 'configure' in commands: + commands.pop(0) + + module.exit_json(changed=changed, + pkg=pkg, + file_system=file_system, + updates=commands) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_snapshot.py b/network/nxos/nxos_snapshot.py new file mode 100644 index 00000000000..e6f367d7bfa --- /dev/null +++ b/network/nxos/nxos_snapshot.py @@ -0,0 +1,674 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_snapshot +version_added: "2.2" +short_description: Manage snapshots of the running states of selected features. +description: + - Create snapshots of the running states of selected features, add + new show commands for snapshot creation, delete and compare + existing snapshots. 
+extends_documentation_fragment: nxos +author: + - Gabriele Gerbino (@GGabriele) +notes: + - C(transport=cli) may cause timeout errors. + - The C(element_key1) and C(element_key2) parameters specify the tags used + to distinguish among row entries. In most cases, only the element_key1 + parameter needs to be specified to be able to distinguish among row entries. + - C(action=compare) will always store a comparison report on a local file. +options: + action: + description: + - Define what snapshot action the module would perform. + required: true + choices: ['create','add','compare','delete'] + snapshot_name: + description: + - Snapshot name, to be used when C(action=create) + or C(action=delete). + required: false + default: null + description: + description: + - Snapshot description to be used when C(action=create). + required: false + default: null + snapshot1: + description: + - First snapshot to be used when C(action=compare). + required: false + default: null + snapshot2: + description: + - Second snapshot to be used when C(action=compare). + required: false + default: null + comparison_results_file: + description: + - Name of the file where snapshots comparison will be stored. + required: false + default: null + compare_option: + description: + - Snapshot options to be used when C(action=compare). + required: false + default: null + choices: ['summary','ipv4routes','ipv6routes'] + section: + description: + - Used to name the show command output, to be used + when C(action=add). + required: false + default: null + show_command: + description: + - Specify a new show command, to be used when C(action=add). + required: false + default: null + row_id: + description: + - Specifies the tag of each row entry of the show command's + XML output, to be used when C(action=add). + required: false + default: null + element_key1: + description: + - Specify the tags used to distinguish among row entries, + to be used when C(action=add).
+ required: false + default: null + element_key2: + description: + - Specify the tags used to distinguish among row entries, + to be used when C(action=add). + required: false + default: null + save_snapshot_locally: + description: + - Specify to locally store a new created snapshot, + to be used when C(action=create). + required: false + default: false + choices: ['true','false'] + path: + description: + - Specify the path of the file where new created snapshot or + snapshots comparison will be stored, to be used when + C(action=create) and C(save_snapshot_locally=true) or + C(action=compare). + required: false + default: './' +''' + +EXAMPLES = ''' +# Create a snapshot and store it locally +- nxos_snapshot: + action: create + snapshot_name: test_snapshot + description: Done with Ansible + save_snapshot_locally: true + path: /home/user/snapshots/ + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + +# Delete a snapshot +- nxos_snapshot: + action: delete + snapshot_name: test_snapshot + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + +# Delete all existing snapshots +- nxos_snapshot: + action: delete_all + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + +# Add a show command for snapshots creation +- nxos_snapshot: + section: myshow + show_command: show ip interface brief + row_id: ROW_intf + element_key1: intf-name + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + +# Compare two snapshots +- nxos_snapshot: + action: compare + snapshot1: pre_snapshot + snapshot2: post_snapshot + comparison_results_file: compare_snapshots.txt + compare_option: summary + path: '../snapshot_reports/' + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + +RETURN = ''' +existing_snapshots: + description: list of existing snapshots. 
+ returned: verbose mode + type: list + sample: [{"date": "Tue Sep 13 10:58:08 2016", + "description": "First snapshot", "name": "first_snap"}, + {"date": "Tue Sep 13 10:27:31 2016", "description": "Pre-snapshot", + "name": "pre_snapshot"}] +final_snapshots: + description: list of final snapshots. + returned: verbose mode + type: list + sample: [{"date": "Tue Sep 13 10:58:08 2016", + "description": "First snapshot", "name": "first_snap"}, + {"date": "Tue Sep 13 10:27:31 2016", "description": "Pre-snapshot", + "name": "pre_snapshot"}, + {"date": "Tue Sep 13 10:37:50 2016", "description": "Post-snapshot", + "name": "post_snapshot"}] +report_file: + description: name of the file where the new snapshot or snapshots + comparison have been stored. + returned: verbose mode + type: string + sample: "/home/gabriele/Desktop/ntc-ansible/ansible_snapshot" +updates: + description: commands sent to the device + returned: verbose mode + type: list + sample: ["snapshot create post_snapshot Post-snapshot"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import os +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for 
p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = 
module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show_ascii'): + cmds = [command] + if module.params['transport'] == 'cli': + body = execute_show(cmds, module) + elif module.params['transport'] == 'nxapi': + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def get_existing(module): + existing = [] + command = 'show snapshots' + + body = execute_show_command(command, module)[0] + if body: + split_body = 
body.splitlines() + snapshot_regex = ('(?P\S+)\s+(?P\w+\s+\w+\s+\d+\s+\d+' + ':\d+:\d+\s+\d+)\s+(?P.*)') + for snapshot in split_body: + temp = {} + try: + match_snapshot = re.match(snapshot_regex, snapshot, re.DOTALL) + snapshot_group = match_snapshot.groupdict() + temp['name'] = snapshot_group['name'] + temp['date'] = snapshot_group['date'] + temp['description'] = snapshot_group['description'] + existing.append(temp) + except AttributeError: + pass + + return existing + + +def action_create(module, existing_snapshots): + commands = list() + exist = False + for snapshot in existing_snapshots: + if module.params['snapshot_name'] == snapshot['name']: + exist = True + + if exist is False: + commands.append('snapshot create {0} {1}'.format( + module.params['snapshot_name'], module.params['description'])) + + return commands + + +def action_add(module, existing_snapshots): + commands = list() + command = 'show snapshot sections' + sections = [] + body = execute_show_command(command, module)[0] + + if body: + section_regex = '.*\[(?P
\S+)\].*' + split_body = body.split('\n\n') + for section in split_body: + temp = {} + for line in section.splitlines(): + try: + match_section = re.match(section_regex, section, re.DOTALL) + temp['section'] = match_section.groupdict()['section'] + except (AttributeError, KeyError): + pass + + if 'show command' in line: + temp['show_command'] = line.split('show command: ')[1] + elif 'row id' in line: + temp['row_id'] = line.split('row id: ')[1] + elif 'key1' in line: + temp['element_key1'] = line.split('key1: ')[1] + elif 'key2' in line: + temp['element_key2'] = line.split('key2: ')[1] + + if temp: + sections.append(temp) + + proposed = { + 'section': module.params['section'], + 'show_command': module.params['show_command'], + 'row_id': module.params['row_id'], + 'element_key1': module.params['element_key1'], + 'element_key2': module.params['element_key2'] or '-', + } + + if proposed not in sections: + if module.params['element_key2']: + commands.append('snapshot section add {0} "{1}" {2} {3} {4}'.format( + module.params['section'], module.params['show_command'], + module.params['row_id'], module.params['element_key1'], + module.params['element_key2'])) + else: + commands.append('snapshot section add {0} "{1}" {2} {3}'.format( + module.params['section'], module.params['show_command'], + module.params['row_id'], module.params['element_key1'])) + + return commands + + +def action_compare(module, existing_snapshots): + command = 'show snapshot compare {0} {1}'.format( + module.params['snapshot1'], module.params['snapshot2']) + + if module.params['compare_option']: + command += ' {0}'.format(module.params['compare_option']) + + body = execute_show_command(command, module)[0] + return body + + +def action_delete(module, existing_snapshots): + commands = list() + + exist = False + for snapshot in existing_snapshots: + if module.params['snapshot_name'] == snapshot['name']: + exist = True + + if exist: + commands.append('snapshot delete {0}'.format( + 
module.params['snapshot_name'])) + + return commands + + +def action_delete_all(module, existing_snapshots): + commands = list() + if existing_snapshots: + commands.append('snapshot delete all') + return commands + + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_snapshot(module): + command = 'show snapshot dump {0}'.format(module.params['snapshot_name']) + body = execute_show_command(command, module)[0] + return body + + +def write_on_file(content, filename, module): + path = module.params['path'] + if path[-1] != '/': + path += '/' + filepath = '{0}{1}'.format(path, filename) + try: + report = open(filepath, 'w') + report.write(content) + report.close() + except: + module.fail_json(msg="Error while writing on file.") + + return filepath + +def main(): + argument_spec = dict( + action=dict(required=True, choices=['create', 'add', + 'compare', 'delete', + 'delete_all']), + snapshot_name=dict(required=False, type='str'), + description=dict(required=False, type='str'), + snapshot1=dict(required=False, type='str'), + snapshot2=dict(required=False, type='str'), + compare_option=dict(required=False, + choices=['summary', 'ipv4routes', 'ipv6routes']), + comparison_results_file=dict(required=False, type='str'), + section=dict(required=False, type='str'), + show_command=dict(required=False, type='str'), + row_id=dict(required=False, type='str'), + element_key1=dict(required=False, type='str'), + 
element_key2=dict(required=False, type='str'), + save_snapshot_locally=dict(required=False, type='bool', + default=False), + path=dict(required=False, type='str', default='./') + ) + module = get_network_module(argument_spec=argument_spec, + mutually_exclusive=[['delete_all', + 'delete_snapshot']], + supports_check_mode=True) + + action = module.params['action'] + comparison_results_file = module.params['comparison_results_file'] + + CREATE_PARAMS = ['snapshot_name', 'description'] + ADD_PARAMS = ['section', 'show_command', 'row_id', 'element_key1'] + COMPARE_PARAMS = ['snapshot1', 'snapshot2', 'comparison_results_file'] + + if not os.path.isdir(module.params['path']): + module.fail_json(msg='{0} is not a valid directory name.'.format( + module.params['path'])) + + if action == 'create': + for param in CREATE_PARAMS: + if not module.params[param]: + module.fail_json(msg='snapshot_name and description are ' + 'required when action=create') + elif action == 'add': + for param in ADD_PARAMS: + if not module.params[param]: + module.fail_json(msg='section, show_command, row_id ' + 'and element_key1 are required ' + 'when action=add') + elif action == 'compare': + for param in COMPARE_PARAMS: + if not module.params[param]: + module.fail_json(msg='snapshot1 and snapshot2 are required ' + 'when action=create') + elif action == 'delete' and not module.params['snapshot_name']: + module.fail_json(msg='snapshot_name is required when action=delete') + + existing_snapshots = invoke('get_existing', module) + final_snapshots = existing_snapshots + changed = False + + action_results = invoke('action_%s' % action, module, existing_snapshots) + + result = {} + written_file = '' + if module.check_mode and action != 'compare': + module.exit_json(changed=True, commands=action_results) + else: + if action == 'compare': + written_file = write_on_file(action_results, + module.params['comparison_results_file'], + module) + result['updates'] = [] + else: + if action_results: + 
execute_config_command(action_results, module) + changed = True + final_snapshots = invoke('get_existing', module) + result['updates'] = action_results + + if (action == 'create' and + module.params['save_snapshot_locally']): + snapshot = get_snapshot(module) + written_file = write_on_file(snapshot, + module.params['snapshot_name'], module) + + result['connected'] = module.connected + result['changed'] = changed + if module._verbosity > 0: + end_state = invoke('get_existing', module) + result['final_snapshots'] = final_snapshots + result['existing_snapshots'] = existing_snapshots + if written_file: + result['report_file'] = written_file + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_snmp_community.py b/network/nxos/nxos_snmp_community.py new file mode 100644 index 00000000000..93ed3f7be4b --- /dev/null +++ b/network/nxos/nxos_snmp_community.py @@ -0,0 +1,503 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_snmp_community +version_added: "2.2" +short_description: Manages SNMP community configs. +description: + - Manages SNMP community configuration. 
+extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +options: + community: + description: + - Case-sensitive community string. + required: true + access: + description: + - Access type for community. + required: false + default: null + choices: ['ro','rw'] + group: + description: + - Group to which the community belongs. + required: false + default: null + acl: + description: + - ACL name to filter snmp requests. + required: false + default: 1 + state: + description: + - Manage the state of the resource. + required: true + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +# ensure snmp community is configured +- nxos_snmp_community: + community: TESTING7 + group: network-operator + state: present + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"group": "network-operator"} +existing: + description: k/v pairs of existing snmp community + type: dict + sample: {} +end_state: + description: k/v pairs of snmp community after module execution + returned: always + type: dict or null + sample: {"acl": "None", "group": "network-operator"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["snmp-server community TESTING7 group network-operator"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, 
tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = 
ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. 
As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when issuing commands containing 'show run'. + """ + if 'xml' in response[0]: + body = [] + elif 'show run' in command: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'show run' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = str(value) + else: + new_dict[new_key] = value + return new_dict + + +def flatten_list(command_lists): 
+ flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def get_snmp_groups(module): + command = 'show snmp group' + data = execute_show_command(command, module)[0] + + group_list = [] + + try: + group_table = data['TABLE_role']['ROW_role'] + for group in group_table: + group_list.append(group['role_name']) + except (KeyError, AttributeError): + return group_list + + return group_list + + +def get_snmp_community(module, find_filter=None): + command = 'show snmp community' + data = execute_show_command(command, module)[0] + + community_dict = {} + + community_map = { + 'grouporaccess': 'group', + 'aclfilter': 'acl' + } + + try: + community_table = data['TABLE_snmp_community']['ROW_snmp_community'] + for each in community_table: + community = apply_key_map(community_map, each) + key = each['community_name'] + community_dict[key] = community + except (KeyError, AttributeError): + return community_dict + + if find_filter: + find = community_dict.get(find_filter, None) + + if find_filter is None or find is None: + return {} + else: + fix_find = {} + for (key, value) in find.iteritems(): + if isinstance(value, str): + fix_find[key] = value.strip() + else: + fix_find[key] = value + return fix_find + + +def config_snmp_community(delta, community): + CMDS = { + 'group': 'snmp-server community {0} group {group}', + 'acl': 'snmp-server community {0} use-acl {acl}' + } + commands = [] + for k, v in delta.iteritems(): + cmd = CMDS.get(k).format(community, **delta) + if cmd: + commands.append(cmd) + cmd = None + return commands + + +def main(): + argument_spec = dict( + community=dict(required=True, type='str'), + access=dict(choices=['ro', 'rw']), + group=dict(type='str'), + acl=dict(type='str'), + state=dict(choices=['absent', 'present'], default='present'), + ) + module = get_network_module(argument_spec=argument_spec, + 
required_one_of=[['access', 'group']], + mutually_exclusive=[['access', 'group']], + supports_check_mode=True) + + access = module.params['access'] + group = module.params['group'] + community = module.params['community'] + acl = module.params['acl'] + state = module.params['state'] + + if access: + if access == 'ro': + group = 'network-operator' + elif access == 'rw': + group = 'network-admin' + + # group check - ensure group being configured exists on the device + configured_groups = get_snmp_groups(module) + + if group not in configured_groups: + module.fail_json(msg="group not on switch." + "please add before moving forward") + + existing = get_snmp_community(module, community) + args = dict(group=group, acl=acl) + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + delta = dict(set(proposed.iteritems()).difference(existing.iteritems())) + + changed = False + end_state = existing + commands = [] + + if state == 'absent': + if existing: + command = "no snmp-server community {0}".format(community) + commands.append(command) + cmds = flatten_list(commands) + elif state == 'present': + if delta: + command = config_snmp_community(dict(delta), community) + commands.append(command) + cmds = flatten_list(commands) + + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + end_state = get_snmp_community(module, community) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = cmds + results['changed'] = changed + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_snmp_contact.py b/network/nxos/nxos_snmp_contact.py new file mode 100644 index 00000000000..b5b97da92c1 --- /dev/null +++ b/network/nxos/nxos_snmp_contact.py @@ -0,0 +1,401 @@ +#!/usr/bin/python +# +# This file is part of 
Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_snmp_contact +version_added: "2.2" +short_description: Manages SNMP contact info. +description: + - Manages SNMP contact information. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +notes: + - C(state=absent) removes the contact configuration if it is configured. +options: + contact: + description: + - Contact information. + required: true + state: + description: + - Manage the state of the resource. 
+ required: true + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +# ensure snmp contact is configured +- nxos_snmp_contact: + contact: Test + state: present + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"contact": "New_Test"} +existing: + description: k/v pairs of existing snmp contact + type: dict + sample: {"contact": "Test"} +end_state: + description: k/v pairs of snmp contact after module execution + returned: always + type: dict + sample: {"contact": "New_Test"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["snmp-server contact New_Test"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = 
self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + 
commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when issuing commands containing 'show run'. 
+ """ + if 'xml' in response[0]: + body = [] + elif 'show run' in command: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'show run' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def get_snmp_contact(module): + contact = {} + contact_regex = '.*snmp-server\scontact\s(?P\S+).*' + command = 'show run snmp' + + body = execute_show_command(command, module, command_type='cli_show_ascii')[0] + + try: + match_contact = re.match(contact_regex, 
body, re.DOTALL) + group_contact = match_contact.groupdict() + contact['contact'] = group_contact["contact"] + except AttributeError: + contact = {} + + return contact + + +def main(): + argument_spec = dict( + contact=dict(required=True, type='str'), + state=dict(choices=['absent', 'present'], + default='present') + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + contact = module.params['contact'] + state = module.params['state'] + + existing = get_snmp_contact(module) + changed = False + proposed = dict(contact=contact) + end_state = existing + commands = [] + + if state == 'absent': + if existing and existing['contact'] == contact: + commands.append('no snmp-server contact') + elif state == 'present': + if not existing or existing['contact'] != contact: + commands.append('snmp-server contact {0}'.format(contact)) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + end_state = get_snmp_contact(module) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = cmds + results['changed'] = changed + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_snmp_host.py b/network/nxos/nxos_snmp_host.py new file mode 100644 index 00000000000..366da22a918 --- /dev/null +++ b/network/nxos/nxos_snmp_host.py @@ -0,0 +1,642 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_snmp_host +version_added: "2.2" +short_description: Manages SNMP host configuration. +description: + - Manages SNMP host configuration parameters. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +notes: + - C(state=absent) removes the host configuration if it is configured. +options: + snmp_host: + description: + - IP address of hostname of target host. + required: true + version: + description: + - SNMP version. + required: false + default: v2c + choices: ['v2c', 'v3'] + community: + description: + - Community string or v3 username. + required: false + default: null + udp: + description: + - UDP port number (0-65535). + required: false + default: null + type: + description: + - type of message to send to host. + required: false + default: traps + choices: ['trap', 'inform'] + vrf: + description: + - VRF to use to source traffic to source. + required: false + default: null + vrf_filter: + description: + - Name of VRF to filter. + required: false + default: null + src_intf: + description: + - Source interface. + required: false + default: null + state: + description: + - Manage the state of the resource. 
+ required: true + default: present + choices: ['present','absent'] + +''' + +EXAMPLES = ''' +# ensure snmp host is configured +- nxos_snmp_host: + snmp_host: 3.3.3.3 + community: TESTING + state: present + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"community": "TESTING", "snmp_host": "3.3.3.3", + "snmp_type": "trap", "version": "v2c", "vrf_filter": "one_more_vrf"} +existing: + description: k/v pairs of existing snmp host + type: dict + sample: {"community": "TESTING", "snmp_type": "trap", + "udp": "162", "v3": "noauth", "version": "v2c", + "vrf": "test_vrf", "vrf_filter": ["test_vrf", + "another_test_vrf"]} +end_state: + description: k/v pairs of switchport after module execution + returned: always + type: dict or null + sample: {"community": "TESTING", "snmp_type": "trap", + "udp": "162", "v3": "noauth", "version": "v2c", + "vrf": "test_vrf", "vrf_filter": ["test_vrf", + "another_test_vrf", "one_more_vrf"]} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["snmp-server host 3.3.3.3 filter-vrf another_test_vrf"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + + +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S 
= list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def 
def execute_config_command(commands, module):
    """Push configuration commands to the device.

    Tries the legacy ``module.configure`` API first; on AttributeError
    falls back to the newer ``module.cli`` interface, prepending
    'configure' (this mutates the caller's list -- callers compensate
    by popping it afterwards).  ShellError failures are reported via
    ``module.fail_json``.
    """
    def _abort(exc):
        # Single funnel for transport failures back to Ansible.
        module.fail_json(msg='Error sending CLI commands',
                         error=str(exc), commands=commands)

    try:
        module.configure(commands)
    except ShellError:
        _abort(get_exception())
    except AttributeError:
        try:
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            module.cli.run_commands()
        except ShellError:
            _abort(get_exception())
+ """ + if 'xml' in response[0]: + body = [] + elif 'show run' in command: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'show run' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = str(value) + else: + new_dict[new_key] = value + return new_dict + + +def get_snmp_host(host, module): + command = 'show snmp host' + body = execute_show_command(command, module) + + host_map = { + 'port': 'udp', + 'version': 'version', + 'level': 'v3', + 'type': 'snmp_type', + 'secname': 'community' + } + + 
def config_snmp_host(delta, proposed, existing, module):
    """Build the CLI commands that move the SNMP host configuration
    from `existing` to `proposed`.

    delta    -- the (key, value) pairs of `proposed` absent from `existing`
    proposed -- normalized parameters; must contain 'snmp_host'
    existing -- normalized current state (keys per get_snmp_host)
    module   -- unused here; kept for signature compatibility

    Returns a list of command strings (possibly empty).
    """
    commands = []
    command_builder = []
    host = proposed['snmp_host']
    cmd = 'snmp-server host {0}'.format(host)

    snmp_type = delta.get('snmp_type', None)
    version = delta.get('version', None)
    ver = delta.get('v3', None)
    community = delta.get('community', None)

    command_builder.append(cmd)
    if any([snmp_type, version, ver, community]):
        # FIX: existing stores the trap/inform setting under 'snmp_type'
        # (see host_map in get_snmp_host); 'type' could never match.
        type_string = snmp_type or existing.get('snmp_type')
        if type_string:
            command_builder.append(type_string)

        version = version or existing.get('version')
        if version:
            # FIX: only emit a version token for recognized values;
            # previously an unexpected version string left `vn` unbound
            # and raised UnboundLocalError.
            vn = {'v2c': '2c', 'v3': '3'}.get(version)
            if vn:
                command_builder.append('version {0}'.format(vn))

        if ver:
            # `ver` is already truthy here, so the fallback can never
            # trigger; kept for parity with the original intent.
            ver_string = ver or existing.get('v3')
            command_builder.append(ver_string)

        if community:
            community_string = community or existing.get('community')
            command_builder.append(community_string)

        commands.append(' '.join(command_builder))

    CMDS = {
        'vrf_filter': 'snmp-server host {0} filter-vrf {vrf_filter}',
        'vrf': 'snmp-server host {0} use-vrf {vrf}',
        'udp': 'snmp-server host {0} udp-port {udp}',
        'src_intf': 'snmp-server host {0} source-interface {src_intf}'
    }

    # FIX: iterate keys instead of Python-2-only delta.iteritems();
    # works identically on Python 2 and 3.
    for key in delta:
        if key in ('vrf_filter', 'vrf', 'udp', 'src_intf'):
            template = CMDS.get(key)
            if template:
                commands.append(template.format(host, **delta))
    return commands
module.params['src_intf'] + v3 = module.params['v3'] + vrf_filter = module.params['vrf_filter'] + vrf = module.params['vrf'] + snmp_type = module.params['snmp_type'] + + state = module.params['state'] + + if snmp_type == 'inform' and version != 'v3': + module.fail_json(msg='inform requires snmp v3') + + if version == 'v2c' and v3: + module.fail_json(msg='param: "v3" should not be used when ' + 'using version v2c') + + if not any([vrf_filter, vrf, udp, src_intf]): + if not all([snmp_type, version, community]): + module.fail_json(msg='when not configuring options like ' + 'vrf_filter, vrf, udp, and src_intf,' + 'the following params are required: ' + 'type, version, community') + + if version == 'v3' and v3 is None: + module.fail_json(msg='when using version=v3, the param v3 ' + '(options: auth, noauth, priv) is also required') + + existing = get_snmp_host(snmp_host, module) + + # existing returns the list of vrfs configured for a given host + # checking to see if the proposed is in the list + store = existing.get('vrf_filter', None) + if existing and store: + if vrf_filter not in existing['vrf_filter']: + existing['vrf_filter'] = None + else: + existing['vrf_filter'] = vrf_filter + + args = dict( + community=community, + snmp_host=snmp_host, + udp=udp, + version=version, + src_intf=src_intf, + vrf_filter=vrf_filter, + v3=v3, + vrf=vrf, + snmp_type=snmp_type + ) + + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + + delta = dict(set(proposed.iteritems()).difference(existing.iteritems())) + + changed = False + commands = [] + end_state = existing + + if state == 'absent': + if existing: + command = remove_snmp_host(snmp_host, existing) + commands.append(command) + elif state == 'present': + if delta: + command = config_snmp_host(delta, proposed, existing, module) + commands.append(command) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + 
execute_config_command(cmds, module) + end_state = get_snmp_host(snmp_host, module) + if 'configure' in cmds: + cmds.pop(0) + + if store: + existing['vrf_filter'] = store + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = cmds + results['changed'] = changed + + module.exit_json(**results) + + +if __name__ == "__main__": + main() diff --git a/network/nxos/nxos_snmp_location.py b/network/nxos/nxos_snmp_location.py new file mode 100644 index 00000000000..e3b90973e72 --- /dev/null +++ b/network/nxos/nxos_snmp_location.py @@ -0,0 +1,422 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_snmp_location +version_added: "2.2" +short_description: Manages SNMP location information. +description: + - Manages SNMP location configuration. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +options: + location: + description: + - Location information. + required: true + state: + description: + - Manage the state of the resource. 
+ required: false + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +# ensure snmp location is configured +- nxos_snmp_location: + location: Test + state: present + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + +# ensure snmp location is not configured +- nxos_snmp_location: + location: Test + state: absent + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"location": "New_Test"} +existing: + description: k/v pairs of existing snmp location + type: dict + sample: {"location": "Test"} +end_state: + description: k/v pairs of location info after module execution + returned: always + type: dict or null + sample: {"location": "New_Test"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["snmp-server location New_Test"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in 
item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = 
def load_config(module, candidate):
    """Diff `candidate` against the device's running config and apply
    the delta (unless in check mode).

    Returns a result dict with `changed` and, when commands were
    needed, the `updates` that were (or would be) sent.
    """
    running = get_config(module)
    updates = [str(entry).strip() for entry in candidate.difference(running)]
    save_requested = module.params['save']

    result = dict(changed=False)
    if not updates:
        return result

    if not module.check_mode:
        # Older and newer connection plugins expose different APIs.
        try:
            module.configure(updates)
        except AttributeError:
            module.config(updates)

        if save_requested:
            try:
                module.config.save_config()
            except AttributeError:
                module.execute(['copy running-config startup-config'])

    result['changed'] = True
    result['updates'] = updates
    return result
+ """ + if 'xml' in response[0]: + body = [] + elif 'show run' in command: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'show run' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = str(value) + else: + new_dict[new_key] = value + return new_dict + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def 
get_snmp_location(module): + location = {} + location_regex = '.*snmp-server\slocation\s(?P\S+).*' + command = 'show run snmp' + + body = execute_show_command(command, module, command_type='cli_show_ascii') + try: + match_location = re.match(location_regex, body[0], re.DOTALL) + group_location = match_location.groupdict() + location['location'] = group_location["location"] + except (AttributeError, TypeError): + location = {} + + return location + + +def main(): + argument_spec = dict( + location=dict(required=True, type='str'), + state=dict(choices=['absent', 'present'], + default='present') + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + + location = module.params['location'] + state = module.params['state'] + + existing = get_snmp_location(module) + changed = False + commands = [] + proposed = dict(location=location) + end_state = existing + + if state == 'absent': + if existing and existing['location'] == location: + commands.append('no snmp-server location') + elif state == 'present': + if not existing or existing['location'] != location: + commands.append('snmp-server location {0}'.format(location)) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + end_state = get_snmp_location(module) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = cmds + results['changed'] = changed + + module.exit_json(**results) + + +from ansible.module_utils.basic import * +if __name__ == "__main__": + main() diff --git a/network/nxos/nxos_snmp_traps.py b/network/nxos/nxos_snmp_traps.py new file mode 100644 index 00000000000..632e8e8d1d7 --- /dev/null +++ b/network/nxos/nxos_snmp_traps.py @@ -0,0 +1,499 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free 
software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_snmp_trap +version_added: "2.2" +short_description: Manages SNMP traps. +description: + - Manages SNMP traps configurations. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) +notes: + - This module works at the group level for traps. If you need to only + enable/disable 1 specific trap within a group, use the M(nxos_command) + module. + - Be aware that you can set a trap only for an enabled feature. +options: + group: + description: + - Case sensitive group. + required: true + choices: ['aaa', 'bridge', 'callhome', 'cfs', 'config', 'entity', + 'feature-control', 'hsrp', 'license', 'link', 'lldp', 'ospf', 'pim', + 'rf', 'rmon', 'snmp', 'storm-control', 'stpx', 'sysmgr', 'system', + 'upgrade', 'vtp', 'all'] + state: + description: + - Manage the state of the resource. 
def to_list(val):
    """Normalize `val` to a list: a sequence is copied, None becomes
    an empty list, any other scalar is wrapped in a one-element list."""
    if isinstance(val, (list, tuple)):
        return list(val)
    return [] if val is None else [val]
== path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config 
def execute_config_command(commands, module):
    """Send configuration commands to the device, handling both the
    legacy ``module.configure`` API and the newer ``module.cli`` API.

    NOTE: on the ``module.cli`` path 'configure' is inserted at the
    front of the caller's `commands` list (callers pop it afterwards).
    ShellError failures are reported through ``module.fail_json``.
    """
    try:
        module.configure(commands)
        return
    except ShellError:
        clie = get_exception()
    except AttributeError:
        try:
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            module.cli.run_commands()
            return
        except ShellError:
            clie = get_exception()
    module.fail_json(msg='Error sending CLI commands',
                     error=str(clie), commands=commands)
+ """ + if 'xml' in response[0]: + body = [] + elif 'show run' in command: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'show run' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = str(value) + else: + new_dict[new_key] = value + return new_dict + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + + +def 
get_snmp_traps(group, module): + command = 'show snmp trap' + body = execute_show_command(command, module) + + trap_key = { + 'description': 'trap', + 'isEnabled': 'enabled' + } + + resource = {} + + try: + resource_table = body[0]['TABLE_snmp_trap']['ROW_snmp_trap'] + + for each_feature in ['aaa', 'bridge', 'callhome', 'cfs', 'config', + 'entity', 'feature-control', 'hsrp', 'license', + 'link', 'lldp', 'ospf', 'pim', 'rf', 'rmon', + 'snmp', 'storm-control', 'stpx', 'sysmgr', + 'system', 'upgrade', 'vtp']: + + resource[each_feature] = [] + + for each_resource in resource_table: + key = str(each_resource['trap_type']) + mapped_trap = apply_key_map(trap_key, each_resource) + + if key != 'Generic': + resource[key].append(mapped_trap) + + except (KeyError, AttributeError): + return resource + + find = resource.get(group, None) + + if group == 'all'.lower(): + return resource + elif find: + trap_resource = {group: resource[group]} + return trap_resource + else: + # if 'find' is None, it means that 'group' is a + # currently disabled feature. 
+ return {} + + +def get_trap_commands(group, state, existing, module): + commands = [] + enabled = False + disabled = False + + if group == 'all': + if state == 'disabled': + for feature in existing: + trap_commands = ['no snmp-server enable traps {0}'.format(feature) for + trap in existing[feature] if trap['enabled'] == 'Yes'] + trap_commands = list(set(trap_commands)) + commands.append(trap_commands) + + elif state == 'enabled': + for feature in existing: + trap_commands = ['snmp-server enable traps {0}'.format(feature) for + trap in existing[feature] if trap['enabled'] == 'No'] + trap_commands = list(set(trap_commands)) + commands.append(trap_commands) + + else: + if group in existing: + for each_trap in existing[group]: + check = each_trap['enabled'] + if check.lower() == 'yes': + enabled = True + if check.lower() == 'no': + disabled = True + + if state == 'disabled' and enabled: + commands.append(['no snmp-server enable traps {0}'.format(group)]) + elif state == 'enabled' and disabled: + commands.append(['snmp-server enable traps {0}'.format(group)]) + else: + module.fail_json(msg='{0} is not a currently ' + 'enabled feature.'.format(group)) + + return commands + + +def main(): + argument_spec = dict( + state=dict(choices=['enabled', 'disabled'], default='enabled'), + group=dict(choices=['aaa', 'bridge', 'callhome', 'cfs', 'config', + 'entity', 'feature-control', 'hsrp', + 'license', 'link', 'lldp', 'ospf', 'pim', 'rf', + 'rmon', 'snmp', 'storm-control', 'stpx', + 'sysmgr', 'system', 'upgrade', 'vtp', 'all'], + required=True), + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + group = module.params['group'].lower() + state = module.params['state'] + + existing = get_snmp_traps(group, module) + proposed = {'group': group} + + changed = False + end_state = existing + commands = get_trap_commands(group, state, existing, module) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + 
module.exit_json(changed=True, commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + end_state = get_snmp_traps(group, module) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = cmds + results['changed'] = changed + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_snmp_user.py b/network/nxos/nxos_snmp_user.py new file mode 100644 index 00000000000..a06a0151019 --- /dev/null +++ b/network/nxos/nxos_snmp_user.py @@ -0,0 +1,561 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_snmp_user +version_added: "2.2" +short_description: Manages SNMP users for monitoring. +description: + - Manages SNMP user configuration. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) +notes: + - Authentication parameters not idempotent. +options: + user: + description: + - Name of the user. + required: true + group: + description: + - Group to which the user will belong to. + required: true + auth: + description: + - Auth parameters for the user. 
+ required: false + default: null + choices: ['md5', 'sha'] + pwd: + description: + - Auth password when using md5 or sha. + required: false + default: null + privacy: + description: + - Privacy password for the user. + required: false + default: null + encrypt: + description: + - Enables AES-128 bit encryption when using privacy password. + required: false + default: null + choices: ['true','false'] + state: + description: + - Manage the state of the resource. + required: false + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +- nxos_snmp_user: + user: ntc + group: network-operator + auth: md5 + pwd: test_password + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"authentication": "md5", "group": "network-operator", + "pwd": "test_password", "user": "ntc"} +existing: + description: + - k/v pairs of existing configuration + type: dict + sample: {"authentication": "no", "encrypt": "none", + "group": ["network-operator"], "user": "ntc"} +end_state: + description: k/v pairs configuration vtp after module execution + returned: always + type: dict + sample: {"authentication": "md5", "encrypt": "none", + "group": ["network-operator"], "user": "ntc"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["snmp-server user ntc network-operator auth md5 test_password"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import 
NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + 
item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module, text=False): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. 
As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when issuing commands containing 'show run'. + """ + if 'xml' in response[0] or response[0] == '\n': + body = [] + elif 'show run' in command or text: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show', text=False): + if module.params['transport'] == 'cli': + if 'show run' not in command and text is False: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module, text=text) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return 
flat_command_list + + +def get_snmp_groups(module): + command = 'show snmp group' + body = execute_show_command(command, module) + g_list = [] + + try: + group_table = body[0]['TABLE_role']['ROW_role'] + for each in group_table: + g_list.append(each['role_name']) + + except (KeyError, AttributeError, IndexError): + return g_list + + return g_list + + +def get_snmp_user(user, module): + command = 'show snmp user {0}'.format(user) + body = execute_show_command(command, module, text=True) + + if 'No such entry' not in body[0]: + body = execute_show_command(command, module) + + resource = {} + group_list = [] + try: + resource_table = body[0]['TABLE_snmp_users']['ROW_snmp_users'] + resource['user'] = str(resource_table['user']) + resource['authentication'] = str(resource_table['auth']).strip() + encrypt = str(resource_table['priv']).strip() + if encrypt.startswith('aes'): + resource['encrypt'] = 'aes-128' + else: + resource['encrypt'] = 'none' + + group_table = resource_table['TABLE_groups']['ROW_groups'] + + groups = [] + try: + for group in group_table: + groups.append(str(group['group']).strip()) + except TypeError: + groups.append(str(group_table['group']).strip()) + + resource['group'] = groups + + except (KeyError, AttributeError, IndexError, TypeError): + return resource + + return resource + + +def remove_snmp_user(user): + return ['no snmp-server user {0}'.format(user)] + + +def config_snmp_user(proposed, user, reset, new): + if reset and not new: + commands = remove_snmp_user(user) + else: + commands = [] + + group = proposed.get('group', None) + + cmd = '' + + if group: + cmd = 'snmp-server user {0} {group}'.format(user, **proposed) + + auth = proposed.get('authentication', None) + pwd = proposed.get('pwd', None) + + if auth and pwd: + cmd += ' auth {authentication} {pwd}'.format(**proposed) + + encrypt = proposed.get('encrypt', None) + privacy = proposed.get('privacy', None) + + if encrypt and privacy: + cmd += ' priv {encrypt} {privacy}'.format(**proposed) 
+ elif privacy: + cmd += ' priv {privacy}'.format(**proposed) + + if cmd: + commands.append(cmd) + + return commands + + +def main(): + argument_spec = dict( + user=dict(required=True, type='str'), + group=dict(type='str', required=True), + pwd=dict(type='str'), + privacy=dict(type='str'), + authentication=dict(choices=['md5', 'sha']), + encrypt=dict(type='bool'), + state=dict(choices=['absent', 'present'], default='present'), + ) + module = get_network_module(argument_spec=argument_spec, + required_together=[['authentication', 'pwd'], + ['encrypt', 'privacy']], + supports_check_mode=True) + + user = module.params['user'] + group = module.params['group'] + pwd = module.params['pwd'] + privacy = module.params['privacy'] + encrypt = module.params['encrypt'] + authentication = module.params['authentication'] + state = module.params['state'] + + if privacy and encrypt: + if not pwd and authentication: + module.fail_json(msg='pwd and authentication must be provided ' + 'when using privacy and encrypt') + + if group and group not in get_snmp_groups(module): + module.fail_json(msg='group not configured yet on switch.') + + existing = get_snmp_user(user, module) + end_state = existing + + store = existing.get('group', None) + if existing: + if group not in existing['group']: + existing['group'] = None + else: + existing['group'] = group + + changed = False + commands = [] + proposed = {} + + if state == 'absent' and existing: + commands.append(remove_snmp_user(user)) + + elif state == 'present': + new = False + reset = False + + args = dict(user=user, pwd=pwd, group=group, privacy=privacy, + encrypt=encrypt, authentication=authentication) + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + + if not existing: + if encrypt: + proposed['encrypt'] = 'aes-128' + commands.append(config_snmp_user(proposed, user, reset, new)) + + elif existing: + if encrypt and not existing['encrypt'].startswith('aes'): + reset = True + proposed['encrypt'] = 'aes-128' + + 
elif encrypt: + proposed['encrypt'] = 'aes-128' + + delta = dict( + set(proposed.iteritems()).difference(existing.iteritems())) + + if delta.get('pwd'): + delta['authentication'] = authentication + + if delta: + delta['group'] = group + + command = config_snmp_user(delta, user, reset, new) + commands.append(command) + + cmds = flatten_list(commands) + results = {} + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + end_state = get_snmp_user(user, module) + if 'configure' in cmds: + cmds.pop(0) + + if store: + existing['group'] = store + + results['proposed'] = proposed + results['existing'] = existing + results['updates'] = cmds + results['changed'] = changed + results['end_state'] = end_state + + module.exit_json(**results) + + +if __name__ == "__main__": + main() diff --git a/network/nxos/nxos_static_route.py b/network/nxos/nxos_static_route.py new file mode 100644 index 00000000000..2ff042d0307 --- /dev/null +++ b/network/nxos/nxos_static_route.py @@ -0,0 +1,472 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_static_route +version_added: "2.2" +short_description: Manages static route configuration +description: + - Manages static route configuration +author: Gabriele Gerbino (@GGabriele) +extends_documentation_fragment: nxos +notes: + - If no vrf is supplied, vrf is set to default. + - If C(state=absent), the route will be removed, regardless of the + non-required parameters. +options: + prefix: + description: + - Destination prefix of static route. + required: true + next_hop: + description: + - Next hop address or interface of static route. + If interface, it must be the fully-qualified interface name. + required: true + vrf: + description: + - VRF for static route. + required: false + default: default + tag: + description: + - Route tag value (numeric). + required: false + default: null + route_name: + description: + - Name of the route. Used with the name parameter on the CLI. + required: false + default: null + pref: + description: + - Preference or administrative difference of route (range 1-255). + required: false + default: null + state: + description: + - Manage the state of the resource. 
+ required: true + choices: ['present','absent'] +''' + +EXAMPLES = ''' +- nxos_static_route: + prefix: "192.168.20.64/24" + next_hop: "3.3.3.3" + route_name: testing + pref: 100 + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"next_hop": "3.3.3.3", "pref": "100", + "prefix": "192.168.20.64/24", "route_name": "testing", + "vrf": "default"} +existing: + description: k/v pairs of existing configuration + returned: verbose mode + type: dict + sample: {} +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: {"next_hop": "3.3.3.3", "pref": "100", + "prefix": "192.168.20.0/24", "route_name": "testing", + "tag": null} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["ip route 192.168.20.0/24 3.3.3.3 name testing 100"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +# COMMON CODE FOR MIGRATION +import re + +import ansible.module_utils.nxos +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine, dumps +from ansible.module_utils.network import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): 
+ return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + if self._device_os == 'junos': + return dumps(section, output='lines') + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + 
return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + +def state_present(module, candidate, prefix): + commands = list() + invoke('set_route', module, commands, prefix) + if commands: + if module.params['vrf'] == 'default': + candidate.add(commands, parents=[]) + else: + candidate.add(commands, parents=['vrf context {0}'.format(module.params['vrf'])]) + + +def state_absent(module, candidate, prefix): + netcfg = get_config(module) + commands = list() + parents = 'vrf context {0}'.format(module.params['vrf']) + invoke('set_route', module, commands, prefix) + if module.params['vrf'] == 'default': + config = netcfg.get_section(commands[0]) + if config: + invoke('remove_route', module, commands, config, prefix) + candidate.add(commands, parents=[]) + else: + config = netcfg.get_section(parents) + splitted_config = config.split('\n') + splitted_config = map(str.strip, splitted_config) + if commands[0] in splitted_config: + invoke('remove_route', module, commands, config, prefix) + candidate.add(commands, parents=[parents]) + + +def fix_prefix_to_regex(prefix): + prefix = prefix.replace('.', '\.').replace('/', '\/') + return prefix + + +def get_existing(module, prefix, warnings): + key_map = ['tag', 'pref', 'route_name', 'next_hop'] + netcfg = get_config(module) + 
parents = 'vrf context {0}'.format(module.params['vrf']) + prefix_to_regex = fix_prefix_to_regex(prefix) + + route_regex = ('.*ip\sroute\s{0}\s(?P\S+)(\sname\s(?P\S+))?' + '(\stag\s(?P\d+))?(\s(?P\d+)).*'.format(prefix_to_regex)) + + if module.params['vrf'] == 'default': + config = str(netcfg) + else: + config = netcfg.get_section(parents) + + if config: + try: + match_route = re.match(route_regex, config, re.DOTALL) + group_route = match_route.groupdict() + + for key in key_map: + if key not in group_route: + group_route[key] = '' + group_route['prefix'] = prefix + group_route['vrf'] = module.params['vrf'] + except (AttributeError, TypeError): + group_route = {} + else: + group_route = {} + msg = ("VRF {0} didn't exist.".format(module.params['vrf'])) + if msg not in warnings: + warnings.append(msg) + + return group_route + + +def remove_route(module, commands, config, prefix): + commands.append('no ip route {0} {1}'.format(prefix, module.params['next_hop'])) + + +def set_route(module, commands, prefix): + route_cmd = 'ip route {0} {1}'.format(prefix, module.params['next_hop']) + + if module.params['route_name']: + route_cmd += ' name {0}'.format(module.params['route_name']) + if module.params['tag']: + route_cmd += ' tag {0}'.format(module.params['tag']) + if module.params['pref']: + route_cmd += ' {0}'.format(module.params['pref']) + commands.append(route_cmd) + + +def get_dotted_mask(mask): + bits = 0 + for i in xrange(32-mask,32): + bits |= (1 << i) + mask = ("%d.%d.%d.%d" % ((bits & 0xff000000) >> 24, + (bits & 0xff0000) >> 16, (bits & 0xff00) >> 8 , (bits & 0xff))) + return mask + + +def get_network_start(address, netmask): + address = address.split('.') + netmask = netmask.split('.') + return [str(int(address[x]) & int(netmask[x])) for x in range(0, 4)] + + +def network_from_string(address, mask, module): + octects = address.split('.') + + if len(octects) > 4: + module.fail_json(msg='Incorrect address format.', address=address) + + for octect in octects: + 
try: + if int(octect) < 0 or int(octect) > 255: + module.fail_json(msg='Address may contain invalid values.', + address=address) + except ValueError: + module.fail_json(msg='Address may contain non-integer values.', + address=address) + + try: + if int(mask) < 0 or int(mask) > 32: + module.fail_json(msg='Incorrect mask value.', mask=mask) + except ValueError: + module.fail_json(msg='Mask may contain non-integer values.', mask=mask) + + netmask = get_dotted_mask(int(mask)) + return '.'.join(get_network_start(address, netmask)) + + +def normalize_prefix(module, prefix): + splitted_prefix = prefix.split('/') + + address = splitted_prefix[0] + if len(splitted_prefix) > 2: + module.fail_json(msg='Incorrect address format.', address=address) + elif len(splitted_prefix) == 2: + mask = splitted_prefix[1] + network = network_from_string(address, mask, module) + + normalized_prefix = str(network) + '/' + str(mask) + else: + normalized_prefix = prefix + '/' + str(32) + + return normalized_prefix + + +def main(): + argument_spec = dict( + prefix=dict(required=True, type='str'), + next_hop=dict(required=True, type='str'), + vrf=dict(type='str', default='default'), + tag=dict(type='str'), + route_name=dict(type='str'), + pref=dict(type='str'), + state=dict(choices=['absent', 'present'], + default='present'), + include_defaults=dict(default=True), + + config=dict(), + save=dict(type='bool', default=False) + ) + + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + state = module.params['state'] + + result = dict(changed=False) + warnings = list() + prefix = invoke('normalize_prefix', module, module.params['prefix']) + + existing = invoke('get_existing', module, prefix, warnings) + end_state = existing + + args = ['route_name', 'vrf', 'pref', 'tag', 'next_hop', 'prefix'] + proposed = dict((k, v) for k, v in module.params.iteritems() if v is not None and k in args) + + if state == 'present' or (state == 'absent' and existing): + candidate = 
CustomNetworkConfig(indent=3) + invoke('state_%s' % state, module, candidate, prefix) + + try: + response = load_config(module, candidate) + result.update(response) + except Exception: + exc = get_exception() + module.fail_json(msg=str(exc)) + else: + result['updates'] = [] + + result['warnings'] = warnings + + if module._verbosity > 0: + end_state = invoke('get_existing', module, prefix, warnings) + result['end_state'] = end_state + result['existing'] = existing + result['proposed'] = proposed + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_switchport.py b/network/nxos/nxos_switchport.py new file mode 100644 index 00000000000..f0e06163ce1 --- /dev/null +++ b/network/nxos/nxos_switchport.py @@ -0,0 +1,835 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_switchport +version_added: "2.1" +short_description: Manages Layer 2 switchport interfaces. +extends_documentation_fragment: nxos +description: + - Manages Layer 2 interfaces +author: Jason Edelman (@jedelman8) +notes: + - When C(state=absent), VLANs can be added/removed from trunk links and + the existing access VLAN can be 'unconfigured' to just having VLAN 1 + on that interface. 
+ - When working with trunks VLANs the keywords add/remove are always sent + in the `switchport trunk allowed vlan` command. Use verbose mode to see + commands sent. + - When C(state=unconfigured), the interface will result with having a default + Layer 2 interface, i.e. vlan 1 in access mode. +options: + interface: + description: + - Full name of the interface, i.e. Ethernet1/1. + required: true + default: null + mode: + description: + - Mode for the Layer 2 port. + required: false + default: null + choices: ['access','trunk'] + access_vlan: + description: + - If C(mode=access), used as the access VLAN ID. + required: false + default: null + native_vlan: + description: + - If C(mode=trunk), used as the trunk native VLAN ID. + required: false + default: null + trunk_vlans: + description: + - If C(mode=trunk), used as the VLAN range to ADD or REMOVE + from the trunk. + aliases: + - trunk_add_vlans + required: false + default: null + state: + description: + - Manage the state of the resource. + required: false + default: present + choices: ['present','absent', 'unconfigured'] + trunk_allowed_vlans: + description: + - if C(mode=trunk), these are the only VLANs that will be + configured on the trunk, i.e. "2-10,15". 
+ required: false + version_added: 2.2 + default: null +''' +EXAMPLES = ''' +- name: Ensure Eth1/5 is in its default switchport state + nxos_switchport: + interface: eth1/5 + state: unconfigured + host: "{{ inventory_hostname }}" + +- name: Ensure Eth1/5 is configured for access vlan 20 + nxos_switchport: + interface: eth1/5 + mode: access + access_vlan: 20 + host: "{{ inventory_hostname }}" + +- name: Ensure Eth1/5 only has vlans 5-10 as trunk vlans + nxos_switchport: + interface: eth1/5 + mode: trunk + native_vlan: 10 + trunk_vlans: 5-10 + host: "{{ inventory_hostname }}" + +- name: Ensure eth1/5 is a trunk port and ensure 2-50 are being tagged (doesn't mean others aren't also being tagged) + nxos_switchport: + interface: eth1/5 + mode: trunk + native_vlan: 10 + trunk_vlans: 2-50 + host: "{{ inventory_hostname }}" + +- name: Ensure these VLANs are not being tagged on the trunk + nxos_switchport: + interface: eth1/5 + mode: trunk + trunk_vlans: 51-4094 + host: "{{ inventory_hostname }} " + state: absent +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"access_vlan": "10", "interface": "eth1/5", "mode": "access"} +existing: + description: k/v pairs of existing switchport + type: dict + sample: {"access_vlan": "10", "access_vlan_name": "VLAN0010", + "interface": "Ethernet1/5", "mode": "access", + "native_vlan": "1", "native_vlan_name": "default", + "switchport": "Enabled", "trunk_vlans": "1-4094"} +end_state: + description: k/v pairs of switchport after module execution + returned: always + type: dict or null + sample: {"access_vlan": "10", "access_vlan_name": "VLAN0010", + "interface": "Ethernet1/5", "mode": "access", + "native_vlan": "1", "native_vlan_name": "default", + "switchport": "Enabled", "trunk_vlans": "1-4094"} +updates: + description: command string sent to the device + returned: always + type: list + sample: ["interface eth1/5", "switchport access vlan 20"] +changed: + 
description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to 
config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +def get_interface_type(interface): + """Gets the type of interface + Args: + interface (str): full name of interface, i.e. 
Ethernet1/1, loopback10, + port-channel20, vlan20 + Returns: + type of interface: ethernet, svi, loopback, management, portchannel, + or unknown + """ + if interface.upper().startswith('ET'): + return 'ethernet' + elif interface.upper().startswith('VL'): + return 'svi' + elif interface.upper().startswith('LO'): + return 'loopback' + elif interface.upper().startswith('MG'): + return 'management' + elif interface.upper().startswith('MA'): + return 'management' + elif interface.upper().startswith('PO'): + return 'portchannel' + else: + return 'unknown' + + +def get_interface_mode(interface, module): + """Gets current mode of interface: layer2 or layer3 + Args: + device (Device): This is the device object of an NX-API enabled device + using the Device class within device.py + interface (string): full name of interface, i.e. Ethernet1/1, + loopback10, port-channel20, vlan20 + Returns: + str: 'layer2' or 'layer3' + """ + command = 'show interface ' + interface + intf_type = get_interface_type(interface) + body = execute_show_command(command, module) + mode = 'unknown' + interface_table = {} + + try: + interface_table = body[0]['TABLE_interface']['ROW_interface'] + except (KeyError, AttributeError, IndexError): + return mode + + if interface_table: + # HACK FOR NOW + if intf_type in ['ethernet', 'portchannel']: + mode = str(interface_table.get('eth_mode', 'layer3')) + if mode in ['access', 'trunk']: + mode = 'layer2' + if mode == 'routed': + mode = 'layer3' + elif intf_type == 'loopback' or intf_type == 'svi': + mode = 'layer3' + return mode + + +def interface_is_portchannel(interface, module): + """Checks to see if an interface is part of portchannel bundle + Args: + interface (str): full name of interface, i.e. 
Ethernet1/1 + Returns: + True/False based on if interface is a member of a portchannel bundle + """ + intf_type = get_interface_type(interface) + if intf_type == 'ethernet': + command = 'show interface ' + interface + body = execute_show_command(command, module) + try: + interface_table = body[0]['TABLE_interface']['ROW_interface'] + except (KeyError, AttributeError, IndexError): + interface_table = None + + if interface_table: + state = interface_table.get('eth_bundle') + if state: + return True + else: + return False + + return False + + +def get_switchport(port, module): + """Gets current config of L2 switchport + Args: + device (Device): This is the device object of an NX-API enabled device + using the Device class within device.py + port (str): full name of interface, i.e. Ethernet1/1 + Returns: + dictionary with k/v pairs for L2 vlan config + """ + + command = 'show interface {0} switchport'.format(port) + + body = execute_show_command(command, module) + + try: + body = execute_show_command(command, module)[0] + except IndexError: + body = [] + + if body: + key_map = { + "interface": "interface", + "oper_mode": "mode", + "switchport": "switchport", + "access_vlan": "access_vlan", + "access_vlan_name": "access_vlan_name", + "native_vlan": "native_vlan", + "native_vlan_name": "native_vlan_name", + "trunk_vlans": "trunk_vlans" + } + + sp_table = body['TABLE_interface']['ROW_interface'] + + sp = apply_key_map(key_map, sp_table) + + return sp + else: + return {} + + +def remove_switchport_config_commands(interface, existing, proposed, module): + mode = proposed.get('mode') + commands = [] + command = None + if mode == 'access': + av_check = existing.get('access_vlan') == proposed.get('access_vlan') + if av_check: + command = 'no switchport access vlan {0}'.format( + existing.get('access_vlan')) + commands.append(command) + elif mode == 'trunk': + tv_check = existing.get('trunk_vlans_list') == proposed.get('trunk_vlans_list') + if not tv_check: + existing_vlans = 
existing.get('trunk_vlans_list') + proposed_vlans = proposed.get('trunk_vlans_list') + vlans_to_remove = set(proposed_vlans).intersection(existing_vlans) + if vlans_to_remove: + command = 'switchport trunk allowed vlan remove {0}'.format( + proposed.get('trunk_vlans', proposed.get('trunk_allowed_vlans'))) + commands.append(command) + native_check = existing.get( + 'native_vlan') == proposed.get('native_vlan') + if native_check and proposed.get('native_vlan'): + command = 'no switchport trunk native vlan {0}'.format( + existing.get('native_vlan')) + commands.append(command) + if commands: + commands.insert(0, 'interface ' + interface) + return commands + + +def get_switchport_config_commands(interface, existing, proposed, module): + """Gets commands required to config a given switchport interface + """ + + proposed_mode = proposed.get('mode') + existing_mode = existing.get('mode') + + commands = [] + command = None + if proposed_mode != existing_mode: + if proposed_mode == 'trunk': + command = 'switchport mode trunk' + elif proposed_mode == 'access': + command = 'switchport mode access' + if command: + commands.append(command) + + if proposed_mode == 'access': + av_check = existing.get('access_vlan') == proposed.get('access_vlan') + if not av_check: + command = 'switchport access vlan {0}'.format( + proposed.get('access_vlan')) + commands.append(command) + elif proposed_mode == 'trunk': + tv_check = existing.get('trunk_vlans_list') == proposed.get('trunk_vlans_list') + if not tv_check: + if proposed.get('allowed'): + command = 'switchport trunk allowed vlan {0}'.format(proposed.get('trunk_allowed_vlans')) + commands.append(command) + else: + existing_vlans = existing.get('trunk_vlans_list') + proposed_vlans = proposed.get('trunk_vlans_list') + vlans_to_add = set(proposed_vlans).difference(existing_vlans) + if vlans_to_add: + command = 'switchport trunk allowed vlan add {0}'.format(proposed.get('trunk_vlans')) + commands.append(command) + + native_check = 
existing.get( + 'native_vlan') == proposed.get('native_vlan') + if not native_check and proposed.get('native_vlan'): + command = 'switchport trunk native vlan {0}'.format( + proposed.get('native_vlan')) + commands.append(command) + if commands: + commands.insert(0, 'interface ' + interface) + return commands + + +def is_switchport_default(existing): + """Determines if switchport has a default config based on mode + Args: + existing (dict): existing switchport configuration from Ansible mod + Returns: + boolean: True if switchport has OOB Layer 2 config, i.e. + vlan 1 and trunk all and mode is access + """ + + c1 = existing['access_vlan'] == '1' + c2 = existing['native_vlan'] == '1' + c3 = existing['trunk_vlans'] == '1-4094' + c4 = existing['mode'] == 'access' + + default = c1 and c2 and c3 and c4 + + return default + + +def default_switchport_config(interface): + commands = [] + commands.append('interface ' + interface) + commands.append('switchport mode access') + commands.append('switch access vlan 1') + commands.append('switchport trunk native vlan 1') + commands.append('switchport trunk allowed vlan all') + return commands + + +def vlan_range_to_list(vlans): + result = [] + if vlans: + for part in vlans.split(','): + if part == 'none': + break + if '-' in part: + a, b = part.split('-') + a, b = int(a), int(b) + result.extend(range(a, b + 1)) + else: + a = int(part) + result.append(a) + return numerical_sort(result) + return result + + +def get_list_of_vlans(module): + + command = 'show vlan' + body = execute_show_command(command, module) + vlan_list = [] + vlan_table = body[0].get('TABLE_vlanbrief')['ROW_vlanbrief'] + + if isinstance(vlan_table, list): + for vlan in vlan_table: + vlan_list.append(str(vlan['vlanshowbr-vlanid-utf'])) + else: + vlan_list.append('1') + + return vlan_list + + +def numerical_sort(string_int_list): + """Sorts list of strings/integers that are digits in numerical order. 
+ """ + + as_int_list = [] + as_str_list = [] + for vlan in string_int_list: + as_int_list.append(int(vlan)) + as_int_list.sort() + for vlan in as_int_list: + as_str_list.append(str(vlan)) + return as_str_list + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + new_dict[new_key] = str(value) + return new_dict + + +def apply_value_map(value_map, resource): + for key, value in value_map.items(): + resource[key] = value[resource.get(key)] + return resource + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when issuing commands containing 'show run'. 
+ """ + if 'xml' in response[0] or response[0] == '\n': + body = [] + elif 'status' in command: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'status' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def main(): + + argument_spec = dict( + interface=dict(required=True, type='str'), + mode=dict(choices=['access', 'trunk'], required=False), + access_vlan=dict(type='str', required=False), + native_vlan=dict(type='str', required=False), + 
trunk_vlans=dict(type='str', aliases=['trunk_add_vlans'], required=False), + trunk_allowed_vlans=dict(type='str', required=False), + state=dict(choices=['absent', 'present', 'unconfigured'], + default='present') + ) + module = get_network_module(argument_spec=argument_spec, + mutually_exclusive=[['access_vlan', 'trunk_vlans'], + ['access_vlan', 'native_vlan'], + ['access_vlan', 'trunk_allowed_vlans']], + supports_check_mode=True) + + interface = module.params['interface'] + mode = module.params['mode'] + access_vlan = module.params['access_vlan'] + state = module.params['state'] + trunk_vlans = module.params['trunk_vlans'] + native_vlan = module.params['native_vlan'] + trunk_allowed_vlans = module.params['trunk_allowed_vlans'] + + args = dict(interface=interface, mode=mode, access_vlan=access_vlan, + native_vlan=native_vlan, trunk_vlans=trunk_vlans, + trunk_allowed_vlans=trunk_allowed_vlans) + + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + + interface = interface.lower() + + if mode == 'access' and state == 'present' and not access_vlan: + module.fail_json(msg='access_vlan param is required when ' + 'mode=access && state=present') + + if mode == 'trunk' and access_vlan: + module.fail_json(msg='access_vlan param not supported when ' + 'using mode=trunk') + + current_mode = get_interface_mode(interface, module) + + # Current mode will return layer3, layer2, or unknown + if current_mode == 'unknown' or current_mode == 'layer3': + module.fail_json(msg='Ensure interface is configured to be a L2' + '\nport first before using this module. You can use' + '\nthe nxos_interface module for this.') + + if interface_is_portchannel(interface, module): + module.fail_json(msg='Cannot change L2 config on physical ' + '\nport because it is in a portchannel. 
' + '\nYou should update the portchannel config.') + + # existing will never be null for Eth intfs as there is always a default + existing = get_switchport(interface, module) + + # Safeguard check + # If there isn't an existing, something is wrong per previous comment + if not existing: + module.fail_json(msg='Make sure you are using the FULL interface name') + + current_vlans = get_list_of_vlans(module) + + if state == 'present': + if access_vlan and access_vlan not in current_vlans: + module.fail_json(msg='You are trying to configure a VLAN' + ' on an interface that\ndoes not exist on the ' + ' switch yet!', vlan=access_vlan) + elif native_vlan and native_vlan not in current_vlans: + module.fail_json(msg='You are trying to configure a VLAN' + ' on an interface that\ndoes not exist on the ' + ' switch yet!', vlan=native_vlan) + + if trunk_vlans or trunk_allowed_vlans: + if trunk_vlans: + trunk_vlans_list = vlan_range_to_list(trunk_vlans) + elif trunk_allowed_vlans: + trunk_vlans_list = vlan_range_to_list(trunk_allowed_vlans) + proposed['allowed'] = True + + existing_trunks_list = vlan_range_to_list( + (existing['trunk_vlans']) + ) + + existing['trunk_vlans_list'] = existing_trunks_list + proposed['trunk_vlans_list'] = trunk_vlans_list + + changed = False + + commands = [] + if state == 'present': + command = get_switchport_config_commands(interface, existing, proposed, module) + commands.append(command) + elif state == 'unconfigured': + is_default = is_switchport_default(existing) + if not is_default: + command = default_switchport_config(interface) + commands.append(command) + elif state == 'absent': + command = remove_switchport_config_commands(interface, existing, + proposed, module) + commands.append(command) + + if trunk_vlans or trunk_allowed_vlans: + existing.pop('trunk_vlans_list') + proposed.pop('trunk_vlans_list') + + end_state = existing + + cmds = flatten_list(commands) + + if cmds: + if module.check_mode: + module.exit_json(changed=True, 
commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + end_state = get_switchport(interface, module) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = cmds + results['changed'] = changed + + module.exit_json(**results) + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_udld.py b/network/nxos/nxos_udld.py new file mode 100644 index 00000000000..8318f4025be --- /dev/null +++ b/network/nxos/nxos_udld.py @@ -0,0 +1,505 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- + +module: nxos_udld +version_added: "2.2" +short_description: Manages UDLD global configuration params. +description: + - Manages UDLD global configuration params. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) +notes: + - When C(state=absent), it unconfigures existing settings C(msg_time) and set it + to its default value of 15. It is cleaner to always use C(state=present). + - Module will fail if the udld feature has not been previously enabled. +options: + aggressive: + description: + - Toggles aggressive mode. 
+ required: false + default: null + choices: ['enabled','disabled'] + msg_time: + description: + - Message time in seconds for UDLD packets. + required: false + default: null + reset: + description: + - Ability to reset UDLD down interfaces. + required: false + default: null + choices: ['true','false'] + state: + description: + - Manage the state of the resource. + required: false + default: present + choices: ['present','absent'] + +''' +EXAMPLES = ''' +# ensure udld aggressive mode is globally disabled and se global message interval is 20 +- nxos_udld: + aggressive: disabled + msg_time: 20 + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + +# Ensure agg mode is globally enabled and msg time is 15 +- nxos_udld: + aggressive: enabled + msg_time: 15 + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"aggressive": "enabled", "msg_time": "40"} +existing: + description: + - k/v pairs of existing udld configuration + type: dict + sample: {"aggressive": "disabled", "msg_time": "15"} +end_state: + description: k/v pairs of udld configuration after module execution + returned: always + type: dict + sample: {"aggressive": "enabled", "msg_time": "40"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["udld message-time 40", "udld aggressive"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + + +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def 
to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = 
line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. 
As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when issuing commands containing 'show run'. + """ + if 'xml' in response[0] or response[0] == '\n': + body = [] + elif 'show run' in command: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'show run' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def apply_key_map(key_map, table): 
+ new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = str(value) + else: + new_dict[new_key] = value + return new_dict + + + +def get_commands_config_udld_global(delta, reset): + config_args = { + 'enabled': 'udld aggressive', + 'disabled': 'no udld aggressive', + 'msg_time': 'udld message-time {msg_time}' + } + commands = [] + for param, value in delta.iteritems(): + if param == 'aggressive': + if value == 'enabled': + command = 'udld aggressive' + elif value == 'disabled': + command = 'no udld aggressive' + else: + command = config_args.get(param, 'DNE').format(**delta) + if command and command != 'DNE': + commands.append(command) + command = None + + if reset: + command = 'udld reset' + commands.append(command) + return commands + + +def get_commands_remove_udld_global(delta): + config_args = { + 'aggressive': 'no udld aggressive', + 'msg_time': 'no udld message-time {msg_time}', + } + commands = [] + for param, value in delta.iteritems(): + command = config_args.get(param, 'DNE').format(**delta) + if command and command != 'DNE': + commands.append(command) + command = None + return commands + + +def get_udld_global(module): + command = 'show udld global' + udld_table = execute_show_command(command, module)[0] + + status = str(udld_table.get('udld-global-mode', None)) + if status == 'enabled-aggressive': + aggressive = 'enabled' + else: + aggressive = 'disabled' + + interval = str(udld_table.get('message-interval', None)) + udld = dict(msg_time=interval, aggressive=aggressive) + + return udld + + +def main(): + argument_spec = dict( + aggressive=dict(required=False, choices=['enabled', 'disabled']), + msg_time=dict(required=False, type='str'), + reset=dict(required=False, type='bool'), + state=dict(choices=['absent', 'present'], default='present'), + ) + module = get_network_module(argument_spec=argument_spec, + required_one_of=[['aggressive', 'msg_time', 
'reset']], + supports_check_mode=True) + + aggressive = module.params['aggressive'] + msg_time = module.params['msg_time'] + reset = module.params['reset'] + state = module.params['state'] + + if (aggressive or reset) and state == 'absent': + module.fail_json(msg="It's better to use state=present when " + "configuring or unconfiguring aggressive mode " + "or using reset flag. state=absent is just for " + "when using msg_time param.") + + if msg_time: + try: + msg_time_int = int(msg_time) + if msg_time_int < 7 or msg_time_int > 90: + raise ValueError + except ValueError: + module.fail_json(msg='msg_time must be an integer' + 'between 7 and 90') + + args = dict(aggressive=aggressive, msg_time=msg_time, reset=reset) + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + + existing = get_udld_global(module) + end_state = existing + + delta = set(proposed.iteritems()).difference(existing.iteritems()) + changed = False + + commands = [] + if state == 'present': + if delta: + command = get_commands_config_udld_global(dict(delta), reset) + commands.append(command) + + elif state == 'absent': + common = set(proposed.iteritems()).intersection(existing.iteritems()) + if common: + command = get_commands_remove_udld_global(dict(common)) + commands.append(command) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + end_state = get_udld_global(module) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = cmds + results['changed'] = changed + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_udld_interface.py b/network/nxos/nxos_udld_interface.py new file mode 100644 index 00000000000..f73670c2b1a --- /dev/null +++ b/network/nxos/nxos_udld_interface.py @@ 
-0,0 +1,518 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_udld_interface +version_added: "2.2" +short_description: Manages UDLD interface configuration params. +description: + - Manages UDLD interface configuration params. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) +notes: + - Feature UDLD must be enabled on the device to use this module. +options: + mode: + description: + - Manages UDLD mode for an interface. + required: true + choices: ['enabled','disabled','aggressive'] + interface: + description: + - FULL name of the interface, i.e. Ethernet1/1- + required: true + state: + description: + - Manage the state of the resource. 
+ required: false + default: present + choices: ['present','absent'] +''' +EXAMPLES = ''' +# ensure Ethernet1/1 is configured to be in aggressive mode +- nxos_udld_interface: + interface: Ethernet1/1 + mode: aggressive + state: present + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + +# Remove the aggressive config only if it's currently in aggressive mode and then disable udld (switch default) +- nxos_udld_interface: + interface: Ethernet1/1 + mode: aggressive + state: absent + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + +# ensure Ethernet1/1 has aggressive mode enabled +- nxos_udld_interface: + interface: Ethernet1/1 + mode: enabled + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"mode": "enabled"} +existing: + description: + - k/v pairs of existing configuration + type: dict + sample: {"mode": "aggressive"} +end_state: + description: k/v pairs of configuration after module execution + returned: always + type: dict + sample: {"mode": "enabled"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["interface ethernet1/33", + "no udld aggressive ; no udld disable"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + + +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return 
list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def 
get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. 
Instead, the output will be a raw string + when issuing commands containing 'show run'. + """ + if 'xml' in response[0] or response[0] == '\n': + body = [] + elif 'show run' in command: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'show run' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def get_udld_interface(module, interface): + command = 'show udld {0}'.format(interface) + interface_udld = {} + mode = None + try: + body = 
execute_show_command(command, module)[0] + table = body['TABLE_interface']['ROW_interface'] + + status = str(table.get('mib-port-status', None)) + # Note: 'mib-aggresive-mode' is NOT a typo + agg = str(table.get('mib-aggresive-mode', 'disabled')) + + if agg == 'enabled': + mode = 'aggressive' + else: + mode = status + + interface_udld['mode'] = mode + + except (KeyError, AttributeError, IndexError): + interface_udld = {} + + return interface_udld + + +def is_interface_copper(module, interface): + command = 'show interface status' + copper = [] + try: + body = execute_show_command(command, module)[0] + table = body['TABLE_interface']['ROW_interface'] + for each in table: + itype = each.get('type', 'DNE') + if 'CU' in itype or '1000' in itype or '10GBaseT' in itype: + copper.append(str(each['interface'].lower())) + except (KeyError, AttributeError): + pass + + if interface in copper: + found = True + else: + found = False + + return found + + +def get_commands_config_udld_interface(delta, interface, module, existing): + commands = [] + copper = is_interface_copper(module, interface) + if delta: + mode = delta['mode'] + if mode == 'aggressive': + command = 'udld aggressive' + elif copper: + if mode == 'enabled': + if existing['mode'] == 'aggressive': + command = 'no udld aggressive ; udld enable' + else: + command = 'udld enable' + elif mode == 'disabled': + command = 'no udld enable' + elif not copper: + if mode == 'enabled': + if existing['mode'] == 'aggressive': + command = 'no udld aggressive ; no udld disable' + else: + command = 'no udld disable' + elif mode == 'disabled': + command = 'udld disable' + if command: + commands.append(command) + commands.insert(0, 'interface {0}'.format(interface)) + + return commands + + +def get_commands_remove_udld_interface(delta, interface, module, existing): + commands = [] + copper = is_interface_copper(module, interface) + + if delta: + mode = delta['mode'] + if mode == 'aggressive': + command = 'no udld aggressive' + elif 
copper: + if mode == 'enabled': + command = 'no udld enable' + elif mode == 'disabled': + command = 'udld enable' + elif not copper: + if mode == 'enabled': + command = 'udld disable' + elif mode == 'disabled': + command = 'no udld disable' + if command: + commands.append(command) + commands.insert(0, 'interface {0}'.format(interface)) + + return commands + + +def main(): + argument_spec = dict( + mode=dict(choices=['enabled', 'disabled', 'aggressive'], + required=True), + interface=dict(type='str', required=True), + state=dict(choices=['absent', 'present'], default='present'), + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + interface = module.params['interface'].lower() + mode = module.params['mode'] + state = module.params['state'] + + proposed = dict(mode=mode) + existing = get_udld_interface(module, interface) + end_state = existing + + delta = dict(set(proposed.iteritems()).difference(existing.iteritems())) + + changed = False + commands = [] + if state == 'present': + if delta: + command = get_commands_config_udld_interface(delta, interface, + module, existing) + commands.append(command) + elif state == 'absent': + common = set(proposed.iteritems()).intersection(existing.iteritems()) + if common: + command = get_commands_remove_udld_interface( + dict(common), interface, module, existing + ) + commands.append(command) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + end_state = get_udld_interface(module, interface) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = cmds + results['changed'] = changed + + module.exit_json(**results) + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_vlan.py b/network/nxos/nxos_vlan.py new file mode 
100644 index 00000000000..67719262bd0 --- /dev/null +++ b/network/nxos/nxos_vlan.py @@ -0,0 +1,661 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_vlan +version_added: "2.1" +short_description: Manages VLAN resources and attributes. +description: + - Manages VLAN configurations on NX-OS switches. +author: Jason Edelman (@jedelman8) +extends_documentation_fragment: nxos +options: + vlan_id: + description: + - Single VLAN ID. + required: false + default: null + vlan_range: + description: + - Range of VLANs such as 2-10 or 2,5,10-15, etc. + required: false + default: null + name: + description: + - Name of VLAN. + required: false + default: null + vlan_state: + description: + - Manage the vlan operational state of the VLAN + (equivalent to state {active | suspend} command. + required: false + default: active + choices: ['active','suspend'] + admin_state: + description: + - Manage the VLAN administrative state of the VLAN equivalent + to shut/no shut in VLAN config mode. + required: false + default: up + choices: ['up','down'] + mapped_vni: + description: + - The Virtual Network Identifier (VNI) ID that is mapped to the + VLAN. Valid values are integer and keyword 'default'. 
+ required: false + default: null + version_added: "2.2" + state: + description: + - Manage the state of the resource. + required: false + default: present + choices: ['present','absent'] + +''' +EXAMPLES = ''' +- name: Ensure a range of VLANs are not present on the switch + nxos_vlan: + vlan_range: "2-10,20,50,55-60,100-150" + host: 68.170.147.165 + username: cisco + password: cisco + state: absent + transport: nxapi + +- name: Ensure VLAN 50 exists with the name WEB and is in the shutdown state + nxos_vlan: + vlan_id: 50 + host: 68.170.147.165 + admin_state: down + name: WEB + transport: nxapi + username: cisco + password: cisco + +- name: Ensure VLAN is NOT on the device + nxos_vlan: + vlan_id: 50 + host: 68.170.147.165 + state: absent + transport: nxapi + username: cisco + password: cisco +''' + +RETURN = ''' + +proposed_vlans_list: + description: list of VLANs being proposed + returned: always + type: list + sample: ["100"] +existing_vlans_list: + description: list of existing VLANs on the switch prior to making changes + returned: always + type: list + sample: ["1", "2", "3", "4", "5", "20"] +end_state_vlans_list: + description: list of VLANs after the module is executed + returned: always + type: list + sample: ["1", "2", "3", "4", "5", "20", "100"] +proposed: + description: k/v pairs of parameters passed into module (does not include + vlan_id or vlan_range) + returned: always + type: dict or null + sample: {"admin_state": "down", "name": "app_vlan", + "vlan_state": "suspend", "mapped_vni": "5000"} +existing: + description: k/v pairs of existing vlan or null when using vlan_range + returned: always + type: dict + sample: {"admin_state": "down", "name": "app_vlan", + "vlan_id": "20", "vlan_state": "suspend", "mapped_vni": ""} +end_state: + description: k/v pairs of the VLAN after executing module or null + when using vlan_range + returned: always + type: dict or null + sample: {"admin_state": "down", "name": "app_vlan", "vlan_id": "20", + "vlan_state": 
"suspend", "mapped_vni": "5000"} +updates: + description: command string sent to the device + returned: always + type: list + sample: ["vlan 20", "vlan 55", "vn-segment 5000"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true + +''' + +import json +import collections + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: 
+ self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +def vlan_range_to_list(vlans): + result = [] + if vlans: + for part in vlans.split(','): + if part == 'none': + break + 
if '-' in part: + a, b = part.split('-') + a, b = int(a), int(b) + result.extend(range(a, b + 1)) + else: + a = int(part) + result.append(a) + return numerical_sort(result) + return result + + +def numerical_sort(string_int_list): + """Sort list of strings (VLAN IDs) that are digits in numerical order. + """ + + as_int_list = [] + as_str_list = [] + for vlan in string_int_list: + as_int_list.append(int(vlan)) + as_int_list.sort() + for vlan in as_int_list: + as_str_list.append(str(vlan)) + return as_str_list + + +def build_commands(vlans, state): + commands = [] + for vlan in vlans: + if state == 'present': + command = 'vlan {0}'.format(vlan) + commands.append(command) + elif state == 'absent': + command = 'no vlan {0}'.format(vlan) + commands.append(command) + return commands + + +def get_vlan_config_commands(vlan, vid): + """Build command list required for VLAN configuration + """ + + reverse_value_map = { + "admin_state": { + "down": "shutdown", + "up": "no shutdown" + } + } + + if vlan.get('admin_state'): + # apply value map when making change to the admin state + # note: would need to be a loop or more in depth check if + # value map has more than 1 key + vlan = apply_value_map(reverse_value_map, vlan) + + VLAN_ARGS = { + 'name': 'name {0}', + 'vlan_state': 'state {0}', + 'admin_state': '{0}', + 'mode': 'mode {0}', + 'mapped_vni': 'vn-segment {0}' + } + + commands = [] + + for param, value in vlan.iteritems(): + if param == 'mapped_vni' and value == 'default': + command = 'no vn-segment' + else: + command = VLAN_ARGS.get(param).format(vlan.get(param)) + if command: + commands.append(command) + + commands.insert(0, 'vlan ' + vid) + commands.append('exit') + + return commands + + +def get_list_of_vlans(module): + command = 'show vlan' + body = execute_show_command(command, module) + vlan_list = [] + vlan_table = body[0].get('TABLE_vlanbrief')['ROW_vlanbrief'] + + if isinstance(vlan_table, list): + for vlan in vlan_table: + 
vlan_list.append(str(vlan['vlanshowbr-vlanid-utf'])) + else: + vlan_list.append('1') + + return vlan_list + + +def get_vni(vlanid, module): + command = 'show run all | section vlan.{0}'.format(vlanid) + body = execute_show_command(command, module, command_type='cli_show_ascii')[0] + value = '' + if body: + REGEX = re.compile(r'(?:vn-segment\s)(?P.*)$', re.M) + if 'vn-segment' in body: + value = REGEX.search(body).group('value') + return value + + +def get_vlan(vlanid, module): + """Get instance of VLAN as a dictionary + """ + + command = 'show vlan id ' + vlanid + + body = execute_show_command(command, module) + + try: + vlan_table = body[0]['TABLE_vlanbriefid']['ROW_vlanbriefid'] + except (TypeError, IndexError): + return {} + + key_map = { + "vlanshowbr-vlanid-utf": "vlan_id", + "vlanshowbr-vlanname": "name", + "vlanshowbr-vlanstate": "vlan_state", + "vlanshowbr-shutstate": "admin_state" + } + + vlan = apply_key_map(key_map, vlan_table) + + value_map = { + "admin_state": { + "shutdown": "down", + "noshutdown": "up" + } + } + + vlan = apply_value_map(value_map, vlan) + vlan['mapped_vni'] = get_vni(vlanid, module) + return vlan + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + new_dict[new_key] = str(value) + return new_dict + + +def apply_value_map(value_map, resource): + for key, value in value_map.items(): + resource[key] = value[resource.get(key)] + return resource + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def 
get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. + """ + if 'show run' in command or response[0] == '\n': + body = response + elif 'xml' in response[0]: + body = [] + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'show run' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def main(): + argument_spec = dict( + vlan_id=dict(required=False, type='str'), + 
vlan_range=dict(required=False), + name=dict(required=False), + vlan_state=dict(choices=['active', 'suspend'], required=False), + mapped_vni=dict(required=False, type='str'), + state=dict(choices=['present', 'absent'], default='present', + required=False), + admin_state=dict(choices=['up', 'down'], required=False), + include_defaults=dict(default=False), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + mutually_exclusive=[['vlan_range', 'name'], + ['vlan_id', 'vlan_range']], + supports_check_mode=True) + + vlan_range = module.params['vlan_range'] + vlan_id = module.params['vlan_id'] + name = module.params['name'] + vlan_state = module.params['vlan_state'] + admin_state = module.params['admin_state'] + mapped_vni = module.params['mapped_vni'] + state = module.params['state'] + + changed = False + + if vlan_id: + if not vlan_id.isdigit(): + module.fail_json(msg='vlan_id must be a valid VLAN ID') + + args = dict(name=name, vlan_state=vlan_state, + admin_state=admin_state, mapped_vni=mapped_vni) + + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + + proposed_vlans_list = numerical_sort(vlan_range_to_list( + vlan_id or vlan_range)) + existing_vlans_list = numerical_sort(get_list_of_vlans(module)) + commands = [] + existing = None + + if vlan_range: + if state == 'present': + # These are all of the VLANs being proposed that don't + # already exist on the switch + vlans_delta = list( + set(proposed_vlans_list).difference(existing_vlans_list)) + commands = build_commands(vlans_delta, state) + elif state == 'absent': + # VLANs that are common between what is being proposed and + # what is on the switch + vlans_common = list( + set(proposed_vlans_list).intersection(existing_vlans_list)) + commands = build_commands(vlans_common, state) + else: + existing = get_vlan(vlan_id, module) + if state == 'absent': + if existing: + commands = ['no vlan ' + vlan_id] + elif state == 'present': 
+ if (existing.get('mapped_vni') == '0' and + proposed.get('mapped_vni') == 'default'): + proposed.pop('mapped_vni') + delta = dict(set( + proposed.iteritems()).difference(existing.iteritems())) + if delta or not existing: + commands = get_vlan_config_commands(delta, vlan_id) + + end_state = existing + end_state_vlans_list = existing_vlans_list + + if commands: + if existing.get('mapped_vni'): + if (existing.get('mapped_vni') != proposed.get('mapped_vni') and + existing.get('mapped_vni') != '0' and proposed.get('mapped_vni') != 'default'): + commands.insert(1, 'no vn-segment') + if module.check_mode: + module.exit_json(changed=True, + commands=commands) + else: + execute_config_command(commands, module) + changed = True + end_state_vlans_list = numerical_sort(get_list_of_vlans(module)) + if 'configure' in commands: + commands.pop(0) + if vlan_id: + end_state = get_vlan(vlan_id, module) + + results = {} + results['proposed_vlans_list'] = proposed_vlans_list + results['existing_vlans_list'] = existing_vlans_list + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['end_state_vlans_list'] = end_state_vlans_list + results['updates'] = commands + results['changed'] = changed + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_vpc.py b/network/nxos/nxos_vpc.py new file mode 100644 index 00000000000..42af1cb6511 --- /dev/null +++ b/network/nxos/nxos_vpc.py @@ -0,0 +1,659 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_vpc +version_added: "2.2" +short_description: Manages global VPC configuration +description: + - Manages global VPC configuration +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +notes: + - The feature vpc must be enabled before this module can be used + - If not using management vrf, vrf must be globally on the device + before using in the pkl config + - Although source IP isn't required on the command line it is + required when using this module. The PKL VRF must also be configured + prior to using this module. + - Both pkl_src and pkl_dest are needed when changing PKL VRF. +options: + domain: + description: + - VPC domain + required: true + role_priority: + description: + - Role priority for device. Remember lower is better. + required: false + default: null + system_priority: + description: + - System priority device. Remember they must match between peers. 
+ required: false + default: null + pkl_src: + description: + - Source IP address used for peer keepalive link + required: false + default: null + pkl_dest: + description: + - Destination (remote) IP address used for peer keepalive link + required: false + default: null + pkl_vrf: + description: + - VRF used for peer keepalive link + required: false + default: management + peer_gw: + description: + - Enables/Disables peer gateway + required: true + choices: ['true','false'] + auto_recovery: + description: + - Enables/Disables auto recovery + required: true + choices: ['true','false'] + delay_restore: + description: + - manages delay restore command and config value in seconds + required: false + default: null + state: + description: + - Manages desired state of the resource + required: true + choices: ['present','absent'] +''' + +EXAMPLES = ''' +# configure a simple asn +- nxos_vpc: + domain: 100 + role_priority: 1000 + system_priority: 2000 + pkl_dest: 192.168.100.4 + pkl_src: 10.1.100.20 + peer_gw: true + auto_recovery: true + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"auto_recovery": true, "domain": "100", + "peer_gw": true, "pkl_dest": "192.168.100.4", + "pkl_src": "10.1.100.20", "pkl_vrf": "management", + "role_priority": "1000", "system_priority": "2000"} +existing: + description: k/v pairs of existing VPC configuration + type: dict + sample: {"auto_recovery": true, "delay_restore": null, + "domain": "100", "peer_gw": true, + "pkl_dest": "192.168.100.2", "pkl_src": "10.1.100.20", + "pkl_vrf": "management", "role_priority": "1000", + "system_priority": "2000"} +end_state: + description: k/v pairs of VPC configuration after module execution + returned: always + type: dict + sample: {"auto_recovery": true, "domain": "100", + "peer_gw": true, "pkl_dest": "192.168.100.4", + "pkl_src": 
"10.1.100.20", "pkl_vrf": "management", + "role_priority": "1000", "system_priority": "2000"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["vpc domain 100", + "peer-keepalive destination 192.168.100.4 source 10.1.100.20 vrf management", + "auto-recovery", "peer-gateway"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import json +import collections + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + 
## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result 
+# END OF COMMON CODE + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. + """ + if '^' == response[0]: + body = [] + elif 'running' in command: + body = response + else: + if command in response[0]: + response = [response[0].split(command)[1]] + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): 
+ if module.params['transport'] == 'cli': + if "section" not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def get_vrf_list(module): + command = 'show vrf all' + vrf_table = None + + body = execute_show_command(command, module) + + try: + vrf_table = body[0]['TABLE_vrf']['ROW_vrf'] + except (KeyError, AttributeError): + return [] + + vrf_list = [] + if vrf_table: + for each in vrf_table: + vrf_list.append(str(each['vrf_name'].lower())) + + return vrf_list + + +def get_autorecovery(auto): + auto_recovery = auto.split(' ')[0] + if 'enabled' in auto_recovery.lower(): + return True + else: + return False + + +def get_vpc_running_config(module): + command = 'show running section vpc' + body = execute_show_command(command, module, command_type='cli_show_ascii') + + return body + + +def get_vpc(module): + vpc = {} + + command = 'show vpc' + body = execute_show_command(command, module)[0] + domain = str(body['vpc-domain-id']) + auto_recovery = get_autorecovery(str( + body['vpc-auto-recovery-status'])) + + if domain != 'not configured': + delay_restore = None + pkl_src = None + role_priority = None + system_priority = None + pkl_dest = None + pkl_vrf = None + peer_gw = False + + run = get_vpc_running_config(module)[0] + if run: + vpc_list = run.split('\n') + for each in vpc_list: + if 'delay restore' in each: + line = each.split() + if len(line) == 5: + delay_restore = line[-1] + if 'peer-keepalive destination' in each: + line = each.split() + pkl_dest = line[2] + for word in line: + if 'source' in 
word: + index = line.index(word) + pkl_src = line[index + 1] + if 'role priority' in each: + line = each.split() + role_priority = line[-1] + if 'system-priority' in each: + line = each.split() + system_priority = line[-1] + if 'peer-gateway' in each: + peer_gw = True + + + command = 'show vpc peer-keepalive' + body = execute_show_command(command, module)[0] + + if body: + pkl_dest = body['vpc-keepalive-dest'] + if 'N/A' in pkl_dest: + pkl_dest = None + elif len(pkl_dest) == 2: + pkl_dest = pkl_dest[0] + pkl_vrf = str(body['vpc-keepalive-vrf']) + + vpc['domain'] = domain + vpc['auto_recovery'] = auto_recovery + vpc['delay_restore'] = delay_restore + vpc['pkl_src'] = pkl_src + vpc['role_priority'] = role_priority + vpc['system_priority'] = system_priority + vpc['pkl_dest'] = pkl_dest + vpc['pkl_vrf'] = pkl_vrf + vpc['peer_gw'] = peer_gw + else: + vpc = {} + + return vpc + + +def get_commands_to_config_vpc(module, vpc, domain, existing): + vpc = dict(vpc) + + domain_only = vpc.get('domain') + pkl_src = vpc.get('pkl_src') + pkl_dest = vpc.get('pkl_dest') + pkl_vrf = vpc.get('pkl_vrf') or existing.get('pkl_vrf') + vpc['pkl_vrf'] = pkl_vrf + + commands = [] + if pkl_src or pkl_dest: + if pkl_src is None: + vpc['pkl_src'] = existing.get('pkl_src') + elif pkl_dest is None: + vpc['pkl_dest'] = existing.get('pkl_dest') + pkl_command = 'peer-keepalive destination {pkl_dest}'.format(**vpc) \ + + ' source {pkl_src} vrf {pkl_vrf}'.format(**vpc) + commands.append(pkl_command) + elif pkl_vrf: + pkl_src = existing.get('pkl_src') + pkl_dest = existing.get('pkl_dest') + if pkl_src and pkl_dest: + pkl_command = ('peer-keepalive destination {0}' + ' source {1} vrf {2}'.format(pkl_dest, pkl_src, pkl_vrf)) + commands.append(pkl_command) + + if vpc.get('auto_recovery') == False: + vpc['auto_recovery'] = 'no' + else: + vpc['auto_recovery'] = '' + + if vpc.get('peer_gw') == False: + vpc['peer_gw'] = 'no' + else: + vpc['peer_gw'] = '' + + CONFIG_ARGS = { + 'role_priority': 'role priority 
{role_priority}', + 'system_priority': 'system-priority {system_priority}', + 'delay_restore': 'delay restore {delay_restore}', + 'peer_gw': '{peer_gw} peer-gateway', + 'auto_recovery': '{auto_recovery} auto-recovery', + } + + for param, value in vpc.iteritems(): + command = CONFIG_ARGS.get(param, 'DNE').format(**vpc) + if command and command != 'DNE': + commands.append(command.strip()) + command = None + + if commands or domain_only: + commands.insert(0, 'vpc domain {0}'.format(domain)) + return commands + + +def get_commands_to_remove_vpc_interface(portchannel, config_value): + commands = [] + command = 'no vpc {0}'.format(config_value) + commands.append(command) + commands.insert(0, 'interface port-channel{0}'.format(portchannel)) + return commands + + +def main(): + argument_spec = dict( + domain=dict(required=True, type='str'), + role_priority=dict(required=False, type='str'), + system_priority=dict(required=False, type='str'), + pkl_src=dict(required=False), + pkl_dest=dict(required=False), + pkl_vrf=dict(required=False, default='management'), + peer_gw=dict(required=True, type='bool'), + auto_recovery=dict(required=True, type='bool'), + delay_restore=dict(required=False, type='str'), + state=dict(choices=['absent', 'present'], default='present'), + include_defaults=dict(default=False), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + domain = module.params['domain'] + role_priority = module.params['role_priority'] + system_priority = module.params['system_priority'] + pkl_src = module.params['pkl_src'] + pkl_dest = module.params['pkl_dest'] + pkl_vrf = module.params['pkl_vrf'] + peer_gw = module.params['peer_gw'] + auto_recovery = module.params['auto_recovery'] + delay_restore = module.params['delay_restore'] + state = module.params['state'] + + args = dict(domain=domain, role_priority=role_priority, + system_priority=system_priority, pkl_src=pkl_src, + 
pkl_dest=pkl_dest, pkl_vrf=pkl_vrf, peer_gw=peer_gw, + auto_recovery=auto_recovery, + delay_restore=delay_restore) + + if not (pkl_src and pkl_dest and pkl_vrf): + # if only the source or dest is set, it'll fail and ask to set the + # other + if pkl_src or pkl_dest: + module.fail_json(msg='source AND dest IP for pkl are required at ' + 'this time (although source is technically not ' + ' required by the device.)') + + args.pop('pkl_src') + args.pop('pkl_dest') + args.pop('pkl_vrf') + + if pkl_vrf: + if pkl_vrf.lower() not in get_vrf_list(module): + module.fail_json(msg='The VRF you are trying to use for the peer ' + 'keepalive link is not on device yet. Add it' + ' first, please.') + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + changed = False + existing = get_vpc(module) + end_state = existing + + commands = [] + if state == 'present': + delta = set(proposed.iteritems()).difference(existing.iteritems()) + if delta: + command = get_commands_to_config_vpc(module, delta, domain, existing) + commands.append(command) + elif state == 'absent': + if existing: + if domain != existing['domain']: + module.fail_json(msg="You are trying to remove a domain that " + "does not exist on the device") + else: + commands.append('no vpc domain {0}'.format(domain)) + + cmds = flatten_list(commands) + + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + end_state = get_vpc(module) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = cmds + results['changed'] = changed + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_vpc_interface.py b/network/nxos/nxos_vpc_interface.py new file mode 100644 index 00000000000..6f122f6f2d5 --- /dev/null +++ b/network/nxos/nxos_vpc_interface.py @@ -0,0 
+1,593 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_vpc_interface +version_added: "2.2" +short_description: Manages interface VPC configuration +description: + - Manages interface VPC configuration +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +notes: + - Either vpc or peer_link param is required, but not both. + - C(state=absent) removes whatever VPC config is on a port-channel + if one exists. + - Re-assigning a vpc or peerlink from one portchannel to another is not + supported. The module will force the user to unconfigure an existing + vpc/pl before configuring the same value on a new portchannel +options: + portchannel: + description: + - Group number of the portchannel that will be configured. + required: true + vpc: + description: + - VPC group/id that will be configured on associated portchannel. + required: false + default: null + peer_link: + description: + - Set to true/false for peer link config on associated portchannel. + required: false + default: null + state: + description: + - Manages desired state of the resource. 
+ required: true + choices: ['present','absent'] +''' + +EXAMPLES = ''' +- nxos_vpc_portchannel: + portchannel: 10 + vpc: 100 + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"portchannel": "100", "vpc": "10"} +existing: + description: k/v pairs of existing configuration + type: dict + sample: {} +end_state: + description: k/v pairs of configuration after module execution + returned: always + type: dict + sample: {"peer-link": false, "portchannel": "100", "vpc": "10"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["interface port-channel100", "vpc 10"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + + +import collections +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = 
self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + 
commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +def execute_config_command(commands, module): + try: + response = module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + return response + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. 
+ """ + if '^' == response[0]: + body = [] + elif 'running' in command or 'xml' in response[0]: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def get_portchannel_list(module): + command = 'show port-channel summary' + portchannels = [] + pc_list = [] + + body = execute_show_command(command, module) + + try: + pc_list = body[0]['TABLE_channel']['ROW_channel'] + except (KeyError, AttributeError): + return 
portchannels + + if pc_list: + if isinstance(pc_list, dict): + pc_list = [pc_list] + + for pc in pc_list: + portchannels.append(pc['group']) + + return portchannels + + +def get_existing_portchannel_to_vpc_mappings(module): + command = 'show vpc brief' + pc_vpc_mapping = {} + + body = execute_show_command(command, module) + + try: + vpc_table = body[0]['TABLE_vpc']['ROW_vpc'] + except (KeyError, AttributeError, TypeError): + vpc_table = None + + if vpc_table: + if isinstance(vpc_table, dict): + vpc_table = [vpc_table] + + for vpc in vpc_table: + pc_vpc_mapping[str(vpc['vpc-id'])] = str(vpc['vpc-ifindex']) + + return pc_vpc_mapping + + +def peer_link_exists(module): + found = False + run = get_vpc_running_config(module) + + vpc_list = run.split('\n') + for each in vpc_list: + if 'peer-link' in each: + found = True + return found + + +def get_vpc_running_config(module): + command = 'show running section vpc' + body = execute_show_command(command, module, + command_type='cli_show_ascii')[0] + + return body + + +def get_active_vpc_peer_link(module): + command = 'show vpc brief' + peer_link = None + body = execute_show_command(command, module) + try: + peer_link = body[0]['TABLE_peerlink']['ROW_peerlink']['peerlink-ifindex'] + except (KeyError, AttributeError): + return peer_link + + return peer_link + + +def get_portchannel_vpc_config(module, portchannel): + command = 'show vpc brief' + peer_link_pc = None + peer_link = False + vpc = "" + pc = "" + config = {} + + body = execute_show_command(command, module) + + try: + table = body[0]['TABLE_peerlink']['ROW_peerlink'] + except (KeyError, AttributeError, TypeError): + table = {} + + if table: + peer_link_pc = table.get('peerlink-ifindex', None) + + if peer_link_pc: + plpc = str(peer_link_pc[2:]) + if portchannel == plpc: + config['portchannel'] = portchannel + config['peer-link'] = True + config['vpc'] = vpc + + mapping = get_existing_portchannel_to_vpc_mappings(module) + + for existing_vpc, port_channel in 
mapping.iteritems(): + port_ch = str(port_channel[2:]) + if port_ch == portchannel: + pc = port_ch + vpc = str(existing_vpc) + + config['portchannel'] = pc + config['peer-link'] = peer_link + config['vpc'] = vpc + + return config + + +def get_commands_to_config_vpc_interface(portchannel, delta, config_value, existing): + commands = [] + + if delta.get('peer-link') is False and existing.get('peer-link') is True: + command = 'no vpc peer-link' + commands.append('no vpc peer-link') + commands.insert(0, 'interface port-channel{0}'.format(portchannel)) + + elif delta.get('peer-link') or not existing.get('vpc'): + command = 'vpc {0}'.format(config_value) + commands.append(command) + commands.insert(0, 'interface port-channel{0}'.format(portchannel)) + + return commands + + +def main(): + argument_spec = dict( + portchannel=dict(required=True, type='str'), + vpc=dict(required=False, type='str'), + peer_link=dict(required=False, type='bool'), + state=dict(choices=['absent', 'present'], default='present'), + include_defaults=dict(default=False), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + mutually_exclusive=[['vpc', 'peer_link']], + supports_check_mode=True) + + portchannel = module.params['portchannel'] + vpc = module.params['vpc'] + peer_link = module.params['peer_link'] + state = module.params['state'] + + changed = False + args = {'portchannel': portchannel, 'vpc': vpc, 'peer-link': peer_link} + active_peer_link = None + + if portchannel not in get_portchannel_list(module): + module.fail_json(msg="The portchannel you are trying to make a" + " VPC or PL is not created yet. " + "Create it first!") + if vpc: + mapping = get_existing_portchannel_to_vpc_mappings(module) + + if vpc in mapping and portchannel != mapping[vpc].strip('Po'): + module.fail_json(msg="This vpc is already configured on " + "another portchannel. Remove it first " + "before trying to assign it here. 
", + existing_portchannel=mapping[vpc]) + + for vpcid, existing_pc in mapping.iteritems(): + if portchannel == existing_pc.strip('Po') and vpcid != vpc: + module.fail_json(msg="This portchannel already has another" + " VPC configured. Remove it first " + "before assigning this one", + existing_vpc=vpcid) + + if peer_link_exists(module): + active_peer_link = get_active_vpc_peer_link(module) + if active_peer_link[-2:] == portchannel: + module.fail_json(msg="That port channel is the current " + "PEER LINK. Remove it if you want it" + " to be a VPC") + config_value = vpc + + elif peer_link is not None: + if peer_link_exists(module): + active_peer_link = get_active_vpc_peer_link(module)[2::] + if active_peer_link != portchannel: + if peer_link: + module.fail_json(msg="A peer link already exists on" + " the device. Remove it first", + current_peer_link='Po{0}'.format( + active_peer_link)) + config_value = 'peer-link' + + + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + existing = get_portchannel_vpc_config(module, portchannel) + end_state = existing + commands = [] + + if state == 'present': + delta = dict(set(proposed.iteritems()).difference(existing.iteritems())) + if delta: + command = get_commands_to_config_vpc_interface( + portchannel, + delta, + config_value, + existing + ) + commands.append(command) + + elif state == 'absent': + if existing.get('vpc'): + command = ['no vpc'] + commands.append(command) + elif existing.get('peer-link'): + command = ['no vpc peer-link'] + commands.append(command) + if commands: + commands.insert(0, ['interface port-channel{0}'.format(portchannel)]) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + output = execute_config_command(cmds, module) + if module.params['transport'] == 'cli': + output = ' '.join(output) + if 'error' in output.lower(): + module.fail_json(msg=output.replace('\n', '')) + end_state = 
get_portchannel_vpc_config(module, portchannel) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = cmds + results['changed'] = changed + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_vrf.py b/network/nxos/nxos_vrf.py new file mode 100644 index 00000000000..eb60306e08a --- /dev/null +++ b/network/nxos/nxos_vrf.py @@ -0,0 +1,532 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_vrf +version_added: "2.1" +short_description: Manages global VRF configuration. +description: + - Manages global VRF configuration. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +notes: + - Cisco NX-OS creates the default VRF by itself. Therefore, + you're not allowed to use default as I(vrf) name in this module. + - C(vrf) name must be shorter than 32 chars. + - VRF names are not case sensible in NX-OS. Anyway, the name is stored + just like it's inserted by the user and it'll not be changed again + unless the VRF is removed and re-created. i.e. 
C(vrf=NTC) will create + a VRF named NTC, but running it again with C(vrf=ntc) will not cause + a configuration change. +options: + vrf: + description: + - Name of VRF to be managed. + required: true + admin_state: + description: + - Administrative state of the VRF. + required: false + default: up + choices: ['up','down'] + vni: + description: + - Specify virtual network identifier. Valid values are Integer + or keyword 'default'. + required: false + default: null + version_added: "2.2" + route_distinguisher: + description: + - VPN Route Distinguisher (RD). Valid values are a string in + one of the route-distinguisher formats (ASN2:NN, ASN4:NN, or + IPV4:NN); the keyword 'auto', or the keyword 'default'. + required: false + default: null + version_added: "2.2" + state: + description: + - Manages desired state of the resource. + required: false + default: present + choices: ['present','absent'] + description: + description: + - Description of the VRF. + required: false + default: null +''' + +EXAMPLES = ''' +- name: Ensure ntc VRF exists on switch + nxos_vrf: + vrf: ntc + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"admin_state": "Up", "description": "Test test", + "vrf": "ntc"} +existing: + description: k/v pairs of existing vrf + type: dict + sample: {"admin_state": "Up", "description": "Old test", + "vrf": "old_ntc"} +end_state: + description: k/v pairs of vrf info after module execution + returned: always + type: dict + sample: {"admin_state": "Up", "description": "Test test", + "vrf": "ntc"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["vrf context ntc", "shutdown"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import json + +# COMMON CODE FOR MIGRATION +import 
re + +import ansible.module_utils.nxos +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError +from ansible.module_utils.network import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + 
ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh_vrf(module, 
command, response): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when using multiple |. + """ + command_splitted = command.split('|') + if len(command_splitted) > 2 or 'show run' in command: + body = response + elif 'xml' in response[0] or response[0] == '\n': + body = [] + else: + body = [json.loads(response[0])] + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'show run' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh_vrf(module, command, response) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = 
key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = str(value) + else: + new_dict[new_key] = value + return new_dict + + +def get_commands_to_config_vrf(delta, vrf): + commands = [] + for param, value in delta.iteritems(): + command = '' + if param == 'description': + command = 'description {0}'.format(value) + elif param == 'admin_state': + if value.lower() == 'up': + command = 'no shutdown' + elif value.lower() == 'down': + command = 'shutdown' + elif param == 'rd': + command = 'rd {0}'.format(value) + elif param == 'vni': + command = 'vni {0}'.format(value) + if command: + commands.append(command) + if commands: + commands.insert(0, 'vrf context {0}'.format(vrf)) + return commands + + +def get_vrf_description(vrf, module): + command_type = 'cli_show_ascii' + command = (r'show run section vrf | begin ^vrf\scontext\s{0} | end ^vrf.*'.format(vrf)) + + description = '' + descr_regex = r".*description\s(?P[\S+\s]+).*" + body = execute_show_command(command, module, command_type) + + try: + body = body[0] + splitted_body = body.split('\n') + except (AttributeError, IndexError): + return description + + for element in splitted_body: + if 'description' in element: + match_description = re.match(descr_regex, element, + re.DOTALL) + group_description = match_description.groupdict() + description = group_description["descr"] + + return description + + +def get_value(arg, config, module): + REGEX = re.compile(r'(?:{0}\s)(?P.*)$'.format(arg), re.M) + value = '' + if arg in config: + value = REGEX.search(config).group('value') + return value + + +def get_vrf(vrf, module): + command = 'show vrf {0}'.format(vrf) + vrf_key = { + 'vrf_name': 'vrf', + 'vrf_state': 'admin_state' + } + + body = execute_show_command(command, module) + try: + vrf_table = body[0]['TABLE_vrf']['ROW_vrf'] + except (TypeError, IndexError): + return {} + + parsed_vrf = apply_key_map(vrf_key, vrf_table) + + command = 'show run all | section vrf.context.{0}'.format(vrf) + 
body = execute_show_command(command, module, 'cli_show_ascii') + extra_params = ['vni', 'rd', 'description'] + for param in extra_params: + parsed_vrf[param] = get_value(param, body[0], module) + + return parsed_vrf + + +def main(): + argument_spec = dict( + vrf=dict(required=True), + description=dict(default=None, required=False), + vni=dict(required=False, type='str'), + rd=dict(required=False, type='str'), + admin_state=dict(default='up', choices=['up', 'down'], + required=False), + state=dict(default='present', choices=['present', 'absent'], + required=False), + include_defaults=dict(default=False), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + vrf = module.params['vrf'] + admin_state = module.params['admin_state'].lower() + description = module.params['description'] + rd = module.params['rd'] + vni = module.params['vni'] + state = module.params['state'] + + if vrf == 'default': + module.fail_json(msg='cannot use default as name of a VRF') + elif len(vrf) > 32: + module.fail_json(msg='VRF name exceeded max length of 32', + vrf=vrf) + + existing = get_vrf(vrf, module) + args = dict(vrf=vrf, description=description, vni=vni, + admin_state=admin_state, rd=rd) + + end_state = existing + changed = False + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + + """Since 'admin_state' is either 'Up' or 'Down' from outputs, + we use the following to make sure right letter case is used so that delta + results will be consistent to the actual configuration.""" + if existing: + if existing['admin_state'].lower() == admin_state: + proposed['admin_state'] = existing['admin_state'] + + delta = dict(set(proposed.iteritems()).difference(existing.iteritems())) + changed = False + end_state = existing + commands = [] + if state == 'absent': + if existing: + command = ['no vrf context {0}'.format(vrf)] + commands.extend(command) + + elif state == 'present': 
+ if not existing: + command = get_commands_to_config_vrf(delta, vrf) + commands.extend(command) + elif delta: + command = get_commands_to_config_vrf(delta, vrf) + commands.extend(command) + + if commands: + if proposed.get('vni'): + if existing.get('vni') and existing.get('vni') != '': + commands.insert(1, 'no vni {0}'.format(existing['vni'])) + if module.check_mode: + module.exit_json(changed=True, commands=commands) + else: + execute_config_command(commands, module) + changed = True + end_state = get_vrf(vrf, module) + if 'configure' in commands: + commands.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = commands + results['changed'] = changed + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_vrf_af.py b/network/nxos/nxos_vrf_af.py new file mode 100644 index 00000000000..70ca67109e7 --- /dev/null +++ b/network/nxos/nxos_vrf_af.py @@ -0,0 +1,436 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_vrf_af +version_added: "2.2" +short_description: Manages VRF AF. 
+description: + - Manages VRF AF +author: Gabriele Gerbino (@GGabriele) +extends_documentation_fragment: nxos +notes: + - Default, where supported, restores params default value. +options: + vrf: + description: + - Name of the VRF. + required: true + afi: + description: + - Address-Family Identifier (AFI). + required: true + choices: ['ipv4', 'ipv6'] + default: null + safi: + description: + - Sub Address-Family Identifier (SAFI). + required: true + choices: ['unicast', 'multicast'] + default: null + route_target_both_auto_evpn: + description: + - Enable/Disable the EVPN route-target 'auto' setting for both + import and export target communities. + required: false + choices: ['true', 'false'] + default: null + state: + description: + - Determines whether the config should be present or + not on the device. + required: false + default: present + choices: ['present','absent'] +''' +EXAMPLES = ''' +- nxos_vrf_af: + interface: nve1 + vni: 6000 + ingress_replication: true + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"afi": "ipv4", "route_target_both_auto_evpn": true, + "safi": "unicast", "vrf": "test"} +existing: + description: k/v pairs of existing configuration + returned: verbose mode + type: dict + sample: {"afi": "ipv4", "route_target_both_auto_evpn": false, + "safi": "unicast", "vrf": "test"} +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: {"afi": "ipv4", "route_target_both_auto_evpn": true, + "safi": "unicast", "vrf": "test"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["vrf context test", "address-family ipv4 unicast", + "route-target both auto evpn"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: 
true +''' + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + 
obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +BOOL_PARAMS = ['route_target_both_auto_evpn'] +PARAM_TO_COMMAND_KEYMAP = { + 'route_target_both_auto_evpn': 'route-target both auto evpn', +} +PARAM_TO_DEFAULT_KEYMAP = {} +WARNINGS = [] + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + +def get_value(arg, config, module): + if arg in BOOL_PARAMS: + REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + 
value = False + try: + if REGEX.search(config): + value = True + except TypeError: + value = False + else: + REGEX = re.compile(r'(?:{0}\s)(?P.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = '' + if PARAM_TO_COMMAND_KEYMAP[arg] in config: + value = REGEX.search(config).group('value') + return value + + +def get_existing(module, args): + existing = {} + netcfg = get_config(module) + + parents = ['vrf context {0}'.format(module.params['vrf'])] + parents.append('address-family {0} {1}'.format(module.params['afi'], + module.params['safi'])) + config = netcfg.get_section(parents) + if config: + splitted_config = config.splitlines() + vrf_index = False + for index in range(0, len(splitted_config) - 1): + if 'vrf' in splitted_config[index].strip(): + vrf_index = index + break + if vrf_index: + config = '\n'.join(splitted_config[0:vrf_index]) + + for arg in args: + if arg not in ['afi', 'safi', 'vrf']: + existing[arg] = get_value(arg, config, module) + + existing['afi'] = module.params['afi'] + existing['safi'] = module.params['safi'] + existing['vrf'] = module.params['vrf'] + + return existing + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = value + else: + new_dict[new_key] = value + return new_dict + + +def state_present(module, existing, proposed, candidate): + commands = list() + proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) + existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) + + for key, value in proposed_commands.iteritems(): + if value is True: + commands.append(key) + + elif value is False: + commands.append('no {0}'.format(key)) + + elif value == 'default': + if existing_commands.get(key): + existing_value = existing_commands.get(key) + commands.append('no {0} {1}'.format(key, existing_value)) + else: + command = '{0} {1}'.format(key, value.lower()) + 
commands.append(command) + + if commands: + parents = ['vrf context {0}'.format(module.params['vrf'])] + parents.append('address-family {0} {1}'.format(module.params['afi'], + module.params['safi'])) + candidate.add(commands, parents=parents) + + +def state_absent(module, existing, proposed, candidate): + commands = [] + parents = ['vrf context {0}'.format(module.params['vrf'])] + commands.append('no address-family {0} {1}'.format(module.params['afi'], + module.params['safi'])) + candidate.add(commands, parents=parents) + + +def main(): + argument_spec = dict( + vrf=dict(required=True, type='str'), + safi=dict(required=True, type='str', choices=['unicast','multicast']), + afi=dict(required=True, type='str', choices=['ipv4','ipv6']), + route_target_both_auto_evpn=dict(required=False, type='bool'), + m_facts=dict(required=False, default=False, type='bool'), + state=dict(choices=['present', 'absent'], default='present', + required=False), + include_defaults=dict(default=False), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + state = module.params['state'] + + args = [ + 'vrf', + 'safi', + 'afi', + 'route_target_both_auto_evpn' + ] + + existing = invoke('get_existing', module, args) + end_state = existing + proposed_args = dict((k, v) for k, v in module.params.iteritems() + if v is not None and k in args) + + proposed = {} + for key, value in proposed_args.iteritems(): + if key != 'interface': + if str(value).lower() == 'default': + value = PARAM_TO_DEFAULT_KEYMAP.get(key) + if value is None: + value = 'default' + if existing.get(key) or (not existing.get(key) and value): + proposed[key] = value + + result = {} + if state == 'present' or (state == 'absent' and existing): + candidate = CustomNetworkConfig(indent=3) + invoke('state_%s' % state, module, existing, proposed, candidate) + + try: + response = load_config(module, candidate) + result.update(response) + except 
ShellError: + exc = get_exception() + module.fail_json(msg=str(exc)) + else: + result['updates'] = [] + + result['connected'] = module.connected + if module._verbosity > 0: + end_state = invoke('get_existing', module, args) + result['end_state'] = end_state + result['existing'] = existing + result['proposed'] = proposed_args + + if WARNINGS: + result['warnings'] = WARNINGS + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_vrf_interface.py b/network/nxos/nxos_vrf_interface.py new file mode 100644 index 00000000000..a3420307a6f --- /dev/null +++ b/network/nxos/nxos_vrf_interface.py @@ -0,0 +1,508 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_vrf_interface +version_added: "2.1" +short_description: Manages interface specific VRF configuration. +description: + - Manages interface specific VRF configuration. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +notes: + - VRF needs to be added globally with M(nxos_vrf) before + adding a VRF to an interface. + - Remove a VRF from an interface will still remove + all L3 attributes just as it does from CLI. 
+ - VRF is not read from an interface until IP address is + configured on that interface. +options: + vrf: + description: + - Name of VRF to be managed. + required: true + interface: + description: + - Full name of interface to be managed, i.e. Ethernet1/1. + required: true + state: + description: + - Manages desired state of the resource. + required: false + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +- name: Ensure vrf ntc exists on Eth1/1 + nxos_vrf_interface: + vrf: ntc + interface: Ethernet1/1 + host: 68.170.147.165 + state: present + +- name: Ensure ntc VRF does not exist on Eth1/1 + nxos_vrf_interface: + vrf: ntc + interface: Ethernet1/1 + host: 68.170.147.165 + state: absent +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"interface": "loopback16", "vrf": "ntc"} +existing: + description: k/v pairs of existing vrf on the interface + type: dict + sample: {"interface": "loopback16", "vrf": ""} +end_state: + description: k/v pairs of vrf after module execution + returned: always + type: dict + sample: {"interface": "loopback16", "vrf": "ntc"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["interface loopback16", "vrf member ntc"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import json +import collections + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class 
CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def 
get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +WARNINGS = [] + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh_vrf_interface(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. As such, + we assume if '^' is found in response, it is an invalid command. Instead, + the output will be a raw string when issuing commands containing 'show run'. 
+ """ + if '^' in response[0]: + body = [] + elif 'show run' in command: + body = response + else: + body = [json.loads(response[0])] + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'show run' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh_vrf_interface(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def get_interface_type(interface): + if interface.upper().startswith('ET'): + return 'ethernet' + elif interface.upper().startswith('VL'): + return 'svi' + elif interface.upper().startswith('LO'): + return 'loopback' + elif interface.upper().startswith('MG'): + return 'management' + elif interface.upper().startswith('MA'): + return 'management' + elif interface.upper().startswith('PO'): + return 'portchannel' + else: + return 'unknown' + + +def get_interface_mode(interface, intf_type, module): + command = 'show interface {0}'.format(interface) + interface = {} + mode = 'unknown' + + if 
intf_type in ['ethernet', 'portchannel']: + body = execute_show_command(command, module)[0] + interface_table = body['TABLE_interface']['ROW_interface'] + mode = str(interface_table.get('eth_mode', 'layer3')) + if mode == 'access' or mode == 'trunk': + mode = 'layer2' + elif intf_type == 'loopback' or intf_type == 'svi': + mode = 'layer3' + return mode + + +def get_vrf_list(module): + command = 'show vrf all' + vrf_list = [] + body = execute_show_command(command, module)[0] + + try: + vrf_table = body['TABLE_vrf']['ROW_vrf'] + except (KeyError, AttributeError): + return vrf_list + + for each in vrf_table: + vrf_list.append(str(each['vrf_name'])) + + return vrf_list + + +def get_interface_info(interface, module): + command = 'show run | section interface.{0}'.format(interface.capitalize()) + vrf_regex = ".*vrf\s+member\s+(?P\S+).*" + + try: + body = execute_show_command(command, module, + command_type='cli_show_ascii')[0] + match_vrf = re.match(vrf_regex, body, re.DOTALL) + group_vrf = match_vrf.groupdict() + vrf = group_vrf["vrf"] + except (AttributeError, TypeError): + return "" + + return vrf + + +def is_default(interface, module): + command = 'show run interface {0}'.format(interface) + + try: + body = execute_show_command(command, module, + command_type='cli_show_ascii')[0] + raw_list = body.split('\n') + if raw_list[-1].startswith('interface'): + return True + else: + return False + + except (KeyError, IndexError): + return 'DNE' + + +def main(): + argument_spec = dict( + vrf=dict(required=True), + interface=dict(type='str', required=True), + state=dict(default='present', choices=['present', 'absent'], + required=False), + include_defaults=dict(default=False), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + vrf = module.params['vrf'] + interface = module.params['interface'].lower() + state = module.params['state'] + + current_vrfs = get_vrf_list(module) + if 
vrf not in current_vrfs: + WARNINGS.append("The VRF is not present/active on the device. " + "Use nxos_vrf to fix this.") + + intf_type = get_interface_type(interface) + if (intf_type != 'ethernet' and module.params['transport'] == 'cli'): + if is_default(interface, module) == 'DNE': + module.fail_json(msg="interface does not exist on switch. Verify " + "switch platform or create it first with " + "nxos_interface if it's a logical interface") + + mode = get_interface_mode(interface, intf_type, module) + if mode == 'layer2': + module.fail_json(msg='Ensure interface is a Layer 3 port before ' + 'configuring a VRF on an interface. You can ' + 'use nxos_interface') + + proposed = dict(interface=interface, vrf=vrf) + + current_vrf = get_interface_info(interface, module) + existing = dict(interface=interface, vrf=current_vrf) + changed = False + end_state = existing + + if vrf != existing['vrf'] and state == 'absent': + module.fail_json(msg='The VRF you are trying to remove ' + 'from the interface does not exist ' + 'on that interface.', + interface=interface, proposed_vrf=vrf, + existing_vrf=existing['vrf']) + + commands = [] + if existing: + if state == 'absent': + if existing and vrf == existing['vrf']: + command = 'no vrf member {0}'.format(vrf) + commands.append(command) + + elif state == 'present': + if existing['vrf'] != vrf: + command = 'vrf member {0}'.format(vrf) + commands.append(command) + + if commands: + commands.insert(0, 'interface {0}'.format(interface)) + + if commands: + if module.check_mode: + module.exit_json(changed=True, commands=commands) + else: + execute_config_command(commands, module) + changed = True + changed_vrf = get_interface_info(interface, module) + end_state = dict(interface=interface, vrf=changed_vrf) + if 'configure' in commands: + commands.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = commands + results['changed'] = changed + + if 
WARNINGS: + results['warnings'] = WARNINGS + + module.exit_json(**results) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/network/nxos/nxos_vrrp.py b/network/nxos/nxos_vrrp.py new file mode 100644 index 00000000000..58c04a8367a --- /dev/null +++ b/network/nxos/nxos_vrrp.py @@ -0,0 +1,668 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_vrrp +version_added: "2.1" +short_description: Manages VRRP configuration on NX-OS switches. +description: + - Manages VRRP configuration on NX-OS switches. +extends_documentation_fragment: nxos +author: + - Jason Edelman (@jedelman8) + - Gabriele Gerbino (@GGabriele) +notes: + - VRRP feature needs to be enabled first on the system. + - SVIs must exist before using this module. + - Interface must be a L3 port before using this module. + - C(state=absent) removes the VRRP group if it exists on the device. + - VRRP cannot be configured on loopback interfaces. +options: + group: + description: + - VRRP group number. + required: true + interface: + description: + - Full name of interface that is being managed for VRRP. + required: true + priority: + description: + - VRRP priority. + required: false + default: null + vip: + description: + - VRRP virtual IP address. 
+ required: false + default: null + authentication: + description: + - Clear text authentication string. + required: false + default: null + admin_state: + description: + - Used to enable or disable the VRRP process. + required: false + choices: ['shutdown', 'no shutdown'] + default: no shutdown + version_added: "2.2" + state: + description: + - Specify desired state of the resource. + required: false + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +- name: Ensure vrrp group 100 and vip 10.1.100.1 is on vlan10 + nxos_vrrp: + interface: vlan10 + group: 100 + vip: 10.1.100.1 + host: 68.170.147.165 + +- name: Ensure removal of the vrrp group config + # vip is required to ensure the user knows what they are removing + nxos_vrrp: + interface: vlan10 + group: 100 + vip: 10.1.100.1 + state: absent + host: 68.170.147.165 + +- name: Re-config with more params + nxos_vrrp: + interface: vlan10 + group: 100 + vip: 10.1.100.1 + preempt: false + priority: 130 + authentication: AUTHKEY + host: 68.170.147.165 +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"authentication": "testing", "group": "150", "vip": "10.1.15.1", + "admin_state": "no shutdown"} +existing: + description: k/v pairs of existing vrrp info on the interface + type: dict + sample: {} +end_state: + description: k/v pairs of vrrp after module execution + returned: always + type: dict + sample: {"authentication": "testing", "group": "150", "interval": "1", + "preempt": true, "priority": "100", "vip": "10.1.15.1", + "admin_state": "no shutdown"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["interface vlan10", "vrrp 150", "address 10.1.15.1", + "authentication text testing", "no shutdown"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import json +import collections + +# 
COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = 
list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI 
commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh_vrrp(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when issuing commands containing 'show run'. + """ + if 'xml' in response[0]: + body = [] + elif 'show run' in command: + body = response + else: + try: + response = response[0].replace(command + '\n\n', '').strip() + body = [json.loads(response)] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh_vrrp(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, 
module, command_type=command_type) + + return body + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = str(value) + else: + new_dict[new_key] = value + return new_dict + + +def get_interface_type(interface): + if interface.upper().startswith('ET'): + return 'ethernet' + elif interface.upper().startswith('VL'): + return 'svi' + elif interface.upper().startswith('LO'): + return 'loopback' + elif interface.upper().startswith('MG'): + return 'management' + elif interface.upper().startswith('MA'): + return 'management' + elif interface.upper().startswith('PO'): + return 'portchannel' + else: + return 'unknown' + + +def is_default(interface, module): + command = 'show run interface {0}'.format(interface) + + try: + body = execute_show_command(command, module)[0] + if 'invalid' in body.lower(): + return 'DNE' + else: + raw_list = body.split('\n') + if raw_list[-1].startswith('interface'): + return True + else: + return False + except (KeyError): + return 'DNE' + + +def get_interface_mode(interface, intf_type, module): + command = 'show interface {0}'.format(interface) + interface = {} + mode = 'unknown' + body = execute_show_command(command, module)[0] + interface_table = body['TABLE_interface']['ROW_interface'] + name = interface_table.get('interface') + + if intf_type in ['ethernet', 'portchannel']: + mode = str(interface_table.get('eth_mode', 'layer3')) + + if mode == 'access' or mode == 'trunk': + mode = 'layer2' + elif intf_type == 'svi': + mode = 'layer3' + + return mode, name + + +def get_vrr_status(group, module, interface): + command = 'show run all | section interface.{0}$'.format(interface) + body = execute_show_command(command, module, command_type='cli_show_ascii')[0] + vrf_index = None + admin_state = 'shutdown' + + if body: + splitted_body = body.splitlines() + for index in range(0, len(splitted_body) - 1): + if 
splitted_body[index].strip() == 'vrrp {0}'.format(group): + vrf_index = index + vrf_section = splitted_body[vrf_index::] + + for line in vrf_section: + if line.strip() == 'no shutdown': + admin_state = 'no shutdown' + break + + return admin_state + + +def get_existing_vrrp(interface, group, module, name): + command = 'show vrrp detail interface {0}'.format(interface) + body = execute_show_command(command, module) + vrrp = {} + + vrrp_key = { + 'sh_group_id': 'group', + 'sh_vip_addr': 'vip', + 'sh_priority': 'priority', + 'sh_group_preempt': 'preempt', + 'sh_auth_text': 'authentication', + 'sh_adv_interval': 'interval' + } + + try: + vrrp_table = body[0]['TABLE_vrrp_group'] + except (AttributeError, IndexError, TypeError): + return {} + + if isinstance(vrrp_table, dict): + vrrp_table = [vrrp_table] + + for each_vrrp in vrrp_table: + vrrp_row = each_vrrp['ROW_vrrp_group'] + parsed_vrrp = apply_key_map(vrrp_key, vrrp_row) + + if parsed_vrrp['preempt'] == 'Disable': + parsed_vrrp['preempt'] = False + elif parsed_vrrp['preempt'] == 'Enable': + parsed_vrrp['preempt'] = True + + if parsed_vrrp['group'] == group: + parsed_vrrp['admin_state'] = get_vrr_status(group, module, name) + + return parsed_vrrp + return vrrp + + +def get_commands_config_vrrp(delta, group): + commands = [] + + CMDS = { + 'priority': 'priority {0}', + 'preempt': 'preempt', + 'vip': 'address {0}', + 'interval': 'advertisement-interval {0}', + 'auth': 'authentication text {0}' + } + + vip = delta.get('vip') + priority = delta.get('priority') + preempt = delta.get('preempt') + interval = delta.get('interval') + auth = delta.get('authentication') + admin_state = delta.get('admin_state') + + if vip: + commands.append((CMDS.get('vip')).format(vip)) + if priority: + commands.append((CMDS.get('priority')).format(priority)) + if preempt: + commands.append(CMDS.get('preempt')) + elif preempt is False: + commands.append('no ' + CMDS.get('preempt')) + if interval: + 
commands.append((CMDS.get('interval')).format(interval)) + if auth: + commands.append((CMDS.get('auth')).format(auth)) + if admin_state: + commands.append(admin_state) + + commands.insert(0, 'vrrp {0}'.format(group)) + + return commands + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def validate_params(param, module): + value = module.params[param] + + if param == 'group': + try: + if (int(value) < 1 or int(value) > 255): + raise ValueError + except ValueError: + module.fail_json(msg="Warning! 'group' must be an integer between" + " 1 and 255", group=value) + elif param == 'priority': + try: + if (int(value) < 1 or int(value) > 254): + raise ValueError + except ValueError: + module.fail_json(msg="Warning! 'priority' must be an integer " + "between 1 and 254", priority=value) + + +def main(): + argument_spec = dict( + group=dict(required=True, type='str'), + interface=dict(required=True), + priority=dict(required=False, type='str'), + preempt=dict(required=False, type='bool'), + vip=dict(required=False, type='str'), + admin_state=dict(required=False, type='str', + choices=['shutdown', 'no shutdown'], + default='no shutdown'), + authentication=dict(required=False, type='str'), + state=dict(choices=['absent', 'present'], + required=False, default='present'), + include_defaults=dict(default=False), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + state = module.params['state'] + interface = module.params['interface'].lower() + group = module.params['group'] + priority = module.params['priority'] + preempt = module.params['preempt'] + vip = module.params['vip'] + authentication = module.params['authentication'] + admin_state = module.params['admin_state'] + + transport = 
module.params['transport'] + + if state == 'present' and not vip: + module.fail_json(msg='the "vip" param is required when state=present') + + intf_type = get_interface_type(interface) + if (intf_type != 'ethernet' and transport == 'cli'): + if is_default(interface, module) == 'DNE': + module.fail_json(msg='That interface does not exist yet. Create ' + 'it first.', interface=interface) + if intf_type == 'loopback': + module.fail_json(msg="Loopback interfaces don't support VRRP.", + interface=interface) + + mode, name = get_interface_mode(interface, intf_type, module) + if mode == 'layer2': + module.fail_json(msg='That interface is a layer2 port.\nMake it ' + 'a layer 3 port first.', interface=interface) + + args = dict(group=group, priority=priority, preempt=preempt, + vip=vip, authentication=authentication, + admin_state=admin_state) + + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + existing = get_existing_vrrp(interface, group, module, name) + + changed = False + end_state = existing + commands = [] + + if state == 'present': + delta = dict( + set(proposed.iteritems()).difference(existing.iteritems())) + if delta: + command = get_commands_config_vrrp(delta, group) + commands.append(command) + + elif state == 'absent': + if existing: + commands.append(['no vrrp {0}'.format(group)]) + + if commands: + commands.insert(0, ['interface {0}'.format(interface)]) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + execute_config_command(cmds, module) + changed = True + end_state = get_existing_vrrp(interface, group, module, name) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['updates'] = cmds + results['changed'] = changed + results['end_state'] = end_state + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_vtp_domain.py 
b/network/nxos/nxos_vtp_domain.py new file mode 100644 index 00000000000..f96db115a6b --- /dev/null +++ b/network/nxos/nxos_vtp_domain.py @@ -0,0 +1,423 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_vtp_domain +version_added: "2.2" +short_description: Manages VTP domain configuration. +description: + - Manages VTP domain configuration. +extends_documentation_fragment: nxos +author: + - Gabriele Gerbino (@GGabriele) +notes: + - VTP feature must be active on the device to use this module. + - This module is used to manage only VTP domain names. + - VTP domain names are case-sensible. + - If it's never been configured before, VTP version is set to 1 by default. + Otherwise, it leaves the previous configured version untouched. + Use M(nxos_vtp_version) to change it. + - Use this in combination with M(nxos_vtp_password) and M(nxos_vtp_version) + to fully manage VTP operations. +options: + domain: + description: + - VTP domain name. 
+ required: true +''' + +EXAMPLES = ''' +# ENSURE VTP DOMAIN IS CONFIGURED +- nxos_vtp_domain: + domain: ntc + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"domain": "ntc"} +existing: + description: + - k/v pairs of existing vtp domain + type: dict + sample: {"domain": "testing", "version": "2", "vtp_password": "\"} +end_state: + description: k/v pairs of vtp domain after module execution + returned: always + type: dict + sample: {"domain": "ntc", "version": "2", "vtp_password": "\"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["vtp domain ntc"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + + +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) 
+ return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in 
commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when issuing commands containing 'show run'. 
+ """ + if 'xml' in response[0] or response[0] == '\n': + body = [] + elif 'status' in command: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'status' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def get_vtp_config(module): + command = 'show vtp status' + + body = execute_show_command( + command, module, command_type='cli_show_ascii')[0] + vtp_parsed = {} + + if body: + version_regex = '.*VTP version running\s+:\s+(?P\d).*' + domain_regex 
= '.*VTP Domain Name\s+:\s+(?P\S+).*' + + try: + match_version = re.match(version_regex, body, re.DOTALL) + version = match_version.groupdict()['version'] + except AttributeError: + version = '' + + try: + match_domain = re.match(domain_regex, body, re.DOTALL) + domain = match_domain.groupdict()['domain'] + except AttributeError: + domain = '' + + if domain and version: + vtp_parsed['domain'] = domain + vtp_parsed['version'] = version + vtp_parsed['vtp_password'] = get_vtp_password(module) + + return vtp_parsed + + +def get_vtp_password(module): + command = 'show vtp password' + body = execute_show_command(command, module)[0] + password = body['passwd'] + if password: + return str(password) + else: + return "" + + +def main(): + argument_spec = dict( + domain=dict(type='str', required=True), + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + domain = module.params['domain'] + + existing = get_vtp_config(module) + end_state = existing + + args = dict(domain=domain) + + changed = False + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + delta = dict(set(proposed.iteritems()).difference(existing.iteritems())) + + commands = [] + if delta: + commands.append(['vtp domain {0}'.format(domain)]) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + end_state = get_vtp_config(module) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = cmds + results['changed'] = changed + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_vtp_password.py b/network/nxos/nxos_vtp_password.py new file mode 100644 index 00000000000..12c142c2fc7 --- /dev/null +++ b/network/nxos/nxos_vtp_password.py @@ -0,0 +1,480 @@ 
+#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- + +module: nxos_vtp +version_added: "2.2" +short_description: Manages VTP password configuration. +description: + - Manages VTP password configuration. +extends_documentation_fragment: nxos +author: + - Gabriele Gerbino (@GGabriele) +notes: + - VTP feature must be active on the device to use this module. + - This module is used to manage only VTP passwords. + - Use this in combination with M(nxos_vtp_domain) and M(nxos_vtp_version) + to fully manage VTP operations. + - You can set/remove password only if a VTP domain already exist. + - If C(state=absent) and no C(vtp_password) is provided, it remove the current + VTP password. + - If C(state=absent) and C(vtp_password) is provided, the proposed C(vtp_password) + has to match the existing one in order to remove it. 
+options: + vtp_password: + description: + - VTP password + required: false + default: null + state: + description: + - Manage the state of the resource + required: false + default: present + choices: ['present','absent'] +''' + +EXAMPLES = ''' +# ENSURE VTP PASSWORD IS SET +- nxos_vtp_password: + password: ntc + state: present + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" + +# ENSURE VTP PASSWORD IS REMOVED +- nxos_vtp_password: + password: ntc + state: absent + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"vtp_password": "new_ntc"} +existing: + description: + - k/v pairs of existing vtp + type: dict + sample: {"domain": "ntc", "version": "1", "vtp_password": "ntc"} +end_state: + description: k/v pairs of vtp after module execution + returned: always + type: dict + sample: {"domain": "ntc", "version": "1", "vtp_password": "new_ntc"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["vtp password new_ntc"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in 
S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if 
not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when issuing commands containing 'show run'. 
+ """ + if 'xml' in response[0] or response[0] == '\n': + body = [] + elif 'show run' in command: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'show run' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = str(value) + else: + new_dict[new_key] = value + 
return new_dict + + +def get_vtp_config(module): + command = 'show vtp status' + + body = execute_show_command( + command, module, command_type='cli_show_ascii')[0] + vtp_parsed = {} + + if body: + version_regex = '.*VTP version running\s+:\s+(?P\d).*' + domain_regex = '.*VTP Domain Name\s+:\s+(?P\S+).*' + + try: + match_version = re.match(version_regex, body, re.DOTALL) + version = match_version.groupdict()['version'] + except AttributeError: + version = '' + + try: + match_domain = re.match(domain_regex, body, re.DOTALL) + domain = match_domain.groupdict()['domain'] + except AttributeError: + domain = '' + + if domain and version: + vtp_parsed['domain'] = domain + vtp_parsed['version'] = version + vtp_parsed['vtp_password'] = get_vtp_password(module) + + return vtp_parsed + + +def get_vtp_password(module): + command = 'show vtp password' + body = execute_show_command(command, module)[0] + password = body['passwd'] + if password: + return str(password) + else: + return "" + + +def main(): + argument_spec = dict( + vtp_password=dict(type='str', no_log=True), + state=dict(choices=['absent', 'present'], + default='present'), + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + vtp_password = module.params['vtp_password'] or None + state = module.params['state'] + + existing = get_vtp_config(module) + end_state = existing + + args = dict(vtp_password=vtp_password) + + changed = False + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + delta = dict(set(proposed.iteritems()).difference(existing.iteritems())) + + commands = [] + if state == 'absent': + if vtp_password is not None: + if existing['vtp_password'] == proposed['vtp_password']: + commands.append(['no vtp password']) + else: + module.fail_json(msg="Proposed vtp password doesn't match " + "current vtp password. It cannot be " + "removed when state=absent. 
If you are " + "trying to change the vtp password, use " + "state=present.") + else: + if not existing.get('domain'): + module.fail_json(msg='Cannot remove a vtp password ' + 'before vtp domain is set.') + + elif existing['vtp_password'] != ('\\'): + commands.append(['no vtp password']) + + elif state == 'present': + if delta: + if not existing.get('domain'): + module.fail_json(msg='Cannot set vtp password ' + 'before vtp domain is set.') + + else: + commands.append(['vtp password {0}'.format(vtp_password)]) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + end_state = get_vtp_config(module) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = cmds + results['changed'] = changed + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_vtp_version.py b/network/nxos/nxos_vtp_version.py new file mode 100644 index 00000000000..bd10745ba86 --- /dev/null +++ b/network/nxos/nxos_vtp_version.py @@ -0,0 +1,418 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- + +module: nxos_vtp_version +version_added: "2.2" +short_description: Manages VTP version configuration. +description: + - Manages VTP version configuration. +extends_documentation_fragment: nxos +author: + - Gabriele Gerbino (@GGabriele) +notes: + - VTP feature must be active on the device to use this module. + - This module is used to manage only VTP version. + - Use this in combination with M(nxos_vtp_password) and M(nxos_vtp_version) + to fully manage VTP operations. +options: + version: + description: + - VTP version number. + required: true + choices: ['1', '2'] +''' +EXAMPLES = ''' +# ENSURE VTP VERSION IS 2 +- nxos_vtp_version: + version: 2 + host: "{{ inventory_hostname }}" + username: "{{ un }}" + password: "{{ pwd }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: always + type: dict + sample: {"version": "2"} +existing: + description: + - k/v pairs of existing vtp + type: dict + sample: {"domain": "testing", "version": "1", "vtp_password": "\"} +end_state: + description: k/v pairs of vtp after module execution + returned: always + type: dict + sample: {"domain": "testing", "version": "2", "vtp_password": "\"} +updates: + description: command sent to the device + returned: always + type: list + sample: ["vtp version 2"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' +import json + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return 
list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + 
ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + + +def execute_config_command(commands, module): + try: + module.configure(commands) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + except AttributeError: + try: + commands.insert(0, 'configure') + module.cli.add_commands(commands, output='config') + module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending CLI commands', + error=str(clie), commands=commands) + + +def get_cli_body_ssh(command, response, module): + """Get response for when transport=cli. This is kind of a hack and mainly + needed because these modules were originally written for NX-API. And + not every command supports "| json" when using cli/ssh. 
As such, we assume + if | json returns an XML string, it is a valid command, but that the + resource doesn't exist yet. Instead, the output will be a raw string + when issuing commands containing 'show run'. + """ + if 'xml' in response[0] or response[0] == '\n': + body = [] + elif 'status' in command: + body = response + else: + try: + body = [json.loads(response[0])] + except ValueError: + module.fail_json(msg='Command does not support JSON output', + command=command) + return body + + +def execute_show(cmds, module, command_type=None): + command_type_map = { + 'cli_show': 'json', + 'cli_show_ascii': 'text' + } + + try: + if command_type: + response = module.execute(cmds, command_type=command_type) + else: + response = module.execute(cmds) + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + except AttributeError: + try: + if command_type: + command_type = command_type_map.get(command_type) + module.cli.add_commands(cmds, output=command_type) + response = module.cli.run_commands() + else: + module.cli.add_commands(cmds, raw=True) + response = module.cli.run_commands() + except ShellError: + clie = get_exception() + module.fail_json(msg='Error sending {0}'.format(cmds), + error=str(clie)) + return response + + +def execute_show_command(command, module, command_type='cli_show'): + if module.params['transport'] == 'cli': + if 'status' not in command: + command += ' | json' + cmds = [command] + response = execute_show(cmds, module) + body = get_cli_body_ssh(command, response, module) + elif module.params['transport'] == 'nxapi': + cmds = [command] + body = execute_show(cmds, module, command_type=command_type) + + return body + + +def flatten_list(command_lists): + flat_command_list = [] + for command in command_lists: + if isinstance(command, list): + flat_command_list.extend(command) + else: + flat_command_list.append(command) + return flat_command_list + + +def get_vtp_config(module): + command 
= 'show vtp status' + + body = execute_show_command( + command, module, command_type='cli_show_ascii')[0] + vtp_parsed = {} + + if body: + version_regex = '.*VTP version running\s+:\s+(?P\d).*' + domain_regex = '.*VTP Domain Name\s+:\s+(?P\S+).*' + + try: + match_version = re.match(version_regex, body, re.DOTALL) + version = match_version.groupdict()['version'] + except AttributeError: + version = '' + + try: + match_domain = re.match(domain_regex, body, re.DOTALL) + domain = match_domain.groupdict()['domain'] + except AttributeError: + domain = '' + + if domain and version: + vtp_parsed['domain'] = domain + vtp_parsed['version'] = version + vtp_parsed['vtp_password'] = get_vtp_password(module) + + return vtp_parsed + + +def get_vtp_password(module): + command = 'show vtp password' + body = execute_show_command(command, module)[0] + password = body['passwd'] + if password: + return str(password) + else: + return "" + + +def main(): + argument_spec = dict( + version=dict(type='str', choices=['1', '2'], required=True), + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + version = module.params['version'] + + existing = get_vtp_config(module) + end_state = existing + + args = dict(version=version) + + changed = False + proposed = dict((k, v) for k, v in args.iteritems() if v is not None) + delta = dict(set(proposed.iteritems()).difference(existing.iteritems())) + + commands = [] + if delta: + commands.append(['vtp version {0}'.format(version)]) + + cmds = flatten_list(commands) + if cmds: + if module.check_mode: + module.exit_json(changed=True, commands=cmds) + else: + changed = True + execute_config_command(cmds, module) + end_state = get_vtp_config(module) + if 'configure' in cmds: + cmds.pop(0) + + results = {} + results['proposed'] = proposed + results['existing'] = existing + results['end_state'] = end_state + results['updates'] = cmds + results['changed'] = changed + + module.exit_json(**results) + + +if __name__ == 
'__main__': + main() diff --git a/network/nxos/nxos_vxlan_vtep.py b/network/nxos/nxos_vxlan_vtep.py new file mode 100644 index 00000000000..6d29597cd28 --- /dev/null +++ b/network/nxos/nxos_vxlan_vtep.py @@ -0,0 +1,530 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_vxlan_vtep +version_added: "2.2" +short_description: Manages VXLAN Network Virtualization Endpoint (NVE). +description: + - Manages VXLAN Network Virtualization Endpoint (NVE) overlay interface + that terminates VXLAN tunnels. +author: Gabriele Gerbino (@GGabriele) +extends_documentation_fragment: nxos +notes: + - The module is used to manage NVE properties, not to create NVE + interfaces. Use M(nxos_interface) if you wish to do so. + - C(state=absent) removes the interface. + - Default, where supported, restores params default value. +options: + interface: + description: + - Interface name for the VXLAN Network Virtualization Endpoint. + required: true + description: + description: + - Description of the NVE interface. + required: false + default: null + host_reachability: + description: + - Specify mechanism for host reachability advertisement. 
+ required: false + choices: ['true', 'false'] + default: null + shutdown: + description: + - Administratively shutdown the NVE interface. + required: false + choices: ['true','false'] + default: false + source_interface: + description: + - Specify the loopback interface whose IP address should be + used for the NVE interface. + required: false + default: null + source_interface_hold_down_time: + description: + - Suppresses advertisement of the NVE loopback address until + the overlay has converged. + required: false + default: null + state: + description: + - Determines whether the config should be present or not + on the device. + required: false + default: present + choices: ['present','absent'] +''' +EXAMPLES = ''' +- nxos_vxlan_vtep: + interface: nve1 + description: default + host_reachability: default + source_interface: Loopback0 + source_interface_hold_down_time: 30 + shutdown: default + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"description": "simple description", "host_reachability": true, + "interface": "nve1", "shutdown": true, "source_interface": "loopback0", + "source_interface_hold_down_time": "30"} +existing: + description: k/v pairs of existing VXLAN VTEP configuration + returned: verbose mode + type: dict + sample: {} +end_state: + description: k/v pairs of VXLAN VTEP configuration after module execution + returned: verbose mode + type: dict + sample: {"description": "simple description", "host_reachability": true, + "interface": "nve1", "shutdown": true, "source_interface": "loopback0", + "source_interface_hold_down_time": "30"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["interface nve1", "source-interface loopback0", + "source-interface hold-down-time 30", "description simple description", + "shutdown", 
"host-reachability protocol bgp"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): + + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + 
except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +BOOL_PARAMS = [ + 'shutdown', + 'host_reachability' +] +PARAM_TO_COMMAND_KEYMAP = { + 'description': 'description', + 'host_reachability': 'host-reachability protocol bgp', + 'interface': 'interface', + 'shutdown': 'shutdown', + 'source_interface': 'source-interface', + 
'source_interface_hold_down_time': 'source-interface hold-down-time' +} +PARAM_TO_DEFAULT_KEYMAP = { + 'description': False, + 'shutdown': True, +} + +WARNINGS = [] + + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + +def get_value(arg, config, module): + if arg in BOOL_PARAMS: + REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + NO_SHUT_REGEX = re.compile(r'\s+no shutdown\s*$', re.M) + value = False + if arg == 'shutdown': + try: + if NO_SHUT_REGEX.search(config): + value = False + elif REGEX.search(config): + value = True + except TypeError: + value = False + else: + try: + if REGEX.search(config): + value = True + except TypeError: + value = False + else: + REGEX = re.compile(r'(?:{0}\s)(?P.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + NO_DESC_REGEX = re.compile(r'\s+{0}\s*$'.format('no description'), re.M) + SOURCE_INTF_REGEX = re.compile(r'(?:{0}\s)(?P\S+)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = '' + if arg == 'description': + if NO_DESC_REGEX.search(config): + value = '' + elif PARAM_TO_COMMAND_KEYMAP[arg] in config: + value = REGEX.search(config).group('value').strip() + elif arg == 'source_interface': + for line in config.splitlines(): + try: + if PARAM_TO_COMMAND_KEYMAP[arg] in config: + value = SOURCE_INTF_REGEX.search(config).group('value').strip() + break + except AttributeError: + value = '' + else: + if PARAM_TO_COMMAND_KEYMAP[arg] in config: + value = REGEX.search(config).group('value').strip() + return value + + +def get_existing(module, args): + existing = {} + netcfg = get_config(module) + + interface_string = 'interface {0}'.format(module.params['interface'].lower()) + parents = [interface_string] + config = netcfg.get_section(parents) + + if config: + for arg in args: + existing[arg] = get_value(arg, config, module) + + existing['interface'] = module.params['interface'].lower() + else: + if interface_string in str(netcfg): + 
existing['interface'] = module.params['interface'].lower() + for arg in args: + existing[arg] = '' + return existing + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = value + else: + new_dict[new_key] = value + return new_dict + + +def fix_commands(commands, module): + source_interface_command = '' + no_source_interface_command = '' + + for command in commands: + if 'no source-interface hold-down-time' in command: + pass + elif 'source-interface hold-down-time' in command: + pass + elif 'no source-interface' in command: + no_source_interface_command = command + elif 'source-interface' in command: + source_interface_command = command + + if source_interface_command: + commands.pop(commands.index(source_interface_command)) + commands.insert(0, source_interface_command) + + if no_source_interface_command: + commands.pop(commands.index(no_source_interface_command)) + commands.append(no_source_interface_command) + return commands + + +def state_present(module, existing, proposed, candidate): + commands = list() + proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) + existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) + for key, value in proposed_commands.iteritems(): + if value is True: + commands.append(key) + + elif value is False: + commands.append('no {0}'.format(key)) + + elif value == 'default': + if existing_commands.get(key): + existing_value = existing_commands.get(key) + commands.append('no {0} {1}'.format(key, existing_value)) + else: + if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS: + commands.append('no {0}'.format(key.lower())) + module.exit_json(commands=commands) + else: + command = '{0} {1}'.format(key, value.lower()) + commands.append(command) + + if commands: + commands = fix_commands(commands, module) + parents = ['interface 
{0}'.format(module.params['interface'].lower())] + candidate.add(commands, parents=parents) + else: + if not existing and module.params['interface']: + commands = ['interface {0}'.format(module.params['interface'].lower())] + candidate.add(commands, parents=[]) + + +def state_absent(module, existing, proposed, candidate): + commands = ['no interface {0}'.format(module.params['interface'].lower())] + candidate.add(commands, parents=[]) + + +def main(): + argument_spec = dict( + interface=dict(required=True, type='str'), + description=dict(required=False, type='str'), + host_reachability=dict(required=False, type='bool'), + shutdown=dict(required=False, type='bool'), + source_interface=dict(required=False, type='str'), + source_interface_hold_down_time=dict(required=False, type='str'), + m_facts=dict(required=False, default=False, type='bool'), + state=dict(choices=['present', 'absent'], default='present', + required=False), + include_defaults=dict(default=True), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + state = module.params['state'] + interface = module.params['interface'].lower() + + args = [ + 'interface', + 'description', + 'host_reachability', + 'shutdown', + 'source_interface', + 'source_interface_hold_down_time' + ] + + existing = invoke('get_existing', module, args) + end_state = existing + proposed_args = dict((k, v) for k, v in module.params.iteritems() + if v is not None and k in args) + + proposed = {} + for key, value in proposed_args.iteritems(): + if key != 'interface': + if str(value).lower() == 'true': + value = True + elif str(value).lower() == 'false': + value = False + elif str(value).lower() == 'default': + value = PARAM_TO_DEFAULT_KEYMAP.get(key) + if value is None: + if key in BOOL_PARAMS: + value = False + else: + value = 'default' + if existing.get(key) or (not existing.get(key) and value): + proposed[key] = value + + result = {} + 
if state == 'present' or (state == 'absent' and existing): + if not existing: + WARNINGS.append("The proposed NVE interface did not exist. " + "It's recommended to use nxos_interface to create " + "all logical interfaces.") + candidate = CustomNetworkConfig(indent=3) + invoke('state_%s' % state, module, existing, proposed, candidate) + + try: + response = load_config(module, candidate) + result.update(response) + except ShellError: + exc = get_exception() + module.fail_json(msg=str(exc)) + else: + result['updates'] = [] + + result['connected'] = module.connected + if module._verbosity > 0: + end_state = invoke('get_existing', module, args) + result['end_state'] = end_state + result['existing'] = existing + result['proposed'] = proposed_args + + if WARNINGS: + result['warnings'] = WARNINGS + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/nxos/nxos_vxlan_vtep_vni.py b/network/nxos/nxos_vxlan_vtep_vni.py new file mode 100644 index 00000000000..cf354d59c4f --- /dev/null +++ b/network/nxos/nxos_vxlan_vtep_vni.py @@ -0,0 +1,589 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nxos_vxlan_vtep_vni +version_added: "2.2" +short_description: Creates a Virtual Network Identifier member (VNI) +description: + - Creates a Virtual Network Identifier member (VNI) for an NVE + overlay interface. +author: Gabriele Gerbino (@GGabriele) +extends_documentation_fragment: nxos +notes: + - default, where supported, restores params default value. +options: + interface: + description: + - Interface name for the VXLAN Network Virtualization Endpoint. + required: true + vni: + description: + - ID of the Virtual Network Identifier. + required: true + assoc_vrf: + description: + - This attribute is used to identify and separate processing VNIs + that are associated with a VRF and used for routing. The VRF + and VNI specified with this command must match the configuration + of the VNI under the VRF. + required: false + choices: ['true','false'] + default: null + ingress_replication: + description: + - Specifies mechanism for host reachability advertisement. + required: false + choices: ['bgp','static'] + default: null + multicast_group: + description: + - The multicast group (range) of the VNI. Valid values are + string and keyword 'default'. + required: false + default: null + peer_list: + description: + - Set the ingress-replication static peer list. Valid values + are an array, a space-separated string of ip addresses, + or the keyword 'default'. + required: false + default: null + suppress_arp: + description: + - Suppress arp under layer 2 VNI. + required: false + choices: ['true','false'] + default: null + state: + description: + - Determines whether the config should be present or not + on the device. + required: false + default: present + choices: ['present','absent'] + include_defaults: + description: + - Specify to use or not the complete running configuration + for module operations. 
+ required: false + default: true + choices: ['true','true'] + config: + description: + - Configuration string to be used for module operations. If not + specified, the module will use the current running configuration. + required: false + default: null + save: + description: + - Specify to save the running configuration after + module operations. + required: false + default: false + choices: ['true','false'] +''' +EXAMPLES = ''' +- nxos_vxlan_vtep_vni: + interface: nve1 + vni: 6000 + ingress_replication: default + username: "{{ un }}" + password: "{{ pwd }}" + host: "{{ inventory_hostname }}" +''' + +RETURN = ''' +proposed: + description: k/v pairs of parameters passed into module + returned: verbose mode + type: dict + sample: {"ingress_replication": "default", "interface": "nve1", "vni": "6000"} +existing: + description: k/v pairs of existing configuration + returned: verbose mode + type: dict + sample: {} +end_state: + description: k/v pairs of configuration after module execution + returned: verbose mode + type: dict + sample: {"assoc_vrf": false, "ingress_replication": "", "interface": "nve1", + "multicast_group": "", "peer_list": [], + "suppress_arp": false, "vni": "6000"} +updates: + description: commands sent to the device + returned: always + type: list + sample: ["interface nve1", "member vni 6000"] +changed: + description: check to see if a change was made on the device + returned: always + type: boolean + sample: true +''' + +# COMMON CODE FOR MIGRATION +import re + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcfg import NetworkConfig, ConfigLine +from ansible.module_utils.shell import ShellError + +try: + from ansible.module_utils.nxos import get_module +except ImportError: + from ansible.module_utils.nxos import NetworkModule + + +def to_list(val): + if isinstance(val, (list, tuple)): + return list(val) + elif val is not None: + return [val] + else: + return list() + + +class CustomNetworkConfig(NetworkConfig): 
+ + def expand_section(self, configobj, S=None): + if S is None: + S = list() + S.append(configobj) + for child in configobj.children: + if child in S: + continue + self.expand_section(child, S) + return S + + def get_object(self, path): + for item in self.items: + if item.text == path[-1]: + parents = [p.text for p in item.parents] + if parents == path[:-1]: + return item + + def to_block(self, section): + return '\n'.join([item.raw for item in section]) + + def get_section(self, path): + try: + section = self.get_section_objects(path) + return self.to_block(section) + except ValueError: + return list() + + def get_section_objects(self, path): + if not isinstance(path, list): + path = [path] + obj = self.get_object(path) + if not obj: + raise ValueError('path does not exist in config') + return self.expand_section(obj) + + + def add(self, lines, parents=None): + """Adds one or lines of configuration + """ + + ancestors = list() + offset = 0 + obj = None + + ## global config command + if not parents: + for line in to_list(lines): + item = ConfigLine(line) + item.raw = line + if item not in self.items: + self.items.append(item) + + else: + for index, p in enumerate(parents): + try: + i = index + 1 + obj = self.get_section_objects(parents[:i])[0] + ancestors.append(obj) + + except ValueError: + # add parent to config + offset = index * self.indent + obj = ConfigLine(p) + obj.raw = p.rjust(len(p) + offset) + if ancestors: + obj.parents = list(ancestors) + ancestors[-1].children.append(obj) + self.items.append(obj) + ancestors.append(obj) + + # add child objects + for line in to_list(lines): + # check if child already exists + for child in ancestors[-1].children: + if child.text == line: + break + else: + offset = len(parents) * self.indent + item = ConfigLine(line) + item.raw = line.rjust(len(line) + offset) + item.parents = ancestors + ancestors[-1].children.append(item) + self.items.append(item) + + +def get_network_module(**kwargs): + try: + return 
get_module(**kwargs) + except NameError: + return NetworkModule(**kwargs) + +def get_config(module, include_defaults=False): + config = module.params['config'] + if not config: + try: + config = module.get_config() + except AttributeError: + defaults = module.params['include_defaults'] + config = module.config.get_config(include_defaults=defaults) + return CustomNetworkConfig(indent=2, contents=config) + +def load_config(module, candidate): + config = get_config(module) + + commands = candidate.difference(config) + commands = [str(c).strip() for c in commands] + + save_config = module.params['save'] + + result = dict(changed=False) + + if commands: + if not module.check_mode: + try: + module.configure(commands) + except AttributeError: + module.config(commands) + + if save_config: + try: + module.config.save_config() + except AttributeError: + module.execute(['copy running-config startup-config']) + + result['changed'] = True + result['updates'] = commands + + return result +# END OF COMMON CODE + +BOOL_PARAMS = ['suppress_arp'] +PARAM_TO_COMMAND_KEYMAP = { + 'assoc_vrf': 'associate-vrf', + 'interface': 'interface', + 'vni': 'member vni', + 'ingress_replication': 'ingress-replication protocol', + 'multicast_group': 'mcast-group', + 'peer_list': 'peer-ip', + 'suppress_arp': 'suppress-arp' +} +PARAM_TO_DEFAULT_KEYMAP = {} +WARNINGS = [] + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + + +def get_value(arg, config, module): + if arg in BOOL_PARAMS: + REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = False + try: + if REGEX.search(config): + value = True + except TypeError: + value = False + else: + REGEX = re.compile(r'(?:{0}\s)(?P.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) + value = '' + if PARAM_TO_COMMAND_KEYMAP[arg] in config: + value = REGEX.search(config).group('value') + return value + + +def check_interface(module, netcfg): + config = str(netcfg) + + REGEX 
= re.compile(r'(?:interface nve)(?P.*)$', re.M) + value = '' + if 'interface nve' in config: + value = 'nve{0}'.format(REGEX.search(config).group('value')) + + return value + + +def get_custom_value(arg, config, module): + splitted_config = config.splitlines() + if arg == 'assoc_vrf': + value = False + if 'associate-vrf' in config: + value = True + elif arg == 'peer_list': + value = [] + REGEX = re.compile(r'(?:peer-ip\s)(?P.*)$', re.M) + for line in splitted_config: + peer_value = '' + if PARAM_TO_COMMAND_KEYMAP[arg] in line: + peer_value = REGEX.search(line).group('peer_value') + if peer_value: + value.append(peer_value) + return value + + +def get_existing(module, args): + existing = {} + netcfg = get_config(module) + + custom = [ + 'assoc_vrf', + 'peer_list' + ] + + interface_exist = check_interface(module, netcfg) + if interface_exist: + parents = ['interface {0}'.format(interface_exist)] + temp_config = netcfg.get_section(parents) + + if 'associate-vrf' in temp_config: + parents.append('member vni {0} associate-vrf'.format( + module.params['vni'])) + config = netcfg.get_section(parents) + elif 'member vni' in temp_config: + parents.append('member vni {0}'.format(module.params['vni'])) + config = netcfg.get_section(parents) + else: + config = {} + + if config: + for arg in args: + if arg not in ['interface', 'vni']: + if arg in custom: + existing[arg] = get_custom_value(arg, config, module) + else: + existing[arg] = get_value(arg, config, module) + existing['interface'] = interface_exist + existing['vni'] = module.params['vni'] + + return existing, interface_exist + + +def apply_key_map(key_map, table): + new_dict = {} + for key, value in table.items(): + new_key = key_map.get(key) + if new_key: + value = table.get(key) + if value: + new_dict[new_key] = value + else: + new_dict[new_key] = value + return new_dict + + +def state_present(module, existing, proposed, candidate): + commands = list() + proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, 
proposed) + existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) + + for key, value in proposed_commands.iteritems(): + if key == 'associate-vrf': + command = 'member vni {0} {1}'.format(module.params['vni'], key) + + if value: + commands.append(command) + else: + commands.append('no {0}'.format(command)) + + elif key == 'peer-ip' and value != 'default': + for peer in value: + commands.append('{0} {1}'.format(key, peer)) + + elif value is True: + commands.append(key) + + elif value is False: + commands.append('no {0}'.format(key)) + + elif value == 'default': + if existing_commands.get(key): + existing_value = existing_commands.get(key) + if key == 'peer-ip': + for peer in existing_value: + commands.append('no {0} {1}'.format(key, peer)) + else: + commands.append('no {0} {1}'.format(key, existing_value)) + else: + if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS: + commands.append('no {0}'.format(key.lower())) + else: + command = '{0} {1}'.format(key, value.lower()) + commands.append(command) + + if commands: + vni_command = 'member vni {0}'.format(module.params['vni']) + ingress_replication_command = 'ingress-replication protocol static' + interface_command = 'interface {0}'.format(module.params['interface']) + + if ingress_replication_command in commands: + static_level_cmds = [cmd for cmd in commands if 'peer' in cmd] + parents = [interface_command, vni_command, ingress_replication_command] + candidate.add(static_level_cmds, parents=parents) + commands = [cmd for cmd in commands if 'peer' not in cmd] + + if vni_command in commands: + parents = [interface_command] + commands.remove(vni_command) + if module.params['assoc_vrf'] is None: + parents.append(vni_command) + candidate.add(commands, parents=parents) + + +def state_absent(module, existing, proposed, candidate): + if existing['assoc_vrf']: + commands = ['no member vni {0} associate-vrf'.format( + module.params['vni'])] + else: + commands = ['no member vni 
{0}'.format(module.params['vni'])] + parents = ['interface {0}'.format(module.params['interface'])] + candidate.add(commands, parents=parents) + + +def main(): + argument_spec = dict( + interface=dict(required=True, type='str'), + vni=dict(required=True, type='str'), + assoc_vrf=dict(required=False, type='bool'), + multicast_group=dict(required=False, type='str'), + peer_list=dict(required=False, type='list'), + suppress_arp=dict(required=False, type='bool'), + ingress_replication=dict(required=False, type='str', + choices=['bgp', 'static', 'default']), + state=dict(choices=['present', 'absent'], default='present', + required=False), + include_defaults=dict(default=True), + config=dict(), + save=dict(type='bool', default=False) + ) + module = get_network_module(argument_spec=argument_spec, + supports_check_mode=True) + + if module.params['assoc_vrf']: + mutually_exclusive_params = ['multicast_group', + 'suppress_arp', + 'ingress_replication'] + for param in mutually_exclusive_params: + if module.params[param]: + module.fail_json(msg='assoc_vrf cannot be used with ' + '{0} param'.format(param)) + if module.params['peer_list']: + if module.params['ingress_replication'] != 'static': + module.fail_json(msg='ingress_replication=static is required ' + 'when using peer_list param') + else: + peer_list = module.params['peer_list'] + if peer_list[0] == 'default': + module.params['peer_list'] = 'default' + else: + stripped_peer_list = map(str.strip, peer_list) + module.params['peer_list'] = stripped_peer_list + + state = module.params['state'] + args = [ + 'assoc_vrf', + 'interface', + 'vni', + 'ingress_replication', + 'multicast_group', + 'peer_list', + 'suppress_arp' + ] + + existing, interface_exist = invoke('get_existing', module, args) + end_state = existing + proposed_args = dict((k, v) for k, v in module.params.iteritems() + if v is not None and k in args) + + proposed = {} + for key, value in proposed_args.iteritems(): + if key != 'interface': + if str(value).lower() 
== 'default': + value = PARAM_TO_DEFAULT_KEYMAP.get(key) + if value is None: + value = 'default' + if existing.get(key) or (not existing.get(key) and value): + proposed[key] = value + + result = {} + if state == 'present' or (state == 'absent' and existing): + if not interface_exist: + WARNINGS.append("The proposed NVE interface does not exist. " + "Use nxos_interface to create it first.") + elif interface_exist != module.params['interface']: + module.fail_json(msg='Only 1 NVE interface is allowed on ' + 'the switch.') + elif (existing and state == 'absent' and + existing['vni'] != module.params['vni']): + module.fail_json(msg="ERROR: VNI delete failed: Could not find" + " vni node for {0}".format( + module.params['vni']), + existing_vni=existing['vni']) + else: + candidate = CustomNetworkConfig(indent=3) + invoke('state_%s' % state, module, existing, proposed, candidate) + + try: + response = load_config(module, candidate) + result.update(response) + except ShellError: + exc = get_exception() + module.fail_json(msg=str(exc)) + else: + result['updates'] = [] + + result['connected'] = module.connected + if module._verbosity > 0: + end_state, interface_exist = invoke('get_existing', module, args) + result['end_state'] = end_state + result['existing'] = existing + result['proposed'] = proposed_args + + if WARNINGS: + result['warnings'] = WARNINGS + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/openswitch/ops_template.py b/network/openswitch/_ops_template.py similarity index 71% rename from network/openswitch/ops_template.py rename to network/openswitch/_ops_template.py index 91c737b6173..d3cb0e00d10 100644 --- a/network/openswitch/ops_template.py +++ b/network/openswitch/_ops_template.py @@ -15,6 +15,11 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
# +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + + DOCUMENTATION = """ --- module: ops_template @@ -28,6 +33,7 @@ against a provided candidate configuration. If there are changes, the candidate configuration is merged with the current configuration and pushed into OpenSwitch +deprecated: Deprecated in 2.2. Use ops_config instead extends_documentation_fragment: openswitch options: src: @@ -62,8 +68,8 @@ against the contents of source. There are times when it is not desirable to have the task get the current running-config for every task in a playbook. The I(config) argument allows the - implementer to pass in the configuruation to use as the base - config for comparision. + implementer to pass in the configuration to use as the base + config for comparison. required: false default: null """ @@ -86,54 +92,35 @@ RETURN = """ updates: description: The list of configuration updates to be merged - retured: always + returned: always type: dict sample: {obj, obj} responses: - desription: returns the responses when configuring using cli + description: returns the responses when configuring using cli returned: when transport == cli type: list sample: [...] 
""" -import copy - -def compare(this, other): - parents = [item.text for item in this.parents] - for entry in other: - if this == entry: - return None - return this - -def expand(obj, queue): - block = [item.raw for item in obj.parents] - block.append(obj.raw) - - current_level = queue - for b in block: - if b not in current_level: - current_level[b] = collections.OrderedDict() - current_level = current_level[b] - for c in obj.children: - if c.raw not in current_level: - current_level[c.raw] = collections.OrderedDict() - -def flatten(data, obj): - for k, v in data.items(): - obj.append(k) - flatten(v, obj) - return obj + +import ansible.module_utils.openswitch +from ansible.module_utils.netcfg import NetworkConfig, dumps +from ansible.module_utils.network import NetworkModule +from ansible.module_utils.openswitch import HAS_OPS + def get_config(module): config = module.params['config'] or dict() if not config and not module.params['force']: - config = module.config + config = module.config.get_config() return config + def sort(val): if isinstance(val, (list, set)): return sorted(val) return val + def diff(this, other, path=None): updates = list() path = path or list() @@ -152,6 +139,7 @@ def diff(this, other, path=None): updates.append((list(path), key, value, other_value)) return updates + def merge(changeset, config=None): config = config or dict() for path, key, value, _ in changeset: @@ -168,7 +156,7 @@ def main(): """ argument_spec = dict( - src=dict(type='dict'), + src=dict(type='str'), force=dict(default=False, type='bool'), backup=dict(default=False, type='bool'), config=dict(type='dict'), @@ -176,72 +164,63 @@ def main(): mutually_exclusive = [('config', 'backup'), ('config', 'force')] - module = get_module(argument_spec=argument_spec, - mutually_exclusive=mutually_exclusive, - supports_check_mode=True) + module = NetworkModule(argument_spec=argument_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) - src = module.params['src'] + 
if not module.params['transport'] and not HAS_OPS: + module.fail_json(msg='unable to import ops.dc library') result = dict(changed=False) contents = get_config(module) - result['_backup'] = copy.deepcopy(module.config) + result['_backup'] = contents if module.params['transport'] in ['ssh', 'rest']: config = contents + try: + src = module.from_json(module.params['src']) + except ValueError: + module.fail_json(msg='unable to load src due to json parsing error') + changeset = diff(src, config) candidate = merge(changeset, config) updates = dict() for path, key, new_value, old_value in changeset: path = '%s.%s' % ('.'.join(path), key) - updates[path] = new_value + updates[path] = str(new_value) result['updates'] = updates if changeset: if not module.check_mode: - module.configure(config) + module.config(config) result['changed'] = True else: - config = module.parse_config(config) - candidate = module.parse_config(module.params['src']) - - commands = collections.OrderedDict() - toplevel = [c.text for c in config] + candidate = NetworkConfig(contents=module.params['src'], indent=4) - for line in candidate: - if line.text in ['!', '']: - continue + if contents: + config = NetworkConfig(contents=contents, indent=4) - if not line.parents: - if line.text not in toplevel: - expand(line, commands) - else: - item = compare(line, config) - if item: - expand(item, commands) - - commands = flatten(commands, list()) + if not module.params['force']: + commands = candidate.difference(config) + commands = dumps(commands, 'commands').split('\n') + commands = [str(c) for c in commands if c] + else: + commands = str(candidate).split('\n') if commands: if not module.check_mode: - commands = [str(c).strip() for c in commands] - response = module.configure(commands) + response = module.config(commands) result['responses'] = response result['changed'] = True + result['updates'] = commands module.exit_json(**result) -from ansible.module_utils.basic import * -from ansible.module_utils.urls 
import * -from ansible.module_utils.netcfg import * -from ansible.module_utils.shell import * -from ansible.module_utils.openswitch import * -if __name__ == '__main__': - main() - - +if __name__ == '__main__': + main() diff --git a/network/openswitch/ops_command.py b/network/openswitch/ops_command.py index acc01268821..0f1ffd04a1d 100644 --- a/network/openswitch/ops_command.py +++ b/network/openswitch/ops_command.py @@ -16,15 +16,19 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = """ --- module: ops_command version_added: "2.1" -author: "Peter sprygada (@privateip)" +author: "Peter Sprygada (@privateip)" short_description: Run arbitrary commands on OpenSwitch devices. description: - Sends arbitrary commands to an OpenSwitch node and returns the results - read from the device. The M(ops_command) module includes an + read from the device. This module includes an argument that will cause the module to wait for a specific condition before returning or timing out if the condition is not met. extends_documentation_fragment: openswitch @@ -33,30 +37,44 @@ description: - List of commands to send to the remote ops device over the configured provider. The resulting output from the command - is returned. If the I(waitfor) argument is provided, the + is returned. If the I(wait_for) argument is provided, the module is not returned until the condition is satisfied or the number of retires as expired. required: true - waitfor: + wait_for: description: - List of conditions to evaluate against the output of the - command. The task will wait for a each condition to be true + command. The task will wait for each condition to be true before moving forward. If the conditional is not true within the configured number of retries, the task fails. See examples. 
required: false default: null + aliases: ['waitfor'] + version_added: "2.2" + match: + description: + - The I(match) argument is used in conjunction with the + I(wait_for) argument to specify the match policy. Valid + values are C(all) or C(any). If the value is set to C(all) + then all conditionals in the I(wait_for) must be satisfied. If + the value is set to C(any) then only one of the values must be + satisfied. + required: false + default: all + choices: ['any', 'all'] + version_added: "2.2" retries: description: - Specifies the number of retries a command should by tried before it is considered failed. The command is run on the target device every retry and evaluated against the - waitfor conditions. + I(wait_for) conditions. required: false default: 10 interval: description: - - Configures the interval in seconds to wait between retries + - Configures the interval in seconds to wait between I(retries) of the command. If the command does not pass the specified conditions, the interval indicates how long to wait before trying the command again. @@ -65,21 +83,32 @@ """ EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. 
+vars: + cli: + host: "{{ inventory_hostname }}" + username: netop + password: netop + transport: cli + - ops_command: commands: - show version - register: output + provider: "{{ cli }}" - ops_command: commands: - show version - waitfor: + wait_for: - "result[0] contains OpenSwitch" + provider: "{{ cli }}" - ops_command: - commands: - - show version - - show interfaces + commands: + - show version + - show interfaces + provider: "{{ cli }}" """ RETURN = """ @@ -97,69 +126,103 @@ failed_conditions: description: the conditionals that failed - retured: failed + returned: failed type: list sample: ['...', '...'] """ +import ansible.module_utils.openswitch +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcli import CommandRunner +from ansible.module_utils.netcli import AddCommandError, FailedConditionsError +from ansible.module_utils.network import NetworkModule, NetworkError +from ansible.module_utils.six import string_types -import time +VALID_KEYS = ['command', 'prompt', 'response'] def to_lines(stdout): for item in stdout: - if isinstance(item, basestring): + if isinstance(item, string_types): item = str(item).split('\n') yield item +def parse_commands(module): + for cmd in module.params['commands']: + if isinstance(cmd, string_types): + cmd = dict(command=cmd, output=None) + elif 'command' not in cmd: + module.fail_json(msg='command keyword argument is required') + elif not set(cmd.keys()).issubset(VALID_KEYS): + module.fail_json(msg='unknown keyword specified') + yield cmd + def main(): spec = dict( - commands=dict(type='list'), - waitfor=dict(type='list'), + # { command: , prompt: , response: } + commands=dict(type='list', required=True), + + wait_for=dict(type='list', aliases=['waitfor']), + match=dict(default='all', choices=['all', 'any']), + retries=dict(default=10, type='int'), - interval=dict(default=1, type='int'), - transport=dict(default='cli', choices=['cli']) + interval=dict(default=1, type='int') ) - module = 
get_module(argument_spec=spec, - supports_check_mode=True) + module = NetworkModule(argument_spec=spec, + connect_on_load=False, + supports_check_mode=True) - commands = module.params['commands'] + commands = list(parse_commands(module)) + conditionals = module.params['wait_for'] or list() - retries = module.params['retries'] - interval = module.params['interval'] + warnings = list() - try: - queue = set() - for entry in (module.params['waitfor'] or list()): - queue.add(Conditional(entry)) - except AttributeError, exc: - module.fail_json(msg=exc.message) + runner = CommandRunner(module) - result = dict(changed=False) + for cmd in commands: + if module.check_mode and not cmd['command'].startswith('show'): + warnings.append('only show commands are supported when using ' + 'check mode, not executing `%s`' % cmd['command']) + else: + if cmd['command'].startswith('conf'): + module.fail_json(msg='ops_command does not support running ' + 'config mode commands. Please use ' + 'ops_config instead') + try: + runner.add_command(**cmd) + except AddCommandError: + exc = get_exception() + warnings.append('duplicate command detected: %s' % cmd) - while retries > 0: - response = module.execute(commands) - result['stdout'] = response + for item in conditionals: + runner.add_conditional(item) - for item in list(queue): - if item(response): - queue.remove(item) + runner.retries = module.params['retries'] + runner.interval = module.params['interval'] + runner.match = module.params['match'] - if not queue: - break + try: + runner.run() + except FailedConditionsError: + exc = get_exception() + module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc)) + + result = dict(changed=False, stdout=list()) + + for cmd in commands: + try: + output = runner.get_command(cmd['command']) + except ValueError: + output = 'command not executed due to check_mode, see warnings' + result['stdout'].append(output) + 
+ result['warnings'] = warnings + result['stdout_lines'] = list(to_lines(result['stdout'])) - time.sleep(interval) - retries -= 1 - else: - failed_conditions = [item.raw for item in queue] - module.fail_json(msg='timeout waiting for value', failed_conditions=failed_conditions) + module.exit_json(**result) - result['stdout_lines'] = list(to_lines(result['stdout'])) - return module.exit_json(**result) -from ansible.module_utils.basic import * -from ansible.module_utils.shell import * -from ansible.module_utils.netcfg import * -from ansible.module_utils.openswitch import * if __name__ == '__main__': - main() - + main() diff --git a/network/openswitch/ops_config.py b/network/openswitch/ops_config.py index d1e794068e6..79c23aafcb4 100644 --- a/network/openswitch/ops_config.py +++ b/network/openswitch/ops_config.py @@ -16,15 +16,19 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = """ --- module: ops_config version_added: "2.1" -author: "Peter sprygada (@privateip)" +author: "Peter Sprygada (@privateip)" short_description: Manage OpenSwitch configuration using CLI description: - - OpenSwitch configurations use a simple block indent file sytanx - for segementing configuration into sections. This module provides + - OpenSwitch configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides an implementation for working with ops configuration sections in a deterministic way. extends_documentation_fragment: openswitch @@ -34,9 +38,10 @@ - The ordered set of commands that should be configured in the section. The commands must be the exact same commands as found in the device running-config. Be sure to note the configuration - command syntanx as some commands are automatically modified by the + command syntax as some commands are automatically modified by the device config parser. 
- required: true + required: false + default: null parents: description: - The ordered set of parents that uniquely identify the section @@ -45,19 +50,30 @@ level or global commands. required: false default: null + src: + description: + - The I(src) argument provides a path to the configuration file + to load into the remote system. The path can either be a full + system path to the configuration file if the value starts with / + or relative to the root of the implemented role or playbook. + This argument is mutually exclusive with the I(lines) and + I(parents) arguments. + required: false + default: null + version_added: "2.2" before: description: - The ordered set of commands to push on to the command stack if a change needs to be made. This allows the playbook designer the opportunity to perform configuration commands prior to pushing any changes without affecting how the set of commands are matched - against the system + against the system. required: false default: null after: description: - The ordered set of commands to append to the end of the command - stack if a changed needs to be made. Just like with I(before) this + stack if a change needs to be made. Just like with I(before) this allows the playbook designer to append a set of commands to be executed after the command set. required: false @@ -68,11 +84,13 @@ the set of commands against the current device config. If match is set to I(line), commands are matched line by line. If match is set to I(strict), command lines are matched with respect - to position. Finally if match is set to I(exact), command lines - must be an equal match. + to position. If match is set to I(exact), command lines + must be an equal match. Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. 
required: false default: line - choices: ['line', 'strict', 'exact'] + choices: ['line', 'strict', 'exact', 'none'] replace: description: - Instructs the module on the way to perform the configuration @@ -80,7 +98,7 @@ the modified lines are pushed to the device in configuration mode. If the replace argument is set to I(block) then the entire command block is pushed to the device in configuration mode if any - line is not correct + line is not correct. required: false default: line choices: ['line', 'block'] @@ -90,9 +108,12 @@ current devices running-config. When set to true, this will cause the module to push the contents of I(src) into the device without first checking if already configured. + - Note this argument should be considered deprecated. To achieve + the equivalent, set the C(match=none) which is idempotent. This argument + will be removed in a future release. required: false default: false - choices: ['true', 'false'] + choices: ['yes', 'no'] config: description: - The module, by default, will connect to the remote device and @@ -100,24 +121,50 @@ against the contents of source. There are times when it is not desirable to have the task get the current running-config for every task in a playbook. The I(config) argument allows the - implementer to pass in the configuruation to use as the base - config for comparision. + implementer to pass in the configuration to use as the base + config for comparison. required: false default: null + save: + description: + - The C(save) argument instructs the module to save the running- + config to the startup-config at the conclusion of the module + running. If check mode is specified, this argument is ignored. + required: false + default: no + choices: ['yes', 'no'] + version_added: "2.2" """ EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. 
+vars: + cli: + host: "{{ inventory_hostname }}" + username: netop + password: netop + - name: configure hostname over cli ops_config: - lines: - - "hostname {{ inventory_hostname }}" + lines: + - "hostname {{ inventory_hostname }}" + provider: "{{ cli }}" + - name: configure vlan 10 over cli ops_config: - lines: - - no shutdown - parents: - - vlan 10 + lines: + - no shutdown + parents: + - vlan 10 + provider: "{{ cli }}" + +- name: load config from file + ops_config: + src: ops01.cfg + backup: yes + provider: "{{ cli }}" """ RETURN = """ @@ -126,122 +173,140 @@ returned: always type: list sample: ['...', '...'] - -responses: - description: The set of responses from issuing the commands on the device - retured: when not check_mode - type: list - sample: ['...', '...'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: path + sample: /playbooks/ansible/backup/ops_config.2016-07-16@22:28:34 """ import re -import itertools - -def get_config(module): - config = module.params['config'] or dict() - if not config and not module.params['force']: - config = module.config - return config - -def build_candidate(lines, parents, config, strategy): - candidate = list() - - if strategy == 'strict': - for index, cmd in enumerate(lines): - try: - if cmd != config[index]: - candidate.append(cmd) - except IndexError: - candidate.append(cmd) - - elif strategy == 'exact': - if len(lines) != len(config): - candidate = list(lines) - else: - for cmd, cfg in itertools.izip(lines, config): - if cmd != cfg: - candidate = list(lines) - break - else: - for cmd in lines: - if cmd not in config: - candidate.append(cmd) +from ansible.module_utils.basic import get_exception +from ansible.module_utils.openswitch import NetworkModule, NetworkError +from ansible.module_utils.netcfg import NetworkConfig, dumps + +def check_args(module, warnings): + if module.params['force']: + warnings.append('The force argument is deprecated, please use ' + 
'match=none instead. This argument will be ' + 'removed in the future') + +def get_config(module, result): + contents = module.params['config'] + if not contents: + contents = module.config.get_config() + return NetworkConfig(indent=4, contents=contents) +def get_candidate(module): + candidate = NetworkConfig(indent=4) + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) return candidate +def load_config(module, commands, result): + if not module.check_mode: + module.config(commands) + result['changed'] = True + +def run(module, result): + match = module.params['match'] + replace = module.params['replace'] + path = module.params['parents'] + + candidate = get_candidate(module) + + if match != 'none': + config = get_config(module, result) + configobjs = candidate.difference(config, path=path, match=match, + replace=replace) + else: + configobjs = candidate.items + + if configobjs: + commands = dumps(configobjs, 'commands').split('\n') + + if module.params['lines']: + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + result['updates'] = commands + + # send the configuration commands to the device and merge + # them with the current running config + if not module.check_mode: + module.config.load_config(commands) + result['changed'] = True + + if module.params['save']: + if not module.check_mode: + module.config.save_config() + result['changed'] = True def main(): argument_spec = dict( - lines=dict(aliases=['commands'], required=True, type='list'), + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), parents=dict(type='list'), + before=dict(type='list'), after=dict(type='list'), - match=dict(default='line', choices=['line', 'strict', 'exact']), - replace=dict(default='line', choices=['line', 'block']), - 
force=dict(default=False, type='bool'), - config=dict(), - transport=dict(default='cli', choices=['cli']) - ) - module = get_module(argument_spec=argument_spec, - supports_check_mode=True) + match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), - lines = module.params['lines'] - parents = module.params['parents'] or list() + # this argument is deprecated in favor of setting match: none + # it will be removed in a future version + force=dict(default=False, type='bool'), - before = module.params['before'] - after = module.params['after'] + config=dict(), - match = module.params['match'] - replace = module.params['replace'] + save=dict(type='bool', default=False), + backup=dict(type='bool', default=False), - contents = get_config(module) - config = module.parse_config(contents) + # ops_config is only supported over Cli transport so force + # the value of transport to be cli + transport=dict(default='cli', choices=['cli']) + ) - if parents: - for parent in parents: - for item in config: - if item.text == parent: - config = item + mutually_exclusive = [('lines', 'src')] - try: - children = [c.text for c in config.children] - except AttributeError: - children = [c.text for c in config] + required_if = [('match', 'strict', ['lines']), + ('match', 'exact', ['lines']), + ('replace', 'block', ['lines'])] - else: - children = [c.text for c in config if not c.parents] + module = NetworkModule(argument_spec=argument_spec, + connect_on_load=False, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) - result = dict(changed=False) + if module.params['force'] is True: + module.params['match'] = 'none' - candidate = build_candidate(lines, parents, children, match) + warnings = list() + check_args(module, warnings) - if candidate: - if replace == 'line': - candidate[:0] = parents - else: - candidate = list(parents) - candidate.extend(lines) + result = 
dict(changed=False, warnings=warnings) - if before: - candidate[:0] = before + if module.params['backup']: + result['__backup__'] = module.config.get_config() - if after: - candidate.extend(after) + try: + run(module, result) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc)) - if not module.check_mode: - response = module.configure(candidate) - result['responses'] = response - result['changed'] = True + module.exit_json(**result) - result['updates'] = candidate - return module.exit_json(**result) -from ansible.module_utils.basic import * -from ansible.module_utils.shell import * -from ansible.module_utils.netcfg import * -from ansible.module_utils.openswitch import * if __name__ == '__main__': main() - diff --git a/network/openswitch/ops_facts.py b/network/openswitch/ops_facts.py new file mode 100644 index 00000000000..cbf31bc87e7 --- /dev/null +++ b/network/openswitch/ops_facts.py @@ -0,0 +1,421 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: ops_facts +version_added: "2.1" +author: "Peter Sprygada (@privateip)" +short_description: Collect device specific facts from OpenSwitch +description: + - Collects facts from devices running the OpenSwitch operating + system. 
Fact collection is supported over both Cli and Rest + transports. This module prepends all of the base network fact keys + with C(ansible_net_). The facts module will always collect a + base set of facts from the device and can enable or disable + collection of additional facts. + - The facts collected from pre Ansible 2.2 are still available and + are collected for backwards compatibility; however, these facts + should be considered deprecated and will be removed in a future + release. +extends_documentation_fragment: openswitch +options: + config: + description: + - When enabled, this argument will collect the current + running configuration from the remote device. If the + C(transport=rest) then the collected configuration will + be the full system configuration. + required: false + choices: + - true + - false + default: false + endpoints: + description: + - Accepts a list of endpoints to retrieve from the remote + device using the REST API. The endpoints should be valid + endpoints available on the device. This argument is only + valid when the C(transport=rest). + required: false + default: null + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all, hardware, config, legacy, and interfaces. Can specify a + list of values to include a larger subset. Values can also be used + with an initial C(M(!)) to specify that a specific subset should + not be collected. + required: false + default: '!config' + version_added: "2.2" +""" + +EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. 
+vars: + cli: + host: "{{ inventory_hostname }}" + username: netop + password: netop + transport: cli + rest: + host: "{{ inventory_hostname }}" + username: netop + password: netop + transport: rest + +- ops_facts: + gather_subset: all + provider: "{{ rest }}" + +# Collect only the config and default facts +- ops_facts: + gather_subset: config + provider: "{{ cli }}" + +# Do not collect config facts +- ops_facts: + gather_subset: + - "!config" + provider: "{{ cli }}" + +- name: collect device facts + ops_facts: + provider: "{{ cli }}" + +- name: include the config + ops_facts: + config: yes + provider: "{{ rest }}" + +- name: include a set of rest endpoints + ops_facts: + endpoints: + - /system/interfaces/1 + - /system/interfaces/2 + provider: "{{ rest }}" +""" + +RETURN = """ +ansible_net_gather_subset: + description: The list of fact subsets collected from the device + returned: always + type: list + +# default +ansible_net_model: + description: The model name returned from the device + returned: when transport is cli + type: str +ansible_net_serialnum: + description: The serial number of the remote device + returned: when transport is cli + type: str +ansible_net_version: + description: The operating system version running on the remote device + returned: always + type: str +ansible_net_hostname: + description: The configured hostname of the device + returned: always + type: string +ansible_net_image: + description: The image file the device is running + returned: when transport is cli + type: string + +# config +ansible_net_config: + description: The current active config from the device + returned: when config is enabled + type: str + +# legacy (pre Ansible 2.2) +config: + description: The current system configuration + returned: when enabled + type: string + sample: '....' 
+hostname: + description: returns the configured hostname + returned: always + type: string + sample: ops01 +version: + description: The current version of OpenSwitch + returned: always + type: string + sample: '0.3.0' +endpoints: + description: The JSON response from the URL endpoint + returned: when endpoints argument is defined and transport is rest + type: list + sample: [{....}, {....}] +""" +import re + +import ansible.module_utils.openswitch +from ansible.module_utils.netcli import CommandRunner, AddCommandError +from ansible.module_utils.network import NetworkModule +from ansible.module_utils.six import iteritems + + +def add_command(runner, command): + try: + runner.add_command(command) + except AddCommandError: + # AddCommandError is raised for any issue adding a command to + # the runner. Silently ignore the exception in this case + pass + + +class FactsBase(object): + + def __init__(self, module, runner): + self.module = module + self.transport = module.params['transport'] + self.runner = runner + self.facts = dict() + + if self.transport == 'cli': + self.commands() + + def commands(self): + raise NotImplementedError + + def populate(self): + getattr(self, self.transport)() + + def cli(self): + pass + + def rest(self): + pass + + +class Default(FactsBase): + + def commands(self): + add_command(self.runner, 'show system') + add_command(self.runner, 'show hostname') + + def rest(self): + self.facts.update(self.get_system()) + + def cli(self): + data = self.runner.get_command('show system') + + self.facts['version'] = self.parse_version(data) + self.facts['serialnum'] = self.parse_serialnum(data) + self.facts['model'] = self.parse_model(data) + self.facts['image'] = self.parse_image(data) + + self.facts['hostname'] = self.runner.get_command('show hostname') + + def parse_version(self, data): + match = re.search(r'OpenSwitch Version\s+: (\S+)', data) + if match: + return match.group(1) + + def parse_model(self, data): + match = 
re.search(r'Platform\s+:\s(\S+)', data, re.M) + if match: + return match.group(1) + + def parse_image(self, data): + match = re.search(r'\(Build: (\S+)\)', data, re.M) + if match: + return match.group(1) + + def parse_serialnum(self, data): + match = re.search(r'Serial Number\s+: (\S+)', data) + if match: + return match.group(1) + + def get_system(self): + response = self.module.connection.get('/system') + return dict( + hostname=response.json['configuration']['hostname'], + version=response.json['status']['switch_version'] + ) + + +class Config(FactsBase): + + def commands(self): + add_command(self.runner, 'show running-config') + + def cli(self): + self.facts['config'] = self.runner.get_command('show running-config') + +class Legacy(FactsBase): + # facts from ops_facts 2.1 + + def commands(self): + add_command(self.runner, 'show system') + add_command(self.runner, 'show hostname') + + if self.module.params['config']: + add_command(self.runner, 'show running-config') + + def rest(self): + self.facts['_endpoints'] = self.get_endpoints() + self.facts.update(self.get_system()) + + if self.module.params['config']: + self.facts['_config'] = self.get_config() + + def cli(self): + self.facts['_hostname'] = self.runner.get_command('show hostname') + + data = self.runner.get_command('show system') + self.facts['_version'] = self.parse_version(data) + + if self.module.params['config']: + self.facts['_config'] = self.runner.get_command('show running-config') + + def parse_version(self, data): + match = re.search(r'OpenSwitch Version\s+: (\S+)', data) + if match: + return match.group(1) + + def get_endpoints(self): + responses = list() + urls = self.module.params['endpoints'] or list() + for ep in urls: + response = self.module.connection.get(ep) + if response.headers['status'] != 200: + self.module.fail_json(msg=response.headers['msg']) + responses.append(response.json) + return responses + + def get_system(self): + response = self.module.connection.get('/system') + return 
dict( + _hostname=response.json['configuration']['hostname'], + _version=response.json['status']['switch_version'] + ) + + def get_config(self): + response = self.module.connection.get('/system/full-configuration') + return response.json + +def check_args(module, warnings): + if module.params['transport'] != 'rest' and module.params['endpoints']: + warnings.append('Endpoints can only be collected when transport is ' + 'set to "rest". Endpoints will not be collected') + + +FACT_SUBSETS = dict( + default=Default, + config=Config, + legacy=Legacy +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + +def main(): + spec = dict( + gather_subset=dict(default=['!config'], type='list'), + + # the next two arguments are legacy from pre 2.2 ops_facts + # these will be deprecated and ultimately removed + config=dict(default=False, type='bool'), + endpoints=dict(type='list'), + + transport=dict(default='cli', choices=['cli', 'rest']) + ) + + module = NetworkModule(argument_spec=spec, supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + warnings = list() + check_args(module, warnings) + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + runable_subsets.add('legacy') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + runner = CommandRunner(module) + + instances = list() + for key in runable_subsets: + instances.append(FACT_SUBSETS[key](module, 
runner)) + + if module.params['transport'] == 'cli': + runner.run() + + try: + for inst in instances: + inst.populate() + facts.update(inst.facts) + except Exception: + module.exit_json(out=module.from_json(runner.items)) + + ansible_facts = dict() + for key, value in iteritems(facts): + # this is to maintain capability with ops_facts 2.1 + if key.startswith('_'): + ansible_facts[key[1:]] = value + else: + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + module.exit_json(ansible_facts=ansible_facts, warnings=warnings) + + +if __name__ == '__main__': + main() diff --git a/network/sros/__init__.py b/network/sros/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/network/sros/sros_command.py b/network/sros/sros_command.py new file mode 100644 index 00000000000..3c7dd21e203 --- /dev/null +++ b/network/sros/sros_command.py @@ -0,0 +1,238 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: sros_command +version_added: "2.2" +author: "Peter Sprygada (@privateip)" +short_description: Run commands on remote devices running Nokia SR OS +description: + - Sends arbitrary commands to an SR OS node and returns the results + read from the device. 
This module includes an argument that will
    cause the module to wait for a specific condition before returning
    or timing out if the condition is not met.
  - This module does not support running commands in configuration mode.
    Please use M(sros_config) to configure SR OS devices.
extends_documentation_fragment: sros
options:
  commands:
    description:
      - List of commands to send to the remote SR OS device over the
        configured provider. The resulting output from the command
        is returned. If the I(wait_for) argument is provided, the
        module is not returned until the condition is satisfied or
        the number of retries has expired.
    required: true
  wait_for:
    description:
      - List of conditions to evaluate against the output of the
        command. The task will wait for each condition to be true
        before moving forward. If the conditional is not true
        within the configured number of retries, the task fails.
        See examples.
    required: false
    default: null
    aliases: ['waitfor']
  match:
    description:
      - The I(match) argument is used in conjunction with the
        I(wait_for) argument to specify the match policy. Valid
        values are C(all) or C(any). If the value is set to C(all)
        then all conditionals in the wait_for must be satisfied. If
        the value is set to C(any) then only one of the values must be
        satisfied.
    required: false
    default: all
    choices: ['any', 'all']
  retries:
    description:
      - Specifies the number of retries a command should be tried
        before it is considered failed. The command is run on the
        target device every retry and evaluated against the
        I(wait_for) conditions.
    required: false
    default: 10
  interval:
    description:
      - Configures the interval in seconds to wait between retries
        of the command. If the command does not pass the specified
        conditions, the interval indicates how long to wait before
        trying the command again.
+ required: false + default: 1 +""" + +EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + cli: + host: "{{ inventory_hostname }}" + username: admin + password: admin + transport: cli + +tasks: + - name: run show version on remote devices + sros_command: + commands: show version + provider: "{{ cli }}" + + - name: run show version and check to see if output contains sros + sros_command: + commands: show version + wait_for: result[0] contains sros + provider: "{{ cli }}" + + - name: run multiple commands on remote nodes + sros_command: + commands: + - show version + - show port detail + provider: "{{ cli }}" + + - name: run multiple commands and evaluate the output + sros_command: + commands: + - show version + - show port detail + wait_for: + - result[0] contains TiMOS-B-14.0.R4 + provider: "{{ cli }}" +""" + +RETURN = """ +stdout: + description: The set of responses from the commands + returned: always + type: list + sample: ['...', '...'] + +stdout_lines: + description: The value of stdout split into a list + returned: always + type: list + sample: [['...', '...'], ['...'], ['...']] + +failed_conditions: + description: The list of conditionals that have failed + returned: failed + type: list + sample: ['...', '...'] +""" +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcli import CommandRunner +from ansible.module_utils.netcli import AddCommandError, FailedConditionsError +from ansible.module_utils.sros import NetworkModule, NetworkError + +VALID_KEYS = ['command', 'output', 'prompt', 'response'] + +def to_lines(stdout): + for item in stdout: + if isinstance(item, basestring): + item = str(item).split('\n') + yield item + +def parse_commands(module): + for cmd in module.params['commands']: + if isinstance(cmd, basestring): + cmd = dict(command=cmd, output=None) + elif 'command' not in cmd: + module.fail_json(msg='command keyword argument is 
required') + elif cmd.get('output') not in [None, 'text']: + module.fail_json(msg='invalid output specified for command') + elif not set(cmd.keys()).issubset(VALID_KEYS): + module.fail_json(msg='unknown keyword specified') + yield cmd + +def main(): + spec = dict( + # { command: , output: , prompt: , response: } + commands=dict(type='list', required=True), + + wait_for=dict(type='list', aliases=['waitfor']), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + module = NetworkModule(argument_spec=spec, + connect_on_load=False, + supports_check_mode=True) + + commands = list(parse_commands(module)) + conditionals = module.params['wait_for'] or list() + + warnings = list() + + runner = CommandRunner(module) + + for cmd in commands: + if module.check_mode and not cmd['command'].startswith('show'): + warnings.append('only show commands are supported when using ' + 'check mode, not executing `%s`' % cmd['command']) + else: + if cmd['command'].startswith('conf'): + module.fail_json(msg='sros_command does not support running ' + 'config mode commands. 
Please use ' + 'sros_config instead') + try: + runner.add_command(**cmd) + except AddCommandError: + exc = get_exception() + warnings.append('duplicate command detected: %s' % cmd) + + for item in conditionals: + runner.add_conditional(item) + + runner.retries = module.params['retries'] + runner.interval = module.params['interval'] + runner.match = module.params['match'] + + try: + runner.run() + except FailedConditionsError: + exc = get_exception() + module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc)) + + result = dict(changed=False, stdout=list()) + + for cmd in commands: + try: + output = runner.get_command(cmd['command']) + except ValueError: + output = 'command not executed due to check_mode, see warnings' + result['stdout'].append(output) + + result['warnings'] = warnings + result['stdout_lines'] = list(to_lines(result['stdout'])) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/sros/sros_config.py b/network/sros/sros_config.py new file mode 100644 index 00000000000..2efca84dc12 --- /dev/null +++ b/network/sros/sros_config.py @@ -0,0 +1,315 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: sros_config +version_added: "2.2" +author: "Peter Sprygada (@privateip)" +short_description: Manage Nokia SR OS device configuration +description: + - Nokia SR OS configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides + an implementation for working with SR OS configuration sections in + a deterministic way. +extends_documentation_fragment: sros +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. + required: false + default: null + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + required: false + default: null + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. This argument is mutually + exclusive with I(lines). + required: false + default: null + version_added: "2.2" + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system. 
+ required: false + default: null + after: + description: + - The ordered set of commands to append to the end of the command + stack if a change needs to be made. Just like with I(before) this + allows the playbook designer to append a set of commands to be + executed after the command set. + required: false + default: null + match: + description: + - Instructs the module on the way to perform the matching of + the set of commands against the current device config. If + match is set to I(line), commands are matched line by line. If + match is set to I(strict), command lines are matched with respect + to position. If match is set to I(exact), command lines + must be an equal match. Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. + required: false + default: line + choices: ['line', 'strict', 'exact', 'none'] + replace: + description: + - Instructs the module on the way to perform the configuration + on the device. If the replace argument is set to I(line) then + the modified lines are pushed to the device in configuration + mode. If the replace argument is set to I(block) then the entire + command block is pushed to the device in configuration mode if any + line is not correct. + required: false + default: line + choices: ['line', 'block'] + force: + description: + - The force argument instructs the module to not consider the + current devices running-config. When set to true, this will + cause the module to push the contents of I(src) into the device + without first checking if already configured. + - Note this argument should be considered deprecated. To achieve + the equivalent, set the C(match=none) which is idempotent. This argument + will be removed in a future release. 
+ required: false + default: false + choices: [ "true", "false" ] + version_added: "2.2" + backup: + description: + - This argument will cause the module to create a full backup of + the current C(running-config) from the remote device before any + changes are made. The backup file is written to the C(backup) + folder in the playbook root directory. If the directory does not + exist, it is created. + required: false + default: no + choices: ['yes', 'no'] + version_added: "2.2" + config: + description: + - The C(config) argument allows the playbook designer to supply + the base configuration to be used to validate configuration + changes necessary. If this argument is provided, the module + will not download the running-config from the remote node. + required: false + default: null + version_added: "2.2" + defaults: + description: + - This argument specifies whether or not to collect all defaults + when getting the remote device running config. When enabled, + the module will get the current config by issuing the command + C(show running-config all). + required: false + default: no + choices: ['yes', 'no'] + aliases: ['detail'] + version_added: "2.2" + save: + description: + - The C(save) argument instructs the module to save the running- + config to the startup-config at the conclusion of the module + running. If check mode is specified, this argument is ignored. + required: false + default: no + choices: ['yes', 'no'] + version_added: "2.2" +""" + +EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. 
+vars: + cli: + host: "{{ inventory_hostname }}" + username: admin + password: admin + transport: cli + +- name: enable rollback location + sros_config: + lines: configure system rollback rollback-location "cf3:/ansible" + provider: "{{ cli }}" + +- name: set system name to {{ inventory_hostname }} using one line + sros_config: + lines: + - configure system name "{{ inventory_hostname }}" + provider: "{{ cli }}" + +- name: set system name to {{ inventory_hostname }} using parents + sros_config: + lines: + - 'name "{{ inventory_hostname }}"' + parents: + - configure + - system + provider: "{{ cli }}" + backup: yes + +- name: load config from file + sros_config: + src: "{{ inventory_hostname }}.cfg" + provider: "{{ cli }}" + save: yes +""" + +RETURN = """ +updates: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['...', '...'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: path + sample: /playbooks/ansible/backup/sros_config.2016-07-16@22:28:34 +""" +from ansible.module_utils.basic import get_exception +from ansible.module_utils.sros import NetworkModule, NetworkError +from ansible.module_utils.netcfg import NetworkConfig, dumps + +def sanitize_config(lines): + commands = list() + for line in lines: + for index, entry in enumerate(commands): + if line.startswith(entry): + del commands[index] + break + commands.append(line) + return commands + +def get_config(module, result): + contents = module.params['config'] + if not contents: + defaults = module.params['defaults'] + contents = module.config.get_config(detail=defaults) + return NetworkConfig(device_os='sros', contents=contents) + +def get_candidate(module): + candidate = NetworkConfig(device_os='sros') + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], 
parents=parents) + return candidate + +def run(module, result): + match = module.params['match'] + + candidate = get_candidate(module) + + if match != 'none': + config = get_config(module, result) + configobjs = candidate.difference(config) + else: + configobjs = candidate.items + + if configobjs: + commands = dumps(configobjs, 'lines') + commands = sanitize_config(commands.split('\n')) + + result['updates'] = commands + + # check if creating checkpoints is possible + if not module.connection.rollback_enabled: + warn = 'Cannot create checkpoint. Please enable this feature ' \ + 'using the sros_rollback module. Automatic rollback ' \ + 'will be disabled' + result['warnings'].append(warn) + + # send the configuration commands to the device and merge + # them with the current running config + if not module.check_mode: + module.config.load_config(commands) + result['changed'] = True + + if module.params['save']: + if not module.check_mode: + module.config.save_config() + result['changed'] = True + +def main(): + """ main entry point for module execution + """ + argument_spec = dict( + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + match=dict(default='line', choices=['line', 'none']), + + config=dict(), + defaults=dict(type='bool', default=False, aliases=['detail']), + + backup=dict(type='bool', default=False), + save=dict(type='bool', default=False), + ) + + mutually_exclusive = [('lines', 'src')] + + module = NetworkModule(argument_spec=argument_spec, + connect_on_load=False, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + result = dict(changed=False, warnings=list()) + + if module.params['backup']: + result['__backup__'] = module.config.get_config() + + try: + run(module, result) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc), **exc.kwargs) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git 
a/network/sros/sros_rollback.py b/network/sros/sros_rollback.py new file mode 100644 index 00000000000..85d7bdc5067 --- /dev/null +++ b/network/sros/sros_rollback.py @@ -0,0 +1,223 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: sros_rollback +version_added: "2.2" +author: "Peter Sprygada (@privateip)" +short_description: Configure Nokia SR OS rollback +description: + - Configure the rollback feature on remote Nokia devices running + the SR OS operating system. this module provides a stateful + implementation for managing the configuration of the rollback + feature +extends_documentation_fragment: sros +options: + rollback_location: + description: + - The I(rollback_location) specifies the location and filename + of the rollback checkpoint files. This argument supports any + valid local or remote URL as specified in SR OS + required: false + default: null + remote_max_checkpoints: + description: + - The I(remote_max_checkpoints) argument configures the maximum + number of rollback files that can be transfered and saved to + a remote location. 
Valid values for this argument are in the
        range of 1 to 50.
    required: false
    default: null
  local_max_checkpoints:
    description:
      - The I(local_max_checkpoints) argument configures the maximum
        number of rollback files that can be saved on the device's local
        compact flash. Valid values for this argument are in the range
        of 1 to 50.
    required: false
    default: null
  rescue_location:
    description:
      - The I(rescue_location) specifies the location of the
        rescue file. This argument supports any valid local
        or remote URL as specified in SR OS.
    required: false
    default: null
  state:
    description:
      - The I(state) argument specifies the state of the configuration
        entries in the device's active configuration. When the state
        value is set to C(present) the configuration is present in the
        device's active configuration. When the state value is set to
        C(absent) the configuration values are removed from the device's
        active configuration.
    required: false
    default: present
    choices: ['present', 'absent']
"""

EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
+vars: + cli: + host: "{{ inventory_hostname }}" + username: admin + password: admin + transport: cli + +- name: configure rollback location + sros_rollback: + rollback_location: "cb3:/ansible" + provider: "{{ cli }}" + +- name: remove all rollback configuration + sros_rollback: + state: absent + provider: "{{ cli }}" +""" + +RETURN = """ +updates: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['...', '...'] +""" +from ansible.module_utils.basic import get_exception +from ansible.module_utils.sros import NetworkModule, NetworkError +from ansible.module_utils.netcfg import NetworkConfig, dumps + +def invoke(name, *args, **kwargs): + func = globals().get(name) + if func: + return func(*args, **kwargs) + +def sanitize_config(lines): + commands = list() + for line in lines: + for index, entry in enumerate(commands): + if line.startswith(entry): + del commands[index] + break + commands.append(line) + return commands + +def present(module, commands): + setters = set() + for key, value in module.argument_spec.iteritems(): + if module.params[key] is not None: + setter = value.get('setter') or 'set_%s' % key + if setter not in setters: + setters.add(setter) + invoke(setter, module, commands) + +def absent(module, commands): + config = module.config.get_config() + if 'rollback-location' in config: + commands.append('configure system rollback no rollback-location') + if 'rescue-location' in config: + commands.append('configure system rollback no rescue-location') + if 'remote-max-checkpoints' in config: + commands.append('configure system rollback no remote-max-checkpoints') + if 'local-max-checkpoints' in config: + commands.append('configure system rollback no remote-max-checkpoints') + +def set_rollback_location(module, commands): + value = module.params['rollback_location'] + commands.append('configure system rollback rollback-location "%s"' % value) + +def set_local_max_checkpoints(module, commands): 
+ value = module.params['local_max_checkpoints'] + if not 1 <= value <= 50: + module.fail_json(msg='local_max_checkpoints must be between 1 and 50') + commands.append('configure system rollback local-max-checkpoints %s' % value) + +def set_remote_max_checkpoints(module, commands): + value = module.params['remote_max_checkpoints'] + if not 1 <= value <= 50: + module.fail_json(msg='remote_max_checkpoints must be between 1 and 50') + commands.append('configure system rollback remote-max-checkpoints %s' % value) + +def set_rescue_location(module, commands): + value = module.params['rescue_location'] + commands.append('configure system rollback rescue-location "%s"' % value) + +def get_config(module): + contents = module.config.get_config() + return NetworkConfig(device_os='sros', contents=contents) + +def load_config(module, commands, result): + candidate = NetworkConfig(device_os='sros', contents='\n'.join(commands)) + config = get_config(module) + configobjs = candidate.difference(config) + + if configobjs: + commands = dumps(configobjs, 'lines') + commands = sanitize_config(commands.split('\n')) + + result['updates'] = commands + + # send the configuration commands to the device and merge + # them with the current running config + if not module.check_mode: + module.config(commands) + + result['changed'] = True + +def main(): + """ main entry point for module execution + """ + argument_spec = dict( + rollback_location=dict(), + + local_max_checkpoints=dict(type='int'), + remote_max_checkpoints=dict(type='int'), + + rescue_location=dict(), + + state=dict(default='present', choices=['present', 'absent']) + ) + + module = NetworkModule(argument_spec=argument_spec, + connect_on_load=False, + supports_check_mode=True) + + state = module.params['state'] + + result = dict(changed=False) + + commands = list() + invoke(state, module, commands) + + try: + load_config(module, commands, result) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc), 
**exc.kwargs) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/vyos/__init__.py b/network/vyos/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/network/vyos/vyos_command.py b/network/vyos/vyos_command.py new file mode 100644 index 00000000000..14180e305d7 --- /dev/null +++ b/network/vyos/vyos_command.py @@ -0,0 +1,239 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: vyos_command +version_added: "2.2" +author: "Peter Sprygada (@privateip)" +short_description: Run one or more commands on VyOS devices +description: + - The command module allows running one or more commands on remote + devices running VyOS. This module can also be introspected + to validate key parameters before returning successfully. If the + conditional statements are not met in the wait period, the task + fails. + - Certain C(show) commands in VyOS produce many lines of output and + use a custom pager that can cause this module to hang. If the + value of the environment variable C(ANSIBLE_VYOS_TERMINAL_LENGTH) + is not set, the default number of 10000 is used. 
extends_documentation_fragment: vyos
options:
  commands:
    description:
      - The ordered set of commands to execute on the remote device
        running VyOS. The output from the command execution is
        returned to the playbook. If the I(wait_for) argument is
        provided, the module is not returned until the condition is
        satisfied or the number of retries has been exceeded.
    required: true
  wait_for:
    description:
      - Specifies what to evaluate from the output of the command
        and what conditionals to apply. This argument will cause
        the task to wait for a particular conditional to be true
        before moving forward. If the conditional is not true
        by the configured I(retries), the task fails. See examples.
    required: false
    default: null
    aliases: ['waitfor']
  match:
    description:
      - The I(match) argument is used in conjunction with the
        I(wait_for) argument to specify the match policy. Valid
        values are C(all) or C(any). If the value is set to C(all)
        then all conditionals in the wait_for must be satisfied. If
        the value is set to C(any) then only one of the values must be
        satisfied.
    required: false
    default: all
    choices: ['any', 'all']
  retries:
    description:
      - Specifies the number of retries a command should be tried
        before it is considered failed. The command is run on the
        target device every retry and evaluated against the I(wait_for)
        conditionals.
    required: false
    default: 10
  interval:
    description:
      - Configures the interval in seconds to wait between I(retries)
        of the command. If the command does not pass the specified
        conditional, the interval indicates how long to wait before
        trying the command again.
    required: false
    default: 1

notes:
  - Running C(show system boot-messages all) will cause the module to hang since
    VyOS is using a custom pager setting to display the output of that command.
+""" + +EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + cli: + host: "{{ inventory_hostname }}" + username: vyos + password: vyos + transport: cli + +- vyos_command: + commands: + - show interfaces ethernet {{ item }} + provider: "{{ cli }}" + with_items: + - eth0 + - eth1 + +- vyos_command: + commands: + - show version + - show hardware cpu + wait_for: + - "result[0] contains 'VyOS 1.1.7'" + provider: "{{ cli }}" +""" + +RETURN = """ +stdout: + description: The set of responses from the commands + returned: always + type: list + sample: ['...', '...'] +stdout_lines: + description: The value of stdout split into a list + returned: always + type: list + sample: [['...', '...'], ['...'], ['...']] +failed_conditions: + description: The conditionals that failed + returned: failed + type: list + sample: ['...', '...'] +warnings: + description: The list of warnings (if any) generated by module based on arguments + returned: always + type: list + sample: ['...', '...'] +""" +import ansible.module_utils.vyos +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcli import CommandRunner +from ansible.module_utils.netcli import AddCommandError, FailedConditionsError +from ansible.module_utils.network import NetworkModule, NetworkError +from ansible.module_utils.six import string_types + +VALID_KEYS = ['command', 'output', 'prompt', 'response'] + +def to_lines(stdout): + for item in stdout: + if isinstance(item, string_types): + item = str(item).split('\n') + yield item + +def parse_commands(module): + for cmd in module.params['commands']: + if isinstance(cmd, string_types): + cmd = dict(command=cmd, output=None) + elif 'command' not in cmd: + module.fail_json(msg='command keyword argument is required') + elif cmd.get('output') not in [None, 'text']: + module.fail_json(msg='invalid output specified for command') + elif not set(cmd.keys()).issubset(VALID_KEYS): 
+ module.fail_json(msg='unknown keyword specified') + yield cmd + +def main(): + spec = dict( + # { command: , output: , prompt: , response: } + commands=dict(type='list', required=True), + + wait_for=dict(type='list', aliases=['waitfor']), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + module = NetworkModule(argument_spec=spec, + connect_on_load=False, + supports_check_mode=True) + + commands = list(parse_commands(module)) + conditionals = module.params['wait_for'] or list() + + warnings = list() + + runner = CommandRunner(module) + + for cmd in commands: + if module.check_mode and not cmd['command'].startswith('show'): + warnings.append('only show commands are supported when using ' + 'check mode, not executing `%s`' % cmd['command']) + else: + if cmd['command'].startswith('conf'): + module.fail_json(msg='vyos_command does not support running ' + 'config mode commands. Please use ' + 'vyos_config instead') + try: + runner.add_command(**cmd) + except AddCommandError: + exc = get_exception() + warnings.append('duplicate command detected: %s' % cmd) + + for item in conditionals: + runner.add_conditional(item) + + runner.retries = module.params['retries'] + runner.interval = module.params['interval'] + runner.match = module.params['match'] + + try: + runner.run() + except FailedConditionsError: + exc = get_exception() + module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc)) + + result = dict(changed=False, stdout=list()) + + for cmd in commands: + try: + output = runner.get_command(cmd['command']) + except ValueError: + output = 'command not executed due to check_mode, see warnings' + result['stdout'].append(output) + + result['warnings'] = warnings + result['stdout_lines'] = list(to_lines(result['stdout'])) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() 
+ diff --git a/network/vyos/vyos_config.py b/network/vyos/vyos_config.py new file mode 100644 index 00000000000..8cf2c3d6047 --- /dev/null +++ b/network/vyos/vyos_config.py @@ -0,0 +1,296 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: vyos_config +version_added: "2.2" +author: "Peter Sprygada (@privateip)" +short_description: Manage VyOS configuration on remote device +description: + - This module provides configuration file management of VyOS + devices. It provides arguments for managing both the + configuration file and state of the active configuration. All + configuration statements are based on `set` and `delete` commands + in the device configuration. +extends_documentation_fragment: vyos +options: + lines: + description: + - The ordered set of configuration lines to be managed and + compared with the existing configuration on the remote + device. + required: false + default: null + src: + description: + - The C(src) argument specifies the path to the source config + file to load. The source config file can either be in + bracket format or set format. The source file can include + Jinja2 template variables. 
+ required: no + default: null + match: + description: + - The C(match) argument controls the method used to match + against the current active configuration. By default, the + desired config is matched against the active config and the + deltas are loaded. If the C(match) argument is set to C(none) + the active configuration is ignored and the configuration is + always loaded. + required: false + default: line + choices: ['line', 'none'] + backup: + description: + - The C(backup) argument will backup the current devices active + configuration to the Ansible control host prior to making any + changes. The backup file will be located in the backup folder + in the root of the playbook + required: false + default: false + choices: ['yes', 'no'] + comment: + description: + - Allows a commit description to be specified to be included + when the configuration is committed. If the configuration is + not changed or committed, this argument is ignored. + required: false + default: 'configured by vyos_config' + config: + description: + - The C(config) argument specifies the base configuration to use + to compare against the desired configuration. If this value + is not specified, the module will automatically retrieve the + current active configuration from the remote device. + required: false + default: null + save: + description: + - The C(save) argument controls whether or not changes made + to the active configuration are saved to disk. This is + independent of committing the config. When set to True, the + active configuration is saved. 
+ required: false + default: false + choices: ['yes', 'no'] +""" + +RETURN = """ +updates: + description: The list of configuration commands sent to the device + returned: always + type: list + sample: ['...', '...'] +filtered: + description: The list of configuration commands removed to avoid a load failure + returned: always + type: list + sample: ['...', '...'] +""" + +EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + cli: + host: "{{ inventory_hostname }}" + username: vyos + password: vyos + transport: cli + +- name: configure the remote device + vyos_config: + lines: + - set system host-name {{ inventory_hostname }} + - set service lldp + - delete service dhcp-server + provider: "{{ cli }}" + +- name: backup and load from file + vyos_config: + src: vyos.cfg + backup: yes + provider: "{{ cli }}" +""" +import re + +from ansible.module_utils.network import Command, get_exception +from ansible.module_utils.netcfg import NetworkConfig, dumps +from ansible.module_utils.vyos import NetworkModule, NetworkError + + +DEFAULT_COMMENT = 'configured by vyos_config' + +CONFIG_FILTERS = [ + re.compile(r'set system login user \S+ authentication encrypted-password') +] + + +def config_to_commands(config): + set_format = config.startswith('set') or config.startswith('delete') + candidate = NetworkConfig(indent=4, contents=config, device_os='junos') + if not set_format: + candidate = [c.line for c in candidate.items] + commands = list() + # this filters out less specific lines + for item in candidate: + for index, entry in enumerate(commands): + if item.startswith(entry): + del commands[index] + break + commands.append(item) + + else: + commands = str(candidate).split('\n') + + return commands + +def get_config(module, result): + contents = module.params['config'] + if not contents: + contents = module.config.get_config(output='set').split('\n') + else: + contents = config_to_commands(contents) 
+ + return contents + +def get_candidate(module): + contents = module.params['src'] or module.params['lines'] + + if module.params['lines']: + contents = '\n'.join(contents) + + return config_to_commands(contents) + +def diff_config(commands, config): + config = [str(c).replace("'", '') for c in config] + + updates = list() + visited = set() + + for line in commands: + item = str(line).replace("'", '') + + if not item.startswith('set') and not item.startswith('delete'): + raise ValueError('line must start with either `set` or `delete`') + + elif item.startswith('set') and item not in config: + updates.append(line) + + elif item.startswith('delete'): + if not config: + updates.append(line) + else: + item = re.sub(r'delete', 'set', item) + for entry in config: + if entry.startswith(item) and line not in visited: + updates.append(line) + visited.add(line) + + return list(updates) + +def sanitize_config(config, result): + result['filtered'] = list() + for regex in CONFIG_FILTERS: + for index, line in enumerate(list(config)): + if regex.search(line): + result['filtered'].append(line) + del config[index] + +def load_config(module, commands, result): + comment = module.params['comment'] + commit = not module.check_mode + save = module.params['save'] + + # sanitize loadable config to remove items that will fail + # remove items will be returned in the sanitized keyword + # in the result. 
+ sanitize_config(commands, result) + + diff = module.config.load_config(commands, commit=commit, comment=comment, + save=save) + + if diff: + result['diff'] = dict(prepared=diff) + result['changed'] = True + + +def run(module, result): + # get the current active config from the node or passed in via + # the config param + config = get_config(module, result) + + # create the candidate config object from the arguments + candidate = get_candidate(module) + + # create loadable config that includes only the configuration updates + updates = diff_config(candidate, config) + + result['updates'] = updates + + load_config(module, updates, result) + + if result.get('filtered'): + result['warnings'].append('Some configuration commands where ' + 'removed, please see the filtered key') + + +def main(): + + argument_spec = dict( + src=dict(type='path'), + lines=dict(type='list'), + + match=dict(default='line', choices=['line', 'none']), + + comment=dict(default=DEFAULT_COMMENT), + + config=dict(), + + backup=dict(default=False, type='bool'), + save=dict(default=False, type='bool'), + ) + + mutually_exclusive = [('lines', 'src')] + + module = NetworkModule(argument_spec=argument_spec, + connect_on_load=False, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True) + + result = dict(changed=False) + + if module.params['backup']: + result['__backup__'] = module.config.get_config() + + try: + run(module, result) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc), **exc.kwargs) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/vyos/vyos_facts.py b/network/vyos/vyos_facts.py new file mode 100644 index 00000000000..ff081f8abc5 --- /dev/null +++ b/network/vyos/vyos_facts.py @@ -0,0 +1,320 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free 
Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: vyos_facts +version_added: "2.2" +author: "Peter Sprygada (@privateip)" +short_description: Collect facts from remote devices running OS +description: + - Collects a base set of device facts from a remote device that + is running VyOS. This module prepends all of the + base network fact keys with U(ansible_net_). The facts + module will always collect a base set of facts from the device + and can enable or disable collection of additional facts. +extends_documentation_fragment: vyos +options: + gather_subset: + description: + - When supplied, this argument will restrict the facts collected + to a given subset. Possible values for this argument include + all, hardware, config, and interfaces. Can specify a list of + values to include a larger subset. Values can also be used + with an initial C(M(!)) to specify that a specific subset should + not be collected. 
+ required: false + default: "!config" +""" + +EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node +vars: + cli: + host: "{{ inventory_hostname }}" + username: vyos + password: vyos + transport: cli + +- name: collect all facts from the device + vyos_facts: + gather_subset: all + +- name: collect only the config and default facts + vyos_facts: + gather_subset: config + +- name: collect everything exception the config + vyos_facts: + gather_subset: "!config" +""" + +RETURN = """ +ansible_net_config: + description: The running-config from the device + returned: when config is configured + type: str +ansible_net_commits: + description: The set of available configuration revisions + returned: when present + type: list +ansible_net_hostname: + description: The configured system hostname + returned: always + type: str +ansible_net_model: + description: The device model string + returned: always + type: str +ansible_net_serialnum: + description: The serial number of the device + returned: always + type: str +ansible_net_version: + description: The version of the software running + returned: always + type: str +ansible_net_neighbors: + description: The set of LLDP neighbors + returned: when interface is configured + type: list +ansible_net_gather_subset: + description: The list of subsets gathered by the module + returned: always + type: list +""" +import re + +import ansible.module_utils.vyos +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.netcli import CommandRunner +from ansible.module_utils.network import NetworkModule +from ansible.module_utils.six import iteritems + + +class FactsBase(object): + + def __init__(self, runner): + self.runner = runner + self.facts = dict() + + self.commands() + + def commands(self): + raise NotImplementedError + + +class Default(FactsBase): + + def commands(self): + self.runner.add_command('show version') + 
self.runner.add_command('show host name') + + def populate(self): + data = self.runner.get_command('show version') + + self.facts['version'] = self.parse_version(data) + self.facts['serialnum'] = self.parse_serialnum(data) + self.facts['model'] = self.parse_model(data) + + self.facts['hostname'] = self.runner.get_command('show host name') + + def parse_version(self, data): + match = re.search(r'Version:\s*(\S+)', data) + if match: + return match.group(1) + + def parse_model(self, data): + match = re.search(r'HW model:\s*(\S+)', data) + if match: + return match.group(1) + + def parse_serialnum(self, data): + match = re.search(r'HW S/N:\s+(\S+)', data) + if match: + return match.group(1) + + +class Config(FactsBase): + + def commands(self): + self.runner.add_command('show configuration commands') + self.runner.add_command('show system commit') + + def populate(self): + + config = self.runner.get_command('show configuration commands') + self.facts['config'] = str(config).split('\n') + + commits = self.runner.get_command('show system commit') + entries = list() + entry = None + + for line in commits.split('\n'): + match = re.match(r'(\d+)\s+(.+)by(.+)via(.+)', line) + if match: + if entry: + entries.append(entry) + + entry = dict(revision=match.group(1), + datetime=match.group(2), + by=str(match.group(3)).strip(), + via=str(match.group(4)).strip(), + comment=None) + else: + entry['comment'] = line.strip() + + self.facts['commits'] = entries + + +class Neighbors(FactsBase): + + def commands(self): + self.runner.add_command('show lldp neighbors') + self.runner.add_command('show lldp neighbors detail') + + def populate(self): + all_neighbors = self.runner.get_command('show lldp neighbors') + if 'LLDP not configured' not in all_neighbors: + neighbors = self.parse( + self.runner.get_command('show lldp neighbors detail') + ) + self.facts['neighbors'] = self.parse_neighbors(neighbors) + + def parse(self, data): + parsed = list() + values = None + for line in data.split('\n'): 
+ if len(line) == 0: + continue + elif line[0] == ' ': + values += '\n%s' % line + elif line.startswith('Interface'): + if values: + parsed.append(values) + values = line + return parsed + + def parse_neighbors(self, data): + facts = dict() + for item in data: + interface = self.parse_interface(item) + host = self.parse_host(item) + port = self.parse_port(item) + if interface not in facts: + facts[interface] = list() + facts[interface].append(dict(host=host, port=port)) + return facts + + def parse_interface(self, data): + match = re.search(r'^Interface:\s+(\S+),', data) + return match.group(1) + + def parse_host(self, data): + match = re.search(r'SysName:\s+(.+)$', data, re.M) + if match: + return match.group(1) + + def parse_port(self, data): + match = re.search(r'PortDescr:\s+(.+)$', data, re.M) + if match: + return match.group(1) + + +FACT_SUBSETS = dict( + default=Default, + neighbors=Neighbors, + config=Config +) + +VALID_SUBSETS = frozenset(FACT_SUBSETS.keys()) + + +def main(): + spec = dict( + gather_subset=dict(default=['!config'], type='list') + ) + + module = NetworkModule(argument_spec=spec, supports_check_mode=True) + + gather_subset = module.params['gather_subset'] + + runable_subsets = set() + exclude_subsets = set() + + for subset in gather_subset: + if subset == 'all': + runable_subsets.update(VALID_SUBSETS) + continue + + if subset.startswith('!'): + subset = subset[1:] + if subset == 'all': + exclude_subsets.update(VALID_SUBSETS) + continue + exclude = True + else: + exclude = False + + if subset not in VALID_SUBSETS: + module.fail_json(msg='Bad subset') + + if exclude: + exclude_subsets.add(subset) + else: + runable_subsets.add(subset) + + if not runable_subsets: + runable_subsets.update(VALID_SUBSETS) + + runable_subsets.difference_update(exclude_subsets) + runable_subsets.add('default') + + facts = dict() + facts['gather_subset'] = list(runable_subsets) + + runner = CommandRunner(module) + + instances = list() + for key in runable_subsets: + 
instances.append(FACT_SUBSETS[key](runner)) + + runner.run() + + try: + for inst in instances: + inst.populate() + facts.update(inst.facts) + except Exception: + exc = get_exception() + module.fail_json(msg='unknown failure', output=runner.items, exc=str(exc)) + + ansible_facts = dict() + for key, value in iteritems(facts): + key = 'ansible_net_%s' % key + ansible_facts[key] = value + + module.exit_json(ansible_facts=ansible_facts) + + +if __name__ == '__main__': + main() diff --git a/packaging/language/easy_install.py b/packaging/language/easy_install.py index 017f6b818a6..40b1026a76a 100644 --- a/packaging/language/easy_install.py +++ b/packaging/language/easy_install.py @@ -22,6 +22,10 @@ import tempfile import os.path +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: easy_install @@ -90,10 +94,14 @@ EXAMPLES = ''' # Examples from Ansible Playbooks -- easy_install: name=pip state=latest +- easy_install: + name: pip + state: latest # Install Bottle into the specified virtualenv. -- easy_install: name=bottle virtualenv=/webapps/myapp/venv +- easy_install: + name: bottle + virtualenv: /webapps/myapp/venv ''' def _is_package_installed(module, name, easy_install, executable_arguments): @@ -200,4 +208,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/language/gem.py b/packaging/language/gem.py index 491402d115f..41a0961b2af 100644 --- a/packaging/language/gem.py +++ b/packaging/language/gem.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: gem @@ -63,6 +67,12 @@ - Override the path to the gem executable required: false version_added: "1.4" + env_shebang: + description: + - Rewrite the shebang line on installed scripts to use /usr/bin/env. 
+ required: false + default: "no" + version_added: "2.2" version: description: - Version of the gem to be installed/removed. @@ -91,13 +101,21 @@ EXAMPLES = ''' # Installs version 1.0 of vagrant. -- gem: name=vagrant version=1.0 state=present +- gem: + name: vagrant + version: 1.0 + state: present # Installs latest available version of rake. -- gem: name=rake state=latest +- gem: + name: rake + state: latest # Installs rake version 1.0 from a local gem on disk. -- gem: name=rake gem_source=/path/to/gems/rake-1.0.gem state=present +- gem: + name: rake + gem_source: /path/to/gems/rake-1.0.gem + state: present ''' import re @@ -201,6 +219,8 @@ def install(module): cmd.append('--no-ri') else: cmd.append('--no-document') + if module.params['env_shebang']: + cmd.append('--env-shebang') cmd.append(module.params['gem_source']) if module.params['build_flags']: cmd.extend([ '--', module.params['build_flags'] ]) @@ -210,15 +230,16 @@ def main(): module = AnsibleModule( argument_spec = dict( - executable = dict(required=False, type='str'), - gem_source = dict(required=False, type='str'), + executable = dict(required=False, type='path'), + gem_source = dict(required=False, type='path'), include_dependencies = dict(required=False, default=True, type='bool'), name = dict(required=True, type='str'), repository = dict(required=False, aliases=['source'], type='str'), state = dict(required=False, default='present', choices=['present','absent','latest'], type='str'), user_install = dict(required=False, default=True, type='bool'), pre_release = dict(required=False, default=False, type='bool'), - include_doc = dict(required=False, default=False, type='bool'), + include_doc = dict(required=False, default=False, type='bool'), + env_shebang = dict(required=False, default=False, type='bool'), version = dict(required=False, type='str'), build_flags = dict(required=False, type='str'), ), @@ -256,4 +277,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() 
+ +if __name__ == '__main__': + main() diff --git a/packaging/language/pip.py b/packaging/language/pip.py index 45ca288fb37..ddf6e4af013 100755 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -19,10 +19,9 @@ # along with Ansible. If not, see . # -import tempfile -import re -import os -import sys +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -36,6 +35,7 @@ name: description: - The name of a Python library to install or the url of the remote package. + - As of 2.2 you can supply a list of names. required: false default: null version: @@ -45,14 +45,18 @@ default: null requirements: description: - - The path to a pip requirements file, which should be local to the remote system. - File can be specified as a relative path if using the chdir option. + - The path to a pip requirements file, which should be local to the remote system. + File can be specified as a relative path if using the chdir option. required: false default: null virtualenv: description: - An optional path to a I(virtualenv) directory to install into. - It cannot be specified together with the 'executable' parameter (added in 2.1). + It cannot be specified together with the 'executable' parameter + (added in 2.1). + If the virtualenv does not exist, it will be created before installing + packages. The optional virtualenv_site_packages, virtualenv_command, + and virtualenv_python options affect the creation of the virtualenv. required: false default: null virtualenv_site_packages: @@ -78,8 +82,8 @@ version_added: "2.0" description: - The Python executable used for creating the virtual environment. - For example C(python3.4), C(python2.7). When not specified, the - system Python version is used. + For example C(python3.5), C(python2.7). When not specified, the + Python version used to run the ansible module is used. 
required: false default: null state: @@ -114,6 +118,8 @@ example C(pip-3.3), if there are both Python 2.7 and 3.3 installations in the system and you want to run pip for the Python 3.3 installation. It cannot be specified together with the 'virtualenv' parameter (added in 2.1). + By default, it will take the appropriate version for the python interpreter + use by ansible, e.g. pip3 on python 3, and pip2 or pip on python 2. version_added: "1.3" required: false default: null @@ -129,56 +135,103 @@ default: null notes: - - Please note that virtualenv (U(http://www.virtualenv.org/)) must be installed on the remote host if the virtualenv parameter is specified and the virtualenv needs to be initialized. + - Please note that virtualenv (U(http://www.virtualenv.org/)) must be + installed on the remote host if the virtualenv parameter is specified and + the virtualenv needs to be created. + - By default, this module will use the appropriate version of pip for the + interpreter used by ansible (e.g. pip3 when using python 3, pip2 otherwise) requirements: [ "virtualenv", "pip" ] author: "Matt Wright (@mattupstate)" ''' EXAMPLES = ''' # Install (Bottle) python package. -- pip: name=bottle +- pip: + name: bottle # Install (Bottle) python package on version 0.11. -- pip: name=bottle version=0.11 +- pip: + name: bottle + version: 0.11 # Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args. -- pip: name='svn+http://myrepo/svn/MyApp#egg=MyApp' +- pip: + name: svn+http://myrepo/svn/MyApp# + egg: MyApp' # Install MyApp using one of the remote protocols (bzr+,hg+,git+) in a non editable way. 
-- pip: name='git+http://myrepo/app/MyApp' editable=false +- pip: + name: git+http://myrepo/app/MyApp + editable: false # Install (MyApp) from local tarball -- pip: name='file:///path/to/MyApp.tar.gz' +- pip: + name: file:///path/to/MyApp.tar.gz # Install (Bottle) into the specified (virtualenv), inheriting none of the globally installed modules -- pip: name=bottle virtualenv=/my_app/venv +- pip: + name: bottle + virtualenv: /my_app/venv # Install (Bottle) into the specified (virtualenv), inheriting globally installed modules -- pip: name=bottle virtualenv=/my_app/venv virtualenv_site_packages=yes +- pip: + name: bottle + virtualenv: /my_app/venv + virtualenv_site_packages: yes # Install (Bottle) into the specified (virtualenv), using Python 2.7 -- pip: name=bottle virtualenv=/my_app/venv virtualenv_command=virtualenv-2.7 +- pip: + name: bottle + virtualenv: /my_app/venv + virtualenv_command: virtualenv-2.7 # Install specified python requirements. -- pip: requirements=/my_app/requirements.txt +- pip: + requirements: /my_app/requirements.txt # Install specified python requirements in indicated (virtualenv). -- pip: requirements=/my_app/requirements.txt virtualenv=/my_app/venv +- pip: + requirements: /my_app/requirements.txt + virtualenv: /my_app/venv # Install specified python requirements and custom Index URL. -- pip: requirements=/my_app/requirements.txt extra_args='-i https://example.com/pypi/simple' +- pip: + requirements: /my_app/requirements.txt + extra_args: -i https://example.com/pypi/simple # Install (Bottle) for Python 3.3 specifically,using the 'pip-3.3' executable. 
-- pip: name=bottle executable=pip-3.3 +- pip: + name: bottle + executable: pip-3.3 # Install (Bottle), forcing reinstallation if it's already installed -- pip: name=bottle state=forcereinstall +- pip: + name: bottle + state: forcereinstall # Install (Bottle) while ensuring the umask is 0022 (to ensure other users can use it) -- pip: name=bottle umask=0022 +- pip: + name: bottle + umask: 0022 become: True ''' +import tempfile +import re +import os +import sys + +from ansible.module_utils.basic import AnsibleModule, is_executable +from ansible.module_utils._text import to_native +from ansible.module_utils.six import PY3 + +#: Python one-liners to be run at the command line that will determine the +# installed version for these special libraries. These are libraries that +# don't end up in the output of pip freeze. +_SPECIAL_PACKAGE_CHECKERS = {'setuptools': 'import setuptools; print(setuptools.__version__)', + 'pip': 'import pkg_resources; print(pkg_resources.get_distribution("pip").version)'} + + def _get_cmd_options(module, cmd): thiscmd = cmd + " --help" rc, stdout, stderr = module.run_command(thiscmd) @@ -186,7 +239,7 @@ def _get_cmd_options(module, cmd): module.fail_json(msg="Could not get output from %s: %s" % (thiscmd, stdout + stderr)) words = stdout.strip().split() - cmd_options = [ x for x in words if x.startswith('--') ] + cmd_options = [x for x in words if x.startswith('--')] return cmd_options @@ -197,27 +250,59 @@ def _get_full_name(name, version=None): resp = name + '==' + version return resp -def _is_present(name, version, installed_pkgs): + +def _get_packages(module, pip, chdir): + '''Return results of pip command to get packages.''' + # Try 'pip list' command first. + command = '%s list' % pip + lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + rc, out, err = module.run_command(command, cwd=chdir, environ_update=lang_env) + + # If there was an error (pip version too old) then use 'pip freeze'. 
+ if rc != 0: + command = '%s freeze' % pip + rc, out, err = module.run_command(command, cwd=chdir) + if rc != 0: + _fail(module, command, out, err) + + return (command, out, err) + + +def _is_present(name, version, installed_pkgs, pkg_command): + '''Return whether or not package is installed.''' for pkg in installed_pkgs: - if '==' not in pkg: + # Package listing will be different depending on which pip + # command was used ('pip list' vs. 'pip freeze'). + if 'list' in pkg_command: + pkg = pkg.replace('(', '').replace(')', '') + if ',' in pkg: + pkg_name, pkg_version, _ = pkg.replace(',', '').split(' ') + else: + pkg_name, pkg_version = pkg.split(' ') + elif 'freeze' in pkg_command: + if '==' in pkg: + pkg_name, pkg_version = pkg.split('==') + else: + continue + else: continue - [pkg_name, pkg_version] = pkg.split('==') - if pkg_name == name and (version is None or version == pkg_version): return True return False - def _get_pip(module, env=None, executable=None): - # On Debian and Ubuntu, pip is pip. - # On Fedora18 and up, pip is python-pip. - # On Fedora17 and below, CentOS and RedHat 6 and 5, pip is pip-python. - # On Fedora, CentOS, and RedHat, the exception is in the virtualenv. - # There, pip is just pip. - candidate_pip_basenames = ['pip', 'python-pip', 'pip-python'] + # Older pip only installed under the "/usr/bin/pip" name. Many Linux + # distros install it there. + # By default, we try to use pip required for the current python + # interpreter, so people can use pip to install modules dependencies + candidate_pip_basenames = ('pip2', 'pip') + if PY3: + # pip under python3 installs the "/usr/bin/pip3" name + candidate_pip_basenames = ('pip3',) + pip = None if executable is not None: executable = os.path.expanduser(executable) @@ -225,22 +310,39 @@ def _get_pip(module, env=None, executable=None): pip = executable else: # If you define your own executable that executable should be the only candidate. 
- candidate_pip_basenames = [executable] + # As noted in the docs, executable doesn't work with virtualenvs. + candidate_pip_basenames = (executable,) + if pip is None: if env is None: opt_dirs = [] + for basename in candidate_pip_basenames: + pip = module.get_bin_path(basename, False, opt_dirs) + if pip is not None: + break + else: + # For-else: Means that we did not break out of the loop + # (therefore, that pip was not found) + module.fail_json(msg='Unable to find any of %s to use. pip' + ' needs to be installed.' % ', '.join(candidate_pip_basenames)) else: - # Try pip with the virtualenv directory first. - opt_dirs = ['%s/bin' % env] - for basename in candidate_pip_basenames: - pip = module.get_bin_path(basename, False, opt_dirs) - if pip is not None: - break - # pip should have been found by now. The final call to get_bin_path will - # trigger fail_json. - if pip is None: - basename = candidate_pip_basenames[0] - pip = module.get_bin_path(basename, True, opt_dirs) + # If we're using a virtualenv we must use the pip from the + # virtualenv + venv_dir = os.path.join(env, 'bin') + candidate_pip_basenames = (candidate_pip_basenames[0], 'pip') + for basename in candidate_pip_basenames: + candidate = os.path.join(venv_dir, basename) + if os.path.exists(candidate) and is_executable(candidate): + pip = candidate + break + else: + # For-else: Means that we did not break out of the loop + # (therefore, that pip was not found) + module.fail_json(msg='Unable to find pip in the virtualenv,' + ' %s, under any of these names: %s. Make sure pip is' + ' present in the virtualenv.' % (env, + ', '.join(candidate_pip_basenames))) + return pip @@ -253,6 +355,31 @@ def _fail(module, cmd, out, err): module.fail_json(cmd=cmd, msg=msg) +def _get_package_info(module, package, env=None): + """This is only needed for special packages which do not show up in pip freeze + + pip and setuptools fall into this category. 
+ + :returns: a string containing the version number if the package is + installed. None if the package is not installed. + """ + if env: + opt_dirs = ['%s/bin' % env] + else: + opt_dirs = [] + python_bin = module.get_bin_path('python', False, opt_dirs) + + if python_bin is None: + formatted_dep = None + else: + rc, out, err = module.run_command([python_bin, '-c', _SPECIAL_PACKAGE_CHECKERS[package]]) + if rc: + formatted_dep = None + else: + formatted_dep = '%s==%s' % (package, out.strip()) + return formatted_dep + + def main(): state_map = dict( present='install', @@ -264,19 +391,19 @@ def main(): module = AnsibleModule( argument_spec=dict( state=dict(default='present', choices=state_map.keys()), - name=dict(default=None, required=False), - version=dict(default=None, required=False, type='str'), - requirements=dict(default=None, required=False), - virtualenv=dict(default=None, required=False), - virtualenv_site_packages=dict(default='no', type='bool'), - virtualenv_command=dict(default='virtualenv', required=False), - virtualenv_python=dict(default=None, required=False, type='str'), - use_mirrors=dict(default='yes', type='bool'), - extra_args=dict(default=None, required=False), - editable=dict(default='yes', type='bool', required=False), - chdir=dict(default=None, required=False, type='path'), - executable=dict(default=None, required=False), - umask=dict(required=False,default=None), + name=dict(type='list'), + version=dict(type='str'), + requirements=dict(), + virtualenv=dict(type='path'), + virtualenv_site_packages=dict(default=False, type='bool'), + virtualenv_command=dict(default='virtualenv', type='path'), + virtualenv_python=dict(type='str'), + use_mirrors=dict(default=True, type='bool'), + extra_args=dict(), + editable=dict(default=True, type='bool'), + chdir=dict(type='path'), + executable=dict(), + umask=dict(), ), required_one_of=[['name', 'requirements']], mutually_exclusive=[['name', 'requirements'], ['executable', 'virtualenv']], @@ -297,11 +424,10 @@ 
def main(): umask = int(umask, 8) except Exception: module.fail_json(msg="umask must be an octal integer", - details=str(sys.exc_info()[1])) - + details=to_native(sys.exc_info()[1])) old_umask = None - if umask != None: + if umask is not None: old_umask = os.umask(umask) try: if state == 'latest' and version is not None: @@ -309,23 +435,21 @@ def main(): if chdir is None: # this is done to avoid permissions issues with privilege escalation and virtualenvs - chdir = tempfile.gettempdir() + chdir = tempfile.gettempdir() err = '' out = '' env = module.params['virtualenv'] - virtualenv_command = module.params['virtualenv_command'] if env: - env = os.path.expanduser(env) if not os.path.exists(os.path.join(env, 'bin', 'activate')): if module.check_mode: module.exit_json(changed=True) - cmd = os.path.expanduser(virtualenv_command) + cmd = module.params['virtualenv_command'] if os.path.basename(cmd) == cmd: - cmd = module.get_bin_path(virtualenv_command, True) + cmd = module.get_bin_path(cmd, True) if module.params['virtualenv_site_packages']: cmd += ' --system-site-packages' @@ -336,6 +460,14 @@ def main(): if virtualenv_python: cmd += ' -p%s' % virtualenv_python + elif PY3: + # Ubuntu currently has a patch making virtualenv always + # try to use python2. Since Ubuntu16 works without + # python2 installed, this is a problem. This code mimics + # the upstream behaviour of using the python which invoked + # virtualenv to determine which python is used inside of + # the virtualenv (when none are specified). + cmd += ' -p%s' % sys.executable cmd = "%s %s" % (cmd, env) rc, out_venv, err_venv = module.run_command(cmd, cwd=chdir) @@ -360,7 +492,13 @@ def main(): # Automatically apply -e option to extra_args when source is a VCS url. 
VCS # includes those beginning with svn+, git+, hg+ or bzr+ - has_vcs = bool(name and re.match(r'(svn|git|hg|bzr)\+', name)) + has_vcs = False + if name: + for pkg in name: + if bool(pkg and re.match(r'(svn|git|hg|bzr)\+', pkg)): + has_vcs = True + break + if has_vcs and module.params['editable']: args_list = [] # used if extra_args is not used at all if extra_args: @@ -372,11 +510,13 @@ def main(): if extra_args: cmd += ' %s' % extra_args - if name: - cmd += ' %s' % _get_full_name(name, version) - elif requirements: - cmd += ' -r %s' % requirements + if name: + for pkg in name: + cmd += ' %s' % _get_full_name(pkg, version) + else: + if requirements: + cmd += ' -r %s' % requirements if module.check_mode: if extra_args or requirements or state == 'latest' or not name: @@ -384,24 +524,35 @@ def main(): elif has_vcs: module.exit_json(changed=True) - freeze_cmd = '%s freeze' % pip - - rc, out_pip, err_pip = module.run_command(freeze_cmd, cwd=chdir) - - if rc != 0: - module.exit_json(changed=True) + pkg_cmd, out_pip, err_pip = _get_packages(module, pip, chdir) out += out_pip err += err_pip - is_present = _is_present(name, version, out.split()) - - changed = (state == 'present' and not is_present) or (state == 'absent' and is_present) - module.exit_json(changed=changed, cmd=freeze_cmd, stdout=out, stderr=err) + changed = False + if name: + pkg_list = [p for p in out.split('\n') if not p.startswith('You are using') and not p.startswith('You should consider') and p] + + if pkg_cmd.endswith(' freeze') and ('pip' in name or 'setuptools' in name): + # Older versions of pip (pre-1.3) do not have pip list. 
+ # pip freeze does not list setuptools or pip in its output + # So we need to get those via a specialcase + for pkg in ('setuptools', 'pip'): + if pkg in name: + formatted_dep = _get_package_info(module, pkg, env) + if formatted_dep is not None: + pkg_list.append(formatted_dep) + out += '%s\n' % formatted_dep + + for pkg in name: + is_present = _is_present(pkg, version, pkg_list, pkg_cmd) + if (state == 'present' and not is_present) or (state == 'absent' and is_present): + changed = True + break + module.exit_json(changed=changed, cmd=pkg_cmd, stdout=out, stderr=err) if requirements or has_vcs: - freeze_cmd = '%s freeze' % pip - out_freeze_before = module.run_command(freeze_cmd, cwd=chdir)[1] + _, out_freeze_before, _ = _get_packages(module, pip, chdir) else: out_freeze_before = None @@ -423,17 +574,15 @@ def main(): if out_freeze_before is None: changed = 'Successfully installed' in out_pip else: - out_freeze_after = module.run_command(freeze_cmd, cwd=chdir)[1] + _, out_freeze_after, _ = _get_packages(module, pip, chdir) changed = out_freeze_before != out_freeze_after module.exit_json(changed=changed, cmd=cmd, name=name, version=version, state=state, requirements=requirements, virtualenv=env, stdout=out, stderr=err) finally: - if old_umask != None: + if old_umask is not None: os.umask(old_umask) -# import module snippets -from ansible.module_utils.basic import * - -main() +if __name__ == '__main__': + main() diff --git a/packaging/os/apt.py b/packaging/os/apt.py index d1a844ad738..5637680cd9a 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -19,6 +19,10 @@ # along with this software. If not, see . # +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: apt @@ -47,9 +51,9 @@ choices: [ "yes", "no" ] cache_valid_time: description: - - If C(update_cache) is specified and the last run is less or equal than I(cache_valid_time) seconds ago, the C(update_cache) gets skipped. 
+ - Update the apt cache if its older than the I(cache_valid_time). This option is set in seconds. required: false - default: no + default: 0 purge: description: - Will force purging of configuration files if the module state is set to I(absent). @@ -112,12 +116,14 @@ version_added: "2.1" only_upgrade: description: - - Only install/upgrade a package it it is already installed. + - Only install/upgrade a package if it is already installed. required: false default: false version_added: "2.1" - -requirements: [ python-apt, aptitude ] +requirements: + - python-apt (python 2) + - python3-apt (python 3) + - aptitude author: "Matthew Williams (@mgwilliams)" notes: - Three of the upgrade modes (C(full), C(safe) and its alias C(yes)) require C(aptitude), otherwise @@ -125,44 +131,70 @@ ''' EXAMPLES = ''' -# Update repositories cache and install "foo" package -- apt: name=foo update_cache=yes - -# Remove "foo" package -- apt: name=foo state=absent - -# Install the package "foo" -- apt: name=foo state=present - -# Install the version '1.00' of package "foo" -- apt: name=foo=1.00 state=present - -# Update the repository cache and update package "nginx" to latest version using default release squeeze-backport -- apt: name=nginx state=latest default_release=squeeze-backports update_cache=yes - -# Install latest version of "openjdk-6-jdk" ignoring "install-recommends" -- apt: name=openjdk-6-jdk state=latest install_recommends=no - -# Update all packages to the latest version -- apt: upgrade=dist - -# Run the equivalent of "apt-get update" as a separate step -- apt: update_cache=yes - -# Only run "update_cache=yes" if the last one is more than 3600 seconds ago -- apt: update_cache=yes cache_valid_time=3600 - -# Pass options to dpkg on run -- apt: upgrade=dist update_cache=yes dpkg_options='force-confold,force-confdef' - -# Install a .deb package -- apt: deb=/tmp/mypackage.deb - -# Install the build dependencies for package "foo" -- apt: pkg=foo state=build-dep - -# Install a .deb 
package from the internet. -- apt: deb=https://example.com/python-ppq_0.1-1_all.deb +- name: Update repositories cache and install "foo" package + apt: + name: foo + update_cache: yes + +- name: Remove "foo" package + apt: + name: foo + state: absent + +- name: Install the package "foo" + apt: + name: foo + state: present + +- name: Install the version '1.00' of package "foo" + apt: + name: foo=1.00 + state: present + +- name: Update the repository cache and update package "nginx" to latest version using default release squeeze-backport + apt: + name: nginx + state: latest + default_release: squeeze-backports + update_cache: yes + +- name: Install latest version of "openjdk-6-jdk" ignoring "install-recommends" + apt: + name: openjdk-6-jdk + state: latest + install_recommends: no + +- name: Update all packages to the latest version + apt: + upgrade: dist + +- name: Run the equivalent of "apt-get update" as a separate step + apt: + update_cache: yes + +- name: Only run "update_cache=yes" if the last one is more than 3600 seconds ago + apt: + update_cache: yes + cache_valid_time: 3600 + +- name: Pass options to dpkg on run + apt: + upgrade: dist + update_cache: yes + dpkg_options: 'force-confold,force-confdef' + +- name: Install a .deb package + apt: + deb: /tmp/mypackage.deb + +- name: Install the build dependencies for package "foo" + apt: + pkg: foo + state: build-dep + +- name: Install a .deb package from the internet. + apt: + deb: https://example.com/python-ppq_0.1-1_all.deb ''' RETURN = ''' @@ -188,15 +220,22 @@ sample: "AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1. Set the 'ServerName' directive globally to ..." 
''' -import traceback # added to stave off future warnings about apt api import warnings warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning) -import os import datetime import fnmatch import itertools +import os +import re +import sys +import time + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import fetch_url # APT related constants APT_ENV_VARS = dict( @@ -224,6 +263,12 @@ except ImportError: HAS_PYTHON_APT = False +if sys.version_info[0] < 3: + PYTHON_APT = 'python-apt' +else: + PYTHON_APT = 'python3-apt' + + def package_split(pkgspec): parts = pkgspec.split('=', 1) if len(parts) > 1: @@ -231,6 +276,7 @@ def package_split(pkgspec): else: return parts[0], None + def package_versions(pkgname, pkg, pkg_cache): try: versions = set(p.version for p in pkg.versions) @@ -243,17 +289,19 @@ def package_versions(pkgname, pkg, pkg_cache): return versions + def package_version_compare(version, other_version): try: return apt_pkg.version_compare(version, other_version) except AttributeError: return apt_pkg.VersionCompare(version, other_version) + def package_status(m, pkgname, version, cache, state): try: # get the package from the cache, as well as the - # the low-level apt_pkg.Package object which contains - # state fields not directly acccesible from the + # low-level apt_pkg.Package object which contains + # state fields not directly accessible from the # higher-level apt.package.Package object. 
pkg = cache[pkgname] ll_pkg = cache._cache[pkgname] # the low-level package object @@ -263,13 +311,14 @@ def package_status(m, pkgname, version, cache, state): provided_packages = cache.get_providing_packages(pkgname) if provided_packages: is_installed = False + upgradable = False # when virtual package providing only one package, look up status of target package if cache.is_virtual_package(pkgname) and len(provided_packages) == 1: package = provided_packages[0] installed, upgradable, has_files = package_status(m, package.name, version, cache, state='install') if installed: is_installed = True - return is_installed, True, False + return is_installed, upgradable, False m.fail_json(msg="No package matching '%s' is available" % pkgname) except AttributeError: # python-apt version too old to detect virtual packages @@ -324,6 +373,7 @@ def package_status(m, pkgname, version, cache, state): return package_is_installed, package_is_upgradable, has_files + def expand_dpkg_options(dpkg_options_compressed): options_list = dpkg_options_compressed.split(',') dpkg_options = "" @@ -332,6 +382,7 @@ def expand_dpkg_options(dpkg_options_compressed): % (dpkg_options, dpkg_option) return dpkg_options.strip() + def expand_pkgspec_from_fnmatches(m, pkgspec, cache): # Note: apt-get does implicit regex matching when an exact package name # match is not found. Something like this: @@ -349,16 +400,19 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache): if frozenset('*?[]!').intersection(pkgname_pattern): # handle multiarch pkgnames, the idea is that "apt*" should # only select native packages. 
But "apt*:i386" should still work - if not ":" in pkgname_pattern: + if ":" not in pkgname_pattern: + # Filter the multiarch packages from the cache only once try: pkg_name_cache = _non_multiarch except NameError: - pkg_name_cache = _non_multiarch = [pkg.name for pkg in cache if not ':' in pkg.name] + pkg_name_cache = _non_multiarch = [pkg.name for pkg in cache if ':' not in pkg.name] # noqa: F841 else: + # Create a cache of pkg_names including multiarch only once try: pkg_name_cache = _all_pkg_names except NameError: - pkg_name_cache = _all_pkg_names = [pkg.name for pkg in cache] + pkg_name_cache = _all_pkg_names = [pkg.name for pkg in cache] # noqa: F841 + matches = fnmatch.filter(pkg_name_cache, pkgname_pattern) if len(matches) == 0: @@ -370,6 +424,29 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache): new_pkgspec.append(pkgspec_pattern) return new_pkgspec + +def parse_diff(output): + diff = to_native(output).splitlines() + try: + # check for start marker from aptitude + diff_start = diff.index('Resolving dependencies...') + except ValueError: + try: + # check for start marker from apt-get + diff_start = diff.index('Reading state information...') + except ValueError: + # show everything + diff_start = -1 + try: + # check for end marker line from both apt-get and aptitude + diff_end = next(i for i, item in enumerate(diff) if re.match('[0-9]+ (packages )?upgraded', item)) + except StopIteration: + diff_end = len(diff) + diff_start += 1 + diff_end += 1 + return {'prepared': '\n'.join(diff[diff_start:diff_end])} + + def install(m, pkgspec, cache, upgrade=False, default_release=None, install_recommends=None, force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), @@ -379,12 +456,13 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None, packages = "" pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) for package in pkgspec: - name, version = package_split(package) - installed, upgradable, has_files = package_status(m, name, version, 
cache, state='install') if build_dep: # Let apt decide what to install pkg_list.append("'%s'" % package) continue + + name, version = package_split(package) + installed, upgradable, has_files = package_status(m, name, version, cache, state='install') if not installed or (upgrade and upgradable): pkg_list.append("'%s'" % package) if installed and upgradable and version: @@ -436,13 +514,27 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None, cmd += " --allow-unauthenticated" rc, out, err = m.run_command(cmd) + if m._diff: + diff = parse_diff(out) + else: + diff = {} if rc: return (False, dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err)) else: - return (True, dict(changed=True, stdout=out, stderr=err)) + return (True, dict(changed=True, stdout=out, stderr=err, diff=diff)) else: return (True, dict(changed=False)) + +def get_field_of_deb(m, deb_file, field="Version"): + cmd_dpkg = m.get_bin_path("dpkg", True) + cmd = cmd_dpkg + " --field %s %s" % (deb_file, field) + rc, stdout, stderr = m.run_command(cmd) + if rc != 0: + m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr) + return to_native(stdout).strip('\n') + + def install_deb(m, debs, cache, force, install_recommends, allow_unauthenticated, dpkg_options): changed=False deps_to_install = [] @@ -450,10 +542,17 @@ def install_deb(m, debs, cache, force, install_recommends, allow_unauthenticated for deb_file in debs.split(','): try: pkg = apt.debfile.DebPackage(deb_file) - - # Check if it's already installed - if pkg.compare_to_version_in_cache() == pkg.VERSION_SAME: - continue + pkg_name = get_field_of_deb(m, deb_file, "Package") + pkg_version = get_field_of_deb(m, deb_file, "Version") + try: + installed_pkg = apt.Cache()[pkg_name] + installed_version = installed_pkg.installed.version + if package_version_compare(pkg_version, installed_version) == 0: + # Does not need to down-/upgrade, move on to next package + continue + except Exception: + # Must not be installed, 
continue with installation + pass # Check if package is installable if not pkg.check() and not force: m.fail_json(msg=pkg._failure_string) @@ -462,7 +561,8 @@ def install_deb(m, debs, cache, force, install_recommends, allow_unauthenticated # to install so they're all done in one shot deps_to_install.extend(pkg.missing_deps) - except Exception, e: + except Exception: + e = get_exception() m.fail_json(msg="Unable to install package: %s" % str(e)) # and add this deb to the list of packages to install @@ -491,19 +591,26 @@ def install_deb(m, debs, cache, force, install_recommends, allow_unauthenticated stdout = retvals["stdout"] + out else: stdout = out + if "diff" in retvals: + diff = retvals["diff"] + if 'prepared' in diff: + diff['prepared'] += '\n\n' + out + else: + diff = parse_diff(out) if "stderr" in retvals: stderr = retvals["stderr"] + err else: stderr = err if rc == 0: - m.exit_json(changed=True, stdout=stdout, stderr=stderr) + m.exit_json(changed=True, stdout=stdout, stderr=stderr, diff=diff) else: m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr) else: - m.exit_json(changed=changed, stdout=retvals.get('stdout',''), stderr=retvals.get('stderr','')) + m.exit_json(changed=changed, stdout=retvals.get('stdout',''), stderr=retvals.get('stderr',''), diff=retvals.get('diff', '')) -def remove(m, pkgspec, cache, purge=False, + +def remove(m, pkgspec, cache, purge=False, force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False): pkg_list = [] pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) @@ -517,6 +624,11 @@ def remove(m, pkgspec, cache, purge=False, if len(packages) == 0: m.exit_json(changed=False) else: + if force: + force_yes = '--force-yes' + else: + force_yes = '' + if purge: purge = '--purge' else: @@ -527,15 +639,22 @@ def remove(m, pkgspec, cache, purge=False, else: autoremove = '' - cmd = "%s -q -y %s %s %s remove %s" % (APT_GET_CMD, dpkg_options, purge, autoremove, packages) - if m.check_mode: - 
m.exit_json(changed=True) + check_arg = '--simulate' + else: + check_arg = '' + + cmd = "%s -q -y %s %s %s %s %s remove %s" % (APT_GET_CMD, dpkg_options, purge, force_yes ,autoremove, check_arg, packages) rc, out, err = m.run_command(cmd) + if m._diff: + diff = parse_diff(out) + else: + diff = {} if rc: m.fail_json(msg="'apt-get remove %s' failed: %s" % (packages, err), stdout=out, stderr=err) - m.exit_json(changed=True, stdout=out, stderr=err) + m.exit_json(changed=True, stdout=out, stderr=err, diff=diff) + def upgrade(m, mode="yes", force=False, default_release=None, dpkg_options=expand_dpkg_options(DPKG_OPTIONS)): @@ -577,11 +696,16 @@ def upgrade(m, mode="yes", force=False, default_release=None, cmd += " -t '%s'" % (default_release,) rc, out, err = m.run_command(cmd, prompt_regex=prompt_regex) + if m._diff: + diff = parse_diff(out) + else: + diff = {} if rc: m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out) if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out): m.exit_json(changed=False, msg=out, stdout=out, stderr=err) - m.exit_json(changed=True, msg=out, stdout=out, stderr=err) + m.exit_json(changed=True, msg=out, stdout=out, stderr=err, diff=diff) + def download(module, deb): tempdir = os.path.dirname(__file__) @@ -603,20 +727,72 @@ def download(module, deb): f.write(data) f.close() deb = package - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="Failure downloading %s, %s" % (deb, e)) return deb + +def get_cache_mtime(): + """Return mtime of a valid apt cache file. 
+ Stat the apt cache file and if no cache file is found return 0 + :returns: ``int`` + """ + if os.path.exists(APT_UPDATE_SUCCESS_STAMP_PATH): + return os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime + elif os.path.exists(APT_LISTS_PATH): + return os.stat(APT_LISTS_PATH).st_mtime + else: + return 0 + + +def get_updated_cache_time(): + """Return the mtime time stamp and the updated cache time. + Always retrieve the mtime of the apt cache or set the `cache_mtime` + variable to 0 + :returns: ``tuple`` + """ + cache_mtime = get_cache_mtime() + mtimestamp = datetime.datetime.fromtimestamp(cache_mtime) + updated_cache_time = int(time.mktime(mtimestamp.timetuple())) + return mtimestamp, updated_cache_time + + +# https://github.com/ansible/ansible-modules-core/issues/2951 +def get_cache(module): + '''Attempt to get the cache object and update till it works''' + cache = None + try: + cache = apt.Cache() + except SystemError: + e = get_exception() + if '/var/lib/apt/lists/' in str(e).lower(): + # update cache until files are fixed or retries exceeded + retries = 0 + while retries < 2: + (rc, so, se) = module.run_command(['apt-get', 'update', '-q']) + retries += 1 + if rc == 0: + break + if rc != 0: + module.fail_json(msg='Updating the cache to correct corrupt package lists failed:\n%s\n%s' % (str(e), str(so) + str(se))) + # try again + cache = apt.Cache() + else: + module.fail_json(msg=str(e)) + return cache + + def main(): module = AnsibleModule( argument_spec = dict( state = dict(default='present', choices=['installed', 'latest', 'removed', 'absent', 'present', 'build-dep']), update_cache = dict(default=False, aliases=['update-cache'], type='bool'), - cache_valid_time = dict(type='int'), + cache_valid_time = dict(type='int', default=0), purge = dict(default=False, type='bool'), package = dict(default=None, aliases=['pkg', 'name'], type='list'), - deb = dict(default=None), + deb = dict(default=None, type='path'), default_release = dict(default=None, 
aliases=['default-release']), install_recommends = dict(default=None, aliases=['install-recommends'], type='bool'), force = dict(default='no', type='bool'), @@ -635,16 +811,18 @@ def main(): if not HAS_PYTHON_APT: if module.check_mode: - module.fail_json(msg="python-apt must be installed to use check mode. If run normally this module can autoinstall it") + module.fail_json(msg="%s must be installed to use check mode. " + "If run normally this module can auto-install it." % PYTHON_APT) try: - module.run_command('apt-get update', check_rc=True) - module.run_command('apt-get install python-apt -y -q', check_rc=True) + module.run_command(['apt-get', 'update'], check_rc=True) + module.run_command(['apt-get', 'install', PYTHON_APT, '-y', '-q'], check_rc=True) global apt, apt_pkg import apt import apt.debfile import apt_pkg except ImportError: - module.fail_json(msg="Could not import python modules: apt, apt_pkg. Please install python-apt package.") + module.fail_json(msg="Could not import python modules: apt, apt_pkg. " + "Please install %s package." 
% PYTHON_APT) global APTITUDE_CMD APTITUDE_CMD = module.get_bin_path("aptitude", False) @@ -672,8 +850,10 @@ def main(): if p['state'] == 'removed': p['state'] = 'absent' + # Get the cache object + cache = get_cache(module) + try: - cache = apt.Cache() if p['default_release']: try: apt_pkg.config['APT::Default-Release'] = p['default_release'] @@ -682,47 +862,35 @@ def main(): # reopen cache w/ modified config cache.open(progress=None) + mtimestamp, updated_cache_time = get_updated_cache_time() + # Cache valid time is default 0, which will update the cache if + # needed and `update_cache` was set to true + updated_cache = False if p['update_cache']: - # Default is: always update the cache - cache_valid = False now = datetime.datetime.now() - if p.get('cache_valid_time', False): - try: - mtime = os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime - except: - # Looks like the update-success-stamp is not available - # Fallback: Checking the mtime of the lists - try: - mtime = os.stat(APT_LISTS_PATH).st_mtime - except: - # No mtime could be read. We update the cache to be safe - mtime = False - - if mtime: - tdelta = datetime.timedelta(seconds=p['cache_valid_time']) - mtimestamp = datetime.datetime.fromtimestamp(mtime) - if mtimestamp + tdelta >= now: - cache_valid = True - updated_cache_time = int(time.mktime(mtimestamp.timetuple())) - - if cache_valid is not True: - for retry in xrange(3): + tdelta = datetime.timedelta(seconds=p['cache_valid_time']) + if not mtimestamp + tdelta >= now: + # Retry to update the cache up to 3 times + for retry in range(3): try: cache.update() break except apt.cache.FetchFailedException: pass else: - #out of retries, pass on the exception - raise + module.fail_json(msg='Failed to update apt cache.') cache.open(progress=None) updated_cache = True - updated_cache_time = int(time.mktime(now.timetuple())) + mtimestamp, updated_cache_time = get_updated_cache_time() + + # If there is nothing else to do exit. 
This will set state as + # changed based on if the cache was updated. if not p['package'] and not p['upgrade'] and not p['deb']: - module.exit_json(changed=False, cache_updated=updated_cache, cache_update_time=updated_cache_time) - else: - updated_cache = False - updated_cache_time = 0 + module.exit_json( + changed=updated_cache, + cache_updated=updated_cache, + cache_update_time=updated_cache_time + ) force_yes = p['force'] @@ -754,31 +922,39 @@ def main(): state_upgrade = True if p['state'] == 'build-dep': state_builddep = True - result = install(module, packages, cache, upgrade=state_upgrade, - default_release=p['default_release'], - install_recommends=install_recommends, - force=force_yes, dpkg_options=dpkg_options, - build_dep=state_builddep, autoremove=autoremove, - only_upgrade=p['only_upgrade'], - allow_unauthenticated=allow_unauthenticated) - (success, retvals) = result - retvals['cache_updated']=updated_cache - retvals['cache_update_time']=updated_cache_time + + success, retvals = install( + module, + packages, + cache, + upgrade=state_upgrade, + default_release=p['default_release'], + install_recommends=install_recommends, + force=force_yes, + dpkg_options=dpkg_options, + build_dep=state_builddep, + autoremove=autoremove, + only_upgrade=p['only_upgrade'], + allow_unauthenticated=allow_unauthenticated + ) + + # Store if the cache has been updated + retvals['cache_updated'] = updated_cache + # Store when the update time was last + retvals['cache_update_time'] = updated_cache_time + if success: module.exit_json(**retvals) else: module.fail_json(**retvals) elif p['state'] == 'absent': - remove(module, packages, cache, p['purge'], dpkg_options, autoremove) + remove(module, packages, cache, p['purge'], force=force_yes, dpkg_options=dpkg_options, autoremove=autoremove) except apt.cache.LockFailedException: module.fail_json(msg="Failed to lock apt for exclusive operation") except apt.cache.FetchFailedException: module.fail_json(msg="Could not fetch updated apt 
files") -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * if __name__ == "__main__": main() diff --git a/packaging/os/apt_key.py b/packaging/os/apt_key.py index bc8cb80b5c8..a5fd723f6c3 100644 --- a/packaging/os/apt_key.py +++ b/packaging/os/apt_key.py @@ -19,6 +19,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: apt_key @@ -37,16 +41,17 @@ default: none description: - identifier of key. Including this allows check mode to correctly report the changed state. + - "If specifying a subkey's id be aware that apt-key does not understand how to remove keys via a subkey id. Specify the primary key's id instead." data: required: false default: none description: - - keyfile contents + - keyfile contents to add to the keyring file: required: false default: none description: - - keyfile path + - path to a keyfile on the remote server to add to the keyring keyring: required: false default: none @@ -82,57 +87,120 @@ EXAMPLES = ''' # Add an apt key by id from a keyserver -- apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9 +- apt_key: + keyserver: keyserver.ubuntu.com + id: 36A1D7869245C8950F966E92D8576A8BA88D21E9 # Add an Apt signing key, uses whichever key is at the URL -- apt_key: url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=present +- apt_key: + url: "https://ftp-master.debian.org/keys/archive-key-6.0.asc" + state: present # Add an Apt signing key, will not download if present -- apt_key: id=473041FA url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=present +- apt_key: + id: 473041FA + url: "https://ftp-master.debian.org/keys/archive-key-6.0.asc" + state: present # Remove an Apt signing key, uses whichever key is at the URL -- apt_key: 
url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=absent +- apt_key: + url: "https://ftp-master.debian.org/keys/archive-key-6.0.asc" + state: absent # Remove a Apt specific signing key, leading 0x is valid -- apt_key: id=0x473041FA state=absent +- apt_key: + id: 0x473041FA + state: absent # Add a key from a file on the Ansible server -- apt_key: data="{{ lookup('file', 'apt.gpg') }}" state=present +- apt_key: + data: "{{ lookup('file', 'apt.gpg') }}" + state: present # Add an Apt signing key to a specific keyring file -- apt_key: id=473041FA url=https://ftp-master.debian.org/keys/archive-key-6.0.asc keyring=/etc/apt/trusted.gpg.d/debian.gpg state=present +- apt_key: + id: 473041FA + url: "https://ftp-master.debian.org/keys/archive-key-6.0.asc" + keyring: /etc/apt/trusted.gpg.d/debian.gpg + +# Add Apt signing key on remote server to keyring +- apt_key: + id: 473041FA + file: /tmp/apt.gpg + state: present ''' # FIXME: standardize into module_common from traceback import format_exc -from re import compile as re_compile -# FIXME: standardize into module_common -from distutils.spawn import find_executable -from os import environ -from sys import exc_info -import traceback -match_key = re_compile("^gpg:.*key ([0-9a-fA-F]+):.*$") +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import fetch_url + + +apt_key_bin = None + + +def find_needed_binaries(module): + global apt_key_bin + + apt_key_bin = module.get_bin_path('apt-key', required=True) + + ### FIXME: Is there a reason that gpg and grep are checked? Is it just + # cruft or does the apt .deb package not require them (and if they're not + # installed, /usr/bin/apt-key fails?) 
+ module.get_bin_path('gpg', required=True) + module.get_bin_path('grep', required=True) + -REQUIRED_EXECUTABLES=['gpg', 'grep', 'apt-key'] +def parse_key_id(key_id): + """validate the key_id and break it into segments + :arg key_id: The key_id as supplied by the user. A valid key_id will be + 8, 16, or more hexadecimal chars with an optional leading ``0x``. + :returns: The portion of key_id suitable for apt-key del, the portion + suitable for comparisons with --list-public-keys, and the portion that + can be used with --recv-key. If key_id is long enough, these will be + the last 8 characters of key_id, the last 16 characters, and all of + key_id. If key_id is not long enough, some of the values will be the + same. + + * apt-key del <= 1.10 has a bug with key_id != 8 chars + * apt-key adv --list-public-keys prints 16 chars + * apt-key adv --recv-key can take more chars + + """ + # Make sure the key_id is valid hexadecimal + int(key_id, 16) + + key_id = key_id.upper() + if key_id.startswith('0X'): + key_id = key_id[2:] + + key_id_len = len(key_id) + if (key_id_len != 8 and key_id_len != 16) and key_id_len <= 16: + raise ValueError('key_id must be 8, 16, or 16+ hexadecimal characters in length') + + short_key_id = key_id[-8:] + + fingerprint = key_id + if key_id_len > 16: + fingerprint = key_id[-16:] + + return short_key_id, fingerprint, key_id -def check_missing_binaries(module): - missing = [e for e in REQUIRED_EXECUTABLES if not find_executable(e)] - if len(missing): - module.fail_json(msg="binaries are missing", names=missing) def all_keys(module, keyring, short_format): if keyring: - cmd = "apt-key --keyring %s adv --list-public-keys --keyid-format=long" % keyring + cmd = "%s --keyring %s adv --list-public-keys --keyid-format=long" % (apt_key_bin, keyring) else: - cmd = "apt-key adv --list-public-keys --keyid-format=long" + cmd = "%s adv --list-public-keys --keyid-format=long" % apt_key_bin (rc, out, err) = module.run_command(cmd) results = [] - lines = 
out.split('\n') + lines = to_native(out).split('\n') for line in lines: - if line.startswith("pub"): + if line.startswith("pub") or line.startswith("sub"): tokens = line.split() code = tokens[1] (len_type, real_code) = code.split("/") @@ -141,6 +209,7 @@ def all_keys(module, keyring, short_format): results = shorten_key_ids(results) return results + def shorten_key_ids(key_id_list): """ Takes a list of key ids, and converts them to the 'short' format, @@ -151,6 +220,7 @@ def shorten_key_ids(key_id_list): short.append(key[-8:]) return short + def download_key(module, url): # FIXME: move get_url code to common, allow for in-memory D/L, support proxies # and reuse here @@ -166,52 +236,69 @@ def download_key(module, url): except Exception: module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc()) + def import_key(module, keyring, keyserver, key_id): if keyring: - cmd = "apt-key --keyring %s adv --keyserver %s --recv %s" % (keyring, keyserver, key_id) + cmd = "%s --keyring %s adv --keyserver %s --recv %s" % (apt_key_bin, keyring, keyserver, key_id) else: - cmd = "apt-key adv --keyserver %s --recv %s" % (keyserver, key_id) - (rc, out, err) = module.run_command(cmd, check_rc=True) + cmd = "%s adv --keyserver %s --recv %s" % (apt_key_bin, keyserver, key_id) + for retry in range(5): + lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + (rc, out, err) = module.run_command(cmd, environ_update=lang_env) + if rc == 0: + break + else: + # Out of retries + if rc == 2 and 'not found on keyserver' in out: + msg = 'Key %s not found on keyserver %s' % (key_id, keyserver) + module.fail_json(cmd=cmd, msg=msg) + else: + msg = "Error fetching key %s from keyserver: %s" % (key_id, keyserver) + module.fail_json(cmd=cmd, msg=msg, rc=rc, stdout=out, stderr=err) return True + def add_key(module, keyfile, keyring, data=None): if data is not None: if keyring: - cmd = "apt-key --keyring %s add -" % keyring + cmd = "%s --keyring %s add -" % (apt_key_bin, 
keyring) else: - cmd = "apt-key add -" + cmd = "%s add -" % apt_key_bin (rc, out, err) = module.run_command(cmd, data=data, check_rc=True, binary_data=True) else: if keyring: - cmd = "apt-key --keyring %s add %s" % (keyring, keyfile) + cmd = "%s --keyring %s add %s" % (apt_key_bin, keyring, keyfile) else: - cmd = "apt-key add %s" % (keyfile) + cmd = "%s add %s" % (apt_key_bin, keyfile) (rc, out, err) = module.run_command(cmd, check_rc=True) return True + def remove_key(module, key_id, keyring): # FIXME: use module.run_command, fail at point of error and don't discard useful stdin/stdout if keyring: - cmd = 'apt-key --keyring %s del %s' % (keyring, key_id) + cmd = '%s --keyring %s del %s' % (apt_key_bin, keyring, key_id) else: - cmd = 'apt-key del %s' % key_id + cmd = '%s del %s' % (apt_key_bin, key_id) (rc, out, err) = module.run_command(cmd, check_rc=True) return True + def main(): module = AnsibleModule( argument_spec=dict( id=dict(required=False, default=None), url=dict(required=False), data=dict(required=False), - file=dict(required=False), + file=dict(required=False, type='path'), key=dict(required=False), - keyring=dict(required=False), + keyring=dict(required=False, type='path'), validate_certs=dict(default='yes', type='bool'), keyserver=dict(required=False), state=dict(required=False, choices=['present', 'absent'], default='present') ), - supports_check_mode=True + supports_check_mode=True, + mutually_exclusive=(('filename', 'keyserver', 'data', 'url'),), ) key_id = module.params['id'] @@ -223,64 +310,70 @@ def main(): keyserver = module.params['keyserver'] changed = False - # we use the "short" id: key_id[-8:], short_format=True - # it's a workaround for https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1481871 - + fingerprint = short_key_id = key_id + short_format = False if key_id: try: - _ = int(key_id, 16) - if key_id.startswith('0x'): - key_id = key_id[2:] - key_id = key_id.upper()[-8:] + short_key_id, fingerprint, key_id = parse_key_id(key_id) 
except ValueError: - module.fail_json(msg="Invalid key_id", id=key_id) + module.fail_json(msg='Invalid key_id', id=key_id) - # FIXME: I think we have a common facility for this, if not, want - check_missing_binaries(module) + if len(fingerprint) == 8: + short_format = True + + find_needed_binaries(module) - short_format = True keys = all_keys(module, keyring, short_format) return_values = {} if state == 'present': - if key_id and key_id in keys: + if fingerprint and fingerprint in keys: module.exit_json(changed=False) + elif fingerprint and fingerprint not in keys and module.check_mode: + ### TODO: Someday we could go further -- write keys out to + # a temporary file and then extract the key id from there via gpg + # to decide if the key is installed or not. + module.exit_json(changed=True) else: if not filename and not data and not keyserver: data = download_key(module, url) - if key_id and key_id in keys: - module.exit_json(changed=False) + + if filename: + add_key(module, filename, keyring) + elif keyserver: + import_key(module, keyring, keyserver, key_id) else: - if module.check_mode: - module.exit_json(changed=True) - if filename: - add_key(module, filename, keyring) - elif keyserver: - import_key(module, keyring, keyserver, key_id) - else: - add_key(module, "-", keyring, data) - changed=False - keys2 = all_keys(module, keyring, short_format) - if len(keys) != len(keys2): - changed=True - if key_id and not key_id in keys2: - module.fail_json(msg="key does not seem to have been added", id=key_id) - module.exit_json(changed=changed) + add_key(module, "-", keyring, data) + + changed = False + keys2 = all_keys(module, keyring, short_format) + if len(keys) != len(keys2): + changed=True + + if fingerprint and fingerprint not in keys2: + module.fail_json(msg="key does not seem to have been added", id=key_id) + module.exit_json(changed=changed) + elif state == 'absent': if not key_id: module.fail_json(msg="key is required") - if key_id in keys: + if fingerprint in 
keys: if module.check_mode: module.exit_json(changed=True) - if remove_key(module, key_id, keyring): - changed=True + + # we use the "short" id: key_id[-8:], short_format=True + # it's a workaround for https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1481871 + if remove_key(module, short_key_id, keyring): + keys = all_keys(module, keyring, short_format) + if fingerprint in keys: + module.fail_json(msg="apt-key del did not return an error but the key was not removed (check that the id is correct and *not* a subkey)", id=key_id) + changed = True else: - # FIXME: module.fail_json or exit-json immediately at point of failure + # FIXME: module.fail_json or exit-json immediately at point of failure module.fail_json(msg="error removing key_id", **return_values) module.exit_json(changed=changed, **return_values) -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() + +if __name__ == '__main__': + main() diff --git a/packaging/os/apt_repository.py b/packaging/os/apt_repository.py index 55b60987824..dc60a2a722f 100644 --- a/packaging/os/apt_repository.py +++ b/packaging/os/apt_repository.py @@ -21,6 +21,10 @@ # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: apt_repository @@ -28,9 +32,8 @@ description: - Add or remove an APT repositories in Ubuntu and Debian. notes: - - This module works on Debian and Ubuntu and requires C(python-apt). + - This module works on Debian, Ubuntu and their derivatives. - This module supports Debian Squeeze (version 6) as well as its successors. - - This module treats Debian and Ubuntu distributions separately. So PPA could be installed only on Ubuntu machines. options: repo: required: true @@ -70,32 +73,57 @@ Defaults to a file name based on the repository source url. The .list extension will be automatically added. 
required: false + codename: + version_added: '2.3' + description: + - Override the distribution codename to use for PPA repositories. + Should usually only be set when working with a PPA on a non-Ubuntu target (e.g. Debian or Mint) + required: false author: "Alexander Saltanov (@sashka)" version_added: "0.7" -requirements: [ python-apt ] +requirements: + - python-apt (python 2) + - python3-apt (python 3) ''' EXAMPLES = ''' # Add specified repository into sources list. -apt_repository: repo='deb http://archive.canonical.com/ubuntu hardy partner' state=present +- apt_repository: + repo: deb http://archive.canonical.com/ubuntu hardy partner + state: present # Add specified repository into sources list using specified filename. -apt_repository: repo='deb http://dl.google.com/linux/chrome/deb/ stable main' state=present filename='google-chrome' +- apt_repository: + repo: deb http://dl.google.com/linux/chrome/deb/ stable main + state: present + filename: 'google-chrome' # Add source repository into sources list. -apt_repository: repo='deb-src http://archive.canonical.com/ubuntu hardy partner' state=present +- apt_repository: + repo: deb-src http://archive.canonical.com/ubuntu hardy partner + state: present # Remove specified repository from sources list. -apt_repository: repo='deb http://archive.canonical.com/ubuntu hardy partner' state=absent - -# On Ubuntu target: add nginx stable repository from PPA and install its signing key. -# On Debian target: adding PPA is not available, so it will fail immediately. -apt_repository: repo='ppa:nginx/stable' +- apt_repository: + repo: deb http://archive.canonical.com/ubuntu hardy partner + state: absent + +# Add nginx stable repository from PPA and install its signing key. 
+# On Ubuntu target: +- apt_repository: + repo: 'ppa:nginx/stable' + +# On Debian target +- apt_repository: + repo: 'ppa:nginx/stable' + codename: 'trusty' + repo: 'ppa:nginx/stable' ''' import glob import os import re +import sys import tempfile try: @@ -108,9 +136,16 @@ distro = None HAVE_PYTHON_APT = False +if sys.version_info[0] < 3: + PYTHON_APT = 'python-apt' +else: + PYTHON_APT = 'python3-apt' + +DEFAULT_SOURCES_PERM = int('0644', 8) VALID_SOURCE_TYPES = ('deb', 'deb-src') + def install_python_apt(module): if not module.check_mode: @@ -118,8 +153,8 @@ def install_python_apt(module): if apt_get_path: rc, so, se = module.run_command([apt_get_path, 'update']) if rc != 0: - module.fail_json(msg="Failed to auto-install python-apt. Error was: '%s'" % se.strip()) - rc, so, se = module.run_command([apt_get_path, 'install', 'python-apt', '-y', '-q']) + module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (PYTHON_APT, se.strip())) + rc, so, se = module.run_command([apt_get_path, 'install', PYTHON_APT, '-y', '-q']) if rc == 0: global apt, apt_pkg, aptsources_distro, distro, HAVE_PYTHON_APT import apt @@ -128,9 +163,10 @@ def install_python_apt(module): distro = aptsources_distro.get_distro() HAVE_PYTHON_APT = True else: - module.fail_json(msg="Failed to auto-install python-apt. Error was: '%s'" % se.strip()) + module.fail_json(msg="Failed to auto-install %s. 
Error was: '%s'" % (PYTHON_APT, se.strip())) else: - module.fail_json(msg="python-apt must be installed to use check mode") + module.fail_json(msg="%s must be installed to use check mode" % PYTHON_APT) + class InvalidSource(Exception): pass @@ -254,7 +290,7 @@ def load(self, file): self.files[file] = group def save(self): - for filename, sources in self.files.items(): + for filename, sources in list(self.files.items()): if sources: d, fn = os.path.split(filename) fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d) @@ -273,13 +309,14 @@ def save(self): try: f.write(line) - except IOError, err: + except IOError: + err = get_exception() self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, unicode(err))) self.module.atomic_move(tmp_path, filename) # allow the user to override the default mode if filename in self.new_repos: - this_mode = self.module.params['mode'] + this_mode = self.module.params.get('mode', DEFAULT_SOURCES_PERM) self.module.set_mode_if_different(filename, this_mode, False) else: del self.files[filename] @@ -363,6 +400,7 @@ class UbuntuSourcesList(SourcesList): def __init__(self, module, add_ppa_signing_keys_callback=None): self.module = module self.add_ppa_signing_keys_callback = add_ppa_signing_keys_callback + self.codename = module.params['codename'] or distro.codename super(UbuntuSourcesList, self).__init__(module) def _get_ppa_info(self, owner_name, ppa_name): @@ -372,7 +410,7 @@ def _get_ppa_info(self, owner_name, ppa_name): response, info = fetch_url(self.module, lp_api, headers=headers) if info['status'] != 200: self.module.fail_json(msg="failed to fetch PPA information, error was: %s" % info['msg']) - return json.load(response) + return json.loads(to_native(response.read())) def _expand_ppa(self, path): ppa = path.split(':')[1] @@ -382,7 +420,7 @@ def _expand_ppa(self, path): except IndexError: ppa_name = 'ppa' - line = 'deb http://ppa.launchpad.net/%s/%s/ubuntu %s main' % (ppa_owner, ppa_name, distro.codename) + line 
= 'deb http://ppa.launchpad.net/%s/%s/ubuntu %s main' % (ppa_owner, ppa_name, self.codename) return line, ppa_owner, ppa_name def _key_already_exists(self, key_fingerprint): @@ -403,7 +441,7 @@ def add_source(self, line, comment='', file=None): command = ['apt-key', 'adv', '--recv-keys', '--keyserver', 'hkp://keyserver.ubuntu.com:80', info['signing_key_fingerprint']] self.add_ppa_signing_keys_callback(command) - file = file or self._suggest_filename('%s_%s' % (line, distro.codename)) + file = file or self._suggest_filename('%s_%s' % (line, self.codename)) else: source = self._parse(line, raise_if_invalid_or_disabled=True)[2] file = file or self._suggest_filename(source) @@ -451,12 +489,13 @@ def main(): argument_spec=dict( repo=dict(required=True), state=dict(choices=['present', 'absent'], default='present'), - mode=dict(required=False, default=0644), + mode=dict(required=False, type='raw'), update_cache = dict(aliases=['update-cache'], type='bool', default='yes'), filename=dict(required=False, default=None), # this should not be needed, but exists as a failsafe install_python_apt=dict(required=False, default="yes", type='bool'), validate_certs = dict(default='yes', type='bool'), + codename = dict(required=False), ), supports_check_mode=True, ) @@ -465,21 +504,21 @@ def main(): repo = module.params['repo'] state = module.params['state'] update_cache = module.params['update_cache'] + # Note: mode is referenced in SourcesList class via the passed in module (self here) + sourceslist = None if not HAVE_PYTHON_APT: if params['install_python_apt']: install_python_apt(module) else: - module.fail_json(msg='python-apt is not installed, and install_python_apt is False') + module.fail_json(msg='%s is not installed, and install_python_apt is False' % PYTHON_APT) - if isinstance(distro, aptsources_distro.UbuntuDistribution): + if isinstance(distro, aptsources_distro.Distribution): sourceslist = UbuntuSourcesList(module, 
add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module)) - elif isinstance(distro, aptsources_distro.Distribution): - sourceslist = SourcesList(module) else: - module.fail_json(msg='Module apt_repository supports only Debian and Ubuntu.') + module.fail_json(msg='Module apt_repository is not supported on target.') sources_before = sourceslist.dump() @@ -488,7 +527,8 @@ def main(): sourceslist.add_source(repo) elif state == 'absent': sourceslist.remove_source(repo) - except InvalidSource, err: + except InvalidSource: + err = get_exception() module.fail_json(msg='Invalid repository string: %s' % unicode(err)) sources_after = sourceslist.dump() @@ -510,7 +550,8 @@ def main(): if update_cache: cache = apt.Cache() cache.update() - except OSError, err: + except OSError: + err = get_exception() module.fail_json(msg=unicode(err)) module.exit_json(changed=changed, repo=repo, state=state, diff=diff) @@ -519,4 +560,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/os/apt_rpm.py b/packaging/os/apt_rpm.py old mode 100644 new mode 100755 index fec220e0512..47f6b194cf1 --- a/packaging/os/apt_rpm.py +++ b/packaging/os/apt_rpm.py @@ -19,6 +19,10 @@ # along with this software. If not, see . 
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: apt_rpm @@ -50,13 +54,25 @@ EXAMPLES = ''' # install package foo -- apt_rpm: pkg=foo state=present +- apt_rpm: + pkg: foo + state: present + # remove package foo -- apt_rpm: pkg=foo state=absent -# description: remove packages foo and bar -- apt_rpm: pkg=foo,bar state=absent -# description: update the package database and install bar (bar will be the updated if a newer version exists) -- apt_rpm: name=bar state=present update_cache=yes +- apt_rpm: + pkg: foo + state: absent + +# description: remove packages foo and bar +- apt_rpm: + pkg: foo,bar + state: absent + +# description: update the package database and install bar (bar will be the updated if a newer version exists) +- apt_rpm: + name: bar + state: present + update_cache: yes ''' @@ -75,7 +91,7 @@ def query_package(module, name): # rpm -q returns 0 if the package is installed, # 1 if it is not installed - rc = os.system("%s -q %s" % (RPM_PATH,name)) + rc, out, err = module.run_command("%s -q %s" % (RPM_PATH,name)) if rc == 0: return True else: @@ -84,14 +100,14 @@ def query_package(module, name): def query_package_provides(module, name): # rpm -q returns 0 if the package is installed, # 1 if it is not installed - rc = os.system("%s -q --provides %s >/dev/null" % (RPM_PATH,name)) + rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH,name)) return rc == 0 def update_package_db(module): - rc = os.system("%s update" % APT_PATH) + rc, out, err = module.run_command("%s update" % APT_PATH) if rc != 0: - module.fail_json(msg="could not update package db") + module.fail_json(msg="could not update package db: %s" % err) def remove_packages(module, packages): @@ -102,10 +118,10 @@ def remove_packages(module, packages): if not query_package(module, package): continue - rc = os.system("%s -y remove %s > /dev/null" % (APT_PATH,package)) + rc, out, err = module.run_command("%s -y 
remove %s" % (APT_PATH,package)) if rc != 0: - module.fail_json(msg="failed to remove %s" % (package)) + module.fail_json(msg="failed to remove %s: %s" % (package, err)) remove_c += 1 @@ -124,9 +140,7 @@ def install_packages(module, pkgspec): if len(packages) != 0: - cmd = ("%s -y install %s > /dev/null" % (APT_PATH, packages)) - - rc, out, err = module.run_command(cmd) + rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages)) installed = True for packages in pkgspec: @@ -169,4 +183,5 @@ def main(): # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/os/package.py b/packaging/os/package.py index f4234b5a472..85712b6d903 100644 --- a/packaging/os/package.py +++ b/packaging/os/package.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: package @@ -53,9 +57,13 @@ ''' EXAMPLES = ''' - name: install the latest version of ntpdate - package: name=ntpdate state=latest + package: + name: ntpdate + state: latest # This uses a variable as this changes per distribution. - name: remove the apache package - package : name={{apache}} state=absent + package: + name: "{{ apache }}" + state: absent ''' diff --git a/packaging/os/redhat_subscription.py b/packaging/os/redhat_subscription.py index 19f2ce7cf37..0b56c6eafbf 100644 --- a/packaging/os/redhat_subscription.py +++ b/packaging/os/redhat_subscription.py @@ -1,11 +1,30 @@ #!/usr/bin/python +# James Laska (jlaska@redhat.com) +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: redhat_subscription -short_description: Manage Red Hat Network registration and subscriptions using the C(subscription-manager) command +short_description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command description: - - Manage registration and subscription to the Red Hat Network entitlement platform. + - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command version_added: "1.2" author: "Barnaby Court (@barnabycourt)" notes: @@ -21,22 +40,22 @@ default: "present" username: description: - - Red Hat Network username + - access.redhat.com or Sat6 username required: False default: null password: description: - - Red Hat Network password + - access.redhat.com or Sat6 password required: False default: null server_hostname: description: - - Specify an alternative Red Hat Network server + - Specify an alternative Red Hat Subscription Management or Sat6 server required: False default: Current value from C(/etc/rhsm/rhsm.conf) is the default server_insecure: description: - - Allow traffic over insecure http + - Enable or disable https server certificate verification when connecting to C(server_hostname) required: False default: Current value from C(/etc/rhsm/rhsm.conf) is the default rhsm_baseurl: @@ -56,32 +75,84 @@ default: null org_id: description: - - Organisation ID to use in conjunction with activationkey + - Organization ID to use in conjunction with activationkey required: 
False default: null version_added: "2.0" + environment: + description: + - Register with a specific environment in the destination org. Used with Red Hat Satellite 6.x or Katello + required: False + default: null + version_added: "2.2" pool: description: - Specify a subscription pool name to consume. Regular expressions accepted. required: False default: '^$' + consumer_type: + description: + - The type of unit to register, defaults to system + required: False + default: null + version_added: "2.1" + consumer_name: + description: + - Name of the system to register, defaults to the hostname + required: False + default: null + version_added: "2.1" + consumer_id: + description: + - References an existing consumer ID to resume using a previous registration for this system. If the system's identity certificate is lost or corrupted, this option allows it to resume using its previous identity and subscriptions. The default is to not specify a consumer ID so a new ID is created. + required: False + default: null + version_added: "2.1" + force_register: + description: + - Register the system even if it is already registered + required: False + default: False + version_added: "2.2" ''' EXAMPLES = ''' # Register as user (joe_user) with password (somepass) and auto-subscribe to available content. -- redhat_subscription: state=present username=joe_user password=somepass autosubscribe=true +- redhat_subscription: + state: present + username: joe_user + password: somepass + autosubscribe: true + +# Same as above but with pulling existing system data. 
+- redhat_subscription: + state: present + username: joe_user + password: somepass + consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx # Register with activationkey (1-222333444) and consume subscriptions matching # the names (Red hat Enterprise Server) and (Red Hat Virtualization) -- redhat_subscription: state=present - activationkey=1-222333444 - pool='^(Red Hat Enterprise Server|Red Hat Virtualization)$' +- redhat_subscription: + state: present + activationkey: 1-222333444 + pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$' # Update the consumed subscriptions from the previous example (remove the Red # Hat Virtualization subscription) -- redhat_subscription: state=present - activationkey=1-222333444 - pool='^Red Hat Enterprise Server$' +- redhat_subscription: + state: present + activationkey: 1-222333444 + pool: '^Red Hat Enterprise Server$' + +# Register as user credentials into given environment (against Red Hat +# Satellite 6.x), and auto-subscribe to available content. +- redhat_subscription: + state: present + username: joe_user + password: somepass + environment: Library + autosubscribe: yes ''' import os @@ -174,7 +245,7 @@ def enable(self): def configure(self, **kwargs): ''' - Configure the system as directed for registration with RHN + Configure the system as directed for registration with RHSM Raises: * Exception - if error occurs while running command ''' @@ -195,7 +266,7 @@ def is_registered(self): Determine whether the current system Returns: * Boolean - whether the current system is currently registered to - RHN. + RHSM. ''' # Quick version... 
if False: @@ -209,9 +280,10 @@ def is_registered(self): else: return False - def register(self, username, password, autosubscribe, activationkey, org_id): + def register(self, username, password, autosubscribe, activationkey, org_id, + consumer_type, consumer_name, consumer_id, force_register, environment): ''' - Register the current system to the provided RHN server + Register the current system to the provided RHSM or Sat6 server Raises: * Exception - if error occurs while running command ''' @@ -229,6 +301,16 @@ def register(self, username, password, autosubscribe, activationkey, org_id): args.extend(['--username', username]) if password: args.extend(['--password', password]) + if consumer_type: + args.extend(['--type', consumer_type]) + if consumer_name: + args.extend(['--name', consumer_name]) + if consumer_id: + args.extend(['--consumerid', consumer_id]) + if force_register: + args.extend(['--force']) + if environment: + args.extend(['--environment', environment]) rc, stderr, stdout = self.module.run_command(args, check_rc=True) @@ -382,24 +464,29 @@ def filter(self, regexp='^$'): def main(): # Load RHSM configuration from file - rhn = Rhsm(None) + rhsm = Rhsm(None) module = AnsibleModule( argument_spec = dict( state = dict(default='present', choices=['present', 'absent']), username = dict(default=None, required=False), password = dict(default=None, required=False, no_log=True), - server_hostname = dict(default=rhn.config.get_option('server.hostname'), required=False), - server_insecure = dict(default=rhn.config.get_option('server.insecure'), required=False), - rhsm_baseurl = dict(default=rhn.config.get_option('rhsm.baseurl'), required=False), + server_hostname = dict(default=rhsm.config.get_option('server.hostname'), required=False), + server_insecure = dict(default=rhsm.config.get_option('server.insecure'), required=False), + rhsm_baseurl = dict(default=rhsm.config.get_option('rhsm.baseurl'), required=False), autosubscribe = dict(default=False, 
type='bool'), activationkey = dict(default=None, required=False), org_id = dict(default=None, required=False), + environment = dict(default=None, required=False, type='str'), pool = dict(default='^$', required=False, type='str'), + consumer_type = dict(default=None, required=False), + consumer_name = dict(default=None, required=False), + consumer_id = dict(default=None, required=False), + force_register = dict(default=False, type='bool'), ) ) - rhn.module = module + rhsm.module = module state = module.params['state'] username = module.params['username'] password = module.params['password'] @@ -409,7 +496,12 @@ def main(): autosubscribe = module.params['autosubscribe'] == True activationkey = module.params['activationkey'] org_id = module.params['org_id'] + environment = module.params['environment'] pool = module.params['pool'] + consumer_type = module.params["consumer_type"] + consumer_name = module.params["consumer_name"] + consumer_id = module.params["consumer_id"] + force_register = module.params["force_register"] # Ensure system is registered if state == 'present': @@ -421,11 +513,12 @@ def main(): module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username or password") # Register system - if rhn.is_registered: + if rhsm.is_registered and not force_register: if pool != '^$': try: - result = rhn.update_subscriptions(pool) - except Exception, e: + result = rhsm.update_subscriptions(pool) + except Exception: + e = get_exception() module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, e)) else: module.exit_json(**result) @@ -433,11 +526,14 @@ def main(): module.exit_json(changed=False, msg="System already registered.") else: try: - rhn.enable() - rhn.configure(**module.params) - rhn.register(username, password, autosubscribe, activationkey, org_id) - subscribed_pool_ids = rhn.subscribe(pool) - except Exception, e: + rhsm.enable() + rhsm.configure(**module.params) + rhsm.register(username, 
password, autosubscribe, activationkey, org_id, + consumer_type, consumer_name, consumer_id, force_register, + environment) + subscribed_pool_ids = rhsm.subscribe(pool) + except Exception: + e = get_exception() module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, e)) else: module.exit_json(changed=True, @@ -445,13 +541,14 @@ def main(): subscribed_pool_ids=subscribed_pool_ids) # Ensure system is *not* registered if state == 'absent': - if not rhn.is_registered: + if not rhsm.is_registered: module.exit_json(changed=False, msg="System already unregistered.") else: try: - rhn.unsubscribe() - rhn.unregister() - except Exception, e: + rhsm.unsubscribe() + rhsm.unregister() + except Exception: + e = get_exception() module.fail_json(msg="Failed to unregister: %s" % e) else: module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname) @@ -459,4 +556,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/packaging/os/rhn_channel.py b/packaging/os/rhn_channel.py index 0071183158e..c2e87f1bd91 100644 --- a/packaging/os/rhn_channel.py +++ b/packaging/os/rhn_channel.py @@ -17,6 +17,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rhn_channel @@ -26,7 +30,7 @@ version_added: "1.1" author: "Vincent Van der Kussen (@vincentvdk)" notes: - - this module fetches the system id from RHN. + - this module fetches the system id from RHN. 
requirements: - none options: @@ -46,7 +50,7 @@ required: false default: present url: - description: + description: - The full url to the RHN/Satellite api required: true user: @@ -60,7 +64,12 @@ ''' EXAMPLES = ''' -- rhn_channel: name=rhel-x86_64-server-v2vwin-6 sysname=server01 url=https://rhn.redhat.com/rpc/api user=rhnuser password=guessme +- rhn_channel: + name: rhel-x86_64-server-v2vwin-6 + sysname: server01 + url: https://rhn.redhat.com/rpc/api + user: rhnuser + password: guessme ''' import xmlrpclib @@ -166,5 +175,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/os/rhn_register.py b/packaging/os/rhn_register.py index d73f9ff9671..c228f0b1b7a 100644 --- a/packaging/os/rhn_register.py +++ b/packaging/os/rhn_register.py @@ -17,6 +17,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rhn_register @@ -83,30 +87,44 @@ EXAMPLES = ''' # Unregister system from RHN. -- rhn_register: state=absent username=joe_user password=somepass +- rhn_register: + state: absent + username: joe_user + password: somepass # Register as user (joe_user) with password (somepass) and auto-subscribe to available content. -- rhn_register: state=present username=joe_user password=somepass +- rhn_register: + state: present + username: joe_user + password: somepass # Register with activationkey (1-222333444) and enable extended update support. -- rhn_register: state=present activationkey=1-222333444 enable_eus=true +- rhn_register: + state: present + activationkey: 1-222333444 + enable_eus: true # Register with activationkey (1-222333444) and set a profilename which may differ from the hostname. 
-- rhn_register: state=present activationkey=1-222333444 profilename=host.example.com.custom +- rhn_register: + state: present + activationkey: 1-222333444 + profilename: host.example.com.custom # Register as user (joe_user) with password (somepass) against a satellite # server specified by (server_url). -- rhn_register: > - state=present - username=joe_user - password=somepass - server_url=https://xmlrpc.my.satellite/XMLRPC +- rhn_register: + state: present + username: joe_user + password: somepass' + server_url: https://xmlrpc.my.satellite/XMLRPC # Register as user (joe_user) with password (somepass) and enable # channels (rhel-x86_64-server-6-foo-1) and (rhel-x86_64-server-6-bar-1). -- rhn_register: state=present username=joe_user - password=somepass - channels=rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1 +- rhn_register: + state: present + username: joe_user + password: somepass + channels: rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1 ''' import sys @@ -119,8 +137,9 @@ try: import up2date_client import up2date_client.config -except ImportError, e: - module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?\n%s" % e) + HAS_UP2DATE_CLIENT = True +except ImportError: + HAS_UP2DATE_CLIENT = False # INSERT REDHAT SNIPPETS from ansible.module_utils.redhat import * @@ -137,15 +156,19 @@ def load_config(self): ''' Read configuration from /etc/sysconfig/rhn/up2date ''' + if not HAS_UP2DATE_CLIENT: + return None + self.config = up2date_client.config.initUp2dateConfig() # Add support for specifying a default value w/o having to standup some # configuration. Yeah, I know this should be subclassed ... but, oh # well def get_option_default(self, key, default=''): - # ignore pep8 W601 errors for this line - # setting this to use 'in' does not work in the rhn library - if self.has_key(key): + # the class in rhn-client-tools that this comes from didn't + # implement __contains__() until 2.5.x. 
That's why we check if + # the key is present in the dictionary that is the actual storage + if key in self.dict: return self[key] else: return default @@ -279,10 +302,31 @@ def unregister(self): def subscribe(self, channels=[]): if len(channels) <= 0: return - current_channels = self.api('channel.software.listSystemChannels', self.systemid) - new_channels = [item['channel_label'] for item in current_channels] - new_channels.extend(channels) - return self.api('channel.software.setSystemChannels', self.systemid, new_channels) + if self._is_hosted(): + current_channels = self.api('channel.software.listSystemChannels', self.systemid) + new_channels = [item['channel_label'] for item in current_channels] + new_channels.extend(channels) + return self.api('channel.software.setSystemChannels', self.systemid, list(new_channels)) + else: + current_channels = self.api('channel.software.listSystemChannels', self.systemid) + current_channels = [item['label'] for item in current_channels] + new_base = None + new_childs = [] + for ch in channels: + if ch in current_channels: + continue + if self.api('channel.software.getDetails', ch)['parent_channel_label'] == '': + new_base = ch + else: + if ch not in new_childs: + new_childs.append(ch) + out_base = 0 + out_childs = 0 + if new_base: + out_base = self.api('system.setBaseChannel', self.systemid, new_base) + if new_childs: + out_childs = self.api('system.setChildChannels', self.systemid, new_childs) + return out_base and out_childs def _subscribe(self, channels=[]): ''' @@ -298,6 +342,16 @@ def _subscribe(self, channels=[]): if re.search(wanted_repo, available_channel): rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --add --channel=%s" % available_channel, check_rc=True) + def _is_hosted(self): + ''' + Return True if we are running against Hosted (rhn.redhat.com) or + False otherwise (when running against Satellite or Spacewalk) + ''' + if 'rhn.redhat.com' in self.hostname: + return True + else: + return False + 
def main(): # Read system RHN configuration @@ -308,7 +362,7 @@ def main(): state = dict(default='present', choices=['present', 'absent']), username = dict(default=None, required=False), password = dict(default=None, required=False, no_log=True), - server_url = dict(default=rhn.config.get_option('serverURL'), required=False), + server_url = dict(default=None, required=False), activationkey = dict(default=None, required=False, no_log=True), profilename = dict(default=None, required=False), sslcacert = dict(default=None, required=False, type='path'), @@ -318,6 +372,12 @@ def main(): ) ) + if not HAS_UP2DATE_CLIENT: + module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?") + + if not module.params['server_url']: + module.params['server_url'] = rhn.config.get_option('serverURL') + state = module.params['state'] rhn.username = module.params['username'] rhn.password = module.params['password'] @@ -346,7 +406,8 @@ def main(): rhn.enable() rhn.register(module.params['enable_eus'] == True, activationkey, profilename, sslcacert, systemorgid) rhn.subscribe(channels) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, e)) module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname) @@ -358,10 +419,12 @@ def main(): else: try: rhn.unregister() - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="Failed to unregister: %s" % e) module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname) -main() +if __name__ == '__main__': + main() diff --git a/packaging/os/rpm_key.py b/packaging/os/rpm_key.py index dde35df7aad..9cb058c56aa 100644 --- a/packaging/os/rpm_key.py +++ b/packaging/os/rpm_key.py @@ -19,6 +19,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rpm_key @@ -39,7 +43,7 @@ default: "present" choices: [present, absent] description: - - Wheather the key will be imported or removed from the rpm db. + - If the key will be imported or removed from the rpm db. validate_certs: description: - If C(no) and the C(key) is a url starting with https, SSL certificates will not be validated. This should only be used @@ -52,17 +56,22 @@ EXAMPLES = ''' # Example action to import a key from a url -- rpm_key: state=present key=http://apt.sw.be/RPM-GPG-KEY.dag.txt +- rpm_key: + state: present + key: http://apt.sw.be/RPM-GPG-KEY.dag.txt # Example action to import a key from a file -- rpm_key: state=present key=/path/to/key.gpg +- rpm_key: + state: present + key: /path/to/key.gpg # Example action to ensure a key is not present in the db -- rpm_key: state=absent key=DEADB33F +- rpm_key: + state: absent + key: DEADB33F ''' import re import os.path -import urllib2 import tempfile def is_pubkey(string): @@ -115,18 +124,18 @@ def __init__(self, module): def fetch_key(self, url): """Downloads a key from url, returns a valid path to a gpg key""" - try: - rsp, info = fetch_url(self.module, url) - key = rsp.read() - if not is_pubkey(key): - self.module.fail_json(msg="Not a public key: %s" % url) - tmpfd, tmpname = tempfile.mkstemp() - tmpfile = os.fdopen(tmpfd, "w+b") - tmpfile.write(key) - tmpfile.close() - return tmpname - except urllib2.URLError, e: - self.module.fail_json(msg=str(e)) + rsp, info = fetch_url(self.module, url) + if info['status'] != 200: + self.module.fail_json(msg="failed to fetch key at %s , error was: %s" % (url, info['msg'])) + + key = rsp.read() + if not is_pubkey(key): + self.module.fail_json(msg="Not a public key: %s" % url) + tmpfd, tmpname = tempfile.mkstemp() + tmpfile = os.fdopen(tmpfd, "w+b") + tmpfile.write(key) + tmpfile.close() + return tmpname def normalize_keyid(self, keyid): 
"""Ensure a keyid doesn't have a leading 0x, has leading or trailing whitespace, and make sure is lowercase""" diff --git a/packaging/os/yum.py b/packaging/os/yum.py index ad0f93e82f7..18e71713902 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -36,6 +36,10 @@ except: transaction_helpers = False +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: yum @@ -156,31 +160,50 @@ EXAMPLES = ''' - name: install the latest version of Apache - yum: name=httpd state=latest + yum: + name: httpd + state: latest - name: remove the Apache package - yum: name=httpd state=absent + yum: + name: httpd + state: absent - name: install the latest version of Apache from the testing repo - yum: name=httpd enablerepo=testing state=present + yum: + name: httpd + enablerepo: testing + state: present - name: install one specific version of Apache - yum: name=httpd-2.2.29-1.4.amzn1 state=present + yum: + name: httpd-2.2.29-1.4.amzn1 + state: present - name: upgrade all packages - yum: name=* state=latest + yum: + name: '*' + state: latest - name: install the nginx rpm from a remote repo - yum: name=http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present + yum: + name: http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm + state: present - name: install nginx rpm from a local file - yum: name=/usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present + yum: + name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm + state: present - name: install the 'Development tools' package group - yum: name="@Development tools" state=present + yum: + name: "@Development tools" + state: present - name: install the 'Gnome desktop' environment group - yum: name="@^gnome-desktop-environment" state=present + yum: + name: "@^gnome-desktop-environment" + state: present ''' # 64k. 
Number of bytes to read at a time when manually downloading pkgs via a url @@ -219,6 +242,27 @@ def ensure_yum_utils(module): return repoquerybin +def fetch_rpm_from_url(spec, module=None): + # download package so that we can query it + tempdir = tempfile.mkdtemp() + package = os.path.join(tempdir, str(spec.rsplit('/', 1)[1])) + try: + rsp, info = fetch_url(module, spec) + if not rsp: + module.fail_json(msg="Failure downloading %s, %s" % (spec, info['msg'])) + f = open(package, 'w') + data = rsp.read(BUFSIZE) + while data: + f.write(data) + data = rsp.read(BUFSIZE) + f.close() + except Exception: + e = get_exception() + shutil.rmtree(tempdir) + if module: + module.fail_json(msg="Failure downloading %s, %s" % (spec, e)) + return package + def po_to_nevra(po): if hasattr(po, 'ui_nevra'): @@ -245,7 +289,8 @@ def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, di pkgs = e + m if not pkgs and not is_pkg: pkgs.extend(my.returnInstalledPackagesByDep(pkgspec)) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="Failure talking to yum: %s" % e) return [ po_to_nevra(p) for p in pkgs ] @@ -301,7 +346,8 @@ def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, di pkgs = e + m if not pkgs: pkgs.extend(my.returnPackagesByDep(pkgspec)) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="Failure talking to yum: %s" % e) return [ po_to_nevra(p) for p in pkgs ] @@ -348,7 +394,8 @@ def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, dis_r e,m,u = my.pkgSack.matchPackageNames([pkgspec]) pkgs = e + m updates = my.doPackageLists(pkgnarrow='updates').updates - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="Failure talking to yum: %s" % e) for pkg in pkgs: @@ -381,6 +428,13 @@ def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=None, if dis_repos is None: dis_repos = [] + if 
req_spec.endswith('.rpm') and '://' not in req_spec: + return req_spec + + elif '://' in req_spec: + local_path = fetch_rpm_from_url(req_spec, module=module) + return local_path + if not repoq: pkgs = [] @@ -399,7 +453,8 @@ def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=None, e,m,u = my.rpmdb.matchPackageNames([req_spec]) pkgs.extend(e) pkgs.extend(m) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="Failure talking to yum: %s" % e) return set([ po_to_nevra(p) for p in pkgs ]) @@ -477,24 +532,11 @@ def local_nvra(module, path): finally: os.close(fd) - return '%s-%s-%s.%s' % (header[rpm.RPMTAG_NAME], + return '%s-%s-%s.%s' % (header[rpm.RPMTAG_NAME], header[rpm.RPMTAG_VERSION], header[rpm.RPMTAG_RELEASE], header[rpm.RPMTAG_ARCH]) - -def local_name(module, path): - """return package name of a local rpm passed in""" - ts = rpm.TransactionSet() - ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES) - fd = os.open(path, os.O_RDONLY) - try: - header = ts.hdrFromFdno(fd) - finally: - os.close(fd) - - return header[rpm.RPMTAG_NAME] - def pkg_to_dict(pkgstr): if pkgstr.strip(): @@ -566,12 +608,15 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): if spec.endswith('.rpm') and '://' not in spec: # get the pkg name-v-r.arch if not os.path.exists(spec): - res['msg'] += "No Package file matching '%s' found on system" % spec + res['msg'] += "No RPM file matching '%s' found on system" % spec + res['results'].append("No RPM file matching '%s' found on system" % spec) + res['rc'] = 127 # Ensure the task fails in with-loop module.fail_json(**res) - pkg_name = local_name(module, spec) + nvra = local_nvra(module, spec) + # look for them in the rpmdb - if is_installed(module, repoq, pkg_name, conf_file, en_repos=en_repos, dis_repos=dis_repos): + if is_installed(module, repoq, nvra, conf_file, en_repos=en_repos, dis_repos=dis_repos): # if they are there, skip it continue pkg = spec @@ -579,21 +624,9 @@ def 
install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # URL elif '://' in spec: # download package so that we can check if it's already installed - package = os.path.join(tempdir, str(spec.rsplit('/', 1)[1])) - try: - rsp, info = fetch_url(module, spec) - f = open(package, 'w') - data = rsp.read(BUFSIZE) - while data: - f.write(data) - data = rsp.read(BUFSIZE) - f.close() - except Exception, e: - shutil.rmtree(tempdir) - module.fail_json(msg="Failure downloading %s, %s" % (spec, e)) - - pkg_name = local_name(module, package) - if is_installed(module, repoq, pkg_name, conf_file, en_repos=en_repos, dis_repos=dis_repos): + package = fetch_rpm_from_url(spec, module=module) + nvra = local_nvra(module, package) + if is_installed(module, repoq, nvra, conf_file, en_repos=en_repos, dis_repos=dis_repos): # if it's there, skip it continue pkg = package @@ -613,11 +646,13 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): if installed_pkgs: res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], spec)) continue - + # look up what pkgs provide this pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos) if not pkglist: - res['msg'] += "No Package matching '%s' found available, installed or updated" % spec + res['msg'] += "No package matching '%s' found available, installed or updated" % spec + res['results'].append("No package matching '%s' found available, installed or updated" % spec) + res['rc'] = 126 # Ensure the task fails in with-loop module.fail_json(**res) # if any of the packages are involved in a transaction, fail now @@ -625,6 +660,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): conflicts = transaction_exists(pkglist) if len(conflicts) > 0: res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts) + res['rc'] = 125 # Ensure the task fails in with-loop module.fail_json(**res) # 
if any of them are installed @@ -664,7 +700,8 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # Remove rpms downloaded for EL5 via url try: shutil.rmtree(tempdir) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e)) module.exit_json(changed=True, results=res['results'], changes=dict(installed=pkgs)) @@ -678,8 +715,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): for spec in items: # Fail on invalid urls: if ('://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)): - err = 'Package at %s could not be installed' % spec - module.fail_json(changed=False,msg=err,rc=1) + module.fail_json(msg='Package at %s could not be installed' % spec, rc=1, changed=False) if (rc != 0 and 'Nothing to do' in err) or 'Nothing to do' in out: # avoid failing in the 'Nothing To Do' case # this may happen with an URL spec. @@ -704,7 +740,8 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # Remove rpms downloaded for EL5 via url try: shutil.rmtree(tempdir) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e)) return res @@ -750,7 +787,7 @@ def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # of the process # at this point we should check to see if the pkg is no longer present - + for pkg in pkgs: if not pkg.startswith('@'): # we can't sensibly check for a group being uninstalled reliably # look to see if the pkg shows up from is_installed. 
If it doesn't @@ -788,6 +825,8 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): res['results'].append('Nothing to do here, all packages are up to date') return res elif rc == 100: + # remove incorrect new lines in longer columns in output from yum check-update + out=re.sub('\n\W+', ' ', out) available_updates = out.split('\n') # build update dictionary for line in available_updates: @@ -816,6 +855,7 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # some guess work involved with groups. update @ will install the group if missing if spec.startswith('@'): pkgs['update'].append(spec) + will_update.add(spec) continue # dep/pkgname - find it else: @@ -826,7 +866,9 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos) # FIXME..? may not be desirable to throw an exception here if a single package is missing if not pkglist: - res['msg'] += "No Package matching '%s' found available, installed or updated" % spec + res['msg'] += "No package matching '%s' found available, installed or updated" % spec + res['results'].append("No package matching '%s' found available, installed or updated" % spec) + res['rc'] = 126 # Ensure the task fails in with-loop module.fail_json(**res) nothing_to_do = True @@ -839,7 +881,7 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # or virtual provides (like "python-*" or "smtp-daemon") while # updates contains name only. 
this_name_only = '-'.join(this.split('-')[:-2]) - if spec in pkgs['update'] and this_name_only in updates.keys(): + if spec in pkgs['update'] and this_name_only in updates: nothing_to_do = False will_update.add(spec) # Massage the updates list @@ -858,6 +900,8 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): conflicts = transaction_exists(pkglist) if len(conflicts) > 0: res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts) + res['results'].append("The following packages have pending transactions: %s" % ", ".join(conflicts)) + res['rc'] = 128 # Ensure the task fails in with-loop module.fail_json(**res) # check_mode output @@ -888,14 +932,16 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): if len(pkgs['install']) > 0: # install missing cmd = yum_basecmd + ['install'] + pkgs['install'] rc, out, err = module.run_command(cmd) - res['changed'] = True + if not out.strip().lower().endswith("no packages marked for update"): + res['changed'] = True else: rc, out, err = [0, '', ''] if len(will_update) > 0: # update present cmd = yum_basecmd + ['update'] + pkgs['update'] rc2, out2, err2 = module.run_command(cmd) - res['changed'] = True + if not out2.strip().lower().endswith("no packages marked for update"): + res['changed'] = True else: rc2, out2, err2 = [0, '', ''] @@ -916,7 +962,14 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): def ensure(module, state, pkgs, conf_file, enablerepo, disablerepo, disable_gpg_check, exclude, repoq): - yumbin = module.get_bin_path('yum') + # fedora will redirect yum to dnf, which has incompatibilities + # with how this module expects yum to operate. If yum-deprecated + # is available, use that instead to emulate the old behaviors. 
+ if module.get_bin_path('yum-deprecated'): + yumbin = module.get_bin_path('yum-deprecated') + else: + yumbin = module.get_bin_path('yum') + # need debug level 2 to get 'Nothing to do' for groupinstall. yum_basecmd = [yumbin, '-d', '2', '-y'] @@ -959,9 +1012,11 @@ def ensure(module, state, pkgs, conf_file, enablerepo, disablerepo, rid = my.repos.getRepo(i) a = rid.repoXML.repoid current_repos = new_repos - except yum.Errors.YumBaseError, e: + except yum.Errors.YumBaseError: + e = get_exception() module.fail_json(msg="Error setting/accessing repos: %s" % (e)) - except yum.Errors.YumBaseError, e: + except yum.Errors.YumBaseError: + e = get_exception() module.fail_json(msg="Error accessing repos: %s" % e) if state in ['installed', 'present']: if disable_gpg_check: @@ -976,7 +1031,7 @@ def ensure(module, state, pkgs, conf_file, enablerepo, disablerepo, else: # should be caught by AnsibleModule argument_spec module.fail_json(msg="we should never get here unless this all" - " failed", changed=False, results='', errors='unepected state') + " failed", changed=False, results='', errors='unexpected state') return res diff --git a/source_control/git.py b/source_control/git.py index 3bccf877cd6..6c79e8a3668 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -18,10 +18,14 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: git -author: +author: - "Ansible Core Team" - "Michael DeHaan" version_added: "0.0.1" @@ -46,16 +50,17 @@ default: "HEAD" description: - What version of the repository to check out. This can be the - full 40-character I(SHA-1) hash, the literal string C(HEAD), a - branch name, or a tag name. + the literal string C(HEAD), a branch name, a tag name. 
+ It can also be a I(SHA-1) hash, in which case C(refspec) needs + to be specified if the given revision is not already available. accept_hostkey: required: false default: "no" choices: [ "yes", "no" ] version_added: "1.5" description: - - if C(yes), adds the hostkey for the repo url if not already - added. If ssh_opts contains "-o StrictHostKeyChecking=no", + - if C(yes), adds the hostkey for the repo url if not already + added. If ssh_opts contains "-o StrictHostKeyChecking=no", this parameter is ignored. ssh_opts: required: false @@ -110,7 +115,7 @@ description: - Create a shallow clone with a history truncated to the specified number or revisions. The minimum possible value is C(1), otherwise - ignored. + ignored. Needs I(git>=1.9.1) to work correctly. clone: required: false default: "yes" @@ -140,6 +145,13 @@ description: - if C(yes), repository will be created as a bare repo, otherwise it will be a standard repo with a workspace. + umask: + required: false + default: null + version_added: "2.2" + description: + - The umask to set before doing any checkouts, or any other + repository maintenance. recursive: required: false @@ -174,46 +186,103 @@ be trusted in the GPG trustdb. requirements: - - git (the command line tool) + - git>=1.7.1 (the command line tool) + notes: - "If the task seems to be hanging, first verify remote host is in C(known_hosts). - SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, - one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling + SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, + one solution is to use the option accept_hostkey. Another solution is to + add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts." 
''' EXAMPLES = ''' # Example git checkout from Ansible Playbooks -- git: repo=git://foosball.example.org/path/to/repo.git - dest=/srv/checkout - version=release-0.22 +- git: + repo: git://foosball.example.org/path/to/repo.git + dest: /srv/checkout + version: release-0.22 # Example read-write git checkout from github -- git: repo=ssh://git@github.com/mylogin/hello.git dest=/home/mylogin/hello +- git: + repo: ssh://git@github.com/mylogin/hello.git + dest: /home/mylogin/hello # Example just ensuring the repo checkout exists -- git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout update=no +- git: + repo: git://foosball.example.org/path/to/repo.git + dest: /srv/checkout + update: no # Example just get information about the repository whether or not it has # already been cloned locally. -- git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout clone=no update=no +- git: + repo: git://foosball.example.org/path/to/repo.git + dest: /srv/checkout + clone: no + update: no # Example checkout a github repo and use refspec to fetch all pull requests -- git: repo=https://github.com/ansible/ansible-examples.git dest=/src/ansible-examples refspec=+refs/pull/*:refs/heads/* +- git: + repo: https://github.com/ansible/ansible-examples.git + dest: /src/ansible-examples + refspec: '+refs/pull/*:refs/heads/*' ''' +import os import re +import shlex +import stat +import sys import tempfile +from distutils.version import LooseVersion + +from ansible.module_utils.basic import AnsibleModule, get_module_path +from ansible.module_utils.known_hosts import add_git_host_key +from ansible.module_utils.six import b, string_types +from ansible.module_utils._text import to_native + + +def head_splitter(headfile, remote, module=None, fail_on_error=False): + '''Extract the head reference''' + # https://github.com/ansible/ansible-modules-core/pull/907 + + res = None + if os.path.exists(headfile): + 
rawdata = None + try: + f = open(headfile, 'r') + rawdata = f.readline() + f.close() + except: + if fail_on_error and module: + module.fail_json(msg="Unable to read %s" % headfile) + if rawdata: + try: + rawdata = rawdata.replace('refs/remotes/%s' % remote, '', 1) + refparts = rawdata.split(' ') + newref = refparts[-1] + nrefparts = newref.split('/',2) + res = nrefparts[-1].rstrip('\n') + except: + if fail_on_error and module: + module.fail_json(msg="Unable to split head from '%s'" % rawdata) + return res + + +def unfrackgitpath(path): + # copied from ansible.utils.path + return os.path.normpath(os.path.realpath(os.path.expanduser(os.path.expandvars(path)))) def get_submodule_update_params(module, git_path, cwd): - #or: git submodule [--quiet] update [--init] [-N|--no-fetch] - #[-f|--force] [--rebase] [--reference ] [--merge] + #or: git submodule [--quiet] update [--init] [-N|--no-fetch] + #[-f|--force] [--rebase] [--reference ] [--merge] #[--recursive] [--] [...] params = [] - # run a bad submodule command to get valid params + # run a bad submodule command to get valid params cmd = "%s submodule update --help" % (git_path) rc, stdout, stderr = module.run_command(cmd, cwd=cwd) lines = stderr.split('\n') @@ -226,7 +295,7 @@ def get_submodule_update_params(module, git_path, cwd): update_line = update_line.replace(']','') update_line = update_line.replace('|',' ') parts = shlex.split(update_line) - for part in parts: + for part in parts: if part.startswith('--'): part = part.replace('--', '') params.append(part) @@ -255,7 +324,7 @@ def write_ssh_wrapper(): if [ -z "$GIT_KEY" ]; then ssh $BASEOPTS "$@" else - ssh -i "$GIT_KEY" $BASEOPTS "$@" + ssh -i "$GIT_KEY" -o IdentitiesOnly=yes $BASEOPTS "$@" fi """ fh.write(template) @@ -274,7 +343,7 @@ def set_git_ssh(ssh_wrapper, key_file, ssh_opts): del os.environ["GIT_KEY"] if key_file: - os.environ["GIT_KEY"] = key_file + os.environ["GIT_KEY"] = key_file if os.environ.get("GIT_SSH_OPTS"): del os.environ["GIT_SSH_OPTS"] @@ 
-287,14 +356,14 @@ def get_version(module, git_path, dest, ref="HEAD"): cmd = "%s rev-parse %s" % (git_path, ref) rc, stdout, stderr = module.run_command(cmd, cwd=dest) - sha = stdout.rstrip('\n') + sha = to_native(stdout).rstrip('\n') return sha def get_submodule_versions(git_path, module, dest, version='HEAD'): cmd = [git_path, 'submodule', 'foreach', git_path, 'rev-parse', version] (rc, out, err) = module.run_command(cmd, cwd=dest) if rc != 0: - module.fail_json(msg='Unable to determine hashes of submodules') + module.fail_json(msg='Unable to determine hashes of submodules', stdout=out, stderr=err, rc=rc) submodules = {} subm_name = None for line in out.splitlines(): @@ -315,21 +384,25 @@ def get_submodule_versions(git_path, module, dest, version='HEAD'): def clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec, verify_commit): ''' makes a new git repo if it does not already exist ''' + dest_dirname = os.path.dirname(dest) try: os.makedirs(dest_dirname) except: pass cmd = [ git_path, 'clone' ] + if bare: cmd.append('--bare') else: cmd.extend([ '--origin', remote ]) - if is_remote_branch(git_path, module, dest, repo, version) \ - or is_remote_tag(git_path, module, dest, repo, version): - cmd.extend([ '--branch', version ]) if depth: - cmd.extend([ '--depth', str(depth) ]) + if version == 'HEAD' \ + or refspec \ + or is_remote_branch(git_path, module, dest, repo, version) \ + or is_remote_tag(git_path, module, dest, repo, version): + # only use depth if the remote opject is branch or tag (i.e. 
fetchable) + cmd.extend([ '--depth', str(depth) ]) if reference: cmd.extend([ '--reference', str(reference) ]) cmd.extend([ repo, dest ]) @@ -339,7 +412,11 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare, module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest) if refspec: - module.run_command([git_path, 'fetch', remote, refspec], check_rc=True, cwd=dest) + cmd = [git_path, 'fetch'] + if depth: + cmd.extend([ '--depth', str(depth) ]) + cmd.extend([remote, refspec]) + module.run_command(cmd, check_rc=True, cwd=dest) if verify_commit: verify_commit_sign(git_path, module, dest, version) @@ -348,10 +425,10 @@ def has_local_mods(module, git_path, dest, bare): if bare: return False - cmd = "%s status -s" % (git_path) + cmd = "%s status --porcelain" % (git_path) rc, stdout, stderr = module.run_command(cmd, cwd=dest) lines = stdout.splitlines() - lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines) + lines = list(filter(lambda c: not re.search('^\\?\\?.*$', c), lines)) return len(lines) > 0 @@ -364,6 +441,26 @@ def reset(git_path, module, dest): cmd = "%s reset --hard HEAD" % (git_path,) return module.run_command(cmd, check_rc=True, cwd=dest) +def get_diff(module, git_path, dest, repo, remote, depth, bare, before, after): + ''' Return the difference between 2 versions ''' + if before is None: + return { 'prepared': '>> Newly checked out %s' % after } + elif before != after: + # Ensure we have the object we are referring to during git diff ! 
+ git_version_used = git_version(git_path, module) + fetch(git_path, module, repo, dest, after, remote, depth, bare, '', git_version_used) + cmd = '%s diff %s %s' % (git_path, before, after) + (rc, out, err) = module.run_command(cmd, cwd=dest) + if rc == 0 and out: + return { 'prepared': out } + elif rc == 0: + return { 'prepared': '>> No visual differences between %s and %s' % (before, after) } + elif err: + return { 'prepared': '>> Failed to get proper diff between %s and %s:\n>> %s' % (before, after, err) } + else: + return { 'prepared': '>> Failed to get proper diff between %s and %s' % (before, after) } + return {} + def get_remote_head(git_path, module, dest, version, remote, bare): cloning = False cwd = None @@ -390,10 +487,12 @@ def get_remote_head(git_path, module, dest, version, remote, bare): return version (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd) if len(out) < 1: - module.fail_json(msg="Could not determine remote revision for %s" % version) + module.fail_json(msg="Could not determine remote revision for %s" % version, stdout=out, stderr=err, rc=rc) + + out = to_native(out) if tag: - # Find the dereferenced tag if this is an annotated tag. + # Find the dereferenced tag if this is an annotated tag. 
for tag in out.split('\n'): if tag.endswith(version + '^{}'): out = tag @@ -407,19 +506,20 @@ def get_remote_head(git_path, module, dest, version, remote, bare): def is_remote_tag(git_path, module, dest, remote, version): cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version) (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) - if version in out: + if to_native(version, errors='surrogate_or_strict') in out: return True else: return False def get_branches(git_path, module, dest): branches = [] - cmd = '%s branch -a' % (git_path,) + cmd = '%s branch --no-color -a' % (git_path,) (rc, out, err) = module.run_command(cmd, cwd=dest) if rc != 0: - module.fail_json(msg="Could not determine branch data - received %s" % out) + module.fail_json(msg="Could not determine branch data - received %s" % out, stdout=out, stderr=err) for line in out.split('\n'): - branches.append(line.strip()) + if line.strip(): + branches.append(line.strip()) return branches def get_tags(git_path, module, dest): @@ -427,15 +527,16 @@ def get_tags(git_path, module, dest): cmd = '%s tag' % (git_path,) (rc, out, err) = module.run_command(cmd, cwd=dest) if rc != 0: - module.fail_json(msg="Could not determine tag data - received %s" % out) - for line in out.split('\n'): - tags.append(line.strip()) + module.fail_json(msg="Could not determine tag data - received %s" % out, stdout=out, stderr=err) + for line in to_native(out).split('\n'): + if line.strip(): + tags.append(line.strip()) return tags def is_remote_branch(git_path, module, dest, remote, version): cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version) (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) - if version in out: + if to_native(version, errors='surrogate_or_strict') in out: return True else: return False @@ -452,8 +553,8 @@ def is_local_branch(git_path, module, dest, branch): def is_not_a_branch(git_path, module, dest): branches = get_branches(git_path, module, dest) - for b in 
branches: - if b.startswith('* ') and ('no branch' in b or 'detached from' in b): + for branch in branches: + if branch.startswith('* ') and ('no branch' in branch or 'detached from' in branch): return True return False @@ -472,60 +573,118 @@ def get_head_branch(git_path, module, dest, remote, bare=False): # Check if the .git is a file. If it is a file, it means that we are in a submodule structure. if os.path.isfile(repo_path): try: - gitdir = yaml.safe_load(open(repo_path)).get('gitdir') - # There is a posibility the .git file to have an absolute path. + git_conf = open(repo_path, 'rb') + for line in git_conf: + config_val = line.split(b(':'), 1) + if config_val[0].strip() == b('gitdir'): + gitdir = to_native(config_val[1].strip(), errors='surrogate_or_strict') + break + else: + # No repo path found + return '' + + # There is a possibility the .git file to have an absolute path. if os.path.isabs(gitdir): repo_path = gitdir else: repo_path = os.path.join(repo_path.split('.git')[0], gitdir) except (IOError, AttributeError): + # No repo path found return '' # Read .git/HEAD for the name of the branch. 
# If we're in a detached HEAD state, look up the branch associated with # the remote HEAD in .git/refs/remotes//HEAD - f = open(os.path.join(repo_path, "HEAD")) + headfile = os.path.join(repo_path, "HEAD") if is_not_a_branch(git_path, module, dest): - f.close() - f = open(os.path.join(repo_path, 'refs', 'remotes', remote, 'HEAD')) - branch = f.readline().split('/')[-1].rstrip("\n") - f.close() + headfile = os.path.join(repo_path, 'refs', 'remotes', remote, 'HEAD') + branch = head_splitter(headfile, remote, module=module, fail_on_error=True) return branch +def get_remote_url(git_path, module, dest, remote): + '''Return URL of remote source for repo.''' + command = [git_path, 'ls-remote', '--get-url', remote] + (rc, out, err) = module.run_command(command, cwd=dest) + if rc != 0: + # There was an issue getting remote URL, most likely + # command is not available in this version of Git. + return None + return to_native(out).rstrip('\n') + def set_remote_url(git_path, module, repo, dest, remote): ''' updates repo from remote sources ''' - commands = [("set a new url %s for %s" % (repo, remote), [git_path, 'remote', 'set-url', remote, repo])] + # Return if remote URL isn't changing. + remote_url = get_remote_url(git_path, module, dest, remote) + if remote_url == repo or remote_url == unfrackgitpath(repo): + return False - for (label,command) in commands: - (rc,out,err) = module.run_command(command, cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to %s: %s %s" % (label, out, err)) + command = [git_path, 'remote', 'set-url', remote, repo] + (rc, out, err) = module.run_command(command, cwd=dest) + if rc != 0: + label = "set a new url %s for %s" % (repo, remote) + module.fail_json(msg="Failed to %s: %s %s" % (label, out, err)) + + # Return False if remote_url is None to maintain previous behavior + # for Git versions prior to 1.7.5 that lack required functionality. 
+ return remote_url is not None -def fetch(git_path, module, repo, dest, version, remote, bare, refspec): +def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used): ''' updates repo from remote sources ''' set_remote_url(git_path, module, repo, dest, remote) commands = [] fetch_str = 'download remote objects and refs' + fetch_cmd = [git_path, 'fetch'] - if bare: - refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*'] + refspecs = [] + if depth: + # try to find the minimal set of refs we need to fetch to get a + # successful checkout + currenthead = get_head_branch(git_path, module, dest, remote) if refspec: refspecs.append(refspec) - commands.append((fetch_str, [git_path, 'fetch', remote] + refspecs)) - else: - # unlike in bare mode, there's no way to combine the - # additional refspec with the default git fetch behavior, - # so use two commands - commands.append((fetch_str, [git_path, 'fetch', remote])) - refspecs = ['+refs/tags/*:refs/tags/*'] + elif version == 'HEAD': + refspecs.append(currenthead) + elif is_remote_branch(git_path, module, dest, repo, version): + if currenthead != version: + # this workaround is only needed for older git versions + # 1.8.3 is broken, 1.9.x works + # ensure that remote branch is available as both local and remote ref + refspecs.append('+refs/heads/%s:refs/heads/%s' % (version, version)) + refspecs.append('+refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version)) + else: + refspecs.append(version) + elif is_remote_tag(git_path, module, dest, repo, version): + refspecs.append('+refs/tags/'+version+':refs/tags/'+version) + if refspecs: + # if refspecs is empty, i.e. 
version is neither heads nor tags + # assume it is a version hash + # fall back to a full clone, otherwise we might not be able to checkout + # version + fetch_cmd.extend(['--depth', str(depth)]) + + if not depth or not refspecs: + # don't try to be minimalistic but do a full clone + # also do this if depth is given, but version is something that can't be fetched directly + if bare: + refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*'] + else: + # ensure all tags are fetched + if git_version_used >= LooseVersion('1.9'): + fetch_cmd.append('--tags') + else: + # old git versions have a bug in --tags that prevents updating existing tags + commands.append((fetch_str, fetch_cmd + [remote])) + refspecs = ['+refs/tags/*:refs/tags/*'] if refspec: refspecs.append(refspec) - commands.append((fetch_str, [git_path, 'fetch', remote] + refspecs)) + fetch_cmd.extend([remote]) + + commands.append((fetch_str, fetch_cmd + refspecs)) for (label,command) in commands: (rc,out,err) = module.run_command(command, cwd=dest) if rc != 0: - module.fail_json(msg="Failed to %s: %s %s" % (label, out, err)) + module.fail_json(msg="Failed to %s: %s %s" % (label, out, err), cmd=command) def submodules_fetch(git_path, module, remote, track_submodules, dest): changed = False @@ -547,7 +706,7 @@ def submodules_fetch(git_path, module, remote, track_submodules, dest): if line.strip().startswith('url'): repo = line.split('=', 1)[1].strip() if module.params['ssh_opts'] is not None: - if not "-o StrictHostKeyChecking=no" in module.params['ssh_opts']: + if "-o StrictHostKeyChecking=no" not in module.params['ssh_opts']: add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) else: add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) @@ -581,7 +740,7 @@ def submodules_fetch(git_path, module, remote, track_submodules, dest): break return changed -def submodule_update(git_path, module, dest, track_submodules): +def submodule_update(git_path, 
module, dest, track_submodules, force=False): ''' init and update any submodules ''' # get the valid submodule params @@ -596,51 +755,62 @@ def submodule_update(git_path, module, dest, track_submodules): cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ,'--remote' ] else: cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ] + if force: + cmd.append('--force') (rc, out, err) = module.run_command(cmd, cwd=dest) if rc != 0: module.fail_json(msg="Failed to init/update submodules: %s" % out + err) return (rc, out, err) def set_remote_branch(git_path, module, dest, remote, version, depth): - cmd = "%s remote set-branches %s %s" % (git_path, remote, version) - (rc, out, err) = module.run_command(cmd, cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to set remote branch: %s" % version) - cmd = "%s fetch --depth=%s %s %s" % (git_path, depth, remote, version) + """set refs for the remote branch version + + This assumes the branch does not yet exist locally and is therefore also not checked out. 
+ Can't use git remote set-branches, as it is not available in git 1.7.1 (centos6) + """ + + branchref = "+refs/heads/%s:refs/heads/%s" % (version, version) + branchref += ' +refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version) + cmd = "%s fetch --depth=%s %s %s" % (git_path, depth, remote, branchref) (rc, out, err) = module.run_command(cmd, cwd=dest) if rc != 0: - module.fail_json(msg="Failed to fetch branch from remote: %s" % version) + module.fail_json(msg="Failed to fetch branch from remote: %s" % version, stdout=out, stderr=err, rc=rc) -def switch_version(git_path, module, dest, remote, version, verify_commit): +def switch_version(git_path, module, dest, remote, version, verify_commit, depth): cmd = '' - if version != 'HEAD': + if version == 'HEAD': + branch = get_head_branch(git_path, module, dest, remote) + (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest) + if rc != 0: + module.fail_json(msg="Failed to checkout branch %s" % branch, + stdout=out, stderr=err, rc=rc) + cmd = "%s reset --hard %s" % (git_path, remote) + else: + # FIXME check for local_branch first, should have been fetched already if is_remote_branch(git_path, module, dest, remote, version): + if depth and not is_local_branch(git_path, module, dest, version): + # git clone --depth implies --single-branch, which makes + # the checkout fail if the version changes + # fetch the remote branch, to be able to check it out next + set_remote_branch(git_path, module, dest, remote, version, depth) if not is_local_branch(git_path, module, dest, version): - depth = module.params['depth'] - if depth: - # git clone --depth implies --single-branch, which makes - # the checkout fail if the version changes - set_remote_branch(git_path, module, dest, remote, version, depth) cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version) else: (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest) if rc != 0: 
- module.fail_json(msg="Failed to checkout branch %s" % version) + module.fail_json(msg="Failed to checkout branch %s" % version, + stdout=out, stderr=err, rc=rc) cmd = "%s reset --hard %s/%s" % (git_path, remote, version) else: cmd = "%s checkout --force %s" % (git_path, version) - else: - branch = get_head_branch(git_path, module, dest, remote) - (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to checkout branch %s" % branch) - cmd = "%s reset --hard %s" % (git_path, remote) (rc, out1, err1) = module.run_command(cmd, cwd=dest) if rc != 0: if version != 'HEAD': - module.fail_json(msg="Failed to checkout %s" % (version)) + module.fail_json(msg="Failed to checkout %s" % (version), + stdout=out1, stderr=err1, rc=rc, cmd=cmd) else: - module.fail_json(msg="Failed to checkout branch %s" % (branch)) + module.fail_json(msg="Failed to checkout branch %s" % (branch), + stdout=out1, stderr=err1, rc=rc, cmd=cmd) if verify_commit: verify_commit_sign(git_path, module, dest, version) @@ -649,12 +819,30 @@ def switch_version(git_path, module, dest, remote, version, verify_commit): def verify_commit_sign(git_path, module, dest, version): - cmd = "%s verify-commit %s" % (git_path, version) + if version in get_tags(git_path, module, dest): + git_sub = "verify-tag" + else: + git_sub = "verify-commit" + cmd = "%s %s %s" % (git_path, git_sub, version) (rc, out, err) = module.run_command(cmd, cwd=dest) if rc != 0: - module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version) + module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version, stdout=out, stderr=err, rc=rc) return (rc, out, err) + +def git_version(git_path, module): + """return the installed version of git""" + cmd = "%s --version" % git_path + (rc, out, err) = module.run_command(cmd) + if rc != 0: + # one could fail_json here, but the version info is not that important, so let's try to fail 
only on actual git commands + return None + rematch = re.search('git version (.*)$', to_native(out)) + if not rematch: + return None + return LooseVersion(rematch.groups()[0]) + + # =========================================== def main(): @@ -678,6 +866,7 @@ def main(): bare=dict(default='no', type='bool'), recursive=dict(default='yes', type='bool'), track_submodules=dict(default='no', type='bool'), + umask=dict(default=None, type='raw'), ), supports_check_mode=True ) @@ -697,6 +886,25 @@ def main(): git_path = module.params['executable'] or module.get_bin_path('git', True) key_file = module.params['key_file'] ssh_opts = module.params['ssh_opts'] + umask = module.params['umask'] + + result = dict( warnings=list() ) + + # evaluate and set the umask before doing anything else + if umask is not None: + if not isinstance(umask, string_types): + module.fail_json(msg="umask must be defined as a quoted octal integer") + try: + umask = int(umask, 8) + except: + module.fail_json(msg="umask must be an octal integer", + details=str(sys.exc_info()[1])) + os.umask(umask) + + # Certain features such as depth require a file:/// protocol for path based urls + # so force a protocal here ... 
+ if repo.startswith('/'): + repo = 'file://' + repo # We screenscrape a huge amount of git commands so use C locale anytime we # call run_command() @@ -721,21 +929,24 @@ def main(): set_git_ssh(ssh_wrapper, key_file, ssh_opts) module.add_cleanup_file(path=ssh_wrapper) - # add the git repo's hostkey + # add the git repo's hostkey if module.params['ssh_opts'] is not None: - if not "-o StrictHostKeyChecking=no" in module.params['ssh_opts']: + if "-o StrictHostKeyChecking=no" not in module.params['ssh_opts']: add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) else: add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) + git_version_used = git_version(git_path, module) + + if depth is not None and git_version_used < LooseVersion('1.9.1'): + result['warnings'].append("Your git version is too old to fully support the depth argument. Falling back to full checkouts.") + depth = None recursive = module.params['recursive'] track_submodules = module.params['track_submodules'] - rc, out, err, status = (0, None, None, None) - - before = None + result.update(before=None) local_mods = False - repo_updated = None + need_fetch = True if (dest and not os.path.exists(gitconfig)) or (not dest and not allow_clone): # if there is no git configuration, do a clone operation unless: # * the user requested no clone (they just want info) @@ -743,74 +954,92 @@ def main(): # In those cases we do an ls-remote if module.check_mode or not allow_clone: remote_head = get_remote_head(git_path, module, dest, version, repo, bare) - module.exit_json(changed=True, before=before, after=remote_head) + result.update(changed=True, after=remote_head) + if module._diff: + diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after']) + if diff: + result['diff'] = diff + module.exit_json(**result) # there's no git config, so clone clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec, 
verify_commit) - repo_updated = True + need_fetch = False elif not update: # Just return having found a repo already in the dest path # this does no checking that the repo is the actual repo # requested. - before = get_version(module, git_path, dest) - module.exit_json(changed=False, before=before, after=before) + result['before'] = get_version(module, git_path, dest) + result.update(changed=False, after=result['before']) + module.exit_json(**result) else: # else do a pull local_mods = has_local_mods(module, git_path, dest, bare) - before = get_version(module, git_path, dest) + result['before'] = get_version(module, git_path, dest) if local_mods: # failure should happen regardless of check mode if not force: - module.fail_json(msg="Local modifications exist in repository (force=no).") + module.fail_json(msg="Local modifications exist in repository (force=no).", **result) # if force and in non-check mode, do a reset if not module.check_mode: reset(git_path, module, dest) + # exit if already at desired sha version - set_remote_url(git_path, module, repo, dest, remote) - remote_head = get_remote_head(git_path, module, dest, version, remote, bare) - if before == remote_head: - if local_mods: - module.exit_json(changed=True, before=before, after=remote_head, - msg="Local modifications exist") - elif is_remote_tag(git_path, module, dest, repo, version): - # if the remote is a tag and we have the tag locally, exit early - if version in get_tags(git_path, module, dest): - repo_updated = False - else: - # if the remote is a branch and we have the branch locally, exit early - if version in get_branches(git_path, module, dest): - repo_updated = False - if repo_updated is None: + if module.check_mode: + remote_url = get_remote_url(git_path, module, dest, remote) + remote_url_changed = remote_url and remote_url != repo and remote_url != unfrackgitpath(repo) + else: + remote_url_changed = set_remote_url(git_path, module, repo, dest, remote) + 
result.update(remote_url_changed=remote_url_changed) + + if need_fetch: if module.check_mode: - module.exit_json(changed=True, before=before, after=remote_head) - fetch(git_path, module, repo, dest, version, remote, bare, refspec) - repo_updated = True + remote_head = get_remote_head(git_path, module, dest, version, remote, bare) + result.update(changed=(result['before'] != remote_head), after=remote_head) + # FIXME: This diff should fail since the new remote_head is not fetched yet?! + if module._diff: + diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after']) + if diff: + result['diff'] = diff + module.exit_json(**result) + else: + fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used) + + result['after'] = get_version(module, git_path, dest) + + if result['before'] == result['after']: + if local_mods: + result.update(changed=True, after=remote_head, msg='Local modifications exist') + # no diff, since the repo didn't change + module.exit_json(**result) # switch to version specified regardless of whether # we got new revisions from the repository if not bare: - switch_version(git_path, module, dest, remote, version, verify_commit) + switch_version(git_path, module, dest, remote, version, verify_commit, depth) # Deal with submodules submodules_updated = False if recursive and not bare: submodules_updated = submodules_fetch(git_path, module, remote, track_submodules, dest) + if submodules_updated: + result.update(submodules_changed=submodules_updated) - if module.check_mode: - if submodules_updated: - module.exit_json(changed=True, before=before, after=remote_head, submodules_changed=True) - else: - module.exit_json(changed=False, before=before, after=remote_head) + if module.check_mode: + result.update(changed=True, after=remote_head) + module.exit_json(**result) - if submodules_updated: # Switch to version specified - submodule_update(git_path, module, dest, track_submodules) + 
submodule_update(git_path, module, dest, track_submodules, force=force) # determine if we changed anything - after = get_version(module, git_path, dest) + result['after'] = get_version(module, git_path, dest) - changed = False - if before != after or local_mods or submodules_updated: - changed = True + result.update(changed=False) + if result['before'] != result['after'] or local_mods or submodules_updated or remote_url_changed: + result.update(changed=True) + if module._diff: + diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after']) + if diff: + result['diff'] = diff # cleanup the wrapper script if ssh_wrapper: @@ -820,11 +1049,8 @@ def main(): # No need to fail if the file already doesn't exist pass - module.exit_json(changed=changed, before=before, after=after) + module.exit_json(**result) -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.known_hosts import * if __name__ == '__main__': main() diff --git a/source_control/hg.py b/source_control/hg.py index 2428effee9f..89845c197c9 100644 --- a/source_control/hg.py +++ b/source_control/hg.py @@ -23,7 +23,9 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -import ConfigParser +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -43,6 +45,7 @@ dest: description: - Absolute path of where the repository should be cloned to. + This parameter is required, unless clone and update are set to no required: true default: null revision: @@ -72,6 +75,13 @@ version_added: "2.0" description: - If C(no), do not retrieve new revisions from the origin repository + clone: + required: false + default: "yes" + choices: [ "yes", "no" ] + version_added: "2.3" + description: + - If C(no), do not clone the repository if it does not exist locally. 
executable: required: false default: null @@ -89,9 +99,27 @@ EXAMPLES = ''' # Ensure the current working copy is inside the stable branch and deletes untracked files if any. -- hg: repo=https://bitbucket.org/user/repo1 dest=/home/user/repo1 revision=stable purge=yes +- hg: + repo: https://bitbucket.org/user/repo1 + dest: /home/user/repo1 + revision: stable + purge: yes + +# Example just get information about the repository whether or not it has +# already been cloned locally. +- hg: + repo: git://bitbucket.org/user/repo + dest: /srv/checkout + clone: no + update: no ''' +import os + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native + class Hg(object): def __init__(self, module, dest, repo, revision, hg_path): @@ -123,7 +151,14 @@ def get_revision(self): if rc != 0: self.module.fail_json(msg=err) else: - return out.strip('\n') + return to_native(out).strip('\n') + + def get_remote_revision(self): + (rc, out, err) = self._command(['id', self.repo]) + if rc != 0: + self.module_fail_json(msg=err) + else: + return to_native(out).strip('\n') def has_local_mods(self): now = self.get_revision() @@ -211,36 +246,48 @@ def main(): module = AnsibleModule( argument_spec = dict( repo = dict(required=True, aliases=['name']), - dest = dict(required=True), + dest = dict(type='path'), revision = dict(default=None, aliases=['version']), force = dict(default='no', type='bool'), purge = dict(default='no', type='bool'), update = dict(default='yes', type='bool'), + clone = dict(default='yes', type='bool'), executable = dict(default=None), ), ) repo = module.params['repo'] - dest = os.path.expanduser(module.params['dest']) + dest = module.params['dest'] revision = module.params['revision'] force = module.params['force'] purge = module.params['purge'] update = module.params['update'] + clone = module.params['clone'] hg_path = module.params['executable'] or module.get_bin_path('hg', True) - hgrc = 
os.path.join(dest, '.hg/hgrc') + if dest is not None: + hgrc = os.path.join(dest, '.hg/hgrc') # initial states before = '' changed = False cleaned = False + if not dest and (clone or update): + module.fail_json(msg="the destination directory must be specified unless clone=no and update=no") + hg = Hg(module, dest, repo, revision, hg_path) # If there is no hgrc file, then assume repo is absent # and perform clone. Otherwise, perform pull and update. + if not clone and not update: + out = hg.get_remote_revision() + module.exit_json(after=out, changed=False) if not os.path.exists(hgrc): - (rc, out, err) = hg.clone() - if rc != 0: - module.fail_json(msg=err) + if clone: + (rc, out, err) = hg.clone() + if rc != 0: + module.fail_json(msg=err) + else: + module.exit_json(changed=False) elif not update: # Just return having found a repo already in the dest path before = hg.get_revision() @@ -270,6 +317,5 @@ def main(): changed = True module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned) -# import module snippets -from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/source_control/subversion.py b/source_control/subversion.py index 3fb88d5e49e..09477700b9f 100644 --- a/source_control/subversion.py +++ b/source_control/subversion.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: subversion @@ -28,6 +32,7 @@ author: "Dane Summers (@dsummersl) " notes: - Requires I(svn) to be installed on the client. + - This module does not handle externals requirements: [] options: repo: @@ -71,6 +76,20 @@ description: - Path to svn executable to use. If not supplied, the normal mechanism for resolving binary paths will be used. 
+ checkout: + required: false + default: "yes" + choices: [ "yes", "no" ] + version_added: "2.3" + description: + - If no, do not check out the repository if it does not exist locally + update: + required: false + default: "yes" + choices: [ "yes", "no" ] + version_added: "2.3" + description: + - If no, do not retrieve new revisions from the origin repository export: required: false default: "no" @@ -89,10 +108,22 @@ EXAMPLES = ''' # Checkout subversion repository to specified folder. -- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/checkout +- subversion: + repo: svn+ssh://an.example.org/path/to/repo + dest: /src/checkout # Export subversion directory to folder -- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/export export=True +- subversion: + repo: svn+ssh://an.example.org/path/to/repo + dest: /src/export + +# Example just get information about the repository whether or not it has +# already been cloned locally. +- subversion: + repo: svn+ssh://an.example.org/path/to/repo + dest: /srv/checkout + checkout: no + update: no ''' import re @@ -167,14 +198,20 @@ def get_revision(self): url = re.search(r'^URL:.*$', text, re.MULTILINE).group(0) return rev, url + def get_remote_revision(self): + '''Revision and URL of subversion working directory.''' + text = '\n'.join(self._exec(["info", self.repo])) + rev = re.search(r'^Revision:.*$', text, re.MULTILINE).group(0) + return rev + def has_local_mods(self): '''True if revisioned files have been added or modified. Unrevisioned files are ignored.''' lines = self._exec(["status", "--quiet", "--ignore-externals", self.dest]) # The --quiet option will return only modified files. # Match only revisioned files, i.e. ignore status '?'. regex = re.compile(r'^[^?X]') - # Has local mods if more than 0 modifed revisioned files. - return len(filter(regex.match, lines)) > 0 + # Has local mods if more than 0 modified revisioned files. 
+ return len(list(filter(regex.match, lines))) > 0 def needs_update(self): curr, url = self.get_revision() @@ -193,14 +230,16 @@ def needs_update(self): def main(): module = AnsibleModule( argument_spec=dict( - dest=dict(required=True, type='path'), + dest=dict(type='path'), repo=dict(required=True, aliases=['name', 'repository']), revision=dict(default='HEAD', aliases=['rev', 'version']), force=dict(default='no', type='bool'), username=dict(required=False), - password=dict(required=False), + password=dict(required=False, no_log=True), executable=dict(default=None, type='path'), export=dict(default=False, required=False, type='bool'), + checkout=dict(default=True, required=False, type='bool'), + update=dict(default=True, required=False, type='bool'), switch=dict(default=True, required=False, type='bool'), ), supports_check_mode=True @@ -215,19 +254,28 @@ def main(): svn_path = module.params['executable'] or module.get_bin_path('svn', True) export = module.params['export'] switch = module.params['switch'] + checkout = module.params['checkout'] + update = module.params['update'] # We screenscrape a huge amount of svn commands so use C locale anytime we # call run_command() - module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + module.run_command_environ_update = dict(LANG='C', LC_MESSAGES='C') + + if not dest and (checkout or update or export): + module.fail_json(msg="the destination directory must be specified unless checkout=no, update=no, and export=no") svn = Subversion(module, dest, repo, revision, username, password, svn_path) + if not export and not update and not checkout: + module.exit_json(changed=False, after=svn.get_remote_revision()) if export or not os.path.exists(dest): before = None local_mods = False if module.check_mode: module.exit_json(changed=True) - if not export: + elif not export and not checkout: + module.exit_json(changed=False) + if not export and checkout: svn.checkout() else: 
svn.export(force=force) @@ -235,7 +283,7 @@ def main(): # Order matters. Need to get local mods before switch to avoid false # positives. Need to switch before revert to ensure we are reverting to # correct repo. - if module.check_mode: + if module.check_mode or not update: check, before, after = svn.needs_update() module.exit_json(changed=check, before=before, after=after) before = svn.get_revision() @@ -260,4 +308,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/system/authorized_key.py b/system/authorized_key.py index 1df21b4c764..c2023e30124 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -21,6 +21,10 @@ along with Ansible. If not, see . """ +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: authorized_key @@ -33,12 +37,10 @@ description: - The username on the remote host whose authorized_keys file will be modified required: true - default: null key: description: - The SSH public key(s), as a string or (since 1.9) url (https://github.com/username.keys) required: true - default: null path: description: - Alternate path to the authorized_keys file @@ -82,33 +84,39 @@ version_added: "1.9" validate_certs: description: - - This only applies if using a https url as the source of the keys. If set to C(no), the SSL certificates will not be validated. - - This should only set to C(no) used on personally controlled sites using self-signed certificates as it avoids verifying the source site. + - This only applies if using a https url as the source of the keys. If set to C(no), the SSL certificates will not be validated. + - This should only set to C(no) used on personally controlled sites using self-signed certificates as it avoids verifying the source site. - Prior to 2.1 the code worked as if this was set to C(yes). 
required: false default: "yes" choices: ["yes", "no"] - version_added: "2.1" + version_added: "2.1" author: "Ansible Core Team" ''' EXAMPLES = ''' # Example using key data from a local file on the management machine -- authorized_key: user=charlie key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" +- authorized_key: + user: charlie + key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" # Using github url as key source -- authorized_key: user=charlie key=https://github.com/charlie.keys +- authorized_key: + user: charlie + key: https://github.com/charlie.keys # Using alternate directory locations: - authorized_key: user: charlie key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" - path: '/etc/ssh/authorized_keys/charlie' + path: /etc/ssh/authorized_keys/charlie manage_dir: no # Using with_file - name: Set up authorized_keys for the deploy user - authorized_key: user=deploy key="{{ item }}" + authorized_key: + user: deploy + key: "{{ item }}" with_file: - public_keys/doe-jane - public_keys/doe-john @@ -116,16 +124,30 @@ # Using key_options: - authorized_key: user: charlie - key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" + key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" key_options: 'no-port-forwarding,from="10.0.1.1"' # Using validate_certs: -- authorized_key: user=charlie key=https://github.com/user.keys validate_certs=no +- authorized_key: + user: charlie + key: https://github.com/user.keys + validate_certs: no # Set up authorized_keys exclusively with one key -- authorized_key: user=root key="{{ item }}" state=present exclusive=yes +- authorized_key: + user: root + key: "{{ item }}" + state: present + exclusive: yes with_file: - public_keys/doe-jane + +# Copies the key from the user who is running ansible to the remote machine user ubuntu +- authorized_key: + user: ubuntu + key: "{{ lookup('file', lookup('env','HOME') + '/.ssh/id_rsa.pub') }}" + become: yes 
+ ''' # Makes sure the public key line is present or absent in the user's .ssh/authorized_keys. @@ -140,34 +162,82 @@ # # see example in examples/playbooks -import sys import os import pwd import os.path import tempfile import re import shlex +from operator import itemgetter + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import fetch_url class keydict(dict): - """ a dictionary that maintains the order of keys as they are added """ + """ a dictionary that maintains the order of keys as they are added + + This has become an abuse of the dict interface. Probably should be + rewritten to be an entirely custom object with methods instead of + bracket-notation. + + Our requirements are for a data structure that: + * Preserves insertion order + * Can store multiple values for a single key. + + The present implementation has the following functions used by the rest of + the code: + + * __setitem__(): to add a key=value. The value can never be disassociated + with the key, only new values can be added in addition. + * items(): to retrieve the key, value pairs. + + Other dict methods should work but may be surprising. For instance, there + will be multiple keys that are the same in keys() and __getitem__() will + return a list of the values that have been set via __setitem__. 
+ """ # http://stackoverflow.com/questions/2328235/pythonextend-the-dict-class def __init__(self, *args, **kw): super(keydict,self).__init__(*args, **kw) - self.itemlist = super(keydict,self).keys() + self.itemlist = list(super(keydict,self).keys()) + def __setitem__(self, key, value): self.itemlist.append(key) - super(keydict,self).__setitem__(key, value) + if key in self: + self[key].append(value) + else: + super(keydict, self).__setitem__(key, [value]) + def __iter__(self): return iter(self.itemlist) + def keys(self): return self.itemlist - def values(self): - return [self[key] for key in self] + + def _item_generator(self): + indexes = {} + for key in self.itemlist: + if key in indexes: + indexes[key] += 1 + else: + indexes[key] = 0 + yield key, self[key][indexes[key]] + + def iteritems(self): + return self._item_generator() + + def items(self): + return list(self.iteritems()) + def itervalues(self): - return (self[key] for key in self) + return (item[1] for item in self.iteritems()) + + def values(self): + return list(self.itervalues()) + def keyfile(module, user, write=False, path=None, manage_dir=True): """ @@ -187,7 +257,8 @@ def keyfile(module, user, write=False, path=None, manage_dir=True): try: user_entry = pwd.getpwnam(user) - except KeyError, e: + except KeyError: + e = get_exception() if module.check_mode and path is None: module.fail_json(msg="Either user must exist or you must provide full path to key file in check mode") module.fail_json(msg="Failed to lookup user %s: %s" % (user, str(e))) @@ -207,11 +278,11 @@ def keyfile(module, user, write=False, path=None, manage_dir=True): if manage_dir: if not os.path.exists(sshdir): - os.mkdir(sshdir, 0700) + os.mkdir(sshdir, int('0700', 8)) if module.selinux_enabled(): module.set_default_selinux_context(sshdir, False) os.chown(sshdir, uid, gid) - os.chmod(sshdir, 0700) + os.chmod(sshdir, int('0700', 8)) if not os.path.exists(keysfile): basedir = os.path.dirname(keysfile) @@ -226,7 +297,7 @@ def 
keyfile(module, user, write=False, path=None, manage_dir=True): try: os.chown(keysfile, uid, gid) - os.chmod(keysfile, 0600) + os.chmod(keysfile, int('0600', 8)) except OSError: pass @@ -239,26 +310,26 @@ def parseoptions(module, options): ''' options_dict = keydict() #ordered dict if options: - try: - # the following regex will split on commas while - # ignoring those commas that fall within quotes - regex = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''') - parts = regex.split(options)[1:-1] - for part in parts: - if "=" in part: - (key, value) = part.split("=", 1) - options_dict[key] = value - elif part != ",": - options_dict[part] = None - except: - module.fail_json(msg="invalid option string: %s" % options) + # the following regex will split on commas while + # ignoring those commas that fall within quotes + regex = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''') + parts = regex.split(options)[1:-1] + for part in parts: + if "=" in part: + (key, value) = part.split("=", 1) + options_dict[key] = value + elif part != ",": + options_dict[part] = None return options_dict -def parsekey(module, raw_key): +def parsekey(module, raw_key, rank=None): ''' parses a key, which may or may not contain a list of ssh-key options at the beginning + + rank indicates the keys original ordering, so that + it can be written out in the same order. ''' VALID_SSH2_KEY_TYPES = [ @@ -285,6 +356,10 @@ def parsekey(module, raw_key): lex.whitespace_split = True key_parts = list(lex) + if key_parts and key_parts[0] == '#': + # comment line, invalid line, etc. 
+ return (raw_key, 'skipped', None, None, rank) + for i in range(0, len(key_parts)): if key_parts[i] in VALID_SSH2_KEY_TYPES: type_index = i @@ -307,7 +382,7 @@ def parsekey(module, raw_key): if len(key_parts) > (type_index + 1): comment = " ".join(key_parts[(type_index + 2):]) - return (key, key_type, options, comment) + return (key, key_type, options, comment, rank) def readkeys(module, filename): @@ -316,15 +391,15 @@ def readkeys(module, filename): keys = {} f = open(filename) - for line in f.readlines(): - key_data = parsekey(module, line) + for rank_index, line in enumerate(f.readlines()): + key_data = parsekey(module, line, rank=rank_index) if key_data: # use key as identifier keys[key_data[0]] = key_data else: - # for an invalid line, just append the line - # to the array so it will be re-output later - keys[line] = line + # for an invalid line, just set the line + # dict key to the line so it will be re-output later + keys[line] = (line, 'skipped', None, None, rank_index) f.close() return keys @@ -332,26 +407,41 @@ def writekeys(module, filename, keys): fd, tmp_path = tempfile.mkstemp('', 'tmp', os.path.dirname(filename)) f = open(tmp_path,"w") + + # FIXME: only the f.writelines() needs to be in try clause try: - for index, key in keys.items(): + new_keys = keys.values() + # order the new_keys by their original ordering, via the rank item in the tuple + ordered_new_keys = sorted(new_keys, key=itemgetter(4)) + + for key in ordered_new_keys: try: - (keyhash,type,options,comment) = key + (keyhash, key_type, options, comment, rank) = key + option_str = "" if options: option_strings = [] - for option_key in options.keys(): - if options[option_key]: - option_strings.append("%s=%s" % (option_key, options[option_key])) - else: + for option_key, value in options.items(): + if value is None: option_strings.append("%s" % option_key) - + else: + option_strings.append("%s=%s" % (option_key, value)) option_str = ",".join(option_strings) option_str += " " - key_line = 
"%s%s %s %s\n" % (option_str, type, keyhash, comment) + + # comment line or invalid line, just leave it + if not key_type: + key_line = key + + if key_type == 'skipped': + key_line = key[0] + else: + key_line = "%s%s %s %s\n" % (option_str, key_type, keyhash, comment) except: key_line = key f.writelines(key_line) - except IOError, e: + except IOError: + e = get_exception() module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, str(e))) f.close() module.atomic_move(tmp_path, filename) @@ -368,7 +458,6 @@ def enforce_state(module, params): state = params.get("state", "present") key_options = params.get("key_options", None) exclusive = params.get("exclusive", False) - validate_certs = params.get("validate_certs", True) error_msg = "Error getting key from: %s" # if the key is a url, request it and use it as key source @@ -383,45 +472,47 @@ def enforce_state(module, params): module.fail_json(msg=error_msg % key) # extract individual keys into an array, skipping blank lines and comments - key = [s for s in key.splitlines() if s and not s.startswith('#')] + new_keys = [s for s in key.splitlines() if s and not s.startswith('#')] # check current state -- just get the filename, don't create file do_write = False params["keyfile"] = keyfile(module, user, do_write, path, manage_dir) existing_keys = readkeys(module, params["keyfile"]) - # Add a place holder for keys that should exist in the state=present and # exclusive=true case keys_to_exist = [] + # we will order any non exclusive new keys higher than all the existing keys, + # resulting in the new keys being written to the key file after existing keys, but + # in the order of new_keys + max_rank_of_existing_keys = len(existing_keys) + # Check our new keys, if any of them exist we'll continue. 
- for new_key in key: - parsed_new_key = parsekey(module, new_key) + for rank_index, new_key in enumerate(new_keys): + parsed_new_key = parsekey(module, new_key, rank=rank_index) if not parsed_new_key: module.fail_json(msg="invalid key specified: %s" % new_key) if key_options is not None: parsed_options = parseoptions(module, key_options) - parsed_new_key = (parsed_new_key[0], parsed_new_key[1], parsed_options, parsed_new_key[3]) + # rank here is the rank in the provided new keys, which may be unrelated to rank in existing_keys + parsed_new_key = (parsed_new_key[0], parsed_new_key[1], parsed_options, parsed_new_key[3], parsed_new_key[4]) - present = False matched = False non_matching_keys = [] if parsed_new_key[0] in existing_keys: - present = True - # Then we check if everything matches, including + # Then we check if everything (except the rank at index 4) matches, including # the key type and options. If not, we append this # existing key to the non-matching list # We only want it to match everything when the state # is present - if parsed_new_key != existing_keys[parsed_new_key[0]] and state == "present": + if parsed_new_key[:4] != existing_keys[parsed_new_key[0]][:4] and state == "present": non_matching_keys.append(existing_keys[parsed_new_key[0]]) else: matched = True - # handle idempotent state=present if state=="present": keys_to_exist.append(parsed_new_key[0]) @@ -431,8 +522,12 @@ def enforce_state(module, params): del existing_keys[non_matching_key[0]] do_write = True + # new key that didn't exist before. Where should it go in the ordering? 
if not matched: - existing_keys[parsed_new_key[0]] = parsed_new_key + # We want the new key to be after existing keys if not exclusive (rank > max_rank_of_existing_keys) + total_rank = max_rank_of_existing_keys + parsed_new_key[4] + # replace existing key tuple with new parsed key with its total rank + existing_keys[parsed_new_key[0]] = (parsed_new_key[0], parsed_new_key[1], parsed_new_key[2], parsed_new_key[3], total_rank) do_write = True elif state=="absent": @@ -442,6 +537,7 @@ def enforce_state(module, params): do_write = True # remove all other keys to honor exclusive + # for 'exclusive', make sure keys are written in the order the new keys were if state == "present" and exclusive: to_remove = frozenset(existing_keys).difference(keys_to_exist) for key in to_remove: @@ -460,7 +556,6 @@ def enforce_state(module, params): return params def main(): - module = AnsibleModule( argument_spec = dict( user = dict(required=True, type='str'), @@ -479,7 +574,5 @@ def main(): results = enforce_state(module, module.params) module.exit_json(**results) -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() +if __name__ == '__main__': + main() diff --git a/system/cron.py b/system/cron.py index d580934185c..6e87147f39e 100644 --- a/system/cron.py +++ b/system/cron.py @@ -22,7 +22,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # -# Cron Plugin: The goal of this plugin is to provide an indempotent method for +# Cron Plugin: The goal of this plugin is to provide an idempotent method for # setting up cron jobs on a host. The script will play well with other manually # entered crons. Each cron job entered will be preceded with a comment # describing the job so that it can be found later, which is required to be @@ -31,6 +31,10 @@ # This module is based on python-crontab by Martin Owens. 
# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = """ --- module: cron @@ -46,12 +50,14 @@ - 'When environment variables are managed: no comment line is added, but, when the module needs to find/check the state, it uses the "name" parameter to find the environment variable definition line.' + - 'When using symbols such as %, they must be properly escaped.' version_added: "0.9" options: name: description: - Description of a crontab entry or, if env is set, the name of environment variable. - Required if state=absent + Required if state=absent. Note that if name is not set and state=present, then a + new crontab entry will always be created, regardless of existing ones. default: null required: false user: @@ -137,7 +143,7 @@ env: description: - If set, manages a crontab's environment variable. New variables are added on top of crontab. - "name" and "value" paramenters are the name and the value of environment variable. + "name" and "value" parameters are the name and the value of environment variable. version_added: "2.1" required: false default: "no" @@ -169,40 +175,74 @@ EXAMPLES = ''' # Ensure a job that runs at 2 and 5 exists. # Creates an entry like "0 5,2 * * ls -alh > /dev/null" -- cron: name="check dirs" minute="0" hour="5,2" job="ls -alh > /dev/null" +- cron: + name: "check dirs" + minute: "0" + hour: "5,2" + job: "ls -alh > /dev/null" # Ensure an old job is no longer present. 
Removes any job that is prefixed # by "#Ansible: an old job" from the crontab -- cron: name="an old job" state=absent +- cron: + name: "an old job" + state: absent # Creates an entry like "@reboot /some/job.sh" -- cron: name="a job for reboot" special_time=reboot job="/some/job.sh" +- cron: + name: "a job for reboot" + special_time: reboot + job: "/some/job.sh" # Creates an entry like "PATH=/opt/bin" on top of crontab -- cron: name=PATH env=yes value=/opt/bin +- cron: + name: PATH + env: yes + value: /opt/bin # Creates an entry like "APP_HOME=/srv/app" and insert it after PATH # declaration -- cron: name=APP_HOME env=yes value=/srv/app insertafter=PATH +- cron: + name: APP_HOME + env: yes + value: /srv/app + insertafter: PATH # Creates a cron file under /etc/cron.d -- cron: name="yum autoupdate" weekday="2" minute=0 hour=12 - user="root" job="YUMINTERACTIVE=0 /usr/sbin/yum-autoupdate" - cron_file=ansible_yum-autoupdate +- cron: + name: yum autoupdate + weekday: 2 + minute: 0 + hour: 12 + user: root + job: "YUMINTERACTIVE: 0 /usr/sbin/yum-autoupdate" + cron_file: ansible_yum-autoupdate # Removes a cron file from under /etc/cron.d -- cron: name="yum autoupdate" cron_file=ansible_yum-autoupdate state=absent +- cron: + name: "yum autoupdate" + cron_file: ansible_yum-autoupdate + state: absent # Removes "APP_HOME" environment variable from crontab -- cron: name=APP_HOME env=yes state=absent +- cron: + name: APP_HOME + env: yes + state: absent ''' import os +import pwd import re import tempfile import platform import pipes +try: + import selinux + HAS_SELINUX = True +except ImportError: + HAS_SELINUX = False + CRONCMD = "/usr/bin/crontab" class CronTabError(Exception): @@ -221,6 +261,7 @@ def __init__(self, module, user=None, cron_file=None): self.root = (os.getuid() == 0) self.lines = None self.ansible = "#Ansible: " + self.existing = '' if cron_file: if os.path.isabs(cron_file): @@ -239,9 +280,10 @@ def read(self): # read the cronfile try: f = open(self.cron_file, 'r') 
- self.lines = f.read().splitlines() + self.existing = f.read() + self.lines = self.existing.splitlines() f.close() - except IOError, e: + except IOError: # cron file does not exist return except: @@ -253,6 +295,8 @@ def read(self): if rc != 0 and rc != 1: # 1 can mean that there are no jobs. raise CronTabError("Unable to read crontab") + self.existing = out + lines = out.splitlines() count = 0 for l in lines: @@ -260,6 +304,9 @@ def read(self): not re.match( r'# \(/tmp/.*installed on.*\)', l) and not re.match( r'# \(.*version.*\)', l)): self.lines.append(l) + else: + pattern = re.escape(l) + '[\r\n]?' + self.existing = re.sub(pattern, '', self.existing, 1) count += 1 def is_empty(self): @@ -278,7 +325,7 @@ def write(self, backup_file=None): fileh = open(self.cron_file, 'w') else: filed, path = tempfile.mkstemp(prefix='crontab') - os.chmod(path, 0644) + os.chmod(path, int('0644', 8)) fileh = os.fdopen(filed, 'w') fileh.write(self.render()) @@ -297,9 +344,16 @@ def write(self, backup_file=None): if rc != 0: self.module.fail_json(msg=err) + # set SELinux permissions + if HAS_SELINUX: + selinux.selinux_lsetfilecon_default(self.cron_file) + + def do_comment(self, name): + return "%s%s" % (self.ansible, name) + def add_job(self, name, job): # Add the comment - self.lines.append("%s%s" % (self.ansible, name)) + self.lines.append(self.do_comment(name)) # Add the job self.lines.append("%s" % (job)) @@ -354,13 +408,14 @@ def remove_job_file(self): try: os.unlink(self.cron_file) return True - except OSError, e: + except OSError: # cron file does not exist return False except: raise CronTabError("Unexpected error:", sys.exc_info()[0]) - def find_job(self, name): + def find_job(self, name, job=None): + # attempt to find job by 'Ansible:' header comment comment = None for l in self.lines: if comment is not None: @@ -371,6 +426,19 @@ def find_job(self, name): elif re.match( r'%s' % self.ansible, l): comment = re.sub( r'%s' % self.ansible, '', l) + # failing that, attempt to find 
job by exact match + if job: + for i, l in enumerate(self.lines): + if l == job: + # if no leading ansible header, insert one + if not re.match( r'%s' % self.ansible, self.lines[i-1]): + self.lines.insert(i, self.do_comment(name)) + return [self.lines[i], l, True] + # if a leading blank ansible header AND job has a name, update header + elif name and self.lines[i-1] == self.do_comment(None): + self.lines[i-1] = self.do_comment(name) + return [self.lines[i-1], l, True] + return [] def find_env(self, name): @@ -381,6 +449,9 @@ def find_env(self, name): return [] def get_cron_job(self,minute,hour,day,month,weekday,job,special,disabled): + # normalize any leading/trailing newlines (ansible/ansible-modules-core#3791) + job = job.strip('\r\n') + if disabled: disable_prefix = '#' else: @@ -418,7 +489,7 @@ def get_envnames(self): return envnames def _update_job(self, name, job, addlinesfunction): - ansiblename = "%s%s" % (self.ansible, name) + ansiblename = self.do_comment(name) newlines = [] comment = None @@ -458,8 +529,8 @@ def render(self): crons.append(cron) result = '\n'.join(crons) - if result and result[-1] not in ['\n', '\r']: - result += '\n' + if result: + result = result.rstrip('\r\n') + '\n' return result def _read_user_execute(self): @@ -474,7 +545,7 @@ def _read_user_execute(self): return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user)) elif platform.system() == 'HP-UX': return "%s %s %s" % (CRONCMD , '-l', pipes.quote(self.user)) - else: + elif pwd.getpwuid(os.getuid())[0] != self.user: user = '-u %s' % pipes.quote(self.user) return "%s %s %s" % (CRONCMD , user, '-l') @@ -486,9 +557,9 @@ def _write_execute(self, path): if self.user: if platform.system() in ['SunOS', 'HP-UX', 'AIX']: return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path)) - else: + elif pwd.getpwuid(os.getuid())[0] != self.user: user = '-u %s' % pipes.quote(self.user) - return "%s %s %s" % (CRONCMD 
, pipes.quote(path), user) + return "%s %s %s" % (CRONCMD , user, pipes.quote(path)) @@ -568,14 +639,14 @@ def main(): res_args = dict() # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option. - os.umask(022) + os.umask(int('022', 8)) crontab = CronTab(module, user, cron_file) module.debug('cron instantiated - name: "%s"' % name) if module._diff: diff = dict() - diff['before'] = crontab.render() + diff['before'] = crontab.existing if crontab.cron_file: diff['before_header'] = crontab.cron_file else: @@ -639,21 +710,31 @@ def main(): crontab.remove_env(name) changed = True else: - job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled) - old_job = crontab.find_job(name) - if do_install: + job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled) + old_job = crontab.find_job(name, job) + if len(old_job) == 0: crontab.add_job(name, job) changed = True if len(old_job) > 0 and old_job[1] != job: crontab.update_job(name, job) changed = True + if len(old_job) > 2: + crontab.update_job(name, job) + changed = True else: + old_job = crontab.find_job(name) + if len(old_job) > 0: crontab.remove_job(name) changed = True + # no changes to env/job, but existing crontab needs a terminating newline + if not changed: + if not (crontab.existing.endswith('\r') or crontab.existing.endswith('\n')): + changed = True + res_args = dict( jobs = crontab.get_jobnames(), envs = crontab.get_envnames(), @@ -694,5 +775,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() - +if __name__ == '__main__': + main() diff --git a/system/group.py b/system/group.py index 8edb93a1d0a..132d71ad2d3 100644 --- a/system/group.py +++ b/system/group.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: group @@ -53,7 +57,9 @@ EXAMPLES = ''' # Example group command from Ansible Playbooks -- group: name=somegroup state=present +- group: + name: somegroup + state: present ''' import grp @@ -144,7 +150,7 @@ class SunOS(Group): This overrides the following methods from the generic class:- - group_add() - """ + """ platform = 'SunOS' distribution = None @@ -258,7 +264,7 @@ class DarwinGroup(Group): - group_add() - group_mod() - group manupulation are done using dseditgroup(1). + group manipulation are done using dseditgroup(1). """ platform = 'Darwin' @@ -269,6 +275,11 @@ def group_add(self, **kwargs): cmd += [ '-o', 'create' ] if self.gid is not None: cmd += [ '-i', self.gid ] + elif 'system' in kwargs and kwargs['system'] == True: + gid = self.get_lowest_available_system_gid() + if gid != False: + self.gid = str(gid) + cmd += [ '-i', self.gid ] cmd += [ '-L', self.name ] (rc, out, err) = self.execute_command(cmd) return (rc, out, err) @@ -291,6 +302,26 @@ def group_mod(self, gid=None): (rc, out, err) = self.execute_command(cmd) return (rc, out, err) return (None, '', '') + + def get_lowest_available_system_gid(self): + # check for lowest available system gid (< 500) + try: + cmd = [self.module.get_bin_path('dscl', True)] + cmd += [ '/Local/Default', '-list', '/Groups', 'PrimaryGroupID'] + (rc, out, err) = self.execute_command(cmd) + lines = out.splitlines() + highest = 0 + for group_info in lines: + parts = group_info.split(' ') + if len(parts) > 1: + gid = int(parts[-1]) + if gid > highest and gid < 500: + highest = gid + if highest == 0 or highest == 499: + return False + return (highest + 1) + except: + return False class OpenBsdGroup(Group): """ @@ -439,4 +470,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/system/hostname.py 
b/system/hostname.py index 6c0c06b3cc8..c6432428fca 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -18,18 +18,22 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: hostname author: - - "Hiroaki Nakamura (@hnakamur)" + - "Adrian Likins (@alikins)" - "Hideki Saito (@saito-hideki)" version_added: "1.4" short_description: Manage hostname requirements: [ hostname ] description: - Set system's hostname. - - Currently implemented on Debian, Ubuntu, Fedora, RedHat, openSUSE, Linaro, ScientificLinux, Arch, CentOS, AMI. + - Currently implemented on Debian, Ubuntu, Fedora, RedHat, openSUSE, Linaro, ScientificLinux, Arch, CentOS, AMI, Alpine Linux. - Any distribution that uses systemd as their init system. - Note, this module does *NOT* modify /etc/hosts. You need to modify it yourself using other modules like template or replace. 
options: @@ -40,7 +44,8 @@ ''' EXAMPLES = ''' -- hostname: name=web01 +- hostname: + name: web01 ''' import socket @@ -48,12 +53,23 @@ # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.facts import * +from ansible.module_utils._text import to_bytes, to_native class UnimplementedStrategy(object): def __init__(self, module): self.module = module + def update_current_and_permanent_hostname(self): + self.unimplemented_error() + + def update_current_hostname(self): + self.unimplemented_error() + + def update_permanent_hostname(self): + self.unimplemented_error() + def get_current_hostname(self): self.unimplemented_error() @@ -94,9 +110,15 @@ def __new__(cls, *args, **kwargs): return load_platform_subclass(Hostname, args, kwargs) def __init__(self, module): - self.module = module - self.name = module.params['name'] - self.strategy = self.strategy_class(module) + self.module = module + self.name = module.params['name'] + if self.platform == 'Linux' and Facts(module).is_systemd_managed(): + self.strategy = SystemdStrategy(module) + else: + self.strategy = self.strategy_class(module) + + def update_current_and_permanent_hostname(self): + return self.strategy.update_current_and_permanent_hostname() def get_current_hostname(self): return self.strategy.get_current_hostname() @@ -124,6 +146,26 @@ class GenericStrategy(object): def __init__(self, module): self.module = module self.hostname_cmd = self.module.get_bin_path('hostname', True) + self.changed = False + + def update_current_and_permanent_hostname(self): + self.update_current_hostname() + self.update_permanent_hostname() + return self.changed + + def update_current_hostname(self): + name = self.module.params['name'] + current_name = self.get_current_hostname() + if current_name != name: + self.set_current_hostname(name) + self.changed = True + + def update_permanent_hostname(self): + name = self.module.params['name'] + permanent_name = self.get_permanent_hostname() + if 
permanent_name != name: + self.set_permanent_hostname(name) + self.changed = True def get_current_hostname(self): cmd = [self.hostname_cmd] @@ -131,7 +173,7 @@ def get_current_hostname(self): if rc != 0: self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err)) - return out.strip() + return to_native(out).strip() def set_current_hostname(self, name): cmd = [self.hostname_cmd, name] @@ -161,7 +203,8 @@ def get_permanent_hostname(self): if not os.path.isfile(self.HOSTNAME_FILE): try: open(self.HOSTNAME_FILE, "a").write("") - except IOError, err: + except IOError: + err = get_exception() self.module.fail_json(msg="failed to write file: %s" % str(err)) try: @@ -170,7 +213,8 @@ def get_permanent_hostname(self): return f.read().strip() finally: f.close() - except Exception, err: + except Exception: + err = get_exception() self.module.fail_json(msg="failed to read hostname: %s" % str(err)) @@ -181,7 +225,8 @@ def set_permanent_hostname(self, name): f.write("%s\n" % name) finally: f.close() - except Exception, err: + except Exception: + err = get_exception() self.module.fail_json(msg="failed to update hostname: %s" % str(err)) @@ -198,7 +243,8 @@ def get_permanent_hostname(self): if not os.path.isfile(self.HOSTNAME_FILE): try: open(self.HOSTNAME_FILE, "a").write("") - except IOError, err: + except IOError: + err = get_exception() self.module.fail_json(msg="failed to write file: %s" % str(err)) try: @@ -207,7 +253,8 @@ def get_permanent_hostname(self): return f.read().strip() finally: f.close() - except Exception, err: + except Exception: + err = get_exception() self.module.fail_json(msg="failed to read hostname: %s" % str(err)) @@ -218,7 +265,8 @@ def set_permanent_hostname(self, name): f.write("%s\n" % name) finally: f.close() - except Exception, err: + except Exception: + err = get_exception() self.module.fail_json(msg="failed to update hostname: %s" % str(err)) @@ -241,7 +289,8 @@ def get_permanent_hostname(self): return v.strip() finally: 
f.close() - except Exception, err: + except Exception: + err = get_exception() self.module.fail_json(msg="failed to read hostname: %s" % str(err)) @@ -266,10 +315,64 @@ def set_permanent_hostname(self, name): f.writelines(lines) finally: f.close() - except Exception, err: + except Exception: + err = get_exception() + self.module.fail_json(msg="failed to update hostname: %s" % + str(err)) + +# =========================================== + +class AlpineStrategy(GenericStrategy): + """ + This is a Alpine Linux Hostname manipulation strategy class - it edits + the /etc/hostname file then run hostname -F /etc/hostname. + """ + + HOSTNAME_FILE = '/etc/hostname' + + def update_current_and_permanent_hostname(self): + self.update_permanent_hostname() + self.update_current_hostname() + return self.changed + + def get_permanent_hostname(self): + if not os.path.isfile(self.HOSTNAME_FILE): + try: + open(self.HOSTNAME_FILE, "a").write("") + except IOError: + err = get_exception() + self.module.fail_json(msg="failed to write file: %s" % + str(err)) + try: + f = open(self.HOSTNAME_FILE) + try: + return f.read().strip() + finally: + f.close() + except Exception: + err = get_exception() + self.module.fail_json(msg="failed to read hostname: %s" % + str(err)) + + def set_permanent_hostname(self, name): + try: + f = open(self.HOSTNAME_FILE, 'w+') + try: + f.write("%s\n" % name) + finally: + f.close() + except Exception: + err = get_exception() self.module.fail_json(msg="failed to update hostname: %s" % str(err)) + def set_current_hostname(self, name): + cmd = [self.hostname_cmd, '-F', self.HOSTNAME_FILE] + rc, out, err = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % + (rc, out, err)) + # =========================================== @@ -285,7 +388,7 @@ def get_current_hostname(self): if rc != 0: self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err)) - return out.strip() + return 
to_native(out).strip() def set_current_hostname(self, name): if len(name) > 64: @@ -302,7 +405,7 @@ def get_permanent_hostname(self): if rc != 0: self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err)) - return out.strip() + return to_native(out).strip() def set_permanent_hostname(self, name): if len(name) > 64: @@ -337,7 +440,8 @@ def get_permanent_hostname(self): line = line.strip() if line.startswith('hostname='): return line[10:].strip('"') - except Exception, err: + except Exception: + err = get_exception() self.module.fail_json(msg="failed to read hostname: %s" % str(err)) finally: f.close() @@ -358,7 +462,8 @@ def set_permanent_hostname(self, name): f = open(self.HOSTNAME_FILE, 'w') f.write('\n'.join(lines) + '\n') - except Exception, err: + except Exception: + err = get_exception() self.module.fail_json(msg="failed to update hostname: %s" % str(err)) finally: f.close() @@ -377,7 +482,8 @@ def get_permanent_hostname(self): if not os.path.isfile(self.HOSTNAME_FILE): try: open(self.HOSTNAME_FILE, "a").write("") - except IOError, err: + except IOError: + err = get_exception() self.module.fail_json(msg="failed to write file: %s" % str(err)) try: @@ -386,7 +492,8 @@ def get_permanent_hostname(self): return f.read().strip() finally: f.close() - except Exception, err: + except Exception: + err = get_exception() self.module.fail_json(msg="failed to read hostname: %s" % str(err)) @@ -397,7 +504,8 @@ def set_permanent_hostname(self, name): f.write("%s\n" % name) finally: f.close() - except Exception, err: + except Exception: + err = get_exception() self.module.fail_json(msg="failed to update hostname: %s" % str(err)) @@ -425,7 +533,7 @@ def get_permanent_hostname(self): if rc != 0: self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err)) - return out.strip() + return to_native(out).strip() def set_permanent_hostname(self, name): cmd = [self.hostname_cmd, name] @@ -449,7 +557,8 @@ def get_permanent_hostname(self): if 
not os.path.isfile(self.HOSTNAME_FILE): try: open(self.HOSTNAME_FILE, "a").write("hostname=temporarystub\n") - except IOError, err: + except IOError: + err = get_exception() self.module.fail_json(msg="failed to write file: %s" % str(err)) try: @@ -459,7 +568,8 @@ def get_permanent_hostname(self): line = line.strip() if line.startswith('hostname='): return line[10:].strip('"') - except Exception, err: + except Exception: + err = get_exception() self.module.fail_json(msg="failed to read hostname: %s" % str(err)) finally: f.close() @@ -480,7 +590,8 @@ def set_permanent_hostname(self, name): f = open(self.HOSTNAME_FILE, 'w') f.write('\n'.join(lines) + '\n') - except Exception, err: + except Exception: + err = get_exception() self.module.fail_json(msg="failed to update hostname: %s" % str(err)) finally: f.close() @@ -496,9 +607,7 @@ class SLESHostname(Hostname): platform = 'Linux' distribution = 'Suse linux enterprise server ' distribution_version = get_distribution_version() - if distribution_version and LooseVersion(distribution_version) >= LooseVersion("12"): - strategy_class = SystemdStrategy - elif distribution_version and LooseVersion("10") <= LooseVersion(distribution_version) <= LooseVersion("12"): + if distribution_version and LooseVersion("10") <= LooseVersion(distribution_version) <= LooseVersion("12"): strategy_class = SLESStrategy else: strategy_class = UnimplementedStrategy @@ -521,65 +630,42 @@ class RedHat5Hostname(Hostname): class RedHatServerHostname(Hostname): platform = 'Linux' distribution = 'Red hat enterprise linux server' - distribution_version = get_distribution_version() - if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = SystemdStrategy - else: - strategy_class = RedHatStrategy + strategy_class = RedHatStrategy class RedHatWorkstationHostname(Hostname): platform = 'Linux' distribution = 'Red hat enterprise linux workstation' - distribution_version = get_distribution_version() - if 
distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = SystemdStrategy - else: - strategy_class = RedHatStrategy + strategy_class = RedHatStrategy class CentOSHostname(Hostname): platform = 'Linux' distribution = 'Centos' - distribution_version = get_distribution_version() - if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = SystemdStrategy - else: - strategy_class = RedHatStrategy + strategy_class = RedHatStrategy class CentOSLinuxHostname(Hostname): platform = 'Linux' distribution = 'Centos linux' - distribution_version = get_distribution_version() - if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = SystemdStrategy - else: - strategy_class = RedHatStrategy + strategy_class = RedHatStrategy class ScientificHostname(Hostname): platform = 'Linux' distribution = 'Scientific' - distribution_version = get_distribution_version() - if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = SystemdStrategy - else: - strategy_class = RedHatStrategy + strategy_class = RedHatStrategy class ScientificLinuxHostname(Hostname): platform = 'Linux' distribution = 'Scientific linux' - distribution_version = get_distribution_version() - if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = SystemdStrategy - else: - strategy_class = RedHatStrategy + strategy_class = RedHatStrategy + +class ScientificLinuxCERNHostname(Hostname): + platform = 'Linux' + distribution = 'Scientific linux cern slc' + strategy_class = RedHatStrategy class OracleLinuxHostname(Hostname): platform = 'Linux' distribution = 'Oracle linux server' - distribution_version = get_distribution_version() - if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = SystemdStrategy - else: - strategy_class = RedHatStrategy + 
strategy_class = RedHatStrategy class AmazonLinuxHostname(Hostname): platform = 'Linux' @@ -621,6 +707,11 @@ class ALTLinuxHostname(Hostname): distribution = 'Altlinux' strategy_class = RedHatStrategy +class AlpineLinuxHostname(Hostname): + platform = 'Linux' + distribution = 'Alpine' + strategy_class = AlpineStrategy + class OpenBSDHostname(Hostname): platform = 'OpenBSD' distribution = None @@ -642,23 +733,13 @@ class FreeBSDHostname(Hostname): def main(): module = AnsibleModule( argument_spec = dict( - name=dict(required=True, type='str') + name=dict(required=True) ) ) hostname = Hostname(module) - - changed = False name = module.params['name'] - current_name = hostname.get_current_hostname() - if current_name != name: - hostname.set_current_hostname(name) - changed = True - - permanent_name = hostname.get_permanent_hostname() - if permanent_name != name: - hostname.set_permanent_hostname(name) - changed = True + changed = hostname.update_current_and_permanent_hostname() module.exit_json(changed=changed, name=name, ansible_facts=dict(ansible_hostname=name.split('.')[0], @@ -666,4 +747,5 @@ def main(): ansible_fqdn=socket.getfqdn(), ansible_domain='.'.join(socket.getfqdn().split('.')[1:]))) -main() +if __name__ == '__main__': + main() diff --git a/system/mount.py b/system/mount.py index c7fd2d70f6a..b8fad7747b3 100644 --- a/system/mount.py +++ b/system/mount.py @@ -20,135 +20,223 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ +from ansible.module_utils._text import to_native +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import get_platform +from ansible.module_utils.ismount import ismount +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.six import iteritems +import os + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: mount short_description: Control active and configured mount points description: - - This module controls active and configured mount points in C(/etc/fstab). + - This module controls active and configured mount points in C(/etc/fstab). +author: + - Ansible Core Team + - Seth Vidal version_added: "0.6" options: name: description: - - "path to the mount point, eg: C(/mnt/files)" + - Path to the mount point (e.g. C(/mnt/files)) required: true src: description: - - device to be mounted on I(name). - required: true + - Device to be mounted on I(name). Required when I(state) set to + C(present) or C(mounted). + required: false + default: null fstype: description: - - file-system type - required: true + - Filesystem type. Required when I(state) is C(present) or C(mounted). + required: false + default: null opts: description: - - mount options (see fstab(5)) + - Mount options (see fstab(5), or vfstab(4) on Solaris). required: false default: null dump: description: - - "dump (see fstab(5)), Note that if nulled, C(state=present) will cease to work and duplicate entries will be made with subsequent runs." + - Dump (see fstab(5)). Note that if set to C(null) and I(state) set to + C(present), it will cease to work and duplicate entries will be made + with subsequent runs. + - Has no effect on Solaris systems. required: false default: 0 passno: description: - - "passno (see fstab(5)), Note that if nulled, C(state=present) will cease to work and duplicate entries will be made with subsequent runs." + - Passno (see fstab(5)). 
Note that if set to C(null) and I(state) set to + C(present), it will cease to work and duplicate entries will be made + with subsequent runs. + - Deprecated on Solaris systems. required: false default: 0 state: description: - - If C(mounted) or C(unmounted), the device will be actively mounted or unmounted as needed and appropriately configured in I(fstab). - - C(absent) and C(present) only deal with I(fstab) but will not affect current mounting. - - If specifying C(mounted) and the mount point is not present, the mount point will be created. Similarly. - - Specifying C(absent) will remove the mount point directory. + - If C(mounted) or C(unmounted), the device will be actively mounted or + unmounted as needed and appropriately configured in I(fstab). + - C(absent) and C(present) only deal with I(fstab) but will not affect + current mounting. + - If specifying C(mounted) and the mount point is not present, the mount + point will be created. + - Similarly, specifying C(absent) will remove the mount point directory. required: true - choices: [ "present", "absent", "mounted", "unmounted" ] + choices: ["present", "absent", "mounted", "unmounted"] fstab: description: - - file to use instead of C(/etc/fstab). You shouldn't use that option + - File to use instead of C(/etc/fstab). You shouldn't use this option unless you really know what you are doing. This might be useful if - you need to configure mountpoints in a chroot environment. + you need to configure mountpoints in a chroot environment. OpenBSD + does not allow specifying alternate fstab files with mount so do not + use this on OpenBSD with any state that operates on the live filesystem. required: false - default: /etc/fstab - -author: - - Ansible Core Team - - Seth Vidal + default: /etc/fstab (/etc/vfstab on Solaris) + boot: + version_added: 2.2 + description: + - Determines if the filesystem should be mounted on boot. + - Only applies to Solaris systems. 
+ required: false + default: yes + choices: ["yes", "no"] ''' -EXAMPLES = ''' -# Mount DVD read-only -- mount: name=/mnt/dvd src=/dev/sr0 fstype=iso9660 opts=ro state=present - -# Mount up device by label -- mount: name=/srv/disk src='LABEL=SOME_LABEL' fstype=ext4 state=present -# Mount up device by UUID -- mount: name=/home src='UUID=b3e48f45-f933-4c8e-a700-22a159ec9077' fstype=xfs opts=noatime state=present +EXAMPLES = ''' +- name: Mount DVD read-only + mount: + name: /mnt/dvd + src: /dev/sr0 + fstype: iso9660 + opts: ro + state: present + +- name: Mount up device by label + mount: + name: /srv/disk + src: LABEL=SOME_LABEL + fstype: ext4 + state: present + +- name: Mount up device by UUID + mount: + name: /home + src: UUID=b3e48f45-f933-4c8e-a700-22a159ec9077 + fstype: xfs + opts: noatime + state: present ''' def write_fstab(lines, dest): - fs_w = open(dest, 'w') + for l in lines: fs_w.write(l) fs_w.flush() fs_w.close() + def _escape_fstab(v): - """ escape space (040), ampersand (046) and backslash (134) which are invalid in fstab fields """ - return v.replace('\\', '\\134').replace(' ', '\\040').replace('&', '\\046') - -def set_mount(module, **kwargs): - """ set/change a mount point location in fstab """ - - # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab - args = dict( - opts = 'defaults', - dump = '0', - passno = '0', - fstab = '/etc/fstab' - ) - args.update(kwargs) + """Escape invalid characters in fstab fields. - # save the mount name before space replacement - origname = args['name'] - # replace any space in mount name with '\040' to make it fstab compatible (man fstab) - args['name'] = args['name'].replace(' ', r'\040') + space (040) + ampersand (046) + backslash (134) + """ - new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n' + if isinstance(v, int): + return v + else: + return( + v. + replace('\\', '\\134'). + replace(' ', '\\040'). 
+ replace('&', '\\046')) + + +def set_mount(module, args): + """Set/change a mount point location in fstab.""" to_write = [] exists = False changed = False - escaped_args = dict([(k, _escape_fstab(v)) for k, v in args.iteritems()]) + escaped_args = dict([(k, _escape_fstab(v)) for k, v in iteritems(args)]) + new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n' + + if get_platform() == 'SunOS': + new_line = ( + '%(src)s - %(name)s %(fstype)s %(passno)s %(boot)s %(opts)s\n') + for line in open(args['fstab'], 'r').readlines(): if not line.strip(): to_write.append(line) + continue + if line.strip().startswith('#'): to_write.append(line) + continue - if len(line.split()) != 6: - # not sure what this is or why it is here - # but it is not our fault so leave it be + + # Check if we got a valid line for splitting + if ( + get_platform() == 'SunOS' and len(line.split()) != 7 or + get_platform() != 'SunOS' and len(line.split()) != 6): to_write.append(line) + continue ld = {} - ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split() + if get_platform() == 'SunOS': + ( + ld['src'], + dash, + ld['name'], + ld['fstype'], + ld['passno'], + ld['boot'], + ld['opts'] + ) = line.split() + else: + ( + ld['src'], + ld['name'], + ld['fstype'], + ld['opts'], + ld['dump'], + ld['passno'] + ) = line.split() + + # Check if we found the correct line if ld['name'] != escaped_args['name']: to_write.append(line) + continue - # it exists - now see if what we have is different + # If we got here we found a match - let's check if there is any + # difference exists = True - for t in ('src', 'fstype','opts', 'dump', 'passno'): + args_to_check = ('src', 'fstype', 'opts', 'dump', 'passno') + + if get_platform() == 'SunOS': + args_to_check = ('src', 'fstype', 'passno', 'boot', 'opts') + + for t in args_to_check: if ld[t] != escaped_args[t]: - changed = True ld[t] = escaped_args[t] + changed = True if changed: to_write.append(new_line % ld) @@ -156,223 
+244,497 @@ def set_mount(module, **kwargs): to_write.append(line) if not exists: - to_write.append(new_line % args) + to_write.append(new_line % escaped_args) changed = True if changed and not module.check_mode: write_fstab(to_write, args['fstab']) - # mount function needs origname - return (origname, changed) + return (args['name'], changed) -def unset_mount(module, **kwargs): - """ remove a mount point from fstab """ - - # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab - args = dict( - opts = 'default', - dump = '0', - passno = '0', - fstab = '/etc/fstab' - ) - args.update(kwargs) - - # save the mount name before space replacement - origname = args['name'] - # replace any space in mount name with '\040' to make it fstab compatible (man fstab) - args['name'] = args['name'].replace(' ', r'\040') +def unset_mount(module, args): + """Remove a mount point from fstab.""" to_write = [] changed = False escaped_name = _escape_fstab(args['name']) + for line in open(args['fstab'], 'r').readlines(): if not line.strip(): to_write.append(line) + continue + if line.strip().startswith('#'): to_write.append(line) + continue - if len(line.split()) != 6: - # not sure what this is or why it is here - # but it is not our fault so leave it be + + # Check if we got a valid line for splitting + if ( + get_platform() == 'SunOS' and len(line.split()) != 7 or + get_platform() != 'SunOS' and len(line.split()) != 6): to_write.append(line) + continue ld = {} - ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split() + + if get_platform() == 'SunOS': + ( + ld['src'], + dash, + ld['name'], + ld['fstype'], + ld['passno'], + ld['boot'], + ld['opts'] + ) = line.split() + else: + ( + ld['src'], + ld['name'], + ld['fstype'], + ld['opts'], + ld['dump'], + ld['passno'] + ) = line.split() if ld['name'] != escaped_name: to_write.append(line) + continue - # if we got here we found a match - continue and mark changed + # If we got here we found a 
match - continue and mark changed changed = True if changed and not module.check_mode: write_fstab(to_write, args['fstab']) - # umount needs origname - return (origname, changed) - + return (args['name'], changed) -def mount(module, **kwargs): - """ mount up a path or remount if needed """ +def _set_fstab_args(fstab_file): + result = [] + if fstab_file and fstab_file != '/etc/fstab': + if get_platform().lower().endswith('bsd'): + result.append('-F') + else: + result.append('-T') + result.append(fstab_file) + return result - # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab - args = dict( - opts = 'default', - dump = '0', - passno = '0', - fstab = '/etc/fstab' - ) - args.update(kwargs) +def mount(module, args): + """Mount up a path or remount if needed.""" - mount_bin = module.get_bin_path('mount') + mount_bin = module.get_bin_path('mount', required=True) + name = args['name'] + cmd = [mount_bin] - name = kwargs['name'] - - cmd = [ mount_bin, ] - - if os.path.ismount(name): - cmd += [ '-o', 'remount', ] + if ismount(name): + return remount(module, mount_bin, args) - if get_platform().lower() == 'freebsd': - cmd += [ '-F', args['fstab'], ] + if get_platform().lower() == 'openbsd': + # Use module.params['fstab'] here as args['fstab'] has been set to the + # default value. + if module.params['fstab'] is not None: + module.fail_json(msg='OpenBSD does not support alternate fstab files. 
Do not specify the fstab parameter for OpenBSD hosts') + else: + cmd += _set_fstab_args(args['fstab']) - cmd += [ name, ] + cmd += [name] rc, out, err = module.run_command(cmd) + if rc == 0: return 0, '' else: return rc, out+err -def umount(module, **kwargs): - """ unmount a path """ - umount_bin = module.get_bin_path('umount') - name = kwargs['name'] - cmd = [umount_bin, name] +def umount(module, dest): + """Unmount a path.""" + + umount_bin = module.get_bin_path('umount', required=True) + cmd = [umount_bin, dest] rc, out, err = module.run_command(cmd) + if rc == 0: return 0, '' else: return rc, out+err -def main(): +def remount(module, mount_bin, args): + ''' will try to use -o remount first and fallback to unmount/mount if unsupported''' + msg = '' + cmd = [mount_bin] + + # multiplatform remount opts + if get_platform().lower().endswith('bsd'): + cmd += ['-u'] + else: + cmd += ['-o', 'remount' ] + + if get_platform().lower() == 'openbsd': + # Use module.params['fstab'] here as args['fstab'] has been set to the + # default value. + if module.params['fstab'] is not None: + module.fail_json(msg='OpenBSD does not support alternate fstab files. 
Do not specify the fstab parameter for OpenBSD hosts') + else: + cmd += _set_fstab_args(args['fstab']) + cmd += [ args['name'], ] + out = err = '' + try: + if get_platform().lower().endswith('bsd'): + # Note: Forcing BSDs to do umount/mount due to BSD remount not + # working as expected (suspect bug in the BSD mount command) + # Interested contributor could rework this to use mount options on + # the CLI instead of relying on fstab + # https://github.com/ansible/ansible-modules-core/issues/5591 + rc = 1 + else: + rc, out, err = module.run_command(cmd) + except: + rc = 1 + + if rc != 0: + msg = out + err + if ismount(args['name']): + rc, msg = umount(module, args['name']) + if rc == 0: + rc, msg = mount(module, args) + return rc, msg + +# Note if we wanted to put this into module_utils we'd have to get permission +# from @jupeter -- https://github.com/ansible/ansible-modules-core/pull/2923 +# @jtyr -- https://github.com/ansible/ansible-modules-core/issues/4439 +# and @abadger to relicense from GPLv3+ +def is_bind_mounted(module, linux_mounts, dest, src=None, fstype=None): + """Return whether the dest is bind mounted + + :arg module: The AnsibleModule (used for helper functions) + :arg dest: The directory to be mounted under. This is the primary means + of identifying whether the destination is mounted. + :kwarg src: The source directory. If specified, this is used to help + ensure that we are detecting that the correct source is mounted there. + :kwarg fstype: The filesystem type. If specified this is also used to + help ensure that we are detecting the right mount. + :kwarg linux_mounts: Cached list of mounts for Linux. + :returns: True if the dest is mounted with src otherwise False. 
+ """ + + is_mounted = False + + if get_platform() == 'Linux' and linux_mounts is not None: + if src is None: + # That's for unmounted/absent + if dest in linux_mounts: + is_mounted = True + else: + # That's for mounted + if dest in linux_mounts and linux_mounts[dest]['src'] == src: + is_mounted = True + else: + bin_path = module.get_bin_path('mount', required=True) + cmd = '%s -l' % bin_path + rc, out, err = module.run_command(cmd) + mounts = [] + + if len(out): + mounts = to_native(out).strip().split('\n') + + for mnt in mounts: + arguments = mnt.split() + + if ( + (arguments[0] == src or src is None) and + arguments[2] == dest and + (arguments[4] == fstype or fstype is None)): + is_mounted = True + + if is_mounted: + break + + return is_mounted + + +def get_linux_mounts(module): + """Gather mount information""" + + mntinfo_file = "/proc/self/mountinfo" + + try: + f = open(mntinfo_file) + except IOError: + return + + lines = map(str.strip, f.readlines()) + + try: + f.close() + except IOError: + module.fail_json(msg="Cannot close file %s" % mntinfo_file) + + mntinfo = [] + + for line in lines: + fields = line.split() + + record = { + 'root': fields[3], + 'dst': fields[4], + 'opts': fields[5], + 'fields': fields[6:-4], + 'fs': fields[-3], + 'src': fields[-2], + } + + mntinfo.append(record) + + mounts = {} + + for i, mnt in enumerate(mntinfo): + src = mnt['src'] + + if mnt['fs'] == 'tmpfs' and mnt['root'] != '/': + # == Example: + # 65 19 0:35 / /tmp rw shared:25 - tmpfs tmpfs rw + # 210 65 0:35 /aaa /tmp/bbb rw shared:25 - tmpfs tmpfs rw + # == Expected result: + # src=/tmp/aaa + # == + + shared = None + + # Search for the shared field + for fld in mnt['fields']: + if fld.startswith('shared'): + shared = fld + if shared is None: + continue + + dest = None + + # Search fo the record with the same field + for j, m in enumerate(mntinfo): + if j < i: + if shared in m['fields']: + dest = m['dst'] + else: + break + + if dest is not None: + src = "%s%s" % (dest, 
mnt['root']) + else: + continue + + elif mnt['root'] != '/' and len(mnt['fields']) > 0: + # == Example: + # 67 19 8:18 / /mnt/disk2 rw shared:26 - ext4 /dev/sdb2 rw + # 217 65 8:18 /test /tmp/ccc rw shared:26 - ext4 /dev/sdb2 rw + # == Expected result: + # src=/mnt/disk2/test + # == + + # Search for parent + for j, m in enumerate(mntinfo): + if j < i: + if m['src'] == mnt['src']: + src = "%s%s" % (m['dst'], mnt['root']) + else: + break + + elif mnt['root'] != '/' and len(mnt['fields']) == 0: + # == Example 1: + # 27 20 8:1 /tmp/aaa /tmp/bbb rw - ext4 /dev/sdb2 rw + # == Example 2: + # 204 136 253:2 /rootfs / rw - ext4 /dev/sdb2 rw + # 141 140 253:2 /rootfs/tmp/aaa /tmp/bbb rw - ext4 /dev/sdb2 rw + # == Expected result: + # src=/tmp/aaa + # == + + src = mnt['root'] + + # Search for parent + for j, m in enumerate(mntinfo): + if j < i: + if ( + m['src'] == mnt['src'] and + mnt['root'].startswith(m['root'])): + src = src.replace("%s/" % m['root'], '/', 1) + else: + break + + mounts[mnt['dst']] = { + 'src': src, + 'opts': mnt['opts'], + 'fs': mnt['fs'] + } + + return mounts + + +def main(): module = AnsibleModule( - argument_spec = dict( - state = dict(required=True, choices=['present', 'absent', 'mounted', 'unmounted']), - name = dict(required=True), - opts = dict(default=None), - passno = dict(default=None), - dump = dict(default=None), - src = dict(required=True), - fstype = dict(required=True), - fstab = dict(default='/etc/fstab') + argument_spec=dict( + boot=dict(default='yes', choices=['yes', 'no']), + dump=dict(), + fstab=dict(default=None), + fstype=dict(), + name=dict(required=True, type='path'), + opts=dict(), + passno=dict(type='str'), + src=dict(type='path'), + state=dict( + required=True, + choices=['present', 'absent', 'mounted', 'unmounted']), ), - supports_check_mode=True + supports_check_mode=True, + required_if=( + ['state', 'mounted', ['src', 'fstype']], + ['state', 'present', ['src', 'fstype']] + ) ) - changed = False - rc = 0 - args = { - 'name': 
module.params['name'], - 'src': module.params['src'], - 'fstype': module.params['fstype'] - } - if module.params['passno'] is not None: - args['passno'] = module.params['passno'] - if module.params['opts'] is not None: - args['opts'] = module.params['opts'] - if module.params['dump'] is not None: - args['dump'] = module.params['dump'] - if module.params['fstab'] is not None: - args['fstab'] = module.params['fstab'] - - # if fstab file does not exist, we first need to create it. This mainly - # happens when fstab optin is passed to the module. + # solaris args: + # name, src, fstype, opts, boot, passno, state, fstab=/etc/vfstab + # linux args: + # name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab + # Note: Do not modify module.params['fstab'] as we need to know if the user + # explicitly specified it in mount() and remount() + if get_platform().lower() == 'sunos': + args = dict( + name=module.params['name'], + opts='-', + passno='-', + fstab=module.params['fstab'], + boot='yes' + ) + if args['fstab'] is None: + args['fstab'] = '/etc/vfstab' + else: + args = dict( + name=module.params['name'], + opts='defaults', + dump='0', + passno='0', + fstab=module.params['fstab'] + ) + if args['fstab'] is None: + args['fstab'] = '/etc/fstab' + + # FreeBSD doesn't have any 'default' so set 'rw' instead + if get_platform() == 'FreeBSD': + args['opts'] = 'rw' + + linux_mounts = [] + + # Cache all mounts here in order we have consistent results if we need to + # call is_bind_mouted() multiple times + if get_platform() == 'Linux': + linux_mounts = get_linux_mounts(module) + + if linux_mounts is None: + args['warnings'] = ( + 'Cannot open file /proc/self/mountinfo. ' + 'Bind mounts might be misinterpreted.') + + # Override defaults with user specified params + for key in ('src', 'fstype', 'passno', 'opts', 'dump', 'fstab'): + if module.params[key] is not None: + args[key] = module.params[key] + + # If fstab file does not exist, we first need to create it. 
This mainly + # happens when fstab option is passed to the module. if not os.path.exists(args['fstab']): if not os.path.exists(os.path.dirname(args['fstab'])): os.makedirs(os.path.dirname(args['fstab'])) - open(args['fstab'],'a').close() - # absent == remove from fstab and unmounted - # unmounted == do not change fstab state, but unmount - # present == add to fstab, do not change mount state - # mounted == add to fstab if not there and make sure it is mounted, if it has changed in fstab then remount it + open(args['fstab'], 'a').close() + + # absent: + # Remove from fstab and unmounted. + # unmounted: + # Do not change fstab state, but unmount. + # present: + # Add to fstab, do not change mount state. + # mounted: + # Add to fstab if not there and make sure it is mounted. If it has + # changed in fstab then remount it. state = module.params['state'] - name = module.params['name'] + name = module.params['name'] + if state == 'absent': - name, changed = unset_mount(module, **args) + name, changed = unset_mount(module, args) + if changed and not module.check_mode: - if os.path.ismount(name): - res,msg = umount(module, **args) + if ismount(name) or is_bind_mounted(module, linux_mounts, name): + res, msg = umount(module, name) + if res: - module.fail_json(msg="Error unmounting %s: %s" % (name, msg)) + module.fail_json( + msg="Error unmounting %s: %s" % (name, msg)) if os.path.exists(name): try: os.rmdir(name) - except (OSError, IOError), e: + except (OSError, IOError): + e = get_exception() module.fail_json(msg="Error rmdir %s: %s" % (name, str(e))) - - module.exit_json(changed=changed, **args) - - if state == 'unmounted': - if os.path.ismount(name): + elif state == 'unmounted': + if ismount(name) or is_bind_mounted(module, linux_mounts, name): if not module.check_mode: - res,msg = umount(module, **args) - if res: - module.fail_json(msg="Error unmounting %s: %s" % (name, msg)) - changed = True + res, msg = umount(module, name) - module.exit_json(changed=changed, **args) 
+ if res: + module.fail_json( + msg="Error unmounting %s: %s" % (name, msg)) - if state in ['mounted', 'present']: - if state == 'mounted': - if not os.path.exists(name) and not module.check_mode: - try: - os.makedirs(name) - except (OSError, IOError), e: - module.fail_json(msg="Error making dir %s: %s" % (name, str(e))) - - name, changed = set_mount(module, **args) - if state == 'mounted': - res = 0 - if os.path.ismount(name): - if changed and not module.check_mode: - res,msg = mount(module, **args) - elif 'bind' in args.get('opts', []): - changed = True - cmd = 'mount -l' - rc, out, err = module.run_command(cmd) - allmounts = out.split('\n') - for mounts in allmounts[:-1]: - arguments = mounts.split() - if arguments[0] == args['src'] and arguments[2] == args['name'] and arguments[4] == args['fstype']: - changed = False - if changed: - res,msg = mount(module, **args) - else: + changed = True + elif state == 'mounted': + if not os.path.exists(name) and not module.check_mode: + try: + os.makedirs(name) + except (OSError, IOError): + e = get_exception() + module.fail_json( + msg="Error making dir %s: %s" % (name, str(e))) + + name, changed = set_mount(module, args) + res = 0 + + if ismount(name): + if changed and not module.check_mode: + res, msg = mount(module, args) changed = True - if not module.check_mode: - res,msg = mount(module, **args) + elif 'bind' in args.get('opts', []): + changed = True + if is_bind_mounted( module, linux_mounts, name, args['src'], args['fstype']): + changed = False - if res: - module.fail_json(msg="Error mounting %s: %s" % (name, msg)) + if changed and not module.check_mode: + res, msg = mount(module, args) + else: + changed = True + if not module.check_mode: + res, msg = mount(module, args) + + if res: + module.fail_json(msg="Error mounting %s: %s" % (name, msg)) + elif state == 'present': + name, changed = set_mount(module, args) + else: + module.fail_json(msg='Unexpected position reached') - module.exit_json(changed=changed, **args) + 
module.exit_json(changed=changed, **args) - module.fail_json(msg='Unexpected position reached') -# import module snippets -from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/system/ping.py b/system/ping.py index ed93f7dfe11..bee23a95c4a 100644 --- a/system/ping.py +++ b/system/ping.py @@ -2,6 +2,7 @@ # -*- coding: utf-8 -*- # (c) 2012, Michael DeHaan +# (c) 2016, Toshio Kuratomi # # This file is part of Ansible # @@ -19,6 +20,10 @@ # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ping @@ -40,23 +45,22 @@ ansible webservers -m ping ''' -import exceptions +from ansible.module_utils.basic import AnsibleModule def main(): module = AnsibleModule( - argument_spec = dict( + argument_spec=dict( data=dict(required=False, default=None), ), - supports_check_mode = True + supports_check_mode=True ) result = dict(ping='pong') if module.params['data']: if module.params['data'] == 'crash': - raise exceptions.Exception("boom") + raise Exception("boom") result['ping'] = module.params['data'] module.exit_json(**result) -from ansible.module_utils.basic import * - -main() +if __name__ == '__main__': + main() diff --git a/system/seboolean.py b/system/seboolean.py index 3a150d05a20..9246d912a07 100644 --- a/system/seboolean.py +++ b/system/seboolean.py @@ -17,6 +17,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: seboolean @@ -50,7 +54,10 @@ EXAMPLES = ''' # Set (httpd_can_network_connect) flag on and keep it persistent across reboots -- seboolean: name=httpd_can_network_connect state=yes persistent=yes +- seboolean: + name: httpd_can_network_connect + state: yes + persistent: yes ''' try: @@ -69,9 +76,9 @@ def has_boolean_value(module, name): bools = [] try: rc, bools = selinux.security_get_boolean_names() - except OSError, e: + except OSError: module.fail_json(msg="Failed to get list of boolean names") - if name in bools: + if to_bytes(name) in bools: return True else: return False @@ -80,7 +87,7 @@ def get_boolean_value(module, name): state = 0 try: state = selinux.security_get_boolean_active(name) - except OSError, e: + except OSError: module.fail_json(msg="Failed to determine current state for boolean %s" % name) if state == 1: return True @@ -138,7 +145,8 @@ def semanage_boolean_value(module, name, state): semanage.semanage_disconnect(handle) semanage.semanage_handle_destroy(handle) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="Failed to manage policy for boolean %s: %s" % (name, str(e))) return True @@ -149,7 +157,7 @@ def set_boolean_value(module, name, state): value = 1 try: rc = selinux.security_set_boolean(name, value) - except OSError, e: + except OSError: module.fail_json(msg="Failed to set boolean %s to %s" % (name, value)) if rc == 0: return True @@ -181,6 +189,11 @@ def main(): result = {} result['name'] = name + if hasattr(selinux, 'selinux_boolean_sub'): + # selinux_boolean_sub allows sites to rename a boolean and alias the old name + # Feature only available in selinux library since 2012. + name = selinux.selinux_boolean_sub(name) + if not has_boolean_value(module, name): module.fail_json(msg="SELinux boolean %s does not exist." 
% name) @@ -209,4 +222,7 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils._text import to_bytes + +if __name__ == '__main__': + main() diff --git a/system/selinux.py b/system/selinux.py index 2debb95a475..634ef4efcad 100644 --- a/system/selinux.py +++ b/system/selinux.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: selinux @@ -49,9 +53,19 @@ ''' EXAMPLES = ''' -- selinux: policy=targeted state=enforcing -- selinux: policy=targeted state=permissive -- selinux: state=disabled +# Enable SELinux +- selinux: + policy: targeted + state: enforcing + +# Put SELinux in permissive mode, logging actions that would be blocked. +- selinux: + policy: targeted + state: permissive + +# Disable SELinux +- selinux: + state: disabled ''' import os diff --git a/system/service.py b/system/service.py index fd46cae9a95..b1cb289b49f 100644 --- a/system/service.py +++ b/system/service.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: service @@ -74,29 +78,51 @@ description: - Additional arguments provided on the command line aliases: [ 'args' ] + use: + description: + - The service module actually uses system specific modules, normally through auto detection, this setting can force a specific module. + - Normally it uses the value of the 'ansible_service_mgr' fact and falls back to the old 'service' module when none matching is found. 
+ default: 'auto' + version_added: 2.2 ''' EXAMPLES = ''' # Example action to start service httpd, if not running -- service: name=httpd state=started +- service: + name: httpd + state: started # Example action to stop service httpd, if running -- service: name=httpd state=stopped +- service: + name: httpd + state: stopped # Example action to restart service httpd, in all cases -- service: name=httpd state=restarted +- service: + name: httpd + state: restarted # Example action to reload service httpd, in all cases -- service: name=httpd state=reloaded +- service: + name: httpd + state: reloaded # Example action to enable service httpd, and not touch the running state -- service: name=httpd enabled=yes +- service: + name: httpd + enabled: yes # Example action to start service foo, based on running process /usr/bin/foo -- service: name=foo pattern=/usr/bin/foo state=started +- service: + name: foo + pattern: /usr/bin/foo + state: started # Example action to restart network service for interface eth0 -- service: name=network state=restarted args=eth0 +- service: + name: network + state: restarted + args: eth0 ''' @@ -109,6 +135,7 @@ import time import string import glob +from ansible.module_utils.service import fail_if_missing # The distutils module is not shipped with SUNWPython on Solaris. 
# It's in the SUNWPython-devel package which also contains development files @@ -471,7 +498,7 @@ def check_systemd(): self.enable_cmd = location['chkconfig'] if self.enable_cmd is None: - self.module.fail_json(msg="no service or tool found for: %s" % self.name) + fail_if_missing(self.module, False, self.name, msg='host') # If no service control tool selected yet, try to see if 'service' is available if self.svc_cmd is None and location.get('service', False): @@ -496,6 +523,8 @@ def sysv_is_enabled(name): (rc, out, err) = self.execute_command("%s is-enabled %s" % (self.enable_cmd, service_name,)) if rc == 0: return True + elif out.startswith('disabled'): + return False elif sysv_exists(service_name): return sysv_is_enabled(service_name) else: @@ -514,27 +543,30 @@ def get_systemd_status_dict(self): value_buffer = [] status_dict = {} for line in out.splitlines(): - if not key: - key, value = line.split('=', 1) - # systemd fields that are shell commands can be multi-line - # We take a value that begins with a "{" as the start of - # a shell command and a line that ends with "}" as the end of - # the command - if value.lstrip().startswith('{'): - if value.rstrip().endswith('}'): + if '=' in line: + if not key: + key, value = line.split('=', 1) + # systemd fields that are shell commands can be multi-line + # We take a value that begins with a "{" as the start of + # a shell command and a line that ends with "}" as the end of + # the command + if value.lstrip().startswith('{'): + if value.rstrip().endswith('}'): + status_dict[key] = value + key = None + else: + value_buffer.append(value) + else: status_dict[key] = value key = None + else: + if line.rstrip().endswith('}'): + status_dict[key] = '\n'.join(value_buffer) + key = None else: value_buffer.append(value) - else: - status_dict[key] = value - key = None else: - if line.rstrip().endswith('}'): - status_dict[key] = '\n'.join(value_buffer) - key = None - else: - value_buffer.append(value) + value_buffer.append(value) 
return status_dict @@ -777,24 +809,23 @@ def write_to_override_file(file_name, file_contents, ): action = 'enable' klinks = glob.glob('/etc/rc?.d/K??' + self.name) if not klinks: - (rc, out, err) = self.execute_command("%s %s defaults" % (self.enable_cmd, self.name)) - if rc != 0: - if err: - self.module.fail_json(msg=err) - else: - self.module.fail_json(msg=out) % (self.enable_cmd, self.name, action) + if not self.module.check_mode: + (rc, out, err) = self.execute_command("%s %s defaults" % (self.enable_cmd, self.name)) + if rc != 0: + if err: + self.module.fail_json(msg=err) + else: + self.module.fail_json(msg=out) % (self.enable_cmd, self.name, action) else: action = 'disable' - if self.module.check_mode: - rc = 0 - return - (rc, out, err) = self.execute_command("%s %s %s" % (self.enable_cmd, self.name, action)) - if rc != 0: - if err: - self.module.fail_json(msg=err) - else: - self.module.fail_json(msg=out) % (self.enable_cmd, self.name, action) + if not self.module.check_mode: + (rc, out, err) = self.execute_command("%s %s %s" % (self.enable_cmd, self.name, action)) + if rc != 0: + if err: + self.module.fail_json(msg=err) + else: + self.module.fail_json(msg=out) % (self.enable_cmd, self.name, action) else: self.changed = False @@ -941,10 +972,11 @@ class FreeBsdService(Service): def get_service_tools(self): self.svc_cmd = self.module.get_bin_path('service', True) - if not self.svc_cmd: self.module.fail_json(msg='unable to find service binary') + self.sysrc_cmd = self.module.get_bin_path('sysrc') + def get_service_status(self): rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'onestatus', self.arguments)) if self.name == "pf": @@ -983,16 +1015,45 @@ def service_enable(self): # and hope for the best. 
for rcvar in rcvars: if '=' in rcvar: - self.rcconf_key = rcvar.split('=')[0] + self.rcconf_key, default_rcconf_value = rcvar.split('=', 1) break if self.rcconf_key is None: self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr) - try: - return self.service_enable_rcconf() - except Exception: - self.module.fail_json(msg='unable to set rcvar') + if self.sysrc_cmd: # FreeBSD >= 9.2 + + rc, current_rcconf_value, stderr = self.execute_command("%s -n %s" % (self.sysrc_cmd, self.rcconf_key)) + # it can happen that rcvar is not set (case of a system coming from the ports collection) + # so we will fallback on the default + if rc != 0: + current_rcconf_value = default_rcconf_value + + if current_rcconf_value.strip().upper() != self.rcconf_value: + + self.changed = True + + if self.module.check_mode: + self.module.exit_json(changed=True, msg="changing service enablement") + + rc, change_stdout, change_stderr = self.execute_command("%s %s=\"%s\"" % (self.sysrc_cmd, self.rcconf_key, self.rcconf_value ) ) + if rc != 0: + self.module.fail_json(msg="unable to set rcvar using sysrc", stdout=change_stdout, stderr=change_stderr) + + # sysrc does not exit with code 1 on permission error => validate successful change using service(8) + rc, check_stdout, check_stderr = self.execute_command("%s %s %s" % (self.svc_cmd, self.name, "enabled")) + if self.enable != (rc == 0): # rc = 0 indicates enabled service, rc = 1 indicates disabled service + self.module.fail_json(msg="unable to set rcvar: sysrc did not change value", stdout=change_stdout, stderr=change_stderr) + + else: + self.changed = False + + else: # Legacy (FreeBSD < 9.2) + try: + return self.service_enable_rcconf() + except Exception: + self.module.fail_json(msg='unable to set rcvar') + def service_control(self): @@ -1003,7 +1064,12 @@ def service_control(self): if self.action == "reload": self.action = "onereload" - return self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, self.action, 
self.arguments)) + ret = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, self.action, self.arguments)) + + if self.sleep: + time.sleep(self.sleep) + + return ret # =========================================== # Subclass: OpenBSD @@ -1313,7 +1379,7 @@ def service_enable(self): def service_control(self): status = self.get_sunos_svcs_status() - # if starting or reloading, clear maintenace states + # if starting or reloading, clear maintenance states if self.action in ['start', 'reload', 'restart'] and status in ['maintenance', 'degraded']: rc, stdout, stderr = self.execute_command("%s clear %s" % (self.svcadm_cmd, self.name)) if rc != 0: @@ -1328,9 +1394,9 @@ def service_control(self): elif self.action == 'stop': subcmd = "disable -st" elif self.action == 'reload': - subcmd = "refresh" + subcmd = "refresh -s" elif self.action == 'restart' and status == 'online': - subcmd = "restart" + subcmd = "restart -s" elif self.action == 'restart' and status != 'online': subcmd = "enable -rst" @@ -1425,10 +1491,9 @@ def main(): runlevel = dict(required=False, default='default'), arguments = dict(aliases=['args'], default=''), ), - supports_check_mode=True + supports_check_mode=True, + required_one_of=[['state', 'enabled']], ) - if module.params['state'] is None and module.params['enabled'] is None: - module.fail_json(msg="Neither 'state' nor 'enabled' set") service = Service(module) @@ -1507,4 +1572,5 @@ def main(): from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/system/setup.py b/system/setup.py index eca7287a721..81bbf43ddba 100644 --- a/system/setup.py +++ b/system/setup.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: setup @@ -37,6 +41,12 @@ not want to display those." 
required: false default: 'all' + gather_timeout: + version_added: "2.2" + description: + - "Set the default timeout in seconds for individual fact gathering" + required: false + default: 10 filter: version_added: "1.1" description: @@ -111,8 +121,9 @@ def main(): module = AnsibleModule( argument_spec = dict( gather_subset=dict(default=["all"], required=False, type='list'), + gather_timeout=dict(default=10, required=False, type='int'), filter=dict(default="*", required=False), - fact_path=dict(default='/etc/ansible/facts.d', required=False), + fact_path=dict(default='/etc/ansible/facts.d', required=False, type='path'), ), supports_check_mode = True, ) diff --git a/system/sysctl.py b/system/sysctl.py index ae110b0653a..43312b0922e 100644 --- a/system/sysctl.py +++ b/system/sysctl.py @@ -20,6 +20,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: sysctl @@ -76,19 +80,37 @@ EXAMPLES = ''' # Set vm.swappiness to 5 in /etc/sysctl.conf -- sysctl: name=vm.swappiness value=5 state=present +- sysctl: + name: vm.swappiness + value: 5 + state: present # Remove kernel.panic entry from /etc/sysctl.conf -- sysctl: name=kernel.panic state=absent sysctl_file=/etc/sysctl.conf +- sysctl: + name: kernel.panic + state: absent + sysctl_file: /etc/sysctl.conf # Set kernel.panic to 3 in /tmp/test_sysctl.conf -- sysctl: name=kernel.panic value=3 sysctl_file=/tmp/test_sysctl.conf reload=no +- sysctl: + name: kernel.panic + value: 3 + sysctl_file: /tmp/test_sysctl.conf + reload: no # Set ip forwarding on in /proc and do not reload the sysctl file -- sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes +- sysctl: + name: net.ipv4.ip_forward + value: 1 + sysctl_set: yes # Set ip forwarding on in /proc and in the sysctl file and reload if necessary -- sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes state=present reload=yes +- sysctl: + name: 
net.ipv4.ip_forward + value: 1 + sysctl_set: yes + state: present + reload: yes ''' # ============================================================== @@ -275,7 +297,8 @@ def read_sysctl_file(self): f = open(self.sysctl_file, "r") lines = f.readlines() f.close() - except IOError, e: + except IOError: + e = get_exception() self.module.fail_json(msg="Failed to open %s: %s" % (self.sysctl_file, str(e))) for line in lines: @@ -325,7 +348,8 @@ def write_sysctl(self): try: for l in self.fixed_lines: f.write(l.strip() + "\n") - except IOError, e: + except IOError: + e = get_exception() self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, str(e))) f.flush() f.close() diff --git a/system/systemd.py b/system/systemd.py new file mode 100644 index 00000000000..0b6c05351bc --- /dev/null +++ b/system/systemd.py @@ -0,0 +1,411 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2016, Brian Coca +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION = ''' +module: systemd +author: + - "Ansible Core Team" +version_added: "2.2" +short_description: Manage services. +description: + - Controls systemd services on remote hosts. +options: + name: + required: true + description: + - Name of the service. 
+ aliases: ['unit', 'service'] + state: + required: false + default: null + choices: [ 'started', 'stopped', 'restarted', 'reloaded' ] + description: + - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary. + C(restarted) will always bounce the service. C(reloaded) will always reload. + enabled: + required: false + choices: [ "yes", "no" ] + default: null + description: + - Whether the service should start on boot. B(At least one of state and enabled are required.) + masked: + required: false + choices: [ "yes", "no" ] + default: null + description: + - Whether the unit should be masked or not, a masked unit is impossible to start. + daemon_reload: + required: false + default: no + choices: [ "yes", "no" ] + description: + - run daemon-reload before doing any other operations, to make sure systemd has read any changes. + aliases: ['daemon-reload'] + user: + required: false + default: no + choices: [ "yes", "no" ] + description: + - run systemctl talking to the service manager of the calling user, rather than the service manager + of the system. +notes: + - One option other than name is required. 
+requirements: + - A system managed by systemd +''' + +EXAMPLES = ''' +# Example action to start service httpd, if not running +- systemd: state=started name=httpd + +# Example action to stop service cron on debian, if running +- systemd: name=cron state=stopped + +# Example action to restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes +- systemd: + state: restarted + daemon_reload: yes + name: crond + +# Example action to reload service httpd, in all cases +- systemd: + name: httpd + state: reloaded + +# Example action to enable service httpd and ensure it is not masked +- systemd: + name: httpd + enabled: yes + masked: no + +# Example action to enable a timer for dnf-automatic +- systemd: + name: dnf-automatic.timer + state: started + enabled: True +''' + +RETURN = ''' +status: + description: A dictionary with the key=value pairs returned from `systemctl show` + returned: success + type: complex + sample: { + "ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT", + "ActiveEnterTimestampMonotonic": "8135942", + "ActiveExitTimestampMonotonic": "0", + "ActiveState": "active", + "After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice", + "AllowIsolate": "no", + "Before": "shutdown.target multi-user.target", + "BlockIOAccounting": "no", + "BlockIOWeight": "1000", + "CPUAccounting": "no", + "CPUSchedulingPolicy": "0", + "CPUSchedulingPriority": "0", + "CPUSchedulingResetOnFork": "no", + "CPUShares": "1024", + "CanIsolate": "no", + "CanReload": "yes", + "CanStart": "yes", + "CanStop": "yes", + "CapabilityBoundingSet": "18446744073709551615", + "ConditionResult": "yes", + "ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT", + "ConditionTimestampMonotonic": "7902742", + "Conflicts": "shutdown.target", + "ControlGroup": "/system.slice/crond.service", + "ControlPID": "0", + "DefaultDependencies": "yes", + "Delegate": "no", + "Description": "Command Scheduler", + 
"DevicePolicy": "auto", + "EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)", + "ExecMainCode": "0", + "ExecMainExitTimestampMonotonic": "0", + "ExecMainPID": "595", + "ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT", + "ExecMainStartTimestampMonotonic": "8134990", + "ExecMainStatus": "0", + "ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", + "ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", + "FragmentPath": "/usr/lib/systemd/system/crond.service", + "GuessMainPID": "yes", + "IOScheduling": "0", + "Id": "crond.service", + "IgnoreOnIsolate": "no", + "IgnoreOnSnapshot": "no", + "IgnoreSIGPIPE": "yes", + "InactiveEnterTimestampMonotonic": "0", + "InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT", + "InactiveExitTimestampMonotonic": "8135942", + "JobTimeoutUSec": "0", + "KillMode": "process", + "KillSignal": "15", + "LimitAS": "18446744073709551615", + "LimitCORE": "18446744073709551615", + "LimitCPU": "18446744073709551615", + "LimitDATA": "18446744073709551615", + "LimitFSIZE": "18446744073709551615", + "LimitLOCKS": "18446744073709551615", + "LimitMEMLOCK": "65536", + "LimitMSGQUEUE": "819200", + "LimitNICE": "0", + "LimitNOFILE": "4096", + "LimitNPROC": "3902", + "LimitRSS": "18446744073709551615", + "LimitRTPRIO": "0", + "LimitRTTIME": "18446744073709551615", + "LimitSIGPENDING": "3902", + "LimitSTACK": "18446744073709551615", + "LoadState": "loaded", + "MainPID": "595", + "MemoryAccounting": "no", + "MemoryLimit": "18446744073709551615", + "MountFlags": "0", + "Names": "crond.service", + "NeedDaemonReload": "no", + "Nice": "0", + "NoNewPrivileges": "no", + "NonBlocking": "no", + "NotifyAccess": "none", + "OOMScoreAdjust": "0", + "OnFailureIsolate": "no", + "PermissionsStartOnly": "no", + "PrivateNetwork": "no", + 
"PrivateTmp": "no", + "RefuseManualStart": "no", + "RefuseManualStop": "no", + "RemainAfterExit": "no", + "Requires": "basic.target", + "Restart": "no", + "RestartUSec": "100ms", + "Result": "success", + "RootDirectoryStartOnly": "no", + "SameProcessGroup": "no", + "SecureBits": "0", + "SendSIGHUP": "no", + "SendSIGKILL": "yes", + "Slice": "system.slice", + "StandardError": "inherit", + "StandardInput": "null", + "StandardOutput": "journal", + "StartLimitAction": "none", + "StartLimitBurst": "5", + "StartLimitInterval": "10000000", + "StatusErrno": "0", + "StopWhenUnneeded": "no", + "SubState": "running", + "SyslogLevelPrefix": "yes", + "SyslogPriority": "30", + "TTYReset": "no", + "TTYVHangup": "no", + "TTYVTDisallocate": "no", + "TimeoutStartUSec": "1min 30s", + "TimeoutStopUSec": "1min 30s", + "TimerSlackNSec": "50000", + "Transient": "no", + "Type": "simple", + "UMask": "0022", + "UnitFileState": "enabled", + "WantedBy": "multi-user.target", + "Wants": "system.slice", + "WatchdogTimestampMonotonic": "0", + "WatchdogUSec": "0", + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.service import sysv_exists, sysv_is_enabled, fail_if_missing +from ansible.module_utils._text import to_native + +# =========================================== +# Main control flow + +def main(): + # initialize + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, type='str', aliases=['unit', 'service']), + state = dict(choices=[ 'started', 'stopped', 'restarted', 'reloaded'], type='str'), + enabled = dict(type='bool'), + masked = dict(type='bool'), + daemon_reload= dict(type='bool', default=False, aliases=['daemon-reload']), + user= dict(type='bool', default=False), + ), + supports_check_mode=True, + required_one_of=[['state', 'enabled', 'masked', 'daemon_reload']], + ) + + systemctl = module.get_bin_path('systemctl') + if module.params['user']: + systemctl = systemctl + " --user" + unit = module.params['name'] + rc = 0 + 
out = err = '' + result = { + 'name': unit, + 'changed': False, + 'status': {}, + 'warnings': [], + } + + # Run daemon-reload first, if requested + if module.params['daemon_reload']: + (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl)) + if rc != 0: + module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err)) + + found = False + is_initd = sysv_exists(unit) + is_systemd = False + + # check service data, cannot error out on rc as it changes across versions, assume not found + (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit)) + if rc == 0: + # load return of systemctl show into dictionary for easy access and return + multival = [] + if out: + k = None + for line in to_native(out).split('\n'): # systemd can have multiline values delimited with {} + if line.strip(): + if k is None: + if '=' in line: + k,v = line.split('=', 1) + if v.lstrip().startswith('{'): + if not v.rstrip().endswith('}'): + multival.append(line) + continue + result['status'][k] = v.strip() + k = None + else: + if line.rstrip().endswith('}'): + result['status'][k] = '\n'.join(multival).strip() + multival = [] + k = None + else: + multival.append(line) + + is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found' + + # Check for loading error + if is_systemd and 'LoadError' in result['status']: + module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError'])) + + # Does service exist? 
+ found = is_systemd or is_initd + if is_initd and not is_systemd: + result['warnings'].append('The service (%s) is actually an init script but the system is managed by systemd' % unit) + + # mask/unmask the service, if requested, can operate on services before they are installed + if module.params['masked'] is not None: + # state is not masked unless systemd affirms otherwise + masked = ('LoadState' in result['status'] and result['status']['LoadState'] == 'masked') + + if masked != module.params['masked']: + result['changed'] = True + if module.params['masked']: + action = 'mask' + else: + action = 'unmask' + + if not module.check_mode: + (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit)) + if rc != 0: + # some versions of system CAN mask/unmask non existing services, we only fail on missing if they don't + fail_if_missing(module, found, unit, msg='host') + + + # Enable/disable service startup at boot if requested + if module.params['enabled'] is not None: + + if module.params['enabled']: + action = 'enable' + else: + action = 'disable' + + fail_if_missing(module, found, unit, msg='host') + + # do we need to enable the service? 
+ enabled = False + (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit)) + + # check systemctl result or if it is a init script + if rc == 0: + enabled = True + elif rc == 1: + # if both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries + if is_initd and (not out.startswith('disabled') or sysv_is_enabled(unit)): + enabled = True + + # default to current state + result['enabled'] = enabled + + # Change enable/disable if needed + if enabled != module.params['enabled']: + result['changed'] = True + if not module.check_mode: + (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit)) + if rc != 0: + module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err)) + + result['enabled'] = not enabled + + # set service state if requested + if module.params['state'] is not None: + fail_if_missing(module, found, unit, msg="host") + + # default to desired state + result['state'] = module.params['state'] + + # What is current service state? + if 'ActiveState' in result['status']: + action = None + if module.params['state'] == 'started': + if result['status']['ActiveState'] != 'active': + action = 'start' + elif module.params['state'] == 'stopped': + if result['status']['ActiveState'] == 'active': + action = 'stop' + else: + if result['status']['ActiveState'] != 'active': + action = 'start' + else: + action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded + result['state'] = 'started' + + if action: + result['changed'] = True + if not module.check_mode: + (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit)) + if rc != 0: + module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err)) + else: + # this should not happen? 
+ module.fail_json(msg="Service is in unknown state", status=result['status']) + + + module.exit_json(**result) + +if __name__ == '__main__': + main() diff --git a/system/user.py b/system/user.py old mode 100755 new mode 100644 index 3aeda4b86e9..ed5503583ae --- a/system/user.py +++ b/system/user.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: user @@ -61,9 +65,10 @@ groups: required: false description: - - Puts the user in this comma-delimited list of groups. When set to - the empty string ('groups='), the user is removed from all groups - except the primary group. + - Puts the user in list of groups. When set to the empty string ('groups='), + the user is removed from all groups except the primary group. + - Before version 2.3, the only input format allowed was a 'comma separated string', + now it should be able to accept YAML lists also. append: required: false default: "no" @@ -150,7 +155,7 @@ This will B(not) overwrite an existing SSH key. ssh_key_bits: required: false - default: 2048 + default: default set by ssh-keygen version_added: "0.9" description: - Optionally specify number of bits in SSH key to create. @@ -159,7 +164,7 @@ default: rsa version_added: "0.9" description: - - Optionally specify the type of SSH key to generate. + - Optionally specify the type of SSH key to generate. Available SSH key types will depend on implementation present on target host. 
ssh_key_file: @@ -200,19 +205,38 @@ EXAMPLES = ''' # Add the user 'johnd' with a specific uid and a primary group of 'admin' -- user: name=johnd comment="John Doe" uid=1040 group=admin +- user: + name: johnd + comment: "John Doe" + uid: 1040 + group: admin # Add the user 'james' with a bash shell, appending the group 'admins' and 'developers' to the user's groups -- user: name=james shell=/bin/bash groups=admins,developers append=yes +- user: + name: james + shell: /bin/bash + groups: admins,developers + append: yes # Remove the user 'johnd' -- user: name=johnd state=absent remove=yes +- user: + name: johnd + state: absent + remove: yes # Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa -- user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048 ssh_key_file=.ssh/id_rsa +- user: + name: jsmith + generate_ssh_key: yes + ssh_key_bits: 2048 + ssh_key_file: .ssh/id_rsa # added a consultant whose account you want to expire -- user: name=james18 shell=/bin/zsh groups=developers expires=1422403387 +- user: + name: james18 + shell: /bin/zsh + groups: developers + expires: 1422403387 ''' import os @@ -221,6 +245,7 @@ import platform import socket import time +from ansible.module_utils._text import to_native try: import spwd @@ -261,7 +286,6 @@ def __init__(self, module): self.non_unique = module.params['non_unique'] self.seuser = module.params['seuser'] self.group = module.params['group'] - self.groups = module.params['groups'] self.comment = module.params['comment'] self.shell = module.params['shell'] self.password = module.params['password'] @@ -279,16 +303,18 @@ def __init__(self, module): self.ssh_comment = module.params['ssh_key_comment'] self.ssh_passphrase = module.params['ssh_key_passphrase'] self.update_password = module.params['update_password'] - self.home = None + self.home = module.params['home'] self.expires = None + self.groups = None - if module.params['home'] is not None: - self.home = os.path.expanduser(module.params['home']) + if 
module.params['groups'] is not None: + self.groups = ','.join(module.params['groups']) if module.params['expires']: try: self.expires = time.gmtime(module.params['expires']) - except Exception,e: + except Exception: + e = get_exception() module.fail_json("Invalid expires time %s: %s" %(self.expires, str(e))) if module.params['ssh_key_file'] is not None: @@ -302,6 +328,8 @@ def execute_command(self, cmd, use_unsafe_shell=False, data=None, obey_checkmode self.module.debug('In check mode, would have run: "%s"' % cmd) return (0, '','') else: + # cast all args to strings ansible-modules-core/issues/4397 + cmd = [str(x) for x in cmd] return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) def remove_user_userdel(self): @@ -402,7 +430,7 @@ def _check_usermod_append(self): helpout = data1 + data2 # check if --append exists - lines = helpout.split('\n') + lines = to_native(helpout).split('\n') for line in lines: if line.strip().startswith('-a, --append'): return True @@ -432,7 +460,8 @@ def modify_user_usermod(self): cmd.append(self.group) if self.groups is not None: - current_groups = self.user_group_membership() + # get a list of all groups for the user, including the primary + current_groups = self.user_group_membership(exclude_primary=False) groups_need_mod = False groups = [] @@ -462,7 +491,6 @@ def modify_user_usermod(self): cmd.append('-G') cmd.append(','.join(groups)) - if self.comment is not None and info[4] != self.comment: cmd.append('-c') cmd.append(self.comment) @@ -525,12 +553,19 @@ def get_groups_set(self, remove_existing=True): groups.remove(g) return groups - def user_group_membership(self): + def user_group_membership(self, exclude_primary=True): + ''' Return a list of groups the user belongs to ''' groups = [] info = self.get_pwd_info() for group in grp.getgrall(): - if self.name in group.gr_mem and not info[3] == group.gr_gid: - groups.append(group[0]) + if self.name in group.gr_mem: + # Exclude the user's primary group by 
default + if not exclude_primary: + groups.append(group[0]) + else: + if info[3] != group.gr_gid: + groups.append(group[0]) + return groups def user_exists(self): @@ -588,17 +623,19 @@ def ssh_key_gen(self): if self.module.check_mode: return (0, '', '') try: - os.mkdir(ssh_dir, 0700) + os.mkdir(ssh_dir, int('0700', 8)) os.chown(ssh_dir, info[2], info[3]) - except OSError, e: + except OSError: + e = get_exception() return (1, '', 'Failed to create %s: %s' % (ssh_dir, str(e))) if os.path.exists(ssh_key_file): return (None, 'Key already exists', '') cmd = [self.module.get_bin_path('ssh-keygen', True)] cmd.append('-t') cmd.append(self.ssh_type) - cmd.append('-b') - cmd.append(self.ssh_bits) + if self.ssh_bits > 0: + cmd.append('-b') + cmd.append(self.ssh_bits) cmd.append('-C') cmd.append(self.ssh_comment) cmd.append('-f') @@ -660,12 +697,14 @@ def create_homedir(self, path): if os.path.exists(skeleton): try: shutil.copytree(skeleton, path, symlinks=True) - except OSError, e: + except OSError: + e = get_exception() self.module.exit_json(failed=True, msg="%s" % e) else: try: os.makedirs(path) - except OSError, e: + except OSError: + e = get_exception() self.module.exit_json(failed=True, msg="%s" % e) def chown_homedir(self, uid, gid, path): @@ -676,7 +715,8 @@ def chown_homedir(self, uid, gid, path): os.chown(path, uid, gid) for f in files: os.chown(os.path.join(root, f), uid, gid) - except OSError, e: + except OSError: + e = get_exception() self.module.exit_json(failed=True, msg="%s" % e) @@ -1225,6 +1265,29 @@ class SunOS(User): distribution = None SHADOWFILE = '/etc/shadow' + def get_password_defaults(self): + # Read password aging defaults + try: + minweeks = '' + maxweeks = '' + warnweeks = '' + for line in open("/etc/default/passwd", 'r'): + line = line.strip() + if (line.startswith('#') or line == ''): + continue + key, value = line.split('=') + if key == "MINWEEKS": + minweeks = value.rstrip('\n') + elif key == "MAXWEEKS": + maxweeks = value.rstrip('\n') + elif 
key == "WARNWEEKS": + warnweeks = value.rstrip('\n') + except Exception: + err = get_exception() + self.module.fail_json(msg="failed to read /etc/default/passwd: %s" % str(err)) + + return (minweeks, maxweeks, warnweeks) + def remove_user(self): cmd = [self.module.get_bin_path('userdel', True)] if self.remove: @@ -1282,6 +1345,7 @@ def create_user(self): if not self.module.check_mode: # we have to set the password by editing the /etc/shadow file if self.password is not None: + minweeks, maxweeks, warnweeks = self.get_password_defaults() try: lines = [] for line in open(self.SHADOWFILE, 'rb').readlines(): @@ -1291,10 +1355,17 @@ def create_user(self): continue fields[1] = self.password fields[2] = str(int(time.time() / 86400)) + if minweeks: + fields[3] = str(int(minweeks) * 7) + if maxweeks: + fields[4] = str(int(maxweeks) * 7) + if warnweeks: + fields[5] = str(int(warnweeks) * 7) line = ':'.join(fields) lines.append('%s\n' % line) open(self.SHADOWFILE, 'w+').writelines(lines) - except Exception, err: + except Exception: + err = get_exception() self.module.fail_json(msg="failed to update users password: %s" % str(err)) return (rc, out, err) @@ -1368,6 +1439,7 @@ def modify_user_usermod(self): if self.update_password == 'always' and self.password is not None and info[1] != self.password: (rc, out, err) = (0, '', '') if not self.module.check_mode: + minweeks, maxweeks, warnweeks = self.get_password_defaults() try: lines = [] for line in open(self.SHADOWFILE, 'rb').readlines(): @@ -1377,11 +1449,18 @@ def modify_user_usermod(self): continue fields[1] = self.password fields[2] = str(int(time.time() / 86400)) + if minweeks: + fields[3] = str(int(minweeks) * 7) + if maxweeks: + fields[4] = str(int(maxweeks) * 7) + if warnweeks: + fields[5] = str(int(warnweeks) * 7) line = ':'.join(fields) lines.append('%s\n' % line) open(self.SHADOWFILE, 'w+').writelines(lines) rc = 0 - except Exception, err: + except Exception: + err = get_exception() self.module.fail_json(msg="failed 
to update users password: %s" % str(err)) return (rc, out, err) @@ -1476,7 +1555,7 @@ def _get_next_uid(self): def _change_user_password(self): '''Change password for SELF.NAME against SELF.PASSWORD. - Please note that password must be cleatext. + Please note that password must be cleartext. ''' # some documentation on how is stored passwords on OSX: # http://blog.lostpassword.com/2012/07/cracking-mac-os-x-lion-accounts-passwords/ @@ -1508,7 +1587,7 @@ def _make_group_numerical(self): def __modify_group(self, group, action): '''Add or remove SELF.NAME to or from GROUP depending on ACTION. - ACTION can be 'add' or 'remove' otherwhise 'remove' is assumed. ''' + ACTION can be 'add' or 'remove' otherwise 'remove' is assumed. ''' if action == 'add': option = '-a' else: @@ -1522,7 +1601,7 @@ def __modify_group(self, group, action): def _modify_group(self): '''Add or remove SELF.NAME to or from GROUP depending on ACTION. - ACTION can be 'add' or 'remove' otherwhise 'remove' is assumed. ''' + ACTION can be 'add' or 'remove' otherwise 'remove' is assumed. 
''' rc = 0 out = '' @@ -1535,12 +1614,13 @@ def _modify_group(self): else: target = set([]) - for remove in current - target: - (_rc, _err, _out) = self.__modify_group(remove, 'delete') - rc += rc - out += _out - err += _err - changed = True + if self.append is False: + for remove in current - target: + (_rc, _err, _out) = self.__modify_group(remove, 'delete') + rc += rc + out += _out + err += _err + changed = True for add in target - current: (_rc, _err, _out) = self.__modify_group(add, 'add') @@ -1640,7 +1720,7 @@ def create_user(self, command_name='dscl'): self.chown_homedir(int(self.uid), int(self.group), self.home) for field in self.fields: - if self.__dict__.has_key(field[0]) and self.__dict__[field[0]]: + if field[0] in self.__dict__ and self.__dict__[field[0]]: cmd = self._get_dscl() cmd += [ '-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]] @@ -1677,7 +1757,7 @@ def modify_user(self): self._make_group_numerical() for field in self.fields: - if self.__dict__.has_key(field[0]) and self.__dict__[field[0]]: + if field[0] in self.__dict__ and self.__dict__[field[0]]: current = self._get_user_property(field[1]) if current is None or current != self.__dict__[field[0]]: cmd = self._get_dscl() @@ -2015,7 +2095,7 @@ def modify_user(self): def main(): ssh_defaults = { - 'bits': '2048', + 'bits': 0, 'type': 'rsa', 'passphrase': None, 'comment': 'ansible-generated on %s' % socket.gethostname() @@ -2027,9 +2107,9 @@ def main(): uid=dict(default=None, type='str'), non_unique=dict(default='no', type='bool'), group=dict(default=None, type='str'), - groups=dict(default=None, type='str'), + groups=dict(default=None, type='list'), comment=dict(default=None, type='str'), - home=dict(default=None, type='str'), + home=dict(default=None, type='path'), shell=dict(default=None, type='str'), password=dict(default=None, type='str', no_log=True), login_class=dict(default=None, type='str'), @@ -2047,9 +2127,9 @@ def main(): append=dict(default='no', type='bool'), # 
following are specific to ssh key generation generate_ssh_key=dict(type='bool'), - ssh_key_bits=dict(default=ssh_defaults['bits'], type='str'), + ssh_key_bits=dict(default=ssh_defaults['bits'], type='int'), ssh_key_type=dict(default=ssh_defaults['type'], type='str'), - ssh_key_file=dict(default=None, type='str'), + ssh_key_file=dict(default=None, type='path'), ssh_key_comment=dict(default=ssh_defaults['comment'], type='str'), ssh_key_passphrase=dict(default=None, type='str', no_log=True), update_password=dict(default='always',choices=['always','on_create'],type='str'), diff --git a/test-docs.sh b/test-docs.sh deleted file mode 100755 index 76297fbada6..00000000000 --- a/test-docs.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/sh -set -x - -CHECKOUT_DIR=".ansible-checkout" -MOD_REPO="$1" - -# Hidden file to avoid the module_formatter recursing into the checkout -git clone https://github.com/ansible/ansible "$CHECKOUT_DIR" -cd "$CHECKOUT_DIR" -git submodule update --init -rm -rf "lib/ansible/modules/$MOD_REPO" -ln -s "$TRAVIS_BUILD_DIR/" "lib/ansible/modules/$MOD_REPO" - -pip install -U Jinja2 PyYAML setuptools six pycrypto sphinx - -. ./hacking/env-setup -PAGER=/bin/cat bin/ansible-doc -l -if [ $? -ne 0 ] ; then - exit $? 
-fi -make -C docsite diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index 93253de97a3..00000000000 --- a/test-requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -mock -pytest diff --git a/test/unit/cloud/openstack/test_os_server.py b/test/unit/cloud/openstack/test_os_server.py deleted file mode 100644 index bb1f79ad2f9..00000000000 --- a/test/unit/cloud/openstack/test_os_server.py +++ /dev/null @@ -1,221 +0,0 @@ -import mock -import pytest -import yaml -import inspect -import collections - -from cloud.openstack import os_server - - -class AnsibleFail(Exception): - pass - - -class AnsibleExit(Exception): - pass - - -def params_from_doc(func): - '''This function extracts the docstring from the specified function, - parses it as a YAML document, and returns parameters for the os_server - module.''' - - doc = inspect.getdoc(func) - cfg = yaml.load(doc) - - for task in cfg: - for module, params in task.items(): - for k, v in params.items(): - if k in ['nics'] and type(v) == str: - params[k] = [v] - task[module] = collections.defaultdict(str, - params) - - return cfg[0]['os_server'] - - -class FakeCloud (object): - ports = [ - {'name': 'port1', 'id': '1234'}, - {'name': 'port2', 'id': '4321'}, - ] - - networks = [ - {'name': 'network1', 'id': '5678'}, - {'name': 'network2', 'id': '8765'}, - ] - - images = [ - {'name': 'cirros', 'id': '1'}, - {'name': 'fedora', 'id': '2'}, - ] - - flavors = [ - {'name': 'm1.small', 'id': '1', 'flavor_ram': 1024}, - {'name': 'm1.tiny', 'id': '2', 'flavor_ram': 512}, - ] - - def _find(self, source, name): - for item in source: - if item['name'] == name or item['id'] == name: - return item - - def get_image_id(self, name, exclude=None): - image = self._find(self.images, name) - if image: - return image['id'] - - def get_flavor(self, name): - return self._find(self.flavors, name) - - def get_flavor_by_ram(self, ram, include=None): - for flavor in self.flavors: - if flavor['ram'] >= ram and (include is None or 
include in - flavor['name']): - return flavor - - def get_port(self, name): - return self._find(self.ports, name) - - def get_network(self, name): - return self._find(self.networks, name) - - create_server = mock.MagicMock() - - -class TestNetworkArgs(object): - '''This class exercises the _network_args function of the - os_server module. For each test, we parse the YAML document - contained in the docstring to retrieve the module parameters for the - test.''' - - def setup_method(self, method): - self.cloud = FakeCloud() - self.module = mock.MagicMock() - self.module.params = params_from_doc(method) - - def test_nics_string_net_id(self): - ''' - - os_server: - nics: net-id=1234 - ''' - args = os_server._network_args(self.module, self.cloud) - assert(args[0]['net-id'] == '1234') - - def test_nics_string_net_id_list(self): - ''' - - os_server: - nics: net-id=1234,net-id=4321 - ''' - args = os_server._network_args(self.module, self.cloud) - assert(args[0]['net-id'] == '1234') - assert(args[1]['net-id'] == '4321') - - def test_nics_string_port_id(self): - ''' - - os_server: - nics: port-id=1234 - ''' - args = os_server._network_args(self.module, self.cloud) - assert(args[0]['port-id'] == '1234') - - def test_nics_string_net_name(self): - ''' - - os_server: - nics: net-name=network1 - ''' - args = os_server._network_args(self.module, self.cloud) - assert(args[0]['net-id'] == '5678') - - def test_nics_string_port_name(self): - ''' - - os_server: - nics: port-name=port1 - ''' - args = os_server._network_args(self.module, self.cloud) - assert(args[0]['port-id'] == '1234') - - def test_nics_structured_net_id(self): - ''' - - os_server: - nics: - - net-id: '1234' - ''' - args = os_server._network_args(self.module, self.cloud) - assert(args[0]['net-id'] == '1234') - - def test_nics_structured_mixed(self): - ''' - - os_server: - nics: - - net-id: '1234' - - port-name: port1 - - 'net-name=network1,port-id=4321' - ''' - args = os_server._network_args(self.module, self.cloud) - 
assert(args[0]['net-id'] == '1234') - assert(args[1]['port-id'] == '1234') - assert(args[2]['net-id'] == '5678') - assert(args[3]['port-id'] == '4321') - - -class TestCreateServer(object): - def setup_method(self, method): - self.cloud = FakeCloud() - self.module = mock.MagicMock() - self.module.params = params_from_doc(method) - self.module.fail_json.side_effect = AnsibleFail() - self.module.exit_json.side_effect = AnsibleExit() - - self.meta = mock.MagicMock() - self.meta.gett_hostvars_from_server.return_value = { - 'id': '1234' - } - os_server.meta = self.meta - - def test_create_server(self): - ''' - - os_server: - image: cirros - flavor: m1.tiny - nics: - - net-name: network1 - ''' - with pytest.raises(AnsibleExit): - os_server._create_server(self.module, self.cloud) - - assert(self.cloud.create_server.call_count == 1) - assert(self.cloud.create_server.call_args[1]['image'] - == self.cloud.get_image_id('cirros')) - assert(self.cloud.create_server.call_args[1]['flavor'] - == self.cloud.get_flavor('m1.tiny')['id']) - assert(self.cloud.create_server.call_args[1]['nics'][0]['net-id'] - == self.cloud.get_network('network1')['id']) - - def test_create_server_bad_flavor(self): - ''' - - os_server: - image: cirros - flavor: missing_flavor - nics: - - net-name: network1 - ''' - with pytest.raises(AnsibleFail): - os_server._create_server(self.module, self.cloud) - - assert('missing_flavor' in - self.module.fail_json.call_args[1]['msg']) - - def test_create_server_bad_nic(self): - ''' - - os_server: - image: cirros - flavor: m1.tiny - nics: - - net-name: missing_network - ''' - with pytest.raises(AnsibleFail): - os_server._create_server(self.module, self.cloud) - - assert('missing_network' in - self.module.fail_json.call_args[1]['msg']) diff --git a/test/utils/shippable/ci.sh b/test/utils/shippable/ci.sh new file mode 100755 index 00000000000..5c0f847e661 --- /dev/null +++ b/test/utils/shippable/ci.sh @@ -0,0 +1,7 @@ +#!/bin/bash -eux + +set -o pipefail + 
+source_root=$(python -c "from os import path; print(path.abspath(path.join(path.dirname('$0'), '../../..')))") + +"${source_root}/test/utils/shippable/${TEST}.sh" 2>&1 | gawk '{ print strftime("%Y-%m-%d %H:%M:%S"), $0; fflush(); }' diff --git a/test/utils/shippable/docs-requirements.txt b/test/utils/shippable/docs-requirements.txt new file mode 100644 index 00000000000..4e859bb8c71 --- /dev/null +++ b/test/utils/shippable/docs-requirements.txt @@ -0,0 +1,2 @@ +jinja2 +pyyaml diff --git a/test/utils/shippable/docs.sh b/test/utils/shippable/docs.sh new file mode 100755 index 00000000000..2858f87c997 --- /dev/null +++ b/test/utils/shippable/docs.sh @@ -0,0 +1,62 @@ +#!/bin/bash -eux + +set -o pipefail + +ansible_repo_url="https://github.com/ansible/ansible.git" + +build_dir="${SHIPPABLE_BUILD_DIR}" +repo="${REPO_NAME}" + +case "${repo}" in + "ansible-modules-core") + this_module_group="core" + other_module_group="extras" + ;; + "ansible-modules-extras") + this_module_group="extras" + other_module_group="core" + ;; + *) + echo "Unsupported repo name: ${repo}" + exit 1 + ;; +esac + +modules_tmp_dir="${build_dir}.tmp" +this_modules_dir="${build_dir}/lib/ansible/modules/${this_module_group}" +other_modules_dir="${build_dir}/lib/ansible/modules/${other_module_group}" + +cd / +mv "${build_dir}" "${modules_tmp_dir}" +git clone "${ansible_repo_url}" "${build_dir}" +cd "${build_dir}" +rmdir "${this_modules_dir}" +mv "${modules_tmp_dir}" "${this_modules_dir}" +mv "${this_modules_dir}/shippable" "${build_dir}" +git submodule init "${other_modules_dir}" +git submodule sync "${other_modules_dir}" +git submodule update "${other_modules_dir}" + +pip install -r lib/ansible/modules/${this_module_group}/test/utils/shippable/docs-requirements.txt --upgrade +pip list + +source hacking/env-setup + +docs_status=0 + +PAGER=/bin/cat \ + ANSIBLE_DEPRECATION_WARNINGS=false \ + bin/ansible-doc -l \ + 2>/tmp/ansible-doc.err || docs_status=$? 
+ +if [ -s /tmp/ansible-doc.err ]; then + # report warnings as errors + echo "Output from 'ansible-doc -l' on stderr is considered an error:" + cat /tmp/ansible-doc.err + exit 1 +fi + +if [ "${docs_status}" -ne 0 ]; then + echo "Running 'ansible-doc -l' failed with no output on stderr and exit code: ${docs_status}" + exit 1 +fi diff --git a/test/utils/shippable/integration.sh b/test/utils/shippable/integration.sh new file mode 100755 index 00000000000..cf10e681bfb --- /dev/null +++ b/test/utils/shippable/integration.sh @@ -0,0 +1,55 @@ +#!/bin/bash -eux + +set -o pipefail + +ansible_repo_url="https://github.com/ansible/ansible.git" + +is_pr="${IS_PULL_REQUEST}" +build_dir="${SHIPPABLE_BUILD_DIR}" +repo="${REPO_NAME}" + +if [ "${is_pr}" != "true" ]; then + echo "Module integration tests are only supported on pull requests." + exit 0 +fi + +case "${repo}" in + "ansible-modules-core") + this_module_group="core" + other_module_group="extras" + ;; + "ansible-modules-extras") + this_module_group="extras" + other_module_group="core" + ;; + *) + echo "Unsupported repo name: ${repo}" + exit 1 + ;; +esac + +modules_tmp_dir="${build_dir}.tmp" +this_modules_dir="${build_dir}/lib/ansible/modules/${this_module_group}" +other_modules_dir="${build_dir}/lib/ansible/modules/${other_module_group}" + +cd / +mv "${build_dir}" "${modules_tmp_dir}" +git clone "${ansible_repo_url}" "${build_dir}" +cd "${build_dir}" +rmdir "${this_modules_dir}" +mv "${modules_tmp_dir}" "${this_modules_dir}" +mv "${this_modules_dir}/shippable" "${build_dir}" +git submodule init "${other_modules_dir}" +git submodule sync "${other_modules_dir}" +git submodule update "${other_modules_dir}" + +pip install -r test/utils/shippable/modules/generate-tests-requirements.txt --upgrade +pip list + +source hacking/env-setup + +test/utils/shippable/modules/generate-tests "${this_module_group}" --verbose --output /tmp/integration.sh >/dev/null + +if [ -f /tmp/integration.sh ]; then + /bin/bash -eux 
/tmp/integration.sh +fi diff --git a/test/utils/shippable/sanity-skip-python24.txt b/test/utils/shippable/sanity-skip-python24.txt new file mode 100644 index 00000000000..1a0a28c4d49 --- /dev/null +++ b/test/utils/shippable/sanity-skip-python24.txt @@ -0,0 +1 @@ +/cloud/[^/]+/(?!(ec2_facts.py|_ec2_ami_search.py)) diff --git a/test/utils/shippable/sanity.sh b/test/utils/shippable/sanity.sh new file mode 100755 index 00000000000..8c1453022e7 --- /dev/null +++ b/test/utils/shippable/sanity.sh @@ -0,0 +1,41 @@ +#!/bin/bash -eux + +source_root=$(python -c "from os import path; print(path.abspath(path.join(path.dirname('$0'), '../../..')))") + +install_deps="${INSTALL_DEPS:-}" + +cd "${source_root}" + +# FIXME REPOMERGE: No need to checkout ansible +build_dir=$(mktemp -d) +trap 'rm -rf "${build_dir}"' EXIT + +git clone "https://github.com/ansible/ansible.git" "${build_dir}" --recursive +source "${build_dir}/hacking/env-setup" +# REPOMERGE: END + +if [ "${install_deps}" != "" ]; then + add-apt-repository ppa:fkrull/deadsnakes + apt-add-repository 'deb http://archive.ubuntu.com/ubuntu trusty-backports universe' + apt-get update -qq + + apt-get install -qq shellcheck python2.4 + + # Install dependencies for ansible and validate_modules + pip install -r "${build_dir}/test/utils/shippable/sanity-requirements.txt" --upgrade + pip list + +fi + +validate_modules="${build_dir}/test/sanity/validate-modules/validate-modules" + +python2.4 -m compileall -fq -x "($(printf %s "$(< "test/utils/shippable/sanity-skip-python24.txt"))" | tr '\n' '|')" . +python2.6 -m compileall -fq . +python2.7 -m compileall -fq . +python3.5 -m compileall -fq . + +ANSIBLE_DEPRECATION_WARNINGS=false \ + "${validate_modules}" --exclude '/utilities/|/shippable(/|$)' . 
+ +shellcheck \ + test/utils/shippable/*.sh diff --git a/utilities/helper/accelerate.py b/utilities/helper/_accelerate.py similarity index 95% rename from utilities/helper/accelerate.py rename to utilities/helper/_accelerate.py index 6c656825b9d..5bd38931f22 100644 --- a/utilities/helper/accelerate.py +++ b/utilities/helper/_accelerate.py @@ -18,10 +18,15 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: accelerate short_description: Enable accelerated mode on remote node +deprecated: "in favor of SSH with ControlPersist" description: - This modules launches an ephemeral I(accelerate) daemon on the remote node which Ansible can use to communicate with nodes at high speed. @@ -64,7 +69,7 @@ notes: - See the advanced playbooks chapter for more about using accelerated mode. requirements: - - "python >= 2.6" + - "python >= 2.4" - "python-keyczar" author: "James Cammarata (@jimi-c)" ''' @@ -105,8 +110,6 @@ # we must import this here at the top so we can use get_module_path() from ansible.module_utils.basic import * -syslog.openlog('ansible-%s' % os.path.basename(__file__)) - # the chunk size to read and send, assuming mtu 1500 and # leaving room for base64 (+33%) encoding and header (100 bytes) # 4 * (975/3) + 100 = 1400 @@ -168,14 +171,15 @@ def daemonize_self(module, password, port, minutes, pid_file): vvv("exiting pid %s" % pid) # exit first parent module.exit_json(msg="daemonized accelerate on port %s for %s minutes with pid %s" % (port, minutes, str(pid))) - except OSError, e: - log("fork #1 failed: %d (%s)" % (e.errno, e.strerror)) - sys.exit(1) + except OSError: + e = get_exception() + message = "fork #1 failed: %d (%s)" % (e.errno, e.strerror) + module.fail_json(msg=message) # decouple from parent environment os.chdir("/") os.setsid() - os.umask(022) + os.umask(int('022', 8))
# do second fork try: @@ -187,8 +191,9 @@ def daemonize_self(module, password, port, minutes, pid_file): pid_file.close() vvv("pid file written") sys.exit(0) - except OSError, e: - log("fork #2 failed: %d (%s)" % (e.errno, e.strerror)) + except OSError: + e = get_exception() + log('fork #2 failed: %d (%s)' % (e.errno, e.strerror)) sys.exit(1) dev_null = file('/dev/null','rw') @@ -219,9 +224,9 @@ def run(self): # make sure the directory is accessible only to this # user, as socket files derive their permissions from # the directory that contains them - os.chmod(dir, 0700) + os.chmod(dir, int('0700', 8)) elif not os.path.exists(dir): - os.makedirs(dir, 0700) + os.makedirs(dir, int('0700', 8)) except OSError: pass self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) @@ -254,13 +259,14 @@ def run(self): conn.sendall("EXISTS\n") # update the last event time so the server doesn't - # shutdown sooner than expected for new cliets + # shutdown sooner than expected for new clients try: self.server.last_event_lock.acquire() self.server.last_event = datetime.datetime.now() finally: self.server.last_event_lock.release() - except Exception, e: + except Exception: + e = get_exception() vv("key loaded locally was invalid, ignoring (%s)" % e) conn.sendall("BADKEY\n") finally: @@ -520,7 +526,8 @@ def fetch(self, data): if response.get('failed',False): log("got a failed response from the master") return dict(failed=True, stderr="Master reported failure, aborting transfer") - except Exception, e: + except Exception: + e = get_exception() fd.close() tb = traceback.format_exc() log("failed to fetch the file: %s" % tb) @@ -541,7 +548,7 @@ def put(self, data): tmp_path = os.path.expanduser('~/.ansible/tmp/') if not os.path.exists(tmp_path): try: - os.makedirs(tmp_path, 0700) + os.makedirs(tmp_path, int('0700', 8)) except: return dict(failed=True, msg='could not create a temporary directory at %s' % tmp_path) (fd,out_path) = tempfile.mkstemp(prefix='ansible.', dir=tmp_path) @@ -618,7
+625,8 @@ def timer_handler(signum, _): server = ThreadedTCPServer(address, ThreadedTCPRequestHandler, module, password, timeout, use_ipv6=use_ipv6) server.allow_reuse_address = True break - except Exception, e: + except Exception: + e = get_exception() vv("Failed to create the TCP server (tries left = %d) (error: %s) " % (tries,e)) tries -= 1 time.sleep(0.2) @@ -641,7 +649,8 @@ def timer_handler(signum, _): v("server thread terminated, exiting!") sys.exit(0) - except Exception, e: + except Exception: + e = get_exception() tb = traceback.format_exc() log("exception caught, exiting accelerated mode: %s\n%s" % (e, tb)) sys.exit(0) @@ -661,6 +670,8 @@ def main(): supports_check_mode=True ) + syslog.openlog('ansible-%s' % module._name) + password = base64.b64decode(module.params['password']) port = int(module.params['port']) timeout = int(module.params['timeout']) @@ -685,11 +696,16 @@ def main(): # process, other than tell the calling program # whether other signals can be sent os.kill(daemon_pid, 0) - except OSError, e: + except OSError: + e = get_exception() + message = 'the accelerate daemon appears to be running' + message += ' as a different user that this user cannot access' + message += ' (pid=%s)' % daemon_pid + if e.errno == errno.EPERM: # no permissions means the pid is probably # running, but as a different user, so fail - module.fail_json(msg="the accelerate daemon appears to be running as a different user that this user cannot access (pid=%d)" % daemon_pid) + module.fail_json(msg=message) else: daemon_running = True except ValueError: @@ -726,4 +742,5 @@ def main(): # try to start up the daemon daemonize(module, password, port, timeout, minutes, ipv6, pid_file) -main() +if __name__ == '__main__': + main() diff --git a/utilities/helper/_fireball.py b/utilities/helper/_fireball.py index ba0770d7839..d3bc837fa4f 100644 --- a/utilities/helper/_fireball.py +++ b/utilities/helper/_fireball.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General
Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: fireball diff --git a/utilities/helper/meta.py b/utilities/helper/meta.py new file mode 100644 index 00000000000..a89bfc5d394 --- /dev/null +++ b/utilities/helper/meta.py @@ -0,0 +1,78 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Ansible, a Red Hat company +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION = ''' +module: meta +short_description: Execute Ansible 'actions' +version_added: "1.2" +description: + - Meta tasks are a special kind of task which can influence Ansible internal execution or state. Prior to Ansible 2.0, + the only meta option available was `flush_handlers`. As of 2.2, there are five meta tasks which can be used. + Meta tasks can be used anywhere within your playbook. +options: + free_form: + description: + - This module takes a free form command, as a string. There's not an actual option named "free form". See the examples! + - "C(flush_handlers) makes Ansible run any handler tasks which have thus far been notified. 
Ansible inserts these tasks internally at certain points to implicitly trigger handler runs (after pre/post tasks, the final role execution, and the main tasks section of your plays)." + - "C(refresh_inventory) (added in 2.0) forces the reload of the inventory, which in the case of dynamic inventory scripts means they will be re-executed. This is mainly useful when additional hosts are created and users wish to use them instead of using the `add_host` module." + - "C(noop) (added in 2.0) This literally does 'nothing'. It is mainly used internally and not recommended for general use." + - "C(clear_facts) (added in 2.1) causes the gathered facts for the hosts specified in the play's list of hosts to be cleared, including the fact cache." + - "C(clear_host_errors) (added in 2.1) clears the failed state (if any) from hosts specified in the play's list of hosts." + - "C(end_play) (added in 2.2) causes the play to end without failing the host." + choices: ['noop', 'flush_handlers', 'refresh_inventory', 'clear_facts', 'clear_host_errors', 'end_play'] + required: true + default: null +notes: + - meta is not really a module nor action_plugin as such it cannot be overwritten. 
+author: + - "Ansible Core Team" +''' + +EXAMPLES = ''' +# force all notified handlers to run at this point, not waiting for normal sync points +- template: + src: new.j2 + dest: /etc/config.txt + notify: myhandler +- meta: flush_handlers + +# reload inventory, useful with dynamic inventories when play makes changes to the existing hosts +- cloud_guest: # this is fake module + name: newhost + state: present + +- name: Refresh inventory to ensure new instances exist in inventory + meta: refresh_inventory + +- name: Clear gathered facts from all currently targeted hosts + meta: clear_facts + +# bring host back to play after failure +- copy: + src: file + dest: /etc/file + remote_user: imightnothavepermission + +- meta: clear_host_errors +''' diff --git a/utilities/logic/assert.py b/utilities/logic/assert.py index e9e359f421a..875fc6e8565 100644 --- a/utilities/logic/assert.py +++ b/utilities/logic/assert.py @@ -18,12 +18,16 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: assert -short_description: Fail with custom message +short_description: Asserts given expressions are true description: - - This module asserts that a given expression is true and can be a simpler alternative to the 'fail' module in some cases. + - This module asserts that given expressions are true with an optional custom message.
version_added: "1.5" options: that: @@ -31,6 +35,10 @@ - "A string expression of the same form that can be passed to the 'when' statement" - "Alternatively, a list of string expressions" required: true + msg: + description: + - "The customized message used for a failing assertion" + required: false author: - "Ansible Core Team" - "Michael DeHaan" @@ -43,4 +51,10 @@ that: - "'foo' in some_command_result.stdout" - "number_of_the_counting == 3" + +- assert: + that: + - "my_param <= 100" + - "my_param >= 0" + msg: "'my_param' must be between 0 and 100" ''' diff --git a/utilities/logic/async_status.py b/utilities/logic/async_status.py index b4c749760d7..7093cd32db7 100644 --- a/utilities/logic/async_status.py +++ b/utilities/logic/async_status.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: async_status @@ -50,6 +54,7 @@ import datetime import traceback +from ansible.module_utils.six import iteritems def main(): @@ -76,11 +81,12 @@ def main(): # no remote kill mode currently exists, but probably should # consider log_path + ".pid" file and also unlink that above - data = file(log_path).read() + data = None try: + data = open(log_path).read() data = json.loads(data) - except Exception, e: - if data == '': + except Exception: + if not data: # file not written yet? 
That means it is running module.exit_json(results_file=log_path, ansible_job_id=jid, started=1, finished=0) else: @@ -90,12 +96,16 @@ def main(): if not 'started' in data: data['finished'] = 1 data['ansible_job_id'] = jid + elif 'finished' not in data: + data['finished'] = 0 # Fix error: TypeError: exit_json() keywords must be strings - data = dict([(str(k), v) for k, v in data.iteritems()]) + data = dict([(str(k), v) for k, v in iteritems(data)]) module.exit_json(**data) # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/utilities/logic/async_wrapper.py b/utilities/logic/async_wrapper.py index ce29d238ff8..ae23d19a4a4 100644 --- a/utilities/logic/async_wrapper.py +++ b/utilities/logic/async_wrapper.py @@ -24,6 +24,7 @@ except ImportError: import simplejson as json import shlex +import shutil import os import subprocess import sys @@ -32,6 +33,7 @@ import time import syslog +PY3 = sys.version_info[0] == 3 syslog.openlog('ansible-%s' % os.path.basename(__file__)) syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:])) @@ -46,14 +48,13 @@ def daemonize_self(): if pid > 0: # exit first parent sys.exit(0) - except OSError, e: - print >>sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror) - sys.exit(1) + except OSError: + e = sys.exc_info()[1] + sys.exit("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror)) - # decouple from parent environment - os.chdir("/") + # decouple from parent environment (does not chdir / to keep the directory context the same as for non async tasks) os.setsid() - os.umask(022) + os.umask(int('022', 8)) # do second fork try: @@ -61,50 +62,139 @@ def daemonize_self(): if pid > 0: # print "Daemon PID %d" % pid sys.exit(0) - except OSError, e: - print >>sys.stderr, "fork #2 failed: %d (%s)" % (e.errno, e.strerror) - sys.exit(1) + except OSError: + e = sys.exc_info()[1] + sys.exit("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror)) - dev_null = 
file('/dev/null','rw') + dev_null = open('/dev/null', 'w') os.dup2(dev_null.fileno(), sys.stdin.fileno()) os.dup2(dev_null.fileno(), sys.stdout.fileno()) os.dup2(dev_null.fileno(), sys.stderr.fileno()) +# NB: this function copied from module_utils/json_utils.py. Ensure any changes are propagated there. +# FUTURE: AnsibleModule-ify this module so it's Ansiballz-compatible and can use the module_utils copy of this function. +def _filter_non_json_lines(data): + ''' + Used to filter unrelated output around module JSON output, like messages from + tcagetattr, or where dropbear spews MOTD on every single command (which is nuts). + + Filters leading lines before first line-starting occurrence of '{' or '[', and filter all + trailing lines after matching close character (working from the bottom of output). + ''' + warnings = [] + + # Filter initial junk + lines = data.splitlines() + + for start, line in enumerate(lines): + line = line.strip() + if line.startswith(u'{'): + endchar = u'}' + break + elif line.startswith(u'['): + endchar = u']' + break + else: + raise ValueError('No start of json char found') + + # Filter trailing junk + lines = lines[start:] + + for reverse_end_offset, line in enumerate(reversed(lines)): + if line.strip().endswith(endchar): + break + else: + raise ValueError('No end of json char found') + + if reverse_end_offset > 0: + # Trailing junk is uncommon and can point to things the user might + # want to change. 
So print a warning if we find any + trailing_junk = lines[len(lines) - reverse_end_offset:] + warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk)) + + lines = lines[:(len(lines) - reverse_end_offset)] + + return ('\n'.join(lines), warnings) + + +def _get_interpreter(module_path): + module_fd = open(module_path, 'rb') + try: + head = module_fd.read(1024) + if head[0:2] != '#!': + return None + return head[2:head.index('\n')].strip().split(' ') + finally: + module_fd.close() + def _run_module(wrapped_cmd, jid, job_path): - jobfile = open(job_path, "w") - jobfile.write(json.dumps({ "started" : 1, "ansible_job_id" : jid })) + tmp_job_path = job_path + ".tmp" + jobfile = open(tmp_job_path, "w") + jobfile.write(json.dumps({ "started" : 1, "finished" : 0, "ansible_job_id" : jid })) jobfile.close() - jobfile = open(job_path, "w") + os.rename(tmp_job_path, job_path) + jobfile = open(tmp_job_path, "w") result = {} outdata = '' + filtered_outdata = '' + stderr = '' try: cmd = shlex.split(wrapped_cmd) - script = subprocess.Popen(cmd, shell=False, stdin=None, stdout=jobfile, stderr=jobfile) - script.communicate() - outdata = file(job_path).read() - result = json.loads(outdata) + # call the module interpreter directly (for non-binary modules) + # this permits use of a script for an interpreter on non-Linux platforms + interpreter = _get_interpreter(cmd[0]) + if interpreter: + cmd = interpreter + cmd + script = subprocess.Popen(cmd, shell=False, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (outdata, stderr) = script.communicate() + if PY3: + outdata = outdata.decode('utf-8', 'surrogateescape') + stderr = stderr.decode('utf-8', 'surrogateescape') + + (filtered_outdata, json_warnings) = _filter_non_json_lines(outdata) + + result = json.loads(filtered_outdata) + + if json_warnings: + # merge JSON junk warnings with any existing module warnings + module_warnings = result.get('warnings', []) + if not 
isinstance(module_warnings, list): + module_warnings = [module_warnings] + module_warnings.extend(json_warnings) + result['warnings'] = module_warnings - except (OSError, IOError), e: + if stderr: + result['stderr'] = stderr + jobfile.write(json.dumps(result)) + + except (OSError, IOError): + e = sys.exc_info()[1] result = { "failed": 1, "cmd" : wrapped_cmd, "msg": str(e), + "outdata": outdata, # temporary notice only + "stderr": stderr } result['ansible_job_id'] = jid jobfile.write(json.dumps(result)) - except: + + except (ValueError, Exception): result = { "failed" : 1, "cmd" : wrapped_cmd, "data" : outdata, # temporary notice only + "stderr": stderr, "msg" : traceback.format_exc() } result['ansible_job_id'] = jid jobfile.write(json.dumps(result)) + jobfile.close() + os.rename(tmp_job_path, job_path) #################### @@ -112,18 +202,26 @@ def _run_module(wrapped_cmd, jid, job_path): #################### if __name__ == '__main__': - if len(sys.argv) < 3: - print json.dumps({ + if len(sys.argv) < 5: + print(json.dumps({ "failed" : True, - "msg" : "usage: async_wrapper . Humans, do not call directly!" - }) + "msg" : "usage: async_wrapper [-preserve_tmp] " + "Humans, do not call directly!" 
+ })) sys.exit(1) jid = "%s.%d" % (sys.argv[1], os.getpid()) time_limit = sys.argv[2] wrapped_module = sys.argv[3] - if len(sys.argv) >= 5: - argsfile = sys.argv[4] + argsfile = sys.argv[4] + if '-tmp-' not in os.path.dirname(wrapped_module): + preserve_tmp = True + elif len(sys.argv) > 5: + preserve_tmp = sys.argv[5] == '-preserve_tmp' + else: + preserve_tmp = False + # consider underscore as no argsfile so we can support passing of additional positional parameters + if argsfile != '_': cmd = "%s %s" % (wrapped_module, argsfile) else: cmd = wrapped_module @@ -137,10 +235,10 @@ def _run_module(wrapped_cmd, jid, job_path): try: os.makedirs(jobdir) except: - print json.dumps({ + print(json.dumps({ "failed" : 1, "msg" : "could not create: %s" % jobdir - }) + })) # immediately exit this process, leaving an orphaned process # running which immediately forks a supervisory timing process @@ -149,12 +247,13 @@ def _run_module(wrapped_cmd, jid, job_path): if pid: # Notify the overlord that the async process started - # we need to not return immmediately such that the launched command has an attempt + # we need to not return immediately such that the launched command has an attempt # to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile) # this probably could be done with some IPC later. 
Modules should always read # the argsfile at the very first start of their execution anyway notice("Return async_wrapper task started.") - print json.dumps({ "started" : 1, "ansible_job_id" : jid, "results_file" : job_path }) + print(json.dumps({ "started" : 1, "finished" : 0, "ansible_job_id" : jid, "results_file" : job_path, + "_suppress_tmpdir_delete": not preserve_tmp})) sys.stdout.flush() time.sleep(1) sys.exit(0) @@ -186,8 +285,12 @@ def _run_module(wrapped_cmd, jid, job_path): os.killpg(sub_pid, signal.SIGKILL) notice("Sent kill to group %s"%sub_pid) time.sleep(1) + if not preserve_tmp: + shutil.rmtree(os.path.dirname(wrapped_module), True) sys.exit(0) notice("Done in kid B.") + if not preserve_tmp: + shutil.rmtree(os.path.dirname(wrapped_module), True) sys.exit(0) else: # the child process runs the actual module @@ -196,10 +299,16 @@ def _run_module(wrapped_cmd, jid, job_path): notice("Module complete (%s)"%os.getpid()) sys.exit(0) - except Exception, err: - notice("error: %s"%(err)) - print json.dumps({ + except SystemExit: + # On python2.4, SystemExit is a subclass of Exception. + # This block makes python2.4 behave the same as python2.5+ + raise + + except Exception: + e = sys.exc_info()[1] + notice("error: %s"%(e)) + print(json.dumps({ "failed" : True, - "msg" : "FATAL ERROR: %s" % str(err) - }) + "msg" : "FATAL ERROR: %s" % str(e) + })) sys.exit(1) diff --git a/utilities/logic/debug.py b/utilities/logic/debug.py index 89d9254a08e..8e8f16ca554 100644 --- a/utilities/logic/debug.py +++ b/utilities/logic/debug.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: debug @@ -44,23 +48,29 @@ required: False default: 0 version_added: "2.1" -author: +author: - "Dag Wieers (@dagwieers)" - "Michael DeHaan" ''' EXAMPLES = ''' # Example that prints the loopback address and gateway for each host -- debug: msg="System {{ inventory_hostname }} has uuid {{ ansible_product_uuid }}" +- debug: + msg: "System {{ inventory_hostname }} has uuid {{ ansible_product_uuid }}" -- debug: msg="System {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}" +- debug: + msg: "System {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}" when: ansible_default_ipv4.gateway is defined - shell: /usr/bin/uptime register: result -- debug: var=result verbosity=2 +- debug: + var: result + verbosity: 2 - name: Display all variables/facts known for a host - debug: var=hostvars[inventory_hostname] verbosity=4 + debug: + var: hostvars[inventory_hostname] + verbosity: 4 ''' diff --git a/utilities/logic/fail.py b/utilities/logic/fail.py index 75a7c81d1cf..544758ad100 100644 --- a/utilities/logic/fail.py +++ b/utilities/logic/fail.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: fail @@ -39,6 +43,7 @@ EXAMPLES = ''' # Example playbook using fail and when together -- fail: msg="The system may not be provisioned according to the CMDB status." +- fail: + msg: "The system may not be provisioned according to the CMDB status." 
when: cmdb_status != "to-be-staged" ''' diff --git a/utilities/logic/include.py b/utilities/logic/include.py new file mode 100644 index 00000000000..3de1e198a2a --- /dev/null +++ b/utilities/logic/include.py @@ -0,0 +1,71 @@ +#!/usr/bin/python +# -*- mode: python -*- +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +author: + - "Ansible Core Team (@ansible)" +module: include +short_description: include a play or task list. +description: + - Includes a file with a list of plays or tasks to be executed in the current playbook. + - Files with a list of plays can only be included at the top level, lists of tasks can only be included where tasks normally run (in play). + - Before 2.0 all includes were 'static', executed at play compile time. + - Static includes are not subject to most directives, for example, loops or conditionals, they are applied instead to each inherited task. + - Since 2.0 task includes are dynamic and behave more like real tasks. This means they can be looped, skipped and use variables from any source. + Ansible tries to auto detect this, use the `static` directive (new in 2.1) to bypass autodetection. +version_added: "0.6" +options: + free-form: + description: + - This module allows you to specify the name of the file directly w/o any other options. +notes: + - This is really not a module, though it appears as such, this is a feature of the Ansible Engine, as such it cannot be overridden the same way a module can. 
+''' + +EXAMPLES = """ +# include a play after another play +- hosts: localhost + tasks: + - debug: + msg: "play1" + +- include: otherplays.yml + + +# include task list in play +- hosts: all + tasks: + - debug: + msg: task1 + + - include: stuff.yml + + - debug: + msg: task10 + +# dynamic include task list in play +- hosts: all + tasks: + - debug: + msg: task1 + + - include: "{{ hostvar }}.yml" + static: no + when: hostvar is defined +""" + +RETURN = """ +# this module does not return anything except plays or tasks to execute +""" diff --git a/utilities/logic/include_role.py b/utilities/logic/include_role.py new file mode 100644 index 00000000000..843a873b8ae --- /dev/null +++ b/utilities/logic/include_role.py @@ -0,0 +1,97 @@ +#!/usr/bin/python +# -*- mode: python -*- +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +author: + - "Ansible Core Team (@ansible)" +module: include_role +short_description: Load and execute a role +description: + - "Loads and executes a role as a task, this frees roles from the `role:` directive and allows them to be treated more as tasks." +version_added: "2.2" +options: + name: + description: + - The name of the role to be executed. + required: True + tasks_from: + description: + - "File to load from a Role's tasks/ directory." + required: False + default: 'main' + vars_from: + description: + - "File to load from a Role's vars/ directory." + required: False + default: 'main' + defaults_from: + description: + - "File to load from a Role's defaults/ directory."
+ required: False + default: 'main' + static: + description: + - Gives Ansible a hint if this is a 'static' include or not. If static it implies that it won't need templating nor loops nor conditionals and will show included tasks in the --list options. + required: False + default: None + allow_duplicates: + description: + - Overrides the role's metadata setting to allow using a role more than once with the same parameters. + required: False + default: True + private: + description: + - If True the variables from defaults/ and vars/ in a role will not be made available to the rest of the play. + default: None +notes: + - THIS IS EARLY PREVIEW, THINGS MAY CHANGE + - Handlers are made available to the whole play. + - simple dependencies seem to work fine. + - "Things not tested (yet): plugin overrides, nesting includes, used as handler, other stuff I did not think of when I wrote this." +''' + +EXAMPLES = """ +- include_role: + name: myrole + +- name: Run tasks/other.yml instead of 'main' + include_role: + name: myrole + tasks_from: other + +- name: Pass variables to role + include_role: + name: myrole + vars: + rolevar1: 'value from task' + +- name: Use role in loop + include_role: + name: myrole + with_items: + - "{{roleinput1}}" + - "{{roleinput2}}" + loop_control: + loop_var: roleinputvar + +- name: conditional role + include_role: + name: myrole + when: not idontwanttorun +""" + +RETURN = """ +# this module does not return anything except tasks to execute +""" diff --git a/utilities/logic/include_vars.py b/utilities/logic/include_vars.py index fb55ab5515f..8e7f4aa1bda 100644 --- a/utilities/logic/include_vars.py +++ b/utilities/logic/include_vars.py @@ -8,32 +8,99 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- -author: "Benno Joy (@bennojoy)" +author: "Allen Sanabria (@linuxdynasty)" module: include_vars short_description: Load variables from files, dynamically within a task. description: - - Loads variables from a YAML/JSON file dynamically during task runtime. It can work with conditionals, or use host specific variables to determine the path name to load from. + - Loads variables from a YAML/JSON files dynamically from within a file or + from a directory recursively during task runtime. If loading a directory, the files are sorted alphabetically before being loaded. +version_added: "1.4" options: + file: + version_added: "2.2" + description: + - The file name from which variables should be loaded. + - If the path is relative, it will look for the file in vars/ subdirectory of a role or relative to playbook. + dir: + version_added: "2.2" + description: + - The directory name from which the variables should be loaded. + - If the path is relative, it will look for the file in vars/ subdirectory of a role or relative to playbook. + default: null + name: + version_added: "2.2" + description: + - The name of a variable into which assign the included vars, if omitted (null) they will be made top level vars. + default: null + depth: + version_added: "2.2" + description: + - By default, this module will recursively go through each sub directory and load up the variables. By explicitly setting the depth, this module will only go as deep as the depth. + default: 0 + files_matching: + version_added: "2.2" + description: + - Limit the variables that are loaded within any directory to this regular expression. + default: null + ignore_files: + version_added: "2.2" + description: + - List of file names to ignore. The defaults can not be overridden, but can be extended. 
+ default: null free-form: description: - - The file name from which variables should be loaded, if called from a role it will look for - the file in vars/ subdirectory of the role, otherwise the path would be relative to playbook. An absolute path can also be provided. - required: true -version_added: "1.4" + - This module allows you to specify the 'file' option directly w/o any other options. ''' EXAMPLES = """ -# Conditionally decide to load in variables when x is 0, otherwise do not. -- include_vars: contingency_plan.yml +# Include vars of stuff.yml into the 'stuff' variable (2.2). +- include_vars: + file: stuff.yml + name: stuff + +# Conditionally decide to load in variables into 'plans' when x is 0, otherwise do not. (2.2) +- include_vars: + file: contingency_plan.yml + name: plans when: x == 0 # Load a variable file based on the OS type, or a default if not found. - include_vars: "{{ item }}" with_first_found: - - "{{ ansible_distribution }}.yml" - - "{{ ansible_os_family }}.yml" - - "default.yml" + - "{{ ansible_distribution }}.yml" + - "{{ ansible_os_family }}.yml" + - "default.yml" + +# bare include (free-form) +- include_vars: myvars.yml + +# Include all yml files in vars/all and all nested directories +- include_vars: + dir: 'vars/all' + +# Include all yml files in vars/all and all nested directories and save the output in test. +- include_vars: + dir: 'vars/all' + name: test + +# Include all yml files in vars/services +- include_vars: + dir: 'vars/services' + depth: 1 + +# Include only bastion.yml files +- include_vars: + dir: 'vars' + files_matching: 'bastion.yml' +# Include all yml files except bastion.yml +- include_vars: + dir: 'vars' + ignore_files: 'bastion.yml' """ diff --git a/utilities/logic/pause.py b/utilities/logic/pause.py index 0fad09ea7bc..0fed099b700 100644 --- a/utilities/logic/pause.py +++ b/utilities/logic/pause.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible.
If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: pause @@ -27,12 +31,12 @@ options: minutes: description: - - Number of minutes to pause for. + - A positive number of minutes to pause for. required: false default: null seconds: description: - - Number of seconds to pause for. + - A positive number of seconds to pause for. required: false default: null prompt: @@ -41,15 +45,19 @@ required: false default: null author: "Tim Bielawa (@tbielawa)" +notes: + - Starting in 2.2, if you specify 0 or negative for minutes or seconds, it will wait for 1 second, previously it would wait indefinitely. ''' EXAMPLES = ''' # Pause for 5 minutes to build app cache. -- pause: minutes=5 +- pause: + minutes: 5 # Pause until you can verify updates to an application were successful. - pause: # A helpful reminder of what to look out for post-update. -- pause: prompt="Make sure org.foo.FooOverload exception is not present" +- pause: + prompt: "Make sure org.foo.FooOverload exception is not present" ''' diff --git a/utilities/logic/set_fact.py b/utilities/logic/set_fact.py index 3dc729d07dc..c106ef74e63 100644 --- a/utilities/logic/set_fact.py +++ b/utilities/logic/set_fact.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- author: "Dag Wieers (@dagwieers)" @@ -25,7 +29,7 @@ short_description: Set host facts from a task description: - This module allows setting new variables. Variables are set on a host-by-host basis just like facts discovered by the setup module. - - These variables will survive between plays during an Ansible run, but will not be saved across executions even if you use a fact cache. 
+ - These variables will be available to subsequent plays during an ansible-playbook run, but will not be saved across executions even if you use a fact cache. options: key_value: description: @@ -35,11 +39,16 @@ required: true default: null version_added: "1.2" +notes: + - "The `var=value` notation can only create strings or booleans. + If you want to create lists/arrays or dictionary/hashes use `var: [val1, val2]`" ''' EXAMPLES = ''' -# Example setting host facts using key=value pairs -- set_fact: one_fact="something" other_fact="{{ local_var }}" +# Example setting host facts using key=value pairs, note that this always creates strings or booleans +- set_fact: + one_fact: "something" + other_fact: "{{ local_var }}" # Example setting host facts using complex arguments - set_fact: diff --git a/utilities/logic/wait_for.py b/utilities/logic/wait_for.py index 95e4ec01b5f..55f88fc8c84 100644 --- a/utilities/logic/wait_for.py +++ b/utilities/logic/wait_for.py @@ -27,6 +27,8 @@ import sys import time +from ansible.module_utils._text import to_native + HAS_PSUTIL = False try: import psutil @@ -35,6 +37,10 @@ except ImportError: pass +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: wait_for @@ -78,28 +84,39 @@ description: - port number to poll required: false + default: null state: description: - either C(present), C(started), or C(stopped), C(absent), or C(drained) - When checking a port C(started) will ensure the port is open, C(stopped) will check that it is closed, C(drained) will check for active connections - When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing, C(absent) will check that file is absent or removed choices: [ "present", "started", "stopped", "absent", "drained" ] + required: False default: "started" path: version_added: "1.4" required: false + default: null description: - path to a file on 
the filesytem that must exist before continuing search_regex: version_added: "1.4" required: false + default: null description: - Can be used to match a string in either a file or a socket connection. Defaults to a multiline regex. exclude_hosts: version_added: "1.8" required: false + default: null description: - list of hosts or IPs to ignore when looking for active TCP connections for C(drained) state + sleep: + version_added: "2.3" + required: false + default: 1 + description: + - Number of seconds to sleep between checks, before 2.3 this was hardcoded to 1 second. notes: - The ability to use search_regex with a port connection was added in 1.7. requirements: [] @@ -112,30 +129,50 @@ EXAMPLES = ''' # wait 300 seconds for port 8000 to become open on the host, don't start checking for 10 seconds -- wait_for: port=8000 delay=10 +- wait_for: + port: 8000 + delay: 10 # wait 300 seconds for port 8000 of any IP to close active connections, don't start checking for 10 seconds -- wait_for: host=0.0.0.0 port=8000 delay=10 state=drained +- wait_for: + host: 0.0.0.0 + port: 8000 + delay: 10 + state: drained # wait 300 seconds for port 8000 of any IP to close active connections, ignoring connections for specified hosts -- wait_for: host=0.0.0.0 port=8000 state=drained exclude_hosts=10.2.1.2,10.2.1.3 +- wait_for: + host: 0.0.0.0 + port: 8000 + state: drained + exclude_hosts: 10.2.1.2,10.2.1.3 # wait until the file /tmp/foo is present before continuing -- wait_for: path=/tmp/foo +- wait_for: + path: /tmp/foo # wait until the string "completed" is in the file /tmp/foo before continuing -- wait_for: path=/tmp/foo search_regex=completed +- wait_for: + path: /tmp/foo + search_regex: completed # wait until the lock file is removed -- wait_for: path=/var/lock/file.lock state=absent +- wait_for: + path: /var/lock/file.lock + state: absent # wait until the process is finished and pid was destroyed -- wait_for: path=/proc/3466/status state=absent +- wait_for: + path: /proc/3466/status 
+ state: absent # wait 300 seconds for port 22 to become open and contain "OpenSSH", don't assume the inventory_hostname is resolvable # and don't start checking for 10 seconds -- local_action: wait_for port=22 host="{{ ansible_ssh_host | default(inventory_hostname) }}" search_regex=OpenSSH delay=10 - +- local_action: wait_for + port: 22 + host: "{{ ansible_ssh_host | default(inventory_hostname) }}" + search_regex: OpenSSH + delay: 10 ''' class TCPConnectionInfo(object): @@ -157,6 +194,10 @@ class TCPConnectionInfo(object): socket.AF_INET: '0.0.0.0', socket.AF_INET6: '::', } + ipv4_mapped_ipv6_address = { + 'prefix': '::ffff', + 'match_all': '::ffff:0.0.0.0' + } connection_states = { '01': 'ESTABLISHED', '02': 'SYN_SENT', @@ -171,17 +212,19 @@ def __new__(cls, *args, **kwargs): def __init__(self, module): self.module = module - (self.family, self.ip) = _convert_host_to_ip(self.module.params['host']) + self.ips = _convert_host_to_ip(module.params['host']) self.port = int(self.module.params['port']) self.exclude_ips = self._get_exclude_ips() if not HAS_PSUTIL: module.fail_json(msg="psutil module required for wait_for") def _get_exclude_ips(self): - if self.module.params['exclude_hosts'] is None: - return [] exclude_hosts = self.module.params['exclude_hosts'] - return [ _convert_host_to_hex(h)[1] for h in exclude_hosts ] + exclude_ips = [] + if exclude_hosts is not None: + for host in exclude_hosts: + exclude_ips.extend(_convert_host_to_ip(host)) + return exclude_ips def get_active_connections_count(self): active_connections = 0 @@ -191,10 +234,18 @@ def get_active_connections_count(self): if conn.status not in self.connection_states.values(): continue (local_ip, local_port) = conn.local_address - if self.port == local_port and self.ip in [self.match_all_ips[self.family], local_ip]: - (remote_ip, remote_port) = conn.remote_address - if remote_ip not in self.exclude_ips: - active_connections += 1 + if self.port != local_port: + continue + (remote_ip, remote_port) = 
conn.remote_address + if (conn.family, remote_ip) in self.exclude_ips: + continue + if any(( + (conn.family, local_ip) in self.ips, + (conn.family, self.match_all_ips[conn.family]) in self.ips, + local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and + (conn.family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips, + )): + active_connections += 1 return active_connections @@ -218,37 +269,52 @@ class LinuxTCPConnectionInfo(TCPConnectionInfo): socket.AF_INET: '00000000', socket.AF_INET6: '00000000000000000000000000000000', } + ipv4_mapped_ipv6_address = { + 'prefix': '0000000000000000FFFF0000', + 'match_all': '0000000000000000FFFF000000000000' + } local_address_field = 1 remote_address_field = 2 connection_state_field = 3 def __init__(self, module): self.module = module - (self.family, self.ip) = _convert_host_to_hex(module.params['host']) + self.ips = _convert_host_to_hex(module.params['host']) self.port = "%0.4X" % int(module.params['port']) self.exclude_ips = self._get_exclude_ips() def _get_exclude_ips(self): - if self.module.params['exclude_hosts'] is None: - return [] exclude_hosts = self.module.params['exclude_hosts'] - return [ _convert_host_to_hex(h) for h in exclude_hosts ] + exclude_ips = [] + if exclude_hosts is not None: + for host in exclude_hosts: + exclude_ips.extend(_convert_host_to_hex(host)) + return exclude_ips def get_active_connections_count(self): active_connections = 0 - f = open(self.source_file[self.family]) - for tcp_connection in f.readlines(): - tcp_connection = tcp_connection.strip().split() - if tcp_connection[self.local_address_field] == 'local_address': - continue - if tcp_connection[self.connection_state_field] not in self.connection_states: - continue - (local_ip, local_port) = tcp_connection[self.local_address_field].split(':') - if self.port == local_port and self.ip in [self.match_all_ips[self.family], local_ip]: - (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':') - if remote_ip not 
in self.exclude_ips: - active_connections += 1 - f.close() + for family in self.source_file.keys(): + f = open(self.source_file[family]) + for tcp_connection in f.readlines(): + tcp_connection = tcp_connection.strip().split() + if tcp_connection[self.local_address_field] == 'local_address': + continue + if tcp_connection[self.connection_state_field] not in self.connection_states: + continue + (local_ip, local_port) = tcp_connection[self.local_address_field].split(':') + if self.port != local_port: + continue + (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':') + if (family, remote_ip) in self.exclude_ips: + continue + if any(( + (family, local_ip) in self.ips, + (family, self.match_all_ips[family]) in self.ips, + local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and + (family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips, + )): + active_connections += 1 + f.close() return active_connections @@ -260,10 +326,16 @@ def _convert_host_to_ip(host): host: String with either hostname, IPv4, or IPv6 address Returns: - Tuple containing address family and IP + List of tuples containing address family and IP """ - addrinfo = socket.getaddrinfo(host, 80, 0, 0, socket.SOL_TCP)[0] - return (addrinfo[0], addrinfo[4][0]) + addrinfo = socket.getaddrinfo(host, 80, 0, 0, socket.SOL_TCP) + ips = [] + for family, socktype, proto, canonname, sockaddr in addrinfo: + ip = sockaddr[0] + ips.append((family, ip)) + if family == socket.AF_INET: + ips.append((socket.AF_INET6, "::ffff:" + ip)) + return ips def _convert_host_to_hex(host): """ @@ -276,34 +348,22 @@ def _convert_host_to_hex(host): host: String with either hostname, IPv4, or IPv6 address Returns: - Tuple containing address family and the little-endian converted host - """ - (family, ip) = _convert_host_to_ip(host) - hexed = binascii.hexlify(socket.inet_pton(family, ip)).upper() - if family == socket.AF_INET: - hexed = _little_endian_convert_32bit(hexed) - elif family == 
socket.AF_INET6: - # xrange loops through each 8 character (4B) set in the 128bit total - hexed = "".join([ _little_endian_convert_32bit(hexed[x:x+8]) for x in xrange(0, 32, 8) ]) - return (family, hexed) - -def _little_endian_convert_32bit(block): + List of tuples containing address family and the + little-endian converted host """ - Convert to little-endian, effectively transposing - the order of the four byte word - 12345678 -> 78563412 - - Args: - block: String containing a 4 byte hex representation - - Returns: - String containing the little-endian converted block - """ - # xrange starts at 6, and increments by -2 until it reaches -2 - # which lets us start at the end of the string block and work to the begining - return "".join([ block[x:x+2] for x in xrange(6, -2, -2) ]) - -def _create_connection( (host, port), connect_timeout): + ips = [] + if host is not None: + for family, ip in _convert_host_to_ip(host): + hexip_nf = binascii.b2a_hex(socket.inet_pton(family, ip)) + hexip_hf = "" + for i in range(0, len(hexip_nf), 8): + ipgroup_nf = hexip_nf[i:i+8] + ipgroup_hf = socket.ntohl(int(ipgroup_nf, base=16)) + hexip_hf = "%s%08X" % (hexip_hf, ipgroup_hf) + ips.append((family, hexip_hf)) + return ips + +def _create_connection(host, port, connect_timeout): """ Connect to a 2-tuple (host, port) and return the socket object. 
@@ -314,7 +374,7 @@ def _create_connection( (host, port), connect_timeout): Socket object """ if sys.version_info < (2, 6): - (family, _) = _convert_host_to_ip(host) + (family, _) = (_convert_host_to_ip(host))[0] connect_socket = socket.socket(family, socket.SOCK_STREAM) connect_socket.settimeout(connect_timeout) connect_socket.connect( (host, port) ) @@ -339,7 +399,8 @@ def main(): path=dict(default=None, type='path'), search_regex=dict(default=None), state=dict(default='started', choices=['started', 'stopped', 'present', 'absent', 'drained']), - exclude_hosts=dict(default=None, type='list') + exclude_hosts=dict(default=None, type='list'), + sleep=dict(default=1, type='int') ), ) @@ -384,20 +445,17 @@ def main(): try: f = open(path) f.close() - time.sleep(1) - pass except IOError: break elif port: try: - s = _create_connection( (host, port), connect_timeout) + s = _create_connection(host, port, connect_timeout) s.shutdown(socket.SHUT_RDWR) s.close() - time.sleep(1) except: break - else: - time.sleep(1) + # Conditions not yet met, wait and try again + time.sleep(params['sleep']) else: elapsed = datetime.datetime.now() - start if port: @@ -412,7 +470,8 @@ def main(): if path: try: os.stat(path) - except OSError, e: + except OSError: + e = get_exception() # If anything except file not present, throw an error if e.errno != 2: elapsed = datetime.datetime.now() - start @@ -436,7 +495,7 @@ def main(): elif port: alt_connect_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.now())) try: - s = _create_connection((host, port), min(connect_timeout, alt_connect_timeout)) + s = _create_connection(host, port, min(connect_timeout, alt_connect_timeout)) except: # Failed to connect by connect_timeout. 
wait and try again pass @@ -456,7 +515,7 @@ def main(): if not response: # Server shutdown break - data += response + data += to_native(response, errors='surrogate_or_strict') if re.search(compiled_search_re, data): matched = True break @@ -474,7 +533,7 @@ def main(): break # Conditions not yet met, wait and try again - time.sleep(1) + time.sleep(params['sleep']) else: # while-else # Timeout expired @@ -500,7 +559,8 @@ def main(): break except IOError: pass - time.sleep(1) + # Conditions not yet met, wait and try again + time.sleep(params['sleep']) else: elapsed = datetime.datetime.now() - start module.fail_json(msg="Timeout when waiting for %s:%s to drain" % (host, port), elapsed=elapsed.seconds) diff --git a/web_infrastructure/apache2_module.py b/web_infrastructure/apache2_module.py index ecc176a20b3..34d736d4d7c 100644 --- a/web_infrastructure/apache2_module.py +++ b/web_infrastructure/apache2_module.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with this software. If not, see . 
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: apache2_module @@ -29,6 +33,13 @@ description: - name of the module to enable/disable required: true + force: + description: + - force disabling of default modules and override Debian warnings + required: false + choices: ['yes', 'no'] + default: no + version_added: "2.1" state: description: - indicate the desired state of the resource @@ -40,57 +51,103 @@ EXAMPLES = ''' # enables the Apache2 module "wsgi" -- apache2_module: state=present name=wsgi +- apache2_module: + state: present + name: wsgi # disables the Apache2 module "wsgi" -- apache2_module: state=absent name=wsgi +- apache2_module: + state: absent + name: wsgi ''' import re -def _disable_module(module): +def _run_threaded(module): + control_binary = _get_ctl_binary(module) + + result, stdout, stderr = module.run_command("%s -V" % control_binary) + + if re.search(r'threaded:[ ]*yes', stdout): + return True + else: + return False + +def _get_ctl_binary(module): + for command in ['apache2ctl', 'apachectl']: + ctl_binary = module.get_bin_path(command) + if ctl_binary is not None: + return ctl_binary + + module.fail_json( + msg="None of httpd, apachectl or apach2ctl found. At least one apache control binary is necessary.") + +def _module_is_enabled(module): + control_binary = _get_ctl_binary(module) name = module.params['name'] - a2dismod_binary = module.get_bin_path("a2dismod") - if a2dismod_binary is None: - module.fail_json(msg="a2dismod not found. 
Perhaps this system does not use a2dismod to manage apache") - result, stdout, stderr = module.run_command("%s %s" % (a2dismod_binary, name)) + result, stdout, stderr = module.run_command("%s -M" % control_binary) - if re.match(r'.*\b' + name + r' already disabled', stdout, re.S|re.M): - module.exit_json(changed = False, result = "Success") - elif result != 0: - module.fail_json(msg="Failed to disable module %s: %s" % (name, stdout)) + """ + Work around for Ubuntu Xenial listing php7_module as php7.0 + """ + if name == "php7.0": + name = "php7" + + if result != 0: + module.fail_json(msg="Error executing %s: %s" % (control_binary, stderr)) + + if re.search(r' ' + name + r'_module', stdout): + return True else: - module.exit_json(changed = True, result = "Disabled") + return False -def _enable_module(module): +def _set_state(module, state): name = module.params['name'] - a2enmod_binary = module.get_bin_path("a2enmod") - if a2enmod_binary is None: - module.fail_json(msg="a2enmod not found. Perhaps this system does not use a2enmod to manage apache") + force = module.params['force'] + + want_enabled = state == 'present' + state_string = {'present': 'enabled', 'absent': 'disabled'}[state] + a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state] + success_msg = "Module %s %s" % (name, state_string) + + if _module_is_enabled(module) != want_enabled: + if module.check_mode: + module.exit_json(changed = True, result = success_msg) - result, stdout, stderr = module.run_command("%s %s" % (a2enmod_binary, name)) + a2mod_binary = module.get_bin_path(a2mod_binary) + if a2mod_binary is None: + module.fail_json(msg="%s not found. 
Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary)) - if re.match(r'.*\b' + name + r' already enabled', stdout, re.S|re.M): - module.exit_json(changed = False, result = "Success") - elif result != 0: - module.fail_json(msg="Failed to enable module %s: %s" % (name, stdout)) + if not want_enabled and force: + # force exists only for a2dismod on debian + a2mod_binary += ' -f' + + result, stdout, stderr = module.run_command("%s %s" % (a2mod_binary, name)) + + if _module_is_enabled(module) == want_enabled: + module.exit_json(changed = True, result = success_msg) + else: + module.fail_json(msg="Failed to set module %s to %s: %s" % (name, state_string, stdout), rc=result, stdout=stdout, stderr=stderr) else: - module.exit_json(changed = True, result = "Enabled") + module.exit_json(changed = False, result = success_msg) def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), + force = dict(required=False, type='bool', default=False), state = dict(default='present', choices=['absent', 'present']) ), + supports_check_mode = True, ) - if module.params['state'] == 'present': - _enable_module(module) + name = module.params['name'] + if name == 'cgi' and _run_threaded(module): + module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module %s possible." % name) - if module.params['state'] == 'absent': - _disable_module(module) + if module.params['state'] in ['present', 'absent']: + _set_state(module, module.params['state']) # import module snippets from ansible.module_utils.basic import * diff --git a/web_infrastructure/django_manage.py b/web_infrastructure/django_manage.py index e1447d5d17e..efd32a33a18 100644 --- a/web_infrastructure/django_manage.py +++ b/web_infrastructure/django_manage.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . 
# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: django_manage @@ -99,24 +103,34 @@ EXAMPLES = """ # Run cleanup on the application installed in 'django_dir'. -- django_manage: command=cleanup app_path={{ django_dir }} +- django_manage: + command: cleanup + app_path: "{{ django_dir }}" # Load the initial_data fixture into the application -- django_manage: command=loaddata app_path={{ django_dir }} fixtures={{ initial_data }} +- django_manage: + command: loaddata + app_path: "{{ django_dir }}" + fixtures: "{{ initial_data }}" # Run syncdb on the application -- django_manage: > - command=syncdb - app_path={{ django_dir }} - settings={{ settings_app_name }} - pythonpath={{ settings_dir }} - virtualenv={{ virtualenv_dir }} +- django_manage: + command: syncdb + app_path: "{{ django_dir }}" + settings: "{{ settings_app_name }}" + pythonpath: "{{ settings_dir }}" + virtualenv: "{{ virtualenv_dir }}" # Run the SmokeTest test case from the main app. Useful for testing deploys. -- django_manage: command=test app_path={{ django_dir }} apps=main.SmokeTest +- django_manage: + command: test + app_path: "{{ django_dir }}" + apps: main.SmokeTest # Create an initial superuser. 
-- django_manage: command="createsuperuser --noinput --username=admin --email=admin@example.com" app_path={{ django_dir }} +- django_manage: + command: "createsuperuser --noinput --username=admin --email=admin@example.com" + app_path: "{{ django_dir }}" """ @@ -167,7 +181,7 @@ def migrate_filter_output(line): return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) or ("Applying" in line) def collectstatic_filter_output(line): - return "0 static files" not in line + return line and "0 static files" not in line def main(): command_allowed_param_map = dict( @@ -274,7 +288,7 @@ def main(): lines = out.split('\n') filt = globals().get(command + "_filter_output", None) if filt: - filtered_output = filter(filt, out.split('\n')) + filtered_output = filter(filt, lines) if len(filtered_output): changed = filtered_output @@ -284,4 +298,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/web_infrastructure/htpasswd.py b/web_infrastructure/htpasswd.py index 83a6445374b..0c5d8bea9d6 100644 --- a/web_infrastructure/htpasswd.py +++ b/web_infrastructure/htpasswd.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ module: htpasswd version_added: "1.3" @@ -74,11 +78,26 @@ EXAMPLES = """ # Add a user to a password file and ensure permissions are set -- htpasswd: path=/etc/nginx/passwdfile name=janedoe password=9s36?;fyNp owner=root group=www-data mode=0640 +- htpasswd: + path: /etc/nginx/passwdfile + name: janedoe + password: '9s36?;fyNp' + owner: root + group: www-data + mode: 0640 + # Remove a user from a password file -- htpasswd: path=/etc/apache2/passwdfile name=foobar state=absent +- htpasswd: + path: /etc/apache2/passwdfile + name: foobar + state: absent + # Add a user to a password file suitable for use by libpam-pwdfile -- htpasswd: path=/etc/mail/passwords name=alex password=oedu2eGh crypt_scheme=md5_crypt +- htpasswd: + path: /etc/mail/passwords + name: alex + password: oedu2eGh + crypt_scheme: md5_crypt """ @@ -187,7 +206,7 @@ def main(): arg_spec = dict( path=dict(required=True, aliases=["dest", "destfile"]), name=dict(required=True, aliases=["username"]), - password=dict(required=False, default=None), + password=dict(required=False, default=None, no_log=True), crypt_scheme=dict(required=False, default="apr_md5_crypt"), state=dict(required=False, default="present"), create=dict(type='bool', default='yes'), @@ -251,12 +270,14 @@ def main(): check_file_attrs(module, changed, msg) module.exit_json(msg=msg, changed=changed) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg=str(e)) # import module snippets -from ansible.module_utils.basic import * +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception if __name__ == '__main__': main() diff --git a/web_infrastructure/supervisorctl.py b/web_infrastructure/supervisorctl.py index 6ac5bc7de67..84c8ece749f 100644 --- a/web_infrastructure/supervisorctl.py +++ b/web_infrastructure/supervisorctl.py @@ -19,6 +19,11 @@ # 
along with Ansible. If not, see . # import os +from ansible.module_utils.basic import AnsibleModule, is_executable + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -82,16 +87,28 @@ EXAMPLES = ''' # Manage the state of program to be in 'started' state. -- supervisorctl: name=my_app state=started +- supervisorctl: + name: my_app + state: started # Manage the state of program group to be in 'started' state. -- supervisorctl: name='my_apps:' state=started +- supervisorctl: + name: 'my_apps:' + state: started # Restart my_app, reading supervisorctl configuration from a specified file. -- supervisorctl: name=my_app state=restarted config=/var/opt/my_project/supervisord.conf +- supervisorctl: + name: my_app + state: restarted + config: /var/opt/my_project/supervisord.conf # Restart my_app, connecting to supervisord with credentials and server URL. -- supervisorctl: name=my_app state=restarted username=test password=testpass server_url=http://localhost:9001 +- supervisorctl: + name: my_app + state: restarted + username: test + password: testpass + server_url: http://localhost:9001 ''' @@ -101,7 +118,7 @@ def main(): config=dict(required=False, type='path'), server_url=dict(required=False), username=dict(required=False), - password=dict(required=False), + password=dict(required=False, no_log=True), supervisorctl_path=dict(required=False, type='path'), state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent']) ) @@ -239,8 +256,5 @@ def take_action_on_processes(processes, status_filter, action, expected_result): module.fail_json(name=name, msg="ERROR (no such process)") take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped') -# import module snippets -from ansible.module_utils.basic import * -# is_executable from basic if __name__ == '__main__': main() diff --git a/windows/async_status.ps1 b/windows/async_status.ps1 new file mode 
100644 index 00000000000..efde748fb97 --- /dev/null +++ b/windows/async_status.ps1 @@ -0,0 +1,69 @@ +#!powershell +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# WANT_JSON +# POWERSHELL_COMMON + +$results = @{changed=$false} + +$parsed_args = Parse-Args $args +$jid = Get-AnsibleParam $parsed_args "jid" -failifempty $true -resultobj $results +$mode = Get-AnsibleParam $parsed_args "mode" -Default "status" -ValidateSet "status","cleanup" + +# setup logging directory +$log_path = [System.IO.Path]::Combine($env:LOCALAPPDATA, ".ansible_async", $jid) + +If(-not $(Test-Path $log_path)) +{ + Fail-Json @{ansible_job_id=$jid; started=1; finished=1} "could not find job" +} + +If($mode -eq "cleanup") { + Remove-Item $log_path -Recurse + Exit-Json @{ansible_job_id=$jid; erased=$log_path} +} + +# NOT in cleanup mode, assume regular status mode +# no remote kill mode currently exists, but probably should +# consider log_path + ".pid" file and also unlink that above + +$data = $null +Try { + $data_raw = Get-Content $log_path + + # TODO: move this into module_utils/powershell.ps1? + $jss = New-Object System.Web.Script.Serialization.JavaScriptSerializer + $data = $jss.DeserializeObject($data_raw) +} +Catch { + If(-not $data_raw) { + # file not written yet? 
That means it is running + Exit-Json @{results_file=$log_path; ansible_job_id=$jid; started=1; finished=0} + } + Else { + Fail-Json @{ansible_job_id=$jid; results_file=$log_path; started=1; finished=1} "Could not parse job output: $data" + } +} + +If (-not $data.ContainsKey("started")) { + $data['finished'] = 1 + $data['ansible_job_id'] = $jid +} +ElseIf (-not $data.ContainsKey("finished")) { + $data['finished'] = 0 +} + +Exit-Json $data diff --git a/windows/async_wrapper.ps1 b/windows/async_wrapper.ps1 new file mode 100644 index 00000000000..a79a6d6bb13 --- /dev/null +++ b/windows/async_wrapper.ps1 @@ -0,0 +1,447 @@ +#!powershell +# This file is part of Ansible +# +# Copyright (c)2016, Matt Davis +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +Param( + [string]$jid, + [int]$max_exec_time_sec, + [string]$module_path, + [string]$argfile_path, + [switch]$preserve_tmp +) + +# WANT_JSON +# POWERSHELL_COMMON + +Set-StrictMode -Version 2 +$ErrorActionPreference = "Stop" + +Function Start-Watchdog { + Param( + [string]$module_tempdir, + [string]$module_path, + [int]$max_exec_time_sec, + [string]$resultfile_path, + [string]$argfile_path, + [switch]$preserve_tmp, + [switch]$start_suspended + ) + +# BEGIN Ansible.Async native type definition + $native_process_util = @" + using Microsoft.Win32.SafeHandles; + using System; + using System.ComponentModel; + using System.Diagnostics; + using System.IO; + using System.Linq; + using System.Runtime.InteropServices; + using System.Text; + using System.Threading; + + namespace Ansible.Async { + + public static class NativeProcessUtil + { + [DllImport("kernel32.dll", SetLastError=true, CharSet=CharSet.Unicode)] + public static extern bool CreateProcess( + string lpApplicationName, + string lpCommandLine, + IntPtr lpProcessAttributes, + IntPtr lpThreadAttributes, + bool bInheritHandles, + uint dwCreationFlags, + IntPtr lpEnvironment, + string lpCurrentDirectory, + [In] ref STARTUPINFO lpStartupInfo, + out PROCESS_INFORMATION lpProcessInformation); + + [DllImport("kernel32.dll", SetLastError = true, CharSet=CharSet.Unicode)] + public static extern uint SearchPath ( + string lpPath, + string lpFileName, + string lpExtension, + int nBufferLength, + [MarshalAs (UnmanagedType.LPTStr)] + StringBuilder lpBuffer, + out IntPtr lpFilePart); + + public static string SearchPath(string findThis) + { + StringBuilder sbOut = new StringBuilder(1024); + IntPtr filePartOut; + + if(SearchPath(null, findThis, null, sbOut.Capacity, sbOut, out filePartOut) == 0) + throw new FileNotFoundException("Couldn't locate " + findThis + " on path"); + + return sbOut.ToString(); + } + + [DllImport("kernel32.dll", SetLastError=true)] + static extern SafeFileHandle OpenThread( + ThreadAccessRights 
dwDesiredAccess, + bool bInheritHandle, + int dwThreadId); + + [DllImport("kernel32.dll", SetLastError=true)] + static extern int ResumeThread(SafeHandle hThread); + + public static void ResumeThreadById(int threadId) + { + var threadHandle = OpenThread(ThreadAccessRights.SUSPEND_RESUME, false, threadId); + if(threadHandle.IsInvalid) + throw new Exception(String.Format("Thread ID {0} is invalid ({1})", threadId, new Win32Exception(Marshal.GetLastWin32Error()).Message)); + + try + { + if(ResumeThread(threadHandle) == -1) + throw new Exception(String.Format("Thread ID {0} cannot be resumed ({1})", threadId, new Win32Exception(Marshal.GetLastWin32Error()).Message)); + } + finally + { + threadHandle.Dispose(); + } + } + + public static void ResumeProcessById(int pid) + { + var proc = Process.GetProcessById(pid); + + // wait for at least one suspended thread in the process (this handles possible slow startup race where primary thread of created-suspended process has not yet become runnable) + var retryCount = 0; + while(!proc.Threads.OfType().Any(t=>t.ThreadState == System.Diagnostics.ThreadState.Wait && t.WaitReason == ThreadWaitReason.Suspended)) + { + proc.Refresh(); + Thread.Sleep(50); + if (retryCount > 100) + throw new InvalidOperationException(String.Format("No threads were suspended in target PID {0} after 5s", pid)); + } + + foreach(var thread in proc.Threads.OfType().Where(t => t.ThreadState == System.Diagnostics.ThreadState.Wait && t.WaitReason == ThreadWaitReason.Suspended)) + ResumeThreadById(thread.Id); + } + } + + [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)] + public struct STARTUPINFO + { + public Int32 cb; + public string lpReserved; + public string lpDesktop; + public string lpTitle; + public Int32 dwX; + public Int32 dwY; + public Int32 dwXSize; + public Int32 dwYSize; + public Int32 dwXCountChars; + public Int32 dwYCountChars; + public Int32 dwFillAttribute; + public Int32 dwFlags; + public Int16 wShowWindow; + public Int16 
cbReserved2; + public IntPtr lpReserved2; + public IntPtr hStdInput; + public IntPtr hStdOutput; + public IntPtr hStdError; + } + + [StructLayout(LayoutKind.Sequential)] + public struct PROCESS_INFORMATION + { + public IntPtr hProcess; + public IntPtr hThread; + public int dwProcessId; + public int dwThreadId; + } + + [Flags] + enum ThreadAccessRights : uint + { + SUSPEND_RESUME = 0x0002 + } + } +"@ # END Ansible.Async native type definition + + Add-Type -TypeDefinition $native_process_util + + $watchdog_script = { + Set-StrictMode -Version 2 + $ErrorActionPreference = "Stop" + + Function Log { + Param( + [string]$msg + ) + + If(Get-Variable -Name log_path -ErrorAction SilentlyContinue) { + Add-Content $log_path $msg + } + } + + Add-Type -AssemblyName System.Web.Extensions + + # -EncodedCommand won't allow us to pass args, so they have to be templated into the script + $jsonargs = @" + <> +"@ + Function Deserialize-Json { + Param( + [Parameter(ValueFromPipeline=$true)] + [string]$json + ) + + # FUTURE: move this into module_utils/powershell.ps1 and use for everything (sidestep PSCustomObject issues) + # FUTURE: won't work w/ Nano Server/.NET Core- fallback to DataContractJsonSerializer (which can't handle dicts on .NET 4.0) + + Log "Deserializing:`n$json" + + $jss = New-Object System.Web.Script.Serialization.JavaScriptSerializer + return $jss.DeserializeObject($json) + } + + Function Write-Result { + [hashtable]$result, + [string]$resultfile_path + + $result | ConvertTo-Json | Set-Content -Path $resultfile_path + } + + Function Exec-Module { + Param( + [string]$module_tempdir, + [string]$module_path, + [int]$max_exec_time_sec, + [string]$resultfile_path, + [string]$argfile_path, + [switch]$preserve_tmp + ) + + Log "in watchdog exec" + + Try + { + Log "deserializing existing resultfile args" + # read in existing resultsfile to merge w/ module output (it should be written by the time we're unsuspended and running) + $result = Get-Content $resultfile_path -Raw | 
Deserialize-Json + + Log "deserialized result is $($result | Out-String)" + + Log "creating runspace" + + $rs = [runspacefactory]::CreateRunspace() + $rs.Open() + $rs.SessionStateProxy.Path.SetLocation($module_tempdir) | Out-Null + + Log "creating Powershell object" + + $job = [powershell]::Create() + $job.Runspace = $rs + + Log "adding scripts" + + if($module_path.EndsWith(".ps1")) { + $job.AddScript($module_path) | Out-Null + } + else { + $job.AddCommand($module_path) | Out-Null + $job.AddArgument($argfile_path) | Out-Null + } + + Log "job BeginInvoke()" + + $job_asyncresult = $job.BeginInvoke() + + Log "waiting $max_exec_time_sec seconds for job to complete" + + $signaled = $job_asyncresult.AsyncWaitHandle.WaitOne($max_exec_time_sec * 1000) + + $result["finished"] = 1 + + If($job_asyncresult.IsCompleted) { + Log "job completed, calling EndInvoke()" + + $job_output = $job.EndInvoke($job_asyncresult) + $job_error = $job.Streams.Error + + Log "raw module stdout: \r\n$job_output" + If($job_error) { + Log "raw module stderr: \r\n$job_error" + } + + # write success/output/error to result object + + # TODO: cleanse leading/trailing junk + Try { + $module_result = Deserialize-Json $job_output + # TODO: check for conflicting keys + $result = $result + $module_result + } + Catch { + $excep = $_ + + $result.failed = $true + $result.msg = "failed to parse module output: $excep" + } + + # TODO: determine success/fail, or always include stderr if nonempty? 
+ Write-Result $result $resultfile_path + + Log "wrote output to $resultfile_path" + } + Else { + $job.Stop() + # write timeout to result object + $result.failed = $true + $result.msg = "timed out waiting for module completion" + Write-Result $result $resultfile_path + + Log "wrote timeout to $resultfile_path" + } + + $rs.Close() | Out-Null + } + Catch { + $excep = $_ + + $result = @{failed=$true; msg="module execution failed: $($excep.ToString())`n$($excep.InvocationInfo.PositionMessage)"} + + Write-Result $result $resultfile_path + } + Finally + { + If(-not $preserve_tmp -and $module_tempdir -imatch "-tmp-") { + Try { + Log "deleting tempdir, cwd is $(Get-Location)" + Set-Location $env:USERPROFILE + $res = Remove-Item $module_tempdir -recurse 2>&1 + Log "delete output was $res" + } + Catch { + $excep = $_ + Log "error deleting tempdir: $excep" + } + } + Else { + Log "skipping tempdir deletion" + } + } + } + + Try { + Log "deserializing args" + + # deserialize the JSON args that should've been templated in before execution + $ext_args = Deserialize-Json $jsonargs + + Log "exec module" + + Exec-Module @ext_args + + Log "exec done" + } + Catch { + $excep = $_ + + Log $excep + } + } + + $bp = [hashtable] $MyInvocation.BoundParameters + # convert switch types to bool so they'll serialize as simple bools + $bp["preserve_tmp"] = [bool]$bp["preserve_tmp"] + $bp["start_suspended"] = [bool]$bp["start_suspended"] + + # serialize this function's args to JSON so we can template them verbatim into the script(block) + $jsonargs = $bp | ConvertTo-Json + + $raw_script = $watchdog_script.ToString() + $raw_script = $raw_script.Replace("<>", $jsonargs) + + $encoded_command = [Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($raw_script)) + + # FUTURE: create under new job to ensure all children die on exit? 
+ + # FUTURE: move these flags into C# enum + # start process suspended + breakaway so we can record the watchdog pid without worrying about a completion race + Set-Variable CREATE_BREAKAWAY_FROM_JOB -Value ([uint32]0x01000000) -Option Constant + Set-Variable CREATE_SUSPENDED -Value ([uint32]0x00000004) -Option Constant + Set-Variable CREATE_UNICODE_ENVIRONMENT -Value ([uint32]0x000000400) -Option Constant + Set-Variable CREATE_NEW_CONSOLE -Value ([uint32]0x00000010) -Option Constant + + $pstartup_flags = $CREATE_BREAKAWAY_FROM_JOB -bor $CREATE_UNICODE_ENVIRONMENT -bor $CREATE_NEW_CONSOLE + If($start_suspended) { + $pstartup_flags = $pstartup_flags -bor $CREATE_SUSPENDED + } + + # execute the dynamic watchdog as a breakway process, which will in turn exec the module + $si = New-Object Ansible.Async.STARTUPINFO + $si.cb = [System.Runtime.InteropServices.Marshal]::SizeOf([type][Ansible.Async.STARTUPINFO]) + + $pi = New-Object Ansible.Async.PROCESS_INFORMATION + + # FUTURE: direct cmdline CreateProcess path lookup fails- this works but is sub-optimal + $exec_cmd = [Ansible.Async.NativeProcessUtil]::SearchPath("powershell.exe") + $exec_args = "`"$exec_cmd`" -NoProfile -ExecutionPolicy Bypass -EncodedCommand $encoded_command" + + If(-not [Ansible.Async.NativeProcessUtil]::CreateProcess($exec_cmd, $exec_args, [IntPtr]::Zero, [IntPtr]::Zero, $false, $pstartup_flags, [IntPtr]::Zero, $env:windir, [ref]$si, [ref]$pi)) { + #throw New-Object System.ComponentModel.Win32Exception + throw "create bang $([System.Runtime.InteropServices.Marshal]::GetLastWin32Error())" + } + + $watchdog_pid = $pi.dwProcessId + + return $watchdog_pid +} + +$local_jid = $jid + "." 
+ $pid + +$results_path = [System.IO.Path]::Combine($env:LOCALAPPDATA, ".ansible_async", $local_jid) + +[System.IO.Directory]::CreateDirectory([System.IO.Path]::GetDirectoryName($results_path)) | Out-Null + +$watchdog_args = @{ + module_tempdir=$([System.IO.Path]::GetDirectoryName($module_path)); + module_path=$module_path; + max_exec_time_sec=$max_exec_time_sec; + resultfile_path=$results_path; + argfile_path=$argfile_path; + start_suspended=$true; +} + +If($preserve_tmp) { + $watchdog_args["preserve_tmp"] = $true +} + +# start watchdog/module-exec +$watchdog_pid = Start-Watchdog @watchdog_args + +# populate initial results before we resume the process to avoid result race +$result = @{ + started=1; + finished=0; + results_file=$results_path; + ansible_job_id=$local_jid; + _suppress_tmpdir_delete=$true; + ansible_async_watchdog_pid=$watchdog_pid +} + +$result_json = ConvertTo-Json $result +Set-Content $results_path -Value $result_json + +[Ansible.Async.NativeProcessUtil]::ResumeProcessById($watchdog_pid) + +return $result_json diff --git a/windows/setup.ps1 b/windows/setup.ps1 index 63e5597d861..fef2ade25c4 100644 --- a/windows/setup.ps1 +++ b/windows/setup.ps1 @@ -17,9 +17,7 @@ # WANT_JSON # POWERSHELL_COMMON -# enabled $params (David O'Brien, 06/08/2015) -$params = Parse-Args $args; - +$params = Parse-Args $args -supports_check_mode $true Function Get-CustomFacts { [cmdletBinding()] @@ -54,8 +52,15 @@ if ($factpath -ne $null) { $win32_os = Get-CimInstance Win32_OperatingSystem $win32_cs = Get-CimInstance Win32_ComputerSystem +$win32_bios = Get-CimInstance Win32_Bios +$win32_cpu = Get-CimInstance Win32_Processor +If ($win32_cpu -is [array]) { # multi-socket, pick first + $win32_cpu = $win32_cpu[0] +} + +$ip_props = [System.Net.NetworkInformation.IPGlobalProperties]::GetIPGlobalProperties() $osversion = [Environment]::OSVersion -$capacity = $win32_cs.TotalPhysicalMemory # Win32_PhysicalMemory is empty on some virtual platforms +$user = 
[Security.Principal.WindowsIdentity]::GetCurrent() $netcfg = Get-WmiObject win32_NetworkAdapterConfiguration $ActiveNetcfg = @(); $ActiveNetcfg+= $netcfg | where {$_.ipaddress -ne $null} @@ -77,29 +82,78 @@ foreach ($adapter in $ActiveNetcfg) $formattednetcfg += $thisadapter;$thisadapter = $null } +$cpu_list = @( ) +for ($i=1; $i -le ($win32_cpu.NumberOfLogicalProcessors / $win32_cs.NumberOfProcessors); $i++) { + $cpu_list += $win32_cpu.Manufacturer + $cpu_list += $win32_cpu.Name +} + Set-Attr $result.ansible_facts "ansible_interfaces" $formattednetcfg Set-Attr $result.ansible_facts "ansible_architecture" $win32_os.OSArchitecture -Set-Attr $result.ansible_facts "ansible_hostname" $env:COMPUTERNAME; -Set-Attr $result.ansible_facts "ansible_fqdn" "$([System.Net.Dns]::GetHostByName((hostname)).HostName)" +Set-Attr $result.ansible_facts "ansible_bios_date" $win32_bios.ReleaseDate.ToString("MM/dd/yyyy") +Set-Attr $result.ansible_facts "ansible_bios_version" $win32_bios.SMBIOSBIOSVersion +Set-Attr $result.ansible_facts "ansible_hostname" $env:COMPUTERNAME +Set-Attr $result.ansible_facts "ansible_fqdn" ($ip_props.Hostname + "." 
+ $ip_props.DomainName) +Set-Attr $result.ansible_facts "ansible_processor" $cpu_list +Set-Attr $result.ansible_facts "ansible_processor_cores" $win32_cpu.NumberOfCores +Set-Attr $result.ansible_facts "ansible_processor_count" $win32_cs.NumberOfProcessors +Set-Attr $result.ansible_facts "ansible_processor_threads_per_core" ($win32_cpu.NumberOfLogicalProcessors / $win32_cs.NumberOfProcessors / $win32_cpu.NumberOfCores) +Set-Attr $result.ansible_facts "ansible_processor_vcpus" ($win32_cpu.NumberOfLogicalProcessors / $win32_cs.NumberOfProcessors) +Set-Attr $result.ansible_facts "ansible_product_name" $win32_cs.Model.Trim() +Set-Attr $result.ansible_facts "ansible_product_serial" $win32_bios.SerialNumber +#Set-Attr $result.ansible_facts "ansible_product_version" ([string] $win32_cs.SystemFamily) Set-Attr $result.ansible_facts "ansible_system" $osversion.Platform.ToString() +Set-Attr $result.ansible_facts "ansible_system_description" ([string] $win32_os.Description) +Set-Attr $result.ansible_facts "ansible_system_vendor" $win32_cs.Manufacturer Set-Attr $result.ansible_facts "ansible_os_family" "Windows" Set-Attr $result.ansible_facts "ansible_os_name" ($win32_os.Name.Split('|')[0]).Trim() -Set-Attr $result.ansible_facts "ansible_distribution" $osversion.VersionString +Set-Attr $result.ansible_facts "ansible_distribution" $win32_os.Caption Set-Attr $result.ansible_facts "ansible_distribution_version" $osversion.Version.ToString() +Set-Attr $result.ansible_facts "ansible_distribution_major_version" $osversion.Version.Major.ToString() +Set-Attr $result.ansible_facts "ansible_kernel" $osversion.Version.ToString() + +Set-Attr $result.ansible_facts "ansible_machine_id" $user.User.AccountDomainSid.Value +Set-Attr $result.ansible_facts "ansible_domain" $ip_props.DomainName +Set-Attr $result.ansible_facts "ansible_nodename" ($ip_props.HostName + "." 
+ $ip_props.DomainName) +Set-Attr $result.ansible_facts "ansible_windows_domain" $win32_cs.Domain + +Set-Attr $result.ansible_facts "ansible_owner_name" ([string] $win32_cs.PrimaryOwnerName) +Set-Attr $result.ansible_facts "ansible_owner_contact" ([string] $win32_cs.PrimaryOwnerContact) + +Set-Attr $result.ansible_facts "ansible_user_dir" $env:userprofile +Set-Attr $result.ansible_facts "ansible_user_gecos" "" # Win32_UserAccount.FullName is probably the right thing here, but it can be expensive to get on large domains +Set-Attr $result.ansible_facts "ansible_user_id" $env:username +Set-Attr $result.ansible_facts "ansible_user_sid" $user.User.Value $date = New-Object psobject -Set-Attr $date "date" (Get-Date -format d) -Set-Attr $date "year" (Get-Date -format yyyy) -Set-Attr $date "month" (Get-Date -format MM) -Set-Attr $date "day" (Get-Date -format dd) -Set-Attr $date "hour" (Get-Date -format HH) -Set-Attr $date "minute" (Get-Date -format mm) -Set-Attr $date "iso8601" (Get-Date -format s) +$datetime = (Get-Date) +$datetime_utc = $datetime.ToUniversalTime() +Set-Attr $date "date" $datetime.ToString("yyyy-MM-dd") +Set-Attr $date "day" $datetime.ToString("dd") +Set-Attr $date "epoch" (Get-Date -UFormat "%s") +Set-Attr $date "hour" $datetime.ToString("HH") +Set-Attr $date "iso8601" $datetime_utc.ToString("yyyy-MM-ddTHH:mm:ssZ") +Set-Attr $date "iso8601_basic" $datetime.ToString("yyyyMMddTHHmmssffffff") +Set-Attr $date "iso8601_basic_short" $datetime.ToString("yyyyMMddTHHmmss") +Set-Attr $date "iso8601_micro" $datetime_utc.ToString("yyyy-MM-ddTHH:mm:ss.ffffffZ") +Set-Attr $date "minute" $datetime.ToString("mm") +Set-Attr $date "month" $datetime.ToString("MM") +Set-Attr $date "second" $datetime.ToString("ss") +Set-Attr $date "time" $datetime.ToString("HH:mm:ss") +Set-Attr $date "tz_offset" $datetime.ToString("zzzz") +Set-Attr $date "tz" ([System.TimeZoneInfo]::Local.Id) +# Ensure that the weekday is in English +Set-Attr $date "weekday" $datetime.ToString("dddd", 
[System.Globalization.CultureInfo]::InvariantCulture) +Set-Attr $date "weekday_number" (Get-Date -UFormat "%w") +Set-Attr $date "weeknumber" (Get-Date -UFormat "%W") +Set-Attr $date "year" $datetime.ToString("yyyy") Set-Attr $result.ansible_facts "ansible_date_time" $date -Set-Attr $result.ansible_facts "ansible_totalmem" $capacity +# Win32_PhysicalMemory is empty on some virtual platforms +Set-Attr $result.ansible_facts "ansible_memtotal_mb" ([math]::round($win32_cs.TotalPhysicalMemory / 1024 / 1024)) +Set-Attr $result.ansible_facts "ansible_swaptotal_mb" ([math]::round($win32_os.TotalSwapSpaceSize / 1024 / 1024)) Set-Attr $result.ansible_facts "ansible_lastboot" $win32_os.lastbootuptime.ToString("u") Set-Attr $result.ansible_facts "ansible_uptime_seconds" $([System.Convert]::ToInt64($(Get-Date).Subtract($win32_os.lastbootuptime).TotalSeconds)) @@ -121,38 +175,49 @@ Set-Attr $result.ansible_facts "ansible_env" $env_vars $psversion = $PSVersionTable.PSVersion.Major Set-Attr $result.ansible_facts "ansible_powershell_version" $psversion -$winrm_https_listener_parent_path = Get-ChildItem -Path WSMan:\localhost\Listener -Recurse | Where-Object {$_.PSChildName -eq "Transport" -and $_.Value -eq "HTTPS"} | select PSParentPath -$winrm_https_listener_path = $null -$https_listener = $null -$winrm_cert_thumbprint = $null -$uppercase_cert_thumbprint = $null - -if ($winrm_https_listener_parent_path ) { - $winrm_https_listener_path = $winrm_https_listener_parent_path.PSParentPath.Substring($winrm_https_listener_parent_path.PSParentPath.LastIndexOf("\")) +$winrm_https_listener_parent_paths = Get-ChildItem -Path WSMan:\localhost\Listener -Recurse | Where-Object {$_.PSChildName -eq "Transport" -and $_.Value -eq "HTTPS"} | select PSParentPath +if ($winrm_https_listener_parent_paths -isnot [array]) { + $winrm_https_listener_parent_paths = @($winrm_https_listener_parent_paths) } -if ($winrm_https_listener_path) -{ - $https_listener = Get-ChildItem -Path 
"WSMan:\localhost\Listener$winrm_https_listener_path" +$winrm_https_listener_paths = @() +$https_listeners = @() +$winrm_cert_thumbprints = @() +$winrm_cert_expiry = @() + +foreach ($winrm_https_listener_parent_path in $winrm_https_listener_parent_paths) { + $winrm_https_listener_paths += $winrm_https_listener_parent_path.PSParentPath.Substring($winrm_https_listener_parent_path.PSParentPath.LastIndexOf("\")) } -if ($https_listener) +foreach ($winrm_https_listener_path in $winrm_https_listener_paths) { - $winrm_cert_thumbprint = $https_listener | where {$_.Name -EQ "CertificateThumbprint" } | select Value + $https_listeners += Get-ChildItem -Path "WSMan:\localhost\Listener$winrm_https_listener_path" } -if ($winrm_cert_thumbprint) +foreach ($https_listener in $https_listeners) { - $uppercase_cert_thumbprint = $winrm_cert_thumbprint.Value.ToString().ToUpper() + $winrm_cert_thumbprints += $https_listener | where {$_.Name -EQ "CertificateThumbprint" } | select Value } -$winrm_cert_expiry = Get-ChildItem -Path Cert:\LocalMachine\My | where Thumbprint -EQ $uppercase_cert_thumbprint | select NotAfter +foreach ($winrm_cert_thumbprint in $winrm_cert_thumbprints) { + Try { + $winrm_cert_expiry += Get-ChildItem -Path Cert:\LocalMachine\My | where Thumbprint -EQ $winrm_cert_thumbprint.Value.ToString().ToUpper() | select NotAfter + } + Catch {} +} -if ($winrm_cert_expiry) +$winrm_cert_expirations = $winrm_cert_expiry | Sort-Object NotAfter +if ($winrm_cert_expirations) { - Set-Attr $result.ansible_facts "ansible_winrm_certificate_expires" $winrm_cert_expiry.NotAfter.ToString("yyyy-MM-dd HH:mm:ss") + # this fact was renamed from ansible_winrm_certificate_expires due to collision with ansible_winrm_X connection var pattern + Set-Attr $result.ansible_facts "ansible_win_rm_certificate_expires" $winrm_cert_expirations[0].NotAfter.ToString("yyyy-MM-dd HH:mm:ss") } +$PendingReboot = Get-PendingRebootStatus +Set-Attr $result.ansible_facts "ansible_reboot_pending" $PendingReboot + 
+Set-Attr $result.ansible_facts "module_setup" $true + # See if Facter is on the System Path Try { $facter_exe = Get-Command facter -ErrorAction Stop diff --git a/windows/win_command.ps1 b/windows/win_command.ps1 new file mode 100644 index 00000000000..316654b81d7 --- /dev/null +++ b/windows/win_command.ps1 @@ -0,0 +1,161 @@ +#!powershell +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# WANT_JSON +# POWERSHELL_COMMON + +# TODO: add check mode support + +Set-StrictMode -Version 2 +$ErrorActionPreference = "Stop" + +$parsed_args = Parse-Args $args $false + +$raw_command_line = $(Get-AnsibleParam $parsed_args "_raw_params" -failifempty $true).Trim() +$chdir = Get-AnsibleParam $parsed_args "chdir" +$creates = Get-AnsibleParam $parsed_args "creates" +$removes = Get-AnsibleParam $parsed_args "removes" + +$result = @{changed=$true; warnings=@(); cmd=$raw_command_line} + +If($creates -and $(Test-Path $creates)) { + Exit-Json @{cmd=$raw_command_line; msg="skipped, since $creates exists"; changed=$false; skipped=$true; rc=0} +} + +If($removes -and -not $(Test-Path $removes)) { + Exit-Json @{cmd=$raw_command_line; msg="skipped, since $removes does not exist"; changed=$false; skipped=$true; rc=0} +} + +$util_def = @' +using System; +using System.ComponentModel; +using System.Diagnostics; +using System.IO; +using System.Linq; +using System.Runtime.InteropServices; +using System.Threading; + 
+namespace Ansible.Command +{ + public static class NativeUtil + { + [DllImport("shell32.dll", SetLastError = true)] + static extern IntPtr CommandLineToArgvW([MarshalAs(UnmanagedType.LPWStr)] string lpCmdLine, out int pNumArgs); + + public static string[] ParseCommandLine(string cmdline) + { + int numArgs; + IntPtr ret = CommandLineToArgvW(cmdline, out numArgs); + + if (ret == IntPtr.Zero) + throw new Exception(String.Format("Error parsing command line: {0}", new Win32Exception(Marshal.GetLastWin32Error()).Message)); + + IntPtr[] strptrs = new IntPtr[numArgs]; + Marshal.Copy(ret, strptrs, 0, numArgs); + string[] cmdlineParts = strptrs.Select(s=>Marshal.PtrToStringUni(s)).ToArray(); + + Marshal.FreeHGlobal(ret); + + return cmdlineParts; + } + + public static void GetProcessOutput(StreamReader stdoutStream, StreamReader stderrStream, out string stdout, out string stderr) + { + var sowait = new EventWaitHandle(false, EventResetMode.ManualReset); + var sewait = new EventWaitHandle(false, EventResetMode.ManualReset); + + string so = null, se = null; + + ThreadPool.QueueUserWorkItem((s)=> + { + so = stdoutStream.ReadToEnd(); + sowait.Set(); + }); + + ThreadPool.QueueUserWorkItem((s) => + { + se = stderrStream.ReadToEnd(); + sewait.Set(); + }); + + foreach(var wh in new WaitHandle[] { sowait, sewait }) + wh.WaitOne(); + + stdout = so; + stderr = se; + } + } +} +'@ + +$util_type = Add-Type -TypeDefinition $util_def + +# FUTURE: extract this code to separate module_utils as Windows module API version of run_command + +$exec_args = $null + +# Parse the command-line with the Win32 parser to get the application name to run. The Win32 parser +# will deal with quoting/escaping for us... 
+# FUTURE: no longer necessary once we switch to raw Win32 CreateProcess +$parsed_command_line = [Ansible.Command.NativeUtil]::ParseCommandLine($raw_command_line); +$exec_application = $parsed_command_line[0] +If($parsed_command_line.Length -gt 1) { + # lop the application off, then rejoin the args as a single string + $exec_args = $parsed_command_line[1..$($parsed_command_line.Length-1)] -join " " +} + +$proc = New-Object System.Diagnostics.Process +$psi = $proc.StartInfo +$psi.FileName = $exec_application +$psi.Arguments = $exec_args +$psi.RedirectStandardOutput = $true +$psi.RedirectStandardError = $true +$psi.UseShellExecute = $false + +If ($chdir) { + $psi.WorkingDirectory = $chdir +} + +$start_datetime = [DateTime]::UtcNow + +Try { + $proc.Start() | Out-Null # will always return $true for non shell-exec cases +} +Catch [System.ComponentModel.Win32Exception] { + # fail nicely for "normal" error conditions + # FUTURE: this probably won't work on Nano Server + $excep = $_ + Exit-Json @{failed=$true;changed=$false;cmd=$raw_command_line;rc=$excep.Exception.NativeErrorCode;msg=$excep.Exception.Message} +} + +$stdout = $stderr = [string] $null + +[Ansible.Command.NativeUtil]::GetProcessOutput($proc.StandardOutput, $proc.StandardError, [ref] $stdout, [ref] $stderr) | Out-Null + +$result.stdout = $stdout +$result.stderr = $stderr + +$proc.WaitForExit() | Out-Null + +$result.rc = $proc.ExitCode + +$end_datetime = [DateTime]::UtcNow + +$result.start = $start_datetime.ToString("yyyy-MM-dd hh:mm:ss.ffffff") +$result.end = $end_datetime.ToString("yyyy-MM-dd hh:mm:ss.ffffff") +$result.delta = $($end_datetime - $start_datetime).ToString("h\:mm\:ss\.ffffff") + +Exit-Json $result diff --git a/windows/win_command.py b/windows/win_command.py new file mode 100644 index 00000000000..2a131c4bc45 --- /dev/null +++ b/windows/win_command.py @@ -0,0 +1,125 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Ansible, inc +# +# This file is part of Ansible +# +# Ansible is free 
software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: win_command +short_description: Executes a command on a remote Windows node +version_added: 2.2 +description: + - The M(win_command) module takes the command name followed by a list of space-delimited arguments. + - The given command will be executed on all selected nodes. It will not be + processed through the shell, so variables like C($env:HOME) and operations + like C("<"), C(">"), C("|"), and C(";") will not work (use the M(win_shell) + module if you need these features). +options: + free_form: + description: + - the win_command module takes a free form command to run. There is no parameter actually named 'free form'. + See the examples! + required: true + creates: + description: + - a path or path filter pattern; when the referenced path exists on the target host, the task will be skipped. + removes: + description: + - a path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped. + chdir: + description: + - set the specified path as the current working directory before executing a command +notes: + - If you want to run a command through a shell (say you are using C(<), + C(>), C(|), etc), you actually want the M(win_shell) module instead. 
The + M(win_command) module is much more secure as it's not affected by the user's + environment. + - " C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not exist, use this." +author: + - Matt Davis +''' + +EXAMPLES = ''' +# Example from Ansible Playbooks. +- win_command: whoami + register: whoami_out + +# Run the command only if the specified file does not exist. +- win_command: wbadmin -backupTarget:c:\\backup\\ creates=c:\\backup\\ + +# You can also use the 'args' form to provide the options. This command +# will change the working directory to c:\\somedir\\ and will only run when +# c:\\backup\\ doesn't exist. +- win_command: wbadmin -backupTarget:c:\\backup\\ creates=c:\\backup\\ + args: + chdir: c:\\somedir\\ + creates: c:\\backup\\ +''' + +RETURN = ''' +msg: + description: changed + returned: always + type: boolean + sample: True +start: + description: The command execution start time + returned: always + type: string + sample: '2016-02-25 09:18:26.429568' +end: + description: The command execution end time + returned: always + type: string + sample: '2016-02-25 09:18:26.755339' +delta: + description: The command execution delta time + returned: always + type: string + sample: '0:00:00.325771' +stdout: + description: The command standard output + returned: always + type: string + sample: 'Clustering node rabbit@slave1 with rabbit@master ...' 
+stderr: + description: The command standard error + returned: always + type: string + sample: 'ls: cannot access foo: No such file or directory' +cmd: + description: The command executed by the task + returned: always + type: string + sample: 'rabbitmqctl join_cluster rabbit@master' +rc: + description: The command return code (0 means success) + returned: always + type: int + sample: 0 +stdout_lines: + description: The command standard output split in lines + returned: always + type: list of strings + sample: [u'Clustering node rabbit@slave1 with rabbit@master ...'] +''' diff --git a/windows/win_copy.py b/windows/win_copy.py index a222a928f09..1b81f0cac89 100755 --- a/windows/win_copy.py +++ b/windows/win_copy.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_copy @@ -47,32 +51,29 @@ ''' EXAMPLES = ''' -# Copy a single file -- win_copy: src=/srv/myfiles/foo.conf dest=c:\\TEMP\\foo.conf - -# Copy the contents of files/temp_files dir into c:\temp\. Includes any sub dirs under files/temp_files -# Note the use of unix style path in the dest. 
-# This is necessary because \ is yaml escape sequence -- win_copy: src=files/temp_files/ dest=c:/temp/ - -# Copy the files/temp_files dir and any files or sub dirs into c:\temp -# Copies the folder because there is no trailing / on 'files/temp_files' -- win_copy: src=files/temp_files dest=c:/temp/ +- name: Copy a single file + win_copy: + src: /srv/myfiles/foo.conf + dest: c:\TEMP\foo.conf +- name: Copy files/temp_files to c:\temp + win_copy: + src: files/temp_files/ + dest: c:\temp ''' RETURN = ''' dest: description: destination file/path returned: changed type: string - sample: "c:/temp/" + sample: 'c:\temp' src: description: source file used for the copy on the target machine returned: changed type: string sample: "/home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source" checksum: - description: checksum of the file after running copy + description: sha1 checksum of the file after running copy returned: success type: string sample: "6e642bb8dd5c2e027bf21dd923337cbb4214f827" diff --git a/windows/win_feature.ps1 b/windows/win_feature.ps1 index 62960d581e3..339b79b9d7b 100644 --- a/windows/win_feature.ps1 +++ b/windows/win_feature.ps1 @@ -63,7 +63,7 @@ If ($state -eq "present") { $InstallParams.add("IncludeManagementTools",$includemanagementtools) } - if ($source -ne $null) + if ($source) { $InstallParams.add("Source",$source) } @@ -77,7 +77,7 @@ If ($state -eq "present") { ElseIf (Get-Command "Add-WindowsFeature" -ErrorAction SilentlyContinue) { if ($IncludeManagementTools) { - $Params.Remove("IncludeManagementTools") + $InstallParams.Remove("IncludeManagementTools") } $featureresult = Add-WindowsFeature @InstallParams } diff --git a/windows/win_feature.py b/windows/win_feature.py index 84c95154477..2fb6fe3718b 100644 --- a/windows/win_feature.py +++ b/windows/win_feature.py @@ -21,13 +21,17 @@ # this is a windows documentation stub. 
actual code lives in the .ps1 # file of the same name +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_feature version_added: "1.7" -short_description: Installs and uninstalls Windows Features +short_description: Installs and uninstalls Windows Features on Windows Server description: - - Installs or uninstalls Windows Roles or Features + - Installs or uninstalls Windows Roles or Features on Windows Server. This module uses the Add/Remove-WindowsFeature Cmdlets, which is not available on client os machines. options: name: description: @@ -49,6 +53,7 @@ - yes - no default: null + required: false include_sub_features: description: - Adds all subfeatures of the specified feature @@ -56,6 +61,7 @@ - yes - no default: null + required: false include_management_tools: description: - Adds the corresponding management tools to the specified feature @@ -63,6 +69,7 @@ - yes - no default: null + required: false source: description: - Specify a source to install the feature from @@ -74,7 +81,7 @@ - "Trond Hindenes (@trondhindenes)" ''' -EXAMPLES = ''' +EXAMPLES = r''' # This installs IIS. # The names of features available for install can be run by running the following Powershell Command: # PS C:\Users\Administrator> Import-Module ServerManager; Get-WindowsFeature @@ -96,6 +103,4 @@ restart: yes include_sub_features: yes include_management_tools: yes - - ''' diff --git a/windows/win_file.ps1 b/windows/win_file.ps1 index 958f9f04fcc..e064c5c6dbd 100644 --- a/windows/win_file.ps1 +++ b/windows/win_file.ps1 @@ -102,7 +102,7 @@ Else If ( $state -eq "directory" ) { - New-Item -ItemType directory -Path $path + New-Item -ItemType directory -Path $path | Out-Null $result.changed = $TRUE } diff --git a/windows/win_file.py b/windows/win_file.py index 895da567d86..82c5510c3cf 100644 --- a/windows/win_file.py +++ b/windows/win_file.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_file @@ -46,27 +50,37 @@ If C(file), the file will NOT be created if it does not exist, see the M(copy) or M(template) module if you want that behavior. If C(absent), directories will be recursively deleted, and files will be removed. - If C(touch), an empty file will be created if the c(path) does not + If C(touch), an empty file will be created if the C(path) does not exist, while an existing file or directory will receive updated file access and - modification times (similar to the way `touch` works from the command line). + modification times (similar to the way C(touch) works from the command line). required: false default: file choices: [ file, directory, touch, absent ] ''' EXAMPLES = ''' -# create a file -- win_file: path=C:\\temp\\foo.conf +- name: Create a file + win_file: + path: C:\temp\foo.conf + state: file -# touch a file (creates if not present, updates modification time if present) -- win_file: path=C:\\temp\\foo.conf state=touch +- name: Touch a file (creates if not present, updates modification time if present) + win_file: + path: C:\temp\foo.conf + state: touch -# remove a file, if present -- win_file: path=C:\\temp\\foo.conf state=absent +- name: Remove a file, if present + win_file: + path: C:\temp\foo.conf + state: absent -# create directory structure -- win_file: path=C:\\temp\\folder\\subfolder state=directory +- name: Create directory structure + win_file: + path: C:\temp\folder\subfolder + state: directory -# remove directory structure -- win_file: path=C:\\temp state=absent +- name: Remove directory structure + win_file: + path: C:\temp + state: absent ''' diff --git a/windows/win_get_url.ps1 b/windows/win_get_url.ps1 index 75bef615ab7..f24b8740469 100644 --- a/windows/win_get_url.ps1 +++ b/windows/win_get_url.ps1 @@ -26,33 +26,24 @@ $result = New-Object psobject @{ changed = $false } -If ($params.url) { 
- $url = $params.url -} -Else { - Fail-Json $result "missing required argument: url" -} - -If ($params.dest) { - $dest = $params.dest -} -Else { - Fail-Json $result "missing required argument: dest" -} +$url = Get-AnsibleParam $params -name "url" -failifempty $true +$dest = Get-AnsibleParam $params -name "dest" -failifempty $true -$skip_certificate_validation = Get-Attr $params "skip_certificate_validation" $false | ConvertTo-Bool -$username = Get-Attr $params "username" -$password = Get-Attr $params "password" +$skip_certificate_validation = Get-AnsibleParam $params -name "skip_certificate_validation" -default $false +$skip_certificate_validation = $skip_certificate_validation | ConvertTo-Bool +$username = Get-AnsibleParam $params "username" +$password = Get-AnsibleParam $params "password" -$proxy_url = Get-Attr $params "proxy_url" -$proxy_username = Get-Attr $params "proxy_username" -$proxy_password = Get-Attr $params "proxy_password" +$proxy_url = Get-AnsibleParam $params "proxy_url" +$proxy_username = Get-AnsibleParam $params "proxy_username" +$proxy_password = Get-AnsibleParam $params "proxy_password" if($skip_certificate_validation){ [System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true} } -$force = Get-Attr -obj $params -name "force" "yes" | ConvertTo-Bool +$force = Get-AnsibleParam -obj $params -name "force" -default $true +$force = $force | ConvertTo-Bool Function Download-File($result, $url, $dest, $username, $password, $proxy_url, $proxy_username, $proxy_password) { $webClient = New-Object System.Net.WebClient @@ -105,7 +96,7 @@ Else { Fail-Json $result "Error when requesting Last-Modified date from $url $($_.Exception.Message)" } - If ((Get-Date -Date $webLastMod ) -lt $fileLastMod) { + If (($webLastMod) -and ((Get-Date -Date $webLastMod ) -lt $fileLastMod)) { $result.changed = $false } Else { Download-File -result $result -url $url -dest $dest -username $username -password $password -proxy_url $proxy_url -proxy_username 
$proxy_username -proxy_password $proxy_password diff --git a/windows/win_get_url.py b/windows/win_get_url.py index f7c5819cb22..6c5d5c67c0d 100644 --- a/windows/win_get_url.py +++ b/windows/win_get_url.py @@ -21,6 +21,10 @@ # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_get_url @@ -45,9 +49,12 @@ default: null force: description: - - If C(yes), will always download the file. If C(no), will only + - If C(yes), will always download the file. If C(no), will only download the file if it does not exist or the remote file has been - modified more recently than the local file. + modified more recently than the local file. This works by sending + an http HEAD request to retrieve last modified time of the requested + resource, so for this to work, the remote web server must support + HEAD requests. version_added: "2.0" required: false choices: [ "yes", "no" ] @@ -84,28 +91,40 @@ required: false ''' -EXAMPLES = ''' +EXAMPLES = r''' # Downloading a JPEG and saving it to a file with the ansible command. 
# Note the "dest" is quoted rather instead of escaping the backslashes -$ ansible -i hosts -c winrm -m win_get_url -a "url=http://www.example.com/earthrise.jpg dest='C:\Users\Administrator\earthrise.jpg'" all +$ ansible -i hosts -c winrm -m win_get_url -a "url=http://www.example.com/earthrise.jpg dest='C:\\Users\\Administrator\\earthrise.jpg'" all # Playbook example -- name: Download earthrise.jpg to 'C:\Users\RandomUser\earthrise.jpg' +- name: Download earthrise.jpg to 'C:\\Users\\RandomUser\\earthrise.jpg' win_get_url: - url: 'http://www.example.com/earthrise.jpg' - dest: 'C:\Users\RandomUser\earthrise.jpg' + url: http://www.example.com/earthrise.jpg + dest: C:\Users\RandomUser\earthrise.jpg - name: Download earthrise.jpg to 'C:\Users\RandomUser\earthrise.jpg' only if modified win_get_url: - url: 'http://www.example.com/earthrise.jpg' - dest: 'C:\Users\RandomUser\earthrise.jpg' + url: http://www.example.com/earthrise.jpg + dest: C:\Users\RandomUser\earthrise.jpg force: no - name: Download earthrise.jpg to 'C:\Users\RandomUser\earthrise.jpg' through a proxy server. win_get_url: - url: 'http://www.example.com/earthrise.jpg' - dest: 'C:\Users\RandomUser\earthrise.jpg' - proxy_url: 'http://10.0.0.1:8080' - proxy_username: 'username' - proxy_password: 'password' + url: http://www.example.com/earthrise.jpg + dest: C:\Users\RandomUser\earthrise.jpg + proxy_url: http://10.0.0.1:8080 + proxy_username: username + proxy_password: password +''' +RETURN = ''' +url: + description: requested url + returned: always + type: string + sample: http://www.example.com/earthrise.jpg +dest: + description: destination file/path + returned: always + type: string + sample: 'C:\\Users\\RandomUser\\earthrise.jpg' ''' diff --git a/windows/win_group.py b/windows/win_group.py index 5e8b0adaaf2..035c05eff5c 100644 --- a/windows/win_group.py +++ b/windows/win_group.py @@ -21,6 +21,10 @@ # this is a windows documentation stub. 
actual code lives in the .ps1 # file of the same name +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_group @@ -54,13 +58,13 @@ ''' EXAMPLES = ''' - # Create a new group +- name: Create a new group win_group: name: deploy description: Deploy Group state: present - # Remove a group +- name: Remove a group win_group: name: deploy state: absent diff --git a/windows/win_lineinfile.py b/windows/win_lineinfile.py index c6761591619..df250d6d414 100644 --- a/windows/win_lineinfile.py +++ b/windows/win_lineinfile.py @@ -10,11 +10,15 @@ # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} DOCUMENTATION = """ --- @@ -31,11 +35,11 @@ aliases: [ name, destfile ] description: - The path of the file to modify. - - Note that the Windows path delimiter '\' must be escaped as '\\' (see examples below) + - Note that the Windows path delimiter C(\) must be escaped as C(\\) when the line is double quoted. regexp: required: false description: - - "The regular expression to look for in every line of the file. For C(state=present), the pattern to replace if found; only the last line found will be replaced. For C(state=absent), the pattern of the line to remove. Uses .NET compatible regular expressions; see U(https://msdn.microsoft.com/en-us/library/hs600312%28v=vs.110%29.aspx)." + - "The regular expression to look for in every line of the file. 
For C(state=present), the pattern to replace if found; only the last line found will be replaced. For C(state=absent), the pattern of the line to remove. Uses .NET compatible regular expressions; see U(https://msdn.microsoft.com/en-us/library/hs600312%28v=vs.110%29.aspx)." state: required: false choices: [ present, absent ] @@ -58,13 +62,13 @@ default: EOF description: - Used with C(state=present). If specified, the line will be inserted after the last match of specified regular expression. A special value is available; C(EOF) for inserting the line at the end of the file. - - If specified regular expression has no matches, EOF will be used instead. May not be used with C(backrefs). + - If specified regular expression has no matches, EOF will be used instead. May not be used with C(backrefs). choices: [ 'EOF', '*regex*' ] insertbefore: required: false description: - Used with C(state=present). If specified, the line will be inserted before the last match of specified regular expression. A value is available; C(BOF) for inserting the line at the beginning of the file. - - If specified regular expression has no matches, the line will be inserted at the end of the file. May not be used with C(backrefs). + - If specified regular expression has no matches, the line will be inserted at the end of the file. May not be used with C(backrefs). choices: [ 'BOF', '*regex*' ] create: required: false @@ -81,7 +85,7 @@ validate: required: false description: - - Validation to run before copying into place. Use %s in the command to indicate the current file to validate. + - Validation to run before copying into place. Use %s in the command to indicate the current file to validate. - The command is passed securely so shell features like expansion and pipes won't work. default: None encoding: @@ -94,26 +98,49 @@ newline: required: false description: - - "Specifies the line separator style to use for the modified file. This defaults to the windows line separator (\r\n). 
Note that the indicated line separator will be used for file output regardless of the original line seperator that appears in the input file." + - "Specifies the line separator style to use for the modified file. This defaults to the windows line separator (C(\r\n)). Note that the indicated line separator will be used for file output regardless of the original line separator that appears in the input file." choices: [ "windows", "unix" ] default: "windows" """ -EXAMPLES = """ -- win_lineinfile: dest=C:\\temp\\example.conf regexp=^name= line="name=JohnDoe" +EXAMPLES = r""" +- win_lineinfile: + dest: C:\temp\example.conf + regexp: '^name=' + line: 'name=JohnDoe' -- win_lineinfile: dest=C:\\temp\\example.conf state=absent regexp="^name=" +- win_lineinfile: + dest: C:\temp\example.conf + regexp: '^name=' + state: absent -- win_lineinfile: dest=C:\\temp\\example.conf regexp='^127\.0\.0\.1' line='127.0.0.1 localhost' +- win_lineinfile: + dest: C:\temp\example.conf + regexp: '^127\.0\.0\.1' + line: '127.0.0.1 localhost' -- win_lineinfile: dest=C:\\temp\\httpd.conf regexp="^Listen " insertafter="^#Listen " line="Listen 8080" +- win_lineinfile: + dest: C:\temp\httpd.conf + regexp: '^Listen ' + insertafter: '^#Listen ' + line: Listen 8080 -- win_lineinfile: dest=C:\\temp\\services regexp="^# port for http" insertbefore="^www.*80/tcp" line="# port for http by default" +- win_lineinfile: + dest: C:\temp\services + regexp: '^# port for http' + insertbefore: '^www.*80/tcp' + line: '# port for http by default' -# Create file if it doesnt exist with a specific encoding -- win_lineinfile: dest=C:\\temp\\utf16.txt create="yes" encoding="utf-16" line="This is a utf-16 encoded file" +# Create file if it doesn't exist with a specific encoding +- win_lineinfile: + dest: C:\temp\utf16.txt + create: yes + encoding: utf-16 + line: This is a utf-16 encoded file # Add a line to a file and ensure the resulting file uses unix line separators -- win_lineinfile: dest=C:\\temp\\testfile.txt 
line="Line added to file" newline="unix" - +- win_lineinfile: + dest: C:\temp\testfile.txt + line: Line added to file + newline: unix """ diff --git a/windows/win_msi.py b/windows/win_msi.py index d426c164506..cfc7e089822 100644 --- a/windows/win_msi.py +++ b/windows/win_msi.py @@ -21,6 +21,10 @@ # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name +ANSIBLE_METADATA = {'status': ['deprecated'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_msi @@ -55,21 +59,24 @@ - Specify whether to wait for install or uninstall to complete before continuing. choices: - true - - yes - false - - no default: false author: "Matt Martz (@sivel)" ''' EXAMPLES = ''' -# Install an MSI file -- win_msi: path=C:\\\\7z920-x64.msi +- name: Install an MSI file + win_msi: + path: C:\7z920-x64.msi -# Install an MSI, and wait for it to complete before continuing -- win_msi: path=C:\\\\7z920-x64.msi wait=true +- name: Install an MSI, and wait for it to complete before continuing + win_msi: + path: C:\7z920-x64.msi + wait: true -# Uninstall an MSI file -- win_msi: path=C:\\\\7z920-x64.msi state=absent +- name: Uninstall an MSI file + win_msi: + path: C:\7z920-x64.msi + state: absent ''' diff --git a/windows/win_ping.py b/windows/win_ping.py index ecb5149f8c3..6f650e977fd 100644 --- a/windows/win_ping.py +++ b/windows/win_ping.py @@ -21,6 +21,10 @@ # this is a windows documentation stub. 
actual code lives in the .ps1 # file of the same name +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_ping diff --git a/windows/win_reboot.py b/windows/win_reboot.py new file mode 100644 index 00000000000..c8f179e7dd4 --- /dev/null +++ b/windows/win_reboot.py @@ -0,0 +1,82 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION=''' +--- +module: win_reboot +short_description: Reboot a windows machine +description: + - Reboot a Windows machine, wait for it to go down, come back up, and respond to commands. 
+version_added: "2.1" +options: + pre_reboot_delay_sec: + description: + - Seconds for shutdown to wait before requesting reboot + default: 2 + shutdown_timeout_sec: + description: + - Maximum seconds to wait for shutdown to occur + - Increase this timeout for very slow hardware, large update applications, etc + default: 600 + reboot_timeout_sec: + description: + - Maximum seconds to wait for machine to re-appear on the network and respond to a test command + - This timeout is evaluated separately for both network appearance and test command success (so maximum clock time is actually twice this value) + default: 600 + connect_timeout_sec: + description: + - Maximum seconds to wait for a single successful TCP connection to the WinRM endpoint before trying again + default: 5 + test_command: + description: + - Command to expect success for to determine the machine is ready for management + default: whoami +author: + - Matt Davis (@nitzmahone) +''' + +EXAMPLES=''' +# unconditionally reboot the machine with all defaults +- win_reboot: + +# apply updates and reboot if necessary +- win_updates: + register: update_result +- win_reboot: + when: update_result.reboot_required + +# reboot a slow machine that might have lots of updates to apply +- win_reboot: + shutdown_timeout_sec: 3600 + reboot_timeout_sec: 3600 +''' + +RETURNS=''' +rebooted: + description: true if the machine was rebooted + returned: always + type: boolean + sample: true +''' diff --git a/windows/win_service.py b/windows/win_service.py index 1f0f6326e65..e4f009a361b 100644 --- a/windows/win_service.py +++ b/windows/win_service.py @@ -21,6 +21,10 @@ # this is a windows documentation stub. 
actual code lives in the .ps1 # file of the same name +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_service @@ -59,12 +63,12 @@ ''' EXAMPLES = ''' - # Restart a service +- name: Restart a service win_service: name: spooler state: restarted - # Set service startup mode to auto and ensure it is started +- name: Set service startup mode to auto and ensure it is started win_service: name: spooler start_mode: auto diff --git a/windows/win_shell.ps1 b/windows/win_shell.ps1 new file mode 100644 index 00000000000..664858e52ca --- /dev/null +++ b/windows/win_shell.ps1 @@ -0,0 +1,142 @@ +#!powershell +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# WANT_JSON +# POWERSHELL_COMMON + +# TODO: add check mode support + +Set-StrictMode -Version 2 +$ErrorActionPreference = "Stop" + +$helper_def = @" +using System.Diagnostics; +using System.IO; +using System.Threading; + +namespace Ansible.Shell +{ + public class ProcessUtil + { + public static void GetProcessOutput(StreamReader stdoutStream, StreamReader stderrStream, out string stdout, out string stderr) + { + var sowait = new EventWaitHandle(false, EventResetMode.ManualReset); + var sewait = new EventWaitHandle(false, EventResetMode.ManualReset); + + string so = null, se = null; + + ThreadPool.QueueUserWorkItem((s)=> + { + so = stdoutStream.ReadToEnd(); + sowait.Set(); + }); + + ThreadPool.QueueUserWorkItem((s) => + { + se = stderrStream.ReadToEnd(); + sewait.Set(); + }); + + foreach(var wh in new WaitHandle[] { sowait, sewait }) + wh.WaitOne(); + + stdout = so; + stderr = se; + } + } +} +"@ + +$parsed_args = Parse-Args $args $false + +$raw_command_line = $(Get-AnsibleParam $parsed_args "_raw_params" -failifempty $true).Trim() +$chdir = Get-AnsibleParam $parsed_args "chdir" +$executable = Get-AnsibleParam $parsed_args "executable" +$creates = Get-AnsibleParam $parsed_args "creates" +$removes = Get-AnsibleParam $parsed_args "removes" + +$result = @{changed=$true; warnings=@(); cmd=$raw_command_line} + +If($creates -and $(Test-Path $creates)) { + Exit-Json @{cmd=$raw_command_line; msg="skipped, since $creates exists"; changed=$false; skipped=$true; rc=0} +} + +If($removes -and -not $(Test-Path $removes)) { + Exit-Json @{cmd=$raw_command_line; msg="skipped, since $removes does not exist"; changed=$false; skipped=$true; rc=0} +} + +Add-Type -TypeDefinition $helper_def + +$exec_args = $null + +If(-not $executable -or $executable -eq "powershell") { + $exec_application = "powershell" + + # Base64 encode the command so we don't have to worry about the various levels of escaping + $encoded_command = 
[Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($raw_command_line)) + + $exec_args = @("-noninteractive", "-encodedcommand", $encoded_command) +} +Else { + # FUTURE: support arg translation from executable (or executable_args?) to process arguments for arbitrary interpreter? + $exec_application = $executable + $exec_args = @("/c", $raw_command_line) +} + +$proc = New-Object System.Diagnostics.Process +$psi = $proc.StartInfo +$psi.FileName = $exec_application +$psi.Arguments = $exec_args +$psi.RedirectStandardOutput = $true +$psi.RedirectStandardError = $true +$psi.UseShellExecute = $false + +If ($chdir) { + $psi.WorkingDirectory = $chdir +} + +$start_datetime = [DateTime]::UtcNow + +Try { + $proc.Start() | Out-Null # will always return $true for non shell-exec cases +} +Catch [System.ComponentModel.Win32Exception] { + # fail nicely for "normal" error conditions + # FUTURE: this probably won't work on Nano Server + $excep = $_ + Exit-Json @{failed=$true;changed=$false;cmd=$raw_command_line;rc=$excep.Exception.NativeErrorCode;msg=$excep.Exception.Message} +} + +$stdout = $stderr = [string] $null + +[Ansible.Shell.ProcessUtil]::GetProcessOutput($proc.StandardOutput, $proc.StandardError, [ref] $stdout, [ref] $stderr) | Out-Null + +$result.stdout = $stdout +$result.stderr = $stderr + +# TODO: decode CLIXML stderr output (and other streams?) 
+ +$proc.WaitForExit() | Out-Null + +$result.rc = $proc.ExitCode + +$end_datetime = [DateTime]::UtcNow + +$result.start = $start_datetime.ToString("yyyy-MM-dd hh:mm:ss.ffffff") +$result.end = $end_datetime.ToString("yyyy-MM-dd hh:mm:ss.ffffff") +$result.delta = $($end_datetime - $start_datetime).ToString("h\:mm\:ss\.ffffff") + +Exit-Json $result diff --git a/windows/win_shell.py b/windows/win_shell.py new file mode 100644 index 00000000000..6441f2018fa --- /dev/null +++ b/windows/win_shell.py @@ -0,0 +1,133 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Ansible, inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: win_shell +short_description: Execute shell commands on target hosts. +version_added: 2.2 +description: + - The M(win_shell) module takes the command name followed by a list of space-delimited arguments. + It is similar to the M(win_command) module, but runs + the command via a shell (defaults to PowerShell) on the target host. +options: + free_form: + description: + - the win_shell module takes a free form command to run. There is no parameter actually named 'free form'. + See the examples! 
+ required: true + creates: + description: + - a path or path filter pattern; when the referenced path exists on the target host, the task will be skipped. + removes: + description: + - a path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped. + chdir: + description: + - set the specified path as the current working directory before executing a command + executable: + description: + - change the shell used to execute the command (eg, C(cmd)). The target shell must accept a C(/c) parameter followed by the raw command line to be executed. +notes: + - If you want to run an executable securely and predictably, it may be + better to use the M(win_command) module instead. Best practices when writing + playbooks will follow the trend of using M(win_command) unless M(win_shell) is + explicitly required. When running ad-hoc commands, use your best judgement. + - WinRM will not return from a command execution until all child processes created have exited. Thus, it is not possible to use win_shell to spawn long-running child or background processes. + Consider creating a Windows service for managing background processes. +author: + - Matt Davis +''' + +EXAMPLES = ''' +# Execute a command in the remote shell; stdout goes to the specified +# file on the remote. +- win_shell: C:\\somescript.ps1 >> c:\\somelog.txt + +# Change the working directory to somedir/ before executing the command. +- win_shell: C:\\somescript.ps1 >> c:\\somelog.txt chdir=c:\\somedir + +# You can also use the 'args' form to provide the options. This command +# will change the working directory to somedir/ and will only run when +# somedir/somelog.txt doesn't exist. 
+- win_shell: C:\\somescript.ps1 >> c:\\somelog.txt + args: + chdir: c:\\somedir + creates: c:\\somelog.txt + +# Run a command under a non-Powershell interpreter (cmd in this case) +- win_shell: echo %HOMEDIR% + args: + executable: cmd + register: homedir_out +''' + +RETURN = ''' +msg: + description: changed + returned: always + type: boolean + sample: True +start: + description: The command execution start time + returned: always + type: string + sample: '2016-02-25 09:18:26.429568' +end: + description: The command execution end time + returned: always + type: string + sample: '2016-02-25 09:18:26.755339' +delta: + description: The command execution delta time + returned: always + type: string + sample: '0:00:00.325771' +stdout: + description: The command standard output + returned: always + type: string + sample: 'Clustering node rabbit@slave1 with rabbit@master ...' +stderr: + description: The command standard error + returned: always + type: string + sample: 'ls: cannot access foo: No such file or directory' +cmd: + description: The command executed by the task + returned: always + type: string + sample: 'rabbitmqctl join_cluster rabbit@master' +rc: + description: The command return code (0 means success) + returned: always + type: int + sample: 0 +stdout_lines: + description: The command standard output split in lines + returned: always + type: list of strings + sample: [u'Clustering node rabbit@slave1 with rabbit@master ...'] +''' diff --git a/windows/win_stat.ps1 b/windows/win_stat.ps1 index 06484f3d1cc..366652249b6 100644 --- a/windows/win_stat.ps1 +++ b/windows/win_stat.ps1 @@ -21,7 +21,10 @@ $params = Parse-Args $args $true; function Date_To_Timestamp($start_date, $end_date) { - Write-Output (New-TimeSpan -Start $start_date -End $end_date).TotalSeconds + If($start_date -and $end_date) + { + Write-Output (New-TimeSpan -Start $start_date -End $end_date).TotalSeconds + } } $path = Get-Attr $params "path" $FALSE; @@ -42,23 +45,44 @@ $result = New-Object 
psobject @{ If (Test-Path $path) { Set-Attr $result.stat "exists" $TRUE; + $info = Get-Item $path; + $iscontainer = Get-Attr $info "PSIsContainer" $null; + $length = Get-Attr $info "Length" $null; + $extension = Get-Attr $info "Extension" $null; + $attributes = Get-Attr $info "Attributes" ""; + If ($info) + { + $accesscontrol = $info.GetAccessControl(); + } + Else + { + $accesscontrol = $null; + } + $owner = Get-Attr $accesscontrol "Owner" $null; + $creationtime = Get-Attr $info "CreationTime" $null; + $lastaccesstime = Get-Attr $info "LastAccessTime" $null; + $lastwritetime = Get-Attr $info "LastWriteTime" $null; + + $epoch_date = Get-Date -Date "01/01/1970" - If ($info.PSIsContainer) + If ($iscontainer) { Set-Attr $result.stat "isdir" $TRUE; } Else { Set-Attr $result.stat "isdir" $FALSE; - Set-Attr $result.stat "size" $info.Length; + Set-Attr $result.stat "size" $length; } - Set-Attr $result.stat "extension" $info.Extension; - Set-Attr $result.stat "attributes" $info.Attributes.ToString(); - Set-Attr $result.stat "owner" $info.GetAccessControl().Owner; - Set-Attr $result.stat "creationtime" (Date_To_Timestamp $epoch_date $info.CreationTime); - Set-Attr $result.stat "lastaccesstime" (Date_To_Timestamp $epoch_date $info.LastAccessTime); - Set-Attr $result.stat "lastwritetime" (Date_To_Timestamp $epoch_date $info.LastWriteTime); + Set-Attr $result.stat "extension" $extension; + Set-Attr $result.stat "attributes" $attributes.ToString(); + # Set-Attr $result.stat "owner" $getaccesscontrol.Owner; + # Set-Attr $result.stat "owner" $info.GetAccessControl().Owner; + Set-Attr $result.stat "owner" $owner; + Set-Attr $result.stat "creationtime" (Date_To_Timestamp $epoch_date $creationtime); + Set-Attr $result.stat "lastaccesstime" (Date_To_Timestamp $epoch_date $lastaccesstime); + Set-Attr $result.stat "lastwritetime" (Date_To_Timestamp $epoch_date $lastwritetime); } Else { diff --git a/windows/win_stat.py b/windows/win_stat.py index e26655985e2..823dde0d15e 100644 --- 
a/windows/win_stat.py +++ b/windows/win_stat.py @@ -17,6 +17,10 @@ # this is a windows documentation stub, actual code lives in the .ps1 # file of the same name +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_stat @@ -50,11 +54,12 @@ ''' EXAMPLES = ''' -# Obtain information about a file - -- win_stat: path=C:\\foo.ini +- name: Obtain information about a file + win_stat: + path: C:\foo.ini register: file_info -- debug: var=file_info +- debug: + var: file_info ''' diff --git a/windows/win_template.py b/windows/win_template.py index 4ffcaafe2c7..f93307a0fcc 100644 --- a/windows/win_template.py +++ b/windows/win_template.py @@ -15,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = r''' --- module: win_template @@ -47,7 +51,7 @@ - "templates are loaded with C(trim_blocks=True)." - By default, windows line endings are not created in the generated file. 
- "In order to ensure windows line endings are in the generated file, add the following header - as the first line of your template: #jinja2: newline_sequence:'\\\\r\\\\n' and ensure each line + as the first line of your template: ``#jinja2: newline_sequence:'\\r\\n'`` and ensure each line of the template ends with \\\\r\\\\n" - Beware fetching files from windows machines when creating templates because certain tools, such as Powershell ISE, and regedit's export facility @@ -57,8 +61,12 @@ ''' EXAMPLES = ''' -# Playbook Example (win_template can only be run inside a playbook) -- win_template: src=/mytemplates/file.conf.j2 dest=C:\\temp\\file.conf +# Playbook Example +- win_template: + src: /mytemplates/file.conf.j2 + dest: C:\temp\file.conf +# Ad-hoc Example +ansible winhost -m win_template -a "src=/mytemplates/file.conf.j2 dest=c:/temp/file.conf" ''' diff --git a/windows/win_user.ps1 b/windows/win_user.ps1 index cf3a178c6bb..5eba6ad2a69 100644 --- a/windows/win_user.ps1 +++ b/windows/win_user.ps1 @@ -102,10 +102,10 @@ If ($account_locked -ne $null) { $groups = Get-Attr $params "groups" $null If ($groups -ne $null) { - If ($groups.GetType().Name -eq "String") { + If ($groups -is [System.String]) { [string[]]$groups = $groups.Split(",") } - ElseIf ($groups.GetType().Name -ne "Object[]") { + ElseIf ($groups -isnot [System.Collections.IList]) { Fail-Json $result "groups must be a string or array" } $groups = $groups | ForEach { ([string]$_).Trim() } | Where { $_ } @@ -125,7 +125,7 @@ $user_obj = Get-User $username If ($state -eq 'present') { # Add or update user try { - If (-not $user_obj -or -not $user_obj.GetType) { + If (-not $user_obj) { $user_obj = $adsi.Create("User", $username) If ($password -ne $null) { $user_obj.SetPassword($password) @@ -137,8 +137,16 @@ If ($state -eq 'present') { [void][system.reflection.assembly]::LoadWithPartialName('System.DirectoryServices.AccountManagement') $host_name = [System.Net.Dns]::GetHostName() $pc = New-Object -TypeName 
System.DirectoryServices.AccountManagement.PrincipalContext 'Machine', $host_name - # ValidateCredentials fails if PasswordExpired == 1 - If (!$pc.ValidateCredentials($username, $password)) { + + # ValidateCredentials will fail if either of these are true- just force update... + If($user_obj.AccountDisabled -or $user_obj.PasswordExpired) { + $password_match = $false + } + Else { + $password_match = $pc.ValidateCredentials($username, $password) + } + + If (-not $password_match) { $user_obj.SetPassword($password) $result.changed = $true } @@ -190,7 +198,7 @@ If ($state -eq 'present') { ForEach ($grp in $current_groups) { If ((($groups_action -eq "remove") -and ($groups -contains $grp)) -or (($groups_action -eq "replace") -and ($groups -notcontains $grp))) { $group_obj = $adsi.Children | where { $_.SchemaClassName -eq 'Group' -and $_.Name -eq $grp } - If ($group_obj -and $group_obj.GetType) { + If ($group_obj) { $group_obj.Remove($user_obj.Path) $result.changed = $true } @@ -204,7 +212,7 @@ If ($state -eq 'present') { ForEach ($grp in $groups) { If ($current_groups -notcontains $grp) { $group_obj = $adsi.Children | where { $_.SchemaClassName -eq 'Group' -and $_.Name -eq $grp } - If ($group_obj.GetType) { + If ($group_obj) { $group_obj.Add($user_obj.Path) $result.changed = $true } @@ -223,7 +231,7 @@ If ($state -eq 'present') { ElseIf ($state -eq 'absent') { # Remove user try { - If ($user_obj -and $user_obj.GetType) { + If ($user_obj) { $username = $user_obj.Name.Value $adsi.delete("User", $user_obj.Name.Value) $result.changed = $true @@ -236,7 +244,7 @@ ElseIf ($state -eq 'absent') { } try { - If ($user_obj -and $user_obj.GetType) { + If ($user_obj -and $user_obj -is [System.DirectoryServices.DirectoryEntry]) { $user_obj.RefreshCache() Set-Attr $result "name" $user_obj.Name[0] Set-Attr $result "fullname" $user_obj.FullName[0] diff --git a/windows/win_user.py b/windows/win_user.py index 376ff487fb3..3158208db6f 100644 --- a/windows/win_user.py +++ 
b/windows/win_user.py @@ -21,6 +21,10 @@ # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_user