diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 00000000000..300886a6973 --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,5 @@ +# This repository is locked + +Please open all new issues and pull requests in https://github.com/ansible/ansible + +For more information please see http://docs.ansible.com/ansible/dev_guide/repomerge.html diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..300886a6973 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,5 @@ +# This repository is locked + +Please open all new issues and pull requests in https://github.com/ansible/ansible + +For more information please see http://docs.ansible.com/ansible/dev_guide/repomerge.html diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 409c24454ac..00000000000 --- a/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -sudo: false -language: python -python: - - "2.7" -addons: - apt: - sources: - - deadsnakes - packages: - - python2.4 - - python2.6 -script: - - python2.4 -m compileall -fq -x 'cloud/|monitoring/zabbix.*\.py|/dnf\.py|/layman\.py|/maven_artifact\.py|clustering/consul.*\.py|notification/pushbullet\.py' . - - python2.6 -m compileall -fq . - - python2.7 -m compileall -fq . - #- ./test-docs.sh extras diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 38b95840a77..00000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,37 +0,0 @@ -Contributing to ansible-modules-extras -====================================== - -The Ansible Extras Modules are written and maintained by the Ansible community, according to the following contribution guidelines. - -If you'd like to contribute code -================================ - -Please see [this web page](http://docs.ansible.com/community.html) for information about the contribution process. Important license agreement information is also included on that page. - -If you'd like to contribute code to an existing module -====================================================== -Each module in Extras is maintained by the owner of that module; each module's owner is indicated in the documentation section of the module itself. Any pull request for a module that is given a +1 by the owner in the comments will be merged by the Ansible team. - -If you'd like to contribute a new module -======================================== -Ansible welcomes new modules. Please be certain that you've read the [module development guide and standards](http://docs.ansible.com/developing_modules.html) thoroughly before submitting your module. - -Each new module requires two current module owners to approve a new module for inclusion. The Ansible community reviews new modules as often as possible, but please be patient; there are a lot of new module submissions in the pipeline, and it takes time to evaluate a new module for its adherence to module standards. - -Once your module is accepted, you become responsible for maintenance of that module, which means responding to pull requests and issues in a reasonably timely manner. - -If you'd like to ask a question -=============================== - -Please see [this web page ](http://docs.ansible.com/community.html) for community information, which includes pointers on how to ask questions on the [mailing lists](http://docs.ansible.com/community.html#mailing-list-information) and IRC. 
- -The Github issue tracker is not the best place for questions for various reasons, but both IRC and the mailing list are very helpful places for those things, and that page has the pointers to those. - -If you'd like to file a bug -=========================== - -Read the community page above, but in particular, make sure you copy [this issue template](https://github.com/ansible/ansible/blob/devel/ISSUE_TEMPLATE.md) into your ticket description. We have a friendly neighborhood bot that will remind you if you forget :) This template helps us organize tickets faster and prevents asking some repeated questions, so it's very helpful to us and we appreciate your help with it. - -Also please make sure you are testing on the latest released version of Ansible or the development branch. - -Thanks! diff --git a/COPYING b/COPYING deleted file mode 100644 index 10926e87f11..00000000000 --- a/COPYING +++ /dev/null @@ -1,675 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. 
This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. 
- - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. 
- - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. 
- - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. 
- - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. 
- - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. 
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (C) <year> <name of author>
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (C) <year> <name of author>
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<http://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<http://www.gnu.org/philosophy/why-not-lgpl.html>.
-
diff --git a/README.md b/README.md
index 9a0ddb6c898..3bb1f395c56 100644
--- a/README.md
+++ b/README.md
@@ -1,26 +1,6 @@
-ansible-modules-extras
-======================
+**NOTE:** As of Ansible 2.3, modules are now in the
+[main Ansible repository](https://github.com/ansible/ansible/tree/devel/lib/ansible/modules).
 
-This repo contains a subset of ansible-modules with slightly lower use or priority than "core" modules.
+See the [repo merge guide](https://docs.ansible.com/ansible/dev_guide/repomerge.html) for more information.
 
-All new modules should be submitted here, and have a chance to be promoted to core over time.
-
-Reporting bugs
-==============
-
-Take care to submit tickets to the appropriate repo where modules are contained. The repo is mentioned at the bottom of module documentation page at [docs.ansible.com](http://docs.ansible.com/).
-
-Testing modules
-===============
-
-Ansible [module development guide](http://docs.ansible.com/developing_modules.html#testing-modules) contains the latest info about that.
- -License -======= - -As with Ansible, modules distributed with Ansible are GPLv3 licensed. User generated modules not part of this project can be of any license. - -Installation -============ - -There should be no need to install this repo separately as it should be included in any Ansible install using the official documented methods. +This repo still exists to allow bug fixes for `stable-2.2` and older releases. diff --git a/REVIEWERS.md b/REVIEWERS.md deleted file mode 100644 index b09af51d1c1..00000000000 --- a/REVIEWERS.md +++ /dev/null @@ -1,160 +0,0 @@ -New module reviewers -==================== -The following list represents all current Github module reviewers. It's currently comprised of all Ansible module authors, past and present. - -Two +1 votes by any of these module reviewers on a new module pull request will result in the inclusion of that module into Ansible Extras. - -Active -====== -- Adam Garside [@fabulops](https://www.github.com/fabulops) -- Adam Keech [@smadam813](https://www.github.com/smadam813) -- Adam Miller [@maxamillion](https://www.github.com/maxamillion) -- Alex Coomans [@drcapulet](https://www.github.com/drcapulet) -- Alexander Bulimov [@abulimov](https://www.github.com/abulimov) -- Alexander Saltanov [@sashka](https://www.github.com/sashka) -- Alexander Winkler [@dermute](https://www.github.com/dermute) -- Andrew de Quincey [@adq](https://www.github.com/adq) -- André Paramés [@andreparames](https://www.github.com/andreparames) -- Andy Hill [@andyhky](https://www.github.com/andyhky) -- Artūras `arturaz` Šlajus [@arturaz](https://www.github.com/arturaz) -- Augustus Kling [@AugustusKling](https://www.github.com/AugustusKling) -- BOURDEL Paul [@pb8226](https://www.github.com/pb8226) -- Balazs Pocze [@banyek](https://www.github.com/banyek) -- Ben Whaley [@bwhaley](https://www.github.com/bwhaley) -- Benno Joy [@bennojoy](https://www.github.com/bennojoy) -- Bernhard Weitzhofer [@b6d](https://www.github.com/b6d) -- Boyd Adamson [@brontitall](https://www.github.com/brontitall) -- Brad Olson [@bradobro](https://www.github.com/bradobro) -- Brian Coca [@bcoca](https://www.github.com/bcoca) -- Brice Burgess [@briceburg](https://www.github.com/briceburg) -- Bruce Pennypacker [@bpennypacker](https://www.github.com/bpennypacker) -- Carson Gee [@carsongee](https://www.github.com/carsongee) -- Chris Church [@cchurch](https://www.github.com/cchurch) -- Chris Hoffman [@chrishoffman](https://www.github.com/chrishoffman) -- Chris Long [@alcamie101](https://www.github.com/alcamie101) -- Chris Schmidt [@chrisisbeef](https://www.github.com/chrisisbeef) -- Christian Berendt [@berendt](https://www.github.com/berendt) -- Christopher H. 
Laco [@claco](https://www.github.com/claco) -- Cristian van Ee [@DJMuggs](https://www.github.com/DJMuggs) -- Dag Wieers [@dagwieers](https://www.github.com/dagwieers) -- Dane Summers [@dsummersl](https://www.github.com/dsummersl) -- Daniel Jaouen [@danieljaouen](https://www.github.com/danieljaouen) -- Daniel Schep [@dschep](https://www.github.com/dschep) -- Dariusz Owczarek [@dareko](https://www.github.com/dareko) -- Darryl Stoflet [@dstoflet](https://www.github.com/dstoflet) -- David CHANIAL [@davixx](https://www.github.com/davixx) -- David Stygstra [@stygstra](https://www.github.com/) -- Derek Carter [@goozbach](https://www.github.com/stygstra) -- Dimitrios Tydeas Mengidis [@dmtrs](https://www.github.com/dmtrs) -- Doug Luce [@dougluce](https://www.github.com/dougluce) -- Dylan Martin [@pileofrogs](https://www.github.com/pileofrogs) -- Elliott Foster [@elliotttf](https://www.github.com/elliotttf) -- Eric Johnson [@erjohnso](https://www.github.com/erjohnso) -- Evan Duffield [@scicoin-project](https://www.github.com/scicoin-project) -- Evan Kaufman [@EvanK](https://www.github.com/EvanK) -- Evgenii Terechkov [@evgkrsk](https://www.github.com/evgkrsk) -- Franck Cuny [@franckcuny](https://www.github.com/franckcuny) -- Gareth Rushgrove [@garethr](https://www.github.com/garethr) -- Hagai Kariti [@hkariti](https://www.github.com/hkariti) -- Hector Acosta [@hacosta](https://www.github.com/hacosta) -- Hiroaki Nakamura [@hnakamur](https://www.github.com/hnakamur) -- Ivan Vanderbyl [@ivanvanderbyl](https://www.github.com/ivanvanderbyl) -- Jakub Jirutka [@jirutka](https://www.github.com/jirutka) -- James Cammarata [@jimi-c](https://www.github.com/jimi-c) -- James Laska [@jlaska](https://www.github.com/jlaska) -- James S. Martin [@jsmartin](https://www.github.com/jsmartin) -- Jan-Piet Mens [@jpmens](https://www.github.com/jpmens) -- Jayson Vantuyl [@jvantuyl](https://www.github.com/jvantuyl) -- Jens Depuydt [@jensdepuydt](https://www.github.com/jensdepuydt) -- Jeroen Hoekx [@jhoekx](https://www.github.com/jhoekx) -- Jesse Keating [@j2sol](https://www.github.com/j2sol) -- Jim Dalton [@jsdalton](https://www.github.com/jsdalton) -- Jim Richardson [@weaselkeeper](https://www.github.com/weaselkeeper) -- Jimmy Tang [@jcftang](https://www.github.com/jcftang) -- Johan Wiren [@johanwiren](https://www.github.com/johanwiren) -- John Dewey [@retr0h](https://www.github.com/retr0h) -- John Jarvis [@jarv](https://www.github.com/jarv) -- John Whitbeck [@jwhitbeck](https://www.github.com/jwhitbeck) -- Jon Hawkesworth [@jhawkesworth](https://www.github.com/jhawkesworth) -- Jonas Pfenniger [@zimbatm](https://www.github.com/zimbatm) -- Jonathan I. 
Davila [@defionscode](https://www.github.com/defionscode) -- Joseph Callen [@jcpowermac](https://www.github.com/jcpowermac) -- Kevin Carter [@cloudnull](https://www.github.com/cloudnull) -- Lester Wade [@lwade](https://www.github.com/lwade) -- Lorin Hochstein [@lorin](https://www.github.com/lorin) -- Manuel Sousa [@manuel-sousa](https://www.github.com/manuel-sousa) -- Mark Theunissen [@marktheunissen](https://www.github.com/marktheunissen) -- Matt Coddington [@mcodd](https://www.github.com/mcodd) -- Matt Hite [@mhite](https://www.github.com/mhite) -- Matt Makai [@makaimc](https://www.github.com/makaimc) -- Matt Martz [@sivel](https://www.github.com/sivel) -- Matt Wright [@mattupstate](https://www.github.com/mattupstate) -- Matthew Vernon [@mcv21](https://www.github.com/mcv21) -- Matthew Williams [@mgwilliams](https://www.github.com/mgwilliams) -- Matthias Vogelgesang [@matze](https://www.github.com/matze) -- Max Riveiro [@kavu](https://www.github.com/kavu) -- Michael Gregson [@mgregson](https://www.github.com/mgregson) -- Michael J. Schultz [@mjschultz](https://www.github.com/mjschultz) -- Michael Schuett [@michaeljs1990](https://www.github.com/michaeljs1990) -- Michael Warkentin [@mwarkentin](https://www.github.com/mwarkentin) -- Mischa Peters [@mischapeters](https://www.github.com/mischapeters) -- Monty Taylor [@emonty](https://www.github.com/emonty) -- Nandor Sivok [@dominis](https://www.github.com/dominis) -- Nate Coraor [@natefoo](https://www.github.com/natefoo) -- Nate Kingsley [@nate-kingsley](https://www.github.com/nate-kingsley) -- Nick Harring [@NickatEpic](https://www.github.com/NickatEpic) -- Patrick Callahan [@dirtyharrycallahan](https://www.github.com/dirtyharrycallahan) -- Patrick Ogenstad [@ogenstad](https://www.github.com/ogenstad) -- Patrick Pelletier [@skinp](https://www.github.com/skinp) -- Patrik Lundin [@eest](https://www.github.com/eest) -- Paul Durivage [@angstwad](https://www.github.com/angstwad) -- Pavel Antonov [@softzilla](https://www.github.com/softzilla) -- Pepe Barbe [@elventear](https://www.github.com/elventear) -- Peter Mounce [@petemounce](https://www.github.com/petemounce) -- Peter Oliver [@mavit](https://www.github.com/mavit) -- Peter Sprygada [@privateip](https://www.github.com/privateip) -- Peter Tan [@tanpeter](https://www.github.com/tanpeter) -- Philippe Makowski [@pmakowski](https://www.github.com/pmakowski) -- Phillip Gentry, CX Inc [@pcgentry](https://www.github.com/pcgentry) -- Quentin Stafford-Fraser [@quentinsf](https://www.github.com/quentinsf) -- Ramon de la Fuente [@ramondelafuente](https://www.github.com/ramondelafuente) -- Raul Melo [@melodous](https://www.github.com/melodous) -- Ravi Bhure [@ravibhure](https://www.github.com/ravibhure) -- René Moser [@resmo](https://www.github.com/resmo) -- Richard Hoop [@rhoop](https://www.github.com/rhoop) -- Richard Isaacson [@risaacson](https://www.github.com/risaacson) 
-- Rick Mendes [@rickmendes](https://www.github.com/rickmendes)
-- Romeo Theriault [@romeotheriault](https://www.github.com/romeotheriault)
-- Scott Anderson [@tastychutney](https://www.github.com/tastychutney)
-- Sebastian Kornehl [@skornehl](https://www.github.com/skornehl)
-- Serge van Ginderachter [@srvg](https://www.github.com/srvg)
-- Sergei Antipov [@UnderGreen](https://www.github.com/UnderGreen)
-- Seth Edwards [@sedward](https://www.github.com/sedward)
-- Silviu Dicu [@silviud](https://www.github.com/silviud)
-- Simon JAILLET [@jails](https://www.github.com/jails)
-- Stephen Fromm [@sfromm](https://www.github.com/sfromm)
-- Steve [@groks](https://www.github.com/groks)
-- Steve Gargan [@sgargan](https://www.github.com/sgargan)
-- Steve Smith [@tarka](https://www.github.com/tarka)
-- Takashi Someda [@tksmd](https://www.github.com/tksmd)
-- Taneli Leppä [@rosmo](https://www.github.com/rosmo)
-- Tim Bielawa [@tbielawa](https://www.github.com/tbielawa)
-- Tim Mahoney [@timmahoney](https://www.github.com/timmahoney)
-- Timothy Appnel [@tima](https://www.github.com/tima)
-- Tom Bamford [@tombamford](https://www.github.com/tombamford)
-- Trond Hindenes [@trondhindenes](https://www.github.com/trondhindenes)
-- Vincent Van der Kussen [@vincentvdk](https://www.github.com/vincentvdk)
-- Vincent Viallet [@zbal](https://www.github.com/zbal)
-- WAKAYAMA Shirou [@shirou](https://www.github.com/shirou)
-- Will Thames [@willthames](https://www.github.com/willthames)
-- Willy Barro [@willybarro](https://www.github.com/willybarro)
-- Xabier Larrakoetxea [@slok](https://www.github.com/slok)
-- Yeukhon Wong [@yeukhon](https://www.github.com/yeukhon)
-- Zacharie Eakin [@zeekin](https://www.github.com/zeekin)
-- berenddeboer [@berenddeboer](https://www.github.com/berenddeboer)
-- bleader [@bleader](https://www.github.com/bleader)
-- curtis [@ccollicutt](https://www.github.com/ccollicutt)
-
-Retired
-=======
-None yet :)
diff --git a/VERSION b/VERSION
deleted file mode 100644
index ee36851a03e..00000000000
--- a/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-${version}
diff --git a/cloud/amazon/GUIDELINES.md b/cloud/amazon/GUIDELINES.md
index ee5aea90ef7..b8ca836b79a 100644
--- a/cloud/amazon/GUIDELINES.md
+++ b/cloud/amazon/GUIDELINES.md
@@ -1,8 +1,21 @@
-Guidelines for AWS modules
---------------------------
+# Guidelines for AWS modules
 
-Naming your module
-==================
+## Getting Started
+
+Since Ansible 2.0, all new AWS modules must be written to use boto3.
+
+Prior to 2.0, modules may have been written in either boto or boto3. Modules written using boto can continue to be extended using boto.
+
+Backward compatibility of older modules must be maintained.
+
+## Bug fixing
+
+If you are writing a bugfix for a module that uses boto, you should continue to use boto to maintain backward compatibility.
+
+If you are adding new functionality to an existing module that uses boto but the new functionality requires boto3, you
+must maintain backward compatibility of the module and ensure the module still works without boto3.
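For illustration only — a minimal sketch (not part of the original patch) of the pattern just described, where `new_boto3_only_param` is a hypothetical option added to an existing boto-based module:

```python
# Hypothetical sketch: an existing boto-based module gains one boto3-only option.
# The module must keep working for users who only have boto installed.
try:
    import boto3
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False

def check_boto3_only_feature(module):
    # Fail only if the boto3-dependent option is actually requested
    if module.params.get('new_boto3_only_param') is not None and not HAS_BOTO3:
        module.fail_json(msg="new_boto3_only_param requires boto3")
```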
+
+## Naming your module
 
 Base the name of the module
 on the part of AWS that you actually use. (A good rule of thumb
 is to take whatever module you use with boto as a starting point).
 Don't further abbreviate names - if something is a well
 known abbreviation due to it being a major component of AWS, that's fine, but
 don't create new ones independently (e.g. VPC, ELB, etc. are fine)
 
-Using boto
-==========
+## Adding new features
 
-Wrap the `import` statements in a try block and fail the
-module later on if the import fails
+Try to keep backward compatibility with relatively recent
+versions of boto. That means that if you want to implement some
+functionality that uses a new feature of boto, it should only
+fail if that feature actually needs to be run, with a message
+saying which version of boto is needed.
+
+Use feature testing (e.g. `hasattr(boto.module, 'shiny_new_method')`)
+to check whether boto supports a feature rather than version checking,
+e.g. from the `ec2` module:
+```python
+if boto_supports_profile_name_arg(ec2):
+    params['instance_profile_name'] = instance_profile_name
+else:
+    if instance_profile_name is not None:
+        module.fail_json(msg="instance_profile_name parameter requires boto version 2.5.0 or higher")
+```
+
+## Using boto and boto3
+
+### Importing
+
+Wrap import statements in a try block and fail the module later if the import fails.
+
+#### boto
+
+```python
 try:
-    import boto
-    import boto.module.that.you.use
+    import boto.ec2
+    from boto.exception import BotoServerError
     HAS_BOTO = True
 except ImportError:
     HAS_BOTO = False
-
-
 def main():
-    argument_spec = ec2_argument_spec()
-    argument_spec.update(
-        dict(
-            module_specific_parameter=dict(),
-        )
-    )
-
-    module = AnsibleModule(
-        argument_spec=argument_spec,
-    )
+
     if not HAS_BOTO:
         module.fail_json(msg='boto required for this module')
 ```
+
+#### boto3
 
-Try and keep backward compatibility with relatively recent
-versions of boto. That means that if want to implement some
-functionality that uses a new feature of boto, it should only
-fail if that feature actually needs to be run, with a message
-saying which version of boto is needed.
 
+```python
+try:
+    import boto3
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
 
-Use feature testing (e.g. `hasattr('boto.module', 'shiny_new_method')`)
-to check whether boto supports a feature rather than version checking
 
+def main():
 
-e.g. from the `ec2` module:
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 required for this module')
 ```
-if boto_supports_profile_name_arg(ec2):
-    params['instance_profile_name'] = instance_profile_name
+
+#### boto and boto3 combined
+
+If you want to add boto3 functionality to a module written using boto, you must maintain backward compatibility.
+Ensure that you clearly document if a new parameter requires boto3. Import boto3 at the top of the
+module as normal and then check the HAS_BOTO3 bool before running the new feature.
+
+```python
+try:
+    import boto
+    HAS_BOTO = True
+except ImportError:
+    HAS_BOTO = False
+
+try:
+    import boto3
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+if my_new_feature_parameter_is_set:
+    if HAS_BOTO3:
+        pass  # implement the new feature here
+    else:
+        module.fail_json(msg="boto3 is required for this feature")
+```
+
+### Connecting to AWS
+
+To connect to AWS, you should use `get_aws_connection_info` and then
+`connect_to_aws`.
+
+The reason for using `get_aws_connection_info` and `connect_to_aws` rather than doing it
+yourself is that they handle some of the more esoteric connection
+options such as security tokens and boto profiles.
+
+Some boto services require region to be specified. You should check for the region parameter if required.
You should check for the region parameter if required.
+
+#### boto
+
+An example of connecting to ec2:
+
+```python
+region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+if region:
+    try:
+        connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
+    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+        module.fail_json(msg=str(e))
 else:
-    if instance_profile_name is not None:
-        module.fail_json(
-            msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")
+    module.fail_json(msg="region must be specified")
 ```
+#### boto3
+
+An example of connecting to ec2 is shown below. Note that there is no 'NoAuthHandlerFound' exception handling as in boto.
+Instead, an AuthFailure exception will be thrown when you first use 'connection'. See the Exception Handling section below.
+
+```python
+region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+if region:
+    connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
+else:
+    module.fail_json(msg="region must be specified")
+```

-Connecting to AWS
-=================
+### Exception Handling

-For EC2 you can just use
+You should wrap any boto call in a try block. If an exception is thrown, it is up to you to decide how to handle it,
+but usually calling fail_json with the error message will suffice.
+#### boto
+
+```python
+# Import BotoServerError
+try:
+    import boto.ec2
+    from boto.exception import BotoServerError
+    HAS_BOTO = True
+except ImportError:
+    HAS_BOTO = False
+
+# Connect to AWS
+...
+
+# Make a call to AWS
+try:
+    result = connection.aws_call()
+except BotoServerError as e:
+    module.fail_json(msg=e.message)
 ```
-ec2 = ec2_connect(module)
+
+#### boto3
+
+For more information on botocore exception handling see [the botocore error handling documentation](http://botocore.readthedocs.org/en/latest/client_upgrades.html#error-handling).
+
+Boto3 provides lots of useful info when an exception is thrown, so pass this to the user along with the message.
+
+```python
+# Import ClientError from botocore
+try:
+    from botocore.exceptions import ClientError
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+# Connect to AWS
+...
+
+# Make a call to AWS
+try:
+    result = connection.aws_call()
+except ClientError as e:
+    module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
 ```

-For other modules, you should use `get_aws_connection_info` and then
-`connect_to_aws`. To connect to an example `xyz` service:
+If you need to perform an action based on the error boto3 returned, use the error code.
+```python
+# Make a call to AWS
+try:
+    result = connection.aws_call()
+except ClientError as e:
+    if e.response['Error']['Code'] == 'NoSuchEntity':
+        return None
+    else:
+        module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
 ```
-region, ec2_url, aws_connect_params = get_aws_connection_info(module)
-xyz = connect_to_aws(boto.xyz, region, **aws_connect_params)
+
+### Returning Values
+
+When you make a call using boto3, you will probably get back some useful information that you should return in the module.
+
+As well as information related to the call itself, you will also have some response metadata. It is OK to return this to
+the user as well, as they may find it useful.
+
+Boto3 returns all values CamelCased. Ansible follows Python standards for variable names and uses snake_case. There is a
+helper function in module_utils/ec2.py called `camel_dict_to_snake_dict` that allows you to easily convert the boto3
+response to snake_case.
+
+You should use this helper function and avoid changing the names of values returned by Boto3. E.g. if boto3 returns a
+value called 'SecretAccessKey', do not change it to 'AccessKey'.
+
+```python
+# Make a call to AWS
+result = connection.aws_call()
+
+# Return the result to the user
+module.exit_json(changed=True, **camel_dict_to_snake_dict(result))
 ```
-The reason for using `get_aws_connection_info` and `connect_to_aws`
-(and even `ec2_connect` uses those under the hood) rather than doing it
-yourself is that they handle some of the more esoteric connection
-options such as security tokens and boto profiles.
+### Helper functions
+
+Along with the connection functions in Ansible's ec2.py module_utils, there are some other useful functions detailed below.
+
+#### camel_dict_to_snake_dict
+
+boto3 returns results in a dict. The keys of the dict are in CamelCase format. In keeping
+with Ansible format, this function will convert the keys to snake_case.
+
+#### ansible_dict_to_boto3_filter_list
+
+Converts an Ansible list of filters to a boto3-friendly list of dicts. This is useful for
+any boto3 _facts modules.
+
+#### boto3_tag_list_to_ansible_dict
+
+Converts a boto3 tag list to an Ansible dict. Boto3 returns tags as a list of dicts containing keys called
+'Key' and 'Value'. This function converts this list into a single dict where the dict key is the tag
+key and the dict value is the tag value.
+
+#### ansible_dict_to_boto3_tag_list
+
+Opposite of the above. Converts an Ansible dict to a boto3 tag list of dicts.
+
+#### get_ec2_security_group_ids_from_names
+
+Pass this function a list of security group names or a combination of security group names and IDs, and it will
+return a list of IDs. You should also pass the VPC ID if known because security group names are not necessarily unique
+across VPCs.
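+
+As a quick, illustrative sketch of the tag and filter helpers (the resource names and values below are invented for the example):
+
+```python
+from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
+                                      ansible_dict_to_boto3_tag_list,
+                                      boto3_tag_list_to_ansible_dict)
+
+# boto3 returns tags as a list of {'Key': ..., 'Value': ...} dicts
+boto3_tags = [{'Key': 'Name', 'Value': 'web-01'}, {'Key': 'env', 'Value': 'production'}]
+boto3_tag_list_to_ansible_dict(boto3_tags)
+# -> {'Name': 'web-01', 'env': 'production'}
+
+ansible_dict_to_boto3_tag_list({'Name': 'web-01', 'env': 'production'})
+# -> [{'Key': 'Name', 'Value': 'web-01'}, {'Key': 'env', 'Value': 'production'}]
+
+# builds the Filters argument accepted by many describe_* calls
+ansible_dict_to_boto3_filter_list({'instance-state-name': 'running'})
+# -> [{'Name': 'instance-state-name', 'Values': ['running']}]
+```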
diff --git a/cloud/amazon/cloudformation_facts.py b/cloud/amazon/cloudformation_facts.py new file mode 100644 index 00000000000..ae40ed0242d --- /dev/null +++ b/cloud/amazon/cloudformation_facts.py @@ -0,0 +1,290 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cloudformation_facts
+short_description: Obtain facts about an AWS CloudFormation stack
+description:
+  - Gets information about an AWS CloudFormation stack
+requirements:
+  - boto3 >= 1.0.0
+  - python >= 2.6
+version_added: "2.2"
+author: Justin Menga (@jmenga)
+options:
+    stack_name:
+        description:
+          - The name or id of the CloudFormation stack
+        required: true
+    all_facts:
+        description:
+          - Get all stack information for the stack
+        required: false
+        default: false
+    stack_events:
+        description:
+          - Get stack events for the stack
+        required: false
+        default: false
+    stack_template:
+        description:
+          - Get stack template body for the stack
+        required: false
+        default: false
+    stack_resources:
+        description:
+          - Get stack resources for the stack
+        required: false
+        default: false
+    stack_policy:
+        description:
+          - Get stack policy for the stack
+        required: false
+        default: false
+extends_documentation_fragment:
+    - aws
+    - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Get summary information about a stack
+- cloudformation_facts:
+    stack_name: my-cloudformation-stack
+
+# Facts are published in ansible_facts['cloudformation'][<stack_name>]
+- debug:
+    msg: "{{ ansible_facts['cloudformation']['my-cloudformation-stack'] }}"
+
+# Get all stack information about a stack
+- cloudformation_facts:
+    stack_name: my-cloudformation-stack
+    all_facts: true
+
+# Get stack resource and stack policy information about a stack
+- cloudformation_facts:
+    stack_name: my-cloudformation-stack
+    stack_resources: true
+    stack_policy: true
+
+# Example dictionary outputs for stack_outputs, stack_parameters and stack_resources:
+"stack_outputs": {
+    "ApplicationDatabaseName": "dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com",
+    ...
+},
+"stack_parameters": {
+    "DatabaseEngine": "mysql",
+    "DatabasePassword": "****",
+    ...
+},
+"stack_resources": {
+    "AutoscalingGroup": "dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7",
+    "AutoscalingSecurityGroup": "sg-abcd1234",
+    "ApplicationDatabase": "dazvlpr01xj55a",
+    "EcsTaskDefinition": "arn:aws:ecs:ap-southeast-2:123456789:task-definition/dev-someapp-EcsTaskDefinition-1F2VM9QB0I7K9:1"
+    ...
+}
+'''
+
+RETURN = '''
+stack_description:
+    description: Summary facts about the stack
+    returned: always
+    type: dict
+stack_outputs:
+    description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each output 'OutputValue' parameter
+    returned: always
+    type: dict
+stack_parameters:
+    description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of each parameter 'ParameterValue' parameter
+    returned: always
+    type: dict
+stack_events:
+    description: All stack events for the stack
+    returned: only if all_facts or stack_events is true
+    type: list of events
+stack_policy:
+    description: Describes the stack policy for the stack
+    returned: only if all_facts or stack_policy is true
+    type: dict
+stack_template:
+    description: Describes the stack template for the stack
+    returned: only if all_facts or stack_template is true
+    type: dict
+stack_resource_list:
+    description: Describes stack resources for the stack
+    returned: only if all_facts or stack_resources is true
+    type: list of resources
+stack_resources:
+    description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each resource 'PhysicalResourceId' parameter
+    returned: only if all_facts or stack_resources is true
+    type: dict
+'''
+
+try:
+    import boto3
+    import botocore
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+from ansible.module_utils.ec2 import boto3_conn, camel_dict_to_snake_dict, get_aws_connection_info, ec2_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from functools import partial
+import json
+import traceback
+
+class CloudFormationServiceManager:
+    """Handles CloudFormation Services"""
+
+    def __init__(self, module):
+        self.module = module
+
+        try:
+            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+            self.client = boto3_conn(module, conn_type='client',
+                                     resource='cloudformation', region=region,
+                                     endpoint=ec2_url, **aws_connect_kwargs)
+        except botocore.exceptions.NoRegionError:
+            self.module.fail_json(msg="Region must be specified as a parameter, in AWS_DEFAULT_REGION environment variable or in boto configuration file")
+        except Exception as e:
+            self.module.fail_json(msg="Can't establish connection - " + str(e), exception=traceback.format_exc())
+
+    def describe_stack(self, stack_name):
+        try:
+            func = partial(self.client.describe_stacks, StackName=stack_name)
+            response = self.paginated_response(func, 'Stacks')
+            if response:
+                return response[0]
+            self.module.fail_json(msg="Error describing stack - an empty response was returned")
+        except Exception as e:
+            self.module.fail_json(msg="Error describing stack - " + str(e), exception=traceback.format_exc())
+
+    def list_stack_resources(self, stack_name):
+        try:
+            func = partial(self.client.list_stack_resources, StackName=stack_name)
+            return self.paginated_response(func, 'StackResourceSummaries')
+        except Exception as e:
+            self.module.fail_json(msg="Error listing stack resources - " + str(e), exception=traceback.format_exc())
+
+    def describe_stack_events(self, stack_name):
+        try:
+            func = partial(self.client.describe_stack_events, StackName=stack_name)
+            return self.paginated_response(func, 'StackEvents')
+        except Exception as e:
+            self.module.fail_json(msg="Error describing stack events - " + str(e), exception=traceback.format_exc())
+
+    def get_stack_policy(self, stack_name):
+        try:
+            response = self.client.get_stack_policy(StackName=stack_name)
+            stack_policy = response.get('StackPolicyBody')
+            if stack_policy:
+                return json.loads(stack_policy)
+            return dict()
+        except Exception as e:
+            self.module.fail_json(msg="Error getting stack policy - " + str(e), exception=traceback.format_exc())
+
+    def get_template(self, stack_name):
+        try:
+            response = self.client.get_template(StackName=stack_name)
+            return response.get('TemplateBody')
+        except Exception as e:
+            self.module.fail_json(msg="Error getting stack template - " + str(e), exception=traceback.format_exc())
+
+    def paginated_response(self, func, result_key, next_token=None):
+        '''
+        Returns expanded response for paginated operations.
+        The 'result_key' is used to define the concatenated results that are combined from each paginated response.
+        '''
+        args = dict()
+        if next_token:
+            args['NextToken'] = next_token
+        response = func(**args)
+        result = response.get(result_key)
+        next_token = response.get('NextToken')
+        if not next_token:
+            return result
+        # recurse until the service stops returning a NextToken, concatenating pages as we unwind
+        return result + self.paginated_response(func, result_key, next_token)
+
+def to_dict(items, key, value):
+    ''' Transforms a list of items to a Key/Value dictionary '''
+    if items:
+        return dict(zip([i[key] for i in items], [i[value] for i in items]))
+    else:
+        return dict()
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        stack_name=dict(required=True, type='str'),
+        all_facts=dict(required=False, default=False, type='bool'),
+        stack_policy=dict(required=False, default=False, type='bool'),
+        stack_events=dict(required=False, default=False, type='bool'),
+        stack_resources=dict(required=False, default=False, type='bool'),
+        stack_template=dict(required=False, default=False, type='bool'),
+    ))
+
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 is required.')
+
+    # Describe the stack
+    service_mgr = CloudFormationServiceManager(module)
+    stack_name = module.params.get('stack_name')
+    result = {
+        'ansible_facts': { 'cloudformation': { stack_name: {} } }
+    }
+    facts = result['ansible_facts']['cloudformation'][stack_name]
+    facts['stack_description'] = service_mgr.describe_stack(stack_name)
+
+    # Create stack output and stack parameter dictionaries
+    if facts['stack_description']:
+        facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue')
+        facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'), 'ParameterKey', 'ParameterValue')
+
+    # normalize stack description API output
+    facts['stack_description'] = camel_dict_to_snake_dict(facts['stack_description'])
+    # camel2snake doesn't handle NotificationARNs properly, so let's fix that
+    facts['stack_description']['notification_arns'] = facts['stack_description'].pop('notification_ar_ns', [])
+
+    # Create optional stack outputs
+    all_facts = module.params.get('all_facts')
+    if all_facts or module.params.get('stack_resources'):
+        facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name)
+        facts['stack_resources'] = to_dict(facts.get('stack_resource_list'), 'LogicalResourceId', 'PhysicalResourceId')
+    if all_facts or module.params.get('stack_template'):
+        facts['stack_template'] = service_mgr.get_template(stack_name)
+    if all_facts or module.params.get('stack_policy'):
+        facts['stack_policy'] = service_mgr.get_stack_policy(stack_name)
+    if all_facts or module.params.get('stack_events'):
+        facts['stack_events'] =
service_mgr.describe_stack_events(stack_name) + + result['changed'] = False + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/cloudtrail.py b/cloud/amazon/cloudtrail.py index 557f2ebaae3..ab4652fccd4 100644 --- a/cloud/amazon/cloudtrail.py +++ b/cloud/amazon/cloudtrail.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ --- module: cloudtrail @@ -80,16 +84,25 @@ EXAMPLES = """ - name: enable cloudtrail local_action: cloudtrail - state=enabled name=main s3_bucket_name=ourbucket - s3_key_prefix=cloudtrail region=us-east-1 + state: enabled + name: main + s3_bucket_name: ourbucket + s3_key_prefix: cloudtrail + region: us-east-1 - name: enable cloudtrail with different configuration local_action: cloudtrail - state=enabled name=main s3_bucket_name=ourbucket2 - s3_key_prefix='' region=us-east-1 + state: enabled + name: main + s3_bucket_name: ourbucket2 + s3_key_prefix: '' + region: us-east-1 - name: remove cloudtrail - local_action: cloudtrail state=disabled name=main region=us-east-1 + local_action: cloudtrail + state: disabled + name: main + region: us-east-1 """ HAS_BOTO = False @@ -101,6 +114,10 @@ except ImportError: HAS_BOTO = False +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_ec2_creds + + class CloudTrailManager: """Handles cloudtrail configuration""" @@ -112,7 +129,7 @@ def __init__(self, module, region=None, **aws_connect_params): try: self.conn = connect_to_aws(boto.cloudtrail, self.region, **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: self.module.fail_json(msg=str(e)) def view_status(self, name): @@ -152,13 +169,13 @@ def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( - state={'required': True, 'choices': ['enabled', 'disabled'] }, - name={'required': True, 'type': 'str' }, - s3_bucket_name={'required': False, 'type': 'str' }, - s3_key_prefix={'default':'', 'required': False, 'type': 'str' }, - include_global_events={'default':True, 'required': False, 'type': 'bool' }, + state={'required': True, 'choices': ['enabled', 'disabled']}, + name={'required': True, 'type': 'str'}, + s3_bucket_name={'required': False, 'type': 'str'}, + s3_key_prefix={'default': '', 'required': False, 'type': 'str'}, + include_global_events={'default': True, 'required': False, 'type': 'bool'}, )) - required_together = ( ['state', 's3_bucket_name'] ) + required_together = (['state', 's3_bucket_name']) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together) @@ -176,6 +193,7 @@ def main(): s3_bucket_name = module.params['s3_bucket_name'] # remove trailing slash from the key prefix, really messes up the key structure. s3_key_prefix = module.params['s3_key_prefix'].rstrip('/') + include_global_events = module.params['include_global_events'] #if module.params['state'] == 'present' and 'ec2_elbs' not in module.params: @@ -190,7 +208,7 @@ def main(): results['view'] = cf_man.view(ct_name) # only update if the values have changed. 
if results['view']['S3BucketName'] != s3_bucket_name or \ - results['view']['S3KeyPrefix'] != s3_key_prefix or \ + results['view'].get('S3KeyPrefix', '') != s3_key_prefix or \ results['view']['IncludeGlobalServiceEvents'] != include_global_events: if not module.check_mode: results['update'] = cf_man.update(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events) @@ -222,8 +240,6 @@ def main(): module.exit_json(**results) -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/cloudwatchevent_rule.py b/cloud/amazon/cloudwatchevent_rule.py new file mode 100644 index 00000000000..643343d82fb --- /dev/null +++ b/cloud/amazon/cloudwatchevent_rule.py @@ -0,0 +1,415 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: cloudwatchevent_rule +short_description: Manage CloudWatch Event rules and targets +description: + - This module creates and manages CloudWatch event rules and targets. +version_added: "2.2" +extends_documentation_fragment: + - aws +author: "Jim Dalton (@jsdalton) " +requirements: + - python >= 2.6 + - boto3 +notes: + - A rule must contain at least an I(event_pattern) or I(schedule_expression). A + rule can have both an I(event_pattern) and a I(schedule_expression), in which + case the rule will trigger on matching events as well as on a schedule. + - When specifying targets, I(input) and I(input_path) are mutually-exclusive + and optional parameters. +options: + name: + description: + - The name of the rule you are creating, updating or deleting. No spaces + or special characters allowed (i.e. must match C([\.\-_A-Za-z0-9]+)) + required: true + schedule_expression: + description: + - A cron or rate expression that defines the schedule the rule will + trigger on. For example, C(cron(0 20 * * ? *)), C(rate(5 minutes)) + required: false + event_pattern: + description: + - A string pattern (in valid JSON format) that is used to match against + incoming events to determine if the rule should be triggered + required: false + state: + description: + - Whether the rule is present (and enabled), disabled, or absent + choices: ["present", "disabled", "absent"] + default: present + required: false + description: + description: + - A description of the rule + required: false + role_arn: + description: + - The Amazon Resource Name (ARN) of the IAM role associated with the rule + required: false + targets: + description: + - "A dictionary array of targets to add to or update for the rule, in the + form C({ id: [string], arn: [string], input: [valid JSON string], input_path: [valid JSONPath string] }). + I(id) [required] is the unique target assignment ID. 
I(arn) (required)
+      is the Amazon Resource Name associated with the target. I(input)
+      (optional) is a JSON object that will override the event data when
+      passed to the target. I(input_path) (optional) is a JSONPath string
+      (e.g. C($.detail)) that specifies the part of the event data to be
+      passed to the target. If neither I(input) nor I(input_path) is
+      specified, then the entire event is passed to the target in JSON form."
+    required: false
+'''
+
+EXAMPLES = '''
+- cloudwatchevent_rule:
+    name: MyCronTask
+    schedule_expression: "cron(0 20 * * ? *)"
+    description: Run my scheduled task
+    targets:
+      - id: MyTargetId
+        arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
+
+- cloudwatchevent_rule:
+    name: MyDisabledCronTask
+    schedule_expression: "rate(5 minutes)"
+    description: Run my disabled scheduled task
+    state: disabled
+    targets:
+      - id: MyOtherTargetId
+        arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
+        input: '{"foo": "bar"}'
+
+- cloudwatchevent_rule:
+    name: MyCronTask
+    state: absent
+'''
+
+RETURN = '''
+rule:
+    description: CloudWatch Event rule data
+    returned: success
+    type: dict
+    sample: "{ 'arn': 'arn:aws:events:us-east-1:123456789012:rule/MyCronTask', 'description': 'Run my scheduled task', 'name': 'MyCronTask', 'schedule_expression': 'cron(0 20 * * ? *)', 'state': 'ENABLED' }"
+targets:
+    description: CloudWatch Event target(s) assigned to the rule
+    returned: success
+    type: list
+    sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]"
+'''
+
+try:
+    import boto3
+    import botocore
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+
+class CloudWatchEventRule(object):
+    def __init__(self, module, name, client, schedule_expression=None,
+                 event_pattern=None, description=None, role_arn=None):
+        self.name = name
+        self.client = client
+        self.changed = False
+        self.schedule_expression = schedule_expression
+        self.event_pattern = event_pattern
+        self.description = description
+        self.role_arn = role_arn
+
+    def describe(self):
+        """Returns the existing details of the rule in AWS"""
+        try:
+            rule_info = self.client.describe_rule(Name=self.name)
+        except botocore.exceptions.ClientError as e:
+            error_code = e.response.get('Error', {}).get('Code')
+            if error_code == 'ResourceNotFoundException':
+                return {}
+            raise
+        return self._snakify(rule_info)
+
+    def put(self, enabled=True):
+        """Creates or updates the rule in AWS"""
+        request = {
+            'Name': self.name,
+            'State': "ENABLED" if enabled else "DISABLED",
+        }
+        if self.schedule_expression:
+            request['ScheduleExpression'] = self.schedule_expression
+        if self.event_pattern:
+            request['EventPattern'] = self.event_pattern
+        if self.description:
+            request['Description'] = self.description
+        if self.role_arn:
+            request['RoleArn'] = self.role_arn
+        response = self.client.put_rule(**request)
+        self.changed = True
+        return response
+
+    def delete(self):
+        """Deletes the rule in AWS"""
+        self.remove_all_targets()
+        response = self.client.delete_rule(Name=self.name)
+        self.changed = True
+        return response
+
+    def enable(self):
+        """Enables the rule in AWS"""
+        response = self.client.enable_rule(Name=self.name)
+        self.changed = True
+        return response
+
+    def disable(self):
+        """Disables the rule in AWS"""
+        response = self.client.disable_rule(Name=self.name)
+        self.changed = True
+        return response
+
+    def list_targets(self):
+        """Lists the existing targets for the rule in AWS"""
+        try:
+            targets = self.client.list_targets_by_rule(Rule=self.name)
+        except botocore.exceptions.ClientError as e:
+            error_code = e.response.get('Error', {}).get('Code')
+            if error_code == 'ResourceNotFoundException':
+                return []
+            raise
+        return self._snakify(targets)['targets']
+
+    def put_targets(self, targets):
+        """Creates or updates the provided targets on the rule in AWS"""
+        if not targets:
+            return
+        request = {
+            'Rule': self.name,
+            'Targets': self._targets_request(targets),
+        }
+        response = self.client.put_targets(**request)
+        self.changed = True
+        return response
+
+    def remove_targets(self, target_ids):
+        """Removes the provided targets from the rule in AWS"""
+        if not target_ids:
+            return
+        request = {
+            'Rule': self.name,
+            'Ids': target_ids
+        }
+        response = self.client.remove_targets(**request)
+        self.changed = True
+        return response
+
+    def remove_all_targets(self):
+        """Removes all targets on rule"""
+        targets = self.list_targets()
+        return self.remove_targets([t['id'] for t in targets])
+
+    def _targets_request(self, targets):
+        """Formats each target for the request"""
+        targets_request = []
+        for target in targets:
+            target_request = {
+                'Id': target['id'],
+                'Arn': target['arn']
+            }
+            if 'input' in target:
+                target_request['Input'] = target['input']
+            if 'input_path' in target:
+                target_request['InputPath'] = target['input_path']
+            targets_request.append(target_request)
+        return targets_request
+
+    def _snakify(self, camel_dict):
+        """Converts camel case keys to snake case"""
+        return camel_dict_to_snake_dict(camel_dict)
+
+
+class CloudWatchEventRuleManager(object):
+    RULE_FIELDS = ['name', 'event_pattern', 'schedule_expression', 'description', 'role_arn']
+
+    def __init__(self, rule, targets):
+        self.rule = rule
+        self.targets = targets
+
+    def ensure_present(self, enabled=True):
+        """Ensures the rule and targets are present and synced"""
+        rule_description = self.rule.describe()
+        if rule_description:
+            # Rule exists so update rule, targets and state
+            self._sync_rule(enabled)
+            self._sync_targets()
+            self._sync_state(enabled)
+        else:
+            # Rule does not exist, so create new rule and targets
+            self._create(enabled)
+
+    def ensure_disabled(self):
+        """Ensures the rule and targets are present, but disabled, and synced"""
+        self.ensure_present(enabled=False)
+
+    def ensure_absent(self):
+        """Ensures the rule and targets are absent"""
+        rule_description = self.rule.describe()
+        if not rule_description:
+            # Rule doesn't exist so don't need to delete
+            return
+        self.rule.delete()
+
+    def fetch_aws_state(self):
+        """Retrieves rule and target state from AWS"""
+        aws_state = {
+            'rule': {},
+            'targets': [],
+            'changed': self.rule.changed
+        }
+        rule_description = self.rule.describe()
+        if not rule_description:
+            return aws_state
+
+        # Don't need to include response metadata noise in response
+        del rule_description['response_metadata']
+
+        aws_state['rule'] = rule_description
+        aws_state['targets'].extend(self.rule.list_targets())
+        return aws_state
+
+    def _sync_rule(self, enabled=True):
+        """Syncs local rule state with AWS"""
+        if not self._rule_matches_aws():
+            self.rule.put(enabled)
+
+    def _sync_targets(self):
+        """Syncs local targets with AWS"""
+        # Identify and remove extraneous targets on AWS
+        target_ids_to_remove = self._remote_target_ids_to_remove()
+        if target_ids_to_remove:
+            self.rule.remove_targets(target_ids_to_remove)
+
+        # Identify targets that need to be added or updated on AWS
+        targets_to_put = self._targets_to_put()
+        if targets_to_put:
+            self.rule.put_targets(targets_to_put)
+
+    def _sync_state(self, enabled=True):
+        """Syncs local rule state with AWS"""
+        remote_state = self._remote_state()
+        if enabled and remote_state != 'ENABLED':
+            self.rule.enable()
+        elif not enabled and remote_state != 'DISABLED':
+            self.rule.disable()
+
+    def _create(self, enabled=True):
+        """Creates rule and targets on AWS"""
+        self.rule.put(enabled)
+        self.rule.put_targets(self.targets)
+
+    def _rule_matches_aws(self):
+        """Checks if the local rule data matches AWS"""
+        aws_rule_data = self.rule.describe()
+
+        # The rule matches AWS only if all rule data fields are equal
+        # to their corresponding local value defined in the task
+        return all([
+            getattr(self.rule, field) == aws_rule_data.get(field, None)
+            for field in self.RULE_FIELDS
+        ])
+
+    def _targets_to_put(self):
+        """Returns a list of targets that need to be updated or added remotely"""
+        remote_targets = self.rule.list_targets()
+        return [t for t in self.targets if t not in remote_targets]
+
+    def _remote_target_ids_to_remove(self):
+        """Returns a list of targets that need to be removed remotely"""
+        target_ids = [t['id'] for t in self.targets]
+        remote_targets = self.rule.list_targets()
+        return [
+            rt['id'] for rt in remote_targets if rt['id'] not in target_ids
+        ]
+
+    def _remote_state(self):
+        """Returns the remote state from AWS"""
+        description = self.rule.describe()
+        if not description:
+            return
+        return description['state']
+
+
+def get_cloudwatchevents_client(module):
+    """Returns a boto3 client for accessing CloudWatch Events"""
+    try:
+        region, ec2_url, aws_conn_kwargs = get_aws_connection_info(module,
+                                                                   boto3=True)
+        if not region:
+            module.fail_json(msg="Region must be specified as a parameter, in \
+                             EC2_REGION or AWS_REGION environment variables \
+                             or in boto configuration file")
+        return boto3_conn(module, conn_type='client',
+                          resource='events',
+                          region=region, endpoint=ec2_url,
+                          **aws_conn_kwargs)
+    except botocore.exceptions.NoCredentialsError as e:
+        # boto3 has no NoAuthHandlerFound; missing credentials surface as
+        # botocore's NoCredentialsError instead
+        module.fail_json(msg=str(e))
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        name=dict(required=True),
+        schedule_expression=dict(),
+        event_pattern=dict(),
+        state=dict(choices=['present', 'disabled', 'absent'],
+                   default='present'),
+        description=dict(),
+        role_arn=dict(),
+        targets=dict(type='list', default=[]),
+    ))
+    module = AnsibleModule(argument_spec=argument_spec)
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 required for this module')
+
+    rule_data = dict(
+        [(rf, module.params.get(rf)) for rf in CloudWatchEventRuleManager.RULE_FIELDS]
+    )
+    targets = module.params.get('targets')
+    state = module.params.get('state')
+
+    cwe_rule = CloudWatchEventRule(module,
+                                   client=get_cloudwatchevents_client(module),
+                                   **rule_data)
+    cwe_rule_manager = CloudWatchEventRuleManager(cwe_rule, targets)
+
+    if state == 'present':
+        cwe_rule_manager.ensure_present()
+    elif state == 'disabled':
+        cwe_rule_manager.ensure_disabled()
+    elif state == 'absent':
+        cwe_rule_manager.ensure_absent()
+    else:
+        module.fail_json(msg="Invalid state '{0}' provided".format(state))
+
+    module.exit_json(**cwe_rule_manager.fetch_aws_state())
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/amazon/dynamodb_table.py b/cloud/amazon/dynamodb_table.py index c97ff6f0be0..75e410d4b71 100644 --- a/cloud/amazon/dynamodb_table.py +++ b/cloud/amazon/dynamodb_table.py @@ -14,19 +14,22 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ --- module: dynamodb_table short_description: Create, update or delete AWS Dynamo DB tables. +version_added: "2.0" description: - Create or delete AWS Dynamo DB tables. - Can update the provisioned throughput on existing tables. - Returns the status of the specified table. -version_added: "2.0" author: Alan Loi (@loia) -version_added: "2.0" requirements: - - "boto >= 2.13.2" + - "boto >= 2.37.0" options: state: description: @@ -71,13 +74,18 @@ - Write throughput capacity (units) to provision. required: false default: 1 - region: + indexes: description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. + - list of dictionaries describing indexes to add to the table. global indexes can be updated. local indexes don't support updates or have throughput. + - "required options: ['name', 'type', 'hash_key_name']" + - "valid types: ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only']" + - "other options: ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']" required: false - aliases: ['aws_region', 'ec2_region'] - -extends_documentation_fragment: aws + default: [] + version_added: "2.1" +extends_documentation_fragment: + - aws + - ec2 """ EXAMPLES = ''' @@ -99,6 +107,21 @@ read_capacity: 10 write_capacity: 10 +# set index on existing dynamo table +- dynamodb_table: + name: my-table + region: us-east-1 + indexes: + - name: NamedIndex + type: global_include + hash_key_name: id + range_key_name: create_time + includes: + - other_field + - other_field2 + read_capacity: 10 + write_capacity: 10 + # Delete dynamo table - dynamodb_table: name: my-table @@ -114,24 +137,35 @@ sample: ACTIVE ''' +import traceback + try: import boto import boto.dynamodb2 from boto.dynamodb2.table import Table - from boto.dynamodb2.fields import HashKey, RangeKey + from boto.dynamodb2.fields import HashKey, RangeKey, AllIndex, GlobalAllIndex, GlobalIncludeIndex, GlobalKeysOnlyIndex, IncludeIndex, KeysOnlyIndex from boto.dynamodb2.types import STRING, NUMBER, BINARY from boto.exception import BotoServerError, NoAuthHandlerFound, JSONResponseError + from boto.dynamodb2.exceptions import ValidationException HAS_BOTO = True + DYNAMO_TYPE_MAP = { + 'STRING': STRING, + 'NUMBER': NUMBER, + 'BINARY': BINARY + } + except ImportError: HAS_BOTO = False +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info -DYNAMO_TYPE_MAP = { - 'STRING': STRING, - 'NUMBER': NUMBER, - 'BINARY': BINARY -} + +DYNAMO_TYPE_DEFAULT = 'STRING' +INDEX_REQUIRED_OPTIONS = ['name', 'type', 'hash_key_name'] +INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity'] +INDEX_TYPE_OPTIONS = ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only'] def create_or_update_dynamo_table(connection, module): @@ -142,16 +176,20 @@ def create_or_update_dynamo_table(connection, module): range_key_type = module.params.get('range_key_type') read_capacity = module.params.get('read_capacity') write_capacity = module.params.get('write_capacity') + all_indexes = module.params.get('indexes') + + for index in all_indexes: + validate_index(index, module) + + schema = get_schema_param(hash_key_name, 
hash_key_type, range_key_name, range_key_type) - schema = [ - HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type)), - RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type)) - ] throughput = { 'read': read_capacity, 'write': write_capacity } + indexes, global_indexes = get_indexes(all_indexes) + result = dict( region=module.params.get('region'), table_name=table_name, @@ -161,16 +199,18 @@ def create_or_update_dynamo_table(connection, module): range_key_type=range_key_type, read_capacity=read_capacity, write_capacity=write_capacity, + indexes=all_indexes, ) try: table = Table(table_name, connection=connection) + if dynamo_table_exists(table): - result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode) + result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode, global_indexes=global_indexes) else: if not module.check_mode: - Table.create(table_name, connection=connection, schema=schema, throughput=throughput) + Table.create(table_name, connection=connection, schema=schema, throughput=throughput, indexes=indexes, global_indexes=global_indexes) result['changed'] = True if not module.check_mode: @@ -214,23 +254,49 @@ def dynamo_table_exists(table): table.describe() return True - except JSONResponseError, e: + except JSONResponseError as e: if e.message and e.message.startswith('Requested resource not found'): return False else: raise e -def update_dynamo_table(table, throughput=None, check_mode=False): +def update_dynamo_table(table, throughput=None, check_mode=False, global_indexes=None): table.describe() # populate table details - + throughput_changed = False + global_indexes_changed = False if has_throughput_changed(table, throughput): if not check_mode: - return table.update(throughput=throughput) + throughput_changed = table.update(throughput=throughput) + else: + throughput_changed = True + + removed_indexes, added_indexes, index_throughput_changes = get_changed_global_indexes(table, global_indexes) + if removed_indexes: + if not check_mode: + for name, index in removed_indexes.iteritems(): + global_indexes_changed = table.delete_global_secondary_index(name) or global_indexes_changed + else: + global_indexes_changed = True + + if added_indexes: + if not check_mode: + for name, index in added_indexes.iteritems(): + global_indexes_changed = table.create_global_secondary_index(global_index=index) or global_indexes_changed else: - return True + global_indexes_changed = True - return False + if index_throughput_changes: + if not check_mode: + # todo: remove try once boto has https://github.com/boto/boto/pull/3447 fixed + try: + global_indexes_changed = table.update_global_secondary_index(global_indexes=index_throughput_changes) or global_indexes_changed + except ValidationException: + pass + else: + global_indexes_changed = True + + return throughput_changed or global_indexes_changed def has_throughput_changed(table, new_throughput): @@ -241,6 +307,80 @@ def has_throughput_changed(table, new_throughput): new_throughput['write'] != table.throughput['write'] +def get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type): + if range_key_name: + schema = [ + HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT])), + RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT])) + ] + else: + schema = [ + HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type, 
DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT])) + ] + return schema + + +def get_changed_global_indexes(table, global_indexes): + table.describe() + + table_index_info = dict((index.name, index.schema()) for index in table.global_indexes) + table_index_objects = dict((index.name, index) for index in table.global_indexes) + set_index_info = dict((index.name, index.schema()) for index in global_indexes) + set_index_objects = dict((index.name, index) for index in global_indexes) + + removed_indexes = dict((name, index) for name, index in table_index_info.iteritems() if name not in set_index_info) + added_indexes = dict((name, set_index_objects[name]) for name, index in set_index_info.iteritems() if name not in table_index_info) + # todo: uncomment once boto has https://github.com/boto/boto/pull/3447 fixed + # index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.iteritems() if name not in added_indexes and (index.throughput['read'] != str(table_index_objects[name].throughput['read']) or index.throughput['write'] != str(table_index_objects[name].throughput['write']))) + # todo: remove once boto has https://github.com/boto/boto/pull/3447 fixed + index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.iteritems() if name not in added_indexes) + + return removed_indexes, added_indexes, index_throughput_changes + + +def validate_index(index, module): + for key, val in index.iteritems(): + if key not in INDEX_OPTIONS: + module.fail_json(msg='%s is not a valid option for an index' % key) + for required_option in INDEX_REQUIRED_OPTIONS: + if required_option not in index: + module.fail_json(msg='%s is a required option for an index' % required_option) + if index['type'] not in INDEX_TYPE_OPTIONS: + module.fail_json(msg='%s is not a valid index type, must be one of %s' % (index['type'], INDEX_TYPE_OPTIONS)) + +def get_indexes(all_indexes): + indexes = [] + global_indexes = [] + for index in all_indexes: + name = index['name'] + schema = get_schema_param(index.get('hash_key_name'), index.get('hash_key_type'), index.get('range_key_name'), index.get('range_key_type')) + throughput = { + 'read': index.get('read_capacity', 1), + 'write': index.get('write_capacity', 1) + } + + if index['type'] == 'all': + indexes.append(AllIndex(name, parts=schema)) + + elif index['type'] == 'global_all': + global_indexes.append(GlobalAllIndex(name, parts=schema, throughput=throughput)) + + elif index['type'] == 'global_include': + global_indexes.append(GlobalIncludeIndex(name, parts=schema, throughput=throughput, includes=index['includes'])) + + elif index['type'] == 'global_keys_only': + global_indexes.append(GlobalKeysOnlyIndex(name, parts=schema, throughput=throughput)) + + elif index['type'] == 'include': + indexes.append(IncludeIndex(name, parts=schema, includes=index['includes'])) + + elif index['type'] == 'keys_only': + indexes.append(KeysOnlyIndex(name, parts=schema)) + + return indexes, global_indexes + + + def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( @@ -252,6 +392,7 @@ def main(): range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']), read_capacity=dict(default=1, type='int'), write_capacity=dict(default=1, type='int'), + indexes=dict(default=[], type='list'), )) module = AnsibleModule( @@ -267,8 +408,7 @@ def main(): try: connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params) - - except (NoAuthHandlerFound, StandardError), e: + 
except (NoAuthHandlerFound, AnsibleAWSError) as e: module.fail_json(msg=str(e)) state = module.params.get('state') @@ -278,9 +418,5 @@ def main(): delete_dynamo_table(connection, module) -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - if __name__ == '__main__': main() diff --git a/cloud/amazon/ec2_ami_copy.py b/cloud/amazon/ec2_ami_copy.py index ff9bde88022..71b3c611a8f 100644 --- a/cloud/amazon/ec2_ami_copy.py +++ b/cloud/amazon/ec2_ami_copy.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify @@ -14,6 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_ami_copy @@ -26,11 +31,6 @@ description: - the source region that AMI should be copied from required: true - region: - description: - - the destination region that AMI should be copied to - required: true - aliases: ['aws_region', 'ec2_region', 'dest_region'] source_image_id: description: - the id of the image in source region that should be copied @@ -45,6 +45,18 @@ - An optional human-readable string describing the contents and purpose of the new AMI. required: false default: null + encrypted: + description: + - Whether or not to encrypt the target image + required: false + default: null + version_added: "2.2" + kms_key_id: + description: + - KMS key id used to encrypt image. If not specified, uses default EBS Customer Master Key (CMK) for your account. + required: false + default: null + version_added: "2.2" wait: description: - wait for the copied AMI to be in state 'available' before returning. 
@@ -63,37 +75,71 @@ default: null author: Amir Moulavi -extends_documentation_fragment: aws +extends_documentation_fragment: + - aws + - ec2 ''' EXAMPLES = ''' # Basic AMI Copy -- local_action: - module: ec2_ami_copy - source_region: eu-west-1 - dest_region: us-east-1 +- ec2_ami_copy: + source_region: us-east-1 + region: eu-west-1 + source_image_id: ami-xxxxxxx + +# AMI copy wait until available +- ec2_ami_copy: + source_region: us-east-1 + region: eu-west-1 source_image_id: ami-xxxxxxx - name: SuperService-new-AMI - description: latest patch - tags: '{"Name":"SuperService-new-AMI", "type":"SuperService"}' wait: yes register: image_id -''' +# Named AMI copy +- ec2_ami_copy: + source_region: us-east-1 + region: eu-west-1 + source_image_id: ami-xxxxxxx + name: My-Awesome-AMI + description: latest patch + +# Tagged AMI copy +- ec2_ami_copy: + source_region: us-east-1 + region: eu-west-1 + source_image_id: ami-xxxxxxx + tags: + Name: My-Super-AMI + Patch: 1.2.3 + +# Encrypted AMI copy +- ec2_ami_copy: + source_region: us-east-1 + region: eu-west-1 + source_image_id: ami-xxxxxxx + encrypted: yes + +# Encrypted AMI copy with specified key +- ec2_ami_copy: + source_region: us-east-1 + region: eu-west-1 + source_image_id: ami-xxxxxxx + encrypted: yes + kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b +''' -import sys import time try: import boto import boto.ec2 - from boto.vpc import VPCConnection HAS_BOTO = True except ImportError: HAS_BOTO = False - -if not HAS_BOTO: - module.fail_json(msg='boto required for this module') + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import ec2_argument_spec, ec2_connect, get_aws_connection_info + def copy_image(module, ec2): """ @@ -107,6 +153,8 @@ def copy_image(module, ec2): source_image_id = module.params.get('source_image_id') name = module.params.get('name') description = module.params.get('description') + encrypted = module.params.get('encrypted') + kms_key_id = module.params.get('kms_key_id') tags = module.params.get('tags') wait_timeout = int(module.params.get('wait_timeout')) wait = module.params.get('wait') @@ -115,11 +163,13 @@ def copy_image(module, ec2): params = {'source_region': source_region, 'source_image_id': source_image_id, 'name': name, - 'description': description + 'description': description, + 'encrypted': encrypted, + 'kms_key_id': kms_key_id } image_id = ec2.copy_image(**params).image_id - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) img = wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait) @@ -131,7 +181,7 @@ def copy_image(module, ec2): module.exit_json(msg="AMI copy operation complete", image_id=image_id, state=img.state, changed=True) -# register tags to the copied AMI in dest_region +# register tags to the copied AMI def register_tags_if_any(module, ec2, tags, image_id): if tags: try: @@ -157,7 +207,7 @@ def wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait): for i in range(wait_timeout): try: return ec2.get_image(image_id) - except boto.exception.EC2ResponseError, e: + except boto.exception.EC2ResponseError as e: # This exception we expect initially right after registering the copy with EC2 API if 'InvalidAMIID.NotFound' in e.error_code and wait: time.sleep(1) @@ -177,32 +227,33 @@ def main(): source_image_id=dict(required=True), name=dict(), description=dict(default=""), + encrypted=dict(type='bool', 
required=False), + kms_key_id=dict(type='str', required=False), wait=dict(type='bool', default=False), wait_timeout=dict(default=1200), tags=dict(type='dict'))) module = AnsibleModule(argument_spec=argument_spec) + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + try: ec2 = ec2_connect(module) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg=str(e)) try: region, ec2_url, boto_params = get_aws_connection_info(module) - vpc = connect_to_aws(boto.vpc, region, **boto_params) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) + except boto.exception.NoAuthHandlerFound as e: + module.fail_json(msg=str(e)) - if not region: + if not region: module.fail_json(msg="region must be specified") copy_image(module, ec2) -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_asg_facts.py b/cloud/amazon/ec2_asg_facts.py new file mode 100644 index 00000000000..3cd6e678605 --- /dev/null +++ b/cloud/amazon/ec2_asg_facts.py @@ -0,0 +1,359 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ec2_asg_facts +short_description: Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS +description: + - Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS +version_added: "2.2" +author: "Rob White (@wimnat)" +options: + name: + description: + - The prefix or name of the auto scaling group(s) you are searching for. + - "Note: This is a regular expression match with implicit '^' (beginning of string). Append '$' for a complete name match." + required: false + tags: + description: + - "A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling group(s) you are searching for." + required: false +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
+
+# Find all groups
+- ec2_asg_facts:
+  register: asgs
+
+# Find a group with matching name/prefix
+- ec2_asg_facts:
+    name: public-webserver-asg
+  register: asgs
+
+# Find a group with matching tags
+- ec2_asg_facts:
+    tags:
+      project: webapp
+      env: production
+  register: asgs
+
+# Find a group with matching name/prefix and tags
+- ec2_asg_facts:
+    name: myproject
+    tags:
+      env: production
+  register: asgs
+
+# Fail if no groups are found
+- ec2_asg_facts:
+    name: public-webserver-asg
+  register: asgs
+  failed_when: "{{ asgs.results | length == 0 }}"
+
+# Fail if more than 1 group is found
+- ec2_asg_facts:
+    name: public-webserver-asg
+  register: asgs
+  failed_when: "{{ asgs.results | length > 1 }}"
+'''
+
+RETURN = '''
+---
+auto_scaling_group_arn:
+    description: The Amazon Resource Name of the ASG
+    returned: success
+    type: string
+    sample: "arn:aws:autoscaling:us-west-2:1234567890:autoScalingGroup:10787c52-0bcb-427d-82ba-c8e4b008ed2e:autoScalingGroupName/public-webapp-production-1"
+auto_scaling_group_name:
+    description: Name of autoscaling group
+    returned: success
+    type: str
+    sample: "public-webapp-production-1"
+availability_zones:
+    description: List of Availability Zones that are enabled for this ASG.
+    returned: success
+    type: list
+    sample: ["us-west-2a", "us-west-2b", "us-west-2c"]
+created_time:
+    description: The date and time this ASG was created, in ISO 8601 format.
+    returned: success
+    type: string
+    sample: "2015-11-25T00:05:36.309Z"
+default_cooldown:
+    description: The default cooldown time in seconds.
+    returned: success
+    type: int
+    sample: 300
+desired_capacity:
+    description: The number of EC2 instances that should be running in this group.
+    returned: success
+    type: int
+    sample: 3
+health_check_grace_period:
+    description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
+    returned: success
+    type: int
+    sample: 30
+health_check_type:
+    description: The service you want the health status from, one of "EC2" or "ELB".
+    returned: success
+    type: str
+    sample: "ELB"
+instances:
+    description: List of EC2 instances and their status as it relates to the ASG.
+    returned: success
+    type: list
+    sample: [
+        {
+            "availability_zone": "us-west-2a",
+            "health_status": "Healthy",
+            "instance_id": "i-es22ad25",
+            "launch_configuration_name": "public-webapp-production-1",
+            "lifecycle_state": "InService",
+            "protected_from_scale_in": "false"
+        }
+    ]
+launch_configuration_name:
+    description: Name of launch configuration associated with the ASG.
+    returned: success
+    type: str
+    sample: "public-webapp-production-1"
+load_balancer_names:
+    description: List of load balancer names attached to the ASG.
+    returned: success
+    type: list
+    sample: ["elb-webapp-prod"]
+max_size:
+    description: Maximum size of group
+    returned: success
+    type: int
+    sample: 3
+min_size:
+    description: Minimum size of group
+    returned: success
+    type: int
+    sample: 1
+new_instances_protected_from_scale_in:
+    description: Whether or not new instances are protected from automatic scale-in.
+    returned: success
+    type: boolean
+    sample: "false"
+placement_group:
+    description: Placement group into which instances are launched, if any.
+    returned: success
+    type: str
+    sample: None
+status:
+    description: The current state of the group when DeleteAutoScalingGroup is in progress.
+    returned: success
+    type: str
+    sample: None
+tags:
+    description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
+    returned: success
+    type: list
+    sample: [
+        {
+            "key": "Name",
+            "value": "public-webapp-production-1",
+            "resource_id": "public-webapp-production-1",
+            "resource_type": "auto-scaling-group",
+            "propagate_at_launch": "true"
+        },
+        {
+            "key": "env",
+            "value": "production",
+            "resource_id": "public-webapp-production-1",
+            "resource_type": "auto-scaling-group",
+            "propagate_at_launch": "true"
+        }
+    ]
+termination_policies:
+    description: A list of termination policies for the group.
+    returned: success
+    type: list
+    sample: ["Default"]
+'''
+
+import re
+
+try:
+    import boto3
+    from botocore.exceptions import ClientError
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+def match_asg_tags(tags_to_match, asg):
+    for key, value in tags_to_match.iteritems():
+        for tag in asg['Tags']:
+            if key == tag['Key'] and value == tag['Value']:
+                break
+        else: return False
+    return True
+
+def find_asgs(conn, module, name=None, tags=None):
+    """
+    Args:
+        conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
+        name (str): Optional name of the ASG you are looking for.
+        tags (dict): Optional dictionary of tags and values to search for.
+
+    Basic Usage:
+        >>> name = 'public-webapp-production'
+        >>> tags = { 'env': 'production' }
+        >>> conn = boto3.client('autoscaling', region_name='us-west-2')
+        >>> results = find_asgs(conn, module, name=name, tags=tags)
+
+    Returns:
+        List
+        [
+            {
+                "auto_scaling_group_arn": "arn:aws:autoscaling:us-west-2:275977225706:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:autoScalingGroupName/public-webapp-production",
+                "auto_scaling_group_name": "public-webapp-production",
+                "availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"],
+                "created_time": "2016-02-02T23:28:42.481000+00:00",
+                "default_cooldown": 300,
+                "desired_capacity": 2,
+                "enabled_metrics": [],
+                "health_check_grace_period": 300,
+                "health_check_type": "ELB",
+                "instances":
+                [
+                    {
+                        "availability_zone": "us-west-2c",
+                        "health_status": "Healthy",
+                        "instance_id": "i-047a12cb",
+                        "launch_configuration_name": "public-webapp-production-1",
+                        "lifecycle_state": "InService",
+                        "protected_from_scale_in": false
+                    },
+                    {
+                        "availability_zone": "us-west-2a",
+                        "health_status": "Healthy",
+                        "instance_id": "i-7a29df2c",
+                        "launch_configuration_name": "public-webapp-production-1",
+                        "lifecycle_state": "InService",
+                        "protected_from_scale_in": false
+                    }
+                ],
+                "launch_configuration_name": "public-webapp-production-1",
+                "load_balancer_names": ["public-webapp-production-lb"],
+                "max_size": 4,
+                "min_size": 2,
+                "new_instances_protected_from_scale_in": false,
+                "placement_group": None,
+                "status": None,
+                "suspended_processes": [],
+                "tags":
+                [
+                    {
+                        "key": "Name",
+                        "propagate_at_launch": true,
+                        "resource_id": "public-webapp-production",
+                        "resource_type": "auto-scaling-group",
+                        "value": "public-webapp-production"
+                    },
+                    {
+                        "key": "env",
+                        "propagate_at_launch": true,
+                        "resource_id": "public-webapp-production",
+                        "resource_type": "auto-scaling-group",
+                        "value": "production"
+                    }
+                ],
+                "termination_policies":
+                [
+                    "Default"
+                ],
+                "vpc_zone_identifier":
+                [
+                    "subnet-a1b1c1d1",
+                    "subnet-a2b2c2d2",
+                    "subnet-a3b3c3d3"
+                ]
+            }
+        ]
+    """
+
+    try:
+        asgs = conn.describe_auto_scaling_groups()
+    except ClientError as e:
+        module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
+
+    matched_asgs = []
+
+    if name is not None:
+        # the user did specify a name, so compile it as an anchored regular expression
+        name_prog = re.compile(r'^' + name)
+
+    for asg in asgs['AutoScalingGroups']:
+        if name:
+            matched_name = name_prog.search(asg['AutoScalingGroupName'])
+
+        else:
+            matched_name = True
+
+        if tags:
+            matched_tags = match_asg_tags(tags, asg)
+        else:
+            matched_tags = True
+
+        if matched_name and matched_tags:
+            matched_asgs.append(camel_dict_to_snake_dict(asg))
+
+    return matched_asgs
+
+
+def main():
+
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            name=dict(type='str'),
+            tags=dict(type='dict'),
+        )
+    )
+    module = AnsibleModule(argument_spec=argument_spec)
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 required for this module')
+
+    asg_name = module.params.get('name')
+    asg_tags = module.params.get('tags')
+
+    try:
+        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+        autoscaling = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+    except ClientError as e:
+        module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
+
+    results = find_asgs(autoscaling, module, name=asg_name, tags=asg_tags)
+    module.exit_json(results=results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/amazon/ec2_customer_gateway.py b/cloud/amazon/ec2_customer_gateway.py
new file mode 100644
index 00000000000..a8a74926cdd
--- /dev/null
+++ b/cloud/amazon/ec2_customer_gateway.py
@@ -0,0 +1,271 @@
+#!/usr/bin/python
+#
+# This is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_customer_gateway
+short_description: Manage an AWS customer gateway
+description:
+    - Manage an AWS customer gateway
+version_added: "2.2"
+author: Michael Baydoun (@MichaelBaydoun)
+requirements: [ botocore, boto3 ]
+notes:
+    - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.
+    - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use
+      customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.
+options:
+  bgp_asn:
+    description:
+        - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when state=present.
+    required: false
+    default: null
+  ip_address:
+    description:
+        - Internet-routable IP address for the customer gateway, must be a static address.
+    required: true
+  name:
+    description:
+        - Name of the customer gateway.
+    required: true
+  state:
+    description:
+        - Create or terminate the Customer Gateway.
+    required: false
+    default: present
+    choices: [ 'present', 'absent' ]
+extends_documentation_fragment:
+    - aws
+    - ec2
+'''
+
+EXAMPLES = '''
+
+# Create Customer Gateway
+- ec2_customer_gateway:
+    bgp_asn: 12345
+    ip_address: 1.2.3.4
+    name: IndianapolisOffice
+    region: us-east-1
+  register: cgw
+
+# Delete Customer Gateway
+- ec2_customer_gateway:
+    ip_address: 1.2.3.4
+    name: IndianapolisOffice
+    state: absent
+    region: us-east-1
+  register: cgw
+'''
+
+RETURN = '''
+gateway.customer_gateways:
+    description: details about the gateway that was created.
+    returned: success
+    type: complex
+    contains:
+        bgp_asn:
+            description: The Border Gateway Autonomous System Number.
+            returned: when exists and gateway is available.
+            sample: 65123
+            type: string
+        customer_gateway_id:
+            description: gateway id assigned by amazon.
+            returned: when exists and gateway is available.
+            sample: cgw-cb6386a2
+            type: string
+        ip_address:
+            description: ip address of your gateway device.
+            returned: when exists and gateway is available.
+            sample: 1.2.3.4
+            type: string
+        state:
+            description: state of gateway.
+            returned: when gateway exists and is available.
+            sample: available
+            type: string
+        tags:
+            description: any tags on the gateway.
+            returned: when gateway exists and is available, and when tags exist.
+            type: string
+        type:
+            description: encryption type.
+            returned: when gateway exists and is available.
+            sample: ipsec.1
+            type: string
+'''
+
+try:
+    from botocore.exceptions import ClientError
+    HAS_BOTOCORE = True
+except ImportError:
+    HAS_BOTOCORE = False
+
+try:
+    import boto3
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import (boto3_conn, camel_dict_to_snake_dict,
+                                      ec2_argument_spec, get_aws_connection_info)
+
+
+class Ec2CustomerGatewayManager:
+
+    def __init__(self, module):
+        self.module = module
+
+        try:
+            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+            if not region:
+                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+            self.ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+        except ClientError as e:
+            module.fail_json(msg=e.message)
+
+    def ensure_cgw_absent(self, gw_id):
+        response = self.ec2.delete_customer_gateway(
+            DryRun=False,
+            CustomerGatewayId=gw_id
+        )
+        return response
+
+    def ensure_cgw_present(self, bgp_asn, ip_address):
+        response = self.ec2.create_customer_gateway(
+            DryRun=False,
+            Type='ipsec.1',
+            PublicIp=ip_address,
+            BgpAsn=bgp_asn,
+        )
+        return response
+
+    def tag_cgw_name(self, gw_id, name):
+        response = self.ec2.create_tags(
+            DryRun=False,
+            Resources=[
+                gw_id,
+            ],
+            Tags=[
+                {
+                    'Key': 'Name',
+                    'Value': name
+                },
+            ]
+        )
+        return response
+
+    def describe_gateways(self, ip_address):
+        response = self.ec2.describe_customer_gateways(
+            DryRun=False,
+            Filters=[
+                {
+                    'Name': 'state',
+                    'Values': [
+                        'available',
+                    ]
+                },
+                {
+                    'Name': 'ip-address',
+                    'Values': [
+                        ip_address,
+                    ]
+                }
+            ]
+        )
+        return response
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            bgp_asn=dict(required=False, type='int'),
+            ip_address=dict(required=True),
+            name=dict(required=True),
+            state=dict(default='present', choices=['present', 'absent']),
+        )
+    )
+
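+    # bgp_asn is only needed when creating a gateway, so it is enforced
+    # conditionally below rather than marked required in the spec:
+    # required_if=[('state', 'present', ['bgp_asn'])] makes AnsibleModule fail
+    # fast when state=present is requested without a bgp_asn, while
+    # state=absent ignores the parameter entirely.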
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True,
+                           required_if=[
+                               ('state', 'present', ['bgp_asn'])
+                           ]
+                           )
+
+    if not HAS_BOTOCORE:
+        module.fail_json(msg='botocore is required.')
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 is required.')
+
+    gw_mgr = Ec2CustomerGatewayManager(module)
+
+    name = module.params.get('name')
+
+    existing = gw_mgr.describe_gateways(module.params['ip_address'])
+    # describe_gateways returns a key of CustomerGateways, whereas create_gateway returns a
+    # key of CustomerGateway. For consistency, change it here
+    existing['CustomerGateway'] = existing['CustomerGateways']
+
+    results = dict(changed=False)
+    if module.params['state'] == 'present':
+        if existing['CustomerGateway']:
+            results['gateway'] = existing
+            if existing['CustomerGateway'][0]['Tags']:
+                tag_array = existing['CustomerGateway'][0]['Tags']
+                for tag in tag_array:
+                    if tag['Key'] == 'Name':
+                        current_name = tag['Value']
+                        if current_name != name:
+                            results['name'] = gw_mgr.tag_cgw_name(
+                                results['gateway']['CustomerGateway'][0]['CustomerGatewayId'],
+                                module.params['name'],
+                            )
+                            results['changed'] = True
+        else:
+            if not module.check_mode:
+                results['gateway'] = gw_mgr.ensure_cgw_present(
+                    module.params['bgp_asn'],
+                    module.params['ip_address'],
+                )
+                results['name'] = gw_mgr.tag_cgw_name(
+                    results['gateway']['CustomerGateway']['CustomerGatewayId'],
+                    module.params['name'],
+                )
+            results['changed'] = True
+
+    elif module.params['state'] == 'absent':
+        if existing['CustomerGateway']:
+            results['gateway'] = existing
+            if not module.check_mode:
+                results['gateway'] = gw_mgr.ensure_cgw_absent(
+                    existing['CustomerGateway'][0]['CustomerGatewayId']
+                )
+            results['changed'] = True
+
+    pretty_results = camel_dict_to_snake_dict(results)
+    module.exit_json(**pretty_results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/amazon/ec2_elb_facts.py b/cloud/amazon/ec2_elb_facts.py
index 554b75c951d..c4857f6a3cd 100644
--- a/cloud/amazon/ec2_elb_facts.py
+++ b/cloud/amazon/ec2_elb_facts.py
@@ -13,6 +13,10 @@
 # You should have received a copy of the GNU General Public License
 # along with this library. If not, see <http://www.gnu.org/licenses/>.
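+# ANSIBLE_METADATA records the module's support status for the documentation
+# tooling: 'status' gives the interface maturity ('preview' here, meaning the
+# interface may still change, as opposed to 'stableinterface') and
+# 'supported_by' names the group that maintains the module.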
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_elb_facts @@ -20,7 +24,9 @@ description: - Gather facts about EC2 Elastic Load Balancers in AWS version_added: "2.0" -author: "Michael Schultz (github.com/mjschultz)" +author: + - "Michael Schultz (github.com/mjschultz)" + - "Fernando Jose Pando (@nand0p)" options: names: description: @@ -28,7 +34,9 @@ required: false default: null aliases: ['elb_ids', 'ec2_elbs'] -extends_documentation_fragment: aws +extends_documentation_fragment: + - aws + - ec2 ''' EXAMPLES = ''' @@ -43,7 +51,7 @@ - action: module: debug msg: "{{ item.dns_name }}" - with_items: elb_facts.elbs + with_items: "{{ elb_facts.elbs }}" # Gather facts about a particular ELB - action: @@ -66,130 +74,180 @@ - action: module: debug msg: "{{ item.dns_name }}" - with_items: elb_facts.elbs + with_items: "{{ elb_facts.elbs }}" ''' -import xml.etree.ElementTree as ET - try: import boto.ec2.elb + from boto.ec2.tag import Tag from boto.exception import BotoServerError HAS_BOTO = True except ImportError: HAS_BOTO = False +class ElbInformation(object): + """ Handles ELB information """ -def get_error_message(xml_string): + def __init__(self, + module, + names, + region, + **aws_connect_params): - root = ET.fromstring(xml_string) - for message in root.findall('.//Message'): - return message.text + self.module = module + self.names = names + self.region = region + self.aws_connect_params = aws_connect_params + self.connection = self._get_elb_connection() + def _get_tags(self, elbname): + params = {'LoadBalancerNames.member.1': elbname} + try: + elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)]) + return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key')) + except: + return {} -def get_elb_listeners(listeners): - listener_list = [] - for listener in listeners: - listener_dict = { - 'load_balancer_port': listener[0], - 'instance_port': listener[1], - 'protocol': listener[2], - } + def _get_elb_connection(self): + try: + return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) + except BotoServerError as err: + self.module.fail_json(msg=err.message) + + def _get_elb_listeners(self, listeners): + listener_list = [] + + for listener in listeners: + listener_dict = { + 'load_balancer_port': listener[0], + 'instance_port': listener[1], + 'protocol': listener[2], + } + + try: + ssl_certificate_id = listener[4] + except IndexError: + pass + else: + if ssl_certificate_id: + listener_dict['ssl_certificate_id'] = ssl_certificate_id + + listener_list.append(listener_dict) + + return listener_list + + def _get_health_check(self, health_check): + protocol, port_path = health_check.target.split(':') try: - ssl_certificate_id = listener[4] - except IndexError: - pass - else: - if ssl_certificate_id: - listener_dict['ssl_certificate_id'] = ssl_certificate_id - listener_list.append(listener_dict) - - return listener_list - - -def get_health_check(health_check): - protocol, port_path = health_check.target.split(':') - try: - port, path = port_path.split('/') - path = '/{}'.format(path) - except ValueError: - port = port_path - path = None - - health_check_dict = { - 'ping_protocol': protocol.lower(), - 'ping_port': int(port), - 'response_timeout': health_check.timeout, - 'interval': health_check.interval, - 'unhealthy_threshold': health_check.unhealthy_threshold, - 'healthy_threshold': health_check.healthy_threshold, - } - if path: - 
health_check_dict['ping_path'] = path - return health_check_dict - - -def get_elb_info(elb): - elb_info = { - 'name': elb.name, - 'zones': elb.availability_zones, - 'dns_name': elb.dns_name, - 'instances': [instance.id for instance in elb.instances], - 'listeners': get_elb_listeners(elb.listeners), - 'scheme': elb.scheme, - 'security_groups': elb.security_groups, - 'health_check': get_health_check(elb.health_check), - 'subnets': elb.subnets, - } - if elb.vpc_id: - elb_info['vpc_id'] = elb.vpc_id - - return elb_info - - -def list_elb(connection, module): - elb_names = module.params.get("names") - if not elb_names: - elb_names = None - - try: - all_elbs = connection.get_all_load_balancers(elb_names) - except BotoServerError as e: - module.fail_json(msg=get_error_message(e.args[2])) - - elb_array = [] - for elb in all_elbs: - elb_array.append(get_elb_info(elb)) - - module.exit_json(elbs=elb_array) + port, path = port_path.split('/', 1) + path = '/{}'.format(path) + except ValueError: + port = port_path + path = None + + health_check_dict = { + 'ping_protocol': protocol.lower(), + 'ping_port': int(port), + 'response_timeout': health_check.timeout, + 'interval': health_check.interval, + 'unhealthy_threshold': health_check.unhealthy_threshold, + 'healthy_threshold': health_check.healthy_threshold, + } + + if path: + health_check_dict['ping_path'] = path + return health_check_dict + + def _get_elb_info(self, elb): + elb_info = { + 'name': elb.name, + 'zones': elb.availability_zones, + 'dns_name': elb.dns_name, + 'canonical_hosted_zone_name': elb.canonical_hosted_zone_name, + 'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id, + 'hosted_zone_name': elb.canonical_hosted_zone_name, + 'hosted_zone_id': elb.canonical_hosted_zone_name_id, + 'instances': [instance.id for instance in elb.instances], + 'listeners': self._get_elb_listeners(elb.listeners), + 'scheme': elb.scheme, + 'security_groups': elb.security_groups, + 'health_check': self._get_health_check(elb.health_check), + 'subnets': elb.subnets, + 'instances_inservice': [], + 'instances_inservice_count': 0, + 'instances_outofservice': [], + 'instances_outofservice_count': 0, + 'instances_inservice_percent': 0.0, + 'tags': self._get_tags(elb.name) + } + + if elb.vpc_id: + elb_info['vpc_id'] = elb.vpc_id + if elb.instances: + try: + instance_health = self.connection.describe_instance_health(elb.name) + except BotoServerError as err: + self.module.fail_json(msg=err.message) + elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService'] + elb_info['instances_inservice_count'] = len(elb_info['instances_inservice']) + elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService'] + elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice']) + elb_info['instances_inservice_percent'] = float(elb_info['instances_inservice_count'])/( + float(elb_info['instances_inservice_count']) + + float(elb_info['instances_outofservice_count']))*100 + return elb_info + + + def list_elbs(self): + elb_array = [] + + try: + all_elbs = self.connection.get_all_load_balancers() + except BotoServerError as err: + self.module.fail_json(msg = "%s: %s" % (err.error_code, err.error_message)) + + if all_elbs: + if self.names: + for existing_lb in all_elbs: + if existing_lb.name in self.names: + elb_array.append(existing_lb) + else: + elb_array = all_elbs + + return list(map(self._get_elb_info, elb_array)) def main(): argument_spec = 
ec2_argument_spec() - argument_spec.update( - dict( - names={'default': None, 'type': 'list'} + argument_spec.update(dict( + names={'default': [], 'type': 'list'} ) ) - - module = AnsibleModule(argument_spec=argument_spec) + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) if not HAS_BOTO: module.fail_json(msg='boto required for this module') region, ec2_url, aws_connect_params = get_aws_connection_info(module) - if region: - try: - connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: - module.fail_json(msg=str(e)) - else: + if not region: module.fail_json(msg="region must be specified") - list_elb(connection, module) + names = module.params['names'] + elb_information = ElbInformation(module, + names, + region, + **aws_connect_params) + + ec2_facts_result = dict(changed=False, + elbs=elb_information.list_elbs()) + + module.exit_json(**ec2_facts_result) from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_eni.py b/cloud/amazon/ec2_eni.py index 9e878e7d558..aca78a459da 100644 --- a/cloud/amazon/ec2_eni.py +++ b/cloud/amazon/ec2_eni.py @@ -13,14 +13,20 @@ # You should have received a copy of the GNU General Public License # along with this library. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_eni short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance description: - - Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID is provided, an attempt is made to update the existing ENI. By passing 'None' as the instance_id, an ENI can be detached from an instance. + - Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID or private_ip is \ + provided, the existing ENI (if any) will be modified. The 'attached' parameter controls the attachment status \ + of the network interface. version_added: "2.0" -author: Rob White, wimnat [at] gmail.com, @wimnat +author: "Rob White (@wimnat)" options: eni_id: description: @@ -29,7 +35,8 @@ default: null instance_id: description: - - Instance ID that you wish to attach ENI to. To detach an ENI from an instance, use 'None'. + - Instance ID that you wish to attach ENI to. Since version 2.2, use the 'attached' parameter to attach or \ + detach an ENI. Prior to 2.2, to detach an ENI from an instance, use 'None'. required: false default: null private_ip_address: @@ -48,12 +55,13 @@ default: null security_groups: description: - - List of security groups associated with the interface. Only used when state=present. + - List of security groups associated with the interface. Only used when state=present. Since version 2.2, you \ + can specify security groups by ID or by name or a combination of both. Prior to 2.2, you can specify only by ID. required: false default: null state: description: - - Create or delete ENI. + - Create or delete ENI required: false default: present choices: [ 'present', 'absent' ] @@ -62,6 +70,13 @@ - The index of the device for the network interface attachment on the instance. required: false default: 0 + attached: + description: + - Specifies if network interface should be attached or detached from instance. 
If omitted, attachment status \
+        won't change.
+    required: false
+    default: null
+    version_added: 2.2
   force_detach:
     description:
       - Force detachment of the interface. This applies either when explicitly detaching the interface by setting instance_id to None or when deleting an interface with state=absent.
@@ -74,8 +89,20 @@
   source_dest_check:
     description:
       - By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled. You can only specify this flag when the interface is being modified, not on creation.
-    required: false
-extends_documentation_fragment: aws
+    required: false
+  secondary_private_ip_addresses:
+    description:
+      - A list of IP addresses to assign as secondary IP addresses to the network interface. This option is mutually exclusive with secondary_private_ip_address_count
+    required: false
+    version_added: 2.2
+  secondary_private_ip_address_count:
+    description:
+      - The number of secondary IP addresses to assign to the network interface. This option is mutually exclusive with secondary_private_ip_addresses
+    required: false
+    version_added: 2.2
+extends_documentation_fragment:
+    - aws
+    - ec2
 '''

 EXAMPLES = '''
@@ -94,25 +121,48 @@
     private_ip_address: 172.31.0.20
     subnet_id: subnet-xxxxxxxx
     state: present
-
+
+# Create an ENI with two secondary addresses
+- ec2_eni:
+    subnet_id: subnet-xxxxxxxx
+    state: present
+    secondary_private_ip_address_count: 2
+
+# Assign a secondary IP address to an existing ENI
+# This will purge any existing IPs
+- ec2_eni:
+    subnet_id: subnet-xxxxxxxx
+    eni_id: eni-yyyyyyyy
+    state: present
+    secondary_private_ip_addresses:
+      - 172.16.1.1
+
+# Remove any secondary IP addresses from an existing ENI
+- ec2_eni:
+    subnet_id: subnet-xxxxxxxx
+    eni_id: eni-yyyyyyyy
+    state: present
+    secondary_private_ip_address_count: 0
+
 # Destroy an ENI, detaching it from any instance if necessary
 - ec2_eni:
     eni_id: eni-xxxxxxx
     force_detach: yes
     state: absent
-
+
 # Update an ENI
 - ec2_eni:
     eni_id: eni-xxxxxxx
     description: "My new description"
     state: present
-
+
 # Detach an ENI from an instance
 - ec2_eni:
     eni_id: eni-xxxxxxx
     instance_id: None
     state: present
-
+
 ### Delete an interface on termination
 # First create the interface
 - ec2_eni:
@@ -122,7 +172,7 @@
     subnet_id: subnet-xxxxxxxx
     state: present
   register: eni
-
+
 # Modify the interface to enable the delete_on_termination flag
 - ec2_eni:
     eni_id: "{{ eni.interface.id }}"
@@ -130,27 +180,84 @@
 '''

+
+RETURN = '''
+interface:
+  description: Network interface attributes
+  returned: when state != absent
+  type: dictionary
+  contains:
+    description:
+      description: interface description
+      type: string
+      sample: Firewall network interface
+    groups:
+      description: list of security groups
+      type: list of dictionaries
+      sample: [ { "sg-f8a8a9da": "default" } ]
+    id:
+      description: network interface id
+      type: string
+      sample: "eni-1d889198"
+    mac_address:
+      description: interface's physical address
+      type: string
+      sample: "00:00:5E:00:53:23"
+    owner_id:
+      description: aws account id
+      type: string
+      sample: 812381371
+    private_ip_address:
+      description: primary ip address of this interface
+      type: string
+      sample: 10.20.30.40
+    private_ip_addresses:
+      description: list of all private ip addresses associated to this interface
+      type: list of dictionaries
+      sample: [ { "primary_address": true, "private_ip_address": "10.20.30.40" } ]
+    source_dest_check:
+      description: value of source/dest check flag
+      type: boolean
+      sample: True
+    status:
+      description: network interface status
+      type:
string + sample: "pending" + subnet_id: + description: which vpc subnet the interface is bound + type: string + sample: subnet-b0a0393c + vpc_id: + description: which vpc this network interface is bound + type: string + sample: vpc-9a9a9da + +''' + import time -import xml.etree.ElementTree as ET import re try: import boto.ec2 + import boto.vpc from boto.exception import BotoServerError HAS_BOTO = True except ImportError: HAS_BOTO = False +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import (AnsibleAWSError, connect_to_aws, + ec2_argument_spec, get_aws_connection_info, + get_ec2_security_group_ids_from_names) + -def get_error_message(xml_string): - - root = ET.fromstring(xml_string) - for message in root.findall('.//Message'): - return message.text - - def get_eni_info(interface): - + + # Private addresses + private_addresses = [] + for ip in interface.private_ip_addresses: + private_addresses.append({ 'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary }) + interface_info = {'id': interface.id, 'subnet_id': interface.subnet_id, 'vpc_id': interface.vpc_id, @@ -161,8 +268,9 @@ def get_eni_info(interface): 'private_ip_address': interface.private_ip_address, 'source_dest_check': interface.source_dest_check, 'groups': dict((group.id, group.name) for group in interface.groups), + 'private_ip_addresses': private_addresses } - + if interface.attachment is not None: interface_info['attachment'] = {'attachment_id': interface.attachment.id, 'instance_id': interface.attachment.instance_id, @@ -171,11 +279,12 @@ def get_eni_info(interface): 'attach_time': interface.attachment.attach_time, 'delete_on_termination': interface.attachment.delete_on_termination, } - + return interface_info - + + def wait_for_eni(eni, status): - + while True: time.sleep(3) eni.update() @@ -186,114 +295,136 @@ def wait_for_eni(eni, status): else: if status == "attached" and eni.attachment.status == "attached": break - - -def create_eni(connection, module): - + + +def create_eni(connection, vpc_id, module): + instance_id = module.params.get("instance_id") + attached = module.params.get("attached") if instance_id == 'None': instance_id = None - do_detach = True - else: - do_detach = False device_index = module.params.get("device_index") subnet_id = module.params.get('subnet_id') private_ip_address = module.params.get('private_ip_address') description = module.params.get('description') - security_groups = module.params.get('security_groups') + security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection, vpc_id=vpc_id, boto3=False) + secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses") + secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count") changed = False - + try: - eni = compare_eni(connection, module) + eni = find_eni(connection, module) if eni is None: eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups) - if instance_id is not None: + if attached == True and instance_id is not None: try: eni.attach(instance_id, device_index) - except BotoServerError as ex: + except BotoServerError: + eni.delete() + raise + # Wait to allow creation / attachment to finish + wait_for_eni(eni, "attached") + eni.update() + + if secondary_private_ip_address_count is not None: + try: + connection.assign_private_ip_addresses(network_interface_id=eni.id, secondary_private_ip_address_count=secondary_private_ip_address_count) + 
except BotoServerError: eni.delete() raise + + if secondary_private_ip_addresses is not None: + try: + connection.assign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=secondary_private_ip_addresses) + except BotoServerError: + eni.delete() + raise + changed = True - # Wait to allow creation / attachment to finish - wait_for_eni(eni, "attached") - eni.update() - + except BotoServerError as e: - module.fail_json(msg=get_error_message(e.args[2])) - + module.fail_json(msg=e.message) + module.exit_json(changed=changed, interface=get_eni_info(eni)) - -def modify_eni(connection, module): - - eni_id = module.params.get("eni_id") + +def modify_eni(connection, vpc_id, module, eni): + instance_id = module.params.get("instance_id") - if instance_id == 'None': - instance_id = None - do_detach = True - else: - do_detach = False + attached = module.params.get("attached") + do_detach = module.params.get('state') == 'detached' device_index = module.params.get("device_index") - subnet_id = module.params.get('subnet_id') - private_ip_address = module.params.get('private_ip_address') description = module.params.get('description') security_groups = module.params.get('security_groups') force_detach = module.params.get("force_detach") source_dest_check = module.params.get("source_dest_check") delete_on_termination = module.params.get("delete_on_termination") + secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses") + secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count") changed = False - try: - # Get the eni with the eni_id specified - eni_result_set = connection.get_all_network_interfaces(eni_id) - eni = eni_result_set[0] if description is not None: if eni.description != description: connection.modify_network_interface_attribute(eni.id, "description", description) changed = True - if security_groups is not None: - if sorted(get_sec_group_list(eni.groups)) != sorted(security_groups): - connection.modify_network_interface_attribute(eni.id, "groupSet", security_groups) + if len(security_groups) > 0: + groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=vpc_id, boto3=False) + if sorted(get_sec_group_list(eni.groups)) != sorted(groups): + connection.modify_network_interface_attribute(eni.id, "groupSet", groups) changed = True if source_dest_check is not None: if eni.source_dest_check != source_dest_check: connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check) changed = True - if delete_on_termination is not None: - if eni.attachment is not None: - if eni.attachment.delete_on_termination is not delete_on_termination: - connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id) - changed = True - else: - module.fail_json(msg="Can not modify delete_on_termination as the interface is not attached") - if eni.attachment is not None and instance_id is None and do_detach is True: - eni.detach(force_detach) - wait_for_eni(eni, "detached") - changed = True - else: - if instance_id is not None: + if delete_on_termination is not None and eni.attachment is not None: + if eni.attachment.delete_on_termination is not delete_on_termination: + connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id) + changed = True + + current_secondary_addresses = [i.private_ip_address for i in eni.private_ip_addresses if not i.primary] + if 
secondary_private_ip_addresses is not None: + secondary_addresses_to_remove = list(set(current_secondary_addresses) - set(secondary_private_ip_addresses)) + if secondary_addresses_to_remove: + connection.unassign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=list(set(current_secondary_addresses) - set(secondary_private_ip_addresses)), dry_run=False) + connection.assign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=secondary_private_ip_addresses, secondary_private_ip_address_count=None, allow_reassignment=False, dry_run=False) + if secondary_private_ip_address_count is not None: + current_secondary_address_count = len(current_secondary_addresses) + + if secondary_private_ip_address_count > current_secondary_address_count: + connection.assign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=None, secondary_private_ip_address_count=(secondary_private_ip_address_count - current_secondary_address_count), allow_reassignment=False, dry_run=False) + changed = True + elif secondary_private_ip_address_count < current_secondary_address_count: + # How many of these addresses do we want to remove + secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count + connection.unassign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=current_secondary_addresses[:secondary_addresses_to_remove_count], dry_run=False) + + if attached == True: + if eni.attachment and eni.attachment.instance_id != instance_id: + detach_eni(eni, module) + if eni.attachment is None: eni.attach(instance_id, device_index) wait_for_eni(eni, "attached") changed = True + elif attached == False: + detach_eni(eni, module) except BotoServerError as e: - print e - module.fail_json(msg=get_error_message(e.args[2])) - + module.fail_json(msg=e.message) + eni.update() module.exit_json(changed=changed, interface=get_eni_info(eni)) - - + + def delete_eni(connection, module): - + eni_id = module.params.get("eni_id") force_detach = module.params.get("force_detach") - + try: eni_result_set = connection.get_all_network_interfaces(eni_id) eni = eni_result_set[0] - + if force_detach is True: if eni.attachment is not None: eni.detach(force_detach) @@ -305,100 +436,141 @@ def delete_eni(connection, module): else: eni.delete() changed = True - + module.exit_json(changed=changed) except BotoServerError as e: - msg = get_error_message(e.args[2]) regex = re.compile('The networkInterface ID \'.*\' does not exist') - if regex.search(msg) is not None: + if regex.search(e.message) is not None: module.exit_json(changed=False) else: - module.fail_json(msg=get_error_message(e.args[2])) - -def compare_eni(connection, module): - + module.fail_json(msg=e.message) + + +def detach_eni(eni, module): + + force_detach = module.params.get("force_detach") + if eni.attachment is not None: + eni.detach(force_detach) + wait_for_eni(eni, "detached") + eni.update() + module.exit_json(changed=True, interface=get_eni_info(eni)) + else: + module.exit_json(changed=False, interface=get_eni_info(eni)) + + +def find_eni(connection, module): + eni_id = module.params.get("eni_id") subnet_id = module.params.get('subnet_id') private_ip_address = module.params.get('private_ip_address') - description = module.params.get('description') - security_groups = module.params.get('security_groups') - + instance_id = module.params.get('instance_id') + device_index = module.params.get('device_index') + try: - all_eni = connection.get_all_network_interfaces(eni_id) + 
filters = {} + if subnet_id: + filters['subnet-id'] = subnet_id + if private_ip_address: + filters['private-ip-address'] = private_ip_address + else: + if instance_id: + filters['attachment.instance-id'] = instance_id + if device_index: + filters['attachment.device-index'] = device_index + + eni_result = connection.get_all_network_interfaces(eni_id, filters=filters) + if len(eni_result) > 0: + return eni_result[0] + else: + return None - for eni in all_eni: - remote_security_groups = get_sec_group_list(eni.groups) - if (eni.subnet_id == subnet_id) and (eni.private_ip_address == private_ip_address) and (eni.description == description) and (remote_security_groups == security_groups): - return eni - except BotoServerError as e: - module.fail_json(msg=get_error_message(e.args[2])) - + module.fail_json(msg=e.message) + return None + def get_sec_group_list(groups): - + # Build list of remote security groups remote_security_groups = [] for group in groups: remote_security_groups.append(group.id.encode()) - + return remote_security_groups +def _get_vpc_id(connection, module, subnet_id): + + try: + return connection.get_all_subnets(subnet_ids=[subnet_id])[0].vpc_id + except BotoServerError as e: + module.fail_json(msg=e.message) + + def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( - eni_id = dict(default=None), - instance_id = dict(default=None), - private_ip_address = dict(), - subnet_id = dict(), - description = dict(), - security_groups = dict(type='list'), - device_index = dict(default=0, type='int'), - state = dict(default='present', choices=['present', 'absent']), - force_detach = dict(default='no', type='bool'), - source_dest_check = dict(default=None, type='bool'), - delete_on_termination = dict(default=None, type='bool') + eni_id=dict(default=None, type='str'), + instance_id=dict(default=None, type='str'), + private_ip_address=dict(type='str'), + subnet_id=dict(type='str'), + description=dict(type='str'), + security_groups=dict(default=[], type='list'), + device_index=dict(default=0, type='int'), + state=dict(default='present', choices=['present', 'absent']), + force_detach=dict(default='no', type='bool'), + source_dest_check=dict(default=None, type='bool'), + delete_on_termination=dict(default=None, type='bool'), + secondary_private_ip_addresses=dict(default=None, type='list'), + secondary_private_ip_address_count=dict(default=None, type='int'), + attached=dict(default=None, type='bool') ) ) - - module = AnsibleModule(argument_spec=argument_spec) + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=[ + ['secondary_private_ip_addresses', 'secondary_private_ip_address_count'] + ], + required_if=([ + ('state', 'present', ['subnet_id']), + ('state', 'absent', ['eni_id']), + ('attached', True, ['instance_id']) + ]) + ) if not HAS_BOTO: module.fail_json(msg='boto required for this module') - + region, ec2_url, aws_connect_params = get_aws_connection_info(module) - + if region: try: connection = connect_to_aws(boto.ec2, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + vpc_connection = connect_to_aws(boto.vpc, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") state = module.params.get("state") eni_id = module.params.get("eni_id") + private_ip_address = module.params.get('private_ip_address') if state == 'present': - if eni_id is None: - if module.params.get("subnet_id") 
is None: - module.fail_json(msg="subnet_id must be specified when state=present") - create_eni(connection, module) + subnet_id = module.params.get("subnet_id") + vpc_id = _get_vpc_id(vpc_connection, module, subnet_id) + + eni = find_eni(connection, module) + if eni is None: + create_eni(connection, vpc_id, module) else: - modify_eni(connection, module) + modify_eni(connection, vpc_id, module, eni) + elif state == 'absent': - if eni_id is None: - module.fail_json(msg="eni_id must be specified") - else: - delete_eni(connection, module) - -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * + delete_eni(connection, module) -# this is magic, see lib/ansible/module_common.py -#<> -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_eni_facts.py b/cloud/amazon/ec2_eni_facts.py index 981358c33af..4c6882e127c 100644 --- a/cloud/amazon/ec2_eni_facts.py +++ b/cloud/amazon/ec2_eni_facts.py @@ -13,6 +13,10 @@ # You should have received a copy of the GNU General Public License # along with this library. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_eni_facts @@ -22,12 +26,15 @@ version_added: "2.0" author: "Rob White (@wimnat)" options: - eni_id: + filters: description: - - The ID of the ENI. Pass this option to gather facts about a particular ENI, otherwise, all ENIs are returned. + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters. required: false default: null -extends_documentation_fragment: aws + +extends_documentation_fragment: + - aws + - ec2 ''' EXAMPLES = ''' @@ -38,12 +45,11 @@ # Gather facts about a particular ENI - ec2_eni_facts: - eni_id: eni-xxxxxxx + filters: + network-interface-id: eni-xxxxxxx ''' -import xml.etree.ElementTree as ET - try: import boto.ec2 from boto.exception import BotoServerError @@ -51,16 +57,47 @@ except ImportError: HAS_BOTO = False +try: + import boto3 + from botocore.exceptions import ClientError, NoCredentialsError + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import (AnsibleAWSError, + ansible_dict_to_boto3_filter_list, boto3_conn, + boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, + connect_to_aws, ec2_argument_spec, get_aws_connection_info) + + +def list_ec2_snapshots_boto3(connection, module): + + if module.params.get("filters") is None: + filters = [] + else: + filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + + try: + network_interfaces_result = connection.describe_network_interfaces(Filters=filters) + except (ClientError, NoCredentialsError) as e: + module.fail_json(msg=e.message) + + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_network_interfaces_result = camel_dict_to_snake_dict(network_interfaces_result) + for network_interfaces in snaked_network_interfaces_result['network_interfaces']: + network_interfaces['tag_set'] = boto3_tag_list_to_ansible_dict(network_interfaces['tag_set']) + + module.exit_json(**snaked_network_interfaces_result) + -def get_error_message(xml_string): - - root = ET.fromstring(xml_string) - for message in root.findall('.//Message'): - return message.text - - def get_eni_info(interface): - + + # Private addresses + private_addresses = [] + for ip in 
interface.private_ip_addresses: + private_addresses.append({ 'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary }) + interface_info = {'id': interface.id, 'subnet_id': interface.subnet_id, 'vpc_id': interface.vpc_id, @@ -71,8 +108,15 @@ def get_eni_info(interface): 'private_ip_address': interface.private_ip_address, 'source_dest_check': interface.source_dest_check, 'groups': dict((group.id, group.name) for group in interface.groups), + 'private_ip_addresses': private_addresses } - + + if hasattr(interface, 'publicDnsName'): + interface_info['association'] = {'public_ip_address': interface.publicIp, + 'public_dns_name': interface.publicDnsName, + 'ip_owner_id': interface.ipOwnerId + } + if interface.attachment is not None: interface_info['attachment'] = {'attachment_id': interface.attachment.id, 'instance_id': interface.attachment.instance_id, @@ -81,23 +125,23 @@ def get_eni_info(interface): 'attach_time': interface.attachment.attach_time, 'delete_on_termination': interface.attachment.delete_on_termination, } - + return interface_info - + def list_eni(connection, module): - - eni_id = module.params.get("eni_id") + + filters = module.params.get("filters") interface_dict_array = [] - + try: - all_eni = connection.get_all_network_interfaces(eni_id) + all_eni = connection.get_all_network_interfaces(filters=filters) except BotoServerError as e: - module.fail_json(msg=get_error_message(e.args[2])) - + module.fail_json(msg=e.message) + for interface in all_eni: interface_dict_array.append(get_eni_info(interface)) - + module.exit_json(interfaces=interface_dict_array) @@ -105,31 +149,37 @@ def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( - eni_id = dict(default=None) + filters = dict(default=None, type='dict') ) ) - + module = AnsibleModule(argument_spec=argument_spec) if not HAS_BOTO: module.fail_json(msg='boto required for this module') - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - - if region: - try: - connection = connect_to_aws(boto.ec2, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: - module.fail_json(msg=str(e)) + + if HAS_BOTO3: + region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) + + if region: + connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params) + else: + module.fail_json(msg="region must be specified") + + list_ec2_snapshots_boto3(connection, module) else: - module.fail_json(msg="region must be specified") + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if region: + try: + connection = connect_to_aws(boto.ec2, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") - list_eni(connection, module) - -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * + list_eni(connection, module) -# this is magic, see lib/ansible/module_common.py -#<> -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_group_facts.py b/cloud/amazon/ec2_group_facts.py new file mode 100644 index 00000000000..ccb4aa64e30 --- /dev/null +++ b/cloud/amazon/ec2_group_facts.py @@ -0,0 +1,167 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the 
Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_group_facts
+short_description: Gather facts about ec2 security groups in AWS.
+description:
+    - Gather facts about ec2 security groups in AWS.
+version_added: "2.3"
+author: "Henrique Rodrigues (github.com/Sodki)"
+options:
+  filters:
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See \
+        U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) for \
+        possible filters. Filter names and values are case sensitive. You can also use underscores (_) \
+        instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
+    required: false
+    default: {}
+notes:
+  - By default, the module will return all security groups. To limit results use the appropriate filters.
+
+extends_documentation_fragment:
+    - aws
+    - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all security groups
+- ec2_group_facts:
+
+# Gather facts about all security groups in a specific VPC
+- ec2_group_facts:
+    filters:
+      vpc-id: vpc-12345678
+
+# Gather facts about a security group
+- ec2_group_facts:
+    filters:
+      group-name: example-1
+
+# Gather facts about a security group by id
+- ec2_group_facts:
+    filters:
+      group-id: sg-12345678
+
+# Gather facts about a security group with multiple filters, also mixing the use of underscores as filter keys
+- ec2_group_facts:
+    filters:
+      group_id: sg-12345678
+      vpc-id: vpc-12345678
+
+# Gather facts about various security groups
+- ec2_group_facts:
+    filters:
+      group-name:
+        - example-1
+        - example-2
+        - example-3
+
+# Gather facts about any security group with a tag key Name and value Example. The quotes around 'tag:Name' are important because of the colon in the key
+- ec2_group_facts:
+    filters:
+      "tag:Name": Example
+'''
+
+RETURN = '''
+security_groups:
+  description: Security groups that match the provided filters. Each element consists of a dict with all the information related to that security group.
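+  # Illustrative shape of one element (keys are boto3 DescribeSecurityGroups
+  # fields converted to snake_case), e.g.:
+  #   {'description': 'default VPC security group', 'group_id': 'sg-12345678',
+  #    'group_name': 'default', 'ip_permissions': [...], 'owner_id': '123456789012',
+  #    'vpc_id': 'vpc-12345678'}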
+  type: list
+  sample:
+'''
+
+
+try:
+    import boto3
+    from botocore.exceptions import ClientError
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+import traceback
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            filters=dict(default={}, type='dict')
+        )
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec)
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 required for this module')
+
+    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+
+    if region:
+        connection = boto3_conn(
+            module,
+            conn_type='client',
+            resource='ec2',
+            region=region,
+            endpoint=ec2_url,
+            **aws_connect_params
+        )
+    else:
+        module.fail_json(msg="region must be specified")
+
+    # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags.
+    # Iterate over a copy of the keys, since the dict is modified while looping.
+    sanitized_filters = module.params.get("filters")
+    for key in list(sanitized_filters):
+        if not key.startswith("tag:"):
+            sanitized_filters[key.replace("_", "-")] = sanitized_filters.pop(key)
+
+    try:
+        security_groups = connection.describe_security_groups(
+            Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
+        )
+    except ClientError as e:
+        module.fail_json(msg=e.message, exception=traceback.format_exc())
+
+    # Turn the boto3 result in to ansible_friendly_snaked_names
+    snaked_security_groups = []
+    for security_group in security_groups['SecurityGroups']:
+        snaked_security_groups.append(camel_dict_to_snake_dict(security_group))
+
+    # Turn the boto3 result in to ansible friendly tag dictionary
+    for security_group in snaked_security_groups:
+        if 'tags' in security_group:
+            security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group['tags'])
+
+    module.exit_json(security_groups=snaked_security_groups)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/amazon/ec2_lc_facts.py b/cloud/amazon/ec2_lc_facts.py
new file mode 100644
index 00000000000..b81ce8975b6
--- /dev/null
+++ b/cloud/amazon/ec2_lc_facts.py
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_lc_facts
+short_description: Gather facts about AWS Autoscaling Launch Configurations
+description:
+    - Gather facts about AWS Autoscaling Launch Configurations
+version_added: "2.3"
+author: "Loïc Latreille (@psykotox)"
+requirements: [ boto3 ]
+options:
+  name:
+    description:
+      - A name or a list of names to match.
+    required: false
+    default: []
+  sort:
+    description:
+      - Optional attribute by which to sort the results.
+    choices: ['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']
+    default: null
+    required: false
+  sort_order:
+    description:
+      - Order in which to sort results.
+      - Only used when the 'sort' parameter is specified.
+    choices: ['ascending', 'descending']
+    default: 'ascending'
+    required: false
+  sort_start:
+    description:
+      - Which result to start with (when sorting).
+      - Corresponds to Python slice notation.
+    default: null
+    required: false
+  sort_end:
+    description:
+      - Which result to end with (when sorting).
+      - Corresponds to Python slice notation.
+    default: null
+    required: false
+extends_documentation_fragment:
+    - aws
+    - ec2
+'''

+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all launch configurations
+- ec2_lc_facts:
+
+# Gather facts about launch configuration with name "example"
+- ec2_lc_facts:
+    name: example
+
+# Gather facts sorted by created_time from most recent to least recent
+- ec2_lc_facts:
+    sort: created_time
+    sort_order: descending
+'''
+
+RETURN = '''
+block_device_mapping:
+  description: Block device mapping for the instances of launch configuration
+  type: list of block devices
+  sample: "[{
+      'device_name': '/dev/xvda',
+      'ebs': {
+          'delete_on_termination': true,
+          'volume_size': 8,
+          'volume_type': 'gp2'
+      }
+  }]"
+classic_link_vpc_security_groups:
+  description: IDs of one or more security groups for the VPC specified in classic_link_vpc_id
+  type: string
+  sample:
+created_time:
+  description: The creation date and time for the launch configuration
+  type: string
+  sample: "2016-05-27T13:47:44.216000+00:00"
+ebs_optimized:
+  description: EBS I/O optimized (true) or not (false)
+  type: bool
+  sample: true
+image_id:
+  description: ID of the Amazon Machine Image (AMI)
+  type: string
+  sample: "ami-12345678"
+instance_monitoring:
+  description: Launched with detailed monitoring or not
+  type: dict
+  sample: "{
+      'enabled': true
+  }"
+instance_type:
+  description: Instance type
+  type: string
+  sample: "t2.micro"
+kernel_id:
+  description: ID of the kernel associated with the AMI
+  type: string
+  sample:
+key_name:
+  description: Name of the key pair
+  type: string
+  sample: "user_app"
+launch_configuration_arn:
+  description: Amazon Resource Name (ARN) of the launch configuration
+  type: string
+  sample: "arn:aws:autoscaling:us-east-1:666612345678:launchConfiguration:ba785e3a-dd42-6f02-4585-ea1a2b458b3d:launchConfigurationName/lc-app"
+launch_configuration_name:
+  description: Name of the launch configuration
+  type: string
+  sample: "lc-app"
+ramdisk_id:
+  description: ID of the RAM disk associated with the AMI
+  type: string
+  sample:
+security_groups:
+  description: Security groups associated with the launch configuration
+  type: list
+  sample: "[
+      'web'
+  ]"
+user_data:
+  description: User data available
+  type: string
+  sample:
+'''
+
+try:
+    import boto3
+    from botocore.exceptions import ClientError, NoCredentialsError
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+
+def list_launch_configs(connection, module):
+
+    launch_config_name = module.params.get("name")
+    sort = module.params.get('sort')
+    sort_order = module.params.get('sort_order')
+    sort_start = module.params.get('sort_start')
+    sort_end = module.params.get('sort_end')
+
+    try:
+        launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=launch_config_name)
+    except ClientError as e:
+        module.fail_json(msg=e.message)
+
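+    # camel_dict_to_snake_dict() (from ansible.module_utils.ec2) recursively
+    # rewrites boto3's CamelCase keys into snake_case, so that for example
+    #   {'LaunchConfigurationName': 'lc-app', 'ImageId': 'ami-12345678'}
+    # is returned to the playbook as
+    #   {'launch_configuration_name': 'lc-app', 'image_id': 'ami-12345678'}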
+    snaked_launch_configs = []
+    for launch_config in launch_configs['LaunchConfigurations']:
+        snaked_launch_configs.append(camel_dict_to_snake_dict(launch_config))
+
+    for launch_config in snaked_launch_configs:
+        # keys are snake_case after the conversion above
+        if 'created_time' in launch_config:
+            launch_config['created_time'] = str(launch_config['created_time'])
+
+    if sort:
+        snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
+
+    try:
+        if sort and sort_start and sort_end:
+            snaked_launch_configs = snaked_launch_configs[int(sort_start):int(sort_end)]
+        elif sort and sort_start:
+            snaked_launch_configs = snaked_launch_configs[int(sort_start):]
+        elif sort and sort_end:
+            snaked_launch_configs = snaked_launch_configs[:int(sort_end)]
+    except TypeError:
+        module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")
+
+    module.exit_json(launch_configurations=snaked_launch_configs)
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            name = dict(required=False, default=[], type='list'),
+            sort = dict(required=False, default=None,
+                choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']),
+            sort_order = dict(required=False, default='ascending',
+                choices=['ascending', 'descending']),
+            sort_start = dict(required=False),
+            sort_end = dict(required=False),
+        )
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec)
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 required for this module')
+
+    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+
+    if region:
+        connection = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_params)
+    else:
+        module.fail_json(msg="region must be specified")
+
+    list_launch_configs(connection, module)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/amazon/ec2_lc_find.py b/cloud/amazon/ec2_lc_find.py
new file mode 100644
index 00000000000..d6c515d6ffe
--- /dev/null
+++ b/cloud/amazon/ec2_lc_find.py
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2015, Jose Armesto
+#
+# This file is part of Ansible
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: ec2_lc_find
+short_description: Find AWS Autoscaling Launch Configurations
+description:
+    - Returns list of matching Launch Configurations for a given name, along with other useful information
+    - Results can be sorted and sliced
+    - It depends on boto
+    - Based on the work by Tom Bamford (https://github.com/tombamford)
+
+version_added: "2.2"
+author: "Jose Armesto (@fiunchinho)"
+options:
+  region:
+    description:
+      - The AWS region to use.
+    required: true
+    aliases: ['aws_region', 'ec2_region']
+  name_regex:
+    description:
+      - A Launch Configuration name to match
+      - It will be compiled as a regular expression
+    required: True
+  sort_order:
+    description:
+      - Order in which to sort results.
+    choices: ['ascending', 'descending']
+    default: 'ascending'
+    required: false
+  limit:
+    description:
+      - How many results to show.
+      - Corresponds to Python slice notation like list[:limit].
+    default: null
+    required: false
+requirements:
+  - "python >= 2.6"
+  - boto3
+"""

+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Search for the Launch Configurations that start with "app"
+- ec2_lc_find:
+    name_regex: app.*
+    sort_order: descending
+    limit: 2
+'''
+
+RETURN = '''
+image_id:
+    description: AMI id
+    returned: when Launch Configuration was found
+    type: string
+    sample: "ami-0d75df7e"
+user_data:
+    description: User data used to start instance
+    returned: when Launch Configuration was found
+    type: string
+    sample: "ZXhwb3J0IENMT1VE"
+name:
+    description: Name of the Launch Configuration
+    returned: when Launch Configuration was found
+    type: string
+    sample: "myapp-v123"
+arn:
+    description: ARN of the Launch Configuration
+    returned: when Launch Configuration was found
+    type: string
+    sample: "arn:aws:autoscaling:eu-west-1:12345:launchConfiguration:d82f050e-e315:launchConfigurationName/yourproject"
+instance_type:
+    description: Type of ec2 instance
+    returned: when Launch Configuration was found
+    type: string
+    sample: "t2.small"
+created_time:
+    description: When it was created
+    returned: when Launch Configuration was found
+    type: string
+    sample: "2016-06-29T14:59:22.222000+00:00"
+ebs_optimized:
+    description: Launch Configuration EBS optimized property
+    returned: when Launch Configuration was found
+    type: boolean
+    sample: False
+instance_monitoring:
+    description: Launch Configuration instance monitoring property
+    returned: when Launch Configuration was found
+    type: dict
+    sample: {"Enabled": false}
+classic_link_vpc_security_groups:
+    description: Launch Configuration classic link vpc security groups property
+    returned: when Launch Configuration was found
+    type: list
+    sample: []
+block_device_mappings:
+    description: Launch Configuration block device mappings property
+    returned: when Launch Configuration was found
+    type: list
+    sample: []
+keyname:
+    description: Launch Configuration ssh key
+    returned: when Launch Configuration was found
+    type: string
+    sample: mykey
+security_groups:
+    description: Launch Configuration security groups
+    returned: when Launch Configuration was found
+    type: list
+    sample: []
+kernel_id:
+    description: Launch Configuration kernel to use
+    returned: when Launch Configuration was found
+    type: string
+    sample: ''
+ram_disk_id:
+    description: Launch Configuration ram disk property
+    returned: when Launch Configuration was found
+    type: string
+    sample: ''
+associate_public_address:
+    description: Assign public address or not
+    returned: when Launch Configuration was found
+    type: boolean
+    sample: True
+...
+'''
+
+import re
+
+
+def find_launch_configs(client, module):
+    name_regex = module.params.get('name_regex')
+    sort_order = module.params.get('sort_order')
+    limit = module.params.get('limit')
+
+    # Compile the user-supplied pattern once instead of on every page of results.
+    name_pattern = re.compile(name_regex)
+
+    paginator = client.get_paginator('describe_launch_configurations')
+
+    response_iterator = paginator.paginate(
+        PaginationConfig={
+            'MaxItems': 1000,
+            'PageSize': 100
+        }
+    )
+
+    results = []
+
+    for response in response_iterator:
+        response['LaunchConfigurations'] = filter(lambda lc: name_pattern.match(lc['LaunchConfigurationName']),
+                                                  response['LaunchConfigurations'])
+
+        for lc in response['LaunchConfigurations']:
+            data = {
+                'name': lc['LaunchConfigurationName'],
+                'arn': lc['LaunchConfigurationARN'],
+                'created_time': lc['CreatedTime'],
+                'user_data': lc['UserData'],
+                'instance_type': lc['InstanceType'],
+                'image_id': lc['ImageId'],
+                'ebs_optimized': lc['EbsOptimized'],
+                'instance_monitoring': lc['InstanceMonitoring'],
+                'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'],
+                'block_device_mappings': lc['BlockDeviceMappings'],
+                'keyname': lc['KeyName'],
+                'security_groups': lc['SecurityGroups'],
+                'kernel_id': lc['KernelId'],
+                'ram_disk_id': lc['RamdiskId'],
+                'associate_public_address': lc.get('AssociatePublicIpAddress', False),
+            }
+
+            results.append(data)
+
+    results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending'))
+
+    if limit:
+        results = results[:int(limit)]
+
+    module.exit_json(changed=False, results=results)
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        region=dict(required=True, aliases=['aws_region', 'ec2_region']),
+        name_regex=dict(required=True),
+        sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']),
+        limit=dict(required=False, type='int'),
+    )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+    )
+
+    region, ec2_url, aws_connect_params = get_aws_connection_info(module, True)
+
+    client = boto3_conn(module=module, conn_type='client', resource='autoscaling', region=region, **aws_connect_params)
+    find_launch_configs(client, module)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/amazon/ec2_remote_facts.py b/cloud/amazon/ec2_remote_facts.py
index 035b7b42394..98ea16628fa 100644
--- a/cloud/amazon/ec2_remote_facts.py
+++ b/cloud/amazon/ec2_remote_facts.py
@@ -13,140 +13,180 @@
 # You should have received a copy of the GNU General Public License
 # along with this library. If not, see .
 
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+                    'supported_by': 'committer',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: ec2_remote_facts
-short_description: ask EC2 for information about other instances.
+short_description: Gather facts about ec2 instances in AWS
 description:
-    - Only supports seatch for hostname by tags currently. Looking to add more later.
+    - Gather facts about ec2 instances in AWS
 version_added: "2.0"
 options:
-  key:
-    description:
-      - instance tag key in EC2
-    required: false
-    default: Name
-  value:
+  filters:
     description:
-      - instance tag value in EC2
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters.
     required: false
     default: null
-  lookup:
-    description:
-      - What type of lookup to use when searching EC2 instance info.
- required: false - default: tags - region: - description: - - EC2 region that it should look for tags in - required: false - default: All Regions - ignore_state: - description: - - instance state that should be ignored such as terminated. - required: false - default: terminated author: - "Michael Schuett (@michaeljs1990)" -extends_documentation_fragment: aws +extends_documentation_fragment: + - aws + - ec2 ''' EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. -# Basic provisioning example -- ec2_search: - key: mykey - value: myvalue - register: servers +# Gather facts about all ec2 instances +- ec2_remote_facts: + +# Gather facts about all running ec2 instances with a tag of Name:Example +- ec2_remote_facts: + filters: + instance-state-name: running + "tag:Name": Example + +# Gather facts about instance i-123456 +- ec2_remote_facts: + filters: + instance-id: i-123456 + +# Gather facts about all instances in vpc-123456 that are t2.small type +- ec2_remote_facts: + filters: + vpc-id: vpc-123456 + instance-type: t2.small + ''' + try: - import boto import boto.ec2 + from boto.exception import BotoServerError HAS_BOTO = True except ImportError: HAS_BOTO = False -def todict(obj, classkey=None): - if isinstance(obj, dict): - data = {} - for (k, v) in obj.items(): - data[k] = todict(v, classkey) - return data - elif hasattr(obj, "_ast"): - return todict(obj._ast()) - elif hasattr(obj, "__iter__"): - return [todict(v, classkey) for v in obj] - elif hasattr(obj, "__dict__"): - # This Class causes a recursive loop and at this time is not worth - # debugging. If it's useful later I'll look into it. - if not isinstance(obj, boto.ec2.blockdevicemapping.BlockDeviceType): - data = dict([(key, todict(value, classkey)) - for key, value in obj.__dict__.iteritems() - if not callable(value) and not key.startswith('_')]) - if classkey is not None and hasattr(obj, "__class__"): - data[classkey] = obj.__class__.__name__ - return data - else: - return obj +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info + + +def get_instance_info(instance): -def get_all_ec2_regions(module): + # Get groups + groups = [] + for group in instance.groups: + groups.append({ 'id': group.id, 'name': group.name }.copy()) + + # Get interfaces + interfaces = [] + for interface in instance.interfaces: + interfaces.append({ 'id': interface.id, 'mac_address': interface.mac_address }.copy()) + + # If an instance is terminated, sourceDestCheck is no longer returned + try: + source_dest_check = instance.sourceDestCheck + except AttributeError: + source_dest_check = None + + # Get block device mapping try: - regions = boto.ec2.regions() - except Exception, e: - module.fail_json('Boto authentication issue: %s' % e) + bdm_dict = [] + bdm = getattr(instance, 'block_device_mapping') + for device_name in bdm.keys(): + bdm_dict.append({ + 'device_name': device_name, + 'status': bdm[device_name].status, + 'volume_id': bdm[device_name].volume_id, + 'delete_on_termination': bdm[device_name].delete_on_termination, + 'attach_time': bdm[device_name].attach_time + }) + except AttributeError: + pass + + instance_info = { 'id': instance.id, + 'kernel': instance.kernel, + 'instance_profile': instance.instance_profile, + 'root_device_type': instance.root_device_type, + 'private_dns_name': instance.private_dns_name, + 'public_dns_name': instance.public_dns_name, + 'ebs_optimized': 
instance.ebs_optimized, + 'client_token': instance.client_token, + 'virtualization_type': instance.virtualization_type, + 'architecture': instance.architecture, + 'ramdisk': instance.ramdisk, + 'tags': instance.tags, + 'key_name': instance.key_name, + 'source_destination_check': source_dest_check, + 'image_id': instance.image_id, + 'groups': groups, + 'interfaces': interfaces, + 'spot_instance_request_id': instance.spot_instance_request_id, + 'requester_id': instance.requester_id, + 'monitoring_state': instance.monitoring_state, + 'placement': { + 'tenancy': instance._placement.tenancy, + 'zone': instance._placement.zone + }, + 'ami_launch_index': instance.ami_launch_index, + 'launch_time': instance.launch_time, + 'hypervisor': instance.hypervisor, + 'region': instance.region.name, + 'persistent': instance.persistent, + 'private_ip_address': instance.private_ip_address, + 'public_ip_address': instance.ip_address, + 'state': instance._state.name, + 'vpc_id': instance.vpc_id, + 'block_device_mapping': bdm_dict, + } + + return instance_info + + +def list_ec2_instances(connection, module): - return regions + filters = module.params.get("filters") + instance_dict_array = [] -# Connect to ec2 region -def connect_to_region(region, module): try: - conn = boto.ec2.connect_to_region(region.name) - except Exception, e: - print module.jsonify('error connecting to region: ' + region.name) - conn = None - # connect_to_region will fail "silently" by returning - # None if the region name is wrong or not supported - return conn + all_instances = connection.get_only_instances(filters=filters) + except BotoServerError as e: + module.fail_json(msg=e.message) + + for instance in all_instances: + instance_dict_array.append(get_instance_info(instance)) + + module.exit_json(instances=instance_dict_array) + def main(): - module = AnsibleModule( - argument_spec = dict( - key = dict(default='Name'), - value = dict(), - lookup = dict(default='tags'), - ignore_state = dict(default='terminated'), - region = dict(), + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + filters = dict(default=None, type='dict') ) ) + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + if not HAS_BOTO: module.fail_json(msg='boto required for this module') - server_info = list() + region, ec2_url, aws_connect_params = get_aws_connection_info(module) - for region in get_all_ec2_regions(module): - conn = connect_to_region(region, module) + if region: try: - # Run when looking up by tag names, only returning hostname currently - if module.params.get('lookup') == 'tags': - ec2_key = 'tag:' + module.params.get('key') - ec2_value = module.params.get('value') - reservations = conn.get_all_instances(filters={ec2_key : ec2_value}) - for instance in [i for r in reservations for i in r.instances]: - if instance.private_ip_address != None: - instance.hostname = 'ip-' + instance.private_ip_address.replace('.', '-') - if instance._state.name not in module.params.get('ignore_state'): - server_info.append(todict(instance)) - except: - print module.jsonify('error getting instances from: ' + region.name) - - ec2_facts_result = dict(changed=True, ec2=server_info) - - module.exit_json(**ec2_facts_result) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() + connection = connect_to_aws(boto.ec2, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: + module.fail_json(msg=str(e)) + else: + 
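+        # No region could be determined from parameters or the environment,
+        # so there is nothing to connect to.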
module.fail_json(msg="region must be specified") + + list_ec2_instances(connection, module) + + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_snapshot_facts.py b/cloud/amazon/ec2_snapshot_facts.py new file mode 100644 index 00000000000..1fd91960983 --- /dev/null +++ b/cloud/amazon/ec2_snapshot_facts.py @@ -0,0 +1,233 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ec2_snapshot_facts +short_description: Gather facts about ec2 volume snapshots in AWS +description: + - Gather facts about ec2 volume snapshots in AWS +version_added: "2.1" +author: "Rob White (@wimnat)" +options: + snapshot_ids: + description: + - If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned. + required: false + default: [] + owner_ids: + description: + - If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have \ + access are returned. + required: false + default: [] + restorable_by_user_ids: + description: + - If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are \ + returned. + required: false + default: [] + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See \ + U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) for possible filters. Filter \ + names and values are case sensitive. + required: false + default: {} +notes: + - By default, the module will return all snapshots, including public ones. To limit results to snapshots owned by \ + the account use the filter 'owner-id'. + +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather facts about all snapshots, including public ones +- ec2_snapshot_facts: + +# Gather facts about all snapshots owned by the account 0123456789 +- ec2_snapshot_facts: + filters: + owner-id: 0123456789 + +# Or alternatively... +- ec2_snapshot_facts: + owner_ids: + - 0123456789 + +# Gather facts about a particular snapshot using ID +- ec2_snapshot_facts: + filters: + snapshot-id: snap-00112233 + +# Or alternatively... +- ec2_snapshot_facts: + snapshot_ids: + - snap-00112233 + +# Gather facts about any snapshot with a tag key Name and value Example +- ec2_snapshot_facts: + filters: + "tag:Name": Example + +# Gather facts about any snapshot with an error status +- ec2_snapshot_facts: + filters: + status: error + +''' + +RETURN = ''' +snapshot_id: + description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created. + type: string + sample: snap-01234567 +volume_id: + description: The ID of the volume that was used to create the snapshot. 
+ type: string + sample: vol-01234567 +state: + description: The snapshot state (completed, pending or error). + type: string + sample: completed +state_message: + description: Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the error occurred. + type: string + sample: +start_time: + description: The time stamp when the snapshot was initiated. + type: datetime + sample: 2015-02-12T02:14:02+00:00 +progress: + description: The progress of the snapshot, as a percentage. + type: string + sample: 100% +owner_id: + description: The AWS account ID of the EBS snapshot owner. + type: string + sample: 099720109477 +description: + description: The description for the snapshot. + type: string + sample: My important backup +volume_size: + description: The size of the volume, in GiB. + type: integer + sample: 8 +owner_alias: + description: The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot. + type: string + sample: 033440102211 +tags: + description: Any tags assigned to the snapshot. + type: list + sample: "{ 'my_tag_key': 'my_tag_value' }" +encrypted: + description: Indicates whether the snapshot is encrypted. + type: boolean + sample: True +kms_key_id: + description: The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to \ + protect the volume encryption key for the parent volume. + type: string + sample: 74c9742a-a1b2-45cb-b3fe-abcdef123456 +data_encryption_key_id: + description: The data encryption key identifier for the snapshot. This value is a unique identifier that \ + corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy. 
+ type: string + sample: "arn:aws:kms:ap-southeast-2:012345678900:key/74c9742a-a1b2-45cb-b3fe-abcdef123456" + +''' + +try: + import boto3 + from botocore.exceptions import ClientError, NoCredentialsError + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, + boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, + ec2_argument_spec, get_aws_connection_info) + + +def list_ec2_snapshots(connection, module): + + snapshot_ids = module.params.get("snapshot_ids") + owner_ids = map(str, module.params.get("owner_ids")) + restorable_by_user_ids = module.params.get("restorable_by_user_ids") + filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + + try: + snapshots = connection.describe_snapshots(SnapshotIds=snapshot_ids, OwnerIds=owner_ids, RestorableByUserIds=restorable_by_user_ids, Filters=filters) + except ClientError as e: + module.fail_json(msg=e.message) + + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_snapshots = [] + for snapshot in snapshots['Snapshots']: + snaked_snapshots.append(camel_dict_to_snake_dict(snapshot)) + + # Turn the boto3 result in to ansible friendly tag dictionary + for snapshot in snaked_snapshots: + if 'tags' in snapshot: + snapshot['tags'] = boto3_tag_list_to_ansible_dict(snapshot['tags']) + + module.exit_json(snapshots=snaked_snapshots) + + +def main(): + + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + snapshot_ids=dict(default=[], type='list'), + owner_ids=dict(default=[], type='list'), + restorable_by_user_ids=dict(default=[], type='list'), + filters=dict(default={}, type='dict') + ) + ) + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=[ + ['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters'] + ] + ) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) + + if region: + connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params) + else: + module.fail_json(msg="region must be specified") + + list_ec2_snapshots(connection, module) + + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_vol_facts.py b/cloud/amazon/ec2_vol_facts.py new file mode 100644 index 00000000000..14f5282eca7 --- /dev/null +++ b/cloud/amazon/ec2_vol_facts.py @@ -0,0 +1,145 @@ +#!/usr/bin/python +# +# This is a free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see . 
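+# Note: like the other *_facts modules added in this change, ec2_vol_facts
+# only reads from the EC2 API and never modifies any resources.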
+ +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ec2_vol_facts +short_description: Gather facts about ec2 volumes in AWS +description: + - Gather facts about ec2 volumes in AWS +version_added: "2.1" +author: "Rob White (@wimnat)" +options: + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters. + required: false + default: null +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather facts about all volumes +- ec2_vol_facts: + +# Gather facts about a particular volume using volume ID +- ec2_vol_facts: + filters: + volume-id: vol-00112233 + +# Gather facts about any volume with a tag key Name and value Example +- ec2_vol_facts: + filters: + "tag:Name": Example + +# Gather facts about any volume that is attached +- ec2_vol_facts: + filters: + attachment.status: attached + +''' + +# TODO: Disabled the RETURN as it was breaking docs building. Someone needs to +# fix this +RETURN = '''# ''' + +try: + import boto.ec2 + from boto.exception import BotoServerError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info + + +def get_volume_info(volume): + + attachment = volume.attach_data + + volume_info = { + 'create_time': volume.create_time, + 'id': volume.id, + 'iops': volume.iops, + 'size': volume.size, + 'snapshot_id': volume.snapshot_id, + 'status': volume.status, + 'type': volume.type, + 'zone': volume.zone, + 'region': volume.region.name, + 'attachment_set': { + 'attach_time': attachment.attach_time, + 'device': attachment.device, + 'instance_id': attachment.instance_id, + 'status': attachment.status + }, + 'tags': volume.tags + } + + return volume_info + +def list_ec2_volumes(connection, module): + + filters = module.params.get("filters") + volume_dict_array = [] + + try: + all_volumes = connection.get_all_volumes(filters=filters) + except BotoServerError as e: + module.fail_json(msg=e.message) + + for volume in all_volumes: + volume_dict_array.append(get_volume_info(volume)) + + module.exit_json(volumes=volume_dict_array) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + filters = dict(default=None, type='dict') + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if region: + try: + connection = connect_to_aws(boto.ec2, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, StandardError) as e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") + + list_ec2_volumes(connection, module) + + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_vpc_dhcp_options.py b/cloud/amazon/ec2_vpc_dhcp_options.py new file mode 100644 index 00000000000..4caee644519 --- /dev/null +++ b/cloud/amazon/ec2_vpc_dhcp_options.py @@ -0,0 +1,389 @@ +#!/usr/bin/python + +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free 
Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: ec2_vpc_dhcp_options +short_description: Manages DHCP Options, and can ensure the DHCP options for the given VPC match what's + requested +description: + - This module removes, or creates DHCP option sets, and can associate them to a VPC. + Optionally, a new DHCP Options set can be created that converges a VPC's existing + DHCP option set with values provided. + When dhcp_options_id is provided, the module will + 1. remove (with state='absent') + 2. ensure tags are applied (if state='present' and tags are provided + 3. attach it to a VPC (if state='present' and a vpc_id is provided. + If any of the optional values are missing, they will either be treated + as a no-op (i.e., inherit what already exists for the VPC) + To remove existing options while inheriting, supply an empty value + (e.g. set ntp_servers to [] if you want to remove them from the VPC's options) + Most of the options should be self-explanatory. +author: "Joel Thompson (@joelthompson)" +version_added: 2.1 +options: + domain_name: + description: + - The domain name to set in the DHCP option sets + required: false + default: None + dns_servers: + description: + - A list of hosts to set the DNS servers for the VPC to. (Should be a + list of IP addresses rather than host names.) + required: false + default: None + ntp_servers: + description: + - List of hosts to advertise as NTP servers for the VPC. + required: false + default: None + netbios_name_servers: + description: + - List of hosts to advertise as NetBIOS servers. + required: false + default: None + netbios_node_type: + description: + - NetBIOS node type to advertise in the DHCP options. + The AWS recommendation is to use 2 (when using netbios name services) + http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html + required: false + default: None + vpc_id: + description: + - VPC ID to associate with the requested DHCP option set. + If no vpc id is provided, and no matching option set is found then a new + DHCP option set is created. + required: false + default: None + delete_old: + description: + - Whether to delete the old VPC DHCP option set when associating a new one. + This is primarily useful for debugging/development purposes when you + want to quickly roll back to the old option set. Note that this setting + will be ignored, and the old DHCP option set will be preserved, if it + is in use by any other VPC. (Otherwise, AWS will return an error.) + required: false + default: true + inherit_existing: + description: + - For any DHCP options not specified in these parameters, whether to + inherit them from the options set already applied to vpc_id, or to + reset them to be empty. + required: false + default: false + tags: + description: + - Tags to be applied to a VPC options set if a new one is created, or + if the resource_id is provided. 
        (options must match)
+    required: False
+    default: None
+    aliases: [ 'resource_tags']
+    version_added: "2.1"
+  dhcp_options_id:
+    description:
+      - The resource_id of an existing DHCP options set.
+        If this is specified, then it will override other settings, except tags
+        (which will be updated to match)
+    required: False
+    default: None
+    version_added: "2.1"
+  state:
+    description:
+      - create/assign or remove the DHCP options.
+        If state is set to absent, then a DHCP options set matched either
+        by id, or tags and options will be removed if possible.
+    required: False
+    default: present
+    choices: [ 'absent', 'present' ]
+    version_added: "2.1"
+extends_documentation_fragment: aws
+requirements:
+    - boto
+"""
+
+RETURN = """
+new_options:
+    description: The DHCP options created, associated or found
+    returned: when appropriate
+    type: dict
+    sample:
+      domain-name-servers:
+        - 10.0.0.1
+        - 10.0.1.1
+      netbios-name-servers:
+        - 10.0.0.1
+        - 10.0.1.1
+      netbios-node-type: 2
+      domain-name: "my.example.com"
+dhcp_options_id:
+    description: The aws resource id of the primary DHCP options set created, found or removed
+    type: string
+    returned: when available
+changed:
+    description: Whether the dhcp options were changed
+    type: bool
+    returned: always
+"""
+
+EXAMPLES = """
+# Completely overrides the VPC DHCP options associated with VPC vpc-123456 and deletes any existing
+# DHCP option set that may have been attached to that VPC.
+- ec2_vpc_dhcp_options:
+    domain_name: "foo.example.com"
+    region: us-east-1
+    dns_servers:
+        - 10.0.0.1
+        - 10.0.1.1
+    ntp_servers:
+        - 10.0.0.2
+        - 10.0.1.2
+    netbios_name_servers:
+        - 10.0.0.1
+        - 10.0.1.1
+    netbios_node_type: 2
+    vpc_id: vpc-123456
+    delete_old: True
+    inherit_existing: False
+
+
+# Ensure the DHCP option set for the VPC has 10.0.0.4 and 10.0.1.4 as the specified DNS servers, but
+# keep any other existing settings. Also, keep the old DHCP option set around.
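+# (The inventory group lookups below are illustrative placeholders; as the
+# option documentation notes, dns_servers ultimately expects IP addresses.)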
+- ec2_vpc_dhcp_options:
+    region: us-east-1
+    dns_servers:
+      - "{{groups['dns-primary']}}"
+      - "{{groups['dns-secondary']}}"
+    vpc_id: vpc-123456
+    inherit_existing: True
+    delete_old: False
+
+
+## Create a DHCP option set with 4.4.4.4 and 8.8.8.8 as the specified DNS servers, with tags
+## but do not assign to a VPC
+- ec2_vpc_dhcp_options:
+    region: us-east-1
+    dns_servers:
+      - 4.4.4.4
+      - 8.8.8.8
+    tags:
+      Name: google servers
+      Environment: Test
+
+## Delete a DHCP options set that matches the tags and options specified
+- ec2_vpc_dhcp_options:
+    region: us-east-1
+    dns_servers:
+      - 4.4.4.4
+      - 8.8.8.8
+    tags:
+      Name: google servers
+      Environment: Test
+    state: absent
+
+## Associate a DHCP options set with a VPC by ID
+- ec2_vpc_dhcp_options:
+    region: us-east-1
+    dhcp_options_id: dopt-12345678
+    vpc_id: vpc-123456
+
+"""
+
+import boto.vpc
+import boto.ec2
+from boto.exception import EC2ResponseError
+import socket
+import collections
+
+def get_resource_tags(vpc_conn, resource_id):
+    return dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': resource_id}))
+
+def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode):
+    try:
+        cur_tags = get_resource_tags(vpc_conn, resource_id)
+        if tags == cur_tags:
+            return {'changed': False, 'tags': cur_tags}
+
+        to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
+        if to_delete and not add_only:
+            vpc_conn.delete_tags(resource_id, to_delete, dry_run=check_mode)
+
+        to_add = dict((k, tags[k]) for k in tags if k not in cur_tags)
+        if to_add:
+            vpc_conn.create_tags(resource_id, to_add, dry_run=check_mode)
+
+        latest_tags = get_resource_tags(vpc_conn, resource_id)
+        return {'changed': True, 'tags': latest_tags}
+    except EC2ResponseError:
+        # Neither `module` nor a get_error_message() helper exists in this
+        # scope, so re-raise rather than calling undefined names here; the
+        # caller's AnsibleModule machinery reports the traceback.
+        raise
+
+def fetch_dhcp_options_for_vpc(vpc_conn, vpc_id):
+    """
+    Returns the DHCP options object currently associated with the requested VPC ID using the VPC
+    connection variable.
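+    Returns None if the VPC is not found, if it still uses the default option
+    set, or if the associated option set cannot be retrieved.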
+ """ + vpcs = vpc_conn.get_all_vpcs(vpc_ids=[vpc_id]) + if len(vpcs) != 1 or vpcs[0].dhcp_options_id == "default": + return None + dhcp_options = vpc_conn.get_all_dhcp_options(dhcp_options_ids=[vpcs[0].dhcp_options_id]) + if len(dhcp_options) != 1: + return None + return dhcp_options[0] + +def match_dhcp_options(vpc_conn, tags=None, options=None): + """ + Finds a DHCP Options object that optionally matches the tags and options provided + """ + dhcp_options = vpc_conn.get_all_dhcp_options() + for dopts in dhcp_options: + if (not tags) or get_resource_tags(vpc_conn, dopts.id) == tags: + if (not options) or dopts.options == options: + return(True, dopts) + return(False, None) + +def remove_dhcp_options_by_id(vpc_conn, dhcp_options_id): + associations = vpc_conn.get_all_vpcs(filters={'dhcpOptionsId': dhcp_options_id}) + if len(associations) > 0: + return False + else: + vpc_conn.delete_dhcp_options(dhcp_options_id) + return True + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + dhcp_options_id=dict(type='str', default=None), + domain_name=dict(type='str', default=None), + dns_servers=dict(type='list', default=None), + ntp_servers=dict(type='list', default=None), + netbios_name_servers=dict(type='list', default=None), + netbios_node_type=dict(type='int', default=None), + vpc_id=dict(type='str', default=None), + delete_old=dict(type='bool', default=True), + inherit_existing=dict(type='bool', default=False), + tags=dict(type='dict', default=None, aliases=['resource_tags']), + state=dict(type='str', default='present', choices=['present', 'absent']) + ) + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + params = module.params + found = False + changed = False + new_options = collections.defaultdict(lambda: None) + + + region, ec2_url, boto_params = get_aws_connection_info(module) + connection = connect_to_aws(boto.vpc, region, **boto_params) + + existing_options = None + + # First check if we were given a dhcp_options_id + if not params['dhcp_options_id']: + # No, so create new_options from the parameters + if params['dns_servers'] != None: + new_options['domain-name-servers'] = params['dns_servers'] + if params['netbios_name_servers'] != None: + new_options['netbios-name-servers'] = params['netbios_name_servers'] + if params['ntp_servers'] != None: + new_options['ntp-servers'] = params['ntp_servers'] + if params['domain_name'] != None: + # needs to be a list for comparison with boto objects later + new_options['domain-name'] = [ params['domain_name'] ] + if params['netbios_node_type'] != None: + # needs to be a list for comparison with boto objects later + new_options['netbios-node-type'] = [ str(params['netbios_node_type']) ] + # If we were given a vpc_id then we need to look at the options on that + if params['vpc_id']: + existing_options = fetch_dhcp_options_for_vpc(connection, params['vpc_id']) + # if we've been asked to inherit existing options, do that now + if params['inherit_existing']: + if existing_options: + for option in [ 'domain-name-servers', 'netbios-name-servers', 'ntp-servers', 'domain-name', 'netbios-node-type']: + if existing_options.options.get(option) and new_options[option] != [] and (not new_options[option] or [''] == new_options[option]): + new_options[option] = existing_options.options.get(option) + + # Do the vpc's dhcp options already match what we're asked for? 
if so we are done + if existing_options and new_options == existing_options.options: + module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=existing_options.id) + + # If no vpc_id was given, or the options don't match then look for an existing set using tags + found, dhcp_option = match_dhcp_options(connection, params['tags'], new_options) + + # Now let's cover the case where there are existing options that we were told about by id + # If a dhcp_options_id was supplied we don't look at options inside, just set tags (if given) + else: + supplied_options = connection.get_all_dhcp_options(filters={'dhcp-options-id':params['dhcp_options_id']}) + if len(supplied_options) != 1: + if params['state'] != 'absent': + module.fail_json(msg=" a dhcp_options_id was supplied, but does not exist") + else: + found = True + dhcp_option = supplied_options[0] + if params['state'] != 'absent' and params['tags']: + ensure_tags(connection, dhcp_option.id, params['tags'], False, module.check_mode) + + # Now we have the dhcp options set, let's do the necessary + + # if we found options we were asked to remove then try to do so + if params['state'] == 'absent': + if not module.check_mode: + if found: + changed = remove_dhcp_options_by_id(connection, dhcp_option.id) + module.exit_json(changed=changed, new_options={}) + + # otherwise if we haven't found the required options we have something to do + elif not module.check_mode and not found: + + # create some dhcp options if we weren't able to use existing ones + if not found: + # Convert netbios-node-type and domain-name back to strings + if new_options['netbios-node-type']: + new_options['netbios-node-type'] = new_options['netbios-node-type'][0] + if new_options['domain-name']: + new_options['domain-name'] = new_options['domain-name'][0] + + # create the new dhcp options set requested + dhcp_option = connection.create_dhcp_options( + new_options['domain-name'], + new_options['domain-name-servers'], + new_options['ntp-servers'], + new_options['netbios-name-servers'], + new_options['netbios-node-type']) + changed = True + if params['tags']: + ensure_tags(connection, dhcp_option.id, params['tags'], False, module.check_mode) + + # If we were given a vpc_id, then attach the options we now have to that before we finish + if params['vpc_id'] and not module.check_mode: + changed = True + connection.associate_dhcp_options(dhcp_option.id, params['vpc_id']) + # and remove old ones if that was requested + if params['delete_old'] and existing_options: + remove_dhcp_options_by_id(connection, existing_options.id) + + module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=dhcp_option.id) + + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == "__main__": + main() diff --git a/cloud/amazon/ec2_vpc_dhcp_options_facts.py b/cloud/amazon/ec2_vpc_dhcp_options_facts.py new file mode 100644 index 00000000000..063f525ea0f --- /dev/null +++ b/cloud/amazon/ec2_vpc_dhcp_options_facts.py @@ -0,0 +1,171 @@ +#!/usr/bin/python +# +# This is a free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_dhcp_options_facts
+short_description: Gather facts about dhcp options sets in AWS
+description:
+    - Gather facts about dhcp options sets in AWS
+version_added: "2.2"
+requirements: [ boto3 ]
+author: "Nick Aslanidis (@naslanidis)"
+options:
+  filters:
+    description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html) for possible filters.
    required: false
+    default: null
+  DhcpOptionsIds:
+    description:
+      - Get details of specific DHCP Option ID
+      - Provide this value as a list
+    required: false
+    default: None
+extends_documentation_fragment:
+    - aws
+    - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather facts about all DHCP Option sets for an account or profile
+  ec2_vpc_dhcp_options_facts:
+    region: ap-southeast-2
+    profile: production
+  register: dhcp_facts
+
+- name: Gather facts about a filtered list of DHCP Option sets
+  ec2_vpc_dhcp_options_facts:
+    region: ap-southeast-2
+    profile: production
+    filters:
+        "tag:Name": "abc-123"
+  register: dhcp_facts
+
+- name: Gather facts about a specific DHCP Option set by DhcpOptionId
+  ec2_vpc_dhcp_options_facts:
+    region: ap-southeast-2
+    profile: production
+    DhcpOptionsIds: dopt-123fece2
+  register: dhcp_facts
+
+'''
+
+RETURN = '''
+dhcp_options:
+    description: The dhcp option sets for the account
+    returned: always
+    type: list
+
+changed:
+    description: True if listing the dhcp options succeeds
+    type: bool
+    returned: always
+'''
+
+import json
+
+try:
+    import botocore
+    import boto3
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+
+def get_dhcp_options_info(dhcp_option):
+    dhcp_option_info = {'DhcpOptionsId': dhcp_option['DhcpOptionsId'],
+                        'DhcpConfigurations': dhcp_option['DhcpConfigurations'],
+                        'Tags': dhcp_option['Tags']
+                        }
+    return dhcp_option_info
+
+
+def list_dhcp_options(client, module):
+    dryrun = module.params.get("DryRun")
+    all_dhcp_options_array = []
+    params = dict()
+
+    if module.params.get('filters'):
+        params['Filters'] = []
+        for key, value in module.params.get('filters').iteritems():
+            temp_dict = dict()
+            temp_dict['Name'] = key
+            if isinstance(value, basestring):
+                temp_dict['Values'] = [value]
+            else:
+                temp_dict['Values'] = value
+            params['Filters'].append(temp_dict)
+
+    if dryrun:
+        params['DryRun'] = dryrun
+
+    if module.params.get("DhcpOptionsIds"):
+        params['DhcpOptionsIds'] = module.params.get("DhcpOptionsIds")
+
+    try:
+        all_dhcp_options = client.describe_dhcp_options(**params)
+    except botocore.exceptions.ClientError as e:
+        module.fail_json(msg=str(e))
+
+    for dhcp_option in all_dhcp_options['DhcpOptions']:
+        all_dhcp_options_array.append(get_dhcp_options_info(dhcp_option))
+
+    snaked_dhcp_options_array = []
+    for dhcp_option in all_dhcp_options_array:
+        snaked_dhcp_options_array.append(camel_dict_to_snake_dict(dhcp_option))
+
+    module.exit_json(dhcp_options=snaked_dhcp_options_array)
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            filters=dict(type='dict', default=None),
+            DryRun=dict(type='bool', default=False),
+            DhcpOptionsIds=dict(type='list', default=None)
+        )
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec)
+
+    # Validate Requirements
+    if not HAS_BOTO3:
+        module.fail_json(msg='json and botocore/boto3 are required.')
+
+    try:
+        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+        connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+    except botocore.exceptions.NoCredentialsError as e:
+        module.fail_json(msg="Can't authorize connection - "+str(e))
+
+    # list_dhcp_options exits the module itself via exit_json, so no further
+    # handling of its return value is needed here.
+    list_dhcp_options(connection, module)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/amazon/ec2_vpc_igw.py b/cloud/amazon/ec2_vpc_igw.py
index 63be48248ef..91366f35ac6 100644
--- a/cloud/amazon/ec2_vpc_igw.py
+++ b/cloud/amazon/ec2_vpc_igw.py
@@ -13,6 +13,10 @@
 # You should have received a copy of the GNU General Public License
 # along with this library. If not, see .
 
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+                    'supported_by': 'committer',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: ec2_vpc_igw
@@ -20,7 +24,7 @@
 description:
     - Manage an AWS VPC Internet gateway
 version_added: "2.0"
-author: Robert Estelle, @erydo
+author: Robert Estelle (@erydo)
 options:
   vpc_id:
     description:
@@ -32,25 +36,23 @@
       - Create or terminate the IGW
     required: false
     default: present
-extends_documentation_fragment: aws
+    choices: [ 'present', 'absent' ]
+extends_documentation_fragment:
+    - aws
+    - ec2
 '''
 
 EXAMPLES = '''
 # Note: These examples do not set authentication details, see the AWS Guide for details.
 
 # Ensure that the VPC has an Internet Gateway.
-# The Internet Gateway ID is can be accessed via {{igw.gateway_id}} for use
-# in setting up NATs etc.
-  local_action:
-    module: ec2_vpc_igw
-    vpc_id: {{vpc.vpc_id}}
-    region: {{vpc.vpc.region}}
-    state: present
-  register: igw
-'''
-
+# The Internet Gateway ID can be accessed via {{igw.gateway_id}} for use in setting up NATs etc.
+- ec2_vpc_igw:
+    vpc_id: vpc-abcdefgh
+    state: present
+  register: igw
-
-import sys  # noqa
+'''
 
 try:
     import boto.ec2
@@ -62,6 +64,9 @@
     if __name__ != '__main__':
         raise
 
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
+
 
 class AnsibleIGWException(Exception):
     pass
@@ -117,7 +122,7 @@ def main():
     argument_spec.update(
         dict(
             vpc_id = dict(required=True),
-            state = dict(choices=['present', 'absent'], default='present')
+            state = dict(default='present', choices=['present', 'absent'])
         )
     )
 
@@ -133,8 +138,8 @@ def main():
     if region:
         try:
-            connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
-        except (boto.exception.NoAuthHandlerFound, StandardError), e:
+            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
+        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
             module.fail_json(msg=str(e))
     else:
         module.fail_json(msg="region must be specified")
@@ -152,8 +157,6 @@ def main():
     module.exit_json(**result)
 
-from ansible.module_utils.basic import *  # noqa
-from ansible.module_utils.ec2 import *  # noqa
 
 if __name__ == '__main__':
     main()
diff --git a/cloud/amazon/ec2_vpc_nacl.py b/cloud/amazon/ec2_vpc_nacl.py
new file mode 100644
index 00000000000..1758e288c61
--- /dev/null
+++ b/cloud/amazon/ec2_vpc_nacl.py
@@ -0,0 +1,548 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+                    'supported_by': 'committer',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+module: ec2_vpc_nacl
+short_description: create and delete Network ACLs.
+description:
+  - Read the AWS documentation for Network ACLs
+    U(http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
+version_added: "2.2"
+options:
+  name:
+    description:
+      - Tagged name identifying a network ACL.
+    required: true
+  vpc_id:
+    description:
+      - VPC id of the requesting VPC.
+    required: true
+  subnets:
+    description:
+      - The list of subnets that should be associated with the network ACL.
+      - Must be specified as a list
+      - Each subnet can be specified as subnet ID, or its tagged name.
+    required: false
+  egress:
+    description:
+      - A list of rules for outgoing traffic.
+      - Each rule must be specified as a list.
+    required: false
+  ingress:
+    description:
+      - List of rules for incoming traffic.
+      - Each rule must be specified as a list.
+    required: false
+  tags:
+    description:
+      - Dictionary of tags to look for and apply when creating a network ACL.
+ required: false + state: + description: + - Creates or modifies an existing NACL + - Deletes a NACL and reassociates subnets to the default NACL + required: false + choices: ['present', 'absent'] + default: present +author: Mike Mochan(@mmochan) +extends_documentation_fragment: aws +requirements: [ botocore, boto3, json ] +''' + +EXAMPLES = ''' + +# Complete example to create and delete a network ACL +# that allows SSH, HTTP and ICMP in, and all traffic out. +- name: "Create and associate production DMZ network ACL with DMZ subnets" + ec2_vpc_nacl: + vpc_id: vpc-12345678 + name: prod-dmz-nacl + region: ap-southeast-2 + subnets: ['prod-dmz-1', 'prod-dmz-2'] + tags: + CostCode: CC1234 + Project: phoenix + Description: production DMZ + ingress: [ + # rule no, protocol, allow/deny, cidr, icmp_code, icmp_type, + # port from, port to + [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22], + [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80], + [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8], + ] + egress: [ + [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null] + ] + state: 'present' + +- name: "Remove the ingress and egress rules - defaults to deny all" + ec2_vpc_nacl: + vpc_id: vpc-12345678 + name: prod-dmz-nacl + region: ap-southeast-2 + subnets: + - prod-dmz-1 + - prod-dmz-2 + tags: + CostCode: CC1234 + Project: phoenix + Description: production DMZ + state: present + +- name: "Remove the NACL subnet associations and tags" + ec2_vpc_nacl: + vpc_id: 'vpc-12345678' + name: prod-dmz-nacl + region: ap-southeast-2 + state: present + +- name: "Delete nacl and subnet associations" + ec2_vpc_nacl: + vpc_id: vpc-12345678 + name: prod-dmz-nacl + state: absent +''' +RETURN = ''' +task: + description: The result of the create, or delete action. + returned: success + type: dictionary +''' + +try: + import botocore + import boto3 + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info + + +# Common fields for the default rule that is contained within every VPC NACL. 
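+# Rule number 32767 is the catch-all deny rule that AWS appends to every NACL
+# and that can never be deleted; it is modelled here so that rule-set
+# comparisons elsewhere in this module can filter it out.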
+DEFAULT_RULE_FIELDS = {
+    'RuleNumber': 32767,
+    'RuleAction': 'deny',
+    'CidrBlock': '0.0.0.0/0',
+    'Protocol': '-1'
+}
+
+DEFAULT_INGRESS = dict(DEFAULT_RULE_FIELDS.items() + [('Egress', False)])
+DEFAULT_EGRESS = dict(DEFAULT_RULE_FIELDS.items() + [('Egress', True)])
+
+# VPC-supported IANA protocol numbers
+# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
+PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, }
+
+
+# Utility methods
+def icmp_present(entry):
+    # Parenthesised so the six-element check applies to both protocol
+    # spellings; without it, "entry[1] == 1" alone would classify any
+    # protocol-1 rule as the short ICMP form regardless of length.
+    if len(entry) == 6 and (entry[1] == 'icmp' or entry[1] == 1):
+        return True
+
+
+def load_tags(module):
+    tags = []
+    if module.params.get('tags'):
+        for name, value in module.params.get('tags').iteritems():
+            tags.append({'Key': name, 'Value': str(value)})
+        tags.append({'Key': "Name", 'Value': module.params.get('name')})
+    else:
+        tags.append({'Key': "Name", 'Value': module.params.get('name')})
+    return tags
+
+
+def subnets_removed(nacl_id, subnets, client, module):
+    results = find_acl_by_id(nacl_id, client, module)
+    associations = results['NetworkAcls'][0]['Associations']
+    subnet_ids = [assoc['SubnetId'] for assoc in associations]
+    return [subnet for subnet in subnet_ids if subnet not in subnets]
+
+
+def subnets_added(nacl_id, subnets, client, module):
+    results = find_acl_by_id(nacl_id, client, module)
+    associations = results['NetworkAcls'][0]['Associations']
+    subnet_ids = [assoc['SubnetId'] for assoc in associations]
+    return [subnet for subnet in subnets if subnet not in subnet_ids]
+
+
+def subnets_changed(nacl, client, module):
+    changed = False
+    vpc_id = module.params.get('vpc_id')
+    nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
+    subnets = subnets_to_associate(nacl, client, module)
+    if not subnets:
+        default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
+        subnets = find_subnet_ids_by_nacl_id(nacl_id, client, module)
+        if subnets:
+            replace_network_acl_association(default_nacl_id, subnets, client, module)
+            changed = True
+            return changed
+        changed = False
+        return changed
+    subs_added = subnets_added(nacl_id, subnets, client, module)
+    if subs_added:
+        replace_network_acl_association(nacl_id, subs_added, client, module)
+        changed = True
+    subs_removed = subnets_removed(nacl_id, subnets, client, module)
+    if subs_removed:
+        default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
+        replace_network_acl_association(default_nacl_id, subs_removed, client, module)
+        changed = True
+    return changed
+
+
+def nacls_changed(nacl, client, module):
+    changed = False
+    params = dict()
+    params['egress'] = module.params.get('egress')
+    params['ingress'] = module.params.get('ingress')
+
+    nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
+    nacl = describe_network_acl(client, module)
+    entries = nacl['NetworkAcls'][0]['Entries']
+    tmp_egress = [entry for entry in entries if entry['Egress'] is True and DEFAULT_EGRESS != entry]
+    tmp_ingress = [entry for entry in entries if entry['Egress'] is False]
+    egress = [rule for rule in tmp_egress if DEFAULT_EGRESS != rule]
+    ingress = [rule for rule in tmp_ingress if DEFAULT_INGRESS != rule]
+    if rules_changed(egress, params['egress'], True, nacl_id, client, module):
+        changed = True
+    if rules_changed(ingress, params['ingress'], False, nacl_id, client, module):
+        changed = True
+    return changed
+
+
+def tags_changed(nacl_id, client, module):
+    changed = False
+    tags = dict()
+    if module.params.get('tags'):
+        tags = module.params.get('tags')
+    tags['Name'] = module.params.get('name')
+    nacl = find_acl_by_id(nacl_id, client,
module) + if nacl['NetworkAcls']: + nacl_values = [t.values() for t in nacl['NetworkAcls'][0]['Tags']] + nacl_tags = [item for sublist in nacl_values for item in sublist] + tag_values = [[key, str(value)] for key, value in tags.iteritems()] + tags = [item for sublist in tag_values for item in sublist] + if sorted(nacl_tags) == sorted(tags): + changed = False + return changed + else: + delete_tags(nacl_id, client, module) + create_tags(nacl_id, client, module) + changed = True + return changed + return changed + + +def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module): + changed = False + rules = list() + for entry in param_rules: + rules.append(process_rule_entry(entry, Egress)) + if rules == aws_rules: + return changed + else: + removed_rules = [x for x in aws_rules if x not in rules] + if removed_rules: + params = dict() + for rule in removed_rules: + params['NetworkAclId'] = nacl_id + params['RuleNumber'] = rule['RuleNumber'] + params['Egress'] = Egress + delete_network_acl_entry(params, client, module) + changed = True + added_rules = [x for x in rules if x not in aws_rules] + if added_rules: + for rule in added_rules: + rule['NetworkAclId'] = nacl_id + create_network_acl_entry(rule, client, module) + changed = True + return changed + + +def process_rule_entry(entry, Egress): + params = dict() + params['RuleNumber'] = entry[0] + params['Protocol'] = str(PROTOCOL_NUMBERS[entry[1]]) + params['RuleAction'] = entry[2] + params['Egress'] = Egress + params['CidrBlock'] = entry[3] + if icmp_present(entry): + params['IcmpTypeCode'] = {"Type": int(entry[4]), "Code": int(entry[5])} + else: + if entry[6] or entry[7]: + params['PortRange'] = {"From": entry[6], 'To': entry[7]} + return params + + +def restore_default_associations(assoc_ids, default_nacl_id, client, module): + if assoc_ids: + params = dict() + params['NetworkAclId'] = default_nacl_id[0] + for assoc_id in assoc_ids: + params['AssociationId'] = assoc_id + restore_default_acl_association(params, client, module) + return True + + +def construct_acl_entries(nacl, client, module): + for entry in module.params.get('ingress'): + params = process_rule_entry(entry, Egress=False) + params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId'] + create_network_acl_entry(params, client, module) + for rule in module.params.get('egress'): + params = process_rule_entry(rule, Egress=True) + params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId'] + create_network_acl_entry(params, client, module) + + +## Module invocations +def setup_network_acl(client, module): + changed = False + nacl = describe_network_acl(client, module) + if not nacl['NetworkAcls']: + nacl = create_network_acl(module.params.get('vpc_id'), client, module) + nacl_id = nacl['NetworkAcl']['NetworkAclId'] + create_tags(nacl_id, client, module) + subnets = subnets_to_associate(nacl, client, module) + replace_network_acl_association(nacl_id, subnets, client, module) + construct_acl_entries(nacl, client, module) + changed = True + return(changed, nacl['NetworkAcl']['NetworkAclId']) + else: + changed = False + nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] + subnet_result = subnets_changed(nacl, client, module) + nacl_result = nacls_changed(nacl, client, module) + tag_result = tags_changed(nacl_id, client, module) + if subnet_result is True or nacl_result is True or tag_result is True: + changed = True + return(changed, nacl_id) + return (changed, nacl_id) + + +def remove_network_acl(client, module): + changed = False + result = dict() + vpc_id = 
module.params.get('vpc_id') + nacl = describe_network_acl(client, module) + if nacl['NetworkAcls']: + nacl_id = nacl['NetworkAcls'][0]['NetworkAclId'] + associations = nacl['NetworkAcls'][0]['Associations'] + assoc_ids = [a['NetworkAclAssociationId'] for a in associations] + default_nacl_id = find_default_vpc_nacl(vpc_id, client, module) + if not default_nacl_id: + result = {vpc_id: "Default NACL ID not found - Check the VPC ID"} + return changed, result + if restore_default_associations(assoc_ids, default_nacl_id, client, module): + delete_network_acl(nacl_id, client, module) + changed = True + result[nacl_id] = "Successfully deleted" + return changed, result + if not assoc_ids: + delete_network_acl(nacl_id, client, module) + changed = True + result[nacl_id] = "Successfully deleted" + return changed, result + return changed, result + + +#Boto3 client methods +def create_network_acl(vpc_id, client, module): + try: + nacl = client.create_network_acl(VpcId=vpc_id) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + return nacl + + +def create_network_acl_entry(params, client, module): + try: + result = client.create_network_acl_entry(**params) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + return result + + +def create_tags(nacl_id, client, module): + try: + delete_tags(nacl_id, client, module) + client.create_tags(Resources=[nacl_id], Tags=load_tags(module)) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + + +def delete_network_acl(nacl_id, client, module): + try: + client.delete_network_acl(NetworkAclId=nacl_id) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + + +def delete_network_acl_entry(params, client, module): + try: + client.delete_network_acl_entry(**params) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + + +def delete_tags(nacl_id, client, module): + try: + client.delete_tags(Resources=[nacl_id]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + + +def describe_acl_associations(subnets, client, module): + if not subnets: + return [] + try: + results = client.describe_network_acls(Filters=[ + {'Name': 'association.subnet-id', 'Values': subnets} + ]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + associations = results['NetworkAcls'][0]['Associations'] + return [a['NetworkAclAssociationId'] for a in associations if a['SubnetId'] in subnets] + + +def describe_network_acl(client, module): + try: + nacl = client.describe_network_acls(Filters=[ + {'Name': 'tag:Name', 'Values': [module.params.get('name')]} + ]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + return nacl + + +def find_acl_by_id(nacl_id, client, module): + try: + return client.describe_network_acls(NetworkAclIds=[nacl_id]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + + +def find_default_vpc_nacl(vpc_id, client, module): + try: + response = client.describe_network_acls(Filters=[ + {'Name': 'vpc-id', 'Values': [vpc_id]}]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + nacls = response['NetworkAcls'] + return [n['NetworkAclId'] for n in nacls if n['IsDefault'] == True] + + +def find_subnet_ids_by_nacl_id(nacl_id, client, module): + try: + results = client.describe_network_acls(Filters=[ + {'Name': 'association.network-acl-id', 'Values': [nacl_id]} + ]) + except botocore.exceptions.ClientError as e: + 
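+        # Describe failures (for example an unknown NACL ID or missing IAM permissions) abort the task with the boto error text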
module.fail_json(msg=str(e)) + if results['NetworkAcls']: + associations = results['NetworkAcls'][0]['Associations'] + return [s['SubnetId'] for s in associations if s['SubnetId']] + else: + return [] + + +def replace_network_acl_association(nacl_id, subnets, client, module): + params = dict() + params['NetworkAclId'] = nacl_id + for association in describe_acl_associations(subnets, client, module): + params['AssociationId'] = association + try: + client.replace_network_acl_association(**params) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + + +def replace_network_acl_entry(entries, Egress, nacl_id, client, module): + params = dict() + for entry in entries: + params = entry + params['NetworkAclId'] = nacl_id + try: + client.replace_network_acl_entry(**params) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + + +def restore_default_acl_association(params, client, module): + try: + client.replace_network_acl_association(**params) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + + +def subnets_to_associate(nacl, client, module): + params = list(module.params.get('subnets')) + if not params: + return [] + if params[0].startswith("subnet-"): + try: + subnets = client.describe_subnets(Filters=[ + {'Name': 'subnet-id', 'Values': params}]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + else: + try: + subnets = client.describe_subnets(Filters=[ + {'Name': 'tag:Name', 'Values': params}]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + return [s['SubnetId'] for s in subnets['Subnets'] if s['SubnetId']] + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + vpc_id=dict(required=True), + name=dict(required=True), + subnets=dict(required=False, type='list', default=list()), + tags=dict(required=False, type='dict'), + ingress=dict(required=False, type='list', default=list()), + egress=dict(required=False, type='list', default=list(),), + state=dict(default='present', choices=['present', 'absent']), + ), + ) + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO3: + module.fail_json(msg='json, botocore and boto3 are required.') + state = module.params.get('state').lower() + try: + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) + except botocore.exceptions.NoCredentialsError as e: + module.fail_json(msg="Can't authorize connection - %s" % str(e)) + + invocations = { + "present": setup_network_acl, + "absent": remove_network_acl + } + (changed, results) = invocations[state](client, module) + module.exit_json(changed=changed, nacl_id=results) + + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_vpc_nacl_facts.py b/cloud/amazon/ec2_vpc_nacl_facts.py new file mode 100644 index 00000000000..e7f6a5b2380 --- /dev/null +++ b/cloud/amazon/ec2_vpc_nacl_facts.py @@ -0,0 +1,205 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ec2_vpc_nacl_facts +short_description: Gather facts about Network ACLs in an AWS VPC +description: + - Gather facts about Network ACLs in an AWS VPC +version_added: "2.2" +author: "Brad Davidson (@brandond)" +requirements: [ boto3 ] +options: + nacl_ids: + description: + - A list of Network ACL IDs to retrieve facts about. + required: false + default: [] + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See \ + U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkAcls.html) for possible filters. Filter \ + names and values are case sensitive. + required: false + default: {} +notes: + - By default, the module will return all Network ACLs. + +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather facts about all Network ACLs: +- name: Get All NACLs + register: all_nacls + ec2_vpc_nacl_facts: + region: us-west-2 + +# Retrieve default Network ACLs: +- name: Get Default NACLs + register: default_nacls + ec2_vpc_nacl_facts: + region: us-west-2 + filters: + 'default': 'true' +''' + +RETURN = ''' +nacl: + description: Returns an array of complex objects as described below. + returned: success + type: list of complex + contains: + nacl_id: + description: The ID of the Network Access Control List. + returned: always + type: string + vpc_id: + description: The ID of the VPC that the NACL is attached to. + returned: always + type: string + is_default: + description: True if the NACL is the default for its VPC. + returned: always + type: boolean + tags: + description: A dict of tags associated with the NACL. + returned: always + type: dict + subnets: + description: A list of subnet IDs that are associated with the NACL. + returned: always + type: list of string + ingress: + description: A list of NACL ingress rules. + returned: always + type: list of list + egress: + description: A list of NACL egress rules. 
+ returned: always + type: list of list +''' + +try: + import boto3 + from botocore.exceptions import ClientError, NoCredentialsError + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +# VPC-supported IANA protocol numbers +# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml +PROTOCOL_NAMES = {'-1': 'all', '1': 'icmp', '6': 'tcp', '17': 'udp'} + +def list_ec2_vpc_nacls(connection, module): + + nacl_ids = module.params.get("nacl_ids") + filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + + try: + nacls = connection.describe_network_acls(NetworkAclIds=nacl_ids, Filters=filters) + except (ClientError, NoCredentialsError) as e: + module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_nacls = [] + for nacl in nacls['NetworkAcls']: + snaked_nacls.append(camel_dict_to_snake_dict(nacl)) + + # Turn the boto3 result in to ansible friendly tag dictionary + for nacl in snaked_nacls: + if 'tags' in nacl: + nacl['tags'] = boto3_tag_list_to_ansible_dict(nacl['tags']) + if 'entries' in nacl: + nacl['egress'] = [nacl_entry_to_list(e) for e in nacl['entries'] + if e['rule_number'] != 32767 and e['egress']] + nacl['ingress'] = [nacl_entry_to_list(e) for e in nacl['entries'] + if e['rule_number'] != 32767 and not e['egress']] + del nacl['entries'] + if 'associations' in nacl: + nacl['subnets'] = [a['subnet_id'] for a in nacl['associations']] + del nacl['associations'] + if 'network_acl_id' in nacl: + nacl['nacl_id'] = nacl['network_acl_id'] + del nacl['network_acl_id'] + + module.exit_json(nacls=snaked_nacls) + +def nacl_entry_to_list(entry): + + elist = [entry['rule_number'], + PROTOCOL_NAMES[entry['protocol']], + entry['rule_action'], + entry['cidr_block'] + ] + if entry['protocol'] == '1': + elist = elist + [-1, -1] + else: + elist = elist + [None, None, None, None] + + if 'icmp_type_code' in entry: + elist[4] = entry['icmp_type_code']['type'] + elist[5] = entry['icmp_type_code']['code'] + + if 'port_range' in entry: + elist[6] = entry['port_range']['from'] + elist[7] = entry['port_range']['to'] + + return elist + +def main(): + + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + nacl_ids=dict(default=[], type='list'), + filters=dict(default={}, type='dict') + ) + ) + + module = AnsibleModule(argument_spec=argument_spec, + mutually_exclusive=[ + ['nacl_ids', 'filters'] + ] + ) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) + + if region: + connection = boto3_conn(module, conn_type='client', resource='ec2', + region=region, endpoint=ec2_url, **aws_connect_params) + else: + module.fail_json(msg="region must be specified") + + list_ec2_vpc_nacls(connection, module) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_vpc_nat_gateway.py b/cloud/amazon/ec2_vpc_nat_gateway.py new file mode 100644 index 00000000000..f3f95c107e6 --- /dev/null +++ b/cloud/amazon/ec2_vpc_nat_gateway.py @@ -0,0 +1,1089 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ec2_vpc_nat_gateway +short_description: Manage AWS VPC NAT Gateways. +description: + - Ensure the state of AWS VPC NAT Gateways based on their id, allocation and subnet ids. +version_added: "2.2" +requirements: [boto3, botocore] +options: + state: + description: + - Ensure NAT Gateway is present or absent. + required: false + default: "present" + choices: ["present", "absent"] + nat_gateway_id: + description: + - The id AWS dynamically allocates to the NAT Gateway on creation. + This is required when state is set to absent. + required: false + default: None + subnet_id: + description: + - The id of the subnet to create the NAT Gateway in. This is required + with the present option. + required: false + default: None + allocation_id: + description: + - The id of the elastic IP allocation. If neither this nor the + eip_address is passed, an EIP is generated for this NAT Gateway. + required: false + default: None + eip_address: + description: + - The elastic IP address of the EIP you want attached to this NAT Gateway. + If this is not passed and the allocation_id is not passed, + an EIP is generated for this NAT Gateway. + required: false + if_exist_do_not_create: + description: + - If a NAT Gateway already exists in the subnet_id, do not create a new one. + required: false + default: false + release_eip: + description: + - Deallocate the EIP from the VPC. + - Option is only valid with the absent state. + - You should use this with the wait option, since you cannot release an address while a delete operation is happening. + required: false + default: false + wait: + description: + - Wait for operation to complete before returning. + required: false + default: false + wait_timeout: + description: + - How many seconds to wait for an operation to complete before timing out. + required: false + default: 320 + client_token: + description: + - Optional unique token to be used during create to ensure idempotency. + When specifying this option, ensure you specify the eip_address parameter + as well, otherwise any subsequent runs will fail. + required: false + +author: + - "Allen Sanabria (@linuxdynasty)" + - "Jon Hadfield (@jonhadfield)" + - "Karen Cheng (@Etherdaemon)" +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Create new nat gateway with client token. + ec2_vpc_nat_gateway: + state: present + subnet_id: subnet-12345678 + eip_address: 52.1.1.1 + region: ap-southeast-2 + client_token: abcd-12345678 + register: new_nat_gateway + +- name: Create new nat gateway using an allocation-id. + ec2_vpc_nat_gateway: + state: present + subnet_id: subnet-12345678 + allocation_id: eipalloc-12345678 + region: ap-southeast-2 + register: new_nat_gateway + +- name: Create new nat gateway, using an EIP address and wait for available status. 
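+  # wait: yes here makes the module poll (via wait_for_status below) until the gateway is 'available' or wait_timeout elapses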
+ ec2_vpc_nat_gateway: + state: present + subnet_id: subnet-12345678 + eip_address: 52.1.1.1 + wait: yes + region: ap-southeast-2 + register: new_nat_gateway + +- name: Create new nat gateway and allocate new EIP. + ec2_vpc_nat_gateway: + state: present + subnet_id: subnet-12345678 + wait: yes + region: ap-southeast-2 + register: new_nat_gateway + +- name: Create new nat gateway and allocate new EIP if a nat gateway does not yet exist in the subnet. + ec2_vpc_nat_gateway: + state: present + subnet_id: subnet-12345678 + wait: yes + region: ap-southeast-2 + if_exist_do_not_create: true + register: new_nat_gateway + +- name: Delete nat gateway using discovered nat gateways from facts module. + ec2_vpc_nat_gateway: + state: absent + region: ap-southeast-2 + wait: yes + nat_gateway_id: "{{ item.NatGatewayId }}" + release_eip: yes + register: delete_nat_gateway_result + with_items: "{{ gateways_to_remove.result }}" + +- name: Delete nat gateway and wait for deleted status. + ec2_vpc_nat_gateway: + state: absent + nat_gateway_id: nat-12345678 + wait: yes + wait_timeout: 500 + region: ap-southeast-2 + +- name: Delete nat gateway and release EIP. + ec2_vpc_nat_gateway: + state: absent + nat_gateway_id: nat-12345678 + release_eip: yes + wait: yes + wait_timeout: 300 + region: ap-southeast-2 +''' + +RETURN = ''' +create_time: + description: The ISO 8601 date time format in UTC. + returned: In all cases. + type: string + sample: "2016-03-05T05:19:20.282000+00:00" +nat_gateway_id: + description: id of the VPC NAT Gateway + returned: In all cases. + type: string + sample: "nat-0d1e3a878585988f8" +subnet_id: + description: id of the Subnet + returned: In all cases. + type: string + sample: "subnet-12345" +state: + description: The current state of the NAT Gateway. + returned: In all cases. + type: string + sample: "available" +vpc_id: + description: id of the VPC. + returned: In all cases. + type: string + sample: "vpc-12345" +nat_gateway_addresses: + description: List of dictionaries containing the public_ip, network_interface_id, private_ip, and allocation_id. + returned: In all cases. 
+ type: string + sample: [ + { + 'public_ip': '52.52.52.52', + 'network_interface_id': 'eni-12345', + 'private_ip': '10.0.0.100', + 'allocation_id': 'eipalloc-12345' + } + ] +''' + +try: + import botocore + import boto3 + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +import datetime +import random +import re +import time + +from dateutil.tz import tzutc + +DRY_RUN_GATEWAYS = [ + { + "nat_gateway_id": "nat-123456789", + "subnet_id": "subnet-123456789", + "nat_gateway_addresses": [ + { + "public_ip": "55.55.55.55", + "network_interface_id": "eni-1234567", + "private_ip": "10.0.0.102", + "allocation_id": "eipalloc-1234567" + } + ], + "state": "available", + "create_time": "2016-03-05T05:19:20.282000+00:00", + "vpc_id": "vpc-12345678" + } +] +DRY_RUN_GATEWAY_UNCONVERTED = [ + { + 'VpcId': 'vpc-12345678', + 'State': 'available', + 'NatGatewayId': 'nat-123456789', + 'SubnetId': 'subnet-123456789', + 'NatGatewayAddresses': [ + { + 'PublicIp': '55.55.55.55', + 'NetworkInterfaceId': 'eni-1234567', + 'AllocationId': 'eipalloc-1234567', + 'PrivateIp': '10.0.0.102' + } + ], + 'CreateTime': datetime.datetime(2016, 3, 5, 5, 19, 20, 282000, tzinfo=tzutc()) + } +] + +DRY_RUN_ALLOCATION_UNCONVERTED = { + 'Addresses': [ + { + 'PublicIp': '55.55.55.55', + 'Domain': 'vpc', + 'AllocationId': 'eipalloc-1234567' + } + ] +} + +DRY_RUN_MSGS = 'DryRun Mode:' + + +def convert_to_lower(data): + """Convert all uppercase keys in dict with lowercase_ + + Args: + data (dict): Dictionary with keys that have upper cases in them + Example.. FooBar == foo_bar + if a val is of type datetime.datetime, it will be converted to + the ISO 8601 + + Basic Usage: + >>> test = {'FooBar': []} + >>> test = convert_to_lower(test) + { + 'foo_bar': [] + } + + Returns: + Dictionary + """ + results = dict() + if isinstance(data, dict): + for key, val in data.items(): + key = re.sub(r'(([A-Z]{1,3}){1})', r'_\1', key).lower() + if key[0] == '_': + key = key[1:] + if isinstance(val, datetime.datetime): + results[key] = val.isoformat() + elif isinstance(val, dict): + results[key] = convert_to_lower(val) + elif isinstance(val, list): + converted = list() + for item in val: + converted.append(convert_to_lower(item)) + results[key] = converted + else: + results[key] = val + return results + + +def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None, + states=None, check_mode=False): + """Retrieve a list of NAT Gateways + Args: + client (botocore.client.EC2): Boto3 client + + Kwargs: + subnet_id (str): The subnet_id the nat resides in. + nat_gateway_id (str): The Amazon nat id. 
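+        check_mode (bool): if set to true, do not run anything and +            falsify the results.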
+ states (list): States available (pending, failed, available, deleting, and deleted) + default=None + + Basic Usage: + >>> client = boto3.client('ec2') + >>> subnet_id = 'subnet-12345678' + >>> get_nat_gateways(client, subnet_id) + [ + true, + "", + { + "nat_gateway_id": "nat-123456789", + "subnet_id": "subnet-123456789", + "nat_gateway_addresses": [ + { + "public_ip": "55.55.55.55", + "network_interface_id": "eni-1234567", + "private_ip": "10.0.0.102", + "allocation_id": "eipalloc-1234567" + } + ], + "state": "deleted", + "create_time": "2016-03-05T00:33:21.209000+00:00", + "delete_time": "2016-03-05T00:36:37.329000+00:00", + "vpc_id": "vpc-12345678" + } + + Returns: + Tuple (bool, str, list) + """ + params = dict() + err_msg = "" + gateways_retrieved = False + existing_gateways = list() + if not states: + states = ['available', 'pending'] + if nat_gateway_id: + params['NatGatewayIds'] = [nat_gateway_id] + else: + params['Filter'] = [ + { + 'Name': 'subnet-id', + 'Values': [subnet_id] + }, + { + 'Name': 'state', + 'Values': states + } + ] + + try: + if not check_mode: + gateways = client.describe_nat_gateways(**params)['NatGateways'] + if gateways: + for gw in gateways: + existing_gateways.append(convert_to_lower(gw)) + gateways_retrieved = True + else: + gateways_retrieved = True + if nat_gateway_id: + if DRY_RUN_GATEWAYS[0]['nat_gateway_id'] == nat_gateway_id: + existing_gateways = DRY_RUN_GATEWAYS + elif subnet_id: + if DRY_RUN_GATEWAYS[0]['subnet_id'] == subnet_id: + existing_gateways = DRY_RUN_GATEWAYS + err_msg = '{0} Retrieving gateways'.format(DRY_RUN_MSGS) + + except botocore.exceptions.ClientError as e: + err_msg = str(e) + + return gateways_retrieved, err_msg, existing_gateways + + +def wait_for_status(client, wait_timeout, nat_gateway_id, status, + check_mode=False): + """Wait for the NAT Gateway to reach a status + Args: + client (botocore.client.EC2): Boto3 client + wait_timeout (int): Number of seconds to wait, until this timeout is reached. + nat_gateway_id (str): The Amazon nat id. + status (str): The status to wait for. + examples. 
status=available, status=deleted + + Basic Usage: + >>> client = boto3.client('ec2') + >>> subnet_id = 'subnet-12345678' + >>> allocation_id = 'eipalloc-12345678' + >>> wait_for_status(client, subnet_id, allocation_id) + [ + true, + "", + { + "nat_gateway_id": "nat-123456789", + "subnet_id": "subnet-1234567", + "nat_gateway_addresses": [ + { + "public_ip": "55.55.55.55", + "network_interface_id": "eni-1234567", + "private_ip": "10.0.0.102", + "allocation_id": "eipalloc-12345678" + } + ], + "state": "deleted", + "create_time": "2016-03-05T00:33:21.209000+00:00", + "delete_time": "2016-03-05T00:36:37.329000+00:00", + "vpc_id": "vpc-12345677" + } + ] + + Returns: + Tuple (bool, str, dict) + """ + polling_increment_secs = 5 + wait_timeout = time.time() + wait_timeout + status_achieved = False + nat_gateway = dict() + states = ['pending', 'failed', 'available', 'deleting', 'deleted'] + err_msg = "" + + while wait_timeout > time.time(): + try: + gws_retrieved, err_msg, nat_gateways = ( + get_nat_gateways( + client, nat_gateway_id=nat_gateway_id, + states=states, check_mode=check_mode + ) + ) + if gws_retrieved and nat_gateways: + nat_gateway = nat_gateways[0] + if check_mode: + nat_gateway['state'] = status + + if nat_gateway.get('state') == status: + status_achieved = True + break + + elif nat_gateway.get('state') == 'failed': + err_msg = nat_gateway.get('failure_message') + break + + elif nat_gateway.get('state') == 'pending': + if 'failure_message' in nat_gateway: + err_msg = nat_gateway.get('failure_message') + status_achieved = False + break + + else: + time.sleep(polling_increment_secs) + + except botocore.exceptions.ClientError as e: + err_msg = str(e) + + if not status_achieved: + err_msg = "Wait time out reached, while waiting for results" + + return status_achieved, err_msg, nat_gateway + + +def gateway_in_subnet_exists(client, subnet_id, allocation_id=None, + check_mode=False): + """Retrieve all NAT Gateways for a subnet. + Args: + subnet_id (str): The subnet_id the nat resides in. + + Kwargs: + allocation_id (str): The EIP Amazon identifier. + default = None + + Basic Usage: + >>> client = boto3.client('ec2') + >>> subnet_id = 'subnet-1234567' + >>> allocation_id = 'eipalloc-1234567' + >>> gateway_in_subnet_exists(client, subnet_id, allocation_id) + ( + [ + { + "nat_gateway_id": "nat-123456789", + "subnet_id": "subnet-123456789", + "nat_gateway_addresses": [ + { + "public_ip": "55.55.55.55", + "network_interface_id": "eni-1234567", + "private_ip": "10.0.0.102", + "allocation_id": "eipalloc-1234567" + } + ], + "state": "deleted", + "create_time": "2016-03-05T00:33:21.209000+00:00", + "delete_time": "2016-03-05T00:36:37.329000+00:00", + "vpc_id": "vpc-1234567" + } + ], + False + ) + + Returns: + Tuple (list, bool) + """ + allocation_id_exists = False + gateways = [] + states = ['available', 'pending'] + gws_retrieved, _, gws = ( + get_nat_gateways( + client, subnet_id, states=states, check_mode=check_mode + ) + ) + if not gws_retrieved: + return gateways, allocation_id_exists + for gw in gws: + for address in gw['nat_gateway_addresses']: + if allocation_id: + if address.get('allocation_id') == allocation_id: + allocation_id_exists = True + gateways.append(gw) + else: + gateways.append(gw) + + return gateways, allocation_id_exists + + +def get_eip_allocation_id_by_address(client, eip_address, check_mode=False): + """Release an EIP from your EIP Pool + Args: + client (botocore.client.EC2): Boto3 client + eip_address (str): The Elastic IP Address of the EIP. 
+ + Kwargs: + check_mode (bool): if set to true, do not run anything and + falsify the results. + + Basic Usage: + >>> client = boto3.client('ec2') + >>> eip_address = '52.87.29.36' + >>> get_eip_allocation_id_by_address(client, eip_address) + 'eipalloc-36014da3' + + Returns: + Tuple (str, str) + """ + params = { + 'PublicIps': [eip_address], + } + allocation_id = None + err_msg = "" + try: + if not check_mode: + allocations = client.describe_addresses(**params)['Addresses'] + if len(allocations) == 1: + allocation = allocations[0] + else: + allocation = None + else: + dry_run_eip = ( + DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]['PublicIp'] + ) + if dry_run_eip == eip_address: + allocation = DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0] + else: + allocation = None + if allocation: + if allocation.get('Domain') != 'vpc': + err_msg = ( + "EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP" + .format(eip_address) + ) + else: + allocation_id = allocation.get('AllocationId') + else: + err_msg = ( + "EIP {0} does not exist".format(eip_address) + ) + + except botocore.exceptions.ClientError as e: + err_msg = str(e) + + return allocation_id, err_msg + + +def allocate_eip_address(client, check_mode=False): + """Release an EIP from your EIP Pool + Args: + client (botocore.client.EC2): Boto3 client + + Kwargs: + check_mode (bool): if set to true, do not run anything and + falsify the results. + + Basic Usage: + >>> client = boto3.client('ec2') + >>> allocate_eip_address(client) + True + + Returns: + Tuple (bool, str) + """ + ip_allocated = False + new_eip = None + err_msg = '' + params = { + 'Domain': 'vpc', + } + try: + if check_mode: + ip_allocated = True + random_numbers = ( + ''.join(str(x) for x in random.sample(range(0, 9), 7)) + ) + new_eip = 'eipalloc-{0}'.format(random_numbers) + else: + new_eip = client.allocate_address(**params)['AllocationId'] + ip_allocated = True + err_msg = 'eipalloc id {0} created'.format(new_eip) + + except botocore.exceptions.ClientError as e: + err_msg = str(e) + + return ip_allocated, err_msg, new_eip + + +def release_address(client, allocation_id, check_mode=False): + """Release an EIP from your EIP Pool + Args: + client (botocore.client.EC2): Boto3 client + allocation_id (str): The eip Amazon identifier. + + Kwargs: + check_mode (bool): if set to true, do not run anything and + falsify the results. + + Basic Usage: + >>> client = boto3.client('ec2') + >>> allocation_id = "eipalloc-123456" + >>> release_address(client, allocation_id) + True + + Returns: + Boolean, string + """ + err_msg = '' + if check_mode: + return True, '' + + ip_released = False + params = { + 'AllocationId': allocation_id, + } + try: + client.release_address(**params) + ip_released = True + except botocore.exceptions.ClientError as e: + err_msg = str(e) + + return ip_released, err_msg + + +def create(client, subnet_id, allocation_id, client_token=None, + wait=False, wait_timeout=0, if_exist_do_not_create=False, + check_mode=False): + """Create an Amazon NAT Gateway. + Args: + client (botocore.client.EC2): Boto3 client + subnet_id (str): The subnet_id the nat resides in. + allocation_id (str): The eip Amazon identifier. + + Kwargs: + if_exist_do_not_create (bool): if a nat gateway already exists in this + subnet, than do not create another one. + default = False + wait (bool): Wait for the nat to be in the deleted state before returning. + default = False + wait_timeout (int): Number of seconds to wait, until this timeout is reached. 
+ default = 0 + client_token (str): + default = None + + Basic Usage: + >>> client = boto3.client('ec2') + >>> subnet_id = 'subnet-1234567' + >>> allocation_id = 'eipalloc-1234567' + >>> create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500) + [ + true, + "", + { + "nat_gateway_id": "nat-123456789", + "subnet_id": "subnet-1234567", + "nat_gateway_addresses": [ + { + "public_ip": "55.55.55.55", + "network_interface_id": "eni-1234567", + "private_ip": "10.0.0.102", + "allocation_id": "eipalloc-1234567" + } + ], + "state": "deleted", + "create_time": "2016-03-05T00:33:21.209000+00:00", + "delete_time": "2016-03-05T00:36:37.329000+00:00", + "vpc_id": "vpc-1234567" + } + ] + + Returns: + Tuple (bool, str, list) + """ + params = { + 'SubnetId': subnet_id, + 'AllocationId': allocation_id + } + request_time = datetime.datetime.utcnow() + changed = False + success = False + token_provided = False + err_msg = "" + + if client_token: + token_provided = True + params['ClientToken'] = client_token + + try: + if not check_mode: + result = client.create_nat_gateway(**params)["NatGateway"] + else: + result = DRY_RUN_GATEWAY_UNCONVERTED[0] + result['CreateTime'] = datetime.datetime.utcnow() + result['NatGatewayAddresses'][0]['AllocationId'] = allocation_id + result['SubnetId'] = subnet_id + + success = True + changed = True + create_time = result['CreateTime'].replace(tzinfo=None) + if token_provided and (request_time > create_time): + changed = False + elif wait: + success, err_msg, result = ( + wait_for_status( + client, wait_timeout, result['NatGatewayId'], 'available', + check_mode=check_mode + ) + ) + if success: + err_msg = ( + 'NAT gateway {0} created'.format(result['nat_gateway_id']) + ) + + except botocore.exceptions.ClientError as e: + if "IdempotentParameterMismatch" in e.message: + err_msg = ( + 'NAT Gateway does not support update and token has already been provided' + ) + else: + err_msg = str(e) + success = False + changed = False + result = None + + return success, changed, err_msg, result + + +def pre_create(client, subnet_id, allocation_id=None, eip_address=None, + if_exist_do_not_create=False, wait=False, wait_timeout=0, + client_token=None, check_mode=False): + """Create an Amazon NAT Gateway. + Args: + client (botocore.client.EC2): Boto3 client + subnet_id (str): The subnet_id the nat resides in. + + Kwargs: + allocation_id (str): The EIP Amazon identifier. + default = None + eip_address (str): The Elastic IP Address of the EIP. + default = None + if_exist_do_not_create (bool): if a nat gateway already exists in this + subnet, than do not create another one. + default = False + wait (bool): Wait for the nat to be in the deleted state before returning. + default = False + wait_timeout (int): Number of seconds to wait, until this timeout is reached. 
+ default = 0 + client_token (str): + default = None + + Basic Usage: + >>> client = boto3.client('ec2') + >>> subnet_id = 'subnet-w4t12897' + >>> allocation_id = 'eipalloc-36014da3' + >>> pre_create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500) + [ + true, + "", + { + "nat_gateway_id": "nat-03835afb6e31df79b", + "subnet_id": "subnet-w4t12897", + "nat_gateway_addresses": [ + { + "public_ip": "52.87.29.36", + "network_interface_id": "eni-5579742d", + "private_ip": "10.0.0.102", + "allocation_id": "eipalloc-36014da3" + } + ], + "state": "deleted", + "create_time": "2016-03-05T00:33:21.209000+00:00", + "delete_time": "2016-03-05T00:36:37.329000+00:00", + "vpc_id": "vpc-w68571b5" + } + ] + + Returns: + Tuple (bool, bool, str, list) + """ + success = False + changed = False + err_msg = "" + results = list() + + if not allocation_id and not eip_address: + existing_gateways, allocation_id_exists = ( + gateway_in_subnet_exists(client, subnet_id, check_mode=check_mode) + ) + + if len(existing_gateways) > 0 and if_exist_do_not_create: + success = True + changed = False + results = existing_gateways[0] + err_msg = ( + 'NAT Gateway {0} already exists in subnet_id {1}' + .format( + existing_gateways[0]['nat_gateway_id'], subnet_id + ) + ) + return success, changed, err_msg, results + else: + success, err_msg, allocation_id = ( + allocate_eip_address(client, check_mode=check_mode) + ) + if not success: + return success, False, err_msg, dict() + + elif eip_address or allocation_id: + if eip_address and not allocation_id: + allocation_id, err_msg = ( + get_eip_allocation_id_by_address( + client, eip_address, check_mode=check_mode + ) + ) + if not allocation_id: + success = False + changed = False + return success, changed, err_msg, dict() + + existing_gateways, allocation_id_exists = ( + gateway_in_subnet_exists( + client, subnet_id, allocation_id, check_mode=check_mode + ) + ) + if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create): + success = True + changed = False + results = existing_gateways[0] + err_msg = ( + 'NAT Gateway {0} already exists in subnet_id {1}' + .format( + existing_gateways[0]['nat_gateway_id'], subnet_id + ) + ) + return success, changed, err_msg, results + + success, changed, err_msg, results = create( + client, subnet_id, allocation_id, client_token, + wait, wait_timeout, if_exist_do_not_create, check_mode=check_mode + ) + + return success, changed, err_msg, results + + +def remove(client, nat_gateway_id, wait=False, wait_timeout=0, + release_eip=False, check_mode=False): + """Delete an Amazon NAT Gateway. + Args: + client (botocore.client.EC2): Boto3 client + nat_gateway_id (str): The Amazon nat id. + + Kwargs: + wait (bool): Wait for the nat to be in the deleted state before returning. + wait_timeout (int): Number of seconds to wait, until this timeout is reached. + release_eip (bool): Once the nat has been deleted, you can deallocate the eip from the vpc. 
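+        check_mode (bool): if set to true, do not run anything and +            falsify the results.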
+ + Basic Usage: + >>> client = boto3.client('ec2') + >>> nat_gw_id = 'nat-03835afb6e31df79b' + >>> remove(client, nat_gw_id, wait=True, wait_timeout=500, release_eip=True) + [ + true, + "", + { + "nat_gateway_id": "nat-03835afb6e31df79b", + "subnet_id": "subnet-w4t12897", + "nat_gateway_addresses": [ + { + "public_ip": "52.87.29.36", + "network_interface_id": "eni-5579742d", + "private_ip": "10.0.0.102", + "allocation_id": "eipalloc-36014da3" + } + ], + "state": "deleted", + "create_time": "2016-03-05T00:33:21.209000+00:00", + "delete_time": "2016-03-05T00:36:37.329000+00:00", + "vpc_id": "vpc-w68571b5" + } + ] + + Returns: + Tuple (bool, bool, str, list) + """ + params = { + 'NatGatewayId': nat_gateway_id + } + success = False + changed = False + err_msg = "" + results = list() + allocation_id = None + states = ['pending', 'available'] + try: + exist, _, gw = ( + get_nat_gateways( + client, nat_gateway_id=nat_gateway_id, + states=states, check_mode=check_mode + ) + ) + if exist and len(gw) == 1: + results = gw[0] + if not check_mode: + client.delete_nat_gateway(**params) + + allocation_id = ( + results['nat_gateway_addresses'][0]['allocation_id'] + ) + changed = True + success = True + err_msg = ( + 'NAT gateway {0} is in a deleting state. Delete was successful' + .format(nat_gateway_id) + ) + + if wait: + status_achieved, err_msg, results = ( + wait_for_status( + client, wait_timeout, nat_gateway_id, 'deleted', + check_mode=check_mode + ) + ) + if status_achieved: + err_msg = ( + 'NAT gateway {0} was deleted successfully' + .format(nat_gateway_id) + ) + + except botocore.exceptions.ClientError as e: + err_msg = str(e) + + if release_eip and allocation_id: + eip_released, eip_err = ( + release_address(client, allocation_id, check_mode) + ) + if not eip_released: + err_msg = ( + "{0}: Failed to release EIP {1}: {2}" + .format(err_msg, allocation_id, eip_err) + ) + success = False + + return success, changed, err_msg, results + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + subnet_id=dict(type='str'), + eip_address=dict(type='str'), + allocation_id=dict(type='str'), + if_exist_do_not_create=dict(type='bool', default=False), + state=dict(default='present', choices=['present', 'absent']), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int', default=320, required=False), + release_eip=dict(type='bool', default=False), + nat_gateway_id=dict(type='str'), + client_token=dict(type='str'), + ) + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ['allocation_id', 'eip_address'] + ] + ) + + # Validate Requirements + if not HAS_BOTO3: + module.fail_json(msg='botocore/boto3 is required.') + + state = module.params.get('state').lower() + check_mode = module.check_mode + subnet_id = module.params.get('subnet_id') + allocation_id = module.params.get('allocation_id') + eip_address = module.params.get('eip_address') + nat_gateway_id = module.params.get('nat_gateway_id') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + release_eip = module.params.get('release_eip') + client_token = module.params.get('client_token') + if_exist_do_not_create = module.params.get('if_exist_do_not_create') + + try: + region, ec2_url, aws_connect_kwargs = ( + get_aws_connection_info(module, boto3=True) + ) + client = ( + boto3_conn( + module, conn_type='client', resource='ec2', + region=region, endpoint=ec2_url, **aws_connect_kwargs + ) + ) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Boto3 
Client Error - " + str(e)) + + changed = False + err_msg = '' + + if state == 'present': + if not subnet_id: + module.fail_json(msg='subnet_id is required for creation') + + success, changed, err_msg, results = ( + pre_create( + client, subnet_id, allocation_id, eip_address, + if_exist_do_not_create, wait, wait_timeout, + client_token, check_mode=check_mode + ) + ) + else: + if not nat_gateway_id: + module.fail_json(msg='nat_gateway_id is required for removal') + + success, changed, err_msg, results = ( + remove( + client, nat_gateway_id, wait, wait_timeout, release_eip, + check_mode=check_mode + ) + ) + + if not success: + module.fail_json( + msg=err_msg, success=success, changed=changed + ) + else: + module.exit_json( + msg=err_msg, success=success, changed=changed, **results + ) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_vpc_net_facts.py b/cloud/amazon/ec2_vpc_net_facts.py new file mode 100644 index 00000000000..14e1c4920f5 --- /dev/null +++ b/cloud/amazon/ec2_vpc_net_facts.py @@ -0,0 +1,131 @@ +#!/usr/bin/python +# +# This is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ec2_vpc_net_facts +short_description: Gather facts about ec2 VPCs in AWS +description: + - Gather facts about ec2 VPCs in AWS +version_added: "2.1" +author: "Rob White (@wimnat)" +options: + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters. + required: false + default: null + +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
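+ +# Gather facts about VPCs matching a given CIDR block (extra illustrative example; +# 'cidr' is one of the DescribeVpcs filters documented at the link above) +- ec2_vpc_net_facts: + filters: + cidr: 10.0.0.0/16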
+ +# Gather facts about all VPCs +- ec2_vpc_net_facts: + +# Gather facts about a particular VPC using VPC ID +- ec2_vpc_net_facts: + filters: + vpc-id: vpc-00112233 + +# Gather facts about any VPC with a tag key Name and value Example +- ec2_vpc_net_facts: + filters: + "tag:Name": Example + +''' + +try: + import boto.vpc + from boto.exception import BotoServerError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info + + +def get_vpc_info(vpc): + + try: + classic_link = vpc.classic_link_enabled + except AttributeError: + classic_link = False + + vpc_info = { 'id': vpc.id, + 'instance_tenancy': vpc.instance_tenancy, + 'classic_link_enabled': classic_link, + 'dhcp_options_id': vpc.dhcp_options_id, + 'state': vpc.state, + 'is_default': vpc.is_default, + 'cidr_block': vpc.cidr_block, + 'tags': vpc.tags + } + + return vpc_info + +def list_ec2_vpcs(connection, module): + + filters = module.params.get("filters") + vpc_dict_array = [] + + try: + all_vpcs = connection.get_all_vpcs(filters=filters) + except BotoServerError as e: + module.fail_json(msg=e.message) + + for vpc in all_vpcs: + vpc_dict_array.append(get_vpc_info(vpc)) + + module.exit_json(vpcs=vpc_dict_array) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + filters=dict(default=None, type='dict') + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if region: + try: + connection = connect_to_aws(boto.vpc, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, StandardError) as e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") + + list_ec2_vpcs(connection, module) + + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_vpc_peer.py b/cloud/amazon/ec2_vpc_peer.py new file mode 100644 index 00000000000..6615ba38a27 --- /dev/null +++ b/cloud/amazon/ec2_vpc_peer.py @@ -0,0 +1,367 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +module: ec2_vpc_peer +short_description: create, delete, accept, and reject VPC peering connections between two VPCs. +description: + - Read the AWS documentation for VPC Peering Connections + U(http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-peering.html) +version_added: "2.2" +options: + vpc_id: + description: + - VPC id of the requesting VPC. + required: false + peer_vpc_id: + description: + - VPC id of the accepting VPC. + required: false + peer_owner_id: + description: + - The AWS account number for cross account peering. 
+ required: false + tags: + description: + - Dictionary of tags to look for and apply when creating a Peering Connection. + required: false + state: + description: + - Create, delete, accept, reject a peering connection. + required: false + default: present + choices: ['present', 'absent', 'accept', 'reject'] +author: Mike Mochan (@mmochan) +extends_documentation_fragment: aws +requirements: [ botocore, boto3, json ] +''' + +EXAMPLES = ''' +# Complete example to create and accept a local peering connection. +- name: Create local account VPC peering Connection + ec2_vpc_peer: + region: ap-southeast-2 + vpc_id: vpc-12345678 + peer_vpc_id: vpc-87654321 + state: present + tags: + Name: Peering connection for VPC 21 to VPC 22 + CostCode: CC1234 + Project: phoenix + register: vpc_peer + +- name: Accept local VPC peering request + ec2_vpc_peer: + region: ap-southeast-2 + peering_id: "{{ vpc_peer.peering_id }}" + state: accept + register: action_peer + +# Complete example to delete a local peering connection. +- name: Create local account VPC peering Connection + ec2_vpc_peer: + region: ap-southeast-2 + vpc_id: vpc-12345678 + peer_vpc_id: vpc-87654321 + state: present + tags: + Name: Peering connection for VPC 21 to VPC 22 + CostCode: CC1234 + Project: phoenix + register: vpc_peer + +- name: Delete a local VPC peering Connection + ec2_vpc_peer: + region: ap-southeast-2 + peering_id: "{{ vpc_peer.peering_id }}" + state: absent + register: vpc_peer + +# Complete example to create and accept a cross account peering connection. +- name: Create cross account VPC peering Connection + ec2_vpc_peer: + region: ap-southeast-2 + vpc_id: vpc-12345678 + peer_vpc_id: vpc-12345678 + peer_owner_id: 123456789102 + state: present + tags: + Name: Peering connection for VPC 21 to VPC 22 + CostCode: CC1234 + Project: phoenix + register: vpc_peer + +- name: Accept peering connection from remote account + ec2_vpc_peer: + region: ap-southeast-2 + peering_id: "{{ vpc_peer.peering_id }}" + profile: bot03_profile_for_cross_account + state: accept + register: vpc_peer + +# Complete example to create and reject a local peering connection. +- name: Create local account VPC peering Connection + ec2_vpc_peer: + region: ap-southeast-2 + vpc_id: vpc-12345678 + peer_vpc_id: vpc-87654321 + state: present + tags: + Name: Peering connection for VPC 21 to VPC 22 + CostCode: CC1234 + Project: phoenix + register: vpc_peer + +- name: Reject a local VPC peering Connection + ec2_vpc_peer: + region: ap-southeast-2 + peering_id: "{{ vpc_peer.peering_id }}" + state: reject + +# Complete example to create and accept a cross account peering connection. +- name: Create cross account VPC peering Connection + ec2_vpc_peer: + region: ap-southeast-2 + vpc_id: vpc-12345678 + peer_vpc_id: vpc-12345678 + peer_owner_id: 123456789102 + state: present + tags: + Name: Peering connection for VPC 21 to VPC 22 + CostCode: CC1234 + Project: phoenix + register: vpc_peer + +- name: Accept a cross account VPC peering connection request + ec2_vpc_peer: + region: ap-southeast-2 + peering_id: "{{ vpc_peer.peering_id }}" + profile: bot03_profile_for_cross_account + state: accept + tags: + Name: Peering connection for VPC 21 to VPC 22 + CostCode: CC1234 + Project: phoenix + +# Complete example to create and reject a cross account peering connection. 
+- name: Create cross account VPC peering Connection + ec2_vpc_peer: + region: ap-southeast-2 + vpc_id: vpc-12345678 + peer_vpc_id: vpc-12345678 + peer_owner_id: 123456789102 + state: present + tags: + Name: Peering connection for VPC 21 to VPC 22 + CostCode: CC1234 + Project: phoenix + register: vpc_peer + +- name: Reject a cross account VPC peering Connection + ec2_vpc_peer: + region: ap-southeast-2 + peering_id: "{{ vpc_peer.peering_id }}" + profile: bot03_profile_for_cross_account + state: reject + +''' +RETURN = ''' +task: + description: The result of the create, accept, reject or delete action. + returned: success + type: dictionary +''' + +try: + import json + import botocore + import boto3 + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + + +def tags_changed(pcx_id, client, module): + changed = False + tags = dict() + if module.params.get('tags'): + tags = module.params.get('tags') + pcx = find_pcx_by_id(pcx_id, client, module) + if pcx['VpcPeeringConnections']: + pcx_values = [t.values() for t in pcx['VpcPeeringConnections'][0]['Tags']] + pcx_tags = [item for sublist in pcx_values for item in sublist] + tag_values = [[key, str(value)] for key, value in tags.iteritems()] + tags = [item for sublist in tag_values for item in sublist] + if sorted(pcx_tags) == sorted(tags): + return changed + else: + delete_tags(pcx_id, client, module) + create_tags(pcx_id, client, module) + changed = True + return changed + return changed + + +def describe_peering_connections(params, client): + result = client.describe_vpc_peering_connections(Filters=[ + {'Name': 'requester-vpc-info.vpc-id', 'Values': [params['VpcId']]}, + {'Name': 'accepter-vpc-info.vpc-id', 'Values': [params['PeerVpcId']]} + ]) + if not result['VpcPeeringConnections']: + result = client.describe_vpc_peering_connections(Filters=[ + {'Name': 'requester-vpc-info.vpc-id', 'Values': [params['PeerVpcId']]}, + {'Name': 'accepter-vpc-info.vpc-id', 'Values': [params['VpcId']]} + ]) + return result + + +def is_active(peering_conn): + return peering_conn['Status']['Code'] == 'active' + + +def is_pending(peering_conn): + return peering_conn['Status']['Code'] == 'pending-acceptance' + + +def create_peer_connection(client, module): + changed = False + params = dict() + params['VpcId'] = module.params.get('vpc_id') + params['PeerVpcId'] = module.params.get('peer_vpc_id') + if module.params.get('peer_owner_id'): + params['PeerOwnerId'] = str(module.params.get('peer_owner_id')) + params['DryRun'] = module.check_mode + peering_conns = describe_peering_connections(params, client) + for peering_conn in peering_conns['VpcPeeringConnections']: + pcx_id = peering_conn['VpcPeeringConnectionId'] + if tags_changed(pcx_id, client, module): + changed = True + if is_active(peering_conn): + return (changed, peering_conn['VpcPeeringConnectionId']) + if is_pending(peering_conn): + return (changed, peering_conn['VpcPeeringConnectionId']) + try: + peering_conn = client.create_vpc_peering_connection(**params) + pcx_id = peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId'] + if module.params.get('tags'): + create_tags(pcx_id, client, module) + changed = True + return (changed, peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId']) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + + +def peer_status(client, module): + params = dict() + params['VpcPeeringConnectionIds'] = [module.params.get('peering_id')] + vpc_peering_connection = client.describe_vpc_peering_connections(**params) + return 
vpc_peering_connection['VpcPeeringConnections'][0]['Status']['Code'] + + +def accept_reject_delete(state, client, module): + changed = False + params = dict() + params['VpcPeeringConnectionId'] = module.params.get('peering_id') + params['DryRun'] = module.check_mode + invocations = { + 'accept': client.accept_vpc_peering_connection, + 'reject': client.reject_vpc_peering_connection, + 'absent': client.delete_vpc_peering_connection + } + if state == 'absent' or peer_status(client, module) != 'active': + try: + invocations[state](**params) + if module.params.get('tags'): + create_tags(params['VpcPeeringConnectionId'], client, module) + changed = True + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + if tags_changed(params['VpcPeeringConnectionId'], client, module): + changed = True + return changed, params['VpcPeeringConnectionId'] + + +def load_tags(module): + tags = [] + if module.params.get('tags'): + for name, value in module.params.get('tags').iteritems(): + tags.append({'Key': name, 'Value': str(value)}) + return tags + + +def create_tags(pcx_id, client, module): + try: + delete_tags(pcx_id, client, module) + client.create_tags(Resources=[pcx_id], Tags=load_tags(module)) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + + +def delete_tags(pcx_id, client, module): + try: + client.delete_tags(Resources=[pcx_id]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + + +def find_pcx_by_id(pcx_id, client, module): + try: + return client.describe_vpc_peering_connections(VpcPeeringConnectionIds=[pcx_id]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=str(e)) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + vpc_id=dict(), + peer_vpc_id=dict(), + peering_id=dict(), + peer_owner_id=dict(), + tags=dict(required=False, type='dict'), + profile=dict(), + state=dict(default='present', choices=['present', 'absent', 'accept', 'reject']) + ) + ) + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO3: + module.fail_json(msg='json, botocore and boto3 are required.') + state = module.params.get('state').lower() + try: + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) + except botocore.exceptions.NoCredentialsError as e: + module.fail_json(msg="Can't authorize connection - "+str(e)) + + if state == 'present': + (changed, results) = create_peer_connection(client, module) + module.exit_json(changed=changed, peering_id=results) + else: + (changed, results) = accept_reject_delete(state, client, module) + module.exit_json(changed=changed, peering_id=results) + + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index 70f53bad26a..1529d923536 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -13,6 +13,10 @@ # You should have received a copy of the GNU General Public License # along with this library. If not, see . 
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_vpc_route_table @@ -31,6 +35,7 @@ propagating_vgw_ids: description: - "Enable route propagation from virtual gateways specified by ID." + default: None required: false route_table_id: description: @@ -39,8 +44,12 @@ default: null routes: description: - - "List of routes in the route table. Routes are specified as dicts containing the keys 'dest' and one of 'gateway_id', 'instance_id', 'interface_id', or 'vpc_peering_connection'. If 'gateway_id' is specified, you can refer to the VPC's IGW by using the value 'igw'." - required: true + - "List of routes in the route table. + Routes are specified as dicts containing the keys 'dest' and one of 'gateway_id', + 'instance_id', 'interface_id', or 'vpc_peering_connection_id'. + If 'gateway_id' is specified, you can refer to the VPC's IGW by using the value 'igw'. Routes are required for present states." + required: false + default: None state: description: - "Create or destroy the VPC route table" @@ -53,7 +62,7 @@ required: true tags: description: - - "A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used to uniquely identify route tables within a VPC when the route_table_id is not supplied." + - "A dictionary of resource tags of the form: { tag1: value1, tag2: value2 }. Tags are used to uniquely identify route tables within a VPC when the route_table_id is not supplied." required: false default: null aliases: [ "resource_tags" ] @@ -61,8 +70,9 @@ description: - "VPC ID of the VPC in which to create the route table." required: true - -extends_documentation_fragment: aws +extends_documentation_fragment: + - aws + - ec2 ''' EXAMPLES = ''' @@ -76,8 +86,8 @@ tags: Name: Public subnets: - - "{{ jumpbox_subnet.subnet_id }}" - - "{{ frontend_subnet.subnet_id }}" + - "{{ jumpbox_subnet.subnet.id }}" + - "{{ frontend_subnet.subnet.id }}" - "{{ vpn_subnet.subnet_id }}" routes: - dest: 0.0.0.0/0 @@ -89,20 +99,18 @@ vpc_id: vpc-1245678 region: us-west-1 tags: - - Name: Internal + Name: Internal subnets: - - "{{ application_subnet.subnet_id }}" + - "{{ application_subnet.subnet.id }}" - 'Database Subnet' - '10.0.0.0/8' routes: - dest: 0.0.0.0/0 instance_id: "{{ nat.instance_id }}" register: nat_route_table - -''' +''' -import sys # noqa import re try: @@ -115,6 +123,9 @@ if __name__ != '__main__': raise +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info + class AnsibleRouteTableException(Exception): pass @@ -173,7 +184,7 @@ def find_subnets(vpc_conn, vpc_id, identified_subnets): for cidr in subnet_cidrs: if not any(s.cidr_block == cidr for s in subnets_by_cidr): raise AnsibleSubnetSearchException( - 'Subnet CIDR "{0}" does not exist'.format(subnet_cidr)) + 'Subnet CIDR "{0}" does not exist'.format(cidr)) subnets_by_name = [] if subnet_names: @@ -181,11 +192,11 @@ def find_subnets(vpc_conn, vpc_id, identified_subnets): filters={'vpc_id': vpc_id, 'tag:Name': subnet_names}) for name in subnet_names: - matching = [s.tags.get('Name') == name for s in subnets_by_name] - if len(matching) == 0: + matching_count = len([1 for s in subnets_by_name if s.tags.get('Name') == name]) + if matching_count == 0: raise AnsibleSubnetSearchException( 'Subnet named "{0}" does not exist'.format(name)) - elif len(matching) > 1: + elif matching_count > 1: raise 
AnsibleSubnetSearchException( 'Multiple subnets named "{0}"'.format(name)) @@ -252,23 +263,23 @@ def get_route_table_by_id(vpc_conn, vpc_id, route_table_id): route_tables = vpc_conn.get_all_route_tables(route_table_ids=[route_table_id], filters={'vpc_id': vpc_id}) if route_tables: route_table = route_tables[0] - + return route_table - + def get_route_table_by_tags(vpc_conn, vpc_id, tags): - + count = 0 - route_table = None + route_table = None route_tables = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc_id}) for table in route_tables: this_tags = get_resource_tags(vpc_conn, table.id) if tags_match(tags, this_tags): route_table = table count +=1 - + if count > 1: raise RuntimeError("Tags provided do not identify a unique route table") - else: + else: return route_table @@ -280,7 +291,19 @@ def route_spec_matches_route(route_spec, route): 'interface_id': 'interface_id', 'vpc_peering_connection_id': 'vpc_peering_connection_id', } - for k in key_attr_map.iterkeys(): + + # This is a workaround to catch managed NAT gateways as they do not show + # up in any of the returned values when describing route tables. + # The caveat of doing it this way is that if there was an existing + # route for another nat gateway in this route table there is not a way to + # change to another nat gateway id. Long term solution would be to utilise + # boto3 which is a very big task for this module or to update boto. + if route_spec.get('gateway_id') and 'nat-' in route_spec['gateway_id']: + if route.destination_cidr_block == route_spec['destination_cidr_block']: + if all((not route.gateway_id, not route.instance_id, not route.interface_id, not route.vpc_peering_connection_id)): + return True + + for k in key_attr_map: if k in route_spec: if route_spec[k] != getattr(route, k): return False @@ -315,22 +338,36 @@ def ensure_routes(vpc_conn, route_table, route_specs, propagating_vgw_ids, # correct than checking whether the route uses a propagating VGW. # The current logic will leave non-propagated routes using propagating # VGWs in place. 
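# A compact restatement of the delete filter introduced below, sketched
# against boto route objects (which expose gateway_id, instance_id and
# friends as attributes); is_deletable() is an illustrative name, not a
# helper the module defines:
#
#     def is_deletable(route, propagating_vgw_ids):
#         gw = route.gateway_id
#         if gw is None:
#             return True  # instance/interface/peering routes are candidates
#         if gw == 'local' or gw.startswith('vpce-'):
#             return False  # implicit VPC route or managed endpoint route
#         return gw not in (propagating_vgw_ids or [])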
- routes_to_delete = [r for r in routes_to_match - if r.gateway_id != 'local' - and r.gateway_id not in propagating_vgw_ids] + routes_to_delete = [] + for r in routes_to_match: + if r.gateway_id: + if r.gateway_id != 'local' and not r.gateway_id.startswith('vpce-'): + if not propagating_vgw_ids or r.gateway_id not in propagating_vgw_ids: + routes_to_delete.append(r) + else: + routes_to_delete.append(r) - changed = routes_to_delete or route_specs_to_create + changed = bool(routes_to_delete or route_specs_to_create) if changed: + for route in routes_to_delete: + try: + vpc_conn.delete_route(route_table.id, + route.destination_cidr_block, + dry_run=check_mode) + except EC2ResponseError as e: + if e.error_code == 'DryRunOperation': + pass + for route_spec in route_specs_to_create: - vpc_conn.create_route(route_table.id, - dry_run=check_mode, - **route_spec) + try: + vpc_conn.create_route(route_table.id, + dry_run=check_mode, + **route_spec) + except EC2ResponseError as e: + if e.error_code == 'DryRunOperation': + pass - for route in routes_to_delete: - vpc_conn.delete_route(route_table.id, - route.destination_cidr_block, - dry_run=check_mode) - return {'changed': changed} + return {'changed': bool(changed)} def ensure_subnet_association(vpc_conn, vpc_id, route_table_id, subnet_id, @@ -405,7 +442,6 @@ def ensure_route_table_absent(connection, module): route_table_id = module.params.get('route_table_id') tags = module.params.get('tags') vpc_id = module.params.get('vpc_id') - check_mode = module.params.get('check_mode') if lookup == 'tag': if tags is not None: @@ -427,9 +463,12 @@ def ensure_route_table_absent(connection, module): return {'changed': False} try: - connection.delete_route_table(route_table.id, dry_run=check_mode) + connection.delete_route_table(route_table.id, dry_run=module.check_mode) except EC2ResponseError as e: - module.fail_json(msg=e.message) + if e.error_code == 'DryRunOperation': + pass + else: + module.fail_json(msg=e.message) return {'changed': True} @@ -449,32 +488,33 @@ def get_route_table_info(route_table): return route_table_info -def create_route_spec(connection, routes, vpc_id): + +def create_route_spec(connection, module, vpc_id): + routes = module.params.get('routes') for route_spec in routes: rename_key(route_spec, 'dest', 'destination_cidr_block') - if 'gateway_id' in route_spec and route_spec['gateway_id'] and \ - route_spec['gateway_id'].lower() == 'igw': + if route_spec.get('gateway_id') and route_spec['gateway_id'].lower() == 'igw': igw = find_igw(connection, vpc_id) route_spec['gateway_id'] = igw return routes + def ensure_route_table_present(connection, module): - + lookup = module.params.get('lookup') - propagating_vgw_ids = module.params.get('propagating_vgw_ids', []) + propagating_vgw_ids = module.params.get('propagating_vgw_ids') route_table_id = module.params.get('route_table_id') subnets = module.params.get('subnets') tags = module.params.get('tags') vpc_id = module.params.get('vpc_id') - check_mode = module.params.get('check_mode') try: - routes = create_route_spec(connection, module.params.get('routes'), vpc_id) + routes = create_route_spec(connection, module, vpc_id) except AnsibleIgwSearchException as e: module.fail_json(msg=e[0]) - + changed = False tags_valid = False @@ -493,18 +533,21 @@ def ensure_route_table_present(connection, module): route_table = get_route_table_by_id(connection, vpc_id, route_table_id) except EC2ResponseError as e: module.fail_json(msg=e.message) - + # If no route table returned then create new route table if 
route_table is None: try: - route_table = connection.create_route_table(vpc_id, check_mode) + route_table = connection.create_route_table(vpc_id, module.check_mode) changed = True - except EC2ResponseError, e: + except EC2ResponseError as e: + if e.error_code == 'DryRunOperation': + module.exit_json(changed=True) + module.fail_json(msg=e.message) - + if routes is not None: try: - result = ensure_routes(connection, route_table, routes, propagating_vgw_ids, check_mode) + result = ensure_routes(connection, route_table, routes, propagating_vgw_ids, module.check_mode) changed = changed or result['changed'] except EC2ResponseError as e: module.fail_json(msg=e.message) @@ -512,12 +555,12 @@ def ensure_route_table_present(connection, module): if propagating_vgw_ids is not None: result = ensure_propagation(connection, route_table, propagating_vgw_ids, - check_mode=check_mode) + check_mode=module.check_mode) changed = changed or result['changed'] if not tags_valid and tags is not None: result = ensure_tags(connection, route_table.id, tags, - add_only=True, check_mode=check_mode) + add_only=True, check_mode=module.check_mode) changed = changed or result['changed'] if subnets: @@ -531,7 +574,7 @@ def ensure_route_table_present(connection, module): ) try: - result = ensure_subnet_associations(connection, vpc_id, route_table, associated_subnets, check_mode) + result = ensure_subnet_associations(connection, vpc_id, route_table, associated_subnets, module.check_mode) changed = changed or result['changed'] except EC2ResponseError as e: raise AnsibleRouteTableException( @@ -549,25 +592,25 @@ def main(): lookup = dict(default='tag', required=False, choices=['tag', 'id']), propagating_vgw_ids = dict(default=None, required=False, type='list'), route_table_id = dict(default=None, required=False), - routes = dict(default=None, required=False, type='list'), + routes = dict(default=[], required=False, type='list'), state = dict(default='present', choices=['present', 'absent']), subnets = dict(default=None, required=False, type='list'), tags = dict(default=None, required=False, type='dict', aliases=['resource_tags']), vpc_id = dict(default=None, required=True) ) ) - + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) - + if not HAS_BOTO: module.fail_json(msg='boto is required for this module') region, ec2_url, aws_connect_params = get_aws_connection_info(module) - + if region: try: connection = connect_to_aws(boto.vpc, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") @@ -589,9 +632,6 @@ def main(): module.exit_json(**result) -from ansible.module_utils.basic import * # noqa -from ansible.module_utils.ec2 import * # noqa if __name__ == '__main__': main() - diff --git a/cloud/amazon/ec2_vpc_route_table_facts.py b/cloud/amazon/ec2_vpc_route_table_facts.py index 78ef1be3509..f270f2cbb2b 100644 --- a/cloud/amazon/ec2_vpc_route_table_facts.py +++ b/cloud/amazon/ec2_vpc_route_table_facts.py @@ -13,6 +13,10 @@ # You should have received a copy of the GNU General Public License # along with this library. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_vpc_route_table_facts @@ -27,14 +31,9 @@ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. 
See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters. required: false default: null - region: - description: - - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region) - required: false - default: null - aliases: [ 'aws_region', 'ec2_region' ] - -extends_documentation_fragment: aws +extends_documentation_fragment: + - aws + - ec2 ''' EXAMPLES = ''' @@ -46,17 +45,17 @@ # Gather facts about a particular VPC route table using route table ID - ec2_vpc_route_table_facts: filters: - - route-table-id: rtb-00112233 + route-table-id: rtb-00112233 # Gather facts about any VPC route table with a tag key Name and value Example - ec2_vpc_route_table_facts: filters: - - "tag:Name": Example + "tag:Name": Example # Gather facts about any VPC route table within VPC with ID vpc-abcdef00 - ec2_vpc_route_table_facts: filters: - - vpc-id: vpc-abcdef00 + vpc-id: vpc-abcdef00 ''' @@ -67,6 +66,10 @@ except ImportError: HAS_BOTO = False +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info + + def get_route_table_info(route_table): # Add any routes to array @@ -116,15 +119,13 @@ def main(): if region: try: connection = connect_to_aws(boto.vpc, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") list_ec2_vpc_route_tables(connection, module) -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * if __name__ == '__main__': main() diff --git a/cloud/amazon/ec2_vpc_subnet.py b/cloud/amazon/ec2_vpc_subnet.py index 45e84f66939..dc66d445864 100644 --- a/cloud/amazon/ec2_vpc_subnet.py +++ b/cloud/amazon/ec2_vpc_subnet.py @@ -13,6 +13,10 @@ # You should have received a copy of the GNU General Public License # along with this library. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ec2_vpc_subnet @@ -29,7 +33,7 @@ default: null cidr: description: - - "The CIDR block for the subnet. E.g. 10.0.0.0/16. Only required when state=present." + - "The CIDR block for the subnet. E.g. 192.0.2.0/24. Only required when state=present." required: false default: null tags: @@ -49,8 +53,9 @@ - "VPC ID of the VPC in which to create the subnet." 
required: false default: null - -extends_documentation_fragment: aws +extends_documentation_fragment: + - aws + - ec2 ''' EXAMPLES = ''' @@ -70,10 +75,9 @@ state: absent vpc_id: vpc-123456 cidr: 10.0.1.16/28 - + ''' -import sys # noqa import time try: @@ -86,6 +90,9 @@ if __name__ != '__main__': raise +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info + class AnsibleVPCSubnetException(Exception): pass @@ -121,7 +128,7 @@ def get_subnet_info(subnet): def subnet_exists(vpc_conn, subnet_id): filters = {'subnet-id': subnet_id} subnet = vpc_conn.get_all_subnets(filters=filters) - if subnet[0].state == "available": + if subnet and subnet[0].state == "available": return subnet[0] else: return False @@ -142,7 +149,7 @@ def create_subnet(vpc_conn, vpc_id, cidr, az, check_mode): if e.error_code == "DryRunOperation": subnet = None else: - raise AnsibleVPCSubnetCreationException( + raise AnsibleVPCSubnetCreationException( 'Unable to create subnet {0}, error: {1}'.format(cidr, e)) return subnet @@ -163,7 +170,7 @@ def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode): if to_delete and not add_only: vpc_conn.delete_tags(resource_id, to_delete, dry_run=check_mode) - to_add = dict((k, tags[k]) for k in tags if k not in cur_tags) + to_add = dict((k, tags[k]) for k in tags if k not in cur_tags or cur_tags[k] != tags[k]) if to_add: vpc_conn.create_tags(resource_id, to_add, dry_run=check_mode) @@ -230,18 +237,18 @@ def main(): vpc_id = dict(default=None, required=True) ) ) - + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) - + if not HAS_BOTO: module.fail_json(msg='boto is required for this module') region, ec2_url, aws_connect_params = get_aws_connection_info(module) - + if region: try: connection = connect_to_aws(boto.vpc, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") @@ -264,9 +271,6 @@ def main(): module.exit_json(**result) -from ansible.module_utils.basic import * # noqa -from ansible.module_utils.ec2 import * # noqa if __name__ == '__main__': main() - diff --git a/cloud/amazon/ec2_vpc_subnet_facts.py b/cloud/amazon/ec2_vpc_subnet_facts.py new file mode 100644 index 00000000000..83b4c1cfc51 --- /dev/null +++ b/cloud/amazon/ec2_vpc_subnet_facts.py @@ -0,0 +1,147 @@ +#!/usr/bin/python +# +# This is a free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see . 
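Worth pausing on the ensure_tags() change in ec2_vpc_subnet above: the to_add expression now re-creates a tag whenever its value has drifted, not only when the key is missing entirely. A self-contained sketch of that reconciliation, with illustrative current and desired tag dicts:

# Tag reconciliation as ensure_tags() performs it above: delete tags that are
# no longer desired, and (re)create any tag that is missing or whose value
# differs. The dicts here are made up for the example.
cur_tags = {'Name': 'web-a', 'env': 'staging', 'owner': 'ops'}
tags = {'Name': 'web-a', 'env': 'production'}

to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
to_add = dict((k, tags[k]) for k in tags if k not in cur_tags or cur_tags[k] != tags[k])

assert to_delete == {'owner': 'ops'}
assert to_add == {'env': 'production'}  # value drifted, so it is re-created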
+ +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ec2_vpc_subnet_facts +short_description: Gather facts about ec2 VPC subnets in AWS +description: + - Gather facts about ec2 VPC subnets in AWS +version_added: "2.1" +author: "Rob White (@wimnat)" +options: + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters. + required: false + default: null +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather facts about all VPC subnets +- ec2_vpc_subnet_facts: + +# Gather facts about a particular VPC subnet using ID +- ec2_vpc_subnet_facts: + filters: + subnet-id: subnet-00112233 + +# Gather facts about any VPC subnet with a tag key Name and value Example +- ec2_vpc_subnet_facts: + filters: + "tag:Name": Example + +# Gather facts about any VPC subnet within VPC with ID vpc-abcdef00 +- ec2_vpc_subnet_facts: + filters: + vpc-id: vpc-abcdef00 + +# Gather facts about a set of VPC subnets, publicA, publicB and publicC within a +# VPC with ID vpc-abcdef00 and then use the jinja map function to return the +# subnet_ids as a list. + +- ec2_vpc_subnet_facts: + filters: + vpc-id: vpc-abcdef00 + "tag:Name": "{{ item }}" + with_items: + - publicA + - publicB + - publicC + register: subnet_facts + +- set_fact: + subnet_ids: "{{ subnet_facts.results|map(attribute='subnets.0.id')|list }}" +''' + +try: + import boto.vpc + from boto.exception import BotoServerError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info + + +def get_subnet_info(subnet): + + subnet_info = { 'id': subnet.id, + 'availability_zone': subnet.availability_zone, + 'available_ip_address_count': subnet.available_ip_address_count, + 'cidr_block': subnet.cidr_block, + 'default_for_az': subnet.defaultForAz, + 'map_public_ip_on_launch': subnet.mapPublicIpOnLaunch, + 'state': subnet.state, + 'tags': subnet.tags, + 'vpc_id': subnet.vpc_id + } + + return subnet_info + +def list_ec2_vpc_subnets(connection, module): + + filters = module.params.get("filters") + subnet_dict_array = [] + + try: + all_subnets = connection.get_all_subnets(filters=filters) + except BotoServerError as e: + module.fail_json(msg=e.message) + + for subnet in all_subnets: + subnet_dict_array.append(get_subnet_info(subnet)) + + module.exit_json(subnets=subnet_dict_array) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + filters = dict(default=None, type='dict') + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if region: + try: + connection = connect_to_aws(boto.vpc, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") + + list_ec2_vpc_subnets(connection, module) + + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_vpc_vgw.py b/cloud/amazon/ec2_vpc_vgw.py new file mode 100644 index 
00000000000..40eb386156b --- /dev/null +++ b/cloud/amazon/ec2_vpc_vgw.py @@ -0,0 +1,602 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +module: ec2_vpc_vgw +short_description: Create and delete AWS VPN Virtual Gateways. +description: + - Creates AWS VPN Virtual Gateways + - Deletes AWS VPN Virtual Gateways + - Attaches Virtual Gateways to VPCs + - Detaches Virtual Gateways from VPCs +version_added: "2.2" +requirements: [ boto3 ] +options: + state: + description: + - present to ensure resource is created. + - absent to remove resource + required: false + default: present + choices: [ "present", "absent"] + name: + description: + - name of the vgw to be created or deleted + required: false + type: + description: + - type of the virtual gateway to be created + required: false + choices: [ "ipsec.1" ] + vpn_gateway_id: + description: + - vpn gateway id of an existing virtual gateway + required: false + vpc_id: + description: + - the vpc-id of a vpc to attach or detach + required: false + wait_timeout: + description: + - number of seconds to wait for status during vpc attach and detach + required: false + default: 320 + tags: + description: + - dictionary of resource tags + required: false + default: null + aliases: [ "resource_tags" ] +author: Nick Aslanidis (@naslanidis) +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +- name: Create a new vgw attached to a specific VPC + ec2_vpc_vgw: + state: present + region: ap-southeast-2 + profile: personal + vpc_id: vpc-12345678 + name: personal-testing + type: ipsec.1 + register: created_vgw + +- name: Create a new unattached vgw + ec2_vpc_vgw: + state: present + region: ap-southeast-2 + profile: personal + name: personal-testing + type: ipsec.1 + tags: + environment: production + owner: ABC + register: created_vgw + +- name: Remove a new vgw using the name + ec2_vpc_vgw: + state: absent + region: ap-southeast-2 + profile: personal + name: personal-testing + type: ipsec.1 + register: deleted_vgw + +- name: Remove a new vgw using the vpn_gateway_id + ec2_vpc_vgw: + state: absent + region: ap-southeast-2 + profile: personal + vpn_gateway_id: vgw-3a9aa123 + register: deleted_vgw +''' + +RETURN = ''' +result: + description: The result of the create, or delete action. 
+ returned: success + type: dictionary +''' + +try: + import json + import time + import botocore + import boto3 + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +def get_vgw_info(vgws): + if not isinstance(vgws, list): + return + + for vgw in vgws: + vgw_info = { + 'id': vgw['VpnGatewayId'], + 'type': vgw['Type'], + 'state': vgw['State'], + 'vpc_id': None, + 'tags': dict() + } + + for tag in vgw['Tags']: + vgw_info['tags'][tag['Key']] = tag['Value'] + + if len(vgw['VpcAttachments']) != 0 and vgw['VpcAttachments'][0]['State'] == 'attached': + vgw_info['vpc_id'] = vgw['VpcAttachments'][0]['VpcId'] + + return vgw_info + +def wait_for_status(client, module, vpn_gateway_id, status): + polling_increment_secs = 15 + max_retries = (module.params.get('wait_timeout') / polling_increment_secs) + status_achieved = False + + for x in range(0, max_retries): + try: + response = find_vgw(client, module, vpn_gateway_id) + if response[0]['VpcAttachments'][0]['State'] == status: + status_achieved = True + break + else: + time.sleep(polling_increment_secs) + except botocore.exceptions.ClientError: + e = get_exception() + module.fail_json(msg=str(e)) + + result = response + return status_achieved, result + + +def attach_vgw(client, module, vpn_gateway_id): + params = dict() + params['VpcId'] = module.params.get('vpc_id') + + try: + response = client.attach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId']) + except botocore.exceptions.ClientError: + e = get_exception() + module.fail_json(msg=str(e)) + + status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'attached') + if not status_achieved: + module.fail_json(msg='Error waiting for vpc to attach to vgw - please check the AWS console') + + result = response + return result + + +def detach_vgw(client, module, vpn_gateway_id, vpc_id=None): + params = dict() + params['VpcId'] = module.params.get('vpc_id') + + if vpc_id: + try: + response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=vpc_id) + except botocore.exceptions.ClientError: + e = get_exception() + module.fail_json(msg=str(e)) + else: + try: + response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId']) + except botocore.exceptions.ClientError: + e = get_exception() + module.fail_json(msg=str(e)) + + status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'detached') + if not status_achieved: + module.fail_json(msg='Error waiting for vpc to detach from vgw - please check the AWS console') + + result = response + return result + + +def create_vgw(client, module): + params = dict() + params['Type'] = module.params.get('type') + + try: + response = client.create_vpn_gateway(Type=params['Type']) + except botocore.exceptions.ClientError: + e = get_exception() + module.fail_json(msg=str(e)) + + result = response + return result + + +def delete_vgw(client, module, vpn_gateway_id): + + try: + response = client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id) + except botocore.exceptions.ClientError: + e = get_exception() + module.fail_json(msg=str(e)) + + #return the deleted VpnGatewayId as this is not included in the above response + result = vpn_gateway_id + return result + + +def create_tags(client, module, vpn_gateway_id): + params = dict() + + try: + response = client.create_tags(Resources=[vpn_gateway_id],Tags=load_tags(module)) + except botocore.exceptions.ClientError: + e = get_exception() + module.fail_json(msg=str(e)) + + result = response + return result + + +def delete_tags(client, module, 
vpn_gateway_id, tags_to_delete=None): + params = dict() + + if tags_to_delete: + try: + response = client.delete_tags(Resources=[vpn_gateway_id], Tags=tags_to_delete) + except botocore.exceptions.ClientError: + e = get_exception() + module.fail_json(msg=str(e)) + else: + try: + response = client.delete_tags(Resources=[vpn_gateway_id]) + except botocore.exceptions.ClientError: + e = get_exception() + module.fail_json(msg=str(e)) + + result = response + return result + + +def load_tags(module): + tags = [] + + if module.params.get('tags'): + for name, value in module.params.get('tags').iteritems(): + tags.append({'Key': name, 'Value': str(value)}) + tags.append({'Key': "Name", 'Value': module.params.get('name')}) + else: + tags.append({'Key': "Name", 'Value': module.params.get('name')}) + return tags + + +def find_tags(client, module, resource_id=None): + + if resource_id: + try: + response = client.describe_tags(Filters=[ + {'Name': 'resource-id', 'Values': [resource_id]} + ]) + except botocore.exceptions.ClientError: + e = get_exception() + module.fail_json(msg=str(e)) + + result = response + return result + + +def check_tags(client, module, existing_vgw, vpn_gateway_id): + params = dict() + params['Tags'] = module.params.get('tags') + vgw = existing_vgw + changed = False + tags_list = {} + + #format tags for comparison + for tags in existing_vgw[0]['Tags']: + if tags['Key'] != 'Name': + tags_list[tags['Key']] = tags['Value'] + + # if existing tags don't match the tags arg, delete existing and recreate with new list + if params['Tags'] != None and tags_list != params['Tags']: + delete_tags(client, module, vpn_gateway_id) + create_tags(client, module, vpn_gateway_id) + vgw = find_vgw(client, module) + changed = True + + #if no tag args are supplied, delete any existing tags with the exception of the name tag + if params['Tags'] == None and tags_list != {}: + tags_to_delete = [] + for tags in existing_vgw[0]['Tags']: + if tags['Key'] != 'Name': + tags_to_delete.append(tags) + + delete_tags(client, module, vpn_gateway_id, tags_to_delete) + vgw = find_vgw(client, module) + changed = True + + return vgw, changed + + +def find_vpc(client, module): + params = dict() + params['vpc_id'] = module.params.get('vpc_id') + + if params['vpc_id']: + try: + response = client.describe_vpcs(VpcIds=[params['vpc_id']]) + except botocore.exceptions.ClientError: + e = get_exception() + module.fail_json(msg=str(e)) + + result = response + return result + + +def find_vgw(client, module, vpn_gateway_id=None): + params = dict() + params['Name'] = module.params.get('name') + params['Type'] = module.params.get('type') + params['State'] = module.params.get('state') + + if params['State'] == 'present': + try: + response = client.describe_vpn_gateways(Filters=[ + {'Name': 'type', 'Values': [params['Type']]}, + {'Name': 'tag:Name', 'Values': [params['Name']]} + ]) + except botocore.exceptions.ClientError: + e = get_exception() + module.fail_json(msg=str(e)) + + else: + if vpn_gateway_id: + try: + response = client.describe_vpn_gateways(VpnGatewayIds=vpn_gateway_id) + except botocore.exceptions.ClientError: + e = get_exception() + module.fail_json(msg=str(e)) + + else: + try: + response = client.describe_vpn_gateways(Filters=[ + {'Name': 'type', 'Values': [params['Type']]}, + {'Name': 'tag:Name', 'Values': [params['Name']]} + ]) + except botocore.exceptions.ClientError: + e = get_exception() + module.fail_json(msg=str(e)) + + result = response['VpnGateways'] + return result + + +def ensure_vgw_present(client, module): + +# 
If an existing vgw name and type matches our args, then a match is considered to have been +# found and we will not create another vgw. + + changed = False + params = dict() + result = dict() + params['Name'] = module.params.get('name') + params['VpcId'] = module.params.get('vpc_id') + params['Type'] = module.params.get('type') + params['Tags'] = module.params.get('tags') + params['VpnGatewayIds'] = module.params.get('vpn_gateway_id') + + # Check that a name argument has been supplied. + if not module.params.get('name'): + module.fail_json(msg='A name is required when a state of \'present\' is supplied') + + # check if a gateway matching our module args already exists + existing_vgw = find_vgw(client, module) + + if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted': + vpn_gateway_id = existing_vgw[0]['VpnGatewayId'] + vgw, changed = check_tags(client, module, existing_vgw, vpn_gateway_id) + + # if a vpc_id was provided, check if it exists and if it's attached + if params['VpcId']: + + # check that the vpc_id exists. If not, an exception is thrown + vpc = find_vpc(client, module) + current_vpc_attachments = existing_vgw[0]['VpcAttachments'] + + if current_vpc_attachments != [] and current_vpc_attachments[0]['State'] == 'attached': + if current_vpc_attachments[0]['VpcId'] == params['VpcId'] and current_vpc_attachments[0]['State'] == 'attached': + changed = False + else: + + # detach the existing vpc from the virtual gateway + vpc_to_detach = current_vpc_attachments[0]['VpcId'] + detach_vgw(client, module, vpn_gateway_id, vpc_to_detach) + time.sleep(5) + attached_vgw = attach_vgw(client, module, vpn_gateway_id) + vgw = find_vgw(client, module, [vpn_gateway_id]) + changed = True + else: + # attach the vgw to the supplied vpc + attached_vgw = attach_vgw(client, module, vpn_gateway_id) + vgw = find_vgw(client, module, [vpn_gateway_id]) + changed = True + + # if params['VpcId'] is not provided, check the vgw is attached to a vpc. if so, detach it. + else: + existing_vgw = find_vgw(client, module, [vpn_gateway_id]) + + if existing_vgw[0]['VpcAttachments'] != []: + if existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached': + # detach the vpc from the vgw + vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId'] + detach_vgw(client, module, vpn_gateway_id, vpc_to_detach) + changed = True + + vgw = find_vgw(client, module, [vpn_gateway_id]) + + else: + # create a new vgw + new_vgw = create_vgw(client, module) + changed = True + vpn_gateway_id = new_vgw['VpnGateway']['VpnGatewayId'] + + # tag the new virtual gateway + create_tags(client, module, vpn_gateway_id) + + # return current state of the vgw + vgw = find_vgw(client, module, [vpn_gateway_id]) + + # if a vpc-id was supplied, attempt to attach it to the vgw + if params['VpcId']: + attached_vgw = attach_vgw(client, module, vpn_gateway_id) + changed = True + vgw = find_vgw(client, module, [vpn_gateway_id]) + + result = get_vgw_info(vgw) + return changed, result + + +def ensure_vgw_absent(client, module): + +# If an existing vgw name and type matches our args, then a match is considered to have been +# found and we will take steps to delete it.
+ + changed = False + params = dict() + result = dict() + params['Name'] = module.params.get('name') + params['VpcId'] = module.params.get('vpc_id') + params['Type'] = module.params.get('type') + params['Tags'] = module.params.get('tags') + params['VpnGatewayIds'] = module.params.get('vpn_gateway_id') + + # check if a gateway matching our module args already exists + if params['VpnGatewayIds']: + existing_vgw_with_id = find_vgw(client, module, [params['VpnGatewayIds']]) + if existing_vgw_with_id != [] and existing_vgw_with_id[0]['State'] != 'deleted': + existing_vgw = existing_vgw_with_id + if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached': + if params['VpcId']: + if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']: + module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console') + + else: + # detach the vpc from the vgw + detach_vgw(client, module, params['VpnGatewayIds'], params['VpcId']) + deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds']) + changed = True + + else: + # attempt to detach any attached vpcs + vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId'] + detach_vgw(client, module, params['VpnGatewayIds'], vpc_to_detach) + deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds']) + changed = True + + else: + # no VPCs are attached so attempt to delete the vgw + deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds']) + changed = True + + else: + changed = False + deleted_vgw = "Nothing to do" + + else: + # Check that a name and type argument has been supplied if no vgw-id + if not module.params.get('name') or not module.params.get('type'): + module.fail_json(msg='A name and type are required when no vgw-id and a state of \'absent\' is supplied') + + existing_vgw = find_vgw(client, module) + if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted': + vpn_gateway_id = existing_vgw[0]['VpnGatewayId'] + if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached': + if params['VpcId']: + if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']: + module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console') + + else: + # detach the vpc from the vgw + detach_vgw(client, module, vpn_gateway_id, params['VpcId']) + + # now that the vpc has been detached, delete the vgw + deleted_vgw = delete_vgw(client, module, vpn_gateway_id) + changed = True + + else: + # attempt to detach any attached vpcs + vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId'] + detach_vgw(client, module, vpn_gateway_id, vpc_to_detach) + changed = True + + # now that the vpc has been detached, delete the vgw + deleted_vgw = delete_vgw(client, module, vpn_gateway_id) + + else: + # no VPCs are attached so attempt to delete the vgw + deleted_vgw = delete_vgw(client, module, vpn_gateway_id) + changed = True + + else: + changed = False + deleted_vgw = None + + result = deleted_vgw + return changed, result + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['present', 'absent']), + region=dict(required=True), + name=dict(), + vpn_gateway_id=dict(), + vpc_id=dict(), + wait_timeout=dict(type='int', default=320), + type=dict(default='ipsec.1', choices=['ipsec.1']), + tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']), + ) + ) +
module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO3: + module.fail_json(msg='json and boto3 are required.') + + state = module.params.get('state').lower() + + try: + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) + except botocore.exceptions.NoCredentialsError: + e = get_exception() + module.fail_json(msg="Can't authorize connection - "+str(e)) + + if state == 'present': + (changed, results) = ensure_vgw_present(client, module) + else: + (changed, results) = ensure_vgw_absent(client, module) + module.exit_json(changed=changed, vgw=results) + + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() + diff --git a/cloud/amazon/ec2_win_password.py b/cloud/amazon/ec2_win_password.py index 6086c42ffbb..4d246d43676 100644 --- a/cloud/amazon/ec2_win_password.py +++ b/cloud/amazon/ec2_win_password.py @@ -1,4 +1,21 @@ #!/usr/bin/python +# +# This is a free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -23,12 +40,6 @@ - The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to convert your password protected keys if they do not use DES or 3DES. ex) openssl rsa -in current_key -out new_key -des3. required: false default: null - region: - description: - - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. - required: false - default: null - aliases: [ 'aws_region', 'ec2_region' ] wait: version_added: "2.0" description: @@ -43,7 +54,9 @@ required: false default: 120 -extends_documentation_fragment: aws +extends_documentation_fragment: + - aws + - ec2 ''' EXAMPLES = ''' @@ -131,8 +144,17 @@ def main(): if wait and datetime.datetime.now() >= end: module.fail_json(msg = "wait for password timeout after %d seconds" % wait_timeout) - f = open(key_file, 'r') - key = RSA.importKey(f.read(), key_passphrase) + try: + f = open(key_file, 'r') + except IOError as e: + module.fail_json(msg = "I/O error (%d) opening key file: %s" % (e.errno, e.strerror)) + else: + try: + with f: + key = RSA.importKey(f.read(), key_passphrase) + except (ValueError, IndexError, TypeError) as e: + module.fail_json(msg = "unable to parse key file") + cipher = PKCS1_v1_5.new(key) sentinel = 'password decryption failed!!!'
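For context on the decryption path the hunk above hardens: PyCrypto's PKCS1_v1_5 cipher returns the caller-supplied sentinel instead of raising when decryption fails, which is why the module defines one. A self-contained round-trip sketch with a locally generated key pair standing in for the instance key (the real module reads the key file and the base64 password_data returned by EC2):

# Round-trip sketch of the PKCS#1 v1.5 flow used by ec2_win_password.
import base64
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5

key = RSA.generate(2048)  # stand-in for the instance key pair
password_data = base64.b64encode(PKCS1_v1_5.new(key.publickey()).encrypt(b'Secret123!'))

sentinel = 'password decryption failed!!!'
decrypted = PKCS1_v1_5.new(key).decrypt(base64.b64decode(password_data), sentinel)
assert decrypted == b'Secret123!'  # on mismatch, decrypt() returns the sentinel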
@@ -154,4 +176,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ecs_cluster.py b/cloud/amazon/ecs_cluster.py new file mode 100644 index 00000000000..b1409005a8c --- /dev/null +++ b/cloud/amazon/ecs_cluster.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ecs_cluster +short_description: create or terminate ecs clusters +notes: + - When deleting a cluster, the information returned is the state of the cluster prior to deletion. + - It will also wait for a cluster to have instances registered to it. +description: + - Creates or terminates ecs clusters. +version_added: "2.0" +author: Mark Chance(@Java1Guy) +requirements: [ boto, boto3 ] +options: + state: + description: + - The desired state of the cluster + required: true + choices: ['present', 'absent', 'has_instances'] + name: + description: + - The cluster name + required: true + delay: + description: + - Number of seconds to wait + required: false + repeat: + description: + - The number of times to wait for the cluster to have an instance + required: false +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
+ +# Cluster creation +- ecs_cluster: + name: default + state: present + +# Cluster deletion +- ecs_cluster: + name: default + state: absent + +- name: Wait for register + ecs_cluster: + name: "{{ new_cluster }}" + state: has_instances + delay: 10 + repeat: 10 + register: task_output + +''' +RETURN = ''' +activeServicesCount: + description: how many services are active in this cluster + returned: 0 if a new cluster + type: int +clusterArn: + description: the ARN of the cluster just created + type: string (ARN) + sample: arn:aws:ecs:us-west-2:172139249013:cluster/test-cluster-mfshcdok +clusterName: + description: name of the cluster just created (should match the input argument) + type: string + sample: test-cluster-mfshcdok +pendingTasksCount: + description: how many tasks are waiting to run in this cluster + returned: 0 if a new cluster + type: int +registeredContainerInstancesCount: + description: how many container instances are available in this cluster + returned: 0 if a new cluster + type: int +runningTasksCount: + description: how many tasks are running in this cluster + returned: 0 if a new cluster + type: int +status: + description: the status of the new cluster + returned: ACTIVE + type: string +''' +import time + +try: + import boto + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +try: + import boto3 + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info + + +class EcsClusterManager: + """Handles ECS Clusters""" + + def __init__(self, module): + self.module = module + + try: + # self.ecs = boto3.client('ecs') + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + if not region: + module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") + self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs) + except boto.exception.NoAuthHandlerFound as e: + self.module.fail_json(msg="Can't authorize connection - %s" % str(e)) + + def find_in_array(self, array_of_clusters, cluster_name, field_name='clusterArn'): + for c in array_of_clusters: + if c[field_name].endswith(cluster_name): + return c + return None + + def describe_cluster(self, cluster_name): + response = self.ecs.describe_clusters(clusters=[ + cluster_name + ]) + if len(response['failures'])>0: + c = self.find_in_array(response['failures'], cluster_name, 'arn') + if c and c['reason']=='MISSING': + return None + # fall thru and look through found ones + if len(response['clusters'])>0: + c = self.find_in_array(response['clusters'], cluster_name) + if c: + return c + raise Exception("Unknown problem describing cluster %s." 
% cluster_name) + + def create_cluster(self, clusterName = 'default'): + response = self.ecs.create_cluster(clusterName=clusterName) + return response['cluster'] + + def delete_cluster(self, clusterName): + return self.ecs.delete_cluster(cluster=clusterName) + +def main(): + + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent', 'has_instances'] ), + name=dict(required=True, type='str' ), + delay=dict(required=False, type='int', default=10), + repeat=dict(required=False, type='int', default=10) + )) + required_together = ( ['state', 'name'] ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together) + + if not HAS_BOTO: + module.fail_json(msg='boto is required.') + + if not HAS_BOTO3: + module.fail_json(msg='boto3 is required.') + + cluster_mgr = EcsClusterManager(module) + try: + existing = cluster_mgr.describe_cluster(module.params['name']) + except Exception as e: + module.fail_json(msg="Exception describing cluster '"+module.params['name']+"': "+str(e)) + + results = dict(changed=False) + if module.params['state'] == 'present': + if existing and 'status' in existing and existing['status']=="ACTIVE": + results['cluster']=existing + else: + if not module.check_mode: + # doesn't exist. create it. + results['cluster'] = cluster_mgr.create_cluster(module.params['name']) + results['changed'] = True + + # delete the cluster + elif module.params['state'] == 'absent': + if not existing: + pass + else: + # it exists, so we should delete it and mark changed. + # return info about the cluster deleted + results['cluster'] = existing + if 'status' in existing and existing['status']=="INACTIVE": + results['changed'] = False + else: + if not module.check_mode: + cluster_mgr.delete_cluster(module.params['name']) + results['changed'] = True + elif module.params['state'] == 'has_instances': + if not existing: + module.fail_json(msg="Cluster '"+module.params['name']+"' not found.") + return + # it exists; poll until instances are registered to it. + delay = module.params['delay'] + repeat = module.params['repeat'] + time.sleep(delay) + count = 0 + for i in range(repeat): + existing = cluster_mgr.describe_cluster(module.params['name']) + count = existing['registeredContainerInstancesCount'] + if count > 0: + results['changed'] = True + break + time.sleep(delay) + if count == 0 and i == repeat-1: + module.fail_json(msg="Cluster instance count still zero after "+str(repeat)+" tries of "+str(delay)+" seconds each.") + return + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ecs_service.py b/cloud/amazon/ecs_service.py new file mode 100644 index 00000000000..004a11b267d --- /dev/null +++ b/cloud/amazon/ecs_service.py @@ -0,0 +1,433 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see .
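The has_instances branch of ecs_cluster above is one instance of the delay/repeat polling convention these modules share. The generic shape of that loop, as a sketch (wait_until and its arguments are illustrative names, not part of the module):

import time

# Poll describe() up to `repeat` times, sleeping `delay` seconds between
# attempts, until predicate() accepts the result.
def wait_until(describe, predicate, delay=10, repeat=10):
    for _ in range(repeat):
        result = describe()
        if predicate(result):
            return result
        time.sleep(delay)
    return None

# e.g. wait for at least one registered container instance:
# wait_until(lambda: cluster_mgr.describe_cluster('default'),
#            lambda c: c and c['registeredContainerInstancesCount'] > 0)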
+ +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ecs_service +short_description: create, terminate, start or stop a service in ecs +description: + - Creates or terminates ecs services. +notes: + - the service role specified must be assumable (i.e. have a trust relationship for the ecs service, ecs.amazonaws.com) + - for details of the parameters and returns see U(http://boto3.readthedocs.org/en/latest/reference/services/ecs.html) +dependencies: + - An IAM role must have been created +version_added: "2.1" +author: + - "Mark Chance (@java1guy)" + - "Darek Kaczynski (@kaczynskid)" +requirements: [ json, boto, botocore, boto3 ] +options: + state: + description: + - The desired state of the service + required: true + choices: ["present", "absent", "deleting"] + name: + description: + - The name of the service + required: true + cluster: + description: + - The name of the cluster in which the service exists + required: false + task_definition: + description: + - The task definition the service will run + required: false + load_balancers: + description: + - The list of ELBs defined for this service + required: false + + desired_count: + description: + - The count of how many instances of the service to run + required: false + client_token: + description: + - Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed. + required: false + role: + description: + - The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer on your behalf. This parameter is only required if you are using a load balancer with your service. + required: false + delay: + description: + - The time to wait before checking that the service is available + required: false + default: 10 + repeat: + description: + - The number of times to check that the service is available + required: false + default: 10 +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. +- ecs_service: + state: present + name: console-test-service + cluster: new_cluster + task_definition: "new_cluster-task:1" + desired_count: 0 + +# Basic provisioning example +- ecs_service: + name: default + state: present + cluster: new_cluster + +# Simple example to delete +- ecs_service: + name: default + state: absent + cluster: new_cluster +''' + +RETURN = ''' +service: + description: Details of created service. + returned: when creating a service + type: complex + contains: + clusterArn: + description: The Amazon Resource Name (ARN) of the cluster that hosts the service. + returned: always + type: string + desiredCount: + description: The desired number of instantiations of the task definition to keep running on the service. + returned: always + type: int + loadBalancers: + description: A list of load balancer objects + returned: always + type: complex + contains: + loadBalancerName: + description: the name + returned: always + type: string + containerName: + description: The name of the container to associate with the load balancer. + returned: always + type: string + containerPort: + description: The port on the container to associate with the load balancer. + returned: always + type: int + pendingCount: + description: The number of tasks in the cluster that are in the PENDING state.
+ returned: always + type: int + runningCount: + description: The number of tasks in the cluster that are in the RUNNING state. + returned: always + type: int + serviceArn: + description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region :012345678910 :service/my-service . + returned: always + type: string + serviceName: + description: A user-generated string used to identify the service + returned: always + type: string + status: + description: The valid values are ACTIVE, DRAINING, or INACTIVE. + returned: always + type: string + taskDefinition: + description: The ARN of a task definition to use for tasks in the service. + returned: always + type: string + deployments: + description: list of service deployments + returned: always + type: list of complex + events: + description: list of service events + returned: always + type: list of complex +ansible_facts: + description: Facts about deleted service. + returned: when deleting a service + type: complex + contains: + service: + description: Details of deleted service in the same structure described above for service creation. + returned: when service existed and was deleted + type: complex +''' +import time + +try: + import boto + import botocore + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +try: + import boto3 + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info + + +class EcsServiceManager: + """Handles ECS Services""" + + def __init__(self, module): + self.module = module + + try: + # self.ecs = boto3.client('ecs') + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + if not region: + module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") + self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs) + except boto.exception.NoAuthHandlerFound as e: + self.module.fail_json(msg="Can't authorize connection - %s" % str(e)) + + # def list_clusters(self): + # return self.client.list_clusters() + # {'failures=[], + # 'ResponseMetadata={'HTTPStatusCode=200, 'RequestId='ce7b5880-1c41-11e5-8a31-47a93a8a98eb'}, + # 'clusters=[{'activeServicesCount=0, 'clusterArn='arn:aws:ecs:us-west-2:777110527155:cluster/default', 'status='ACTIVE', 'pendingTasksCount=0, 'runningTasksCount=0, 'registeredContainerInstancesCount=0, 'clusterName='default'}]} + # {'failures=[{'arn='arn:aws:ecs:us-west-2:777110527155:cluster/bogus', 'reason='MISSING'}], + # 'ResponseMetadata={'HTTPStatusCode=200, 'RequestId='0f66c219-1c42-11e5-8a31-47a93a8a98eb'}, + # 'clusters=[]} + + def find_in_array(self, array_of_services, service_name, field_name='serviceArn'): + for c in array_of_services: + if c[field_name].endswith(service_name): + return c + return None + + def describe_service(self, cluster_name, service_name): + response = self.ecs.describe_services( + cluster=cluster_name, + services=[ + service_name + ]) + msg = '' + if len(response['failures'])>0: + c = self.find_in_array(response['failures'], service_name, 'arn') + if c: + msg += ", failure reason is "+c['reason'] + if c and c['reason']=='MISSING': + return None + #
fall thru and look through found ones + if len(response['services'])>0: + c = self.find_in_array(response['services'], service_name) + if c: + return c + raise Exception("Unknown problem describing service %s." % service_name) + + def is_matching_service(self, expected, existing): + if expected['task_definition'] != existing['taskDefinition']: + return False + + if (expected['load_balancers'] or []) != existing['loadBalancers']: + return False + + if (expected['desired_count'] or 0) != existing['desiredCount']: + return False + + return True + + def create_service(self, service_name, cluster_name, task_definition, + load_balancers, desired_count, client_token, role): + response = self.ecs.create_service( + cluster=cluster_name, + serviceName=service_name, + taskDefinition=task_definition, + loadBalancers=load_balancers, + desiredCount=desired_count, + clientToken=client_token, + role=role) + return self.jsonize(response['service']) + + def update_service(self, service_name, cluster_name, task_definition, + load_balancers, desired_count, client_token, role): + response = self.ecs.update_service( + cluster=cluster_name, + service=service_name, + taskDefinition=task_definition, + desiredCount=desired_count) + return self.jsonize(response['service']) + + def jsonize(self, service): + # some fields are datetime which is not JSON serializable + # make them strings + if 'deployments' in service: + for d in service['deployments']: + if 'createdAt' in d: + d['createdAt'] = str(d['createdAt']) + if 'updatedAt' in d: + d['updatedAt'] = str(d['updatedAt']) + if 'events' in service: + for e in service['events']: + if 'createdAt' in e: + e['createdAt'] = str(e['createdAt']) + return service + + def delete_service(self, service, cluster=None): + return self.ecs.delete_service(cluster=cluster, service=service) + +def main(): + + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent', 'deleting'] ), + name=dict(required=True, type='str' ), + cluster=dict(required=False, type='str' ), + task_definition=dict(required=False, type='str' ), + load_balancers=dict(required=False, type='list' ), + desired_count=dict(required=False, type='int' ), + client_token=dict(required=False, type='str' ), + role=dict(required=False, type='str' ), + delay=dict(required=False, type='int', default=10), + repeat=dict(required=False, type='int', default=10) + )) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + if not HAS_BOTO: + module.fail_json(msg='boto is required.') + + if not HAS_BOTO3: + module.fail_json(msg='boto3 is required.') + + if module.params['state'] == 'present': + if 'task_definition' not in module.params or module.params['task_definition'] is None: + module.fail_json(msg="To create a service, a task_definition must be specified") + if 'desired_count' not in module.params or module.params['desired_count'] is None: + module.fail_json(msg="To create a service, a desired_count must be specified") + + service_mgr = EcsServiceManager(module) + try: + existing = service_mgr.describe_service(module.params['cluster'], module.params['name']) + except Exception as e: + module.fail_json(msg="Exception describing service '"+module.params['name']+"' in cluster '"+module.params['cluster']+"': "+str(e)) + + results = dict(changed=False ) + if module.params['state'] == 'present': + + matching = False + update = False + if existing and 'status' in existing and existing['status']=="ACTIVE": + if
diff --git a/cloud/amazon/ecs_service_facts.py b/cloud/amazon/ecs_service_facts.py
new file mode 100644
index 00000000000..e62b492c4b9
--- /dev/null
+++ b/cloud/amazon/ecs_service_facts.py
@@ -0,0 +1,240 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
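The ecs_service_facts module defined below lists service ARNs with a single list_services call; on clusters with many services the API pages its results. A hedged sketch of draining every page via nextToken with plain boto3 (not part of the patch; the cluster name is illustrative):

import boto3

def list_all_service_arns(cluster='default'):
    ecs = boto3.client('ecs')
    arns, token = [], None
    while True:
        kwargs = {'cluster': cluster}
        if token:
            kwargs['nextToken'] = token  # only pass the marker once we have one
        page = ecs.list_services(**kwargs)
        arns.extend(page['serviceArns'])
        token = page.get('nextToken')
        if not token:
            return arns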
+ +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ecs_service_facts +short_description: list or describe services in ecs +notes: + - for details of the parameters and returns see U(http://boto3.readthedocs.org/en/latest/reference/services/ecs.html) +description: + - Lists or describes services in ecs. +version_added: "2.1" +author: + - "Mark Chance (@java1guy)" + - "Darek Kaczynski (@kaczynskid)" +requirements: [ json, boto, botocore, boto3 ] +options: + details: + description: + - Set this to true if you want detailed information about the services. + required: false + default: 'false' + choices: ['true', 'false'] + cluster: + description: + - The cluster ARNS in which to list the services. + required: false + default: 'default' + service: + description: + - The service to get details for (required if details is true) + required: false +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Basic listing example +- ecs_service_facts: + cluster: test-cluster + service: console-test-service + details: true + +# Basic listing example +- ecs_service_facts: + cluster: test-cluster +''' + +RETURN = ''' +services: + description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below. + returned: success + type: list of complex + contains: + clusterArn: + description: The Amazon Resource Name (ARN) of the of the cluster that hosts the service. + returned: always + type: string + desiredCount: + description: The desired number of instantiations of the task definition to keep running on the service. + returned: always + type: int + loadBalancers: + description: A list of load balancer objects + returned: always + type: complex + contains: + loadBalancerName: + description: the name + returned: always + type: string + containerName: + description: The name of the container to associate with the load balancer. + returned: always + type: string + containerPort: + description: The port on the container to associate with the load balancer. + returned: always + type: int + pendingCount: + description: The number of tasks in the cluster that are in the PENDING state. + returned: always + type: int + runningCount: + description: The number of tasks in the cluster that are in the RUNNING state. + returned: always + type: int + serviceArn: + description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region :012345678910 :service/my-service . + returned: always + type: string + serviceName: + description: A user-generated string used to identify the service + returned: always + type: string + status: + description: The valid values are ACTIVE, DRAINING, or INACTIVE. + returned: always + type: string + taskDefinition: + description: The ARN of a task definition to use for tasks in the service. 
+        returned: always
+        type: string
+    deployments:
+        description: list of service deployments
+        returned: always
+        type: list of complex
+    events:
+        description: list of service events
+        returned: always
+        type: list of complex
+'''
+try:
+    import boto
+    import botocore
+    HAS_BOTO = True
+except ImportError:
+    HAS_BOTO = False
+
+try:
+    import boto3
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
+
+
+class EcsServiceManager:
+    """Handles ECS Services"""
+
+    def __init__(self, module):
+        self.module = module
+
+        try:
+            # self.ecs = boto3.client('ecs')
+            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+            if not region:
+                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+            self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+        except boto.exception.NoAuthHandlerFound as e:
+            self.module.fail_json(msg="Can't authorize connection - %s" % str(e))
+
+    # def list_clusters(self):
+    #   return self.client.list_clusters()
+    # {'failures': [],
+    #  'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'ce7b5880-1c41-11e5-8a31-47a93a8a98eb'},
+    #  'clusters': [{'activeServicesCount': 0, 'clusterArn': 'arn:aws:ecs:us-west-2:777110527155:cluster/default', 'status': 'ACTIVE', 'pendingTasksCount': 0, 'runningTasksCount': 0, 'registeredContainerInstancesCount': 0, 'clusterName': 'default'}]}
+    # {'failures': [{'arn': 'arn:aws:ecs:us-west-2:777110527155:cluster/bogus', 'reason': 'MISSING'}],
+    #  'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '0f66c219-1c42-11e5-8a31-47a93a8a98eb'},
+    #  'clusters': []}
+
+    def list_services(self, cluster):
+        fn_args = dict()
+        if cluster:
+            fn_args['cluster'] = cluster
+        response = self.ecs.list_services(**fn_args)
+        relevant_response = dict(services=response['serviceArns'])
+        return relevant_response
+
+    def describe_services(self, cluster, services):
+        fn_args = dict()
+        if cluster:
+            fn_args['cluster'] = cluster
+        fn_args['services'] = services.split(",")
+        response = self.ecs.describe_services(**fn_args)
+        # wrap map() in list() so the result is JSON serializable on Python 3
+        relevant_response = dict(services=list(map(self.extract_service_from, response['services'])))
+        if 'failures' in response and len(response['failures']) > 0:
+            relevant_response['services_not_running'] = response['failures']
+        return relevant_response
+
+    def extract_service_from(self, service):
+        # some fields are datetime which is not JSON serializable
+        # make them strings
+        if 'deployments' in service:
+            for d in service['deployments']:
+                if 'createdAt' in d:
+                    d['createdAt'] = str(d['createdAt'])
+                if 'updatedAt' in d:
+                    d['updatedAt'] = str(d['updatedAt'])
+        if 'events' in service:
+            for e in service['events']:
+                if 'createdAt' in e:
+                    e['createdAt'] = str(e['createdAt'])
+        return service
+
+
+def main():
+
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        details=dict(required=False, type='bool', default=False),
+        cluster=dict(required=False, type='str'),
+        service=dict(required=False, type='str')
+    ))
+
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    if not HAS_BOTO:
+        module.fail_json(msg='boto is required.')
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 is required.')
+
+    show_details =
module.params.get('details', False) + + task_mgr = EcsServiceManager(module) + if show_details: + if 'service' not in module.params or not module.params['service']: + module.fail_json(msg="service must be specified for ecs_service_facts") + ecs_facts = task_mgr.describe_services(module.params['cluster'], module.params['service']) + else: + ecs_facts = task_mgr.list_services(module.params['cluster']) + + ecs_facts_result = dict(changed=False, ansible_facts=ecs_facts) + module.exit_json(**ecs_facts_result) + + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ecs_task.py b/cloud/amazon/ecs_task.py new file mode 100644 index 00000000000..a8ecc4dde48 --- /dev/null +++ b/cloud/amazon/ecs_task.py @@ -0,0 +1,329 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ecs_task +short_description: run, start or stop a task in ecs +description: + - Creates or deletes instances of task definitions. +version_added: "2.0" +author: Mark Chance(@Java1Guy) +requirements: [ json, boto, botocore, boto3 ] +options: + operation: + description: + - Which task operation to execute + required: True + choices: ['run', 'start', 'stop'] + cluster: + description: + - The name of the cluster to run the task on + required: False + task_definition: + description: + - The task definition to start or run + required: False + overrides: + description: + - A dictionary of values to pass to the new instances + required: False + count: + description: + - How many new instances to start + required: False + task: + description: + - The task to stop + required: False + container_instances: + description: + - The list of container instances on which to deploy the task + required: False + started_by: + description: + - A value showing who or what started the task (for informational purposes) + required: False +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Simple example of run task +- name: Run task + ecs_task: + operation: run + cluster: console-sample-app-static-cluster + task_definition: console-sample-app-static-taskdef + count: 1 + started_by: ansible_user + register: task_output + +# Simple example of start task + +- name: Start a task + ecs_task: + operation: start + cluster: console-sample-app-static-cluster + task_definition: console-sample-app-static-taskdef + task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" + container_instances: + - arn:aws:ecs:us-west-2:172139249013:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8 + started_by: ansible_user + register: task_output + +- name: Stop a task + ecs_task: + operation: stop + cluster: console-sample-app-static-cluster + task_definition: console-sample-app-static-taskdef + task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a" +''' +RETURN = ''' 
+task:
+    description: details about the task that was started
+    returned: success
+    type: complex
+    contains:
+        taskArn:
+            description: The Amazon Resource Name (ARN) that identifies the task.
+            returned: always
+            type: string
+        clusterArn:
+            description: The Amazon Resource Name (ARN) of the cluster that hosts the task.
+            returned: only when details is true
+            type: string
+        taskDefinitionArn:
+            description: The Amazon Resource Name (ARN) of the task definition.
+            returned: only when details is true
+            type: string
+        containerInstanceArn:
+            description: The Amazon Resource Name (ARN) of the container running the task.
+            returned: only when details is true
+            type: string
+        overrides:
+            description: The container overrides set for this task.
+            returned: only when details is true
+            type: list of complex
+        lastStatus:
+            description: The last recorded status of the task.
+            returned: only when details is true
+            type: string
+        desiredStatus:
+            description: The desired status of the task.
+            returned: only when details is true
+            type: string
+        containers:
+            description: The container details.
+            returned: only when details is true
+            type: list of complex
+        startedBy:
+            description: The user who started the task.
+            returned: only when details is true
+            type: string
+        stoppedReason:
+            description: The reason why the task was stopped.
+            returned: only when details is true
+            type: string
+        createdAt:
+            description: The timestamp of when the task was created.
+            returned: only when details is true
+            type: string
+        startedAt:
+            description: The timestamp of when the task was started.
+            returned: only when details is true
+            type: string
+        stoppedAt:
+            description: The timestamp of when the task was stopped.
+            returned: only when details is true
+            type: string
+'''
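For comparison with the module code below, the underlying boto3 call that C(operation=run) wraps looks roughly like this (a sketch only; the cluster and task definition names are taken from the examples above and are illustrative):

import boto3

ecs = boto3.client('ecs')
# start one copy of a registered task definition on the cluster scheduler
response = ecs.run_task(
    cluster='console-sample-app-static-cluster',        # illustrative
    taskDefinition='console-sample-app-static-taskdef',  # illustrative
    count=1,
    startedBy='ansible_user',
)
for task in response['tasks']:
    print(task['taskArn'], task['lastStatus'])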
+try:
+    import boto
+    import botocore
+    HAS_BOTO = True
+except ImportError:
+    HAS_BOTO = False
+
+try:
+    import boto3
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
+
+
+class EcsExecManager:
+    """Handles ECS Tasks"""
+
+    def __init__(self, module):
+        self.module = module
+
+        try:
+            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+            if not region:
+                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+            self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+        except boto.exception.NoAuthHandlerFound as e:
+            module.fail_json(msg="Can't authorize connection - %s" % str(e))
+
+    def list_tasks(self, cluster_name, service_name, status):
+        response = self.ecs.list_tasks(
+            cluster=cluster_name,
+            family=service_name,
+            desiredStatus=status
+        )
+        if len(response['taskArns']) > 0:
+            for c in response['taskArns']:
+                if c.endswith(service_name):
+                    return c
+        return None
+
+    def run_task(self, cluster, task_definition, overrides, count, startedBy):
+        if overrides is None:
+            overrides = dict()
+        response = self.ecs.run_task(
+            cluster=cluster,
+            taskDefinition=task_definition,
+            overrides=overrides,
+            count=count,
+            startedBy=startedBy)
+        # include tasks and failures
+        return response['tasks']
+
+    def start_task(self, cluster, task_definition, overrides, container_instances, startedBy):
+        args = dict()
+        if cluster:
+            args['cluster'] = cluster
+        if task_definition:
+            args['taskDefinition'] = task_definition
+        if overrides:
+            args['overrides'] = overrides
+        if container_instances:
+            args['containerInstances'] = container_instances
+        if startedBy:
+            args['startedBy'] = startedBy
+        response = self.ecs.start_task(**args)
+        # include tasks and failures
+        return response['tasks']
+
+    def stop_task(self, cluster, task):
+        response = self.ecs.stop_task(cluster=cluster, task=task)
+        return response['task']
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        operation=dict(required=True, choices=['run', 'start', 'stop']),
+        cluster=dict(required=False, type='str'),               # R S P
+        task_definition=dict(required=False, type='str'),       # R* S*
+        overrides=dict(required=False, type='dict'),            # R S
+        count=dict(required=False, type='int'),                 # R
+        task=dict(required=False, type='str'),                  # P*
+        container_instances=dict(required=False, type='list'),  # S*
+        started_by=dict(required=False, type='str')             # R S
+    ))
+
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    # Validate Requirements
+    if not HAS_BOTO:
+        module.fail_json(msg='boto is required.')
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 is required.')
+
+    # Validate Inputs
+    # the keys always exist in module.params, so test their values
+    if module.params['operation'] == 'run':
+        if module.params['task_definition'] is None:
+            module.fail_json(msg="To run a task, a task_definition must be specified")
+        task_to_list = module.params['task_definition']
+        status_type = "RUNNING"
+
+    if module.params['operation'] == 'start':
+        if module.params['task_definition'] is None:
+            module.fail_json(msg="To start a task, a task_definition must be specified")
+        if module.params['container_instances'] is None:
+            module.fail_json(msg="To start a task, container instances must be specified")
+        task_to_list = module.params['task']
+        status_type = "RUNNING"
+
+    if module.params['operation'] == 'stop':
+        if module.params['task'] is None:
+            module.fail_json(msg="To stop a task, a task must be specified")
+        if module.params['task_definition'] is None:
+            module.fail_json(msg="To stop a task, a task definition must be specified")
+        task_to_list = module.params['task_definition']
+        status_type = "STOPPED"
+
+    service_mgr = EcsExecManager(module)
+    existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)
+
+    results = dict(changed=False)
+    if module.params['operation'] == 'run':
+        if existing:
+            # TBD - validate the rest of the details
+            results['task'] = existing
+        else:
+            if not module.check_mode:
+                results['task'] = service_mgr.run_task(
+                    module.params['cluster'],
+                    module.params['task_definition'],
+                    module.params['overrides'],
+                    module.params['count'],
+                    module.params['started_by'])
+            results['changed'] = True
+
+    elif module.params['operation'] == 'start':
+        if existing:
+            # TBD - validate the rest of the details
+            results['task'] = existing
+        else:
+            if not module.check_mode:
+                results['task'] = service_mgr.start_task(
+                    module.params['cluster'],
+                    module.params['task_definition'],
+                    module.params['overrides'],
+                    module.params['container_instances'],
+                    module.params['started_by']
+                )
+            results['changed'] = True
+
+    elif module.params['operation'] == 'stop':
+        if existing:
+            results['task'] = existing
+        else:
+            if not module.check_mode:
+                # it exists, so we should delete it and mark changed.
+                # return info about the cluster deleted
+                results['task'] = service_mgr.stop_task(
+                    module.params['cluster'],
+                    module.params['task']
+                )
+            results['changed'] = True
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
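The idempotency check in ecs_task above keys off list_tasks filtered by family and desired status. A condensed sketch of that lookup with plain boto3 (not part of the patch; argument values are illustrative):

import boto3

def list_matching_task_arns(cluster, family, status='RUNNING'):
    ecs = boto3.client('ecs')
    resp = ecs.list_tasks(cluster=cluster, family=family, desiredStatus=status)
    # an empty list means no task of that family is in the requested state yet
    return resp['taskArns']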
diff --git a/cloud/amazon/ecs_taskdefinition.py b/cloud/amazon/ecs_taskdefinition.py
new file mode 100644
index 00000000000..4ee9003aab1
--- /dev/null
+++ b/cloud/amazon/ecs_taskdefinition.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ecs_taskdefinition
+short_description: register a task definition in ecs
+description:
+    - Creates or terminates task definitions
+version_added: "2.0"
+author: Mark Chance (@Java1Guy)
+requirements: [ json, boto, botocore, boto3 ]
+options:
+    state:
+        description:
+            - State whether the task definition should exist or be deleted
+        required: true
+        choices: ['present', 'absent']
+    arn:
+        description:
+            - The ARN of the task definition to delete
+        required: false
+    family:
+        description:
+            - A name that would be given to the task definition
+        required: false
+    revision:
+        description:
+            - A revision number for the task definition
+        required: False
+        type: int
+    containers:
+        description:
+            - A list of containers definitions
+        required: False
+        type: list of dicts with container definitions
+    volumes:
+        description:
+            - A list of names of volumes to be attached
+        required: False
+        type: list of name
+extends_documentation_fragment:
+    - aws
+    - ec2
+'''
+
+EXAMPLES = '''
+- name: "Create task definition"
+  ecs_taskdefinition:
+    containers:
+    - name: simple-app
+      cpu: 10
+      essential: true
+      image: "httpd:2.4"
+      memory: 300
+      mountPoints:
+      - containerPath: /usr/local/apache2/htdocs
+        sourceVolume: my-vol
+      portMappings:
+      - containerPort: 80
+        hostPort: 80
+    - name: busybox
+      command:
+        - "/bin/sh -c \"while true; do echo '<html> <head> <title>Amazon ECS Sample App</title> <style>body {margin-top: 40px; background-color: #333;} </style> </head><body> <div style=color:white;text-align:center> <h1>Amazon ECS Sample App</h1> <h2>Congratulations!</h2> <p>Your application is now running on a container in Amazon ECS.</p>' > top; /bin/date > date ; echo '</div></body></html>' > bottom; cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done\""
+      cpu: 10
+      entryPoint:
+      - sh
+      - "-c"
+      essential: false
+      image: busybox
+      memory: 200
+      volumesFrom:
+      - sourceContainer: simple-app
+    volumes:
+    - name: my-vol
+    family: test-cluster-taskdef
+    state: present
+  register: task_output
+'''
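The play above maps directly onto a single register_task_definition call. A hedged boto3 equivalent for the simple-app container (values copied from the example, trimmed for brevity; not part of the patch):

import boto3

ecs = boto3.client('ecs')
response = ecs.register_task_definition(
    family='test-cluster-taskdef',
    containerDefinitions=[{
        'name': 'simple-app',
        'image': 'httpd:2.4',
        'cpu': 10,
        'memory': 300,
        'essential': True,
        'portMappings': [{'containerPort': 80, 'hostPort': 80}],
        'mountPoints': [{'sourceVolume': 'my-vol',
                         'containerPath': '/usr/local/apache2/htdocs'}],
    }],
    volumes=[{'name': 'my-vol'}],
)
# each successful registration creates a new revision in the family
print(response['taskDefinition']['taskDefinitionArn'])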
+RETURN = '''
+taskdefinition:
+    description: a reflection of the input parameters
+    type: dict inputs plus revision, status, taskDefinitionArn
+'''
+try:
+    import boto
+    import botocore
+    HAS_BOTO = True
+except ImportError:
+    HAS_BOTO = False
+
+try:
+    import boto3
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
+
+
+class EcsTaskManager:
+    """Handles ECS Tasks"""
+
+    def __init__(self, module):
+        self.module = module
+
+        try:
+            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+            if not region:
+                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+            self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+        except boto.exception.NoAuthHandlerFound as e:
+            module.fail_json(msg="Can't authorize connection - %s" % str(e))
+
+    def describe_task(self, task_name):
+        try:
+            response = self.ecs.describe_task_definition(taskDefinition=task_name)
+            return response['taskDefinition']
+        except botocore.exceptions.ClientError:
+            return None
+
+    def register_task(self, family, container_definitions, volumes):
+        response = self.ecs.register_task_definition(family=family,
+                                                     containerDefinitions=container_definitions,
+                                                     volumes=volumes)
+        return response['taskDefinition']
+
+    def describe_task_definitions(self, family):
+        data = {
+            "taskDefinitionArns": [],
+            "nextToken": None
+        }
+
+        def fetch():
+            # Boto3 is weird about params passed, so only pass nextToken if we have a value
+            params = {
+                'familyPrefix': family
+            }
+
+            if data['nextToken']:
+                params['nextToken'] = data['nextToken']
+
+            result = self.ecs.list_task_definitions(**params)
+            data['taskDefinitionArns'] += result['taskDefinitionArns']
+            data['nextToken'] = result.get('nextToken', None)
+            return data['nextToken'] is not None
+
+        # Fetch all the arns, possibly across multiple pages
+        while fetch():
+            pass
+
+        # Return the full descriptions of the task definitions, sorted ascending by revision
+        return list(sorted([self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition'] for arn in data['taskDefinitionArns']],
+                           key=lambda td: td['revision']))
+
+    def deregister_task(self, taskArn):
+        response = self.ecs.deregister_task_definition(taskDefinition=taskArn)
+        return response['taskDefinition']
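describe_task_definitions above drains list_task_definitions page by page through the fetch() closure before sorting by revision. The same traversal as a flat loop, for reference (a sketch; the family name is illustrative):

import boto3

def all_revision_arns(family='test-cluster-taskdef'):
    ecs = boto3.client('ecs')
    arns, token = [], None
    while True:
        params = {'familyPrefix': family}
        if token:
            params['nextToken'] = token  # only send the marker when present
        page = ecs.list_task_definitions(**params)
        arns += page['taskDefinitionArns']
        token = page.get('nextToken')
        if token is None:
            return arns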
+def main():
+
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        state=dict(required=True, choices=['present', 'absent']),
+        arn=dict(required=False, type='str'),
+        family=dict(required=False, type='str'),
+        revision=dict(required=False, type='int'),
+        containers=dict(required=False, type='list'),
+        volumes=dict(required=False, type='list')
+    ))
+
+    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    if not HAS_BOTO:
+        module.fail_json(msg='boto is required.')
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 is required.')
+
+    task_to_describe = None
+    task_mgr = EcsTaskManager(module)
+    results = dict(changed=False)
+
+    if module.params['state'] == 'present':
+        if 'containers' not in module.params or not module.params['containers']:
+            module.fail_json(msg="To use task definitions, a list of containers must be specified")
+
+        if 'family' not in module.params or not module.params['family']:
+            module.fail_json(msg="To use task definitions, a family must be specified")
+
+        family = module.params['family']
+        existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family'])
+
+        if 'revision' in module.params and module.params['revision']:
+            # The definition specifies revision. We must guarantee that an active revision of that number will result from this.
+            revision = int(module.params['revision'])
+
+            # A revision has been explicitly specified. Attempt to locate a matching revision
+            tasks_defs_for_revision = [td for td in existing_definitions_in_family if td['revision'] == revision]
+            existing = tasks_defs_for_revision[0] if len(tasks_defs_for_revision) > 0 else None
+
+            if existing and existing['status'] != "ACTIVE":
+                # We cannot reactivate an inactive revision
+                module.fail_json(msg="A task in family '%s' already exists for revision %d, but it is inactive" % (family, revision))
+            elif not existing:
+                if len(existing_definitions_in_family) == 0 and revision != 1:
+                    module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision)
+                elif existing_definitions_in_family[-1]['revision'] + 1 != revision:
+                    module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" % (revision, existing_definitions_in_family[-1]['revision'] + 1))
+        else:
+            existing = None
+
+            def _right_has_values_of_left(left, right):
+                # Make sure the values are equivalent for everything left has
+                for k, v in left.items():
+                    if not ((not v and (k not in right or not right[k])) or (k in right and v == right[k])):
+                        # We don't care about list ordering because ECS can change things
+                        if isinstance(v, list) and k in right:
+                            left_list = v
+                            right_list = right[k] or []
+
+                            if len(left_list) != len(right_list):
+                                return False
+
+                            for list_val in left_list:
+                                if list_val not in right_list:
+                                    return False
+                        else:
+                            return False
+
+                # Make sure right doesn't have anything that left doesn't
+                for k, v in right.items():
+                    if v and k not in left:
+                        return False
+
+                return True
+
+            def _task_definition_matches(requested_volumes, requested_containers, existing_task_definition):
+                if existing_task_definition['status'] != "ACTIVE":
+                    return None
+
+                existing_volumes = existing_task_definition.get('volumes', []) or []
+
+                if len(requested_volumes) != len(existing_volumes):
+                    # Nope.
+                    return None
+
+                if len(requested_volumes) > 0:
+                    for requested_vol in requested_volumes:
+                        found = False
+
+                        for actual_vol in existing_volumes:
+                            if _right_has_values_of_left(requested_vol, actual_vol):
+                                found = True
+                                break
+
+                        if not found:
+                            return None
+
+                existing_containers = existing_task_definition.get('containerDefinitions', []) or []
+
+                if len(requested_containers) != len(existing_containers):
+                    # Nope.
+                    return None
+
+                for requested_container in requested_containers:
+                    found = False
+
+                    for actual_container in existing_containers:
+                        if _right_has_values_of_left(requested_container, actual_container):
+                            found = True
+                            break
+
+                    if not found:
+                        return None
+
+                return existing_task_definition
+
+            # No revision explicitly specified.
Attempt to find an active, matching revision that has all the properties requested + for td in existing_definitions_in_family: + requested_volumes = module.params.get('volumes', []) or [] + requested_containers = module.params.get('containers', []) or [] + existing = _task_definition_matches(requested_volumes, requested_containers, td) + + if existing: + break + + if existing: + # Awesome. Have an existing one. Nothing to do. + results['taskdefinition'] = existing + else: + if not module.check_mode: + # Doesn't exist. create it. + volumes = module.params.get('volumes', []) or [] + results['taskdefinition'] = task_mgr.register_task(module.params['family'], + module.params['containers'], volumes) + results['changed'] = True + + elif module.params['state'] == 'absent': + # When de-registering a task definition, we can specify the ARN OR the family and revision. + if module.params['state'] == 'absent': + if 'arn' in module.params and module.params['arn'] is not None: + task_to_describe = module.params['arn'] + elif 'family' in module.params and module.params['family'] is not None and 'revision' in module.params and \ + module.params['revision'] is not None: + task_to_describe = module.params['family'] + ":" + str(module.params['revision']) + else: + module.fail_json(msg="To use task definitions, an arn or family and revision must be specified") + + existing = task_mgr.describe_task(task_to_describe) + + if not existing: + pass + else: + # It exists, so we should delete it and mark changed. Return info about the task definition deleted + results['taskdefinition'] = existing + if 'status' in existing and existing['status'] == "INACTIVE": + results['changed'] = False + else: + if not module.check_mode: + task_mgr.deregister_task(task_to_describe) + results['changed'] = True + + module.exit_json(**results) + + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/efs.py b/cloud/amazon/efs.py new file mode 100644 index 00000000000..1def68daedc --- /dev/null +++ b/cloud/amazon/efs.py @@ -0,0 +1,630 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: efs +short_description: create and maintain EFS file systems +description: + - Module allows create, search and destroy Amazon EFS file systems +version_added: "2.2" +requirements: [ boto3 ] +author: + - "Ryan Sydnor (@ryansydnor)" + - "Artem Kazakov (@akazakov)" +options: + state: + description: + - Allows to create, search and destroy Amazon EFS file system + required: false + default: 'present' + choices: ['present', 'absent'] + name: + description: + - Creation Token of Amazon EFS file system. Required for create. Either name or ID required for delete. + required: false + default: None + id: + description: + - ID of Amazon EFS. Either name or ID required for delete. 
+ required: false + default: None + performance_mode: + description: + - File system's performance mode to use. Only takes effect during creation. + required: false + default: 'general_purpose' + choices: ['general_purpose', 'max_io'] + tags: + description: + - "List of tags of Amazon EFS. Should be defined as dictionary + In case of 'present' state with list of tags and existing EFS (matched by 'name'), tags of EFS will be replaced with provided data." + required: false + default: None + targets: + description: + - "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes: + - subnet_id - Mandatory. The ID of the subnet to add the mount target in. + - ip_address - Optional. A valid IPv4 address within the address range of the specified subnet. + - security_groups - Optional. List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as subnet specified + This data may be modified for existing EFS using state 'present' and new list of mount targets." + required: false + default: None + wait: + description: + - "In case of 'present' state should wait for EFS 'available' life cycle state (of course, if current state not 'deleting' or 'deleted') + In case of 'absent' state should wait for EFS 'deleted' life cycle state" + required: false + default: "no" + choices: ["yes", "no"] + wait_timeout: + description: + - How long the module should wait (in seconds) for desired state before returning. Zero means wait as long as necessary. + required: false + default: 0 +extends_documentation_fragment: + - aws +''' + +EXAMPLES = ''' +# EFS provisioning +- efs: + state: present + name: myTestEFS + tags: + name: myTestNameTag + purpose: file-storage + targets: + - subnet_id: subnet-748c5d03 + security_groups: [ "sg-1a2b3c4d" ] + +# Modifying EFS data +- efs: + state: present + name: myTestEFS + tags: + name: myAnotherTestTag + targets: + - subnet_id: subnet-7654fdca + security_groups: [ "sg-4c5d6f7a" ] + +# Deleting EFS +- efs: + state: absent + name: myTestEFS +''' + +RETURN = ''' +creation_time: + description: timestamp of creation date + returned: + type: datetime + sample: 2015-11-16 07:30:57-05:00 +creation_token: + description: EFS creation token + returned: + type: UUID + sample: console-88609e04-9a0e-4a2e-912c-feaa99509961 +file_system_id: + description: ID of the file system + returned: + type: unique ID + sample: fs-xxxxxxxx +life_cycle_state: + description: state of the EFS file system + returned: + type: str + sample: creating, available, deleting, deleted +mount_point: + description: url of file system + returned: + type: str + sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/ +mount_targets: + description: list of mount targets + returned: + type: list of dicts + sample: + [ + { + "file_system_id": "fs-a7ad440e", + "ip_address": "172.31.17.173", + "life_cycle_state": "available", + "mount_target_id": "fsmt-d8907871", + "network_interface_id": "eni-6e387e26", + "owner_id": "740748460359", + "security_groups": [ + "sg-a30b22c6" + ], + "subnet_id": "subnet-e265c895" + }, + ... 
+ ] +name: + description: name of the file system + returned: + type: str + sample: my-efs +number_of_mount_targets: + description: the number of targets mounted + returned: + type: int + sample: 3 +owner_id: + description: AWS account ID of EFS owner + returned: + type: str + sample: XXXXXXXXXXXX +size_in_bytes: + description: size of the file system in bytes as of a timestamp + returned: + type: dict + sample: + { + "timestamp": "2015-12-21 13:59:59-05:00", + "value": 12288 + } +performance_mode: + description: performance mode of the file system + returned: + type: str + sample: "generalPurpose" +tags: + description: tags on the efs instance + returned: + type: dict + sample: + { + "name": "my-efs", + "key": "Value" + } + +''' + +import sys +from time import sleep +from time import time as timestamp +from collections import defaultdict + +try: + from botocore.exceptions import ClientError + import boto3 + HAS_BOTO3 = True +except ImportError as e: + HAS_BOTO3 = False + + +class EFSConnection(object): + + DEFAULT_WAIT_TIMEOUT_SECONDS = 0 + + STATE_CREATING = 'creating' + STATE_AVAILABLE = 'available' + STATE_DELETING = 'deleting' + STATE_DELETED = 'deleted' + + def __init__(self, module, region, **aws_connect_params): + try: + self.connection = boto3_conn(module, conn_type='client', + resource='efs', region=region, + **aws_connect_params) + except Exception as e: + module.fail_json(msg="Failed to connect to AWS: %s" % str(e)) + + self.region = region + self.wait = module.params.get('wait') + self.wait_timeout = module.params.get('wait_timeout') + + def get_file_systems(self, **kwargs): + """ + Returns generator of file systems including all attributes of FS + """ + items = iterate_all( + 'FileSystems', + self.connection.describe_file_systems, + **kwargs + ) + for item in items: + item['CreationTime'] = str(item['CreationTime']) + """ + Suffix of network path to be used as NFS device for mount. 
More detail here: + http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html + """ + item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region) + if 'Timestamp' in item['SizeInBytes']: + item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp']) + if item['LifeCycleState'] == self.STATE_AVAILABLE: + item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId']) + item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId'])) + else: + item['Tags'] = {} + item['MountTargets'] = [] + yield item + + def get_tags(self, **kwargs): + """ + Returns tag list for selected instance of EFS + """ + tags = iterate_all( + 'Tags', + self.connection.describe_tags, + **kwargs + ) + return dict((tag['Key'], tag['Value']) for tag in tags) + + def get_mount_targets(self, **kwargs): + """ + Returns mount targets for selected instance of EFS + """ + targets = iterate_all( + 'MountTargets', + self.connection.describe_mount_targets, + **kwargs + ) + for target in targets: + if target['LifeCycleState'] == self.STATE_AVAILABLE: + target['SecurityGroups'] = list(self.get_security_groups( + MountTargetId=target['MountTargetId'] + )) + else: + target['SecurityGroups'] = [] + yield target + + def get_security_groups(self, **kwargs): + """ + Returns security groups for selected instance of EFS + """ + return iterate_all( + 'SecurityGroups', + self.connection.describe_mount_target_security_groups, + **kwargs + ) + + def get_file_system_id(self, name): + """ + Returns ID of instance by instance name + """ + info = first_or_default(iterate_all( + 'FileSystems', + self.connection.describe_file_systems, + CreationToken=name + )) + return info and info['FileSystemId'] or None + + def get_file_system_state(self, name, file_system_id=None): + """ + Returns state of filesystem by EFS id/name + """ + info = first_or_default(iterate_all( + 'FileSystems', + self.connection.describe_file_systems, + CreationToken=name, + FileSystemId=file_system_id + )) + return info and info['LifeCycleState'] or self.STATE_DELETED + + def get_mount_targets_in_state(self, file_system_id, states=None): + """ + Returns states of mount targets of selected EFS with selected state(s) (optional) + """ + targets = iterate_all( + 'MountTargets', + self.connection.describe_mount_targets, + FileSystemId=file_system_id + ) + + if states: + if not isinstance(states, list): + states = [states] + targets = filter(lambda target: target['LifeCycleState'] in states, targets) + + return list(targets) + + def create_file_system(self, name, performance_mode): + """ + Creates new filesystem with selected name + """ + changed = False + state = self.get_file_system_state(name) + if state in [self.STATE_DELETING, self.STATE_DELETED]: + wait_for( + lambda: self.get_file_system_state(name), + self.STATE_DELETED + ) + self.connection.create_file_system(CreationToken=name, PerformanceMode=performance_mode) + changed = True + + # we always wait for the state to be available when creating. 
+ # if we try to take any actions on the file system before it's available + # we'll throw errors + wait_for( + lambda: self.get_file_system_state(name), + self.STATE_AVAILABLE, + self.wait_timeout + ) + + return changed + + def converge_file_system(self, name, tags, targets): + """ + Change attributes (mount targets and tags) of filesystem by name + """ + result = False + fs_id = self.get_file_system_id(name) + + if tags is not None: + tags_to_create, _, tags_to_delete = dict_diff(self.get_tags(FileSystemId=fs_id), tags) + + if tags_to_delete: + self.connection.delete_tags( + FileSystemId=fs_id, + TagKeys=[item[0] for item in tags_to_delete] + ) + result = True + + if tags_to_create: + self.connection.create_tags( + FileSystemId=fs_id, + Tags=[{'Key': item[0], 'Value': item[1]} for item in tags_to_create] + ) + result = True + + if targets is not None: + incomplete_states = [self.STATE_CREATING, self.STATE_DELETING] + wait_for( + lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)), + 0 + ) + + index_by_subnet_id = lambda items: dict((item['SubnetId'], item) for item in items) + + current_targets = index_by_subnet_id(self.get_mount_targets(FileSystemId=fs_id)) + targets = index_by_subnet_id(targets) + + targets_to_create, intersection, targets_to_delete = dict_diff(current_targets, + targets, True) + + """ To modify mount target it should be deleted and created again """ + changed = filter( + lambda sid: not targets_equal(['SubnetId', 'IpAddress', 'NetworkInterfaceId'], + current_targets[sid], targets[sid]), intersection) + targets_to_delete = list(targets_to_delete) + changed + targets_to_create = list(targets_to_create) + changed + + if targets_to_delete: + for sid in targets_to_delete: + self.connection.delete_mount_target( + MountTargetId=current_targets[sid]['MountTargetId'] + ) + wait_for( + lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)), + 0 + ) + result = True + + if targets_to_create: + for sid in targets_to_create: + self.connection.create_mount_target( + FileSystemId=fs_id, + **targets[sid] + ) + wait_for( + lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)), + 0, + self.wait_timeout + ) + result = True + + security_groups_to_update = filter( + lambda sid: 'SecurityGroups' in targets[sid] and + current_targets[sid]['SecurityGroups'] != targets[sid]['SecurityGroups'], + intersection + ) + + if security_groups_to_update: + for sid in security_groups_to_update: + self.connection.modify_mount_target_security_groups( + MountTargetId=current_targets[sid]['MountTargetId'], + SecurityGroups=targets[sid]['SecurityGroups'] + ) + result = True + + return result + + def delete_file_system(self, name, file_system_id=None): + """ + Removes EFS instance by id/name + """ + result = False + state = self.get_file_system_state(name, file_system_id) + if state in [self.STATE_CREATING, self.STATE_AVAILABLE]: + wait_for( + lambda: self.get_file_system_state(name), + self.STATE_AVAILABLE + ) + if not file_system_id: + file_system_id = self.get_file_system_id(name) + self.delete_mount_targets(file_system_id) + self.connection.delete_file_system(FileSystemId=file_system_id) + result = True + + if self.wait: + wait_for( + lambda: self.get_file_system_state(name), + self.STATE_DELETED, + self.wait_timeout + ) + + return result + + def delete_mount_targets(self, file_system_id): + """ + Removes mount targets by EFS id + """ + wait_for( + lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_CREATING)), + 0 + ) + + targets = 
self.get_mount_targets_in_state(file_system_id, self.STATE_AVAILABLE)
+        for target in targets:
+            self.connection.delete_mount_target(MountTargetId=target['MountTargetId'])
+
+        wait_for(
+            lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_DELETING)),
+            0
+        )
+
+        return len(targets) > 0
+
+
+def iterate_all(attr, map_method, **kwargs):
+    """
+    Method creates iterator from boto result set
+    """
+    args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
+    wait = 1
+    while True:
+        try:
+            data = map_method(**args)
+            for elm in data[attr]:
+                yield elm
+            if 'NextMarker' in data:
+                args['Marker'] = data['NextMarker']
+                continue
+            break
+        except ClientError as e:
+            if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
+                sleep(wait)
+                wait = wait * 2
+                continue
+            # any other client error is fatal; re-raise instead of retrying forever
+            raise
+
+
+def targets_equal(keys, a, b):
+    """
+    Method compares two mount targets by specified attributes
+    """
+    for key in keys:
+        if key in b and a[key] != b[key]:
+            return False
+
+    return True
+
+
+def dict_diff(dict1, dict2, by_key=False):
+    """
+    Helper method to calculate difference of two dictionaries
+    """
+    keys1 = set(dict1.keys() if by_key else dict1.items())
+    keys2 = set(dict2.keys() if by_key else dict2.items())
+
+    intersection = keys1 & keys2
+
+    return keys2 ^ intersection, intersection, keys1 ^ intersection
+
+
+def first_or_default(items, default=None):
+    """
+    Helper method to fetch first element of list (if exists)
+    """
+    for item in items:
+        return item
+    return default
+
+
+def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS):
+    """
+    Helper method to wait for desired value returned by callback method
+    """
+    wait_start = timestamp()
+    while True:
+        if callback() != value:
+            if timeout != 0 and (timestamp() - wait_start > timeout):
+                raise RuntimeError('Wait timeout exceeded (' + str(timeout) + ' sec)')
+            else:
+                sleep(5)
+            continue
+        break
+
+
+def main():
+    """
+    Module action handler
+    """
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        state=dict(required=False, type='str', choices=["present", "absent"], default="present"),
+        id=dict(required=False, type='str', default=None),
+        name=dict(required=False, type='str', default=None),
+        tags=dict(required=False, type="dict", default={}),
+        targets=dict(required=False, type="list", default=[]),
+        performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"),
+        wait=dict(required=False, type="bool", default=False),
+        wait_timeout=dict(required=False, type="int", default=0)
+    ))
+
+    module = AnsibleModule(argument_spec=argument_spec)
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 required for this module')
+
+    region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
+    connection = EFSConnection(module, region, **aws_connect_params)
+
+    name = module.params.get('name')
+    fs_id = module.params.get('id')
+    tags = module.params.get('tags')
+    target_translations = {
+        'ip_address': 'IpAddress',
+        'security_groups': 'SecurityGroups',
+        'subnet_id': 'SubnetId'
+    }
+    targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')]
+    performance_mode_translations = {
+        'general_purpose': 'generalPurpose',
+        'max_io': 'maxIO'
+    }
+    performance_mode = performance_mode_translations[module.params.get('performance_mode')]
+    changed = False
+
+    state = str(module.params.get('state')).lower()
+
+    if state == 'present':
+        if not name:
+
module.fail_json(msg='Name parameter is required for create') + + changed = connection.create_file_system(name, performance_mode) + changed = connection.converge_file_system(name=name, tags=tags, targets=targets) or changed + result = first_or_default(connection.get_file_systems(CreationToken=name)) + + elif state == 'absent': + if not name and not fs_id: + module.fail_json(msg='Either name or id parameter is required for delete') + + changed = connection.delete_file_system(name, fs_id) + result = None + if result: + result = camel_dict_to_snake_dict(result) + module.exit_json(changed=changed, efs=result) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/efs_facts.py b/cloud/amazon/efs_facts.py new file mode 100644 index 00000000000..aa7adf8bee1 --- /dev/null +++ b/cloud/amazon/efs_facts.py @@ -0,0 +1,379 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: efs_facts +short_description: Get information about Amazon EFS file systems +description: + - Module searches Amazon EFS file systems +version_added: "2.2" +requirements: [ boto3 ] +author: + - "Ryan Sydnor (@ryansydnor)" +options: + name: + description: + - Creation Token of Amazon EFS file system. + required: false + default: None + id: + description: + - ID of Amazon EFS. + required: false + default: None + tags: + description: + - List of tags of Amazon EFS. Should be defined as dictionary + required: false + default: None + targets: + description: + - "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes: + - SubnetId - Mandatory. The ID of the subnet to add the mount target in. + - IpAddress - Optional. A valid IPv4 address within the address range of the specified subnet. + - SecurityGroups - Optional. List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as subnet specified." 
+ required: false + default: None +extends_documentation_fragment: + - aws +''' + +EXAMPLES = ''' +# find all existing efs +- efs_facts: + register: result + +- efs_facts: + name: myTestNameTag + +- efs_facts: + id: fs-1234abcd + +# Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a' +- efs_facts: + tags: + name: myTestNameTag + targets: + - subnet-1a2b3c4d + - sg-4d3c2b1a +''' + +RETURN = ''' +creation_time: + description: timestamp of creation date + returned: + type: datetime + sample: 2015-11-16 07:30:57-05:00 +creation_token: + description: EFS creation token + returned: + type: UUID + sample: console-88609e04-9a0e-4a2e-912c-feaa99509961 +file_system_id: + description: ID of the file system + returned: + type: unique ID + sample: fs-xxxxxxxx +life_cycle_state: + description: state of the EFS file system + returned: + type: str + sample: creating, available, deleting, deleted +mount_point: + description: url of file system + returned: + type: str + sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/ +mount_targets: + description: list of mount targets + returned: + type: list of dicts + sample: + [ + { + "file_system_id": "fs-a7ad440e", + "ip_address": "172.31.17.173", + "life_cycle_state": "available", + "mount_target_id": "fsmt-d8907871", + "network_interface_id": "eni-6e387e26", + "owner_id": "740748460359", + "security_groups": [ + "sg-a30b22c6" + ], + "subnet_id": "subnet-e265c895" + }, + ... + ] +name: + description: name of the file system + returned: + type: str + sample: my-efs +number_of_mount_targets: + description: the number of targets mounted + returned: + type: int + sample: 3 +owner_id: + description: AWS account ID of EFS owner + returned: + type: str + sample: XXXXXXXXXXXX +size_in_bytes: + description: size of the file system in bytes as of a timestamp + returned: + type: dict + sample: + { + "timestamp": "2015-12-21 13:59:59-05:00", + "value": 12288 + } +performance_mode: + description: performance mode of the file system + returned: + type: str + sample: "generalPurpose" +tags: + description: tags on the efs instance + returned: + type: dict + sample: + { + "name": "my-efs", + "key": "Value" + } + +''' + + +from time import sleep +from collections import defaultdict + +try: + from botocore.exceptions import ClientError + import boto3 + HAS_BOTO3 = True +except ImportError as e: + HAS_BOTO3 = False + +class EFSConnection(object): + STATE_CREATING = 'creating' + STATE_AVAILABLE = 'available' + STATE_DELETING = 'deleting' + STATE_DELETED = 'deleted' + + def __init__(self, module, region, **aws_connect_params): + try: + self.connection = boto3_conn(module, conn_type='client', + resource='efs', region=region, + **aws_connect_params) + except Exception as e: + module.fail_json(msg="Failed to connect to AWS: %s" % str(e)) + + self.region = region + + def get_file_systems(self, **kwargs): + """ + Returns generator of file systems including all attributes of FS + """ + items = iterate_all( + 'FileSystems', + self.connection.describe_file_systems, + **kwargs + ) + for item in items: + item['CreationTime'] = str(item['CreationTime']) + """ + Suffix of network path to be used as NFS device for mount. 
More detail here:
+            http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
+            """
+            item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
+            if 'Timestamp' in item['SizeInBytes']:
+                item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
+            if item['LifeCycleState'] == self.STATE_AVAILABLE:
+                item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
+                item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
+            else:
+                item['Tags'] = {}
+                item['MountTargets'] = []
+            yield item
+
+    def get_tags(self, **kwargs):
+        """
+        Returns tag list for selected instance of EFS
+        """
+        tags = iterate_all(
+            'Tags',
+            self.connection.describe_tags,
+            **kwargs
+        )
+        return dict((tag['Key'], tag['Value']) for tag in tags)
+
+    def get_mount_targets(self, **kwargs):
+        """
+        Returns mount targets for selected instance of EFS
+        """
+        targets = iterate_all(
+            'MountTargets',
+            self.connection.describe_mount_targets,
+            **kwargs
+        )
+        for target in targets:
+            if target['LifeCycleState'] == self.STATE_AVAILABLE:
+                target['SecurityGroups'] = list(self.get_security_groups(
+                    MountTargetId=target['MountTargetId']
+                ))
+            else:
+                target['SecurityGroups'] = []
+            yield target
+
+    def get_security_groups(self, **kwargs):
+        """
+        Returns security groups for selected instance of EFS
+        """
+        return iterate_all(
+            'SecurityGroups',
+            self.connection.describe_mount_target_security_groups,
+            **kwargs
+        )
+
+
+def iterate_all(attr, map_method, **kwargs):
+    """
+    Method creates iterator from boto result set
+    """
+    args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
+    wait = 1
+    while True:
+        try:
+            data = map_method(**args)
+            for elm in data[attr]:
+                yield elm
+            if 'NextMarker' in data:
+                args['Marker'] = data['NextMarker']
+                continue
+            break
+        except ClientError as e:
+            if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
+                sleep(wait)
+                wait = wait * 2
+                continue
+            # any other client error is fatal; re-raise instead of retrying forever
+            raise
+
+
+def prefix_to_attr(attr_id):
+    """
+    Helper method to convert ID prefix to mount target attribute
+    """
+    attr_by_prefix = {
+        'fsmt-': 'MountTargetId',
+        'subnet-': 'SubnetId',
+        'eni-': 'NetworkInterfaceId',
+        'sg-': 'SecurityGroups'
+    }
+    prefix = first_or_default(filter(
+        lambda pref: str(attr_id).startswith(pref),
+        attr_by_prefix.keys()
+    ))
+    if prefix:
+        return attr_by_prefix[prefix]
+    return 'IpAddress'
+
+
+def first_or_default(items, default=None):
+    """
+    Helper method to fetch first element of list (if exists)
+    """
+    for item in items:
+        return item
+    return default
+
+
+def has_tags(available, required):
+    """
+    Helper method to determine if tag requested already exists
+    """
+    for key, value in required.items():
+        if key not in available or value != available[key]:
+            return False
+    return True
+
+
+def has_targets(available, required):
+    """
+    Helper method to determine if mount target requested already exists
+    """
+    grouped = group_list_of_dict(available)
+    for (value, field) in required:
+        if field not in grouped or value not in grouped[field]:
+            return False
+    return True
+
+
+def group_list_of_dict(array):
+    """
+    Helper method to group list of dict to dict with all possible values
+    """
+    result = defaultdict(list)
+    for item in array:
+        for key, value in item.items():
+            result[key] += value if isinstance(value, list) else [value]
+    return result
+
+
+def main():
+    """
+    Module action handler
+    """
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        id=dict(required=False,
type='str', default=None), + name=dict(required=False, type='str', default=None), + tags=dict(required=False, type="dict", default={}), + targets=dict(required=False, type="list", default=[]) + )) + + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + region, _, aws_connect_params = get_aws_connection_info(module, boto3=True) + connection = EFSConnection(module, region, **aws_connect_params) + + name = module.params.get('name') + fs_id = module.params.get('id') + tags = module.params.get('tags') + targets = module.params.get('targets') + + file_systems_info = connection.get_file_systems(FileSystemId=fs_id, CreationToken=name) + + if tags: + file_systems_info = filter(lambda item: has_tags(item['Tags'], tags), file_systems_info) + + if targets: + targets = [(item, prefix_to_attr(item)) for item in targets] + file_systems_info = filter(lambda item: + has_targets(item['MountTargets'], targets), file_systems_info) + + file_systems_info = [camel_dict_to_snake_dict(x) for x in file_systems_info] + module.exit_json(changed=False, ansible_facts={'efs': file_systems_info}) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/execute_lambda.py b/cloud/amazon/execute_lambda.py new file mode 100644 index 00000000000..676d3c5e30b --- /dev/null +++ b/cloud/amazon/execute_lambda.py @@ -0,0 +1,287 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: execute_lambda +short_description: Execute an AWS Lambda function +description: + - This module executes AWS Lambda functions, allowing synchronous and asynchronous + invocation. +version_added: "2.2" +extends_documentation_fragment: + - aws +author: "Ryan Scott Brown (@ryansb) " +requirements: + - python >= 2.6 + - boto3 +notes: + - Async invocation will always return an empty C(output) key. + - Synchronous invocation may result in a function timeout, resulting in an + empty C(output) key. +options: + name: + description: + - The name of the function to be invoked. This can only be used for + invocations within the calling account. To invoke a function in another + account, use I(function_arn) to specify the full ARN. + required: false + default: None + function_arn: + description: + - The name of the function to be invoked + required: false + default: None + tail_log: + description: + - If C(tail_log=true), the result of the task will include the last 4 KB + of the CloudWatch log for the function execution. Log tailing only + works if you use synchronous invocation C(wait=true). This is usually + used for development or testing Lambdas. + required: false + default: false + wait: + description: + - Whether to wait for the function results or not. 
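Synchronous invocation maps to the C(RequestResponse) invocation type, while C(wait=false) sends a fire-and-forget C(Event) invocation.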
If I(wait) is false,
+        the task will not return any results. To wait for the Lambda function
+        to complete, set C(wait=true) and the result will be available in the
+        I(output) key.
+    required: false
+    default: true
+  dry_run:
+    description:
+      - Do not *actually* invoke the function. A C(DryRun) call will check that
+        the caller has permissions to call the function, especially for
+        checking cross-account permissions.
+    required: false
+    default: false
+  version_qualifier:
+    description:
+      - Which version/alias of the function to run. This defaults to the
+        C(LATEST) revision, but can be set to any existing version or alias.
+        See https://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html
+        for details.
+    required: false
+    default: LATEST
+  payload:
+    description:
+      - A dictionary in any form to be provided as input to the Lambda function.
+    required: false
+    default: {}
+'''
+
+EXAMPLES = '''
+- execute_lambda:
+    name: test-function
+    # the payload is automatically serialized and sent to the function
+    payload:
+      foo: bar
+      value: 8
+  register: response
+
+# Test that you have sufficient permissions to execute a Lambda function in
+# another account
+- execute_lambda:
+    function_arn: arn:aws:lambda:us-east-1:123456789012:function/some-function
+    dry_run: true
+
+- execute_lambda:
+    name: test-function
+    payload:
+      foo: bar
+      value: 8
+    wait: true
+    tail_log: true
+  register: response
+  # the response will have a `logs` key that will contain a log (up to 4KB) of the function execution in Lambda.
+
+- execute_lambda:
+    name: test-function
+    version_qualifier: PRODUCTION
+'''
+
+RETURN = '''
+output:
+    description: Function output if wait=true and the function returns a value
+    returned: success
+    type: dict
+    sample: "{ 'output': 'something' }"
+logs:
+    description: The last 4KB of the function logs.
Only provided if I(tail_log) is true.
+    returned: success
+    type: string
+status:
+    description: C(StatusCode) of API call exit (200 for synchronous invokes, 202 for async)
+    returned: always
+    type: int
+    sample: 200
+'''
+
+import base64
+import json
+import traceback
+
+try:
+    import boto3
+    import botocore
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        name=dict(),
+        function_arn=dict(),
+        wait=dict(choices=BOOLEANS, default=True, type='bool'),
+        tail_log=dict(choices=BOOLEANS, default=False, type='bool'),
+        dry_run=dict(choices=BOOLEANS, default=False, type='bool'),
+        version_qualifier=dict(),
+        payload=dict(default={}, type='dict'),
+    ))
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        mutually_exclusive=[
+            ['name', 'function_arn'],
+        ]
+    )
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='Python module "boto3" is missing, please install it')
+
+    name = module.params.get('name')
+    function_arn = module.params.get('function_arn')
+    await_return = module.params.get('wait')
+    dry_run = module.params.get('dry_run')
+    tail_log = module.params.get('tail_log')
+    version_qualifier = module.params.get('version_qualifier')
+    payload = module.params.get('payload')
+
+    if not (name or function_arn):
+        module.fail_json(msg="Must provide either a function_arn or a name to invoke.")
+
+    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=HAS_BOTO3)
+    if not region:
+        module.fail_json(msg="The AWS region must be specified as an "
+                             "environment variable or in the AWS credentials "
+                             "profile.")
+
+    try:
+        client = boto3_conn(module, conn_type='client', resource='lambda',
+                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
+    except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
+        module.fail_json(msg="Failure connecting boto3 to AWS", exception=traceback.format_exc())
+
+    invoke_params = {}
+
+    if await_return:
+        # await response
+        invoke_params['InvocationType'] = 'RequestResponse'
+    else:
+        # fire and forget
+        invoke_params['InvocationType'] = 'Event'
+    if dry_run or module.check_mode:
+        # dry_run overrides invocation type
+        invoke_params['InvocationType'] = 'DryRun'
+
+    if tail_log and await_return:
+        invoke_params['LogType'] = 'Tail'
+    elif tail_log and not await_return:
+        module.fail_json(msg="The `tail_log` parameter is only available if "
+                             "the invocation waits for the function to complete. "
+                             "Set `wait` to true or turn off `tail_log`.")
+    else:
+        invoke_params['LogType'] = 'None'
+
+    if version_qualifier:
+        invoke_params['Qualifier'] = version_qualifier
+
+    if payload:
+        invoke_params['Payload'] = json.dumps(payload)
+
+    if function_arn:
+        invoke_params['FunctionName'] = function_arn
+    elif name:
+        invoke_params['FunctionName'] = name
+
+    try:
+        response = client.invoke(**invoke_params)
+    except botocore.exceptions.ClientError as ce:
+        if ce.response['Error']['Code'] == 'ResourceNotFoundException':
+            module.fail_json(msg="Could not find Lambda to execute. Make sure "
+                                 "the ARN is correct and your profile has "
+                                 "permissions to execute this function.",
+                             exception=traceback.format_exc())
+        module.fail_json(msg="Client-side error when invoking Lambda, check inputs and specific error",
+                         exception=traceback.format_exc())
+    except botocore.exceptions.ParamValidationError as ve:
+        module.fail_json(msg="Parameters to `invoke` failed to validate",
+                         exception=traceback.format_exc())
+    except Exception as e:
+        module.fail_json(msg="Unexpected failure while invoking Lambda function",
+                         exception=traceback.format_exc())
+
+    results = {
+        'logs': '',
+        'status': response['StatusCode'],
+        'output': '',
+    }
+
+    if response.get('LogResult'):
+        try:
+            # logs are base64 encoded in the API response
+            results['logs'] = base64.b64decode(response.get('LogResult', ''))
+        except Exception as e:
+            module.fail_json(msg="Failed while decoding logs", exception=traceback.format_exc())
+
+    if invoke_params['InvocationType'] == 'RequestResponse':
+        try:
+            results['output'] = json.loads(response['Payload'].read())
+        except Exception as e:
+            module.fail_json(msg="Failed while decoding function return value", exception=traceback.format_exc())
+
+    if isinstance(results.get('output'), dict) and any(
+            [results['output'].get('stackTrace'), results['output'].get('errorMessage')]):
+        # AWS sends back stack traces and error messages when a function failed
+        # in a RequestResponse (synchronous) context.
+        template = ("Function executed, but there was an error in the Lambda function. "
+                    "Message: {errmsg}, Type: {type}, Stack Trace: {trace}")
+        error_data = {
+            # format the stacktrace sent back as an array into a multiline string
+            'trace': '\n'.join(
+                [' '.join([
+                    str(x) for x in line  # cast line numbers to strings
+                ]) for line in results.get('output', {}).get('stackTrace', [])]
+            ),
+            'errmsg': results['output'].get('errorMessage'),
+            'type': results['output'].get('errorType')
+        }
+        module.fail_json(msg=template.format(**error_data), result=results)
+
+    module.exit_json(changed=True, result=results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/amazon/iam_mfa_device_facts.py b/cloud/amazon/iam_mfa_device_facts.py
new file mode 100644
index 00000000000..539867663c3
--- /dev/null
+++ b/cloud/amazon/iam_mfa_device_facts.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
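+
+# Illustrative sketch (not part of the module logic below): this module is a
+# thin wrapper around a single boto3 IAM call, roughly equivalent to
+#
+#     iam = boto3.client('iam')
+#     iam.list_mfa_devices(UserName='some-user')  # UserName may be omitted
+#
+# with the response keys converted to snake_case for use as Ansible facts.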
+ +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: iam_mfa_device_facts +short_description: List the MFA (Multi-Factor Authentication) devices registered for a user +description: + - List the MFA (Multi-Factor Authentication) devices registered for a user +version_added: "2.2" +author: Victor Costan (@pwnall) +options: + user_name: + description: + - The name of the user whose MFA devices will be listed + required: false + default: null +extends_documentation_fragment: + - aws + - ec2 +requirements: + - boto3 + - botocore +''' + +RETURN = """ +mfa_devices: + description: The MFA devices registered for the given user + returned: always + type: list + sample: + - enable_date: "2016-03-11T23:25:36+00:00" + serial_number: arn:aws:iam::085120003701:mfa/pwnall + user_name: pwnall + - enable_date: "2016-03-11T23:25:37+00:00" + serial_number: arn:aws:iam::085120003702:mfa/pwnall + user_name: pwnall +""" + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# List MFA devices (more details: http://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html) +iam_mfa_device_facts: +register: mfa_devices + +# Assume an existing role (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) +sts_assume_role: + mfa_serial_number: "{{ mfa_devices.mfa_devices[0].serial_number }}" + role_arn: "arn:aws:iam::123456789012:role/someRole" + role_session_name: "someRoleSession" +register: assumed_role +''' + +try: + import boto3 + from botocore.exceptions import ClientError + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + + +def list_mfa_devices(connection, module): + user_name = module.params.get('user_name') + changed = False + + args = {} + if user_name is not None: + args['UserName'] = user_name + try: + response = connection.list_mfa_devices(**args) + except ClientError as e: + module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + + module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + user_name=dict(required=False, default=None) + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + if region: + connection = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_kwargs) + else: + module.fail_json(msg="region must be specified") + + list_mfa_devices(connection, module) + + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/iam_server_certificate_facts.py b/cloud/amazon/iam_server_certificate_facts.py new file mode 100644 index 00000000000..1c8637362f3 --- /dev/null +++ b/cloud/amazon/iam_server_certificate_facts.py @@ -0,0 +1,176 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: iam_server_certificate_facts +short_description: Retrieve the facts of a server certificate +description: + - Retrieve the attributes of a server certificate +version_added: "2.2" +author: "Allen Sanabria (@linuxdynasty)" +requirements: [boto3, botocore] +options: + name: + description: + - The name of the server certificate you are retrieving attributes for. + required: true +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Retrieve server certificate +- iam_server_certificate_facts: + name: production-cert + register: server_cert + +# Fail if the server certificate name was not found +- iam_server_certificate_facts: + name: production-cert + register: server_cert + failed_when: "{{ server_cert.results | length == 0 }}" +''' + +RETURN = ''' +server_certificate_id: + description: The 21 character certificate id + returned: success + type: str + sample: "ADWAJXWTZAXIPIMQHMJPO" +certificate_body: + description: The asn1der encoded PEM string + returned: success + type: str + sample: "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----" +server_certificate_name: + description: The name of the server certificate + returned: success + type: str + sample: "server-cert-name" +arn: + description: The Amazon resource name of the server certificate + returned: success + type: str + sample: "arn:aws:iam::911277865346:server-certificate/server-cert-name" +path: + description: The path of the server certificate + returned: success + type: str + sample: "/" +expiration: + description: The date and time this server certificate will expire, in ISO 8601 format. + returned: success + type: str + sample: "2017-06-15T12:00:00+00:00" +upload_date: + description: The date and time this server certificate was uploaded, in ISO 8601 format. + returned: success + type: str + sample: "2015-04-25T00:36:40+00:00" +''' + + +try: + import boto3 + import botocore.exceptions + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + + +def get_server_certs(iam, name=None): + """Retrieve the attributes of a server certificate if it exists or all certs. + Args: + iam (botocore.client.IAM): The boto3 iam instance. + + Kwargs: + name (str): The name of the server certificate. 
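+            If omitted, every server certificate in the account is retrieved.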
+ + Basic Usage: + >>> import boto3 + >>> iam = boto3.client('iam') + >>> name = "server-cert-name" + >>> results = get_server_certs(iam, name) + { + "upload_date": "2015-04-25T00:36:40+00:00", + "server_certificate_id": "ADWAJXWTZAXIPIMQHMJPO", + "certificate_body": "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----", + "server_certificate_name": "server-cert-name", + "expiration": "2017-06-15T12:00:00+00:00", + "path": "/", + "arn": "arn:aws:iam::911277865346:server-certificate/server-cert-name" + } + """ + results = dict() + try: + if name: + server_certs = [iam.get_server_certificate(ServerCertificateName=name)['ServerCertificate']] + else: + server_certs = iam.list_server_certificates()['ServerCertificateMetadataList'] + + for server_cert in server_certs: + if not name: + server_cert = iam.get_server_certificate(ServerCertificateName=server_cert['ServerCertificateName'])['ServerCertificate'] + cert_md = server_cert['ServerCertificateMetadata'] + results[cert_md['ServerCertificateName']] = { + 'certificate_body': server_cert['CertificateBody'], + 'server_certificate_id': cert_md['ServerCertificateId'], + 'server_certificate_name': cert_md['ServerCertificateName'], + 'arn': cert_md['Arn'], + 'path': cert_md['Path'], + 'expiration': cert_md['Expiration'].isoformat(), + 'upload_date': cert_md['UploadDate'].isoformat(), + } + + except botocore.exceptions.ClientError: + pass + + return results + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + name=dict(type='str'), + )) + + module = AnsibleModule(argument_spec=argument_spec,) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + try: + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + iam = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_kwargs) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Boto3 Client Error - " + str(e.msg)) + + cert_name = module.params.get('name') + results = get_server_certs(iam, cert_name) + module.exit_json(results=results) + + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/kinesis_stream.py b/cloud/amazon/kinesis_stream.py new file mode 100644 index 00000000000..b4e0f7205bf --- /dev/null +++ b/cloud/amazon/kinesis_stream.py @@ -0,0 +1,1102 @@ +#!/usr/bin/python +# +# This is a free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: kinesis_stream +short_description: Manage a Kinesis Stream. +description: + - Create or Delete a Kinesis Stream. + - Update the retention period of a Kinesis Stream. + - Update Tags on a Kinesis Stream. 
+version_added: "2.2" +author: Allen Sanabria (@linuxdynasty) +options: + name: + description: + - "The name of the Kinesis Stream you are managing." + default: None + required: true + shards: + description: + - "The number of shards you want to have with this stream. This can not + be modified after being created." + - "This is required when state == present" + required: false + default: None + retention_period: + description: + - "The default retention period is 24 hours and can not be less than 24 + hours." + - "The retention period can be modified during any point in time." + required: false + default: None + state: + description: + - "Create or Delete the Kinesis Stream." + required: false + default: present + choices: [ 'present', 'absent' ] + wait: + description: + - Wait for operation to complete before returning. + required: false + default: true + wait_timeout: + description: + - How many seconds to wait for an operation to complete before timing out. + required: false + default: 300 + tags: + description: + - "A dictionary of resource tags of the form: { tag1: value1, tag2: value2 }." + required: false + default: null + aliases: [ "resource_tags" ] +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Basic creation example: +- name: Set up Kinesis Stream with 10 shards and wait for the stream to become ACTIVE + kinesis_stream: + name: test-stream + shards: 10 + wait: yes + wait_timeout: 600 + register: test_stream + +# Basic creation example with tags: +- name: Set up Kinesis Stream with 10 shards, tag the environment, and wait for the stream to become ACTIVE + kinesis_stream: + name: test-stream + shards: 10 + tags: + Env: development + wait: yes + wait_timeout: 600 + register: test_stream + +# Basic creation example with tags and increase the retention period from the default 24 hours to 48 hours: +- name: Set up Kinesis Stream with 10 shards, tag the environment, increase the retention period and wait for the stream to become ACTIVE + kinesis_stream: + name: test-stream + retention_period: 48 + shards: 10 + tags: + Env: development + wait: yes + wait_timeout: 600 + register: test_stream + +# Basic delete example: +- name: Delete Kinesis Stream test-stream and wait for it to finish deleting. + kinesis_stream: + name: test-stream + state: absent + wait: yes + wait_timeout: 600 + register: test_stream +''' + +RETURN = ''' +stream_name: + description: The name of the Kinesis Stream. + returned: when state == present. + type: string + sample: "test-stream" +stream_arn: + description: The amazon resource identifier + returned: when state == present. + type: string + sample: "arn:aws:kinesis:east-side:123456789:stream/test-stream" +stream_status: + description: The current state of the Kinesis Stream. + returned: when state == present. + type: string + sample: "ACTIVE" +retention_period_hours: + description: Number of hours messages will be kept for a Kinesis Stream. + returned: when state == present. + type: int + sample: 24 +tags: + description: Dictionary containing all the tags associated with the Kinesis stream. + returned: when state == present. 
+ type: dict + sample: { + "Name": "Splunk", + "Env": "development" + } +''' + +try: + import botocore + import boto3 + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +import re +import datetime +import time +from functools import reduce + + +def convert_to_lower(data): + """Convert all uppercase keys in dict with lowercase_ + Args: + data (dict): Dictionary with keys that have upper cases in them + Example.. FooBar == foo_bar + if a val is of type datetime.datetime, it will be converted to + the ISO 8601 + + Basic Usage: + >>> test = {'FooBar': []} + >>> test = convert_to_lower(test) + { + 'foo_bar': [] + } + + Returns: + Dictionary + """ + results = dict() + if isinstance(data, dict): + for key, val in data.items(): + key = re.sub(r'(([A-Z]{1,3}){1})', r'_\1', key).lower() + if key[0] == '_': + key = key[1:] + if isinstance(val, datetime.datetime): + results[key] = val.isoformat() + elif isinstance(val, dict): + results[key] = convert_to_lower(val) + elif isinstance(val, list): + converted = list() + for item in val: + converted.append(convert_to_lower(item)) + results[key] = converted + else: + results[key] = val + return results + + +def make_tags_in_proper_format(tags): + """Take a dictionary of tags and convert them into the AWS Tags format. + Args: + tags (list): The tags you want applied. + + Basic Usage: + >>> tags = [{'Key': 'env', 'Value': 'development'}] + >>> make_tags_in_proper_format(tags) + { + "env": "development", + } + + Returns: + Dict + """ + formatted_tags = dict() + for tag in tags: + formatted_tags[tag.get('Key')] = tag.get('Value') + + return formatted_tags + + +def make_tags_in_aws_format(tags): + """Take a dictionary of tags and convert them into the AWS Tags format. + Args: + tags (dict): The tags you want applied. + + Basic Usage: + >>> tags = {'env': 'development', 'service': 'web'} + >>> make_tags_in_proper_format(tags) + [ + { + "Value": "web", + "Key": "service" + }, + { + "Value": "development", + "key": "env" + } + ] + + Returns: + List + """ + formatted_tags = list() + for key, val in tags.items(): + formatted_tags.append({ + 'Key': key, + 'Value': val + }) + + return formatted_tags + + +def get_tags(client, stream_name, check_mode=False): + """Retrieve the tags for a Kinesis Stream. + Args: + client (botocore.client.EC2): Boto3 client. + stream_name (str): Name of the Kinesis stream. + + Kwargs: + check_mode (bool): This will pass DryRun as one of the parameters to the aws api. + default=False + + Basic Usage: + >>> client = boto3.client('kinesis') + >>> stream_name = 'test-stream' + >> get_tags(client, stream_name) + + Returns: + Tuple (bool, str, dict) + """ + err_msg = '' + success = False + params = { + 'StreamName': stream_name, + } + results = dict() + try: + if not check_mode: + results = ( + client.list_tags_for_stream(**params)['Tags'] + ) + else: + results = [ + { + 'Key': 'DryRunMode', + 'Value': 'true' + }, + ] + success = True + except botocore.exceptions.ClientError as e: + err_msg = str(e) + + return success, err_msg, results + + +def find_stream(client, stream_name, check_mode=False): + """Retrieve a Kinesis Stream. + Args: + client (botocore.client.EC2): Boto3 client. + stream_name (str): Name of the Kinesis stream. + + Kwargs: + check_mode (bool): This will pass DryRun as one of the parameters to the aws api. 
+ default=False + + Basic Usage: + >>> client = boto3.client('kinesis') + >>> stream_name = 'test-stream' + + Returns: + Tuple (bool, str, dict) + """ + err_msg = '' + success = False + params = { + 'StreamName': stream_name, + } + results = dict() + has_more_shards = True + shards = list() + try: + if not check_mode: + while has_more_shards: + results = ( + client.describe_stream(**params)['StreamDescription'] + ) + shards.extend(results.pop('Shards')) + has_more_shards = results['HasMoreShards'] + results['Shards'] = shards + results['ShardsCount'] = len(shards) + else: + results = { + 'HasMoreShards': True, + 'RetentionPeriodHours': 24, + 'StreamName': stream_name, + 'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/{0}'.format(stream_name), + 'StreamStatus': 'ACTIVE' + } + success = True + except botocore.exceptions.ClientError as e: + err_msg = str(e) + + return success, err_msg, results + + +def wait_for_status(client, stream_name, status, wait_timeout=300, + check_mode=False): + """Wait for the the status to change for a Kinesis Stream. + Args: + client (botocore.client.EC2): Boto3 client + stream_name (str): The name of the kinesis stream. + status (str): The status to wait for. + examples. status=available, status=deleted + + Kwargs: + wait_timeout (int): Number of seconds to wait, until this timeout is reached. + check_mode (bool): This will pass DryRun as one of the parameters to the aws api. + default=False + + Basic Usage: + >>> client = boto3.client('kinesis') + >>> stream_name = 'test-stream' + >>> wait_for_status(client, stream_name, 'ACTIVE', 300) + + Returns: + Tuple (bool, str, dict) + """ + polling_increment_secs = 5 + wait_timeout = time.time() + wait_timeout + status_achieved = False + stream = dict() + err_msg = "" + + while wait_timeout > time.time(): + try: + find_success, find_msg, stream = ( + find_stream(client, stream_name, check_mode=check_mode) + ) + if check_mode: + status_achieved = True + break + + elif status != 'DELETING': + if find_success and stream: + if stream.get('StreamStatus') == status: + status_achieved = True + break + + elif status == 'DELETING' and not check_mode: + if not find_success: + status_achieved = True + break + + else: + time.sleep(polling_increment_secs) + except botocore.exceptions.ClientError as e: + err_msg = str(e) + + if not status_achieved: + err_msg = "Wait time out reached, while waiting for results" + else: + err_msg = "Status {0} achieved successfully".format(status) + + return status_achieved, err_msg, stream + + +def tags_action(client, stream_name, tags, action='create', check_mode=False): + """Create or delete multiple tags from a Kinesis Stream. + Args: + client (botocore.client.EC2): Boto3 client. + resource_id (str): The Amazon resource id. + tags (list): List of dictionaries. + examples.. [{Name: "", Values: [""]}] + + Kwargs: + action (str): The action to perform. + valid actions == create and delete + default=create + check_mode (bool): This will pass DryRun as one of the parameters to the aws api. 
+ default=False + + Basic Usage: + >>> client = boto3.client('ec2') + >>> resource_id = 'pcx-123345678' + >>> tags = {'env': 'development'} + >>> update_tags(client, resource_id, tags) + [True, ''] + + Returns: + List (bool, str) + """ + success = False + err_msg = "" + params = {'StreamName': stream_name} + try: + if not check_mode: + if action == 'create': + params['Tags'] = tags + client.add_tags_to_stream(**params) + success = True + elif action == 'delete': + params['TagKeys'] = tags.keys() + client.remove_tags_from_stream(**params) + success = True + else: + err_msg = 'Invalid action {0}'.format(action) + else: + if action == 'create': + success = True + elif action == 'delete': + success = True + else: + err_msg = 'Invalid action {0}'.format(action) + + except botocore.exceptions.ClientError as e: + err_msg = str(e) + + return success, err_msg + + +def recreate_tags_from_list(list_of_tags): + """Recreate tags from a list of tuples into the Amazon Tag format. + Args: + list_of_tags (list): List of tuples. + + Basic Usage: + >>> list_of_tags = [('Env', 'Development')] + >>> recreate_tags_from_list(list_of_tags) + [ + { + "Value": "Development", + "Key": "Env" + } + ] + + Returns: + List + """ + tags = list() + i = 0 + list_of_tags = list_of_tags + for i in range(len(list_of_tags)): + key_name = list_of_tags[i][0] + key_val = list_of_tags[i][1] + tags.append( + { + 'Key': key_name, + 'Value': key_val + } + ) + return tags + + +def update_tags(client, stream_name, tags, check_mode=False): + """Update tags for an amazon resource. + Args: + resource_id (str): The Amazon resource id. + tags (dict): Dictionary of tags you want applied to the Kinesis stream. + + Kwargs: + check_mode (bool): This will pass DryRun as one of the parameters to the aws api. + default=False + + Basic Usage: + >>> client = boto3.client('ec2') + >>> stream_name = 'test-stream' + >>> tags = {'env': 'development'} + >>> update_tags(client, stream_name, tags) + [True, ''] + + Return: + Tuple (bool, str) + """ + success = False + changed = False + err_msg = '' + tag_success, tag_msg, current_tags = ( + get_tags(client, stream_name, check_mode=check_mode) + ) + if current_tags: + tags = make_tags_in_aws_format(tags) + current_tags_set = ( + set( + reduce( + lambda x, y: x + y, + [make_tags_in_proper_format(current_tags).items()] + ) + ) + ) + + new_tags_set = ( + set( + reduce( + lambda x, y: x + y, + [make_tags_in_proper_format(tags).items()] + ) + ) + ) + tags_to_delete = list(current_tags_set.difference(new_tags_set)) + tags_to_update = list(new_tags_set.difference(current_tags_set)) + if tags_to_delete: + tags_to_delete = make_tags_in_proper_format( + recreate_tags_from_list(tags_to_delete) + ) + delete_success, delete_msg = ( + tags_action( + client, stream_name, tags_to_delete, action='delete', + check_mode=check_mode + ) + ) + if not delete_success: + return delete_success, changed, delete_msg + if tags_to_update: + tags = make_tags_in_proper_format( + recreate_tags_from_list(tags_to_update) + ) + else: + return True, changed, 'Tags do not need to be updated' + + if tags: + create_success, create_msg = ( + tags_action( + client, stream_name, tags, action='create', + check_mode=check_mode + ) + ) + if create_success: + changed = True + return create_success, changed, create_msg + + return success, changed, err_msg + + +def stream_action(client, stream_name, shard_count=1, action='create', + timeout=300, check_mode=False): + """Create or Delete an Amazon Kinesis Stream. 
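+    Only the parameters relevant to the requested action are sent to the API:
+    'create' includes ShardCount, while 'delete' passes just the stream name.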
+ Args: + client (botocore.client.EC2): Boto3 client. + stream_name (str): The name of the kinesis stream. + + Kwargs: + shard_count (int): Number of shards this stream will use. + action (str): The action to perform. + valid actions == create and delete + default=create + check_mode (bool): This will pass DryRun as one of the parameters to the aws api. + default=False + + Basic Usage: + >>> client = boto3.client('kinesis') + >>> stream_name = 'test-stream' + >>> shard_count = 20 + >>> stream_action(client, stream_name, shard_count, action='create') + + Returns: + List (bool, str) + """ + success = False + err_msg = '' + params = { + 'StreamName': stream_name + } + try: + if not check_mode: + if action == 'create': + params['ShardCount'] = shard_count + client.create_stream(**params) + success = True + elif action == 'delete': + client.delete_stream(**params) + success = True + else: + err_msg = 'Invalid action {0}'.format(action) + else: + if action == 'create': + success = True + elif action == 'delete': + success = True + else: + err_msg = 'Invalid action {0}'.format(action) + + except botocore.exceptions.ClientError as e: + err_msg = str(e) + + return success, err_msg + + +def retention_action(client, stream_name, retention_period=24, + action='increase', check_mode=False): + """Increase or Decreaste the retention of messages in the Kinesis stream. + Args: + client (botocore.client.EC2): Boto3 client. + stream_name (str): The + + Kwargs: + retention_period (int): This is how long messages will be kept before + they are discarded. This can not be less than 24 hours. + action (str): The action to perform. + valid actions == create and delete + default=create + check_mode (bool): This will pass DryRun as one of the parameters to the aws api. + default=False + + Basic Usage: + >>> client = boto3.client('kinesis') + >>> stream_name = 'test-stream' + >>> retention_period = 48 + >>> stream_action(client, stream_name, retention_period, action='create') + + Returns: + Tuple (bool, str) + """ + success = False + err_msg = '' + params = { + 'StreamName': stream_name + } + try: + if not check_mode: + if action == 'increase': + params['RetentionPeriodHours'] = retention_period + client.increase_stream_retention_period(**params) + success = True + err_msg = ( + 'Retention Period increased successfully to {0}' + .format(retention_period) + ) + elif action == 'decrease': + params['RetentionPeriodHours'] = retention_period + client.decrease_stream_retention_period(**params) + success = True + err_msg = ( + 'Retention Period decreased successfully to {0}' + .format(retention_period) + ) + else: + err_msg = 'Invalid action {0}'.format(action) + else: + if action == 'increase': + success = True + elif action == 'decrease': + success = True + else: + err_msg = 'Invalid action {0}'.format(action) + + except botocore.exceptions.ClientError as e: + err_msg = str(e) + + return success, err_msg + + +def update(client, current_stream, stream_name, retention_period=None, + tags=None, wait=False, wait_timeout=300, check_mode=False): + """Update an Amazon Kinesis Stream. + Args: + client (botocore.client.EC2): Boto3 client. + stream_name (str): The name of the kinesis stream. + + Kwargs: + retention_period (int): This is how long messages will be kept before + they are discarded. This can not be less than 24 hours. + tags (dict): The tags you want applied. + wait (bool): Wait until Stream is ACTIVE. + default=False + wait_timeout (int): How long to wait until this operation is considered failed. 
+ default=300 + check_mode (bool): This will pass DryRun as one of the parameters to the aws api. + default=False + + Basic Usage: + >>> client = boto3.client('kinesis') + >>> current_stream = { + 'HasMoreShards': True, + 'RetentionPeriodHours': 24, + 'StreamName': 'test-stream', + 'StreamARN': 'arn:aws:kinesis:us-west-2:123456789:stream/test-stream', + 'StreamStatus': "ACTIVE' + } + >>> stream_name = 'test-stream' + >>> retention_period = 48 + >>> stream_action(client, current_stream, stream_name, + retention_period, action='create' ) + + Returns: + Tuple (bool, bool, str) + """ + success = True + changed = False + err_msg = '' + if retention_period: + if wait: + wait_success, wait_msg, current_stream = ( + wait_for_status( + client, stream_name, 'ACTIVE', wait_timeout, + check_mode=check_mode + ) + ) + if not wait_success: + return wait_success, False, wait_msg + + if current_stream['StreamStatus'] == 'ACTIVE': + retention_changed = False + if retention_period > current_stream['RetentionPeriodHours']: + retention_changed, retention_msg = ( + retention_action( + client, stream_name, retention_period, action='increase', + check_mode=check_mode + ) + ) + + elif retention_period < current_stream['RetentionPeriodHours']: + retention_changed, retention_msg = ( + retention_action( + client, stream_name, retention_period, action='decrease', + check_mode=check_mode + ) + ) + + elif retention_period == current_stream['RetentionPeriodHours']: + retention_msg = ( + 'Retention {0} is the same as {1}' + .format( + retention_period, + current_stream['RetentionPeriodHours'] + ) + ) + success = True + + if retention_changed: + success = True + changed = True + + err_msg = retention_msg + if changed and wait: + wait_success, wait_msg, current_stream = ( + wait_for_status( + client, stream_name, 'ACTIVE', wait_timeout, + check_mode=check_mode + ) + ) + if not wait_success: + return wait_success, False, wait_msg + elif changed and not wait: + stream_found, stream_msg, current_stream = ( + find_stream(client, stream_name, check_mode=check_mode) + ) + if stream_found: + if current_stream['StreamStatus'] != 'ACTIVE': + err_msg = ( + 'Retention Period for {0} is in the process of updating' + .format(stream_name) + ) + return success, changed, err_msg + else: + err_msg = ( + 'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}' + .format(current_stream['StreamStatus']) + ) + return success, changed, err_msg + + if tags: + _, _, err_msg = ( + update_tags(client, stream_name, tags, check_mode=check_mode) + ) + if wait: + success, err_msg, _ = ( + wait_for_status( + client, stream_name, 'ACTIVE', wait_timeout, + check_mode=check_mode + ) + ) + if success and changed: + err_msg = 'Kinesis Stream {0} updated successfully.'.format(stream_name) + elif success and not changed: + err_msg = 'Kinesis Stream {0} did not changed.'.format(stream_name) + + return success, changed, err_msg + + +def create_stream(client, stream_name, number_of_shards=1, retention_period=None, + tags=None, wait=False, wait_timeout=300, check_mode=False): + """Create an Amazon Kinesis Stream. + Args: + client (botocore.client.EC2): Boto3 client. + stream_name (str): The name of the kinesis stream. + + Kwargs: + number_of_shards (int): Number of shards this stream will use. + default=1 + retention_period (int): Can not be less than 24 hours + default=None + tags (dict): The tags you want applied. + default=None + wait (bool): Wait until Stream is ACTIVE. 
+ default=False + wait_timeout (int): How long to wait until this operation is considered failed. + default=300 + check_mode (bool): This will pass DryRun as one of the parameters to the aws api. + default=False + + Basic Usage: + >>> client = boto3.client('kinesis') + >>> stream_name = 'test-stream' + >>> number_of_shards = 10 + >>> tags = {'env': 'test'} + >>> create_stream(client, stream_name, number_of_shards, tags=tags) + + Returns: + Tuple (bool, bool, str, dict) + """ + success = False + changed = False + err_msg = '' + results = dict() + + stream_found, stream_msg, current_stream = ( + find_stream(client, stream_name, check_mode=check_mode) + ) + if stream_found and not check_mode: + if current_stream['ShardsCount'] != number_of_shards: + err_msg = 'Can not change the number of shards in a Kinesis Stream' + return success, changed, err_msg, results + + if stream_found and current_stream['StreamStatus'] == 'DELETING' and wait: + wait_success, wait_msg, current_stream = ( + wait_for_status( + client, stream_name, 'ACTIVE', wait_timeout, + check_mode=check_mode + ) + ) + if stream_found and current_stream['StreamStatus'] != 'DELETING': + success, changed, err_msg = update( + client, current_stream, stream_name, retention_period, tags, + wait, wait_timeout, check_mode=check_mode + ) + else: + create_success, create_msg = ( + stream_action( + client, stream_name, number_of_shards, action='create', + check_mode=check_mode + ) + ) + if create_success: + changed = True + if wait: + wait_success, wait_msg, results = ( + wait_for_status( + client, stream_name, 'ACTIVE', wait_timeout, + check_mode=check_mode + ) + ) + err_msg = ( + 'Kinesis Stream {0} is in the process of being created' + .format(stream_name) + ) + if not wait_success: + return wait_success, True, wait_msg, results + else: + err_msg = ( + 'Kinesis Stream {0} created successfully' + .format(stream_name) + ) + + if tags: + changed, err_msg = ( + tags_action( + client, stream_name, tags, action='create', + check_mode=check_mode + ) + ) + if changed: + success = True + if not success: + return success, changed, err_msg, results + + stream_found, stream_msg, current_stream = ( + find_stream(client, stream_name, check_mode=check_mode) + ) + if retention_period and current_stream['StreamStatus'] == 'ACTIVE': + changed, err_msg = ( + retention_action( + client, stream_name, retention_period, action='increase', + check_mode=check_mode + ) + ) + if changed: + success = True + if not success: + return success, changed, err_msg, results + else: + err_msg = ( + 'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}' + .format(current_stream['StreamStatus']) + ) + success = create_success + changed = True + + if success: + _, _, results = ( + find_stream(client, stream_name, check_mode=check_mode) + ) + _, _, current_tags = ( + get_tags(client, stream_name, check_mode=check_mode) + ) + if current_tags and not check_mode: + current_tags = make_tags_in_proper_format(current_tags) + results['Tags'] = current_tags + elif check_mode and tags: + results['Tags'] = tags + else: + results['Tags'] = dict() + results = convert_to_lower(results) + + return success, changed, err_msg, results + + +def delete_stream(client, stream_name, wait=False, wait_timeout=300, + check_mode=False): + """Delete an Amazon Kinesis Stream. + Args: + client (botocore.client.EC2): Boto3 client. + stream_name (str): The name of the kinesis stream. + + Kwargs: + wait (bool): Wait until Stream is ACTIVE. 
+ default=False + wait_timeout (int): How long to wait until this operation is considered failed. + default=300 + check_mode (bool): This will pass DryRun as one of the parameters to the aws api. + default=False + + Basic Usage: + >>> client = boto3.client('kinesis') + >>> stream_name = 'test-stream' + >>> delete_stream(client, stream_name) + + Returns: + Tuple (bool, bool, str, dict) + """ + success = False + changed = False + err_msg = '' + results = dict() + stream_found, stream_msg, current_stream = ( + find_stream(client, stream_name, check_mode=check_mode) + ) + if stream_found: + success, err_msg = ( + stream_action( + client, stream_name, action='delete', check_mode=check_mode + ) + ) + if success: + changed = True + if wait: + success, err_msg, results = ( + wait_for_status( + client, stream_name, 'DELETING', wait_timeout, + check_mode=check_mode + ) + ) + err_msg = 'Stream {0} deleted successfully'.format(stream_name) + if not success: + return success, True, err_msg, results + else: + err_msg = ( + 'Stream {0} is in the process of being deleted' + .format(stream_name) + ) + else: + success = True + changed = False + err_msg = 'Stream {0} does not exist'.format(stream_name) + + return success, changed, err_msg, results + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + name=dict(default=None, required=True), + shards=dict(default=None, required=False, type='int'), + retention_period=dict(default=None, required=False, type='int'), + tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']), + wait=dict(default=True, required=False, type='bool'), + wait_timeout=dict(default=300, required=False, type='int'), + state=dict(default='present', choices=['present', 'absent']), + ) + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + retention_period = module.params.get('retention_period') + stream_name = module.params.get('name') + shards = module.params.get('shards') + state = module.params.get('state') + tags = module.params.get('tags') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + + if state == 'present' and not shards: + module.fail_json(msg='Shards is required when state == present.') + + if retention_period: + if retention_period < 24: + module.fail_json(msg='Retention period can not be less than 24 hours.') + + if not HAS_BOTO3: + module.fail_json(msg='boto3 is required.') + + check_mode = module.check_mode + try: + region, ec2_url, aws_connect_kwargs = ( + get_aws_connection_info(module, boto3=True) + ) + client = ( + boto3_conn( + module, conn_type='client', resource='kinesis', + region=region, endpoint=ec2_url, **aws_connect_kwargs + ) + ) + except botocore.exceptions.ClientError as e: + err_msg = 'Boto3 Client Error - {0}'.format(str(e.msg)) + module.fail_json( + success=False, changed=False, result={}, msg=err_msg + ) + + if state == 'present': + success, changed, err_msg, results = ( + create_stream( + client, stream_name, shards, retention_period, tags, + wait, wait_timeout, check_mode + ) + ) + elif state == 'absent': + success, changed, err_msg, results = ( + delete_stream(client, stream_name, wait, wait_timeout, check_mode) + ) + + if success: + module.exit_json( + success=success, changed=changed, msg=err_msg, **results + ) + else: + module.fail_json( + success=success, changed=changed, msg=err_msg, result=results + ) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + 
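+# Result contract (recap of main() above): on success the module exits via
+# exit_json() with the stream description (keys converted to snake_case)
+# merged into the result; any failure goes through fail_json() with the
+# collected error message, so callers always see `changed` and `msg`.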
+if __name__ == '__main__': + main() diff --git a/cloud/amazon/lambda.py b/cloud/amazon/lambda.py new file mode 100644 index 00000000000..cef3b38e30f --- /dev/null +++ b/cloud/amazon/lambda.py @@ -0,0 +1,473 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: lambda +short_description: Manage AWS Lambda functions +description: + - Allows for the management of Lambda functions. +version_added: '2.2' +requirements: [ boto3 ] +options: + name: + description: + - The name you want to assign to the function you are uploading. Cannot be changed. + required: true + state: + description: + - Create or delete Lambda function + required: false + default: present + choices: [ 'present', 'absent' ] + runtime: + description: + - The runtime environment for the Lambda function you are uploading. Required when creating a function. Use parameters as described in boto3 docs. Current example runtime environments are nodejs, nodejs4.3, java8 or python2.7 + required: true + role: + description: + - The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS) resources. You may use the bare ARN if the role belongs to the same AWS account. + default: null + handler: + description: + - The function within your code that Lambda calls to begin execution + default: null + zip_file: + description: + - A .zip file containing your deployment package + required: false + default: null + aliases: [ 'src' ] + s3_bucket: + description: + - Amazon S3 bucket name where the .zip file containing your deployment package is stored + required: false + default: null + s3_key: + description: + - The Amazon S3 object (the deployment package) key name you want to upload + required: false + default: null + s3_object_version: + description: + - The Amazon S3 object (the deployment package) version you want to upload. + required: false + default: null + description: + description: + - A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit. + required: false + default: null + timeout: + description: + - The function execution time at which Lambda should terminate the function. + required: false + default: 3 + memory_size: + description: + - The amount of memory, in MB, your Lambda function is given + required: false + default: 128 + vpc_subnet_ids: + description: + - List of subnet IDs to run Lambda function in. Use this option if you need to access resources in your VPC. Leave empty if you don't want to run the function in a VPC. + required: false + default: None + vpc_security_group_ids: + description: + - List of VPC security group IDs to associate with the Lambda function. Required when vpc_subnet_ids is used. 
+ required: false + default: None +notes: + - 'Currently this module only supports uploaded code via S3' +author: + - 'Steyn Huizinga (@steynovich)' +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Create Lambda functions +tasks: +- name: looped creation + lambda: + name: '{{ item.name }}' + state: present + zip_file: '{{ item.zip_file }}' + runtime: 'python2.7' + role: 'arn:aws:iam::987654321012:role/lambda_basic_execution' + handler: 'hello_python.my_handler' + vpc_subnet_ids: + - subnet-123abcde + - subnet-edcba321 + vpc_security_group_ids: + - sg-123abcde + - sg-edcba321 + with_items: + - name: HelloWorld + zip_file: hello-code.zip + - name: ByeBye + zip_file: bye-code.zip + +# Basic Lambda function deletion +tasks: +- name: Delete Lambda functions HelloWorld and ByeBye + lambda: + name: '{{ item }}' + state: absent + with_items: + - HelloWorld + - ByeBye +''' + +RETURN = ''' +output: + description: the data returned by create_function in boto3 + returned: success + type: dict + sample: + 'code': + { + 'location': 'an S3 URL', + 'repository_type': 'S3', + } + 'configuration': + { + 'function_name': 'string', + 'function_arn': 'string', + 'runtime': 'nodejs', + 'role': 'string', + 'handler': 'string', + 'code_size': 123, + 'description': 'string', + 'timeout': 123, + 'memory_size': 123, + 'last_modified': 'string', + 'code_sha256': 'string', + 'version': 'string', + } +''' + +# Import from Python standard library +import base64 +import hashlib + +try: + import botocore + HAS_BOTOCORE = True +except ImportError: + HAS_BOTOCORE = False + +try: + import boto3 + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + + +def get_current_function(connection, function_name, qualifier=None): + try: + if qualifier is not None: + return connection.get_function(FunctionName=function_name, + Qualifier=qualifier) + return connection.get_function(FunctionName=function_name) + except botocore.exceptions.ClientError: + return None + + +def sha256sum(filename): + hasher = hashlib.sha256() + with open(filename, 'rb') as f: + hasher.update(f.read()) + + code_hash = hasher.digest() + code_b64 = base64.b64encode(code_hash) + hex_digest = code_b64.decode('utf-8') + + return hex_digest + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + name=dict(type='str', required=True), + state=dict(type='str', default='present', choices=['present', 'absent']), + runtime=dict(type='str', required=True), + role=dict(type='str', default=None), + handler=dict(type='str', default=None), + zip_file=dict(type='str', default=None, aliases=['src']), + s3_bucket=dict(type='str'), + s3_key=dict(type='str'), + s3_object_version=dict(type='str', default=None), + description=dict(type='str', default=''), + timeout=dict(type='int', default=3), + memory_size=dict(type='int', default=128), + vpc_subnet_ids=dict(type='list', default=None), + vpc_security_group_ids=dict(type='list', default=None), + ) + ) + + mutually_exclusive = [['zip_file', 's3_key'], + ['zip_file', 's3_bucket'], + ['zip_file', 's3_object_version']] + + required_together = [['s3_key', 's3_bucket', 's3_object_version'], + ['vpc_subnet_ids', 'vpc_security_group_ids']] + + module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_together=required_together) + + name = module.params.get('name') + state = module.params.get('state').lower() + runtime = module.params.get('runtime') + role = module.params.get('role') + handler = 
module.params.get('handler') + s3_bucket = module.params.get('s3_bucket') + s3_key = module.params.get('s3_key') + s3_object_version = module.params.get('s3_object_version') + zip_file = module.params.get('zip_file') + description = module.params.get('description') + timeout = module.params.get('timeout') + memory_size = module.params.get('memory_size') + vpc_subnet_ids = module.params.get('vpc_subnet_ids') + vpc_security_group_ids = module.params.get('vpc_security_group_ids') + + check_mode = module.check_mode + changed = False + + if not HAS_BOTOCORE: + module.fail_json(msg='Python module "botocore" is missing, please install it') + + if not HAS_BOTO3: + module.fail_json(msg='Python module "boto3" is missing, please install it') + + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + if not region: + module.fail_json(msg='region must be specified') + + try: + client = boto3_conn(module, conn_type='client', resource='lambda', + region=region, endpoint=ec2_url, **aws_connect_kwargs) + except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e: + module.fail_json(msg=str(e)) + + if role.startswith('arn:aws:iam'): + role_arn = role + else: + # get account ID and assemble ARN + try: + iam_client = boto3_conn(module, conn_type='client', resource='iam', + region=region, endpoint=ec2_url, **aws_connect_kwargs) + account_id = iam_client.get_user()['User']['Arn'].split(':')[4] + role_arn = 'arn:aws:iam::{0}:role/{1}'.format(account_id, role) + except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e: + module.fail_json(msg=str(e)) + + # Get function configuration if present, False otherwise + current_function = get_current_function(client, name) + + # Update existing Lambda function + if state == 'present' and current_function: + + # Get current state + current_config = current_function['Configuration'] + current_version = None + + # Update function configuration + func_kwargs = {'FunctionName': name} + + # Update configuration if needed + if role_arn and current_config['Role'] != role_arn: + func_kwargs.update({'Role': role_arn}) + if handler and current_config['Handler'] != handler: + func_kwargs.update({'Handler': handler}) + if description and current_config['Description'] != description: + func_kwargs.update({'Description': description}) + if timeout and current_config['Timeout'] != timeout: + func_kwargs.update({'Timeout': timeout}) + if memory_size and current_config['MemorySize'] != memory_size: + func_kwargs.update({'MemorySize': memory_size}) + + # Check for unsupported mutation + if current_config['Runtime'] != runtime: + module.fail_json(msg='Cannot change runtime. 
Please recreate the function') + + # If VPC configuration is desired + if vpc_subnet_ids or vpc_security_group_ids: + if len(vpc_subnet_ids) < 1: + module.fail_json(msg='At least 1 subnet is required') + + if len(vpc_security_group_ids) < 1: + module.fail_json(msg='At least 1 security group is required') + + if 'VpcConfig' in current_config: + # Compare VPC config with current config + current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds'] + current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds'] + + subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids) + vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids) + + if any((subnet_net_id_changed, vpc_security_group_ids_changed)): + func_kwargs.update({'VpcConfig': + {'SubnetIds': vpc_subnet_ids,'SecurityGroupIds': vpc_security_group_ids}}) + else: + # No VPC configuration is desired, assure VPC config is empty when present in current config + if ('VpcConfig' in current_config and + 'VpcId' in current_config['VpcConfig'] and + current_config['VpcConfig']['VpcId'] != ''): + func_kwargs.update({'VpcConfig':{'SubnetIds': [], 'SecurityGroupIds': []}}) + + # Upload new configuration if configuration has changed + if len(func_kwargs) > 2: + try: + if not check_mode: + response = client.update_function_configuration(**func_kwargs) + current_version = response['Version'] + changed = True + except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e: + module.fail_json(msg=str(e)) + + # Update code configuration + code_kwargs = {'FunctionName': name, 'Publish': True} + + # Update S3 location + if s3_bucket and s3_key: + # If function is stored on S3 always update + code_kwargs.update({'S3Bucket': s3_bucket, 'S3Key': s3_key}) + + # If S3 Object Version is given + if s3_object_version: + code_kwargs.update({'S3ObjectVersion': s3_object_version}) + + # Compare local checksum, update remote code when different + elif zip_file: + local_checksum = sha256sum(zip_file) + remote_checksum = current_config['CodeSha256'] + + # Only upload new code when local code is different compared to the remote code + if local_checksum != remote_checksum: + try: + with open(zip_file, 'rb') as f: + encoded_zip = f.read() + code_kwargs.update({'ZipFile': encoded_zip}) + except IOError as e: + module.fail_json(msg=str(e)) + + # Upload new code if needed (e.g. 
code checksum has changed) + if len(code_kwargs) > 2: + try: + if not check_mode: + response = client.update_function_code(**code_kwargs) + current_version = response['Version'] + changed = True + except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e: + module.fail_json(msg=str(e)) + + # Describe function code and configuration + response = get_current_function(client, name, qualifier=current_version) + if not response: + module.fail_json(msg='Unable to get function information after updating') + + # We're done + module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) + + # Function doesn't exists, create new Lambda function + elif state == 'present': + if s3_bucket and s3_key: + # If function is stored on S3 + code = {'S3Bucket': s3_bucket, + 'S3Key': s3_key} + if s3_object_version: + code.update({'S3ObjectVersion': s3_object_version}) + elif zip_file: + # If function is stored in local zipfile + try: + with open(zip_file, 'rb') as f: + zip_content = f.read() + + code = {'ZipFile': zip_content} + except IOError as e: + module.fail_json(msg=str(e)) + + else: + module.fail_json(msg='Either S3 object or path to zipfile required') + + func_kwargs = {'FunctionName': name, + 'Description': description, + 'Publish': True, + 'Runtime': runtime, + 'Role': role_arn, + 'Handler': handler, + 'Code': code, + 'Timeout': timeout, + 'MemorySize': memory_size, + } + + # If VPC configuration is given + if vpc_subnet_ids or vpc_security_group_ids: + if len(vpc_subnet_ids) < 1: + module.fail_json(msg='At least 1 subnet is required') + + if len(vpc_security_group_ids) < 1: + module.fail_json(msg='At least 1 security group is required') + + func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids, + 'SecurityGroupIds': vpc_security_group_ids}}) + + # Finally try to create function + try: + if not check_mode: + response = client.create_function(**func_kwargs) + current_version = response['Version'] + changed = True + except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e: + module.fail_json(msg=str(e)) + + response = get_current_function(client, name, qualifier=current_version) + if not response: + module.fail_json(msg='Unable to get function information after creating') + module.exit_json(changed=changed, **camel_dict_to_snake_dict(response)) + + # Delete existing Lambda function + if state == 'absent' and current_function: + try: + if not check_mode: + client.delete_function(FunctionName=name) + changed = True + except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e: + module.fail_json(msg=str(e)) + + module.exit_json(changed=changed) + + # Function already absent, do nothing + elif state == 'absent': + module.exit_json(changed=changed) + + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/lambda_alias.py b/cloud/amazon/lambda_alias.py new file mode 100644 index 00000000000..a06880e4101 --- /dev/null +++ b/cloud/amazon/lambda_alias.py @@ -0,0 +1,389 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +try: + import boto3 + from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: lambda_alias +short_description: Creates, updates or deletes AWS Lambda function aliases. +description: + - This module allows the management of AWS Lambda functions aliases via the Ansible + framework. It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda function + itself and M(lambda_event) to manage event source mappings. + +version_added: "2.2" + +author: Pierre Jodouin (@pjodouin), Ryan Scott Brown (@ryansb) +options: + function_name: + description: + - The name of the function alias. + required: true + state: + description: + - Describes the desired state. + required: true + default: "present" + choices: ["present", "absent"] + name: + description: + - Name of the function alias. + required: true + aliases: ['alias_name'] + description: + description: + - A short, user-defined function alias description. + required: false + version: + description: + - Version associated with the Lambda function alias. + A value of 0 (or omitted parameter) sets the alias to the $LATEST version. + required: false + aliases: ['function_version'] +requirements: + - boto3 +extends_documentation_fragment: + - aws + +''' + +EXAMPLES = ''' +--- +# Simple example to create a lambda function and publish a version +- hosts: localhost + gather_facts: no + vars: + state: present + project_folder: /path/to/deployment/package + deployment_package: lambda.zip + account: 123456789012 + production_version: 5 + tasks: + - name: AWS Lambda Function + lambda: + state: "{{ state | default('present') }}" + name: myLambdaFunction + publish: True + description: lambda function description + code_s3_bucket: package-bucket + code_s3_key: "lambda/{{ deployment_package }}" + local_path: "{{ project_folder }}/{{ deployment_package }}" + runtime: python2.7 + timeout: 5 + handler: lambda.handler + memory_size: 128 + role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole" + + - name: show results + debug: + var: lambda_facts + +# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0) + - name: "alias 'Dev' for function {{ lambda_facts.FunctionName }} " + lambda_alias: + state: "{{ state | default('present') }}" + function_name: "{{ lambda_facts.FunctionName }}" + name: Dev + description: Development is $LATEST version + +# The QA alias will only be created when a new version is published (i.e. 
not = '$LATEST') + - name: "alias 'QA' for function {{ lambda_facts.FunctionName }} " + lambda_alias: + state: "{{ state | default('present') }}" + function_name: "{{ lambda_facts.FunctionName }}" + name: QA + version: "{{ lambda_facts.Version }}" + description: "QA is version {{ lambda_facts.Version }}" + when: lambda_facts.Version != "$LATEST" + +# The Prod alias will have a fixed version based on a variable + - name: "alias 'Prod' for function {{ lambda_facts.FunctionName }} " + lambda_alias: + state: "{{ state | default('present') }}" + function_name: "{{ lambda_facts.FunctionName }}" + name: Prod + version: "{{ production_version }}" + description: "Production is version {{ production_version }}" +''' + +RETURN = ''' +--- +alias_arn: + description: Full ARN of the function, including the alias + returned: success + type: string + sample: arn:aws:lambda:us-west-2:123456789012:function:myFunction:dev +description: + description: A short description of the alias + returned: success + type: string + sample: The development stage for my hot new app +function_version: + description: The qualifier that the alias refers to + returned: success + type: string + sample: $LATEST +name: + description: The name of the alias assigned + returned: success + type: string + sample: dev +''' + + +class AWSConnection: + """ + Create the connection object and client objects as required. + """ + + def __init__(self, ansible_obj, resources, boto3=True): + + try: + self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=boto3) + + self.resource_client = dict() + if not resources: + resources = ['lambda'] + + resources.append('iam') + + for resource in resources: + aws_connect_kwargs.update(dict(region=self.region, + endpoint=self.endpoint, + conn_type='client', + resource=resource + )) + self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs) + + # if region is not provided, then get default profile/session region + if not self.region: + self.region = self.resource_client['lambda'].meta.region_name + + except (ClientError, ParamValidationError, MissingParametersError) as e: + ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e)) + + try: + self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4] + except (ClientError, ValueError, KeyError, IndexError): + self.account_id = '' + + def client(self, resource='lambda'): + return self.resource_client[resource] + + +def pc(key): + """ + Changes python key into Pascale case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'. + + :param key: + :return: + """ + + return "".join([token.capitalize() for token in key.split('_')]) + + +def set_api_params(module, module_params): + """ + Sets module parameters to those expected by the boto3 API. + + :param module: + :param module_params: + :return: + """ + + api_params = dict() + + for param in module_params: + module_param = module.params.get(param, None) + if module_param: + api_params[pc(param)] = module_param + + return api_params + + +def validate_params(module, aws): + """ + Performs basic parameter validation. + + :param module: Ansible module reference + :param aws: AWS client connection + :return: + """ + + function_name = module.params['function_name'] + + # validate function name + if not re.search('^[\w\-:]+$', function_name): + module.fail_json( + msg='Function name {0} is invalid. 
Names must contain only alphanumeric characters and hyphens.'.format(function_name) + ) + if len(function_name) > 64: + module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) + + # if parameter 'function_version' is zero, set it to $LATEST, else convert it to a string + if module.params['function_version'] == 0: + module.params['function_version'] = '$LATEST' + else: + module.params['function_version'] = str(module.params['function_version']) + + return + + +def get_lambda_alias(module, aws): + """ + Returns the lambda function alias if it exists. + + :param module: Ansible module reference + :param aws: AWS client connection + :return: + """ + + client = aws.client('lambda') + + # set API parameters + api_params = set_api_params(module, ('function_name', 'name')) + + # check if alias exists and get facts + try: + results = client.get_alias(**api_params) + + except (ClientError, ParamValidationError, MissingParametersError) as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + results = None + else: + module.fail_json(msg='Error retrieving function alias: {0}'.format(e)) + + return results + + +def lambda_alias(module, aws): + """ + Adds, updates or deletes lambda function aliases. + + :param module: Ansible module reference + :param aws: AWS client connection + :return dict: + """ + client = aws.client('lambda') + results = dict() + changed = False + current_state = 'absent' + state = module.params['state'] + + facts = get_lambda_alias(module, aws) + if facts: + current_state = 'present' + + if state == 'present': + if current_state == 'present': + + # check if alias has changed -- only version and description can change + alias_params = ('function_version', 'description') + for param in alias_params: + if module.params.get(param) != facts.get(pc(param)): + changed = True + break + + if changed: + api_params = set_api_params(module, ('function_name', 'name')) + api_params.update(set_api_params(module, alias_params)) + + if not module.check_mode: + try: + results = client.update_alias(**api_params) + except (ClientError, ParamValidationError, MissingParametersError) as e: + module.fail_json(msg='Error updating function alias: {0}'.format(e)) + + else: + # create new function alias + api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description')) + + try: + if not module.check_mode: + results = client.create_alias(**api_params) + changed = True + except (ClientError, ParamValidationError, MissingParametersError) as e: + module.fail_json(msg='Error creating function alias: {0}'.format(e)) + + else: # state = 'absent' + if current_state == 'present': + # delete the function + api_params = set_api_params(module, ('function_name', 'name')) + + try: + if not module.check_mode: + results = client.delete_alias(**api_params) + changed = True + except (ClientError, ParamValidationError, MissingParametersError) as e: + module.fail_json(msg='Error deleting function alias: {0}'.format(e)) + + return dict(changed=changed, **dict(results or facts)) + + +def main(): + """ + Main entry point. 
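+    Builds the argument spec, verifies that boto3 is available and that the
+    parameters are valid, then delegates the actual work to lambda_alias().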
+ + :return dict: ansible facts + """ + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + state=dict(required=False, default='present', choices=['present', 'absent']), + function_name=dict(required=True, default=None), + name=dict(required=True, default=None, aliases=['alias_name']), + function_version=dict(type='int', required=False, default=0, aliases=['version']), + description=dict(required=False, default=None), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[], + required_together=[] + ) + + # validate dependencies + if not HAS_BOTO3: + module.fail_json(msg='boto3 is required for this module.') + + aws = AWSConnection(module, ['lambda']) + + validate_params(module, aws) + + results = lambda_alias(module, aws) + + module.exit_json(**camel_dict_to_snake_dict(results)) + + +# ansible import module(s) kept at ~eof as recommended +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/lambda_event.py b/cloud/amazon/lambda_event.py new file mode 100644 index 00000000000..acb057a8dee --- /dev/null +++ b/cloud/amazon/lambda_event.py @@ -0,0 +1,427 @@ +#!/usr/bin/python +# (c) 2016, Pierre Jodouin +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import sys + +try: + import boto3 + from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: lambda_event +short_description: Creates, updates or deletes AWS Lambda function event mappings. +description: + - This module allows the management of AWS Lambda function event source mappings such as DynamoDB and Kinesis stream + events via the Ansible framework. These event source mappings are relevant only in the AWS Lambda pull model, where + AWS Lambda invokes the function. + It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda + function itself and M(lambda_alias) to manage function aliases. + +version_added: "2.2" + +author: Pierre Jodouin (@pjodouin), Ryan Brown (@ryansb) +options: + lambda_function_arn: + description: + - The name or ARN of the lambda function. + required: true + aliases: ['function_name', 'function_arn'] + state: + description: + - Describes the desired state. + required: true + default: "present" + choices: ["present", "absent"] + alias: + description: + - Name of the function alias. Mutually exclusive with C(version). + required: true + version: + description: + - Version of the Lambda function. Mutually exclusive with C(alias). + required: false + event_source: + description: + - Source of the event that triggers the lambda function. 
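+      - Only C(stream) sources (Amazon Kinesis and DynamoDB streams) are supported at this time.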
+ required: false + default: stream + choices: ['stream'] + source_params: + description: + - Sub-parameters required for event source. + - I(== stream event source ==) + - C(source_arn) The Amazon Resource Name (ARN) of the Kinesis or DynamoDB stream that is the event source. + - C(enabled) Indicates whether AWS Lambda should begin polling the event source. Default is True. + - C(batch_size) The largest number of records that AWS Lambda will retrieve from your event source at the + time of invoking your function. Default is 100. + - C(starting_position) The position in the stream where AWS Lambda should start reading. + Choices are TRIM_HORIZON or LATEST. + required: true +requirements: + - boto3 +extends_documentation_fragment: + - aws + +''' + +EXAMPLES = ''' +--- +# Example that creates a lambda event notification for a DynamoDB stream +- hosts: localhost + gather_facts: no + vars: + state: present + tasks: + - name: DynamoDB stream event mapping + lambda_event: + state: "{{ state | default('present') }}" + event_source: stream + function_name: "{{ function_name }}" + alias: Dev + source_params: + source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457 + enabled: True + batch_size: 100 + starting_position: TRIM_HORIZON + + - name: Show source event + debug: + var: lambda_stream_events +''' + +RETURN = ''' +--- +lambda_stream_events: + description: list of dictionaries returned by the API describing stream event mappings + returned: success + type: list +''' + +# --------------------------------------------------------------------------------------------------- +# +# Helper Functions & classes +# +# --------------------------------------------------------------------------------------------------- + + +class AWSConnection: + """ + Create the connection object and client objects as required. + """ + + def __init__(self, ansible_obj, resources, use_boto3=True): + + try: + self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3) + + self.resource_client = dict() + if not resources: + resources = ['lambda'] + + resources.append('iam') + + for resource in resources: + aws_connect_kwargs.update(dict(region=self.region, + endpoint=self.endpoint, + conn_type='client', + resource=resource + )) + self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs) + + # if region is not provided, then get default profile/session region + if not self.region: + self.region = self.resource_client['lambda'].meta.region_name + + except (ClientError, ParamValidationError, MissingParametersError) as e: + ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e)) + + # set account ID + try: + self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4] + except (ClientError, ValueError, KeyError, IndexError): + self.account_id = '' + + def client(self, resource='lambda'): + return self.resource_client[resource] + + +def pc(key): + """ + Changes python key into Pascale case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'. 
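+    Used to map the module's snake_case parameter names onto the CamelCase
+    keys expected by the boto3 API.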
+ + :param key: + :return: + """ + + return "".join([token.capitalize() for token in key.split('_')]) + + +def ordered_obj(obj): + """ + Order object for comparison purposes + + :param obj: + :return: + """ + + if isinstance(obj, dict): + return sorted((k, ordered_obj(v)) for k, v in obj.items()) + if isinstance(obj, list): + return sorted(ordered_obj(x) for x in obj) + else: + return obj + + +def set_api_sub_params(params): + """ + Sets module sub-parameters to those expected by the boto3 API. + + :param params: + :return: + """ + + api_params = dict() + + for param in params.keys(): + param_value = params.get(param, None) + if param_value: + api_params[pc(param)] = param_value + + return api_params + + +def validate_params(module, aws): + """ + Performs basic parameter validation. + + :param module: + :param aws: + :return: + """ + + function_name = module.params['lambda_function_arn'] + + # validate function name + if not re.search('^[\w\-:]+$', function_name): + module.fail_json( + msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) + ) + if len(function_name) > 64: + module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) + + # check if 'function_name' needs to be expanded in full ARN format + if not module.params['lambda_function_arn'].startswith('arn:aws:lambda:'): + function_name = module.params['lambda_function_arn'] + module.params['lambda_function_arn'] = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(aws.region, aws.account_id, function_name) + + qualifier = get_qualifier(module) + if qualifier: + function_arn = module.params['lambda_function_arn'] + module.params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier) + + return + + +def get_qualifier(module): + """ + Returns the function qualifier as a version or alias or None. + + :param module: + :return: + """ + + qualifier = None + if module.params['version'] > 0: + qualifier = str(module.params['version']) + elif module.params['alias']: + qualifier = str(module.params['alias']) + + return qualifier + + +# --------------------------------------------------------------------------------------------------- +# +# Lambda Event Handlers +# +# This section defines a lambda_event_X function where X is an AWS service capable of initiating +# the execution of a Lambda function (pull only). +# +# --------------------------------------------------------------------------------------------------- + +def lambda_event_stream(module, aws): + """ + Adds, updates or deletes lambda stream (DynamoDb, Kinesis) event notifications. 
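+    An existing mapping is looked up with list_event_source_mappings(); its
+    presence, combined with the desired state, determines whether the
+    mapping is created, updated or deleted.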
+ :param module: + :param aws: + :return: + """ + + client = aws.client('lambda') + facts = dict() + changed = False + current_state = 'absent' + state = module.params['state'] + + api_params = dict(FunctionName=module.params['lambda_function_arn']) + + # check if required sub-parameters are present and valid + source_params = module.params['source_params'] + + source_arn = source_params.get('source_arn') + if source_arn: + api_params.update(EventSourceArn=source_arn) + else: + module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.") + + # check if optional sub-parameters are valid, if present + batch_size = source_params.get('batch_size') + if batch_size: + try: + source_params['batch_size'] = int(batch_size) + except ValueError: + module.fail_json(msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params['batch_size'])) + + # optional boolean value needs special treatment as not present does not imply False + source_param_enabled = module.boolean(source_params.get('enabled', 'True')) + + # check if event mapping exist + try: + facts = client.list_event_source_mappings(**api_params)['EventSourceMappings'] + if facts: + current_state = 'present' + except ClientError as e: + module.fail_json(msg='Error retrieving stream event notification configuration: {0}'.format(e)) + + if state == 'present': + if current_state == 'absent': + + starting_position = source_params.get('starting_position') + if starting_position: + api_params.update(StartingPosition=starting_position) + else: + module.fail_json(msg="Source parameter 'starting_position' is required for stream event notification.") + + if source_arn: + api_params.update(Enabled=source_param_enabled) + if source_params.get('batch_size'): + api_params.update(BatchSize=source_params.get('batch_size')) + + try: + if not module.check_mode: + facts = client.create_event_source_mapping(**api_params) + changed = True + except (ClientError, ParamValidationError, MissingParametersError) as e: + module.fail_json(msg='Error creating stream source event mapping: {0}'.format(e)) + + else: + # current_state is 'present' + api_params = dict(FunctionName=module.params['lambda_function_arn']) + current_mapping = facts[0] + api_params.update(UUID=current_mapping['UUID']) + mapping_changed = False + + # check if anything changed + if source_params.get('batch_size') and source_params['batch_size'] != current_mapping['BatchSize']: + api_params.update(BatchSize=source_params['batch_size']) + mapping_changed = True + + if source_param_enabled is not None: + if source_param_enabled: + if current_mapping['State'] not in ('Enabled', 'Enabling'): + api_params.update(Enabled=True) + mapping_changed = True + else: + if current_mapping['State'] not in ('Disabled', 'Disabling'): + api_params.update(Enabled=False) + mapping_changed = True + + if mapping_changed: + try: + if not module.check_mode: + facts = client.update_event_source_mapping(**api_params) + changed = True + except (ClientError, ParamValidationError, MissingParametersError) as e: + module.fail_json(msg='Error updating stream source event mapping: {0}'.format(e)) + + else: + if current_state == 'present': + # remove the stream event mapping + api_params = dict(UUID=facts[0]['UUID']) + + try: + if not module.check_mode: + facts = client.delete_event_source_mapping(**api_params) + changed = True + except (ClientError, ParamValidationError, MissingParametersError) as e: + module.fail_json(msg='Error removing stream source event mapping: 
{0}'.format(e)) + + return camel_dict_to_snake_dict(dict(changed=changed, events=facts)) + + +def main(): + """Produce a list of function suffixes which handle lambda events.""" + this_module = sys.modules[__name__] + source_choices = ["stream"] + + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + state=dict(required=False, default='present', choices=['present', 'absent']), + lambda_function_arn=dict(required=True, default=None, aliases=['function_name', 'function_arn']), + event_source=dict(required=False, default="stream", choices=source_choices), + source_params=dict(type='dict', required=True, default=None), + alias=dict(required=False, default=None), + version=dict(type='int', required=False, default=0), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[['alias', 'version']], + required_together=[] + ) + + # validate dependencies + if not HAS_BOTO3: + module.fail_json(msg='boto3 is required for this module.') + + aws = AWSConnection(module, ['lambda']) + + validate_params(module, aws) + + this_module_function = getattr(this_module, 'lambda_event_{}'.format(module.params['event_source'].lower())) + + results = this_module_function(module, aws) + + module.exit_json(**results) + + +# ansible import module(s) kept at ~eof as recommended +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/lambda_facts.py b/cloud/amazon/lambda_facts.py new file mode 100644 index 00000000000..ac3db667948 --- /dev/null +++ b/cloud/amazon/lambda_facts.py @@ -0,0 +1,413 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import datetime +import sys + +try: + import boto3 + from botocore.exceptions import ClientError + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: lambda_facts +short_description: Gathers AWS Lambda function details as Ansible facts +description: + - Gathers various details related to Lambda functions, including aliases, versions and event source mappings. + Use module M(lambda) to manage the lambda function itself, M(lambda_alias) to manage function aliases and + M(lambda_event) to manage lambda event source mappings. + +version_added: "2.2" + +options: + query: + description: + - Specifies the resource type for which to gather facts. Leave blank to retrieve all facts. + required: true + choices: [ "aliases", "all", "config", "mappings", "policy", "versions" ] + default: "all" + function_name: + description: + - The name of the lambda function for which facts are requested. 
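+      - When omitted with C(query=all) or C(query=config), facts are returned for all functions.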
+ required: false + default: null + aliases: [ "function", "name"] + event_source_arn: + description: + - For query type 'mappings', this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream. + default: null + required: false +author: Pierre Jodouin (@pjodouin) +requirements: + - boto3 +extends_documentation_fragment: + - aws + +''' + +EXAMPLES = ''' +--- +# Simple example of listing all info for a function +- name: List all for a specific function + lambda_facts: + query: all + function_name: myFunction + register: my_function_details +# List all versions of a function +- name: List function versions + lambda_facts: + query: versions + function_name: myFunction + register: my_function_versions +# List all lambda function versions +- name: List all function + lambda_facts: + query: all + max_items: 20 +- name: show Lambda facts + debug: + var: lambda_facts +''' + +RETURN = ''' +--- +lambda_facts: + description: lambda facts + returned: success + type: dict +lambda_facts.function: + description: lambda function list + returned: success + type: dict +lambda_facts.function.TheName: + description: lambda function information, including event, mapping, and version information + returned: success + type: dict +''' + + +def fix_return(node): + """ + fixup returned dictionary + + :param node: + :return: + """ + + if isinstance(node, datetime.datetime): + node_value = str(node) + + elif isinstance(node, list): + node_value = [fix_return(item) for item in node] + + elif isinstance(node, dict): + node_value = dict([(item, fix_return(node[item])) for item in node.keys()]) + + else: + node_value = node + + return node_value + + +def alias_details(client, module): + """ + Returns list of aliases for a specified function. + + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + lambda_facts = dict() + + function_name = module.params.get('function_name') + if function_name: + params = dict() + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + try: + lambda_facts.update(aliases=client.list_aliases(FunctionName=function_name, **params)['Aliases']) + except ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + lambda_facts.update(aliases=[]) + else: + module.fail_json(msg='Unable to get {0} aliases, error: {1}'.format(function_name, e)) + else: + module.fail_json(msg='Parameter function_name required for query=aliases.') + + return {function_name: camel_dict_to_snake_dict(lambda_facts)} + + +def all_details(client, module): + """ + Returns all lambda related facts. 
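+    For a named function this aggregates configuration, alias, policy,
+    version and mapping facts; without a function name only the
+    configuration of each function is returned.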
+ + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + if module.params.get('max_items') or module.params.get('next_marker'): + module.fail_json(msg='Cannot specify max_items nor next_marker for query=all.') + + lambda_facts = dict() + + function_name = module.params.get('function_name') + if function_name: + lambda_facts[function_name] = {} + lambda_facts[function_name].update(config_details(client, module)[function_name]) + lambda_facts[function_name].update(alias_details(client, module)[function_name]) + lambda_facts[function_name].update(policy_details(client, module)[function_name]) + lambda_facts[function_name].update(version_details(client, module)[function_name]) + lambda_facts[function_name].update(mapping_details(client, module)[function_name]) + else: + lambda_facts.update(config_details(client, module)) + + return lambda_facts + + +def config_details(client, module): + """ + Returns configuration details for one or all lambda functions. + + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + lambda_facts = dict() + + function_name = module.params.get('function_name') + if function_name: + try: + lambda_facts.update(client.get_function_configuration(FunctionName=function_name)) + except ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + lambda_facts.update(function={}) + else: + module.fail_json(msg='Unable to get {0} configuration, error: {1}'.format(function_name, e)) + else: + params = dict() + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + try: + lambda_facts.update(function_list=client.list_functions(**params)['Functions']) + except ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + lambda_facts.update(function_list=[]) + else: + module.fail_json(msg='Unable to get function list, error: {0}'.format(e)) + + functions = dict() + for func in lambda_facts.pop('function_list', []): + functions[func['FunctionName']] = camel_dict_to_snake_dict(func) + return functions + + return {function_name: camel_dict_to_snake_dict(lambda_facts)} + + +def mapping_details(client, module): + """ + Returns all lambda event source mappings. 
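+    Results can be narrowed by the function_name and/or event_source_arn
+    parameters.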
+ + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + lambda_facts = dict() + params = dict() + function_name = module.params.get('function_name') + + if function_name: + params['FunctionName'] = module.params.get('function_name') + + if module.params.get('event_source_arn'): + params['EventSourceArn'] = module.params.get('event_source_arn') + + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + try: + lambda_facts.update(mappings=client.list_event_source_mappings(**params)['EventSourceMappings']) + except ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + lambda_facts.update(mappings=[]) + else: + module.fail_json(msg='Unable to get source event mappings, error: {0}'.format(e)) + + if function_name: + return {function_name: camel_dict_to_snake_dict(lambda_facts)} + + return camel_dict_to_snake_dict(lambda_facts) + + +def policy_details(client, module): + """ + Returns policy attached to a lambda function. + + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + if module.params.get('max_items') or module.params.get('next_marker'): + module.fail_json(msg='Cannot specify max_items nor next_marker for query=policy.') + + lambda_facts = dict() + + function_name = module.params.get('function_name') + if function_name: + try: + # get_policy returns a JSON string so must convert to dict before reassigning to its key + lambda_facts.update(policy=json.loads(client.get_policy(FunctionName=function_name)['Policy'])) + except ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + lambda_facts.update(policy={}) + else: + module.fail_json(msg='Unable to get {0} policy, error: {1}'.format(function_name, e)) + else: + module.fail_json(msg='Parameter function_name required for query=policy.') + + return {function_name: camel_dict_to_snake_dict(lambda_facts)} + + +def version_details(client, module): + """ + Returns all lambda function versions. + + :param client: AWS API client reference (boto3) + :param module: Ansible module reference + :return dict: + """ + + lambda_facts = dict() + + function_name = module.params.get('function_name') + if function_name: + params = dict() + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + try: + lambda_facts.update(versions=client.list_versions_by_function(FunctionName=function_name, **params)['Versions']) + except ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + lambda_facts.update(versions=[]) + else: + module.fail_json(msg='Unable to get {0} versions, error: {1}'.format(function_name, e)) + else: + module.fail_json(msg='Parameter function_name required for query=versions.') + + return {function_name: camel_dict_to_snake_dict(lambda_facts)} + + +def main(): + """ + Main entry point. 
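+    Dispatches to one of the *_details helper functions based on the value
+    of the query parameter.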
+ + :return dict: ansible facts + """ + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + function_name=dict(required=False, default=None, aliases=['function', 'name']), + query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions'], default='all'), + event_source_arn=dict(required=False, default=None) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[], + required_together=[] + ) + + # validate dependencies + if not HAS_BOTO3: + module.fail_json(msg='boto3 is required for this module.') + + # validate function_name if present + function_name = module.params['function_name'] + if function_name: + if not re.search("^[\w\-:]+$", function_name): + module.fail_json( + msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) + ) + if len(function_name) > 64: + module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) + + try: + region, endpoint, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + aws_connect_kwargs.update(dict(region=region, + endpoint=endpoint, + conn_type='client', + resource='lambda' + )) + client = boto3_conn(module, **aws_connect_kwargs) + except ClientError as e: + module.fail_json(msg="Can't authorize connection - {0}".format(e)) + + this_module = sys.modules[__name__] + + invocations = dict( + aliases='alias_details', + all='all_details', + config='config_details', + mappings='mapping_details', + policy='policy_details', + versions='version_details', + ) + + this_module_function = getattr(this_module, invocations[module.params['query']]) + all_facts = fix_return(this_module_function(client, module)) + + results = dict(ansible_facts={'lambda_facts': {'function': all_facts}}, changed=False) + + if module.check_mode: + results['msg'] = 'Check mode set but ignored for fact gathering only.' + + module.exit_json(**results) + + +# ansible import module(s) kept at ~eof as recommended +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/redshift.py b/cloud/amazon/redshift.py new file mode 100644 index 00000000000..a1ae146a427 --- /dev/null +++ b/cloud/amazon/redshift.py @@ -0,0 +1,479 @@ +#!/usr/bin/python + +# Copyright 2014 Jens Carl, Hothead Games Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +author: + - "Jens Carl (@j-carl), Hothead Games Inc." +module: redshift +version_added: "2.2" +short_description: create, delete, or modify an Amazon Redshift instance +description: + - Creates, deletes, or modifies amazon Redshift cluster instances. +options: + command: + description: + - Specifies the action to take. 
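+      - C(facts) gathers details of an existing cluster without modifying it.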
+    required: true
+    choices: [ 'create', 'facts', 'delete', 'modify' ]
+  identifier:
+    description:
+      - Redshift cluster identifier.
+    required: true
+  node_type:
+    description:
+      - The node type of the cluster. Must be specified when command=create.
+    choices: ['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge']
+  username:
+    description:
+      - Master database username. Used only when command=create.
+  password:
+    description:
+      - Master database password. Used only when command=create.
+  cluster_type:
+    description:
+      - The type of cluster.
+    choices: ['multi-node', 'single-node']
+    default: 'single-node'
+  db_name:
+    description:
+      - Name of the database.
+    default: null
+  availability_zone:
+    description:
+      - The availability zone in which to launch the cluster.
+    aliases: ['zone', 'aws_zone']
+  number_of_nodes:
+    description:
+      - Number of nodes. Only used when cluster_type=multi-node.
+    default: null
+  cluster_subnet_group_name:
+    description:
+      - The subnet group in which to place the cluster.
+    aliases: ['subnet']
+  cluster_security_groups:
+    description:
+      - The security groups to which the cluster belongs.
+    default: null
+    aliases: ['security_groups']
+  vpc_security_group_ids:
+    description:
+      - The VPC security group IDs to associate with the cluster.
+    aliases: ['vpc_security_groups']
+    default: null
+  preferred_maintenance_window:
+    description:
+      - The preferred weekly maintenance window.
+    aliases: ['maintance_window', 'maint_window']
+    default: null
+  cluster_parameter_group_name:
+    description:
+      - Name of the cluster parameter group.
+    aliases: ['param_group_name']
+    default: null
+  automated_snapshot_retention_period:
+    description:
+      - The number of days automated snapshots are retained.
+    aliases: ['retention_period']
+    default: null
+  port:
+    description:
+      - The port on which the cluster listens.
+    default: null
+  cluster_version:
+    description:
+      - The version of the cluster engine.
+    aliases: ['version']
+    choices: ['1.0']
+    default: null
+  allow_version_upgrade:
+    description:
+      - Whether version upgrades of the engine are allowed.
+    aliases: ['version_upgrade']
+    default: true
+  publicly_accessible:
+    description:
+      - Whether the cluster is publicly accessible.
+    default: false
+  encrypted:
+    description:
+      - Whether the cluster is encrypted.
+    default: false
+  elastic_ip:
+    description:
+      - The Elastic IP address to associate with the cluster, if any.
+    default: null
+  new_cluster_identifier:
+    description:
+      - New identifier for the cluster. Only used when command=modify.
+    aliases: ['new_identifier']
+    default: null
+  wait:
+    description:
+      - When command=create, modify or restore, wait for the database to enter the 'available' state. When command=delete, wait for the database to be terminated.
+    default: "no"
+    choices: [ "yes", "no" ]
+  wait_timeout:
+    description:
+      - How long, in seconds, to wait before giving up.
+    default: 300
+requirements: [ 'boto' ]
+extends_documentation_fragment: aws
+'''
+
+EXAMPLES = '''
+# Basic cluster provisioning example
+- redshift: >
+    command=create
+    node_type=ds1.xlarge
+    identifier=new_cluster
+    username=cluster_admin
+    password=1nsecure
+'''
+
+RETURN = '''
+cluster:
+  description: dictionary containing all the cluster information
+  returned: success
+  type: dictionary
+  contains:
+    identifier:
+      description: Id of the cluster.
+      returned: success
+      type: string
+      sample: "new_redshift_cluster"
+    create_time:
+      description: Time of the cluster creation as timestamp.
+      returned: success
+      type: float
+      sample: 1430158536.308
+    status:
+      description: Status of the cluster.
+ returned: success + type: string + sample: "available" + db_name: + description: Name of the database. + returned: success + type: string + sample: "new_db_name" + availability_zone: + description: Amazon availability zone where the cluster is located. + returned: success + type: string + sample: "us-east-1b" + maintenance_window: + description: Time frame when maintenance/upgrade are done. + returned: success + type: string + sample: "sun:09:30-sun:10:00" + private_ip_address: + description: Private IP address of the main node. + returned: success + type: string + sample: "10.10.10.10" + public_ip_address: + description: Public IP address of the main node. + returned: success + type: string + sample: "0.0.0.0" + port: + description: Port of the cluster. + returned: success + type: int + sample: 5439 + url: + description: FQDN of the main cluster node. + returned: success + type: string + sample: "new-redshift_cluster.jfkdjfdkj.us-east-1.redshift.amazonaws.com" +''' + +import time + +try: + import boto + from boto import redshift + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + + +def _collect_facts(resource): + """Transfrom cluster information to dict.""" + facts = { + 'identifier' : resource['ClusterIdentifier'], + 'create_time' : resource['ClusterCreateTime'], + 'status' : resource['ClusterStatus'], + 'username' : resource['MasterUsername'], + 'db_name' : resource['DBName'], + 'availability_zone' : resource['AvailabilityZone'], + 'maintenance_window': resource['PreferredMaintenanceWindow'], + } + + for node in resource['ClusterNodes']: + if node['NodeRole'] in ('SHARED', 'LEADER'): + facts['private_ip_address'] = node['PrivateIPAddress'] + break + + return facts + + +def create_cluster(module, redshift): + """ + Create a new cluster + + module: AnsibleModule object + redshift: authenticated redshift connection object + + Returns: + """ + + identifier = module.params.get('identifier') + node_type = module.params.get('node_type') + username = module.params.get('username') + password = module.params.get('password') + wait = module.params.get('wait') + wait_timeout = module.params.get('wait_timeout') + + changed = True + # Package up the optional parameters + params = {} + for p in ('db_name', 'cluster_type', 'cluster_security_groups', + 'vpc_security_group_ids', 'cluster_subnet_group_name', + 'availability_zone', 'preferred_maintenance_window', + 'cluster_parameter_group_name', + 'automated_snapshot_retention_period', 'port', + 'cluster_version', 'allow_version_upgrade', + 'number_of_nodes', 'publicly_accessible', + 'encrypted', 'elastic_ip'): + if p in module.params: + params[ p ] = module.params.get( p ) + + try: + redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0] + changed = False + except boto.exception.JSONResponseError as e: + try: + redshift.create_cluster(identifier, node_type, username, password, **params) + except boto.exception.JSONResponseError as e: + module.fail_json(msg=str(e)) + + try: + resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0] + except boto.exception.JSONResponseError as e: + module.fail_json(msg=str(e)) + + if wait: + try: + wait_timeout = time.time() + wait_timeout + time.sleep(5) + + while wait_timeout > time.time() and resource['ClusterStatus'] != 'available': + time.sleep(5) + if wait_timeout <= time.time(): + module.fail_json(msg = "Timeout waiting for resource %s" % resource.id) + + resource = 
redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
+
+        except boto.exception.JSONResponseError as e:
+            module.fail_json(msg=str(e))
+
+    return(changed, _collect_facts(resource))
+
+
+def describe_cluster(module, redshift):
+    """
+    Collect data about the cluster.
+
+    module: Ansible module object
+    redshift: authenticated redshift connection object
+    """
+    identifier = module.params.get('identifier')
+
+    try:
+        resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
+    except boto.exception.JSONResponseError as e:
+        module.fail_json(msg=str(e))
+
+    return(True, _collect_facts(resource))
+
+
+def delete_cluster(module, redshift):
+    """
+    Delete a cluster.
+
+    module: Ansible module object
+    redshift: authenticated redshift connection object
+    """
+
+    identifier = module.params.get('identifier')
+    wait = module.params.get('wait')
+    wait_timeout = module.params.get('wait_timeout')
+
+    try:
+        redshift.delete_cluster(identifier)
+    except boto.exception.JSONResponseError as e:
+        module.fail_json(msg=str(e))
+
+    if wait:
+        try:
+            wait_timeout = time.time() + wait_timeout
+            resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
+
+            while wait_timeout > time.time() and resource['ClusterStatus'] != 'deleting':
+                time.sleep(5)
+                if wait_timeout <= time.time():
+                    module.fail_json(msg="Timeout waiting for resource %s" % resource['ClusterIdentifier'])
+
+                resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
+
+        except boto.exception.JSONResponseError as e:
+            module.fail_json(msg=str(e))
+
+    return(True, {})
+
+
+def modify_cluster(module, redshift):
+    """
+    Modify an existing cluster.
+
+    module: Ansible module object
+    redshift: authenticated redshift connection object
+    """
+
+    identifier = module.params.get('identifier')
+    wait = module.params.get('wait')
+    wait_timeout = module.params.get('wait_timeout')
+
+    # Package up the optional parameters
+    params = {}
+    for p in ('cluster_type', 'cluster_security_groups',
+              'vpc_security_group_ids', 'cluster_subnet_group_name',
+              'availability_zone', 'preferred_maintenance_window',
+              'cluster_parameter_group_name',
+              'automated_snapshot_retention_period', 'port', 'cluster_version',
+              'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier'):
+        if p in module.params:
+            params[p] = module.params.get(p)
+
+    # Fail when the cluster does not exist, then apply the modifications
+    try:
+        redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
+    except boto.exception.JSONResponseError as e:
+        module.fail_json(msg=str(e))
+
+    try:
+        redshift.modify_cluster(identifier, **params)
+    except boto.exception.JSONResponseError as e:
+        module.fail_json(msg=str(e))
+
+    try:
+        resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
+    except boto.exception.JSONResponseError as e:
+        module.fail_json(msg=str(e))
+
+    if wait:
+        try:
+            wait_timeout = time.time() + wait_timeout
+            time.sleep(5)
+
+            while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
+                time.sleep(5)
+                if wait_timeout <= time.time():
+                    module.fail_json(msg="Timeout waiting for resource %s" % resource['ClusterIdentifier'])
+
+                resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
+
+        except boto.exception.JSONResponseError as e:
+            # https://github.com/boto/boto/issues/2776 is fixed.
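+            # Any API error raised while polling the modified cluster is
+            # treated as fatal rather than retried.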
+ module.fail_json(msg=str(e)) + + return(True, _collect_facts(resource)) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + command = dict(choices=['create', 'facts', 'delete', 'modify'], required=True), + identifier = dict(required=True), + node_type = dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge'], required=False), + username = dict(required=False), + password = dict(no_log=True, required=False), + db_name = dict(require=False), + cluster_type = dict(choices=['multi-node', 'single-node', ], default='single-node'), + cluster_security_groups = dict(aliases=['security_groups'], type='list'), + vpc_security_group_ids = dict(aliases=['vpc_security_groups'], type='list'), + cluster_subnet_group_name = dict(aliases=['subnet']), + availability_zone = dict(aliases=['aws_zone', 'zone']), + preferred_maintenance_window = dict(aliases=['maintance_window', 'maint_window']), + cluster_parameter_group_name = dict(aliases=['param_group_name']), + automated_snapshot_retention_period = dict(aliases=['retention_period']), + port = dict(type='int'), + cluster_version = dict(aliases=['version'], choices=['1.0']), + allow_version_upgrade = dict(aliases=['version_upgrade'], type='bool', default=True), + number_of_nodes = dict(type='int'), + publicly_accessible = dict(type='bool', default=False), + encrypted = dict(type='bool', default=False), + elastic_ip = dict(required=False), + new_cluster_identifier = dict(aliases=['new_identifier']), + wait = dict(type='bool', default=False), + wait_timeout = dict(default=300), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + ) + + if not HAS_BOTO: + module.fail_json(msg='boto v2.9.0+ required for this module') + + command = module.params.get('command') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + if not region: + module.fail_json(msg=str("region not specified and unable to determine region from EC2_REGION.")) + + # connect to the rds endpoint + try: + conn = connect_to_aws(boto.redshift, region, **aws_connect_params) + except boto.exception.JSONResponseError as e: + module.fail_json(msg=str(e)) + + changed = True + if command == 'create': + (changed, cluster) = create_cluster(module, conn) + + elif command == 'facts': + (changed, cluster) = describe_cluster(module, conn) + + elif command == 'delete': + (changed, cluster) = delete_cluster(module, conn) + + elif command == 'modify': + (changed, cluster) = modify_cluster(module, conn) + + module.exit_json(changed=changed, cluster=cluster) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/redshift_subnet_group.py b/cloud/amazon/redshift_subnet_group.py new file mode 100644 index 00000000000..cecf68209ab --- /dev/null +++ b/cloud/amazon/redshift_subnet_group.py @@ -0,0 +1,186 @@ +#!/usr/bin/python + +# Copyright 2014 Jens Carl, Hothead Games Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+author:
+  - "Jens Carl (@j-carl), Hothead Games Inc."
+module: redshift_subnet_group
+version_added: "2.2"
+short_description: manage Redshift cluster subnet groups
+description:
+  - Creates, modifies, and deletes Redshift cluster subnet groups.
+options:
+  state:
+    description:
+      - Specifies whether the subnet group should be present or absent.
+    default: 'present'
+    choices: ['present', 'absent']
+  group_name:
+    description:
+      - Cluster subnet group name.
+    required: true
+    aliases: ['name']
+  group_description:
+    description:
+      - Database subnet group description.
+    required: false
+    default: null
+    aliases: ['description']
+  group_subnets:
+    description:
+      - List of subnet IDs that make up the cluster subnet group.
+    required: false
+    default: null
+    aliases: ['subnets']
+requirements: [ 'boto' ]
+extends_documentation_fragment: aws
+'''
+
+EXAMPLES = '''
+# Create a Redshift subnet group
+- local_action:
+    module: redshift_subnet_group
+    state: present
+    group_name: redshift-subnet
+    group_description: Redshift subnet
+    group_subnets:
+      - 'subnet-aaaaa'
+      - 'subnet-bbbbb'
+
+# Remove subnet group
+- redshift_subnet_group:
+    state: absent
+    group_name: redshift-subnet
+'''
+
+RETURN = '''
+group:
+    description: dictionary containing all Redshift subnet group information
+    returned: success
+    type: dictionary
+    contains:
+        name:
+            description: name of the Redshift subnet group
+            returned: success
+            type: string
+            sample: "redshift_subnet_group_name"
+        vpc_id:
+            description: ID of the VPC where the subnet is located
+            returned: success
+            type: string
+            sample: "vpc-aabb1122"
+'''
+
+try:
+    import boto
+    import boto.redshift
+    HAS_BOTO = True
+except ImportError:
+    HAS_BOTO = False
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        state=dict(required=True, choices=['present', 'absent']),
+        group_name=dict(required=True, aliases=['name']),
+        group_description=dict(required=False, aliases=['description']),
+        group_subnets=dict(required=False, aliases=['subnets'], type='list'),
+    ))
+    module = AnsibleModule(argument_spec=argument_spec)
+
+    if not HAS_BOTO:
+        module.fail_json(msg='boto v2.9.0+ required for this module')
+
+    state = module.params.get('state')
+    group_name = module.params.get('group_name')
+    group_description = module.params.get('group_description')
+    group_subnets = module.params.get('group_subnets')
+
+    if state == 'present':
+        for required in ('group_name', 'group_description', 'group_subnets'):
+            if not module.params.get(required):
+                module.fail_json(msg="parameter %s required for state='present'" % required)
+    else:
+        for not_allowed in ('group_description', 'group_subnets'):
+            if module.params.get(not_allowed):
+                module.fail_json(msg="parameter %s not allowed for state='absent'" % not_allowed)
+
+    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+    if not region:
+        module.fail_json(msg="region not specified and unable to determine region from EC2_REGION.")
+
+    # Connect to the Redshift endpoint.
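+    # region and credentials come from the standard AWS module arguments resolved just above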
+    try:
+        conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
+    except boto.exception.JSONResponseError as e:
+        module.fail_json(msg=str(e))
+
+    try:
+        changed = False
+        exists = False
+        group = None
+
+        try:
+            matching_groups = conn.describe_cluster_subnet_groups(group_name, max_records=100)
+            exists = len(matching_groups) > 0
+        except boto.exception.JSONResponseError as e:
+            if e.body['Error']['Code'] != 'ClusterSubnetGroupNotFoundFault':
+                module.fail_json(msg=str(e))
+
+        if state == 'absent':
+            if exists:
+                conn.delete_cluster_subnet_group(group_name)
+                changed = True
+
+        else:
+            if not exists:
+                new_group = conn.create_cluster_subnet_group(group_name, group_description, group_subnets)
+                group = {
+                    'name': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
+                                     ['ClusterSubnetGroup']['ClusterSubnetGroupName'],
+                    'vpc_id': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
+                                       ['ClusterSubnetGroup']['VpcId'],
+                }
+            else:
+                changed_group = conn.modify_cluster_subnet_group(group_name, group_subnets, description=group_description)
+                group = {
+                    'name': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
+                                         ['ClusterSubnetGroup']['ClusterSubnetGroupName'],
+                    'vpc_id': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
+                                           ['ClusterSubnetGroup']['VpcId'],
+                }
+
+            changed = True
+
+    except boto.exception.JSONResponseError as e:
+        module.fail_json(msg=str(e))
+
+    module.exit_json(changed=changed, group=group)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/amazon/route53_facts.py b/cloud/amazon/route53_facts.py
new file mode 100644
index 00000000000..6dad5e21646
--- /dev/null
+++ b/cloud/amazon/route53_facts.py
@@ -0,0 +1,440 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+module: route53_facts
+short_description: Retrieves Route 53 details using AWS methods
+description:
+    - Gets various details related to Route 53 zones, record sets or health checks.
+version_added: "2.0"
+options:
+  query:
+    description:
+      - specifies the query action to take
+    required: True
+    choices: [
+        'change',
+        'checker_ip_range',
+        'health_check',
+        'hosted_zone',
+        'record_sets',
+        'reusable_delegation_set',
+    ]
+  change_id:
+    description:
+      - The ID of the change batch request.
+        The value that you specify here is the value that
+        ChangeResourceRecordSets returned in the Id element
+        when you submitted the request.
+ required: false + hosted_zone_id: + description: + - The Hosted Zone ID of the DNS zone + required: false + max_items: + description: + - Maximum number of items to return for various get/list requests + required: false + next_marker: + description: + - "Some requests such as list_command: hosted_zones will return a maximum + number of entries - EG 100. If the number of entries exceeds this maximum + another request can be sent using the NextMarker entry from the first response + to get the next page of results" + required: false + delegation_set_id: + description: + - The DNS Zone delegation set ID + required: false + start_record_name: + description: + - "The first name in the lexicographic ordering of domain names that you want + the list_command: record_sets to start listing from" + required: false + type: + description: + - The type of DNS record + required: false + choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS' ] + dns_name: + description: + - The first name in the lexicographic ordering of domain names that you want + the list_command to start listing from + required: false + resource_id: + description: + - The ID/s of the specified resource/s + required: false + aliases: ['resource_ids'] + health_check_id: + description: + - The ID of the health check + required: false + hosted_zone_method: + description: + - "This is used in conjunction with query: hosted_zone. + It allows for listing details, counts or tags of various + hosted zone details." + required: false + choices: [ + 'details', + 'list', + 'list_by_name', + 'count', + 'tags', + ] + default: 'list' + health_check_method: + description: + - "This is used in conjunction with query: health_check. + It allows for listing details, counts or tags of various + health check details." 
+ required: false + choices: [ + 'list', + 'details', + 'status', + 'failure_reason', + 'count', + 'tags', + ] + default: 'list' +author: Karen Cheng(@Etherdaemon) +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Simple example of listing all hosted zones +- name: List all hosted zones + route53_facts: + query: hosted_zone + register: hosted_zones + +# Getting a count of hosted zones +- name: Return a count of all hosted zones + route53_facts: + query: hosted_zone + hosted_zone_method: count + register: hosted_zone_count + +- name: List the first 20 resource record sets in a given hosted zone + route53_facts: + profile: account_name + query: record_sets + hosted_zone_id: ZZZ1111112222 + max_items: 20 + register: record_sets + +- name: List first 20 health checks + route53_facts: + query: health_check + health_check_method: list + max_items: 20 + register: health_checks + +- name: Get health check last failure_reason + route53_facts: + query: health_check + health_check_method: failure_reason + health_check_id: 00000000-1111-2222-3333-12345678abcd + register: health_check_failure_reason + +- name: Retrieve reusable delegation set details + route53_facts: + query: reusable_delegation_set + delegation_set_id: delegation id + register: delegation_sets + +''' +try: + import boto + import botocore + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +try: + import boto3 + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info + + +def get_hosted_zone(client, module): + params = dict() + + if module.params.get('hosted_zone_id'): + params['Id'] = module.params.get('hosted_zone_id') + else: + module.fail_json(msg="Hosted Zone Id is required") + + results = client.get_hosted_zone(**params) + return results + + +def reusable_delegation_set_details(client, module): + params = dict() + if not module.params.get('delegation_set_id'): + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + results = client.list_reusable_delegation_sets(**params) + else: + params['DelegationSetId'] = module.params.get('delegation_set_id') + results = client.get_reusable_delegation_set(**params) + + return results + + +def list_hosted_zones(client, module): + params = dict() + + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + if module.params.get('delegation_set_id'): + params['DelegationSetId'] = module.params.get('delegation_set_id') + + results = client.list_hosted_zones(**params) + return results + + +def list_hosted_zones_by_name(client, module): + params = dict() + + if module.params.get('hosted_zone_id'): + params['HostedZoneId'] = module.params.get('hosted_zone_id') + + if module.params.get('dns_name'): + params['DNSName'] = module.params.get('dns_name') + + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + results = client.list_hosted_zones_by_name(**params) + return results + + +def change_details(client, module): + params = dict() + + if module.params.get('change_id'): + params['Id'] = module.params.get('change_id') + else: + module.fail_json(msg="change_id is required") + + results = client.get_change(**params) + return results + + 
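+# Each helper below serves one value of the module's 'query' option (see the
+# 'invocations' dispatch table in main()). For the list_* queries, a truncated
+# response carries a NextMarker value that can be passed back in through the
+# next_marker option to retrieve the following page of results.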
+def checker_ip_range_details(client, module): + results = client.get_checker_ip_ranges() + return results + + +def get_count(client, module): + if module.params.get('query') == 'health_check': + results = client.get_health_check_count() + else: + results = client.get_hosted_zone_count() + + return results + + +def get_health_check(client, module): + params = dict() + + if not module.params.get('health_check_id'): + module.fail_json(msg="health_check_id is required") + else: + params['HealthCheckId'] = module.params.get('health_check_id') + + if module.params.get('health_check_method') == 'details': + results = client.get_health_check(**params) + elif module.params.get('health_check_method') == 'failure_reason': + results = client.get_health_check_last_failure_reason(**params) + elif module.params.get('health_check_method') == 'status': + results = client.get_health_check_status(**params) + + return results + + +def get_resource_tags(client, module): + params = dict() + + if module.params.get('resource_id'): + params['ResourceIds'] = module.params.get('resource_id') + else: + module.fail_json(msg="resource_id or resource_ids is required") + + if module.params.get('query') == 'health_check': + params['ResourceType'] = 'healthcheck' + else: + params['ResourceType'] = 'hostedzone' + + results = client.list_tags_for_resources(**params) + return results + + +def list_health_checks(client, module): + params = dict() + + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + results = client.list_health_checks(**params) + return results + + +def record_sets_details(client, module): + params = dict() + + if module.params.get('hosted_zone_id'): + params['HostedZoneId'] = module.params.get('hosted_zone_id') + else: + module.fail_json(msg="Hosted Zone Id is required") + + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('start_record_name'): + params['StartRecordName'] = module.params.get('start_record_name') + + if module.params.get('type') and not module.params.get('start_record_name'): + module.fail_json(msg="start_record_name must be specified if type is set") + elif module.params.get('type'): + params['StartRecordType'] = module.params.get('type') + + results = client.list_resource_record_sets(**params) + return results + + +def health_check_details(client, module): + health_check_invocations = { + 'list': list_health_checks, + 'details': get_health_check, + 'status': get_health_check, + 'failure_reason': get_health_check, + 'count': get_count, + 'tags': get_resource_tags, + } + + results = health_check_invocations[module.params.get('health_check_method')](client, module) + return results + + +def hosted_zone_details(client, module): + hosted_zone_invocations = { + 'details': get_hosted_zone, + 'list': list_hosted_zones, + 'list_by_name': list_hosted_zones_by_name, + 'count': get_count, + 'tags': get_resource_tags, + } + + results = hosted_zone_invocations[module.params.get('hosted_zone_method')](client, module) + return results + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + query=dict(choices=[ + 'change', + 'checker_ip_range', + 'health_check', + 'hosted_zone', + 'record_sets', + 'reusable_delegation_set', + ], required=True), + change_id=dict(), + hosted_zone_id=dict(), + max_items=dict(type='str'), + next_marker=dict(), + delegation_set_id=dict(), + 
        start_record_name=dict(),
+        type=dict(choices=[
+            'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'
+        ]),
+        dns_name=dict(),
+        resource_id=dict(type='list', aliases=['resource_ids']),
+        health_check_id=dict(),
+        hosted_zone_method=dict(choices=[
+            'details',
+            'list',
+            'list_by_name',
+            'count',
+            'tags'
+        ], default='list'),
+        health_check_method=dict(choices=[
+            'list',
+            'details',
+            'status',
+            'failure_reason',
+            'count',
+            'tags',
+        ], default='list'),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        mutually_exclusive=[
+            ['hosted_zone_method', 'health_check_method'],
+        ],
+    )
+
+    # Validate Requirements
+    if not (HAS_BOTO or HAS_BOTO3):
+        module.fail_json(msg='boto/boto3 is required for this module')
+
+    try:
+        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+        route53 = boto3_conn(module, conn_type='client', resource='route53', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+    except boto.exception.NoAuthHandlerFound as e:
+        module.fail_json(msg="Can't authorize connection - %s" % str(e))
+
+    invocations = {
+        'change': change_details,
+        'checker_ip_range': checker_ip_range_details,
+        'health_check': health_check_details,
+        'hosted_zone': hosted_zone_details,
+        'record_sets': record_sets_details,
+        'reusable_delegation_set': reusable_delegation_set_details,
+    }
+    results = invocations[module.params.get('query')](route53, module)
+
+    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/amazon/route53_health_check.py b/cloud/amazon/route53_health_check.py
new file mode 100644
index 00000000000..0070b3e288c
--- /dev/null
+++ b/cloud/amazon/route53_health_check.py
@@ -0,0 +1,364 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: route53_health_check
+short_description: add or delete health checks in Amazon's Route 53 DNS service
+description:
+  - Creates and deletes DNS health checks in Amazon's Route 53 service.
+  - Only the port, resource_path, string_match and request_interval are
+    considered when updating existing health-checks.
+version_added: "2.0"
+options:
+  state:
+    description:
+      - Specifies the action to take.
+    required: true
+    choices: [ 'present', 'absent' ]
+  ip_address:
+    description:
+      - IP address of the end-point to check. Either this or `fqdn` has to be
+        provided.
+    required: false
+    default: null
+  port:
+    description:
+      - The port on the endpoint on which you want Amazon Route 53 to perform
+        health checks. Required for TCP checks.
+    required: false
+    default: null
+  type:
+    description:
+      - The type of health check that you want to create, which indicates how
+        Amazon Route 53 determines whether an endpoint is healthy.
+    required: true
+    choices: [ 'HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP' ]
+  resource_path:
+    description:
+      - The path that you want Amazon Route 53 to request when performing
+        health checks. The path can be any value for which your endpoint will
+        return an HTTP status code of 2xx or 3xx when the endpoint is healthy,
+        for example the file /docs/route53-health-check.html.
+      - Required for all checks except TCP.
+      - The path must begin with a /
+      - Maximum 255 characters.
+    required: false
+    default: null
+  fqdn:
+    description:
+      - Domain name of the endpoint to check. Either this or `ip_address` has
+        to be provided. When both are given the `fqdn` is used in the `Host:`
+        header of the HTTP request.
+    required: false
+  string_match:
+    description:
+      - If the check type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string
+        that you want Amazon Route 53 to search for in the response body from
+        the specified resource. If the string appears in the first 5120 bytes
+        of the response body, Amazon Route 53 considers the resource healthy.
+    required: false
+    default: null
+  request_interval:
+    description:
+      - The number of seconds between the time that Amazon Route 53 gets a
+        response from your endpoint and the time that it sends the next
+        health-check request.
+    required: false
+    default: 30
+    choices: [ 10, 30 ]
+  failure_threshold:
+    description:
+      - The number of consecutive health checks that an endpoint must pass or
+        fail for Amazon Route 53 to change the current status of the endpoint
+        from unhealthy to healthy or vice versa.
+    required: false
+    default: 3
+    choices: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
+author: "zimbatm (@zimbatm)"
+extends_documentation_fragment:
+    - aws
+    - ec2
+'''
+
+EXAMPLES = '''
+# Create a health-check for host1.example.com and use it in record
+- route53_health_check:
+    state: present
+    fqdn: host1.example.com
+    type: HTTP_STR_MATCH
+    resource_path: /
+    string_match: "Hello"
+    request_interval: 10
+    failure_threshold: 2
+  register: my_health_check
+
+- route53:
+    action: create
+    zone: "example.com"
+    type: CNAME
+    record: "www.example.com"
+    value: host1.example.com
+    ttl: 30
+    # Routing policy
+    identifier: "host1@www"
+    weight: 100
+    health_check: "{{ my_health_check.health_check.id }}"
+
+# Delete health-check
+- route53_health_check:
+    state: absent
+    fqdn: host1.example.com
+
+'''
+
+import uuid
+
+try:
+    import boto
+    import boto.ec2
+    from boto import route53
+    from boto.route53 import Route53Connection, exception
+    from boto.route53.healthcheck import HealthCheck
+    HAS_BOTO = True
+except ImportError:
+    HAS_BOTO = False
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info
+
+
+# Things that can't get changed:
+#   protocol
+#   ip_address or domain
+#   request_interval
+#   string_match if not previously enabled
+def find_health_check(conn, wanted):
+    """Searches for health checks that have the exact same set of immutable values"""
+    for check in conn.get_list_health_checks().HealthChecks:
+        config = check.HealthCheckConfig
+        if (config.get('IPAddress') == wanted.ip_addr and
+                config.get('FullyQualifiedDomainName') == wanted.fqdn and
+                config.get('Type') == wanted.hc_type and
+                config.get('RequestInterval') == str(wanted.request_interval)):
+            return check
+    return None
+
+
+def to_health_check(config):
+    return HealthCheck(
+        config.get('IPAddress'),
+        config.get('Port'),
+        config.get('Type'),
+        config.get('ResourcePath'),
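+        # the remaining fields are keyword arguments in boto's HealthCheck constructor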
+        fqdn=config.get('FullyQualifiedDomainName'),
+        string_match=config.get('SearchString'),
+        request_interval=int(config.get('RequestInterval')),
+        failure_threshold=int(config.get('FailureThreshold')),
+    )
+
+
+def health_check_diff(a, b):
+    a = a.__dict__
+    b = b.__dict__
+    if a == b:
+        return {}
+    diff = {}
+    for key in set(a.keys()) | set(b.keys()):
+        if a.get(key) != b.get(key):
+            diff[key] = b.get(key)
+    return diff
+
+
+def to_template_params(health_check):
+    params = {
+        'ip_addr_part': '',
+        'port': health_check.port,
+        'type': health_check.hc_type,
+        'resource_path_part': '',
+        'fqdn_part': '',
+        'string_match_part': '',
+        'request_interval': health_check.request_interval,
+        'failure_threshold': health_check.failure_threshold,
+    }
+    if health_check.ip_addr:
+        params['ip_addr_part'] = HealthCheck.XMLIpAddrPart % {'ip_addr': health_check.ip_addr}
+    if health_check.resource_path:
+        params['resource_path_part'] = XMLResourcePathPart % {'resource_path': health_check.resource_path}
+    if health_check.fqdn:
+        params['fqdn_part'] = HealthCheck.XMLFQDNPart % {'fqdn': health_check.fqdn}
+    if health_check.string_match:
+        params['string_match_part'] = HealthCheck.XMLStringMatchPart % {'string_match': health_check.string_match}
+    return params
+
+XMLResourcePathPart = """<ResourcePath>%(resource_path)s</ResourcePath>"""
+
+POSTXMLBody = """
+    <CreateHealthCheckRequest xmlns="%(xmlns)s">
+        <CallerReference>%(caller_ref)s</CallerReference>
+        <HealthCheckConfig>
+            %(ip_addr_part)s
+            <Port>%(port)s</Port>
+            <Type>%(type)s</Type>
+            %(resource_path_part)s
+            %(fqdn_part)s
+            %(string_match_part)s
+            <RequestInterval>%(request_interval)s</RequestInterval>
+            <FailureThreshold>%(failure_threshold)s</FailureThreshold>
+        </HealthCheckConfig>
+    </CreateHealthCheckRequest>
+    """
+
+UPDATEHCXMLBody = """
+    <UpdateHealthCheckRequest xmlns="%(xmlns)s">
+        <HealthCheckVersion>%(health_check_version)s</HealthCheckVersion>
+        %(ip_addr_part)s
+        <Port>%(port)s</Port>
+        %(resource_path_part)s
+        %(fqdn_part)s
+        %(string_match_part)s
+        <FailureThreshold>%(failure_threshold)i</FailureThreshold>
+    </UpdateHealthCheckRequest>
+    """
+
+
+def create_health_check(conn, health_check, caller_ref=None):
+    if caller_ref is None:
+        caller_ref = str(uuid.uuid4())
+    uri = '/%s/healthcheck' % conn.Version
+    params = to_template_params(health_check)
+    params.update(xmlns=conn.XMLNameSpace, caller_ref=caller_ref)
+
+    xml_body = POSTXMLBody % params
+    response = conn.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
+    body = response.read()
+    boto.log.debug(body)
+    if response.status == 201:
+        e = boto.jsonresponse.Element()
+        h = boto.jsonresponse.XmlHandler(e, None)
+        h.parse(body)
+        return e
+    else:
+        raise exception.DNSServerError(response.status, response.reason, body)
+
+
+def update_health_check(conn, health_check_id, health_check_version, health_check):
+    uri = '/%s/healthcheck/%s' % (conn.Version, health_check_id)
+    params = to_template_params(health_check)
+    params.update(
+        xmlns=conn.XMLNameSpace,
+        health_check_version=health_check_version,
+    )
+    xml_body = UPDATEHCXMLBody % params
+    response = conn.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
+    body = response.read()
+    boto.log.debug(body)
+    if response.status not in (200, 204):
+        raise exception.DNSServerError(response.status,
+                                       response.reason,
+                                       body)
+    e = boto.jsonresponse.Element()
+    h = boto.jsonresponse.XmlHandler(e, None)
+    h.parse(body)
+    return e
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+        state=dict(choices=['present', 'absent'], default='present'),
+        ip_address=dict(),
+        port=dict(type='int'),
+        type=dict(required=True, choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']),
+        resource_path=dict(),
+        fqdn=dict(),
+        string_match=dict(),
+        request_interval=dict(type='int', choices=[10, 30], default=30),
+        failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], default=3),
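+        # the 30-second interval and 3-check threshold defaults above mirror Route 53's documented defaults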
+    ))
+    module = AnsibleModule(argument_spec=argument_spec)
+
+    if not HAS_BOTO:
+        module.fail_json(msg='boto 2.27.0+ required for this module')
+
+    state_in = module.params.get('state')
+    ip_addr_in = module.params.get('ip_address')
+    port_in = module.params.get('port')
+    type_in = module.params.get('type')
+    resource_path_in = module.params.get('resource_path')
+    fqdn_in = module.params.get('fqdn')
+    string_match_in = module.params.get('string_match')
+    request_interval_in = module.params.get('request_interval')
+    failure_threshold_in = module.params.get('failure_threshold')
+
+    if ip_addr_in is None and fqdn_in is None:
+        module.fail_json(msg="parameter 'ip_address' or 'fqdn' is required")
+
+    # Default port
+    if port_in is None:
+        if type_in in ['HTTP', 'HTTP_STR_MATCH']:
+            port_in = 80
+        elif type_in in ['HTTPS', 'HTTPS_STR_MATCH']:
+            port_in = 443
+        else:
+            module.fail_json(msg="parameter 'port' is required for 'type' TCP")
+
+    # string_match in relation with type
+    if type_in in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']:
+        if string_match_in is None:
+            module.fail_json(msg="parameter 'string_match' is required for the HTTP(S)_STR_MATCH types")
+        elif len(string_match_in) > 255:
+            module.fail_json(msg="parameter 'string_match' is limited to 255 characters max")
+    elif string_match_in:
+        module.fail_json(msg="parameter 'string_match' argument is only for the HTTP(S)_STR_MATCH types")
+
+    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+    # connect to the route53 endpoint
+    try:
+        conn = Route53Connection(**aws_connect_kwargs)
+    except boto.exception.BotoServerError as e:
+        module.fail_json(msg=e.error_message)
+
+    changed = False
+    action = None
+    check_id = None
+    wanted_config = HealthCheck(ip_addr_in, port_in, type_in, resource_path_in, fqdn_in, string_match_in, request_interval_in, failure_threshold_in)
+    existing_check = find_health_check(conn, wanted_config)
+    if existing_check:
+        check_id = existing_check.Id
+        existing_config = to_health_check(existing_check.HealthCheckConfig)
+
+    if state_in == 'present':
+        if existing_check is None:
+            action = "create"
+            check_id = create_health_check(conn, wanted_config).HealthCheck.Id
+            changed = True
+        else:
+            diff = health_check_diff(existing_config, wanted_config)
+            if diff:
+                action = "update"
+                update_health_check(conn, existing_check.Id, int(existing_check.HealthCheckVersion), wanted_config)
+                changed = True
+    elif state_in == 'absent':
+        if check_id:
+            action = "delete"
+            conn.delete_health_check(check_id)
+            changed = True
+    else:
+        module.fail_json(msg="Logic Error: Unknown state")
+
+    module.exit_json(changed=changed, health_check=dict(id=check_id), action=action)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/amazon/route53_zone.py b/cloud/amazon/route53_zone.py
index 4630e00d4fa..758860f6853 100644
--- a/cloud/amazon/route53_zone.py
+++ b/cloud/amazon/route53_zone.py
@@ -14,6 +14,10 @@
 # You should have received a copy of the GNU General Public License
 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' module: route53_zone short_description: add or delete Route53 zones @@ -46,11 +50,76 @@ - Comment associated with the zone required: false default: '' -extends_documentation_fragment: aws +extends_documentation_fragment: + - aws + - ec2 author: "Christopher Troup (@minichate)" ''' -import time +EXAMPLES = ''' +# create a public zone +- route53_zone: + zone: example.com + state: present + comment: this is an example + +# delete a public zone +- route53_zone: + zone: example.com + state: absent + +- name: private zone for devel + route53_zone: + zone: devel.example.com + state: present + vpc_id: '{{ myvpc_id }}' + comment: developer domain + +# more complex example +- name: register output after creating zone in parameterized region + route53_zone: + vpc_id: '{{ vpc.vpc_id }}' + vpc_region: '{{ ec2_region }}' + zone: '{{ vpc_dns_zone }}' + state: present + register: zone_out + +- debug: + var: zone_out +''' + +RETURN=''' +comment: + description: optional hosted zone comment + returned: when hosted zone exists + type: string + sample: "Private zone" +name: + description: hosted zone name + returned: when hosted zone exists + type: string + sample: "private.local." +private_zone: + description: whether hosted zone is private or public + returned: when hosted zone exists + type: bool + sample: true +vpc_id: + description: id of vpc attached to private hosted zone + returned: for private hosted zone + type: string + sample: "vpc-1d36c84f" +vpc_region: + description: region of vpc attached to private hosted zone + returned: for private hosted zone + type: string + sample: "eu-west-1" +zone_id: + description: hosted zone id + returned: when hosted zone exists + type: string + sample: "Z6JQG9820BEFMW" +''' try: import boto @@ -62,17 +131,19 @@ except ImportError: HAS_BOTO = False +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info + def main(): - module = AnsibleModule( - argument_spec=dict( + argument_spec = ec2_argument_spec() + argument_spec.update(dict( zone=dict(required=True), state=dict(default='present', choices=['present', 'absent']), vpc_id=dict(default=None), vpc_region=dict(default=None), - comment=dict(default=''), - ) - ) + comment=dict(default=''))) + module = AnsibleModule(argument_spec=argument_spec) if not HAS_BOTO: module.fail_json(msg='boto required for this module') @@ -83,6 +154,9 @@ def main(): vpc_region = module.params.get('vpc_region') comment = module.params.get('comment') + if zone_in[-1:] != '.': + zone_in += "." 
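+    # Route 53 stores zone names fully qualified, so the trailing dot added above
+    # keeps the comparisons against existing hosted zones consistent.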
+ private_zone = vpc_id is not None and vpc_region is not None _, _, aws_connect_kwargs = get_aws_connection_info(module) @@ -90,7 +164,7 @@ def main(): # connect to the route53 endpoint try: conn = Route53Connection(**aws_connect_kwargs) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: module.fail_json(msg=e.error_message) results = conn.get_all_hosted_zones() @@ -158,7 +232,5 @@ def main(): elif state == 'absent': module.exit_json(changed=False) -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/s3_bucket.py b/cloud/amazon/s3_bucket.py index 25c085f8173..970967e30b0 100644 --- a/cloud/amazon/s3_bucket.py +++ b/cloud/amazon/s3_bucket.py @@ -13,17 +13,21 @@ # You should have received a copy of the GNU General Public License # along with this library. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: s3_bucket -short_description: Manage s3 buckets in AWS +short_description: Manage S3 buckets in AWS, Ceph, Walrus and FakeS3 description: - - Manage s3 buckets in AWS + - Manage S3 buckets in AWS, Ceph, Walrus and FakeS3 version_added: "2.0" -author: Rob White (@wimnat) +author: "Rob White (@wimnat)" options: force: - description: + description: - When trying to delete a bucket, delete all keys in the bucket first (an s3 bucket must be empty for a successful deletion) required: false default: no @@ -38,15 +42,15 @@ - The JSON policy as a string. required: false default: null - region: - description: - - AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard. - required: false - default: null s3_url: - description: S3 URL endpoint for usage with Eucalypus, fakes3, etc. Otherwise assumes AWS + description: + - S3 URL endpoint for usage with Ceph, Eucalypus, fakes3, etc. Otherwise assumes AWS default: null aliases: [ S3_URL ] + ceph: + description: + - Enable API compatibility with Ceph. It takes into account the S3 API subset working with Ceph in order to provide the same module behaviour where possible. + version_added: "2.2" requester_pays: description: - With Requester Pays buckets, the requester instead of the bucket owner pays the cost of the request and the data download from the bucket. 
@@ -65,13 +69,14 @@ required: false default: null versioning: - description: + description: - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended) required: false - default: no + default: null choices: [ 'yes', 'no' ] - -extends_documentation_fragment: aws +extends_documentation_fragment: + - aws + - ec2 ''' EXAMPLES = ''' @@ -81,6 +86,12 @@ - s3_bucket: name: mys3bucket +# Create a simple s3 bucket on Ceph Rados Gateway +- s3_bucket: + name: mys3bucket + s3_url: http://your-ceph-rados-gateway-server.xxx + ceph: true + # Remove an s3 bucket and any keys it contains - s3_bucket: name: mys3bucket @@ -96,10 +107,15 @@ tags: example: tag1 another: tag2 - + ''' +import os import xml.etree.ElementTree as ET +import urlparse + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * try: import boto.ec2 @@ -110,17 +126,16 @@ except ImportError: HAS_BOTO = False + def get_request_payment_status(bucket): - + response = bucket.get_request_payment() root = ET.fromstring(response) for message in root.findall('.//{http://s3.amazonaws.com/doc/2006-03-01/}Payer'): payer = message.text - - if payer == "BucketOwner": - return False - else: - return True + + return (payer != "BucketOwner") + def create_tags_container(tags): @@ -132,122 +147,95 @@ def create_tags_container(tags): tags_obj.add_tag_set(tag_set) return tags_obj -def create_bucket(connection, module): - + +def _create_or_update_bucket(connection, module, location): + policy = module.params.get("policy") name = module.params.get("name") - region = module.params.get("region") requester_pays = module.params.get("requester_pays") tags = module.params.get("tags") versioning = module.params.get("versioning") changed = False - + try: bucket = connection.get_bucket(name) - except S3ResponseError, e: + except S3ResponseError as e: try: - bucket = connection.create_bucket(name, location=region) + bucket = connection.create_bucket(name, location=location) changed = True - except S3CreateError, e: + except S3CreateError as e: module.fail_json(msg=e.message) - + # Versioning versioning_status = bucket.get_versioning_status() - if not versioning_status and versioning: - try: - bucket.configure_versioning(versioning) - changed = True - versioning_status = bucket.get_versioning_status() - except S3ResponseError, e: - module.fail_json(msg=e.message) - elif not versioning_status and not versioning: - # do nothing - pass - else: - if versioning_status['Versioning'] == "Enabled" and not versioning: - bucket.configure_versioning(versioning) - changed = True - versioning_status = bucket.get_versioning_status() - elif ( (versioning_status['Versioning'] == "Disabled" and versioning) or (versioning_status['Versioning'] == "Suspended" and versioning) ): - bucket.configure_versioning(versioning) - changed = True - versioning_status = bucket.get_versioning_status() - + if versioning_status: + if versioning is not None: + if versioning and versioning_status['Versioning'] != "Enabled": + try: + bucket.configure_versioning(versioning) + changed = True + versioning_status = bucket.get_versioning_status() + except S3ResponseError as e: + module.fail_json(msg=e.message) + elif not versioning and versioning_status['Versioning'] != "Enabled": + try: + bucket.configure_versioning(versioning) + changed = True + versioning_status = bucket.get_versioning_status() + except S3ResponseError as e: + module.fail_json(msg=e.message) + # Requester pays requester_pays_status = get_request_payment_status(bucket) if 
requester_pays_status != requester_pays: if requester_pays: - bucket.set_request_payment(payer='Requester') - changed = True - requester_pays_status = get_request_payment_status(bucket) + payer='Requester' else: - bucket.set_request_payment(payer='BucketOwner') - changed = True - requester_pays_status = get_request_payment_status(bucket) + payer='BucketOwner' + bucket.set_request_payment(payer=payer) + changed = True + requester_pays_status = get_request_payment_status(bucket) - # Policy + # Policy try: - current_policy = bucket.get_policy() - except S3ResponseError, e: + current_policy = json.loads(bucket.get_policy()) + except S3ResponseError as e: if e.error_code == "NoSuchBucketPolicy": - current_policy = None + current_policy = {} else: module.fail_json(msg=e.message) - - if current_policy is not None and policy is not None: - if policy is not None: - policy = json.dumps(policy) - - if json.loads(current_policy) != json.loads(policy): + if policy is not None: + if isinstance(policy, basestring): + policy = json.loads(policy) + + if not policy: + bucket.delete_policy() + # only show changed if there was already a policy + changed = bool(current_policy) + + elif current_policy != policy: try: - bucket.set_policy(policy) + bucket.set_policy(json.dumps(policy)) changed = True - current_policy = bucket.get_policy() - except S3ResponseError, e: + current_policy = json.loads(bucket.get_policy()) + except S3ResponseError as e: module.fail_json(msg=e.message) - elif current_policy is None and policy is not None: - policy = json.dumps(policy) - - try: - bucket.set_policy(policy) - changed = True - current_policy = bucket.get_policy() - except S3ResponseError, e: - module.fail_json(msg=e.message) - - elif current_policy is not None and policy is None: - try: - bucket.delete_policy() - changed = True - current_policy = bucket.get_policy() - except S3ResponseError, e: - if e.error_code == "NoSuchBucketPolicy": - current_policy = None - else: - module.fail_json(msg=e.message) - - #### - ## Fix up json of policy so it's not escaped - #### - # Tags try: current_tags = bucket.get_tags() - tag_set = TagSet() - except S3ResponseError, e: + except S3ResponseError as e: if e.error_code == "NoSuchTagSet": current_tags = None else: module.fail_json(msg=e.message) - - if current_tags is not None or tags is not None: - - if current_tags is None: - current_tags_dict = {} - else: - current_tags_dict = dict((t.key, t.value) for t in current_tags[0]) + if current_tags is None: + current_tags_dict = {} + else: + current_tags_dict = dict((t.key, t.value) for t in current_tags[0]) + + if tags is not None: if current_tags_dict != tags: try: if tags: @@ -256,43 +244,86 @@ def create_bucket(connection, module): bucket.delete_tags() current_tags_dict = tags changed = True - except S3ResponseError, e: + except S3ResponseError as e: module.fail_json(msg=e.message) module.exit_json(changed=changed, name=bucket.name, versioning=versioning_status, requester_pays=requester_pays_status, policy=current_policy, tags=current_tags_dict) - -def destroy_bucket(connection, module): - + + +def _destroy_bucket(connection, module): + force = module.params.get("force") name = module.params.get("name") changed = False - + try: bucket = connection.get_bucket(name) - except S3ResponseError, e: + except S3ResponseError as e: if e.error_code != "NoSuchBucket": module.fail_json(msg=e.message) else: # Bucket already absent module.exit_json(changed=changed) - + if force: try: # Empty the bucket for key in bucket.list(): key.delete() - - except 
BotoServerError, e: + + except BotoServerError as e: module.fail_json(msg=e.message) - + try: bucket = connection.delete_bucket(name) changed = True - except S3ResponseError, e: + except S3ResponseError as e: module.fail_json(msg=e.message) - + module.exit_json(changed=changed) + +def _create_or_update_bucket_ceph(connection, module, location): + #TODO: add update + + name = module.params.get("name") + + changed = False + + try: + bucket = connection.get_bucket(name) + except S3ResponseError as e: + try: + bucket = connection.create_bucket(name, location=location) + changed = True + except S3CreateError as e: + module.fail_json(msg=e.message) + + if bucket: + module.exit_json(changed=changed) + else: + module.fail_json(msg='Unable to create bucket, no error from the API') + + +def _destroy_bucket_ceph(connection, module): + + _destroy_bucket(connection, module) + + +def create_or_update_bucket(connection, module, location, flavour='aws'): + if flavour == 'ceph': + _create_or_update_bucket_ceph(connection, module, location) + else: + _create_or_update_bucket(connection, module, location) + + +def destroy_bucket(connection, module, flavour='aws'): + if flavour == 'ceph': + _destroy_bucket_ceph(connection, module) + else: + _destroy_bucket(connection, module) + + def is_fakes3(s3_url): """ Return True if s3_url has scheme fakes3:// """ if s3_url is not None: @@ -300,6 +331,7 @@ def is_fakes3(s3_url): else: return False + def is_walrus(s3_url): """ Return True if it's Walrus endpoint, not S3 @@ -311,26 +343,27 @@ def is_walrus(s3_url): return False def main(): - + argument_spec = ec2_argument_spec() argument_spec.update( dict( - force = dict(required=False, default='no', type='bool'), - policy = dict(required=False, default=None), - name = dict(required=True), - requester_pays = dict(default='no', type='bool'), - s3_url = dict(aliases=['S3_URL']), - state = dict(default='present', choices=['present', 'absent']), - tags = dict(required=None, default={}, type='dict'), - versioning = dict(default='no', type='bool') + force=dict(required=False, default='no', type='bool'), + policy=dict(required=False, default=None, type='json'), + name=dict(required=True, type='str'), + requester_pays=dict(default='no', type='bool'), + s3_url=dict(aliases=['S3_URL'], type='str'), + state=dict(default='present', type='str', choices=['present', 'absent']), + tags=dict(required=False, default=None, type='dict'), + versioning=dict(default=None, type='bool'), + ceph=dict(default='no', type='bool') ) ) - + module = AnsibleModule(argument_spec=argument_spec) if not HAS_BOTO: module.fail_json(msg='boto required for this module') - + region, ec2_url, aws_connect_params = get_aws_connection_info(module) if region in ('us-east-1', '', None): @@ -347,10 +380,27 @@ def main(): if not s3_url and 'S3_URL' in os.environ: s3_url = os.environ['S3_URL'] + ceph = module.params.get('ceph') + + if ceph and not s3_url: + module.fail_json(msg='ceph flavour requires s3_url') + + flavour = 'aws' + # Look at s3_url and tweak connection settings # if connecting to Walrus or fakes3 try: - if is_fakes3(s3_url): + if s3_url and ceph: + ceph = urlparse.urlparse(s3_url) + connection = boto.connect_s3( + host=ceph.hostname, + port=ceph.port, + is_secure=ceph.scheme == 'https', + calling_format=OrdinaryCallingFormat(), + **aws_connect_params + ) + flavour = 'ceph' + elif is_fakes3(s3_url): fakes3 = urlparse.urlparse(s3_url) connection = S3Connection( is_secure=fakes3.scheme == 'fakes3s', @@ -368,9 +418,9 @@ def main(): if connection is None: 
connection = boto.connect_s3(**aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg='No Authentication Handler found: %s ' % str(e)) - except Exception, e: + except Exception as e: module.fail_json(msg='Failed to connect to S3: %s' % str(e)) if connection is None: # this should never happen @@ -379,12 +429,9 @@ def main(): state = module.params.get("state") if state == 'present': - create_bucket(connection, module) + create_or_update_bucket(connection, module, location) elif state == 'absent': - destroy_bucket(connection, module) - -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * + destroy_bucket(connection, module, flavour=flavour) if __name__ == '__main__': - main() \ No newline at end of file + main() diff --git a/cloud/amazon/s3_lifecycle.py b/cloud/amazon/s3_lifecycle.py new file mode 100644 index 00000000000..f981dfadb8f --- /dev/null +++ b/cloud/amazon/s3_lifecycle.py @@ -0,0 +1,439 @@ +#!/usr/bin/python +# +# This is a free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see . + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: s3_lifecycle +short_description: Manage s3 bucket lifecycle rules in AWS +description: + - Manage s3 bucket lifecycle rules in AWS +version_added: "2.0" +author: "Rob White (@wimnat)" +notes: + - If specifying expiration time as days then transition time must also be specified in days + - If specifying expiration time as a date then transition time must also be specified as a date +requirements: + - python-dateutil +options: + name: + description: + - "Name of the s3 bucket" + required: true + expiration_date: + description: + - "Indicates the lifetime of the objects that are subject to the rule by the date they will expire. The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified." + required: false + default: null + expiration_days: + description: + - "Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer." + required: false + default: null + prefix: + description: + - "Prefix identifying one or more objects to which the rule applies. If no prefix is specified, the rule will apply to the whole bucket." + required: false + default: null + rule_id: + description: + - "Unique identifier for the rule. The value cannot be longer than 255 characters. A unique value for the rule will be generated if no value is provided." + required: false + default: null + state: + description: + - "Create or remove the lifecycle rule" + required: false + default: present + choices: [ 'present', 'absent' ] + status: + description: + - "If 'enabled', the rule is currently being applied. If 'disabled', the rule is not currently being applied." 
+    required: false
+    default: enabled
+    choices: [ 'enabled', 'disabled' ]
+  storage_class:
+    description:
+      - "The storage class to transition to. Currently there are two supported values - 'glacier' or 'standard_ia'."
+      - "The 'standard_ia' class is only available from Ansible version 2.2."
+    required: false
+    default: glacier
+    choices: [ 'glacier', 'standard_ia' ]
+  transition_date:
+    description:
+      - "Indicates the lifetime of the objects that are subject to the rule by the date they will transition to a different storage class. The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. If transition_days is not specified, this parameter is required."
+    required: false
+    default: null
+  transition_days:
+    description:
+      - "Indicates when, in days, an object transitions to a different storage class. If transition_date is not specified, this parameter is required."
+    required: false
+    default: null
+extends_documentation_fragment:
+    - aws
+    - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
+- s3_lifecycle:
+    name: mybucket
+    expiration_days: 30
+    prefix: /logs/
+    status: enabled
+    state: present
+
+# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier after 7 days and then delete after 90 days
+- s3_lifecycle:
+    name: mybucket
+    transition_days: 7
+    expiration_days: 90
+    prefix: /logs/
+    status: enabled
+    state: present
+
+# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 30 Dec 2020 and then delete on 30 Dec 2030. Note that midnight GMT must be specified.
+# Be sure to quote your date strings
+- s3_lifecycle:
+    name: mybucket
+    transition_date: "2020-12-30T00:00:00.000Z"
+    expiration_date: "2030-12-30T00:00:00.000Z"
+    prefix: /logs/
+    status: enabled
+    state: present
+
+# Disable the rule created above
+- s3_lifecycle:
+    name: mybucket
+    prefix: /logs/
+    status: disabled
+    state: present
+
+# Delete the lifecycle rule created above
+- s3_lifecycle:
+    name: mybucket
+    prefix: /logs/
+    state: absent
+
+# Configure a lifecycle rule to transition all backup files older than 31 days in /backups/ to standard infrequent access class.
+- s3_lifecycle: + name: mybucket + prefix: /backups/ + storage_class: standard_ia + transition_days: 31 + state: present + status: enabled + +''' + +import xml.etree.ElementTree as ET +import copy +import datetime + +try: + import dateutil.parser + HAS_DATEUTIL = True +except ImportError: + HAS_DATEUTIL = False + +try: + import boto + import boto.ec2 + from boto.s3.connection import OrdinaryCallingFormat, Location + from boto.s3.lifecycle import Lifecycle, Rule, Expiration, Transition + from boto.exception import BotoServerError, S3CreateError, S3ResponseError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info + +def create_lifecycle_rule(connection, module): + + name = module.params.get("name") + expiration_date = module.params.get("expiration_date") + expiration_days = module.params.get("expiration_days") + prefix = module.params.get("prefix") + rule_id = module.params.get("rule_id") + status = module.params.get("status") + storage_class = module.params.get("storage_class") + transition_date = module.params.get("transition_date") + transition_days = module.params.get("transition_days") + changed = False + + try: + bucket = connection.get_bucket(name) + except S3ResponseError as e: + module.fail_json(msg=e.message) + + # Get the bucket's current lifecycle rules + try: + current_lifecycle_obj = bucket.get_lifecycle_config() + except S3ResponseError as e: + if e.error_code == "NoSuchLifecycleConfiguration": + current_lifecycle_obj = Lifecycle() + else: + module.fail_json(msg=e.message) + + # Create expiration + if expiration_days is not None: + expiration_obj = Expiration(days=expiration_days) + elif expiration_date is not None: + expiration_obj = Expiration(date=expiration_date) + else: + expiration_obj = None + + # Create transition + if transition_days is not None: + transition_obj = Transition(days=transition_days, storage_class=storage_class.upper()) + elif transition_date is not None: + transition_obj = Transition(date=transition_date, storage_class=storage_class.upper()) + else: + transition_obj = None + + # Create rule + rule = Rule(rule_id, prefix, status.title(), expiration_obj, transition_obj) + + # Create lifecycle + lifecycle_obj = Lifecycle() + + appended = False + # If current_lifecycle_obj is not None then we have rules to compare, otherwise just add the rule + if current_lifecycle_obj: + # If rule ID exists, use that for comparison otherwise compare based on prefix + for existing_rule in current_lifecycle_obj: + if rule.id == existing_rule.id: + if compare_rule(rule, existing_rule): + lifecycle_obj.append(rule) + appended = True + else: + lifecycle_obj.append(rule) + changed = True + appended = True + elif rule.prefix == existing_rule.prefix: + existing_rule.id = None + if compare_rule(rule, existing_rule): + lifecycle_obj.append(rule) + appended = True + else: + lifecycle_obj.append(rule) + changed = True + appended = True + else: + lifecycle_obj.append(existing_rule) + # If nothing appended then append now as the rule must not exist + if not appended: + lifecycle_obj.append(rule) + changed = True + else: + lifecycle_obj.append(rule) + changed = True + + # Write lifecycle to bucket + try: + bucket.configure_lifecycle(lifecycle_obj) + except S3ResponseError as e: + module.fail_json(msg=e.message) + + module.exit_json(changed=changed) + +def compare_rule(rule_a, rule_b): + + # Copy objects + rule1 = 
copy.deepcopy(rule_a) + rule2 = copy.deepcopy(rule_b) + + # Delete Rule from Rule + try: + del rule1.Rule + except AttributeError: + pass + + try: + del rule2.Rule + except AttributeError: + pass + + # Extract Expiration and Transition objects + rule1_expiration = rule1.expiration + rule1_transition = rule1.transition + rule2_expiration = rule2.expiration + rule2_transition = rule2.transition + + # Delete the Expiration and Transition objects from the Rule objects + del rule1.expiration + del rule1.transition + del rule2.expiration + del rule2.transition + + # Compare + if rule1_transition is None: + rule1_transition = Transition() + if rule2_transition is None: + rule2_transition = Transition() + if rule1_expiration is None: + rule1_expiration = Expiration() + if rule2_expiration is None: + rule2_expiration = Expiration() + + if (rule1.__dict__ == rule2.__dict__) and (rule1_expiration.__dict__ == rule2_expiration.__dict__) and (rule1_transition.__dict__ == rule2_transition.__dict__): + return True + else: + return False + + +def destroy_lifecycle_rule(connection, module): + + name = module.params.get("name") + prefix = module.params.get("prefix") + rule_id = module.params.get("rule_id") + changed = False + + if prefix is None: + prefix = "" + + try: + bucket = connection.get_bucket(name) + except S3ResponseError as e: + module.fail_json(msg=e.message) + + # Get the bucket's current lifecycle rules + try: + current_lifecycle_obj = bucket.get_lifecycle_config() + except S3ResponseError as e: + if e.error_code == "NoSuchLifecycleConfiguration": + module.exit_json(changed=changed) + else: + module.fail_json(msg=e.message) + + # Create lifecycle + lifecycle_obj = Lifecycle() + + # Check if rule exists + # If an ID exists, use that otherwise compare based on prefix + if rule_id is not None: + for existing_rule in current_lifecycle_obj: + if rule_id == existing_rule.id: + # We're not keeping the rule (i.e. deleting) so mark as changed + changed = True + else: + lifecycle_obj.append(existing_rule) + else: + for existing_rule in current_lifecycle_obj: + if prefix == existing_rule.prefix: + # We're not keeping the rule (i.e. 
deleting) so mark as changed
+                changed = True
+            else:
+                lifecycle_obj.append(existing_rule)
+
+    # Write lifecycle to bucket or, if there are no rules left, delete the lifecycle configuration
+    try:
+        if lifecycle_obj:
+            bucket.configure_lifecycle(lifecycle_obj)
+        else:
+            bucket.delete_lifecycle_configuration()
+    except BotoServerError as e:
+        module.fail_json(msg=e.message)
+
+    module.exit_json(changed=changed)
+
+
+def main():
+
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            name=dict(required=True, type='str'),
+            expiration_days=dict(default=None, required=False, type='int'),
+            expiration_date=dict(default=None, required=False, type='str'),
+            prefix=dict(default=None, required=False),
+            requester_pays=dict(default='no', type='bool'),
+            rule_id=dict(required=False, type='str'),
+            state=dict(default='present', choices=['present', 'absent']),
+            status=dict(default='enabled', choices=['enabled', 'disabled']),
+            storage_class=dict(default='glacier', type='str', choices=['glacier', 'standard_ia']),
+            transition_days=dict(default=None, required=False, type='int'),
+            transition_date=dict(default=None, required=False, type='str')
+        )
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           mutually_exclusive=[
+                               ['expiration_days', 'expiration_date'],
+                               ['expiration_days', 'transition_date'],
+                               ['transition_days', 'transition_date'],
+                               ['transition_days', 'expiration_date']
+                           ])
+
+    if not HAS_BOTO:
+        module.fail_json(msg='boto required for this module')
+
+    if not HAS_DATEUTIL:
+        module.fail_json(msg='dateutil required for this module')
+
+    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+    if region in ('us-east-1', '', None):
+        # S3ism for the US Standard region
+        location = Location.DEFAULT
+    else:
+        # Boto uses symbolic names for locations but region strings will
+        # actually work fine for everything except us-east-1 (US Standard)
+        location = region
+    try:
+        connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
+        # use this as a fallback because connect_to_region can fail in boto for non-'classic' AWS accounts in some cases
+        if connection is None:
+            connection = boto.connect_s3(**aws_connect_params)
+    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+        module.fail_json(msg=str(e))
+
+    expiration_date = module.params.get("expiration_date")
+    transition_date = module.params.get("transition_date")
+    state = module.params.get("state")
+    storage_class = module.params.get("storage_class")
+
+    # If expiration_date set, check string is valid
+    if expiration_date is not None:
+        try:
+            datetime.datetime.strptime(expiration_date, "%Y-%m-%dT%H:%M:%S.000Z")
+        except ValueError as e:
+            module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")
+
+    if transition_date is not None:
+        try:
+            datetime.datetime.strptime(transition_date, "%Y-%m-%dT%H:%M:%S.000Z")
+        except ValueError as e:
+            module.fail_json(msg="transition_date is not a valid ISO-8601 format.
The time must be midnight and a timezone of GMT must be included") + + boto_required_version = (2,40,0) + if storage_class == 'standard_ia' and tuple(map(int, (boto.__version__.split(".")))) < boto_required_version: + module.fail_json(msg="'standard_ia' class requires boto >= 2.40.0") + + if state == 'present': + create_lifecycle_rule(connection, module) + elif state == 'absent': + destroy_lifecycle_rule(connection, module) + + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/s3_logging.py b/cloud/amazon/s3_logging.py index 75b3fe73508..653e315848f 100644 --- a/cloud/amazon/s3_logging.py +++ b/cloud/amazon/s3_logging.py @@ -13,6 +13,10 @@ # You should have received a copy of the GNU General Public License # along with this library. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: s3_logging @@ -26,11 +30,6 @@ description: - "Name of the s3 bucket." required: true - region: - description: - - "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard." - required: false - default: null state: description: - "Enable or disable logging." @@ -47,8 +46,9 @@ - "The prefix that should be prepended to the generated log files written to the target_bucket." required: false default: "" - -extends_documentation_fragment: aws +extends_documentation_fragment: + - aws + - ec2 ''' EXAMPLES = ''' @@ -65,7 +65,7 @@ s3_logging: name: mywebsite.com state: absent - + ''' try: @@ -76,23 +76,26 @@ except ImportError: HAS_BOTO = False +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info + def compare_bucket_logging(bucket, target_bucket, target_prefix): - + bucket_log_obj = bucket.get_logging_status() if bucket_log_obj.target != target_bucket or bucket_log_obj.prefix != target_prefix: return False else: return True - + def enable_bucket_logging(connection, module): - + bucket_name = module.params.get("name") target_bucket = module.params.get("target_bucket") target_prefix = module.params.get("target_prefix") changed = False - + try: bucket = connection.get_bucket(bucket_name) except S3ResponseError as e: @@ -115,15 +118,15 @@ def enable_bucket_logging(connection, module): except S3ResponseError as e: module.fail_json(msg=e.message) - + module.exit_json(changed=changed) - - + + def disable_bucket_logging(connection, module): - + bucket_name = module.params.get("name") changed = False - + try: bucket = connection.get_bucket(bucket_name) if not compare_bucket_logging(bucket, None, None): @@ -131,12 +134,12 @@ def disable_bucket_logging(connection, module): changed = True except S3ResponseError as e: module.fail_json(msg=e.message) - + module.exit_json(changed=changed) - - + + def main(): - + argument_spec = ec2_argument_spec() argument_spec.update( dict( @@ -146,16 +149,16 @@ def main(): state = dict(required=False, default='present', choices=['present', 'absent']) ) ) - + module = AnsibleModule(argument_spec=argument_spec) if not HAS_BOTO: module.fail_json(msg='boto required for this module') - + region, ec2_url, aws_connect_params = get_aws_connection_info(module) if region in ('us-east-1', '', None): - # S3ism for the US Standard region + # S3ism for the US Standard region location 
= Location.DEFAULT else: # Boto uses symbolic names for locations but region strings will @@ -166,10 +169,9 @@ def main(): # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases if connection is None: connection = boto.connect_s3(**aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: module.fail_json(msg=str(e)) - state = module.params.get("state") if state == 'present': @@ -177,8 +179,6 @@ def main(): elif state == 'absent': disable_bucket_logging(connection, module) -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * if __name__ == '__main__': - main() \ No newline at end of file + main() diff --git a/cloud/amazon/s3_website.py b/cloud/amazon/s3_website.py new file mode 100644 index 00000000000..b8e1503b2d2 --- /dev/null +++ b/cloud/amazon/s3_website.py @@ -0,0 +1,297 @@ +#!/usr/bin/python +# +# This is a free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: s3_website +short_description: Configure an s3 bucket as a website +description: + - Configure an s3 bucket as a website +version_added: "2.2" +author: Rob White (@wimnat) +options: + name: + description: + - "Name of the s3 bucket" + required: true + default: null + error_key: + description: + - "The object key name to use when a 4XX class error occurs. To remove an error key, set to None." + required: false + default: null + redirect_all_requests: + description: + - "Describes the redirect behavior for every request to this s3 bucket website endpoint" + required: false + default: null + region: + description: + - "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard." + required: false + default: null + state: + description: + - "Add or remove s3 website configuration" + required: false + default: present + choices: [ 'present', 'absent' ] + suffix: + description: + - "Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash character." + required: false + default: index.html + +extends_documentation_fragment: + - aws + - ec2 +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
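+# (Illustrative addition, not from the upstream examples; the bucket name is a
+# placeholder.) redirect_all_requests accepts either a bare host or a
+# protocol-qualified URL; the module splits the value on ':', so the form below
+# redirects over HTTPS.
+- s3_website:
+    name: mybucket.com
+    redirect_all_requests: https://example.com
+    state: present
+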
+ +# Configure an s3 bucket to redirect all requests to example.com +- s3_website: + name: mybucket.com + redirect_all_requests: example.com + state: present + +# Remove website configuration from an s3 bucket +- s3_website: + name: mybucket.com + state: absent + +# Configure an s3 bucket as a website with index and error pages +- s3_website: + name: mybucket.com + suffix: home.htm + error_key: errors/404.htm + state: present + +''' + +RETURN = ''' +index_document: + suffix: + description: suffix that is appended to a request that is for a directory on the website endpoint + returned: success + type: string + sample: index.html +error_document: + key: + description: object key name to use when a 4XX class error occurs + returned: when error_document parameter set + type: string + sample: error.html +redirect_all_requests_to: + host_name: + description: name of the host where requests will be redirected. + returned: when redirect all requests parameter set + type: string + sample: ansible.com +routing_rules: + routing_rule: + host_name: + description: name of the host where requests will be redirected. + returned: when host name set as part of redirect rule + type: string + sample: ansible.com + condition: + key_prefix_equals: + description: object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html + returned: when routing rule present + type: string + sample: docs/ + redirect: + replace_key_prefix_with: + description: object key prefix to use in the redirect request + returned: when routing rule present + type: string + sample: documents/ + +''' + +import time + +try: + from botocore.exceptions import ClientError, ParamValidationError, NoCredentialsError + import boto3 + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + +def _create_redirect_dict(url): + + redirect_dict = {} + url_split = url.split(':') + + # Did we split anything? 
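+    # A value such as 'https://example.com' splits on ':' into
+    # ['https', '//example.com'], so both Protocol and HostName are set (the
+    # leading '//' is stripped); a bare host such as 'example.com' yields a
+    # single element, so only HostName is set.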
+ if len(url_split) == 2: + redirect_dict[u'Protocol'] = url_split[0] + redirect_dict[u'HostName'] = url_split[1].replace('//', '') + elif len(url_split) == 1: + redirect_dict[u'HostName'] = url_split[0] + else: + raise ValueError('Redirect URL appears invalid') + + return redirect_dict + + +def _create_website_configuration(suffix, error_key, redirect_all_requests): + + website_configuration = {} + + if error_key is not None: + website_configuration['ErrorDocument'] = { 'Key': error_key } + + if suffix is not None: + website_configuration['IndexDocument'] = { 'Suffix': suffix } + + if redirect_all_requests is not None: + website_configuration['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests) + + return website_configuration + + +def enable_or_update_bucket_as_website(client_connection, resource_connection, module): + + bucket_name = module.params.get("name") + redirect_all_requests = module.params.get("redirect_all_requests") + # If redirect_all_requests is set then don't use the default suffix that has been set + if redirect_all_requests is not None: + suffix = None + else: + suffix = module.params.get("suffix") + error_key = module.params.get("error_key") + changed = False + + try: + bucket_website = resource_connection.BucketWebsite(bucket_name) + except ClientError as e: + module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + + try: + website_config = client_connection.get_bucket_website(Bucket=bucket_name) + except ClientError as e: + if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration': + website_config = None + else: + module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + + if website_config is None: + try: + bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) + changed = True + except (ClientError, ParamValidationError) as e: + module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + except ValueError as e: + module.fail_json(msg=str(e)) + else: + try: + if (suffix is not None and website_config['IndexDocument']['Suffix'] != suffix) or \ + (error_key is not None and website_config['ErrorDocument']['Key'] != error_key) or \ + (redirect_all_requests is not None and website_config['RedirectAllRequestsTo'] != _create_redirect_dict(redirect_all_requests)): + + try: + bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) + changed = True + except (ClientError, ParamValidationError) as e: + module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + except KeyError as e: + try: + bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests)) + changed = True + except (ClientError, ParamValidationError) as e: + module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + except ValueError as e: + module.fail_json(msg=str(e)) + + # Wait 5 secs before getting the website_config again to give it time to update + time.sleep(5) + + website_config = client_connection.get_bucket_website(Bucket=bucket_name) + module.exit_json(changed=changed, **camel_dict_to_snake_dict(website_config)) + + +def disable_bucket_as_website(client_connection, module): + + changed = False + bucket_name = module.params.get("name") + + try: + client_connection.get_bucket_website(Bucket=bucket_name) + except ClientError as e: + if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration': + module.exit_json(changed=changed) + else: + 
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + + try: + client_connection.delete_bucket_website(Bucket=bucket_name) + changed = True + except ClientError as e: + module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + + module.exit_json(changed=changed) + + +def main(): + + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + name=dict(type='str', required=True), + state=dict(type='str', required=True, choices=['present', 'absent']), + suffix=dict(type='str', required=False, default='index.html'), + error_key=dict(type='str', required=False), + redirect_all_requests=dict(type='str', required=False) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive = [ + ['redirect_all_requests', 'suffix'], + ['redirect_all_requests', 'error_key'] + ]) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) + + if region: + client_connection = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params) + resource_connection = boto3_conn(module, conn_type='resource', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params) + else: + module.fail_json(msg="region must be specified") + + state = module.params.get("state") + + if state == 'present': + enable_or_update_bucket_as_website(client_connection, resource_connection, module) + elif state == 'absent': + disable_bucket_as_website(client_connection, module) + + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/sns_topic.py b/cloud/amazon/sns_topic.py new file mode 100644 index 00000000000..e2b31484a1f --- /dev/null +++ b/cloud/amazon/sns_topic.py @@ -0,0 +1,410 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# This is a free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see . + + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = """ +module: sns_topic +short_description: Manages AWS SNS topics and subscriptions +description: + - The M(sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics. 
+version_added: 2.0
+author:
+  - "Joel Thompson (@joelthompson)"
+  - "Fernando Jose Pando (@nand0p)"
+options:
+  name:
+    description:
+      - The name or ARN of the SNS topic to converge
+    required: True
+  state:
+    description:
+      - Whether to create or destroy an SNS topic
+    required: False
+    default: present
+    choices: ["absent", "present"]
+  display_name:
+    description:
+      - Display name of the topic
+    required: False
+    default: None
+  policy:
+    description:
+      - Policy to apply to the SNS topic
+    required: False
+    default: None
+  delivery_policy:
+    description:
+      - Delivery policy to apply to the SNS topic
+    required: False
+    default: None
+  subscriptions:
+    description:
+      - List of subscriptions to apply to the topic. Note that AWS requires
+        subscriptions to be confirmed, so you will need to confirm any new
+        subscriptions.
+    required: False
+    default: []
+  purge_subscriptions:
+    description:
+      - "Whether to purge any subscriptions not listed here. NOTE: AWS does not
+        allow you to purge any PendingConfirmation subscriptions, so if any
+        exist and would be purged, they are silently skipped. This means that
+        somebody could come back later and confirm the subscription. Sorry.
+        Blame Amazon."
+    required: False
+    default: True
+extends_documentation_fragment: aws
+requirements: [ "boto" ]
+"""
+
+EXAMPLES = """
+
+- name: Create alarm SNS topic
+  sns_topic:
+    name: "alarms"
+    state: present
+    display_name: "alarm SNS topic"
+    delivery_policy:
+      http:
+        defaultHealthyRetryPolicy:
+            minDelayTarget: 2
+            maxDelayTarget: 4
+            numRetries: 3
+            numMaxDelayRetries: 5
+            backoffFunction: ""
+        disableSubscriptionOverrides: True
+        defaultThrottlePolicy:
+            maxReceivesPerSecond: 10
+    subscriptions:
+      - endpoint: "my_email_address@example.com"
+        protocol: "email"
+      - endpoint: "my_mobile_number"
+        protocol: "sms"
+
+"""
+
+RETURN = '''
+sns_arn:
+    description: The ARN of the topic you are modifying
+    type: string
+    sample: "arn:aws:sns:us-east-1:123456789012:my_topic_name"
+
+sns_topic:
+    description: Dict of sns topic details
+    type: dict
+    sample:
+      name: sns-topic-name
+      state: present
+      display_name: default
+      policy: {}
+      delivery_policy: {}
+      subscriptions_new: []
+      subscriptions_existing: []
+      subscriptions_deleted: []
+      subscriptions_added: []
+      subscriptions_purge: false
+      check_mode: false
+      topic_created: false
+      topic_deleted: false
+      attributes_set: []
+'''
+
+import time
+import json
+import re
+
+try:
+    import boto.sns
+    from boto.exception import BotoServerError
+    HAS_BOTO = True
+except ImportError:
+    HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info
+
+
+class SnsTopicManager(object):
+    """ Handles SNS Topic creation and destruction """
+
+    def __init__(self,
+                 module,
+                 name,
+                 state,
+                 display_name,
+                 policy,
+                 delivery_policy,
+                 subscriptions,
+                 purge_subscriptions,
+                 check_mode,
+                 region,
+                 **aws_connect_params):
+
+        self.region = region
+        self.aws_connect_params = aws_connect_params
+        self.connection = self._get_boto_connection()
+        self.changed = False
+        self.module = module
+        self.name = name
+        self.state = state
+        self.display_name = display_name
+        self.policy = policy
+        self.delivery_policy = delivery_policy
+        self.subscriptions = subscriptions
+        self.subscriptions_existing = []
+        self.subscriptions_deleted = []
+        self.subscriptions_added = []
+        self.purge_subscriptions = purge_subscriptions
+        self.check_mode = check_mode
+        self.topic_created = False
+
self.topic_deleted = False + self.arn_topic = None + self.attributes_set = [] + + def _get_boto_connection(self): + try: + return connect_to_aws(boto.sns, self.region, + **self.aws_connect_params) + except BotoServerError as err: + self.module.fail_json(msg=err.message) + + def _get_all_topics(self): + next_token = None + topics = [] + while True: + try: + response = self.connection.get_all_topics(next_token) + except BotoServerError as err: + self.module.fail_json(msg=err.message) + topics.extend(response['ListTopicsResponse']['ListTopicsResult']['Topics']) + next_token = response['ListTopicsResponse']['ListTopicsResult']['NextToken'] + if not next_token: + break + return [t['TopicArn'] for t in topics] + + + def _arn_topic_lookup(self): + # topic names cannot have colons, so this captures the full topic name + all_topics = self._get_all_topics() + lookup_topic = ':%s' % self.name + for topic in all_topics: + if topic.endswith(lookup_topic): + return topic + + + def _create_topic(self): + self.changed = True + self.topic_created = True + if not self.check_mode: + self.connection.create_topic(self.name) + self.arn_topic = self._arn_topic_lookup() + while not self.arn_topic: + time.sleep(3) + self.arn_topic = self._arn_topic_lookup() + + + def _set_topic_attrs(self): + topic_attributes = self.connection.get_topic_attributes(self.arn_topic) \ + ['GetTopicAttributesResponse'] ['GetTopicAttributesResult'] \ + ['Attributes'] + + if self.display_name and self.display_name != topic_attributes['DisplayName']: + self.changed = True + self.attributes_set.append('display_name') + if not self.check_mode: + self.connection.set_topic_attributes(self.arn_topic, 'DisplayName', + self.display_name) + + if self.policy and self.policy != json.loads(topic_attributes['Policy']): + self.changed = True + self.attributes_set.append('policy') + if not self.check_mode: + self.connection.set_topic_attributes(self.arn_topic, 'Policy', + json.dumps(self.policy)) + + if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or \ + self.delivery_policy != json.loads(topic_attributes['DeliveryPolicy'])): + self.changed = True + self.attributes_set.append('delivery_policy') + if not self.check_mode: + self.connection.set_topic_attributes(self.arn_topic, 'DeliveryPolicy', + json.dumps(self.delivery_policy)) + + + def _canonicalize_endpoint(self, protocol, endpoint): + if protocol == 'sms': + return re.sub('[^0-9]*', '', endpoint) + return endpoint + + + def _get_topic_subs(self): + next_token = None + while True: + response = self.connection.get_all_subscriptions_by_topic(self.arn_topic, next_token) + self.subscriptions_existing.extend(response['ListSubscriptionsByTopicResponse'] \ + ['ListSubscriptionsByTopicResult']['Subscriptions']) + next_token = response['ListSubscriptionsByTopicResponse'] \ + ['ListSubscriptionsByTopicResult']['NextToken'] + if not next_token: + break + + def _set_topic_subs(self): + subscriptions_existing_list = [] + desired_subscriptions = [(sub['protocol'], + self._canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in + self.subscriptions] + + if self.subscriptions_existing: + for sub in self.subscriptions_existing: + sub_key = (sub['Protocol'], sub['Endpoint']) + subscriptions_existing_list.append(sub_key) + if self.purge_subscriptions and sub_key not in desired_subscriptions and \ + sub['SubscriptionArn'] != 'PendingConfirmation': + self.changed = True + self.subscriptions_deleted.append(sub_key) + if not self.check_mode: + 
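+                        # Unsubscribing is permanent: AWS provides no API to remove a
+                        # PendingConfirmation subscription, which is why those are skipped above.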
self.connection.unsubscribe(sub['SubscriptionArn'])
+
+        for (protocol, endpoint) in desired_subscriptions:
+            if (protocol, endpoint) not in subscriptions_existing_list:
+                self.changed = True
+                self.subscriptions_added.append((protocol, endpoint))
+                if not self.check_mode:
+                    self.connection.subscribe(self.arn_topic, protocol, endpoint)
+
+
+    def _delete_subscriptions(self):
+        # NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days
+        #       https://forums.aws.amazon.com/thread.jspa?threadID=85993
+        for sub in self.subscriptions_existing:
+            if sub['SubscriptionArn'] != 'PendingConfirmation':
+                self.subscriptions_deleted.append(sub['SubscriptionArn'])
+                self.changed = True
+                if not self.check_mode:
+                    self.connection.unsubscribe(sub['SubscriptionArn'])
+
+
+    def _delete_topic(self):
+        self.topic_deleted = True
+        self.changed = True
+        if not self.check_mode:
+            self.connection.delete_topic(self.arn_topic)
+
+
+    def ensure_ok(self):
+        self.arn_topic = self._arn_topic_lookup()
+        if not self.arn_topic:
+            self._create_topic()
+        self._set_topic_attrs()
+        self._get_topic_subs()
+        self._set_topic_subs()
+
+    def ensure_gone(self):
+        self.arn_topic = self._arn_topic_lookup()
+        if self.arn_topic:
+            self._get_topic_subs()
+            if self.subscriptions_existing:
+                self._delete_subscriptions()
+            self._delete_topic()
+
+
+    def get_info(self):
+        info = {
+            'name': self.name,
+            'state': self.state,
+            'display_name': self.display_name,
+            'policy': self.policy,
+            'delivery_policy': self.delivery_policy,
+            'subscriptions_new': self.subscriptions,
+            'subscriptions_existing': self.subscriptions_existing,
+            'subscriptions_deleted': self.subscriptions_deleted,
+            'subscriptions_added': self.subscriptions_added,
+            'subscriptions_purge': self.purge_subscriptions,
+            'check_mode': self.check_mode,
+            'topic_created': self.topic_created,
+            'topic_deleted': self.topic_deleted,
+            'attributes_set': self.attributes_set
+        }
+
+        return info
+
+
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            name=dict(type='str', required=True),
+            state=dict(type='str', default='present', choices=['present',
+                       'absent']),
+            display_name=dict(type='str', required=False),
+            policy=dict(type='dict', required=False),
+            delivery_policy=dict(type='dict', required=False),
+            subscriptions=dict(default=[], type='list', required=False),
+            purge_subscriptions=dict(type='bool', default=True),
+        )
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    if not HAS_BOTO:
+        module.fail_json(msg='boto required for this module')
+
+    name = module.params.get('name')
+    state = module.params.get('state')
+    display_name = module.params.get('display_name')
+    policy = module.params.get('policy')
+    delivery_policy = module.params.get('delivery_policy')
+    subscriptions = module.params.get('subscriptions')
+    purge_subscriptions = module.params.get('purge_subscriptions')
+    check_mode = module.check_mode
+
+    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+    if not region:
+        module.fail_json(msg="region must be specified")
+
+    sns_topic = SnsTopicManager(module,
+                                name,
+                                state,
+                                display_name,
+                                policy,
+                                delivery_policy,
+                                subscriptions,
+                                purge_subscriptions,
+                                check_mode,
+                                region,
+                                **aws_connect_params)
+
+    if state == 'present':
+        sns_topic.ensure_ok()
+
+    elif state == 'absent':
+        sns_topic.ensure_gone()
+
+    sns_facts = dict(changed=sns_topic.changed,
+                     sns_arn=sns_topic.arn_topic,
+                     sns_topic=sns_topic.get_info())
+
+    module.exit_json(**sns_facts)
+
+
+if __name__ == '__main__':
+    main()
diff
--git a/cloud/amazon/sqs_queue.py b/cloud/amazon/sqs_queue.py new file mode 100644 index 00000000000..bad72f96bb1 --- /dev/null +++ b/cloud/amazon/sqs_queue.py @@ -0,0 +1,321 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: sqs_queue +short_description: Creates or deletes AWS SQS queues. +description: + - Create or delete AWS SQS queues. + - Update attributes on existing queues. +version_added: "2.0" +author: + - Alan Loi (@loia) + - Fernando Jose Pando (@nand0p) + - Nadir Lloret (@nadirollo) +requirements: + - "boto >= 2.33.0" +options: + state: + description: + - Create or delete the queue + required: false + choices: ['present', 'absent'] + default: 'present' + name: + description: + - Name of the queue. + required: true + default_visibility_timeout: + description: + - The default visibility timeout in seconds. + required: false + default: null + message_retention_period: + description: + - The message retention period in seconds. + required: false + default: null + maximum_message_size: + description: + - The maximum message size in bytes. + required: false + default: null + delivery_delay: + description: + - The delivery delay in seconds. + required: false + default: null + receive_message_wait_time: + description: + - The receive message wait time in seconds. + required: false + default: null + policy: + description: + - The json dict policy to attach to queue + required: false + default: null + version_added: "2.1" + redrive_policy: + description: + - json dict with the redrive_policy (see example) + required: false + default: null + version_added: "2.2" +extends_documentation_fragment: + - aws + - ec2 +""" + +RETURN = ''' +default_visibility_timeout: + description: The default visibility timeout in seconds. + returned: always + sample: 30 +delivery_delay: + description: The delivery delay in seconds. + returned: always + sample: 0 +maximum_message_size: + description: The maximum message size in bytes. + returned: always + sample: 262144 +message_retention_period: + description: The message retention period in seconds. + returned: always + sample: 345600 +name: + description: Name of the SQS Queue + returned: always + sample: "queuename-987d2de0" +queue_arn: + description: The queue's Amazon resource name (ARN). + returned: on successful creation or update of the queue + sample: 'arn:aws:sqs:us-east-1:199999999999:queuename-987d2de0' +receive_message_wait_time: + description: The receive message wait time in seconds. 
+ returned: always + sample: 0 +region: + description: Region that the queue was created within + returned: always + sample: 'us-east-1' +''' + +EXAMPLES = ''' +# Create SQS queue with redrive policy +- sqs_queue: + name: my-queue + region: ap-southeast-2 + default_visibility_timeout: 120 + message_retention_period: 86400 + maximum_message_size: 1024 + delivery_delay: 30 + receive_message_wait_time: 20 + policy: "{{ json_dict }}" + redrive_policy: + maxReceiveCount: 5 + deadLetterTargetArn: arn:aws:sqs:eu-west-1:123456789012:my-dead-queue + +# Delete SQS queue +- sqs_queue: + name: my-queue + region: ap-southeast-2 + state: absent +''' + +import json +import traceback + +try: + import boto.sqs + from boto.exception import BotoServerError, NoAuthHandlerFound + HAS_BOTO = True + +except ImportError: + HAS_BOTO = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info + + +def create_or_update_sqs_queue(connection, module): + queue_name = module.params.get('name') + + queue_attributes = dict( + default_visibility_timeout=module.params.get('default_visibility_timeout'), + message_retention_period=module.params.get('message_retention_period'), + maximum_message_size=module.params.get('maximum_message_size'), + delivery_delay=module.params.get('delivery_delay'), + receive_message_wait_time=module.params.get('receive_message_wait_time'), + policy=module.params.get('policy'), + redrive_policy=module.params.get('redrive_policy') + ) + + result = dict( + region=module.params.get('region'), + name=queue_name, + ) + result.update(queue_attributes) + + try: + queue = connection.get_queue(queue_name) + if queue: + # Update existing + result['changed'] = update_sqs_queue(queue, check_mode=module.check_mode, **queue_attributes) + else: + # Create new + if not module.check_mode: + queue = connection.create_queue(queue_name) + update_sqs_queue(queue, **queue_attributes) + result['changed'] = True + + if not module.check_mode: + result['queue_arn'] = queue.get_attributes('QueueArn')['QueueArn'] + result['default_visibility_timeout'] = queue.get_attributes('VisibilityTimeout')['VisibilityTimeout'] + result['message_retention_period'] = queue.get_attributes('MessageRetentionPeriod')['MessageRetentionPeriod'] + result['maximum_message_size'] = queue.get_attributes('MaximumMessageSize')['MaximumMessageSize'] + result['delivery_delay'] = queue.get_attributes('DelaySeconds')['DelaySeconds'] + result['receive_message_wait_time'] = queue.get_attributes('ReceiveMessageWaitTimeSeconds')['ReceiveMessageWaitTimeSeconds'] + + except BotoServerError: + result['msg'] = 'Failed to create/update sqs queue due to error: ' + traceback.format_exc() + module.fail_json(**result) + else: + module.exit_json(**result) + + +def update_sqs_queue(queue, + check_mode=False, + default_visibility_timeout=None, + message_retention_period=None, + maximum_message_size=None, + delivery_delay=None, + receive_message_wait_time=None, + policy=None, + redrive_policy=None): + changed = False + + changed = set_queue_attribute(queue, 'VisibilityTimeout', default_visibility_timeout, + check_mode=check_mode) or changed + changed = set_queue_attribute(queue, 'MessageRetentionPeriod', message_retention_period, + check_mode=check_mode) or changed + changed = set_queue_attribute(queue, 'MaximumMessageSize', maximum_message_size, + check_mode=check_mode) or changed + changed = set_queue_attribute(queue, 'DelaySeconds', delivery_delay, + 
check_mode=check_mode) or changed + changed = set_queue_attribute(queue, 'ReceiveMessageWaitTimeSeconds', receive_message_wait_time, + check_mode=check_mode) or changed + changed = set_queue_attribute(queue, 'Policy', policy, + check_mode=check_mode) or changed + changed = set_queue_attribute(queue, 'RedrivePolicy', redrive_policy, + check_mode=check_mode) or changed + return changed + + +def set_queue_attribute(queue, attribute, value, check_mode=False): + if not value: + return False + + try: + existing_value = queue.get_attributes(attributes=attribute)[attribute] + except: + existing_value = '' + + # convert dict attributes to JSON strings (sort keys for comparing) + if attribute in ['Policy', 'RedrivePolicy']: + value = json.dumps(value, sort_keys=True) + if existing_value: + existing_value = json.dumps(json.loads(existing_value), sort_keys=True) + + if str(value) != existing_value: + if not check_mode: + queue.set_attribute(attribute, value) + return True + + return False + + +def delete_sqs_queue(connection, module): + queue_name = module.params.get('name') + + result = dict( + region=module.params.get('region'), + name=queue_name, + ) + + try: + queue = connection.get_queue(queue_name) + if queue: + if not module.check_mode: + connection.delete_queue(queue) + result['changed'] = True + + else: + result['changed'] = False + + except BotoServerError: + result['msg'] = 'Failed to delete sqs queue due to error: ' + traceback.format_exc() + module.fail_json(**result) + else: + module.exit_json(**result) + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict(default='present', choices=['present', 'absent']), + name=dict(required=True, type='str'), + default_visibility_timeout=dict(type='int'), + message_retention_period=dict(type='int'), + maximum_message_size=dict(type='int'), + delivery_delay=dict(type='int'), + receive_message_wait_time=dict(type='int'), + policy=dict(type='dict', required=False), + redrive_policy=dict(type='dict', required=False), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + if not region: + module.fail_json(msg='region must be specified') + + try: + connection = connect_to_aws(boto.sqs, region, **aws_connect_params) + + except (NoAuthHandlerFound, AnsibleAWSError) as e: + module.fail_json(msg=str(e)) + + state = module.params.get('state') + if state == 'present': + create_or_update_sqs_queue(connection, module) + elif state == 'absent': + delete_sqs_queue(connection, module) + + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/sts_assume_role.py b/cloud/amazon/sts_assume_role.py index 7eec28b843a..d856947a7d0 100644 --- a/cloud/amazon/sts_assume_role.py +++ b/cloud/amazon/sts_assume_role.py @@ -1,154 +1,157 @@ -#!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: sts_assume_role -short_description: Assume a role using AWS Security Token Service and obtain temporary credentials -description: - - Assume a role using AWS Security Token Service and obtain temporary credentials -version_added: "2.0" -author: Boris Ekelchik (@bekelchik) -options: - role_arn: - description: - - The Amazon Resource Name (ARN) of the role that the caller is assuming (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs) - required: true - role_session_name: - description: - - Name of the role's session - will be used by CloudTrail - required: true - policy: - description: - - Supplemental policy to use in addition to assumed role's policies. - required: false - default: null - duration_seconds: - description: - - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set to 3600 seconds. - required: false - default: null - external_id: - description: - - A unique identifier that is used by third parties to assume a role in their customers' accounts. - required: false - default: null - mfa_serial_number: - description: - - he identification number of the MFA device that is associated with the user who is making the AssumeRole call. - required: false - default: null - mfa_token: - description: - - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA. - required: false - default: null -notes: - - In order to use the assumed role in a following playbook task you must pass the access_key, access_secret and access_token -extends_documentation_fragment: aws -''' - -EXAMPLES = ''' -# Note: These examples do not set authentication details, see the AWS Guide for details. 
- -# Assume an existing role (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) -sts_assume_role: - role_arn: "arn:aws:iam::123456789012:role/someRole" - session_name: "someRoleSession" -register: assumed_role - -# Use the assumed role above to tag an instance in account 123456789012 -ec2_tag: - aws_access_key: "{{ assumed_role.sts_creds.access_key }}" - aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}" - security_token: "{{ assumed_role.sts_creds.session_token }}" - resource: i-xyzxyz01 - state: present - tags: - MyNewTag: value - -''' - -import sys -import time - -try: - import boto.sts - from boto.exception import BotoServerError - HAS_BOTO = True -except ImportError: - HAS_BOTO = False - - -def assume_role_policy(connection, module): - - role_arn = module.params.get('role_arn') - role_session_name = module.params.get('role_session_name') - policy = module.params.get('policy') - duration_seconds = module.params.get('duration_seconds') - external_id = module.params.get('external_id') - mfa_serial_number = module.params.get('mfa_serial_number') - mfa_token = module.params.get('mfa_token') - changed = False - - try: - assumed_role = connection.assume_role(role_arn, role_session_name, policy, duration_seconds, external_id, mfa_serial_number, mfa_token) - changed = True - except BotoServerError, e: - module.fail_json(msg=e) - - module.exit_json(changed=changed, sts_creds=assumed_role.credentials.__dict__, sts_user=assumed_role.user.__dict__) - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update( - dict( - role_arn = dict(required=True, default=None), - role_session_name = dict(required=True, default=None), - duration_seconds = dict(required=False, default=None, type='int'), - external_id = dict(required=False, default=None), - policy = dict(required=False, default=None), - mfa_serial_number = dict(required=False, default=None), - mfa_token = dict(required=False, default=None) - ) - ) - - module = AnsibleModule(argument_spec=argument_spec) - - if not HAS_BOTO: - module.fail_json(msg='boto required for this module') - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - - if region: - try: - connection = connect_to_aws(boto.sts, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: - module.fail_json(msg=str(e)) - else: - module.fail_json(msg="region must be specified") - - try: - assume_role_policy(connection, module) - except BotoServerError, e: - module.fail_json(msg=e) - - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -main() +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+                    'supported_by': 'committer',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: sts_assume_role
+short_description: Assume a role using AWS Security Token Service and obtain temporary credentials
+description:
+    - Assume a role using AWS Security Token Service and obtain temporary credentials
+version_added: "2.0"
+author: Boris Ekelchik (@bekelchik)
+options:
+  role_arn:
+    description:
+      - The Amazon Resource Name (ARN) of the role that the caller is assuming (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs)
+    required: true
+  role_session_name:
+    description:
+      - Name of the role's session - will be used by CloudTrail
+    required: true
+  policy:
+    description:
+      - Supplemental policy to use in addition to assumed role's policies.
+    required: false
+    default: null
+  duration_seconds:
+    description:
+      - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set to 3600 seconds.
+    required: false
+    default: null
+  external_id:
+    description:
+      - A unique identifier that is used by third parties to assume a role in their customers' accounts.
+    required: false
+    default: null
+  mfa_serial_number:
+    description:
+      - The identification number of the MFA device that is associated with the user who is making the AssumeRole call.
+    required: false
+    default: null
+  mfa_token:
+    description:
+      - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA.
+    required: false
+    default: null
+notes:
+  - In order to use the assumed role in a following playbook task you must pass the access_key, access_secret and access_token
+extends_documentation_fragment:
+    - aws
+    - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
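+
+# (Illustrative addition; the ARNs and token code below are placeholders.)
+# Assume a role whose trust policy requires MFA, passing the device serial and
+# current one-time code via mfa_serial_number and mfa_token.
+sts_assume_role:
+  role_arn: "arn:aws:iam::123456789012:role/someMfaRole"
+  role_session_name: "someMfaRoleSession"
+  mfa_serial_number: "arn:aws:iam::123456789012:mfa/my_user"
+  mfa_token: "123456"
+register: assumed_mfa_role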
+ +# Assume an existing role (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) +sts_assume_role: + role_arn: "arn:aws:iam::123456789012:role/someRole" + role_session_name: "someRoleSession" +register: assumed_role + +# Use the assumed role above to tag an instance in account 123456789012 +ec2_tag: + aws_access_key: "{{ assumed_role.sts_creds.access_key }}" + aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}" + security_token: "{{ assumed_role.sts_creds.session_token }}" + resource: i-xyzxyz01 + state: present + tags: + MyNewTag: value + +''' + +try: + import boto.sts + from boto.exception import BotoServerError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info + + +def assume_role_policy(connection, module): + + role_arn = module.params.get('role_arn') + role_session_name = module.params.get('role_session_name') + policy = module.params.get('policy') + duration_seconds = module.params.get('duration_seconds') + external_id = module.params.get('external_id') + mfa_serial_number = module.params.get('mfa_serial_number') + mfa_token = module.params.get('mfa_token') + changed = False + + try: + assumed_role = connection.assume_role(role_arn, role_session_name, policy, duration_seconds, external_id, mfa_serial_number, mfa_token) + changed = True + except BotoServerError as e: + module.fail_json(msg=e) + + module.exit_json(changed=changed, sts_creds=assumed_role.credentials.__dict__, sts_user=assumed_role.user.__dict__) + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + role_arn = dict(required=True, default=None), + role_session_name = dict(required=True, default=None), + duration_seconds = dict(required=False, default=None, type='int'), + external_id = dict(required=False, default=None), + policy = dict(required=False, default=None), + mfa_serial_number = dict(required=False, default=None), + mfa_token = dict(required=False, default=None) + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if region: + try: + connection = connect_to_aws(boto.sts, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") + + try: + assume_role_policy(connection, module) + except BotoServerError as e: + module.fail_json(msg=e) + + +if __name__ == '__main__': + main() diff --git a/cloud/amazon/sts_session_token.py b/cloud/amazon/sts_session_token.py new file mode 100644 index 00000000000..4886b625fd2 --- /dev/null +++ b/cloud/amazon/sts_session_token.py @@ -0,0 +1,164 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+                    'supported_by': 'committer',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: sts_session_token
+short_description: Obtain a session token from the AWS Security Token Service
+description:
+    - Obtain a session token from the AWS Security Token Service
+version_added: "2.2"
+author: Victor Costan (@pwnall)
+options:
+  duration_seconds:
+    description:
+      - The duration, in seconds, of the session token. See http://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html#API_GetSessionToken_RequestParameters for acceptable and default values.
+    required: false
+    default: null
+  mfa_serial_number:
+    description:
+      - The identification number of the MFA device that is associated with the user who is making the GetSessionToken call.
+    required: false
+    default: null
+  mfa_token:
+    description:
+      - The value provided by the MFA device, if the trust policy of the user requires MFA.
+    required: false
+    default: null
+notes:
+  - In order to use the session token in a following playbook task you must pass the I(access_key), I(access_secret) and I(access_token).
+extends_documentation_fragment:
+    - aws
+    - ec2
+requirements:
+    - boto3
+    - botocore
+    - python >= 2.6
+'''
+
+RETURN = """
+sts_creds:
+    description: The Credentials object returned by the AWS Security Token Service
+    returned: always
+    type: dict
+    sample:
+      access_key: ASXXXXXXXXXXXXXXXXXX
+      expiration: "2016-04-08T11:59:47+00:00"
+      secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+      session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+changed:
+    description: True if obtaining the credentials succeeds
+    type: bool
+    returned: always
+"""
+
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
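+
+# (Illustrative addition; the MFA serial number and token code are placeholders.)
+# Get a session token for a user whose policy requires MFA.
+sts_session_token:
+  mfa_serial_number: "arn:aws:iam::123456789012:mfa/my_user"
+  mfa_token: "123456"
+register: mfa_session_credentials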
+
+# Get a session token (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html)
+sts_session_token:
+  duration_seconds: 3600
+register: session_credentials
+
+# Use the session token obtained above to tag an instance in account 123456789012
+ec2_tag:
+  aws_access_key: "{{ session_credentials.sts_creds.access_key }}"
+  aws_secret_key: "{{ session_credentials.sts_creds.secret_key }}"
+  security_token: "{{ session_credentials.sts_creds.session_token }}"
+  resource: i-xyzxyz01
+  state: present
+  tags:
+    MyNewTag: value
+
+'''
+
+try:
+    import boto3
+    from botocore.exceptions import ClientError
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
+
+
+def normalize_credentials(credentials):
+    access_key = credentials.get('AccessKeyId', None)
+    secret_key = credentials.get('SecretAccessKey', None)
+    session_token = credentials.get('SessionToken', None)
+    expiration = credentials.get('Expiration', None)
+    return {
+        'access_key': access_key,
+        'secret_key': secret_key,
+        'session_token': session_token,
+        'expiration': expiration
+    }
+
+def get_session_token(connection, module):
+    duration_seconds = module.params.get('duration_seconds')
+    mfa_serial_number = module.params.get('mfa_serial_number')
+    mfa_token = module.params.get('mfa_token')
+    changed = False
+
+    args = {}
+    if duration_seconds is not None:
+        args['DurationSeconds'] = duration_seconds
+    if mfa_serial_number is not None:
+        args['SerialNumber'] = mfa_serial_number
+    if mfa_token is not None:
+        args['TokenCode'] = mfa_token
+
+    try:
+        response = connection.get_session_token(**args)
+        changed = True
+    except ClientError as e:
+        module.fail_json(msg=e)
+
+    credentials = normalize_credentials(response.get('Credentials', {}))
+    module.exit_json(changed=changed, sts_creds=credentials)
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            duration_seconds = dict(required=False, default=None, type='int'),
+            mfa_serial_number = dict(required=False, default=None),
+            mfa_token = dict(required=False, default=None)
+        )
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec)
+
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 and botocore are required.')
+
+    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+    if region:
+        connection = boto3_conn(module, conn_type='client', resource='sts', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+    else:
+        module.fail_json(msg="region must be specified")
+
+    get_session_token(connection, module)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/__init__.py b/cloud/atomic/__init__.py
similarity index 100%
rename from __init__.py
rename to cloud/atomic/__init__.py
diff --git a/cloud/atomic/atomic_host.py b/cloud/atomic/atomic_host.py
new file mode 100644
index 00000000000..ae4cb06e28c
--- /dev/null
+++ b/cloud/atomic/atomic_host.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public licenses +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION=''' +--- +module: atomic_host +short_description: Manage the atomic host platform +description: + - Manage the atomic host platform + - Rebooting of Atomic host platform should be done outside this module +version_added: "2.2" +author: "Saravanan KR @krsacme" +notes: + - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file) +requirements: + - atomic + - "python >= 2.6" +options: + revision: + description: + - The version number of the atomic host to be deployed. Providing C(latest) will upgrade to the latest available version. + required: false + default: latest + aliases: ["version"] +''' + +EXAMPLES = ''' + +# Upgrade the atomic host platform to the latest version (atomic host upgrade) +- atomic_host: + revision: latest + +# Deploy a specific revision as the atomic host (atomic host deploy 23.130) +- atomic_host: + revision: 23.130 +''' + +RETURN = ''' +msg: + description: The command standard output + returned: always + type: string + sample: 'Already on latest' +''' + +def core(module): + revision = module.params['revision'] + args = [] + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C') + + if revision == 'latest': + args = ['atomic', 'host', 'upgrade'] + else: + args = ['atomic', 'host', 'deploy', revision] + + out = {} + err = {} + rc = 0 + + rc, out, err = module.run_command(args, check_rc=False) + + if rc == 77 and revision == 'latest': + module.exit_json(msg="Already on latest", changed=False) + elif rc != 0: + module.fail_json(rc=rc, msg=err) + else: + module.exit_json(msg=out, changed=True) + + +def main(): + module = AnsibleModule( + argument_spec = dict( + revision = dict(default='latest', required=False, aliases=["version"]), + ), + ) + + # Verify that the platform is atomic host + if not os.path.exists("/run/ostree-booted"): + module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only") + + try: + core(module) + except Exception as e: + module.fail_json(msg=str(e)) + + +# import module snippets +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/cloud/atomic/atomic_image.py b/cloud/atomic/atomic_image.py new file mode 100644 index 00000000000..8210a1d3b86 --- /dev/null +++ b/cloud/atomic/atomic_image.py @@ -0,0 +1,143 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION='''
+---
+module: atomic_image
+short_description: Manage the container images on the atomic host platform
+description:
+    - Manage the container images on the atomic host platform
+    - Allows executing commands on the container images
+version_added: "2.2"
+author: "Saravanan KR @krsacme"
+notes:
+    - Host should support the C(atomic) command
+requirements:
+  - atomic
+  - "python >= 2.6"
+options:
+  name:
+    description:
+      - Name of the container image
+    required: True
+    default: null
+  state:
+    description:
+      - The state of the container image.
+      - The state C(latest) will ensure container image is upgraded to the latest version and forcefully restart container, if running.
+    required: False
+    choices: ["present", "absent", "latest"]
+    default: latest
+  started:
+    description:
+      - Start or stop the container
+    required: False
+    choices: ["yes", "no"]
+    default: yes
+'''
+
+EXAMPLES = '''
+
+# Execute the run command on rsyslog container image (atomic run rhel7/rsyslog)
+- atomic_image:
+    name: rhel7/rsyslog
+    state: latest
+
+'''
+
+RETURN = '''
+msg:
+    description: The command standard output
+    returned: always
+    type: string
+    sample: [u'Using default tag: latest ...']
+'''
+
+def do_upgrade(module, image):
+    args = ['atomic', 'update', '--force', image]
+    rc, out, err = module.run_command(args, check_rc=False)
+    if rc != 0: # something went wrong emit the msg
+        module.fail_json(rc=rc, msg=err)
+    elif 'Image is up to date' in out:
+        return False
+
+    return True
+
+
+def core(module):
+    image = module.params['name']
+    state = module.params['state']
+    started = module.params['started']
+    is_upgraded = False
+
+    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+    if state == 'present' or state == 'latest':
+        if state == 'latest':
+            is_upgraded = do_upgrade(module, image)
+
+        if started:
+            args = ['atomic', 'run', image]
+        else:
+            args = ['atomic', 'install', image]
+    elif state == 'absent':
+        args = ['atomic', 'uninstall', image]
+
+    out = {}
+    err = {}
+    rc = 0
+    rc, out, err = module.run_command(args, check_rc=False)
+
+    if rc < 0:
+        module.fail_json(rc=rc, msg=err)
+    elif rc == 1 and 'already present' in err:
+        module.exit_json(result=err, changed=is_upgraded)
+    elif started and 'Container is running' in out:
+        module.exit_json(result=out, changed=is_upgraded)
+    else:
+        module.exit_json(msg=out, changed=True)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec = dict(
+            name = dict(default=None, required=True),
+            state = dict(default='latest', choices=['present', 'absent', 'latest']),
+            started = dict(default='yes', type='bool'),
+        ),
+    )
+
+    # Verify that the platform supports atomic command
+    rc, out, err = module.run_command('atomic -v', check_rc=False)
+    if rc != 0:
+        module.fail_json(msg="Error in running atomic command", err=err)
+
+    try:
+        core(module)
+    except Exception as e:
+        module.fail_json(msg=str(e))
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+    main()
diff --git a/cloud/azure/__init__.py b/cloud/azure/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/cloud/azure/azure_rm_deployment.py b/cloud/azure/azure_rm_deployment.py
new file mode 100644
index 00000000000..88ecf0cea02
--- /dev/null
+++ b/cloud/azure/azure_rm_deployment.py
@@ -0,0 +1,665 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'committer',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: azure_rm_deployment
+
+short_description: Create or destroy Azure Resource Manager template deployments
+
+version_added: "2.1"
+
+description:
+     - "Create or destroy Azure Resource Manager template deployments via the Azure SDK for Python.
+       You can find quick-start templates on GitHub at https://github.com/azure/azure-quickstart-templates.
+       For more information on Azure Resource Manager templates see https://azure.microsoft.com/en-us/documentation/articles/resource-group-template-deploy/."
+
+options:
+  resource_group_name:
+    description:
+      - The resource group name to use or create to host the deployed template.
+    required: true
+  location:
+    description:
+      - The geo-location in which the resource group will be located.
+    required: false
+    default: westus
+  deployment_mode:
+    description:
+      - In incremental mode, resources are deployed without deleting existing resources that are not included in the template.
+        In complete mode, resources are deployed and existing resources in the resource group that are not included in the template are deleted.
+    required: false
+    default: incremental
+    choices:
+        - complete
+        - incremental
+  state:
+    description:
+      - If state is "present", the template will be deployed. If state is "present" and the deployment exists, it will be
+        updated. If state is "absent", the deployment and its resource group will be removed.
+    default: present
+    required: false
+    choices:
+        - present
+        - absent
+  template:
+    description:
+      - A hash containing the templates inline. This parameter is mutually exclusive with 'template_link'.
+        Either one of them is required if the "state" parameter is "present".
+    required: false
+    default: null
+  template_link:
+    description:
+      - Uri of the file containing the template body. This parameter is mutually exclusive with 'template'. Either one
+        of them is required if the "state" parameter is "present".
+    required: false
+    default: null
+  parameters:
+    description:
+      - A hash of all the required template variables for the deployment template. This parameter is mutually exclusive
+        with 'parameters_link'. Either one of them is required if the "state" parameter is "present".
+    required: false
+    default: null
+  parameters_link:
+    description:
+      - Uri of the file containing the parameters body. This parameter is mutually exclusive with 'parameters'. Either
+        one of them is required if the "state" parameter is "present".
+    required: false
+    default: null
+  deployment_name:
+    description:
+      - The name of the deployment to be tracked in the resource group deployment history. Re-using a deployment name
+        will overwrite the previous value in the resource group's deployment history.
+    default: ansible-arm
+  wait_for_deployment_completion:
+    description:
+      - Whether or not to block until the deployment has completed.
+ default: yes + choices: ['yes', 'no'] + wait_for_deployment_polling_period: + description: + - Time (in seconds) to wait between polls when waiting for deployment completion. + default: 10 + +extends_documentation_fragment: + - azure + +author: + - David Justice (@devigned) + - Laurent Mazuel (@lmazuel) + - Andre Price (@obsoleted) + +''' + +EXAMPLES = ''' +# Destroy a template deployment +- name: Destroy Azure Deploy + azure_rm_deployment: + state: absent + subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + resource_group_name: dev-ops-cle + +# Create or update a template deployment based on uris using parameter and template links +- name: Create Azure Deploy + azure_rm_deployment: + state: present + resource_group_name: dev-ops-cle + template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.json' + parameters_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.parameters.json' + +# Create or update a template deployment based on a uri to the template and parameters specified inline. +# This deploys a VM with SSH support for a given public key, then stores the result in 'azure_vms'. The result is then +# used to create a new host group. This host group is then used to wait for each instance to respond to the public IP SSH. +--- +- hosts: localhost + connection: local + gather_facts: no + tasks: + - name: Destroy Azure Deploy + azure_rm_deployment: + state: absent + subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + resource_group_name: dev-ops-cle + + - name: Create Azure Deploy + azure_rm_deployment: + state: present + subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + resource_group_name: dev-ops-cle + parameters: + newStorageAccountName: + value: devopsclestorage1 + adminUsername: + value: devopscle + dnsNameForPublicIP: + value: devopscleazure + location: + value: West US + vmSize: + value: Standard_A2 + vmName: + value: ansibleSshVm + sshKeyData: + value: YOUR_SSH_PUBLIC_KEY + template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-sshkey/azuredeploy.json' + register: azure + + - name: Add new instance to host group + add_host: + hostname: '{{ item['ips'][0].public_ip }}' + groupname: azure_vms + with_items: "{{ azure.deployment.instances }}" + + - hosts: azure_vms + user: devopscle + tasks: + - name: Wait for SSH to come up + wait_for: + port: 22 + timeout: 2000 + state: started + - name: echo the hostname of the vm + shell: hostname + +# Deploy an Azure WebApp running a hello world'ish node app +- name: Create Azure WebApp Deployment at http://devopscleweb.azurewebsites.net/hello.js + azure_rm_deployment: + state: present + subscription_id: cbbdaed0-fea9-4693-bf0c-d446ac93c030 + resource_group_name: dev-ops-cle-webapp + parameters: + repoURL: + value: 'https://github.com/devigned/az-roadshow-oss.git' + siteName: + value: devopscleweb + hostingPlanName: + value: someplan + siteLocation: + value: westus + sku: + value: Standard + template_link: 'https://raw.githubusercontent.com/azure/azure-quickstart-templates/master/201-web-app-github-deploy/azuredeploy.json' + +# Create or update a template deployment based on an inline template and parameters +- name: Create Azure Deploy + azure_rm_deployment: + state: present + subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + resource_group_name: dev-ops-cle + + template: + $schema: 
"https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#" + contentVersion: "1.0.0.0" + parameters: + newStorageAccountName: + type: "string" + metadata: + description: "Unique DNS Name for the Storage Account where the Virtual Machine's disks will be placed." + adminUsername: + type: "string" + metadata: + description: "User name for the Virtual Machine." + adminPassword: + type: "securestring" + metadata: + description: "Password for the Virtual Machine." + dnsNameForPublicIP: + type: "string" + metadata: + description: "Unique DNS Name for the Public IP used to access the Virtual Machine." + ubuntuOSVersion: + type: "string" + defaultValue: "14.04.2-LTS" + allowedValues: + - "12.04.5-LTS" + - "14.04.2-LTS" + - "15.04" + metadata: + description: "The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version. Allowed values: 12.04.5-LTS, 14.04.2-LTS, 15.04." + variables: + location: "West US" + imagePublisher: "Canonical" + imageOffer: "UbuntuServer" + OSDiskName: "osdiskforlinuxsimple" + nicName: "myVMNic" + addressPrefix: "192.0.2.0/24" + subnetName: "Subnet" + subnetPrefix: "10.0.0.0/24" + storageAccountType: "Standard_LRS" + publicIPAddressName: "myPublicIP" + publicIPAddressType: "Dynamic" + vmStorageAccountContainerName: "vhds" + vmName: "MyUbuntuVM" + vmSize: "Standard_D1" + virtualNetworkName: "MyVNET" + vnetID: "[resourceId('Microsoft.Network/virtualNetworks',variables('virtualNetworkName'))]" + subnetRef: "[concat(variables('vnetID'),'/subnets/',variables('subnetName'))]" + resources: + - type: "Microsoft.Storage/storageAccounts" + name: "[parameters('newStorageAccountName')]" + apiVersion: "2015-05-01-preview" + location: "[variables('location')]" + properties: + accountType: "[variables('storageAccountType')]" + - apiVersion: "2015-05-01-preview" + type: "Microsoft.Network/publicIPAddresses" + name: "[variables('publicIPAddressName')]" + location: "[variables('location')]" + properties: + publicIPAllocationMethod: "[variables('publicIPAddressType')]" + dnsSettings: + domainNameLabel: "[parameters('dnsNameForPublicIP')]" + - type: "Microsoft.Network/virtualNetworks" + apiVersion: "2015-05-01-preview" + name: "[variables('virtualNetworkName')]" + location: "[variables('location')]" + properties: + addressSpace: + addressPrefixes: + - "[variables('addressPrefix')]" + subnets: + - + name: "[variables('subnetName')]" + properties: + addressPrefix: "[variables('subnetPrefix')]" + - type: "Microsoft.Network/networkInterfaces" + apiVersion: "2015-05-01-preview" + name: "[variables('nicName')]" + location: "[variables('location')]" + dependsOn: + - "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]" + - "[concat('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]" + properties: + ipConfigurations: + - + name: "ipconfig1" + properties: + privateIPAllocationMethod: "Dynamic" + publicIPAddress: + id: "[resourceId('Microsoft.Network/publicIPAddresses',variables('publicIPAddressName'))]" + subnet: + id: "[variables('subnetRef')]" + - type: "Microsoft.Compute/virtualMachines" + apiVersion: "2015-06-15" + name: "[variables('vmName')]" + location: "[variables('location')]" + dependsOn: + - "[concat('Microsoft.Storage/storageAccounts/', parameters('newStorageAccountName'))]" + - "[concat('Microsoft.Network/networkInterfaces/', variables('nicName'))]" + properties: + hardwareProfile: + vmSize: "[variables('vmSize')]" + osProfile: + computername: "[variables('vmName')]" + adminUsername: 
"[parameters('adminUsername')]" + adminPassword: "[parameters('adminPassword')]" + storageProfile: + imageReference: + publisher: "[variables('imagePublisher')]" + offer: "[variables('imageOffer')]" + sku: "[parameters('ubuntuOSVersion')]" + version: "latest" + osDisk: + name: "osdisk" + vhd: + uri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net/',variables('vmStorageAccountContainerName'),'/',variables('OSDiskName'),'.vhd')]" + caching: "ReadWrite" + createOption: "FromImage" + networkProfile: + networkInterfaces: + - + id: "[resourceId('Microsoft.Network/networkInterfaces',variables('nicName'))]" + diagnosticsProfile: + bootDiagnostics: + enabled: "true" + storageUri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net')]" + parameters: + newStorageAccountName: + value: devopsclestorage + adminUsername: + value: devopscle + adminPassword: + value: Password1! + dnsNameForPublicIP: + value: devopscleazure +''' + +RETURN = ''' +deployment: + description: Deployment details + type: dict + returned: always + sample: + group_name: + description: Name of the resource group + type: string + returned: always + id: + description: The Azure ID of the deployment + type: string + returned: always + instances: + description: Provides the public IP addresses for each VM instance. + type: list + returned: always + name: + description: Name of the deployment + type: string + returned: always + outputs: + description: Dictionary of outputs received from the deployment + type: dict + returned: always +''' + +PREREQ_IMPORT_ERROR = None + +try: + import time + import yaml +except ImportError as exc: + IMPORT_ERROR = "Error importing module prerequisites: %s" % exc + +from ansible.module_utils.azure_rm_common import * + +try: + from itertools import chain + from azure.common.credentials import ServicePrincipalCredentials + from azure.common.exceptions import CloudError + from azure.mgmt.resource.resources.models import (DeploymentProperties, + ParametersLink, + TemplateLink, + Deployment, + ResourceGroup, + Dependency) + from azure.mgmt.resource.resources import ResourceManagementClient + from azure.mgmt.network import NetworkManagementClient + +except ImportError: + # This is handled in azure_rm_common + pass + + +class AzureRMDeploymentManager(AzureRMModuleBase): + + def __init__(self): + + self.module_arg_spec = dict( + resource_group_name=dict(type='str', required=True, aliases=['resource_group']), + state=dict(type='str', default='present', choices=['present', 'absent']), + template=dict(type='dict', default=None), + parameters=dict(type='dict', default=None), + template_link=dict(type='str', default=None), + parameters_link=dict(type='str', default=None), + location=dict(type='str', default="westus"), + deployment_mode=dict(type='str', default='incremental', choices=['complete', 'incremental']), + deployment_name=dict(type='str', default="ansible-arm"), + wait_for_deployment_completion=dict(type='bool', default=True), + wait_for_deployment_polling_period=dict(type='int', default=10) + ) + + mutually_exclusive = [('template', 'template_link'), + ('parameters', 'parameters_link')] + + self.resource_group_name = None + self.state = None + self.template = None + self.parameters = None + self.template_link = None + self.parameters_link = None + self.location = None + self.deployment_mode = None + self.deployment_name = None + self.wait_for_deployment_completion = None + self.wait_for_deployment_polling_period = None + self.tags = None + + 
self.results = dict( + deployment=dict(), + changed=False, + msg="" + ) + + super(AzureRMDeploymentManager, self).__init__(derived_arg_spec=self.module_arg_spec, + mutually_exclusive=mutually_exclusive, + supports_check_mode=False) + + def exec_module(self, **kwargs): + + if PREREQ_IMPORT_ERROR: + self.fail(PREREQ_IMPORT_ERROR) + + for key in self.module_arg_spec.keys() + ['tags']: + setattr(self, key, kwargs[key]) + + if self.state == 'present': + deployment = self.deploy_template() + self.results['deployment'] = dict( + name=deployment.name, + group_name=self.resource_group_name, + id=deployment.id, + outputs=deployment.properties.outputs, + instances=self._get_instances(deployment) + ) + self.results['changed'] = True + self.results['msg'] = 'deployment succeeded' + else: + if self.resource_group_exists(self.resource_group_name): + self.destroy_resource_group() + self.results['changed'] = True + self.results['msg'] = "deployment deleted" + + return self.results + + def deploy_template(self): + """ + Deploy the targeted template and parameters + :param module: Ansible module containing the validated configuration for the deployment template + :param client: resource management client for azure + :param conn_info: connection info needed + :return: + """ + + deploy_parameter = DeploymentProperties(self.deployment_mode) + if not self.parameters_link: + deploy_parameter.parameters = self.parameters + else: + deploy_parameter.parameters_link = ParametersLink( + uri=self.parameters_link + ) + if not self.template_link: + deploy_parameter.template = self.template + else: + deploy_parameter.template_link = TemplateLink( + uri=self.template_link + ) + + params = ResourceGroup(location=self.location, tags=self.tags) + + try: + self.rm_client.resource_groups.create_or_update(self.resource_group_name, params) + except CloudError as exc: + self.fail("Resource group create_or_update failed with status code: %s and message: %s" % + (exc.status_code, exc.message)) + try: + result = self.rm_client.deployments.create_or_update(self.resource_group_name, + self.deployment_name, + deploy_parameter) + + deployment_result = self.get_poller_result(result) + if self.wait_for_deployment_completion: + while deployment_result.properties is None or deployment_result.properties.provisioning_state not in ['Canceled', 'Failed', 'Deleted', + 'Succeeded']: + time.sleep(self.wait_for_deployment_polling_period) + deployment_result = self.rm_client.deployments.get(self.resource_group_name, self.deployment_name) + except CloudError as exc: + failed_deployment_operations = self._get_failed_deployment_operations(self.deployment_name) + self.log("Deployment failed %s: %s" % (exc.status_code, exc.message)) + self.fail("Deployment failed with status code: %s and message: %s" % (exc.status_code, exc.message), + failed_deployment_operations=failed_deployment_operations) + + if self.wait_for_deployment_completion and deployment_result.properties.provisioning_state != 'Succeeded': + self.log("provisioning state: %s" % deployment_result.properties.provisioning_state) + failed_deployment_operations = self._get_failed_deployment_operations(self.deployment_name) + self.fail('Deployment failed. 
Deployment id: %s' % deployment_result.id,
+                      failed_deployment_operations=failed_deployment_operations)
+
+        return deployment_result
+
+    def destroy_resource_group(self):
+        """
+        Destroy the targeted resource group
+        """
+        try:
+            result = self.rm_client.resource_groups.delete(self.resource_group_name)
+            result.wait()  # Block until the delete finishes
+        except CloudError as e:
+            if e.status_code == 404 or e.status_code == 204:
+                return
+            else:
+                self.fail("Delete resource group and deploy failed with status code: %s and message: %s" %
+                          (e.status_code, e.message))
+
+    def resource_group_exists(self, resource_group):
+        '''
+        Return True/False based on existence of requested resource group.
+
+        :param resource_group: string. Name of a resource group.
+        :return: boolean
+        '''
+        try:
+            self.rm_client.resource_groups.get(resource_group)
+        except CloudError:
+            return False
+        return True
+
+    def _get_failed_nested_operations(self, current_operations):
+        new_operations = []
+        for operation in current_operations:
+            if operation.properties.provisioning_state == 'Failed':
+                new_operations.append(operation)
+                if operation.properties.target_resource and \
+                        'Microsoft.Resources/deployments' in operation.properties.target_resource.id:
+                    nested_deployment = operation.properties.target_resource.resource_name
+                    try:
+                        nested_operations = self.rm_client.deployment_operations.list(self.resource_group_name,
+                                                                                      nested_deployment)
+                    except CloudError as exc:
+                        self.fail("List nested deployment operations failed with status code: %s and message: %s" %
+                                  (exc.status_code, exc.message))
+                    new_nested_operations = self._get_failed_nested_operations(nested_operations)
+                    new_operations += new_nested_operations
+        return new_operations
+
+    def _get_failed_deployment_operations(self, deployment_name):
+        results = []
+        # time.sleep(15)  # there is a race condition between when we ask for deployment status and when the
+        #                 # status is available.
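The commented-out sleep above flags a real race: a deployment can report failure before its operation records are listable. A hedged sketch of one alternative, retrying the listing briefly instead of sleeping a fixed 15 seconds (`list_operations` stands in for `self.rm_client.deployment_operations.list` and is an assumption, not part of the module):

```python
# Illustrative retry loop for an eventually-consistent listing call.
import time

def list_operations_with_retry(list_operations, group, deployment,
                               attempts=5, delay=3):
    last_exc = None
    for _ in range(attempts):
        try:
            ops = list(list_operations(group, deployment))
            if ops:  # results can lag the failure by a few seconds
                return ops
        except Exception as exc:  # CloudError in the real module
            last_exc = exc
        time.sleep(delay)
    if last_exc is not None:
        raise last_exc
    return []
```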
+ + try: + operations = self.rm_client.deployment_operations.list(self.resource_group_name, deployment_name) + except CloudError as exc: + self.fail("Get deployment failed with status code: %s and message: %s" % + (exc.status_code, exc.message)) + try: + results = [ + dict( + id=op.id, + operation_id=op.operation_id, + status_code=op.properties.status_code, + status_message=op.properties.status_message, + target_resource=dict( + id=op.properties.target_resource.id, + resource_name=op.properties.target_resource.resource_name, + resource_type=op.properties.target_resource.resource_type + ) if op.properties.target_resource else None, + provisioning_state=op.properties.provisioning_state, + ) + for op in self._get_failed_nested_operations(operations) + ] + except: + # If we fail here, the original error gets lost and user receives wrong error message/stacktrace + pass + self.log(dict(failed_deployment_operations=results), pretty_print=True) + return results + + def _get_instances(self, deployment): + dep_tree = self._build_hierarchy(deployment.properties.dependencies) + vms = self._get_dependencies(dep_tree, resource_type="Microsoft.Compute/virtualMachines") + vms_and_nics = [(vm, self._get_dependencies(vm['children'], "Microsoft.Network/networkInterfaces")) + for vm in vms] + vms_and_ips = [(vm['dep'], self._nic_to_public_ips_instance(nics)) + for vm, nics in vms_and_nics] + return [dict(vm_name=vm.resource_name, ips=[self._get_ip_dict(ip) + for ip in ips]) for vm, ips in vms_and_ips if len(ips) > 0] + + def _get_dependencies(self, dep_tree, resource_type): + matches = [value for value in dep_tree.values() if value['dep'].resource_type == resource_type] + for child_tree in [value['children'] for value in dep_tree.values()]: + matches += self._get_dependencies(child_tree, resource_type) + return matches + + def _build_hierarchy(self, dependencies, tree=None): + tree = dict(top=True) if tree is None else tree + for dep in dependencies: + if dep.resource_name not in tree: + tree[dep.resource_name] = dict(dep=dep, children=dict()) + if isinstance(dep, Dependency) and dep.depends_on is not None and len(dep.depends_on) > 0: + self._build_hierarchy(dep.depends_on, tree[dep.resource_name]['children']) + + if 'top' in tree: + tree.pop('top', None) + keys = list(tree.keys()) + for key1 in keys: + for key2 in keys: + if key2 in tree and key1 in tree[key2]['children'] and key1 in tree: + tree[key2]['children'][key1] = tree[key1] + tree.pop(key1) + return tree + + def _get_ip_dict(self, ip): + ip_dict = dict(name=ip.name, + id=ip.id, + public_ip=ip.ip_address, + public_ip_allocation_method=str(ip.public_ip_allocation_method) + ) + if ip.dns_settings: + ip_dict['dns_settings'] = { + 'domain_name_label':ip.dns_settings.domain_name_label, + 'fqdn':ip.dns_settings.fqdn + } + return ip_dict + + def _nic_to_public_ips_instance(self, nics): + return [self.network_client.public_ip_addresses.get(public_ip_id.split('/')[4], public_ip_id.split('/')[-1]) + for nic_obj in [self.network_client.network_interfaces.get(self.resource_group_name, + nic['dep'].resource_name) for nic in nics] + for public_ip_id in [ip_conf_instance.public_ip_address.id + for ip_conf_instance in nic_obj.ip_configurations + if ip_conf_instance.public_ip_address]] + + +def main(): + AzureRMDeploymentManager() + +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() + diff --git a/cloud/centurylink/__init__.py b/cloud/centurylink/__init__.py index 8b137891791..e69de29bb2d 100644 --- a/cloud/centurylink/__init__.py +++ 
b/cloud/centurylink/__init__.py @@ -1 +0,0 @@ - diff --git a/cloud/centurylink/clc_aa_policy.py b/cloud/centurylink/clc_aa_policy.py new file mode 100644 index 00000000000..8693f4c774b --- /dev/null +++ b/cloud/centurylink/clc_aa_policy.py @@ -0,0 +1,360 @@ +#!/usr/bin/python + +# +# Copyright (c) 2015 CenturyLink +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +module: clc_aa_policy +short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud. +description: + - An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud. +version_added: "2.0" +options: + name: + description: + - The name of the Anti Affinity Policy. + required: True + location: + description: + - Datacenter in which the policy lives/should live. + required: True + state: + description: + - Whether to create or delete the policy. + required: False + default: present + choices: ['present','absent'] + wait: + description: + - Whether to wait for the tasks to finish before returning. + default: True + required: False + choices: [True, False] +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME, the account login id for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
+''' + +EXAMPLES = ''' +# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples + +--- +- name: Create AA Policy + hosts: localhost + gather_facts: False + connection: local + tasks: + - name: Create an Anti Affinity Policy + clc_aa_policy: + name: Hammer Time + location: UK3 + state: present + register: policy + + - name: debug + debug: + var: policy + +--- +- name: Delete AA Policy + hosts: localhost + gather_facts: False + connection: local + tasks: + - name: Delete an Anti Affinity Policy + clc_aa_policy: + name: Hammer Time + location: UK3 + state: absent + register: policy + + - name: debug + debug: + var: policy +''' + +RETURN = ''' +policy: + description: The anti affinity policy information + returned: success + type: dict + sample: + { + "id":"1a28dd0988984d87b9cd61fa8da15424", + "name":"test_aa_policy", + "location":"UC1", + "links":[ + { + "rel":"self", + "href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424", + "verbs":[ + "GET", + "DELETE", + "PUT" + ] + }, + { + "rel":"location", + "href":"/v2/datacenters/wfad/UC1", + "id":"uc1", + "name":"UC1 - US West (Santa Clara)" + } + ] + } +''' + +__version__ = '${version}' + +import os + +from distutils.version import LooseVersion + +try: + import requests +except ImportError: + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk. +# sudo pip install clc-sdk +# +try: + import clc as clc_sdk + from clc import CLCException +except ImportError: + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + +from ansible.module_utils.basic import AnsibleModule + + +class ClcAntiAffinityPolicy: + + clc = clc_sdk + module = None + + def __init__(self, module): + """ + Construct module + """ + self.module = module + self.policy_dict = {} + + if not CLC_FOUND: + self.module.fail_json( + msg='clc-python-sdk required for this module') + if not REQUESTS_FOUND: + self.module.fail_json( + msg='requests library is required for this module') + if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + @staticmethod + def _define_module_argument_spec(): + """ + Define the argument spec for the ansible module + :return: argument spec dictionary + """ + argument_spec = dict( + name=dict(required=True), + location=dict(required=True), + wait=dict(default=True), + state=dict(default='present', choices=['present', 'absent']), + ) + return argument_spec + + # Module Behavior Goodness + def process_request(self): + """ + Process the request - Main Code Path + :return: Returns with either an exit_json or fail_json + """ + p = self.module.params + + self._set_clc_credentials_from_env() + self.policy_dict = self._get_policies_for_datacenter(p) + + if p['state'] == "absent": + changed, policy = self._ensure_policy_is_absent(p) + else: + changed, policy = self._ensure_policy_is_present(p) + + if hasattr(policy, 'data'): + policy = policy.data + elif hasattr(policy, '__dict__'): + policy = policy.__dict__ + + self.module.exit_json(changed=changed, policy=policy) + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = 
env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + def _get_policies_for_datacenter(self, p): + """ + Get the Policies for a datacenter by calling the CLC API. + :param p: datacenter to get policies from + :return: policies in the datacenter + """ + response = {} + + policies = self.clc.v2.AntiAffinity.GetAll(location=p['location']) + + for policy in policies: + response[policy.name] = policy + return response + + def _create_policy(self, p): + """ + Create an Anti Affinity Policy using the CLC API. + :param p: datacenter to create policy in + :return: response dictionary from the CLC API. + """ + try: + return self.clc.v2.AntiAffinity.Create( + name=p['name'], + location=p['location']) + except CLCException as ex: + self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format( + p['name'], ex.response_text + )) + + def _delete_policy(self, p): + """ + Delete an Anti Affinity Policy using the CLC API. + :param p: datacenter to delete a policy from + :return: none + """ + try: + policy = self.policy_dict[p['name']] + policy.Delete() + except CLCException as ex: + self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format( + p['name'], ex.response_text + )) + + def _policy_exists(self, policy_name): + """ + Check to see if an Anti Affinity Policy exists + :param policy_name: name of the policy + :return: boolean of if the policy exists + """ + if policy_name in self.policy_dict: + return self.policy_dict.get(policy_name) + + return False + + def _ensure_policy_is_absent(self, p): + """ + Makes sure that a policy is absent + :param p: dictionary of policy name + :return: tuple of if a deletion occurred and the name of the policy that was deleted + """ + changed = False + if self._policy_exists(policy_name=p['name']): + changed = True + if not self.module.check_mode: + self._delete_policy(p) + return changed, None + + def _ensure_policy_is_present(self, p): + """ + Ensures that a policy is present + :param p: dictionary of a policy name + :return: tuple of if an addition occurred and the name of the policy that was added + """ + changed = False + policy = self._policy_exists(policy_name=p['name']) + if not policy: + changed = True + policy = None + if not self.module.check_mode: + policy = self._create_policy(p) + return changed, policy + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + The main function. Instantiates the module and calls process_request. 
+ :return: none + """ + module = AnsibleModule( + argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(), + supports_check_mode=True) + clc_aa_policy = ClcAntiAffinityPolicy(module) + clc_aa_policy.process_request() + +if __name__ == '__main__': + main() diff --git a/cloud/centurylink/clc_alert_policy.py b/cloud/centurylink/clc_alert_policy.py new file mode 100644 index 00000000000..6e8c4618543 --- /dev/null +++ b/cloud/centurylink/clc_alert_policy.py @@ -0,0 +1,541 @@ +#!/usr/bin/python + +# +# Copyright (c) 2015 CenturyLink +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +module: clc_alert_policy +short_description: Create or Delete Alert Policies at CenturyLink Cloud. +description: + - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud. +version_added: "2.0" +options: + alias: + description: + - The alias of your CLC Account + required: True + name: + description: + - The name of the alert policy. This is mutually exclusive with id + required: False + default: None + id: + description: + - The alert policy id. This is mutually exclusive with name + required: False + default: None + alert_recipients: + description: + - A list of recipient email ids to notify the alert. + This is required for state 'present' + required: False + default: None + metric: + description: + - The metric on which to measure the condition that will trigger the alert. + This is required for state 'present' + required: False + default: None + choices: ['cpu','memory','disk'] + duration: + description: + - The length of time in minutes that the condition must exceed the threshold. + This is required for state 'present' + required: False + default: None + threshold: + description: + - The threshold that will trigger the alert when the metric equals or exceeds it. + This is required for state 'present' + This number represents a percentage and must be a value between 5.0 - 95.0 that is a multiple of 5.0 + required: False + default: None + state: + description: + - Whether to create or delete the policy. + required: False + default: present + choices: ['present','absent'] +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME, the account login id for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. 
The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. +''' + +EXAMPLES = ''' +# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples + +--- +- name: Create Alert Policy Example + hosts: localhost + gather_facts: False + connection: local + tasks: + - name: Create an Alert Policy for disk above 80% for 5 minutes + clc_alert_policy: + alias: wfad + name: 'alert for disk > 80%' + alert_recipients: + - test1@centurylink.com + - test2@centurylink.com + metric: 'disk' + duration: '00:05:00' + threshold: 80 + state: present + register: policy + + - name: debug + debug: var=policy + +--- +- name: Delete Alert Policy Example + hosts: localhost + gather_facts: False + connection: local + tasks: + - name: Delete an Alert Policy + clc_alert_policy: + alias: wfad + name: 'alert for disk > 80%' + state: absent + register: policy + + - name: debug + debug: var=policy +''' + +RETURN = ''' +policy: + description: The alert policy information + returned: success + type: dict + sample: + { + "actions": [ + { + "action": "email", + "settings": { + "recipients": [ + "user1@domain.com", + "user1@domain.com" + ] + } + } + ], + "id": "ba54ac54a60d4a4f1ed6d48c1ce240a7", + "links": [ + { + "href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7", + "rel": "self", + "verbs": [ + "GET", + "DELETE", + "PUT" + ] + } + ], + "name": "test_alert", + "triggers": [ + { + "duration": "00:05:00", + "metric": "disk", + "threshold": 80.0 + } + ] + } +''' + +__version__ = '${version}' + +from distutils.version import LooseVersion + +try: + import requests +except ImportError: + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk. 
+# sudo pip install clc-sdk +# +try: + import clc as clc_sdk + from clc import APIFailedResponse +except ImportError: + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + + +class ClcAlertPolicy: + + clc = clc_sdk + module = None + + def __init__(self, module): + """ + Construct module + """ + self.module = module + self.policy_dict = {} + + if not CLC_FOUND: + self.module.fail_json( + msg='clc-python-sdk required for this module') + if not REQUESTS_FOUND: + self.module.fail_json( + msg='requests library is required for this module') + if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + @staticmethod + def _define_module_argument_spec(): + """ + Define the argument spec for the ansible module + :return: argument spec dictionary + """ + argument_spec = dict( + name=dict(default=None), + id=dict(default=None), + alias=dict(required=True, default=None), + alert_recipients=dict(type='list', default=None), + metric=dict( + choices=[ + 'cpu', + 'memory', + 'disk'], + default=None), + duration=dict(type='str', default=None), + threshold=dict(type='int', default=None), + state=dict(default='present', choices=['present', 'absent']) + ) + mutually_exclusive = [ + ['name', 'id'] + ] + return {'argument_spec': argument_spec, + 'mutually_exclusive': mutually_exclusive} + + # Module Behavior Goodness + def process_request(self): + """ + Process the request - Main Code Path + :return: Returns with either an exit_json or fail_json + """ + p = self.module.params + + self._set_clc_credentials_from_env() + self.policy_dict = self._get_alert_policies(p['alias']) + + if p['state'] == 'present': + changed, policy = self._ensure_alert_policy_is_present() + else: + changed, policy = self._ensure_alert_policy_is_absent() + + self.module.exit_json(changed=changed, policy=policy) + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + def _ensure_alert_policy_is_present(self): + """ + Ensures that the alert policy is present + :return: (changed, policy) + changed: A flag representing if anything is modified + policy: the created/updated alert policy + """ + changed = False + p = self.module.params + policy_name = p.get('name') + + if not policy_name: + self.module.fail_json(msg='Policy name is a required') + policy = self._alert_policy_exists(policy_name) + if not policy: + changed = True + policy = None + if not self.module.check_mode: + policy = self._create_alert_policy() + else: + changed_u, policy = self._ensure_alert_policy_is_updated(policy) + if changed_u: + changed = True + return changed, policy + + def 
_ensure_alert_policy_is_absent(self): + """ + Ensures that the alert policy is absent + :return: (changed, None) + changed: A flag representing if anything is modified + """ + changed = False + p = self.module.params + alert_policy_id = p.get('id') + alert_policy_name = p.get('name') + alias = p.get('alias') + if not alert_policy_id and not alert_policy_name: + self.module.fail_json( + msg='Either alert policy id or policy name is required') + if not alert_policy_id and alert_policy_name: + alert_policy_id = self._get_alert_policy_id( + self.module, + alert_policy_name) + if alert_policy_id and alert_policy_id in self.policy_dict: + changed = True + if not self.module.check_mode: + self._delete_alert_policy(alias, alert_policy_id) + return changed, None + + def _ensure_alert_policy_is_updated(self, alert_policy): + """ + Ensures the alert policy is updated if anything is changed in the alert policy configuration + :param alert_policy: the target alert policy + :return: (changed, policy) + changed: A flag representing if anything is modified + policy: the updated the alert policy + """ + changed = False + p = self.module.params + alert_policy_id = alert_policy.get('id') + email_list = p.get('alert_recipients') + metric = p.get('metric') + duration = p.get('duration') + threshold = p.get('threshold') + policy = alert_policy + if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \ + (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \ + (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))): + changed = True + elif email_list: + t_email_list = list( + alert_policy.get('actions')[0].get('settings').get('recipients')) + if set(email_list) != set(t_email_list): + changed = True + if changed and not self.module.check_mode: + policy = self._update_alert_policy(alert_policy_id) + return changed, policy + + def _get_alert_policies(self, alias): + """ + Get the alert policies for account alias by calling the CLC API. + :param alias: the account alias + :return: the alert policies for the account alias + """ + response = {} + + policies = self.clc.v2.API.Call('GET', + '/v2/alertPolicies/%s' + % alias) + + for policy in policies.get('items'): + response[policy.get('id')] = policy + return response + + def _create_alert_policy(self): + """ + Create an alert Policy using the CLC API. + :return: response dictionary from the CLC API. + """ + p = self.module.params + alias = p['alias'] + email_list = p['alert_recipients'] + metric = p['metric'] + duration = p['duration'] + threshold = p['threshold'] + policy_name = p['name'] + arguments = json.dumps( + { + 'name': policy_name, + 'actions': [{ + 'action': 'email', + 'settings': { + 'recipients': email_list + } + }], + 'triggers': [{ + 'metric': metric, + 'duration': duration, + 'threshold': threshold + }] + } + ) + try: + result = self.clc.v2.API.Call( + 'POST', + '/v2/alertPolicies/%s' % alias, + arguments) + except APIFailedResponse as e: + return self.module.fail_json( + msg='Unable to create alert policy "{0}". {1}'.format( + policy_name, str(e.response_text))) + return result + + def _update_alert_policy(self, alert_policy_id): + """ + Update alert policy using the CLC API. + :param alert_policy_id: The clc alert policy id + :return: response dictionary from the CLC API. 
+ """ + p = self.module.params + alias = p['alias'] + email_list = p['alert_recipients'] + metric = p['metric'] + duration = p['duration'] + threshold = p['threshold'] + policy_name = p['name'] + arguments = json.dumps( + { + 'name': policy_name, + 'actions': [{ + 'action': 'email', + 'settings': { + 'recipients': email_list + } + }], + 'triggers': [{ + 'metric': metric, + 'duration': duration, + 'threshold': threshold + }] + } + ) + try: + result = self.clc.v2.API.Call( + 'PUT', '/v2/alertPolicies/%s/%s' % + (alias, alert_policy_id), arguments) + except APIFailedResponse as e: + return self.module.fail_json( + msg='Unable to update alert policy "{0}". {1}'.format( + policy_name, str(e.response_text))) + return result + + def _delete_alert_policy(self, alias, policy_id): + """ + Delete an alert policy using the CLC API. + :param alias : the account alias + :param policy_id: the alert policy id + :return: response dictionary from the CLC API. + """ + try: + result = self.clc.v2.API.Call( + 'DELETE', '/v2/alertPolicies/%s/%s' % + (alias, policy_id), None) + except APIFailedResponse as e: + return self.module.fail_json( + msg='Unable to delete alert policy id "{0}". {1}'.format( + policy_id, str(e.response_text))) + return result + + def _alert_policy_exists(self, policy_name): + """ + Check to see if an alert policy exists + :param policy_name: name of the alert policy + :return: boolean of if the policy exists + """ + result = False + for policy_id in self.policy_dict: + if self.policy_dict.get(policy_id).get('name') == policy_name: + result = self.policy_dict.get(policy_id) + return result + + def _get_alert_policy_id(self, module, alert_policy_name): + """ + retrieves the alert policy id of the account based on the name of the policy + :param module: the AnsibleModule object + :param alert_policy_name: the alert policy name + :return: alert_policy_id: The alert policy id + """ + alert_policy_id = None + for policy_id in self.policy_dict: + if self.policy_dict.get(policy_id).get('name') == alert_policy_name: + if not alert_policy_id: + alert_policy_id = policy_id + else: + return module.fail_json( + msg='multiple alert policies were found with policy name : %s' % alert_policy_name) + return alert_policy_id + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + The main function. Instantiates the module and calls process_request. + :return: none + """ + argument_dict = ClcAlertPolicy._define_module_argument_spec() + module = AnsibleModule(supports_check_mode=True, **argument_dict) + clc_alert_policy = ClcAlertPolicy(module) + clc_alert_policy.process_request() + +from ansible.module_utils.basic import * # pylint: disable=W0614 +if __name__ == '__main__': + main() diff --git a/cloud/centurylink/clc_blueprint_package.py b/cloud/centurylink/clc_blueprint_package.py index 3548944210d..8d4d28d20f8 100644 --- a/cloud/centurylink/clc_blueprint_package.py +++ b/cloud/centurylink/clc_blueprint_package.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' module: clc_blueprint_package short_description: deploys a blue print package on a set of servers in CenturyLink Cloud. 
@@ -55,6 +59,7 @@ - python = 2.7 - requests >= 2.5.0 - clc-sdk +author: "CLC Runner (@clc-runner)" notes: - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud @@ -73,12 +78,24 @@ - name: Deploy package clc_blueprint_package: server_ids: - - UC1WFSDANS01 - - UC1WFSDANS02 + - UC1TEST-SERVER1 + - UC1TEST-SERVER2 package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a package_params: {} ''' +RETURN = ''' +server_ids: + description: The list of server ids that are changed + returned: success + type: list + sample: + [ + "UC1TEST-SERVER1", + "UC1TEST-SERVER2" + ] +''' + __version__ = '${version}' from distutils.version import LooseVersion @@ -203,7 +220,7 @@ def clc_install_package(self, server, package_id, package_params): parameters=package_params) except CLCException as ex: self.module.fail_json(msg='Failed to install package : {0} to server {1}. {2}'.format( - package_id, server.id, ex.response_text + package_id, server.id, ex.message )) return result diff --git a/cloud/centurylink/clc_firewall_policy.py b/cloud/centurylink/clc_firewall_policy.py index b851ea48a44..4ccfe171f21 100644 --- a/cloud/centurylink/clc_firewall_policy.py +++ b/cloud/centurylink/clc_firewall_policy.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' module: clc_firewall_policy short_description: Create/delete/update firewall policies @@ -85,6 +89,7 @@ - python = 2.7 - requests >= 2.5.0 - clc-sdk +author: "CLC Runner (@clc-runner)" notes: - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud @@ -125,7 +130,48 @@ source_account_alias: WFAD location: VA1 state: absent - firewall_policy_id: 'c62105233d7a4231bd2e91b9c791e43e1' + firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1 +''' + +RETURN = ''' +firewall_policy_id: + description: The fire wall policy id + returned: success + type: string + sample: fc36f1bfd47242e488a9c44346438c05 +firewall_policy: + description: The fire wall policy information + returned: success + type: dict + sample: + { + "destination":[ + "10.1.1.0/24", + "10.2.2.0/24" + ], + "destinationAccount":"wfad", + "enabled":true, + "id":"fc36f1bfd47242e488a9c44346438c05", + "links":[ + { + "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05", + "rel":"self", + "verbs":[ + "GET", + "PUT", + "DELETE" + ] + } + ], + "ports":[ + "any" + ], + "source":[ + "10.1.1.0/24", + "10.2.2.0/24" + ], + "status":"active" + } ''' __version__ = '${version}' diff --git a/cloud/centurylink/clc_group.py b/cloud/centurylink/clc_group.py index e6e7267f05e..4c522b7b0ba 100644 --- a/cloud/centurylink/clc_group.py +++ b/cloud/centurylink/clc_group.py @@ -19,6 +19,10 @@ # along with Ansible. 
If not, see # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' module: clc_group short_description: Create/delete Server Groups at Centurylink Cloud @@ -58,11 +62,12 @@ - python = 2.7 - requests >= 2.5.0 - clc-sdk +author: "CLC Runner (@clc-runner)" notes: - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account passwod for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - Alternatively, the module accepts the API token and account alias. The API token can be generated using the CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login @@ -82,13 +87,14 @@ tasks: - name: Create / Verify a Server Group at CenturyLink Cloud clc_group: - name: 'My Cool Server Group' - parent: 'Default Group' + name: My Cool Server Group + parent: Default Group state: present register: clc - name: debug - debug: var=clc + debug: + var: clc # Delete a Server Group @@ -100,18 +106,118 @@ tasks: - name: Delete / Verify Absent a Server Group at CenturyLink Cloud clc_group: - name: 'My Cool Server Group' - parent: 'Default Group' + name: My Cool Server Group + parent: Default Group state: absent register: clc - name: debug - debug: var=clc + debug: + var: clc +''' +RETURN = ''' +group: + description: The group information + returned: success + type: dict + sample: + { + "changeInfo":{ + "createdBy":"service.wfad", + "createdDate":"2015-07-29T18:52:47Z", + "modifiedBy":"service.wfad", + "modifiedDate":"2015-07-29T18:52:47Z" + }, + "customFields":[ + + ], + "description":"test group", + "groups":[ + + ], + "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1", + "links":[ + { + "href":"/v2/groups/wfad", + "rel":"createGroup", + "verbs":[ + "POST" + ] + }, + { + "href":"/v2/servers/wfad", + "rel":"createServer", + "verbs":[ + "POST" + ] + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1", + "rel":"self", + "verbs":[ + "GET", + "PATCH", + "DELETE" + ] + }, + { + "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", + "id":"086ac1dfe0b6411989e8d1b77c4065f0", + "rel":"parentGroup" + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults", + "rel":"defaults", + "verbs":[ + "GET", + "POST" + ] + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing", + "rel":"billing" + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive", + "rel":"archiveGroupAction" + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics", + "rel":"statistics" + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities", + "rel":"upcomingScheduledActivities" + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy", + "rel":"horizontalAutoscalePolicyMapping", + "verbs":[ + "GET", + "PUT", + "DELETE" + ] + }, + { + "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities", + "rel":"scheduledActivities", + "verbs":[ + "GET", + "POST" + ] + } + ], + "locationId":"UC1", + "name":"test group", + "status":"active", + "type":"default" + } ''' __version__ = '${version}' +import os from distutils.version import LooseVersion try: @@ -134,6 +240,8 @@ else: CLC_FOUND = True 
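All of the CLC modules in this patch share the same guarded-import convention seen just above: optional dependencies are imported at load time, availability is recorded in flags such as REQUESTS_FOUND and CLC_FOUND, and the failure is reported through fail_json at run time. Reduced to a minimal sketch (the helper name is an invention for the example):

```python
# Record availability at import time; report failure cleanly at run time,
# so the user sees a message instead of an ImportError traceback.
try:
    import clc as clc_sdk
except ImportError:
    clc_sdk = None

CLC_FOUND = clc_sdk is not None

def check_prereqs(module):
    # `module` is an AnsibleModule instance.
    if not CLC_FOUND:
        module.fail_json(msg='clc-python-sdk required for this module')
```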
+from ansible.module_utils.basic import AnsibleModule + class ClcGroup(object): @@ -178,13 +286,16 @@ def process_request(self): if state == "absent": changed, group, requests = self._ensure_group_is_absent( group_name=group_name, parent_name=parent_name) - + if requests: + self._wait_for_requests_to_complete(requests) else: - changed, group, requests = self._ensure_group_is_present( + changed, group = self._ensure_group_is_present( group_name=group_name, parent_name=parent_name, group_description=group_description) - if requests: - self._wait_for_requests_to_complete(requests) - self.module.exit_json(changed=changed, group=group_name) + try: + group = group.data + except AttributeError: + group = group_name + self.module.exit_json(changed=changed, group=group) @staticmethod def _define_module_argument_spec(): @@ -238,14 +349,16 @@ def _ensure_group_is_absent(self, group_name, parent_name): :return: changed, group """ changed = False - requests = [] + group = [] + results = [] if self._group_exists(group_name=group_name, parent_name=parent_name): if not self.module.check_mode: - request = self._delete_group(group_name) - requests.append(request) + group.append(group_name) + result = self._delete_group(group_name) + results.append(result) changed = True - return changed, group_name, requests + return changed, group, results def _delete_group(self, group_name): """ @@ -257,7 +370,7 @@ def _delete_group(self, group_name): group, parent = self.group_dict.get(group_name) try: response = group.Delete() - except CLCException, ex: + except CLCException as ex: self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format( group_name, ex.response_text )) @@ -281,6 +394,7 @@ def _ensure_group_is_present( parent = parent_name if parent_name is not None else self.root_group.name description = group_description changed = False + group = group_name parent_exists = self._group_exists(group_name=parent, parent_name=None) child_exists = self._group_exists( @@ -292,8 +406,8 @@ def _ensure_group_is_present( changed = False elif parent_exists and not child_exists: if not self.module.check_mode: - self._create_group( - group=group_name, + group = self._create_group( + group=group, parent=parent, description=description) changed = True @@ -303,7 +417,7 @@ def _ensure_group_is_present( parent + " does not exist") - return changed, group_name, None + return changed, group def _create_group(self, group, parent, description): """ @@ -317,10 +431,9 @@ def _create_group(self, group, parent, description): (parent, grandparent) = self.group_dict[parent] try: response = parent.Create(name=group, description=description) - except CLCException, ex: + except CLCException as ex: self.module.fail_json(msg='Failed to create group :{0}. {1}'.format( - group, ex.response_text - )) + group, ex.response_text)) return response def _group_exists(self, group_name, parent_name): @@ -403,6 +516,6 @@ def main(): clc_group = ClcGroup(module) clc_group.process_request() -from ansible.module_utils.basic import * # pylint: disable=W0614 + if __name__ == '__main__': main() diff --git a/cloud/centurylink/clc_loadbalancer.py b/cloud/centurylink/clc_loadbalancer.py index 5847c5b1c00..e159953ba3e 100644 --- a/cloud/centurylink/clc_loadbalancer.py +++ b/cloud/centurylink/clc_loadbalancer.py @@ -19,6 +19,10 @@ # along with Ansible. 
If not, see # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' module: clc_loadbalancer short_description: Create, Delete shared loadbalancers in CenturyLink Cloud. @@ -78,6 +82,21 @@ required: False default: present choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent'] +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME, the account login id for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. ''' EXAMPLES = ''' @@ -94,7 +113,8 @@ location: WA1 port: 443 nodes: - - { 'ipAddress': '10.11.22.123', 'privatePort': 80 } + - ipAddress: 10.11.22.123 + privatePort: 80 state: present - name: Add node to an existing loadbalancer pool @@ -109,7 +129,8 @@ location: WA1 port: 443 nodes: - - { 'ipAddress': '10.11.22.234', 'privatePort': 80 } + - ipAddress: 10.11.22.234 + privatePort: 80 state: nodes_present - name: Remove node from an existing loadbalancer pool @@ -124,7 +145,8 @@ location: WA1 port: 443 nodes: - - { 'ipAddress': '10.11.22.234', 'privatePort': 80 } + - ipAddress: 10.11.22.234 + privatePort: 80 state: nodes_absent - name: Delete LoadbalancerPool @@ -139,7 +161,8 @@ location: WA1 port: 443 nodes: - - { 'ipAddress': '10.11.22.123', 'privatePort': 80 } + - ipAddress: 10.11.22.123 + privatePort: 80 state: port_absent - name: Delete Loadbalancer @@ -154,22 +177,46 @@ location: WA1 port: 443 nodes: - - { 'ipAddress': '10.11.22.123', 'privatePort': 80 } + - ipAddress: 10.11.22.123 + privatePort: 80 state: absent -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
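The notes above describe credential resolution only in prose; the sketch below condenses how these modules actually read the environment, mirroring the _set_clc_credentials_from_env() helper that appears verbatim later in this diff. Note that the code reads CLC_V2_API_PASSWD even though the notes name CLC_V2_API_PASSWORD, and that a token plus account alias takes precedence over username/password. The fail callback is illustrative glue:

import os

def set_clc_credentials_from_env(clc, fail):
    # Read CLC credentials from the environment, as the modules in this diff do.
    env = os.environ
    v2_api_token = env.get('CLC_V2_API_TOKEN', False)
    v2_api_username = env.get('CLC_V2_API_USERNAME', False)
    v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
    clc_alias = env.get('CLC_ACCT_ALIAS', False)
    api_url = env.get('CLC_V2_API_URL', False)

    if api_url:
        # Point the SDK at an alternate CLC environment.
        clc.defaults.ENDPOINT_URL_V2 = api_url

    if v2_api_token and clc_alias:
        # Token plus account alias wins over username/password.
        clc._LOGIN_TOKEN_V2 = v2_api_token
        clc._V2_ENABLED = True
        clc.ALIAS = clc_alias
    elif v2_api_username and v2_api_passwd:
        clc.v2.SetCredentials(api_username=v2_api_username,
                              api_passwd=v2_api_passwd)
    else:
        fail('You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD '
             'environment variables')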
+''' + +RETURN = ''' +loadbalancer: + description: The load balancer result object from CLC + returned: success + type: dict + sample: + { + "description":"test-lb", + "id":"ab5b18cb81e94ab9925b61d1ca043fb5", + "ipAddress":"66.150.174.197", + "links":[ + { + "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5", + "rel":"self", + "verbs":[ + "GET", + "PUT", + "DELETE" + ] + }, + { + "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools", + "rel":"pools", + "verbs":[ + "GET", + "POST" + ] + } + ], + "name":"test-lb", + "pools":[ + + ], + "status":"enabled" + } ''' __version__ = '${version}' @@ -827,8 +874,8 @@ def define_argument_spec(): argument_spec = dict( name=dict(required=True), description=dict(default=None), - location=dict(required=True, default=None), - alias=dict(required=True, default=None), + location=dict(required=True), + alias=dict(required=True), port=dict(choices=[80, 443]), method=dict(choices=['leastConnection', 'roundRobin']), persistence=dict(choices=['standard', 'sticky']), diff --git a/cloud/centurylink/clc_modify_server.py b/cloud/centurylink/clc_modify_server.py index 9683f6835df..d65073daccb 100644 --- a/cloud/centurylink/clc_modify_server.py +++ b/cloud/centurylink/clc_modify_server.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' module: clc_modify_server short_description: modify servers in CenturyLink Cloud. @@ -80,6 +84,7 @@ - python = 2.7 - requests >= 2.5.0 - clc-sdk +author: "CLC Runner (@clc-runner)" notes: - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud @@ -154,11 +159,6 @@ ''' RETURN = ''' -changed: - description: A flag indicating if any change was made or not - returned: success - type: boolean - sample: True server_ids: description: The list of server ids that are changed returned: success diff --git a/cloud/centurylink/clc_publicip.py b/cloud/centurylink/clc_publicip.py index 9879b61fd49..a53aeb79531 100644 --- a/cloud/centurylink/clc_publicip.py +++ b/cloud/centurylink/clc_publicip.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' module: clc_publicip short_description: Add and Delete public ips on servers in CenturyLink Cloud. @@ -27,22 +31,23 @@ version_added: "2.0" options: protocol: - descirption: + description: - The protocol that the public IP will listen for. default: TCP choices: ['TCP', 'UDP', 'ICMP'] required: False ports: description: - - A list of ports to expose. - required: True + - A list of ports to expose. This is required when state is 'present' + required: False + default: None server_ids: description: - A list of servers to create public ips on. required: True state: description: - - Determine wheteher to create or delete public IPs. If present module will not create a second public ip if one + - Determine whether to create or delete public IPs. If present module will not create a second public ip if one already exists. 
default: present choices: ['present', 'absent'] @@ -57,11 +62,12 @@ - python = 2.7 - requests >= 2.5.0 - clc-sdk +author: "CLC Runner (@clc-runner)" notes: - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account passwod for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - Alternatively, the module accepts the API token and account alias. The API token can be generated using the CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login @@ -79,17 +85,18 @@ tasks: - name: Create Public IP For Servers clc_publicip: - protocol: 'TCP' + protocol: TCP ports: - - 80 + - 80 server_ids: - - UC1ACCTSRVR01 - - UC1ACCTSRVR02 + - UC1TEST-SVR01 + - UC1TEST-SVR02 state: present register: clc - name: debug - debug: var=clc + debug: + var: clc - name: Delete Public IP from Server hosts: localhost @@ -99,17 +106,31 @@ - name: Create Public IP For Servers clc_publicip: server_ids: - - UC1ACCTSRVR01 - - UC1ACCTSRVR02 + - UC1TEST-SVR01 + - UC1TEST-SVR02 state: absent register: clc - name: debug - debug: var=clc + debug: + var: clc +''' + +RETURN = ''' +server_ids: + description: The list of server ids that are changed + returned: success + type: list + sample: + [ + "UC1TEST-SVR01", + "UC1TEST-SVR02" + ] ''' __version__ = '${version}' +import os from distutils.version import LooseVersion try: @@ -132,11 +153,12 @@ else: CLC_FOUND = True +from ansible.module_utils.basic import AnsibleModule + class ClcPublicIp(object): clc = clc_sdk module = None - group_dict = {} def __init__(self, module): """ @@ -158,7 +180,6 @@ def __init__(self, module): def process_request(self): """ Process the request - Main Code Path - :param params: dictionary of module parameters :return: Returns with either an exit_json or fail_json """ self._set_clc_credentials_from_env() @@ -167,21 +188,18 @@ def process_request(self): ports = params['ports'] protocol = params['protocol'] state = params['state'] - requests = [] - chagned_server_ids = [] - changed = False if state == 'present': - changed, chagned_server_ids, requests = self.ensure_public_ip_present( + changed, changed_server_ids, requests = self.ensure_public_ip_present( server_ids=server_ids, protocol=protocol, ports=ports) elif state == 'absent': - changed, chagned_server_ids, requests = self.ensure_public_ip_absent( + changed, changed_server_ids, requests = self.ensure_public_ip_absent( server_ids=server_ids) else: return self.module.fail_json(msg="Unknown State: " + state) self._wait_for_requests_to_complete(requests) return self.module.exit_json(changed=changed, - server_ids=chagned_server_ids) + server_ids=changed_server_ids) @staticmethod def _define_module_argument_spec(): @@ -192,7 +210,7 @@ def _define_module_argument_spec(): argument_spec = dict( server_ids=dict(type='list', required=True), protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']), - ports=dict(type='list', required=True), + ports=dict(type='list'), wait=dict(type='bool', default=True), state=dict(default='present', choices=['present', 'absent']), ) @@ -232,7 +250,7 @@ def _add_publicip_to_server(self, server, ports_to_expose): result = None try: result = server.PublicIPs().Add(ports_to_expose) - except CLCException, ex: + except CLCException as ex: 
self.module.fail_json(msg='Failed to add public ip to the server : {0}. {1}'.format( server.id, ex.response_text )) @@ -265,10 +283,11 @@ def ensure_public_ip_absent(self, server_ids): return changed, changed_server_ids, results def _remove_publicip_from_server(self, server): + result = None try: for ip_address in server.PublicIPs().public_ips: result = ip_address.Delete() - except CLCException, ex: + except CLCException as ex: self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format( server.id, ex.response_text )) @@ -348,6 +367,6 @@ def main(): clc_public_ip = ClcPublicIp(module) clc_public_ip.process_request() -from ansible.module_utils.basic import * # pylint: disable=W0614 + if __name__ == '__main__': main() diff --git a/cloud/centurylink/clc_server.py b/cloud/centurylink/clc_server.py index d2329465f4a..721582cc33c 100644 --- a/cloud/centurylink/clc_server.py +++ b/cloud/centurylink/clc_server.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' module: clc_server short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud. @@ -193,7 +197,7 @@ - The template to use for server creation. Will search for a template if a partial string is provided. This is required when state is 'present' default: None - required: false + required: False ttl: description: - The time to live for the server in seconds. The server will be deleted when this time expires. @@ -204,7 +208,20 @@ - The type of server to create. default: 'standard' required: False - choices: ['standard', 'hyperscale'] + choices: ['standard', 'hyperscale', 'bareMetal'] + configuration_id: + description: + - Only required for bare metal servers. + Specifies the identifier for the specific configuration type of bare metal server to deploy. + default: None + required: False + os_type: + description: + - Only required for bare metal servers. + Specifies the OS to provision with the bare metal server. + default: None + required: False + choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit'] wait: description: - Whether to wait for the provisioning tasks to finish before returning. 
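The wait option above is implemented the same way across these CLC modules: queued request objects are collected and, unless wait is false, each one is polled to completion before exit_json. A minimal sketch of that contract, modeled on the _wait_for_requests_to_complete() helpers elsewhere in this diff (the generic failure message is illustrative):

def wait_for_requests_to_complete(module, request_list):
    # Block until every queued CLC request finishes; skip entirely when the
    # user passed wait: False.
    if not module.params['wait']:
        return
    for request in request_list:
        request.WaitUntilComplete()
        for request_details in request.requests:
            if request_details.Status() != 'succeeded':
                module.fail_json(msg='Unable to process server request')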
@@ -215,6 +232,7 @@ - python = 2.7 - requests >= 2.5.0 - clc-sdk +author: "CLC Runner (@clc-runner)" notes: - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud @@ -235,7 +253,7 @@ name: test template: ubuntu-14-64 count: 1 - group: 'Default Group' + group: Default Group state: present - name: Ensure 'Default Group' has exactly 5 servers @@ -243,25 +261,206 @@ name: test template: ubuntu-14-64 exact_count: 5 - count_group: 'Default Group' - group: 'Default Group' + count_group: Default Group + group: Default Group - name: Stop a Server clc_server: - server_ids: ['UC1ACCTTEST01'] + server_ids: + - UC1ACCT-TEST01 state: stopped - name: Start a Server clc_server: - server_ids: ['UC1ACCTTEST01'] + server_ids: + - UC1ACCT-TEST01 state: started - name: Delete a Server clc_server: - server_ids: ['UC1ACCTTEST01'] + server_ids: + - UC1ACCT-TEST01 state: absent ''' +RETURN = ''' +server_ids: + description: The list of server ids that are created + returned: success + type: list + sample: + [ + "UC1TEST-SVR01", + "UC1TEST-SVR02" + ] +partially_created_server_ids: + description: The list of server ids that are partially created + returned: success + type: list + sample: + [ + "UC1TEST-SVR01", + "UC1TEST-SVR02" + ] +servers: + description: The list of server objects returned from CLC + returned: success + type: list + sample: + [ + { + "changeInfo":{ + "createdBy":"service.wfad", + "createdDate":1438196820, + "modifiedBy":"service.wfad", + "modifiedDate":1438196820 + }, + "description":"test-server", + "details":{ + "alertPolicies":[ + + ], + "cpu":1, + "customFields":[ + + ], + "diskCount":3, + "disks":[ + { + "id":"0:0", + "partitionPaths":[ + + ], + "sizeGB":1 + }, + { + "id":"0:1", + "partitionPaths":[ + + ], + "sizeGB":2 + }, + { + "id":"0:2", + "partitionPaths":[ + + ], + "sizeGB":14 + } + ], + "hostName":"", + "inMaintenanceMode":false, + "ipAddresses":[ + { + "internal":"10.1.1.1" + } + ], + "memoryGB":1, + "memoryMB":1024, + "partitions":[ + + ], + "powerState":"started", + "snapshots":[ + + ], + "storageGB":17 + }, + "groupId":"086ac1dfe0b6411989e8d1b77c4065f0", + "id":"test-server", + "ipaddress":"10.120.45.23", + "isTemplate":false, + "links":[ + { + "href":"/v2/servers/wfad/test-server", + "id":"test-server", + "rel":"self", + "verbs":[ + "GET", + "PATCH", + "DELETE" + ] + }, + { + "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", + "id":"086ac1dfe0b6411989e8d1b77c4065f0", + "rel":"group" + }, + { + "href":"/v2/accounts/wfad", + "id":"wfad", + "rel":"account" + }, + { + "href":"/v2/billing/wfad/serverPricing/test-server", + "rel":"billing" + }, + { + "href":"/v2/servers/wfad/test-server/publicIPAddresses", + "rel":"publicIPAddresses", + "verbs":[ + "POST" + ] + }, + { + "href":"/v2/servers/wfad/test-server/credentials", + "rel":"credentials" + }, + { + "href":"/v2/servers/wfad/test-server/statistics", + "rel":"statistics" + }, + { + "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities", + "rel":"upcomingScheduledActivities" + }, + { + "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities", + "rel":"scheduledActivities", + "verbs":[ + "GET", + "POST" + ] + }, + { + "href":"/v2/servers/wfad/test-server/capabilities", + "rel":"capabilities" + }, + { + "href":"/v2/servers/wfad/test-server/alertPolicies", + "rel":"alertPolicyMappings", + "verbs":[ + "POST" + ] + }, + { + "href":"/v2/servers/wfad/test-server/antiAffinityPolicy", + 
"rel":"antiAffinityPolicyMapping", + "verbs":[ + "PUT", + "DELETE" + ] + }, + { + "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy", + "rel":"cpuAutoscalePolicyMapping", + "verbs":[ + "PUT", + "DELETE" + ] + } + ], + "locationId":"UC1", + "name":"test-server", + "os":"ubuntu14_64Bit", + "osType":"Ubuntu 14 64-bit", + "status":"active", + "storageType":"standard", + "type":"standard" + } + ] +''' + __version__ = '${version}' from time import sleep @@ -361,7 +560,7 @@ def process_request(self): elif state == 'present': # Changed is always set to true when provisioning new instances - if not p.get('template'): + if not p.get('template') and p.get('type') != 'bareMetal': return self.module.fail_json( msg='template parameter is required for new instance') @@ -406,7 +605,7 @@ def _define_module_argument_spec(): choices=[ 'standard', 'hyperscale']), - type=dict(default='standard', choices=['standard', 'hyperscale']), + type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']), primary_dns=dict(default=None), secondary_dns=dict(default=None), additional_disks=dict(type='list', default=[]), @@ -440,6 +639,14 @@ def _define_module_argument_spec(): 'UDP', 'ICMP']), public_ip_ports=dict(type='list', default=[]), + configuration_id=dict(default=None), + os_type=dict(default=None, + choices=[ + 'redHat6_64Bit', + 'centOS6_64Bit', + 'windows2012R2Standard_64Bit', + 'ubuntu14_64Bit' + ]), wait=dict(type='bool', default=True)) mutually_exclusive = [ @@ -462,7 +669,6 @@ def _set_clc_credentials_from_env(self): v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) clc_alias = env.get('CLC_ACCT_ALIAS', False) api_url = env.get('CLC_V2_API_URL', False) - if api_url: self.clc.defaults.ENDPOINT_URL_V2 = api_url @@ -520,9 +726,12 @@ def _find_datacenter(clc, module): """ location = module.params.get('location') try: - datacenter = clc.v2.Datacenter(location) - return datacenter - except CLCException: + if not location: + account = clc.v2.Account() + location = account.data.get('primaryDataCenter') + data_center = clc.v2.Datacenter(location) + return data_center + except CLCException as ex: module.fail_json( msg=str( "Unable to find location: {0}".format(location))) @@ -668,9 +877,10 @@ def _find_template_id(module, datacenter): """ lookup_template = module.params.get('template') state = module.params.get('state') + type = module.params.get('type') result = None - if state == 'present': + if state == 'present' and type != 'bareMetal': try: result = datacenter.Templates().Search(lookup_template)[0].id except CLCException: @@ -793,7 +1003,9 @@ def _create_servers(self, module, clc, override_count=None): 'source_server_password': p.get('source_server_password'), 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'), 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'), - 'packages': p.get('packages') + 'packages': p.get('packages'), + 'configuration_id': p.get('configuration_id'), + 'os_type': p.get('os_type') } count = override_count if override_count else p.get('count') @@ -1124,7 +1336,12 @@ def _change_server_power_state(module, server, state): if state == 'started': result = server.PowerOn() else: - result = server.PowerOff() + # Try to shut down the server and fall back to power off when unable to shut down. 
+ result = server.ShutDown() + if result and hasattr(result, 'requests') and result.requests[0]: + return result + else: + result = server.PowerOff() except CLCException: module.fail_json( msg='Unable to change power state for server {0}'.format( @@ -1251,7 +1468,9 @@ def _create_clc_server( 'customFields': server_params.get('custom_fields'), 'additionalDisks': server_params.get('additional_disks'), 'ttl': server_params.get('ttl'), - 'packages': server_params.get('packages')})) + 'packages': server_params.get('packages'), + 'configurationId': server_params.get('configuration_id'), + 'osType': server_params.get('os_type')})) result = clc.v2.Requests(res) except APIFailedResponse as ex: diff --git a/cloud/centurylink/clc_server_snapshot.py b/cloud/centurylink/clc_server_snapshot.py new file mode 100644 index 00000000000..e176f2d779f --- /dev/null +++ b/cloud/centurylink/clc_server_snapshot.py @@ -0,0 +1,417 @@ +#!/usr/bin/python + +# +# Copyright (c) 2015 CenturyLink +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +module: clc_server_snapshot +short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud. +description: + - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud. +version_added: "2.0" +options: + server_ids: + description: + - The list of CLC server Ids. + required: True + expiration_days: + description: + - The number of days to keep the server snapshot before it expires. + default: 7 + required: False + state: + description: + - The state to ensure that the provided resources are in. + default: 'present' + required: False + choices: ['present', 'absent', 'restore'] + wait: + description: + - Whether to wait for the provisioning tasks to finish before returning. + default: True + required: False + choices: [True, False] +requirements: + - python = 2.7 + - requests >= 2.5.0 + - clc-sdk +author: "CLC Runner (@clc-runner)" +notes: + - To use this module, it is required to set the below environment variables which enables access to the + Centurylink Cloud + - CLC_V2_API_USERNAME, the account login id for the centurylink cloud + - CLC_V2_API_PASSWORD, the account password for the centurylink cloud + - Alternatively, the module accepts the API token and account alias. The API token can be generated using the + CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login + - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login + - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud + - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
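Before the implementation that follows, it may help to see how this module's three states map onto clc-sdk server calls. The sketch below compresses the ensure_* methods shown later into one decision; it is drawn from that code, and the standalone function itself is illustrative rather than part of the module:

def snapshot_action(server, state, expiration_days=7):
    # Idempotency first: act only when the server is not already in the
    # desired state, exactly as the ensure_* methods below filter servers.
    has_snapshot = len(server.GetSnapshots()) > 0
    if state == 'present' and not has_snapshot:
        return server.CreateSnapshot(delete_existing=True,
                                     expiration_days=expiration_days)
    if state == 'absent' and has_snapshot:
        return server.DeleteSnapshot()
    if state == 'restore' and has_snapshot:
        return server.RestoreSnapshot()
    return None  # nothing to do; the module reports changed=False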
+''' + +EXAMPLES = ''' +# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples + +- name: Create server snapshot + clc_server_snapshot: + server_ids: + - UC1TEST-SVR01 + - UC1TEST-SVR02 + expiration_days: 10 + wait: True + state: present + +- name: Restore server snapshot + clc_server_snapshot: + server_ids: + - UC1TEST-SVR01 + - UC1TEST-SVR02 + wait: True + state: restore + +- name: Delete server snapshot + clc_server_snapshot: + server_ids: + - UC1TEST-SVR01 + - UC1TEST-SVR02 + wait: True + state: absent +''' + +RETURN = ''' +server_ids: + description: The list of server ids that are changed + returned: success + type: list + sample: + [ + "UC1TEST-SVR01", + "UC1TEST-SVR02" + ] +''' + +__version__ = '${version}' + +from distutils.version import LooseVersion + +try: + import requests +except ImportError: + REQUESTS_FOUND = False +else: + REQUESTS_FOUND = True + +# +# Requires the clc-python-sdk. +# sudo pip install clc-sdk +# +try: + import clc as clc_sdk + from clc import CLCException +except ImportError: + CLC_FOUND = False + clc_sdk = None +else: + CLC_FOUND = True + + +class ClcSnapshot: + + clc = clc_sdk + module = None + + def __init__(self, module): + """ + Construct module + """ + self.module = module + + if not CLC_FOUND: + self.module.fail_json( + msg='clc-python-sdk required for this module') + if not REQUESTS_FOUND: + self.module.fail_json( + msg='requests library is required for this module') + if requests.__version__ and LooseVersion( + requests.__version__) < LooseVersion('2.5.0'): + self.module.fail_json( + msg='requests library version should be >= 2.5.0') + + self._set_user_agent(self.clc) + + def process_request(self): + """ + Process the request - Main Code Path + :return: Returns with either an exit_json or fail_json + """ + p = self.module.params + server_ids = p['server_ids'] + expiration_days = p['expiration_days'] + state = p['state'] + request_list = [] + changed = False + changed_servers = [] + + self._set_clc_credentials_from_env() + if state == 'present': + changed, request_list, changed_servers = self.ensure_server_snapshot_present( + server_ids=server_ids, + expiration_days=expiration_days) + elif state == 'absent': + changed, request_list, changed_servers = self.ensure_server_snapshot_absent( + server_ids=server_ids) + elif state == 'restore': + changed, request_list, changed_servers = self.ensure_server_snapshot_restore( + server_ids=server_ids) + + self._wait_for_requests_to_complete(request_list) + return self.module.exit_json( + changed=changed, + server_ids=changed_servers) + + def ensure_server_snapshot_present(self, server_ids, expiration_days): + """ + Ensures the given set of server_ids have the snapshots created + :param server_ids: The list of server_ids to create the snapshot + :param expiration_days: The number of days to keep the snapshot + :return: (changed, request_list, changed_servers) + changed: A flag indicating whether any change was made + request_list: the list of clc request objects from CLC API call + changed_servers: The list of servers ids that are modified + """ + request_list = [] + changed = False + servers = self._get_servers_from_clc( + server_ids, + 'Failed to obtain server list from the CLC API') + servers_to_change = [ + server for server in servers if len( + server.GetSnapshots()) == 0] + for server in servers_to_change: + changed = True + if not self.module.check_mode: + request = self._create_server_snapshot(server, expiration_days) + 
request_list.append(request) + changed_servers = [ + server.id for server in servers_to_change if server.id] + return changed, request_list, changed_servers + + def _create_server_snapshot(self, server, expiration_days): + """ + Create the snapshot for the CLC server + :param server: the CLC server object + :param expiration_days: The number of days to keep the snapshot + :return: the create request object from CLC API Call + """ + result = None + try: + result = server.CreateSnapshot( + delete_existing=True, + expiration_days=expiration_days) + except CLCException as ex: + self.module.fail_json(msg='Failed to create snapshot for server : {0}. {1}'.format( + server.id, ex.response_text + )) + return result + + def ensure_server_snapshot_absent(self, server_ids): + """ + Ensures the given set of server_ids have the snapshots removed + :param server_ids: The list of server_ids to delete the snapshot + :return: (changed, request_list, changed_servers) + changed: A flag indicating whether any change was made + request_list: the list of clc request objects from CLC API call + changed_servers: The list of servers ids that are modified + """ + request_list = [] + changed = False + servers = self._get_servers_from_clc( + server_ids, + 'Failed to obtain server list from the CLC API') + servers_to_change = [ + server for server in servers if len( + server.GetSnapshots()) > 0] + for server in servers_to_change: + changed = True + if not self.module.check_mode: + request = self._delete_server_snapshot(server) + request_list.append(request) + changed_servers = [ + server.id for server in servers_to_change if server.id] + return changed, request_list, changed_servers + + def _delete_server_snapshot(self, server): + """ + Delete snapshot for the CLC server + :param server: the CLC server object + :return: the delete snapshot request object from CLC API + """ + result = None + try: + result = server.DeleteSnapshot() + except CLCException as ex: + self.module.fail_json(msg='Failed to delete snapshot for server : {0}. {1}'.format( + server.id, ex.response_text + )) + return result + + def ensure_server_snapshot_restore(self, server_ids): + """ + Ensures the given set of server_ids have the snapshots restored + :param server_ids: The list of server_ids to delete the snapshot + :return: (changed, request_list, changed_servers) + changed: A flag indicating whether any change was made + request_list: the list of clc request objects from CLC API call + changed_servers: The list of servers ids that are modified + """ + request_list = [] + changed = False + servers = self._get_servers_from_clc( + server_ids, + 'Failed to obtain server list from the CLC API') + servers_to_change = [ + server for server in servers if len( + server.GetSnapshots()) > 0] + for server in servers_to_change: + changed = True + if not self.module.check_mode: + request = self._restore_server_snapshot(server) + request_list.append(request) + changed_servers = [ + server.id for server in servers_to_change if server.id] + return changed, request_list, changed_servers + + def _restore_server_snapshot(self, server): + """ + Restore snapshot for the CLC server + :param server: the CLC server object + :return: the restore snapshot request object from CLC API + """ + result = None + try: + result = server.RestoreSnapshot() + except CLCException as ex: + self.module.fail_json(msg='Failed to restore snapshot for server : {0}. 
{1}'.format( + server.id, ex.response_text + )) + return result + + def _wait_for_requests_to_complete(self, requests_lst): + """ + Waits until the CLC requests are complete if the wait argument is True + :param requests_lst: The list of CLC request objects + :return: none + """ + if not self.module.params['wait']: + return + for request in requests_lst: + request.WaitUntilComplete() + for request_details in request.requests: + if request_details.Status() != 'succeeded': + self.module.fail_json( + msg='Unable to process server snapshot request') + + @staticmethod + def define_argument_spec(): + """ + This function defines the dictionary object required for + package module + :return: the package dictionary object + """ + argument_spec = dict( + server_ids=dict(type='list', required=True), + expiration_days=dict(default=7), + wait=dict(default=True), + state=dict( + default='present', + choices=[ + 'present', + 'absent', + 'restore']), + ) + return argument_spec + + def _get_servers_from_clc(self, server_list, message): + """ + Internal function to fetch list of CLC server objects from a list of server ids + :param server_list: The list of server ids + :param message: The error message to throw in case of any error + :return the list of CLC server objects + """ + try: + return self.clc.v2.Servers(server_list).servers + except CLCException as ex: + return self.module.fail_json(msg=message + ': %s' % ex) + + def _set_clc_credentials_from_env(self): + """ + Set the CLC Credentials on the sdk by reading environment variables + :return: none + """ + env = os.environ + v2_api_token = env.get('CLC_V2_API_TOKEN', False) + v2_api_username = env.get('CLC_V2_API_USERNAME', False) + v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) + clc_alias = env.get('CLC_ACCT_ALIAS', False) + api_url = env.get('CLC_V2_API_URL', False) + + if api_url: + self.clc.defaults.ENDPOINT_URL_V2 = api_url + + if v2_api_token and clc_alias: + self.clc._LOGIN_TOKEN_V2 = v2_api_token + self.clc._V2_ENABLED = True + self.clc.ALIAS = clc_alias + elif v2_api_username and v2_api_passwd: + self.clc.v2.SetCredentials( + api_username=v2_api_username, + api_passwd=v2_api_passwd) + else: + return self.module.fail_json( + msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " + "environment variables") + + @staticmethod + def _set_user_agent(clc): + if hasattr(clc, 'SetRequestsSession'): + agent_string = "ClcAnsibleModule/" + __version__ + ses = requests.Session() + ses.headers.update({"Api-Client": agent_string}) + ses.headers['User-Agent'] += " " + agent_string + clc.SetRequestsSession(ses) + + +def main(): + """ + Main function + :return: None + """ + module = AnsibleModule( + argument_spec=ClcSnapshot.define_argument_spec(), + supports_check_mode=True + ) + clc_snapshot = ClcSnapshot(module) + clc_snapshot.process_request() + +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_account.py b/cloud/cloudstack/cs_account.py index 1ce6fdde88f..0074ad29ca3 100644 --- a/cloud/cloudstack/cs_account.py +++ b/cloud/cloudstack/cs_account.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cs_account @@ -85,9 +89,10 @@ state: description: - State of the account. + - C(unlocked) is an alias for C(enabled). 
required: false default: 'present' - choices: [ 'present', 'absent', 'enabled', 'disabled', 'locked' ] + choices: [ 'present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked' ] poll_async: description: - Poll async jobs until job has finished. @@ -171,12 +176,6 @@ sample: ROOT ''' -try: - from cs import CloudStack, CloudStackException, read_config - has_lib_cs = True -except ImportError: - has_lib_cs = False - # import cloudstack common from ansible.module_utils.cloudstack import * @@ -195,39 +194,38 @@ def __init__(self, module): 'domain_admin': 2, } - def get_account_type(self): account_type = self.module.params.get('account_type') return self.account_types[account_type] - def get_account(self): if not self.account: - args = {} - args['listall'] = True - args['domainid'] = self.get_domain('id') + args = { + 'listall': True, + 'domainid': self.get_domain(key='id'), + } accounts = self.cs.listAccounts(**args) if accounts: account_name = self.module.params.get('name') for a in accounts['account']: - if account_name in [ a['name'] ]: + if account_name == a['name']: self.account = a break return self.account - def enable_account(self): account = self.get_account() if not account: - self.module.fail_json(msg="Failed: account not present") + account = self.present_account() if account['state'].lower() != 'enabled': self.result['changed'] = True - args = {} - args['id'] = account['id'] - args['account'] = self.module.params.get('name') - args['domainid'] = self.get_domain('id') + args = { + 'id': account['id'], + 'account': self.module.params.get('name'), + 'domainid': self.get_domain(key='id') + } if not self.module.check_mode: res = self.cs.enableAccount(**args) if 'errortext' in res: @@ -235,32 +233,30 @@ def enable_account(self): account = res['account'] return account - def lock_account(self): return self.lock_or_disable_account(lock=True) - def disable_account(self): return self.lock_or_disable_account() - def lock_or_disable_account(self, lock=False): account = self.get_account() if not account: - self.module.fail_json(msg="Failed: account not present") + account = self.present_account() # we need to enable the account to lock it. 
if lock and account['state'].lower() == 'disabled': account = self.enable_account() - if lock and account['state'].lower() != 'locked' \ - or not lock and account['state'].lower() != 'disabled': + if (lock and account['state'].lower() != 'locked' or + not lock and account['state'].lower() != 'disabled'): self.result['changed'] = True - args = {} - args['id'] = account['id'] - args['account'] = self.module.params.get('name') - args['domainid'] = self.get_domain('id') - args['lock'] = lock + args = { + 'id': account['id'], + 'account': self.module.params.get('name'), + 'domainid': self.get_domain(key='id'), + 'lock': lock, + } if not self.module.check_mode: account = self.cs.disableAccount(**args) @@ -269,47 +265,36 @@ def lock_or_disable_account(self, lock=False): poll_async = self.module.params.get('poll_async') if poll_async: - account = self._poll_job(account, 'account') + account = self.poll_job(account, 'account') return account - def present_account(self): - missing_params = [] - - if not self.module.params.get('email'): - missing_params.append('email') - - if not self.module.params.get('username'): - missing_params.append('username') - - if not self.module.params.get('password'): - missing_params.append('password') - - if not self.module.params.get('first_name'): - missing_params.append('first_name') - - if not self.module.params.get('last_name'): - missing_params.append('last_name') - - if missing_params: - self.module.fail_json(msg="missing required arguments: %s" % ','.join(missing_params)) + required_params = [ + 'email', + 'username', + 'password', + 'first_name', + 'last_name', + ] + self.module.fail_on_missing_params(required_params=required_params) account = self.get_account() if not account: self.result['changed'] = True - args = {} - args['account'] = self.module.params.get('name') - args['domainid'] = self.get_domain('id') - args['accounttype'] = self.get_account_type() - args['networkdomain'] = self.module.params.get('network_domain') - args['username'] = self.module.params.get('username') - args['password'] = self.module.params.get('password') - args['firstname'] = self.module.params.get('first_name') - args['lastname'] = self.module.params.get('last_name') - args['email'] = self.module.params.get('email') - args['timezone'] = self.module.params.get('timezone') + args = { + 'account': self.module.params.get('name'), + 'domainid': self.get_domain(key='id'), + 'accounttype': self.get_account_type(), + 'networkdomain': self.module.params.get('network_domain'), + 'username': self.module.params.get('username'), + 'password': self.module.params.get('password'), + 'firstname': self.module.params.get('first_name'), + 'lastname': self.module.params.get('last_name'), + 'email': self.module.params.get('email'), + 'timezone': self.module.params.get('timezone') + } if not self.module.check_mode: res = self.cs.createAccount(**args) if 'errortext' in res: @@ -317,7 +302,6 @@ def present_account(self): account = res['account'] return account - def absent_account(self): account = self.get_account() if account: @@ -326,20 +310,19 @@ def absent_account(self): if not self.module.check_mode: res = self.cs.deleteAccount(id=account['id']) - if 'errortext' in account: + if 'errortext' in res: self.module.fail_json(msg="Failed: '%s'" % res['errortext']) poll_async = self.module.params.get('poll_async') if poll_async: - res = self._poll_job(res, 'account') + self.poll_job(res, 'account') return account - def get_result(self, account): super(AnsibleCloudStackAccount, self).get_result(account) if 
account: if 'accounttype' in account: - for key,value in self.account_types.items(): + for key, value in self.account_types.items(): if value == account['accounttype']: self.result['account_type'] = key break @@ -347,35 +330,28 @@ def get_result(self, account): def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked'], default='present'), + account_type=dict(choices=['user', 'root_admin', 'domain_admin'], default='user'), + network_domain=dict(default=None), + domain=dict(default='ROOT'), + email=dict(default=None), + first_name=dict(default=None), + last_name=dict(default=None), + username=dict(default=None), + password=dict(default=None, no_log=True), + timezone=dict(default=None), + poll_async=dict(type='bool', default=True), + )) + module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - state = dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked' ], default='present'), - account_type = dict(choices=['user', 'root_admin', 'domain_admin'], default='user'), - network_domain = dict(default=None), - domain = dict(default='ROOT'), - email = dict(default=None), - first_name = dict(default=None), - last_name = dict(default=None), - username = dict(default=None), - password = dict(default=None), - timezone = dict(default=None), - poll_async = dict(choices=BOOLEANS, default=True), - api_key = dict(default=None), - api_secret = dict(default=None, no_log=True), - api_url = dict(default=None), - api_http_method = dict(choices=['get', 'post'], default='get'), - api_timeout = dict(type='int', default=10), - ), - required_together = ( - ['api_key', 'api_secret', 'api_url'], - ), + argument_spec=argument_spec, + required_together=cs_required_together(), supports_check_mode=True ) - if not has_lib_cs: - module.fail_json(msg="python library cs required: pip install cs") - try: acs_acc = AnsibleCloudStackAccount(module) @@ -384,7 +360,7 @@ def main(): if state in ['absent']: account = acs_acc.absent_account() - elif state in ['enabled']: + elif state in ['enabled', 'unlocked']: account = acs_acc.enable_account() elif state in ['disabled']: @@ -398,7 +374,7 @@ def main(): result = acs_acc.get_result(account) - except CloudStackException, e: + except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) diff --git a/cloud/cloudstack/cs_affinitygroup.py b/cloud/cloudstack/cs_affinitygroup.py index 40b764aa8ef..a9c71c42b0c 100644 --- a/cloud/cloudstack/cs_affinitygroup.py +++ b/cloud/cloudstack/cs_affinitygroup.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cs_affinitygroup @@ -57,6 +61,11 @@ - Account the affinity group is related to. required: false default: null + project: + description: + - Name of the project the affinity group is related to. + required: false + default: null poll_async: description: - Poll async jobs until job has finished. @@ -101,14 +110,23 @@ returned: success type: string sample: host anti-affinity +project: + description: Name of project the affinity group is related to. + returned: success + type: string + sample: Production +domain: + description: Domain the affinity group is related to. 
+ returned: success + type: string + sample: example domain +account: + description: Account the affinity group is related to. + returned: success + type: string + sample: example account ''' -try: - from cs import CloudStack, CloudStackException, read_config - has_lib_cs = True -except ImportError: - has_lib_cs = False - # import cloudstack common from ansible.module_utils.cloudstack import * @@ -122,24 +140,20 @@ def __init__(self, module): } self.affinity_group = None - def get_affinity_group(self): if not self.affinity_group: - affinity_group = self.module.params.get('name') - - args = {} - args['account'] = self.get_account('name') - args['domainid'] = self.get_domain('id') + args = { + 'projectid': self.get_project(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'name': self.module.params.get('name'), + } affinity_groups = self.cs.listAffinityGroups(**args) if affinity_groups: - for a in affinity_groups['affinitygroup']: - if affinity_group in [ a['name'], a['id'] ]: - self.affinity_group = a - break + self.affinity_group = affinity_groups['affinitygroup'][0] return self.affinity_group - def get_affinity_type(self): affinity_type = self.module.params.get('affinty_type') @@ -153,19 +167,19 @@ def get_affinity_type(self): return a['type'] self.module.fail_json(msg="affinity group type '%s' not found" % affinity_type) - def create_affinity_group(self): affinity_group = self.get_affinity_group() if not affinity_group: self.result['changed'] = True - args = {} - args['name'] = self.module.params.get('name') - args['type'] = self.get_affinity_type() - args['description'] = self.module.params.get('description') - args['account'] = self.get_account('name') - args['domainid'] = self.get_domain('id') - + args = { + 'name': self.module.params.get('name'), + 'type': self.get_affinity_type(), + 'description': self.module.params.get('description'), + 'projectid': self.get_project(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + } if not self.module.check_mode: res = self.cs.createAffinityGroup(**args) @@ -174,20 +188,20 @@ def create_affinity_group(self): poll_async = self.module.params.get('poll_async') if res and poll_async: - affinity_group = self._poll_job(res, 'affinitygroup') + affinity_group = self.poll_job(res, 'affinitygroup') return affinity_group - def remove_affinity_group(self): affinity_group = self.get_affinity_group() if affinity_group: self.result['changed'] = True - args = {} - args['name'] = self.module.params.get('name') - args['account'] = self.get_account('name') - args['domainid'] = self.get_domain('id') - + args = { + 'name': self.module.params.get('name'), + 'projectid': self.get_project(key='id'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + } if not self.module.check_mode: res = self.cs.deleteAffinityGroup(**args) @@ -196,35 +210,29 @@ def remove_affinity_group(self): poll_async = self.module.params.get('poll_async') if res and poll_async: - res = self._poll_job(res, 'affinitygroup') + self.poll_job(res, 'affinitygroup') return affinity_group def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + affinty_type=dict(default=None), + description=dict(default=None), + state=dict(choices=['present', 'absent'], default='present'), + domain=dict(default=None), + account=dict(default=None), + project=dict(default=None), + poll_async=dict(type='bool', default=True), + )) + module = 
AnsibleModule( - argument_spec = dict( - name = dict(required=True), - affinty_type = dict(default=None), - description = dict(default=None), - state = dict(choices=['present', 'absent'], default='present'), - domain = dict(default=None), - account = dict(default=None), - poll_async = dict(choices=BOOLEANS, default=True), - api_key = dict(default=None), - api_secret = dict(default=None, no_log=True), - api_url = dict(default=None), - api_http_method = dict(choices=['get', 'post'], default='get'), - api_timeout = dict(type='int', default=10), - ), - required_together = ( - ['api_key', 'api_secret', 'api_url'], - ), + argument_spec=argument_spec, + required_together=cs_required_together(), supports_check_mode=True ) - if not has_lib_cs: - module.fail_json(msg="python library cs required: pip install cs") - try: acs_ag = AnsibleCloudStackAffinityGroup(module) @@ -236,7 +244,7 @@ def main(): result = acs_ag.get_result(affinity_group) - except CloudStackException, e: + except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) diff --git a/cloud/cloudstack/cs_cluster.py b/cloud/cloudstack/cs_cluster.py new file mode 100644 index 00000000000..7c9d39e6149 --- /dev/null +++ b/cloud/cloudstack/cs_cluster.py @@ -0,0 +1,421 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: cs_cluster +short_description: Manages host clusters on Apache CloudStack based clouds. +description: + - Create, update and remove clusters. +version_added: "2.1" +author: "René Moser (@resmo)" +options: + name: + description: + - name of the cluster. + required: true + zone: + description: + - Name of the zone in which the cluster belongs to. + - If not set, default zone is used. + required: false + default: null + pod: + description: + - Name of the pod in which the cluster belongs to. + required: false + default: null + cluster_type: + description: + - Type of the cluster. + - Required if C(state=present) + required: false + default: null + choices: [ 'CloudManaged', 'ExternalManaged' ] + hypervisor: + description: + - Name the hypervisor to be used. + - Required if C(state=present). + required: false + default: none + choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ] + url: + description: + - URL for the cluster + required: false + default: null + username: + description: + - Username for the cluster. + required: false + default: null + password: + description: + - Password for the cluster. + required: false + default: null + guest_vswitch_name: + description: + - Name of virtual switch used for guest traffic in the cluster. + - This would override zone wide traffic label setting. 
+ required: false + default: null + guest_vswitch_type: + description: + - Type of virtual switch used for guest traffic in the cluster. + - Allowed values are, vmwaresvs (for VMware standard vSwitch) and vmwaredvs (for VMware distributed vSwitch) + required: false + default: null + choices: [ 'vmwaresvs', 'vmwaredvs' ] + public_vswitch_name: + description: + - Name of virtual switch used for public traffic in the cluster. + - This would override zone wide traffic label setting. + required: false + default: null + public_vswitch_type: + description: + - Type of virtual switch used for public traffic in the cluster. + - Allowed values are, vmwaresvs (for VMware standard vSwitch) and vmwaredvs (for VMware distributed vSwitch) + required: false + default: null + choices: [ 'vmwaresvs', 'vmwaredvs' ] + vms_ip_address: + description: + - IP address of the VSM associated with this cluster. + required: false + default: null + vms_username: + description: + - Username for the VSM associated with this cluster. + required: false + default: null + vms_password: + description: + - Password for the VSM associated with this cluster. + required: false + default: null + ovm3_cluster: + description: + - Ovm3 native OCFS2 clustering enabled for cluster. + required: false + default: null + ovm3_pool: + description: + - Ovm3 native pooling enabled for cluster. + required: false + default: null + ovm3_vip: + description: + - Ovm3 vip to use for pool (and cluster). + required: false + default: null + state: + description: + - State of the cluster. + required: false + default: 'present' + choices: [ 'present', 'absent', 'disabled', 'enabled' ] +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Ensure a cluster is present +- local_action: + module: cs_cluster + name: kvm-cluster-01 + zone: ch-zrh-ix-01 + hypervisor: KVM + cluster_type: CloudManaged + +# Ensure a cluster is disabled +- local_action: + module: cs_cluster + name: kvm-cluster-01 + zone: ch-zrh-ix-01 + state: disabled + +# Ensure a cluster is enabled +- local_action: + module: cs_cluster + name: kvm-cluster-01 + zone: ch-zrh-ix-01 + state: enabled + +# Ensure a cluster is absent +- local_action: + module: cs_cluster + name: kvm-cluster-01 + zone: ch-zrh-ix-01 + state: absent +''' + +RETURN = ''' +--- +id: + description: UUID of the cluster. + returned: success + type: string + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the cluster. + returned: success + type: string + sample: cluster01 +allocation_state: + description: State of the cluster. + returned: success + type: string + sample: Enabled +cluster_type: + description: Type of the cluster. + returned: success + type: string + sample: ExternalManaged +cpu_overcommit_ratio: + description: The CPU overcommit ratio of the cluster. + returned: success + type: string + sample: 1.0 +memory_overcommit_ratio: + description: The memory overcommit ratio of the cluster. + returned: success + type: string + sample: 1.0 +managed_state: + description: Whether this cluster is managed by CloudStack. + returned: success + type: string + sample: Managed +ovm3_vip: + description: Ovm3 VIP to use for pooling and/or clustering + returned: success + type: string + sample: 10.10.10.101 +hypervisor: + description: Hypervisor of the cluster + returned: success + type: string + sample: VMware +zone: + description: Name of zone the cluster is in. + returned: success + type: string + sample: ch-gva-2 +pod: + description: Name of pod the cluster is in. 
+ returned: success + type: string + sample: pod01 +''' + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackCluster(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackCluster, self).__init__(module) + self.returns = { + 'allocationstate': 'allocation_state', + 'hypervisortype': 'hypervisor', + 'clustertype': 'cluster_type', + 'podname': 'pod', + 'managedstate': 'managed_state', + 'memoryovercommitratio': 'memory_overcommit_ratio', + 'cpuovercommitratio': 'cpu_overcommit_ratio', + 'ovm3vip': 'ovm3_vip', + } + self.cluster = None + + def _get_common_cluster_args(self): + args = { + 'clustername': self.module.params.get('name'), + 'hypervisor': self.module.params.get('hypervisor'), + 'clustertype': self.module.params.get('cluster_type'), + } + state = self.module.params.get('state') + if state in ['enabled', 'disabled']: + args['allocationstate'] = state.capitalize() + return args + + def get_pod(self, key=None): + args = { + 'name': self.module.params.get('pod'), + 'zoneid': self.get_zone(key='id'), + } + pods = self.cs.listPods(**args) + if pods: + return self._get_by_key(key, pods['pod'][0]) + self.module.fail_json(msg="Pod %s not found in zone %s." % (self.module.params.get('pod'), self.get_zone(key='name'))) + + def get_cluster(self): + if not self.cluster: + args = {} + + uuid = self.module.params.get('id') + if uuid: + args['id'] = uuid + clusters = self.cs.listClusters(**args) + if clusters: + self.cluster = clusters['cluster'][0] + return self.cluster + + args['name'] = self.module.params.get('name') + clusters = self.cs.listClusters(**args) + if clusters: + self.cluster = clusters['cluster'][0] + # fix different return from API than request argument given + self.cluster['hypervisor'] = self.cluster['hypervisortype'] + self.cluster['clustername'] = self.cluster['name'] + return self.cluster + + def present_cluster(self): + cluster = self.get_cluster() + if cluster: + cluster = self._update_cluster() + else: + cluster = self._create_cluster() + return cluster + + def _create_cluster(self): + required_params = [ + 'cluster_type', + 'hypervisor', + ] + self.module.fail_on_missing_params(required_params=required_params) + + args = self._get_common_cluster_args() + args['zoneid'] = self.get_zone(key='id') + args['podid'] = self.get_pod(key='id') + args['url'] = self.module.params.get('url') + args['username'] = self.module.params.get('username') + args['password'] = self.module.params.get('password') + args['guestvswitchname'] = self.module.params.get('guest_vswitch_name') + args['guestvswitchtype'] = self.module.params.get('guest_vswitch_type') + args['publicvswitchname'] = self.module.params.get('public_vswitch_name') + args['publicvswitchtype'] = self.module.params.get('public_vswitch_type') + args['vsmipaddress'] = self.module.params.get('vms_ip_address') + args['vsmusername'] = self.module.params.get('vms_username') + args['vsmpassword'] = self.module.params.get('vms_password') + args['ovm3cluster'] = self.module.params.get('ovm3_cluster') + args['ovm3pool'] = self.module.params.get('ovm3_pool') + args['ovm3vip'] = self.module.params.get('ovm3_vip') + + self.result['changed'] = True + + cluster = None + if not self.module.check_mode: + res = self.cs.addCluster(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + # API returns a list as result CLOUDSTACK-9205 + if isinstance(res['cluster'], list): + cluster = res['cluster'][0] + else: + cluster = res['cluster'] +
return cluster + + def _update_cluster(self): + cluster = self.get_cluster() + + args = self._get_common_cluster_args() + args['id'] = cluster['id'] + + if self.has_changed(args, cluster): + self.result['changed'] = True + + if not self.module.check_mode: + res = self.cs.updateCluster(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + cluster = res['cluster'] + return cluster + + def absent_cluster(self): + cluster = self.get_cluster() + if cluster: + self.result['changed'] = True + + args = { + 'id': cluster['id'], + } + if not self.module.check_mode: + res = self.cs.deleteCluster(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + return cluster + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + zone=dict(default=None), + pod=dict(default=None), + cluster_type=dict(choices=['CloudManaged', 'ExternalManaged'], default=None), + hypervisor=dict(choices=CS_HYPERVISORS, default=None), + state=dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'), + url=dict(default=None), + username=dict(default=None), + password=dict(default=None, no_log=True), + guest_vswitch_name=dict(default=None), + guest_vswitch_type=dict(choices=['vmwaresvs', 'vmwaredvs'], default=None), + public_vswitch_name=dict(default=None), + public_vswitch_type=dict(choices=['vmwaresvs', 'vmwaredvs'], default=None), + vms_ip_address=dict(default=None), + vms_username=dict(default=None), + vms_password=dict(default=None, no_log=True), + ovm3_cluster=dict(default=None), + ovm3_pool=dict(default=None), + ovm3_vip=dict(default=None), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + try: + acs_cluster = AnsibleCloudStackCluster(module) + + state = module.params.get('state') + if state in ['absent']: + cluster = acs_cluster.absent_cluster() + else: + cluster = acs_cluster.present_cluster() + + result = acs_cluster.get_result(cluster) + + except CloudStackException as e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_configuration.py b/cloud/cloudstack/cs_configuration.py new file mode 100644 index 00000000000..696593550a5 --- /dev/null +++ b/cloud/cloudstack/cs_configuration.py @@ -0,0 +1,292 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: cs_configuration +short_description: Manages configuration on Apache CloudStack based clouds. 
+description: + - Manages global, zone, account, storage and cluster configurations. +version_added: "2.1" +author: "René Moser (@resmo)" +options: + name: + description: + - Name of the configuration. + required: true + value: + description: + - Value of the configuration. + required: true + account: + description: + - Ensure the value for corresponding account. + required: false + default: null + domain: + description: + - Domain the account is related to. + - Only considered if C(account) is used. + required: false + default: ROOT + zone: + description: + - Ensure the value for corresponding zone. + required: false + default: null + storage: + description: + - Ensure the value for corresponding storage pool. + required: false + default: null + cluster: + description: + - Ensure the value for corresponding cluster. + required: false + default: null +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Ensure global configuration +- local_action: + module: cs_configuration + name: router.reboot.when.outofband.migrated + value: false + +# Ensure zone configuration +- local_action: + module: cs_configuration + name: router.reboot.when.outofband.migrated + zone: ch-gva-01 + value: true + +# Ensure storage configuration +- local_action: + module: cs_configuration + name: storage.overprovisioning.factor + storage: storage01 + value: 2.0 + +# Ensure account configuration +- local_action: + module: cs_configuration + name: allow.public.user.templates + value: false + account: acme inc + domain: customers +''' + +RETURN = ''' +--- +category: + description: Category of the configuration. + returned: success + type: string + sample: Advanced +scope: + description: Scope (zone/cluster/storagepool/account) of the parameter that needs to be updated. + returned: success + type: string + sample: storagepool +description: + description: Description of the configuration. + returned: success + type: string + sample: Setup the host to do multipath +name: + description: Name of the configuration. + returned: success + type: string + sample: zone.vlan.capacity.notificationthreshold +value: + description: Value of the configuration. + returned: success + type: string + sample: "0.75" +account: + description: Account of the configuration. + returned: success + type: string + sample: admin +domain: + description: Domain of account of the configuration. + returned: success + type: string + sample: ROOT +zone: + description: Zone of the configuration. + returned: success + type: string + sample: ch-gva-01 +cluster: + description: Cluster of the configuration. + returned: success + type: string + sample: cluster01 +storage: + description: Storage of the configuration. 
+ returned: success + type: string + sample: storage01 +''' + +# import cloudstack common +from ansible.module_utils.cloudstack import * + +class AnsibleCloudStackConfiguration(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackConfiguration, self).__init__(module) + self.returns = { + 'category': 'category', + 'scope': 'scope', + 'value': 'value', + } + self.storage = None + self.account = None + self.cluster = None + + + def _get_common_configuration_args(self): + args = {} + args['name'] = self.module.params.get('name') + args['accountid'] = self.get_account(key='id') + args['storageid'] = self.get_storage(key='id') + args['zoneid'] = self.get_zone(key='id') + args['clusterid'] = self.get_cluster(key='id') + return args + + + def get_zone(self, key=None): + # make sure we do not use the default zone + zone = self.module.params.get('zone') + if zone: + return super(AnsibleCloudStackConfiguration, self).get_zone(key=key) + + + def get_cluster(self, key=None): + if not self.cluster: + cluster_name = self.module.params.get('cluster') + if not cluster_name: + return None + args = {} + args['name'] = cluster_name + clusters = self.cs.listClusters(**args) + if clusters: + self.cluster = clusters['cluster'][0] + self.result['cluster'] = self.cluster['name'] + else: + self.module.fail_json(msg="Cluster %s not found." % cluster_name) + return self._get_by_key(key=key, my_dict=self.cluster) + + + def get_storage(self, key=None): + if not self.storage: + storage_pool_name = self.module.params.get('storage') + if not storage_pool_name: + return None + args = {} + args['name'] = storage_pool_name + storage_pools = self.cs.listStoragePools(**args) + if storage_pools: + self.storage = storage_pools['storagepool'][0] + self.result['storage'] = self.storage['name'] + else: + self.module.fail_json(msg="Storage pool %s not found." % storage_pool_name) + return self._get_by_key(key=key, my_dict=self.storage) + + + def get_configuration(self): + configuration = None + args = self._get_common_configuration_args() + configurations = self.cs.listConfigurations(**args) + if not configurations: + self.module.fail_json(msg="Configuration %s not found." 
% args['name']) + configuration = configurations['configuration'][0] + return configuration + + + def get_value(self): + value = str(self.module.params.get('value')) + if value in ('True', 'False'): + value = value.lower() + return value + + + def present_configuration(self): + configuration = self.get_configuration() + args = self._get_common_configuration_args() + args['value'] = self.get_value() + if self.has_changed(args, configuration, ['value']): + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.updateConfiguration(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + configuration = res['configuration'] + return configuration + + + def get_result(self, configuration): + self.result = super(AnsibleCloudStackConfiguration, self).get_result(configuration) + if self.account: + self.result['account'] = self.account['name'] + self.result['domain'] = self.domain['path'] + elif self.zone: + self.result['zone'] = self.zone['name'] + return self.result + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name = dict(required=True), + value = dict(type='str', required=True), + zone = dict(default=None), + storage = dict(default=None), + cluster = dict(default=None), + account = dict(default=None), + domain = dict(default='ROOT') + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + try: + acs_configuration = AnsibleCloudStackConfiguration(module) + configuration = acs_configuration.present_configuration() + result = acs_configuration.get_result(configuration) + + except CloudStackException as e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_domain.py b/cloud/cloudstack/cs_domain.py index 27410040aec..35e32aa0661 100644 --- a/cloud/cloudstack/cs_domain.py +++ b/cloud/cloudstack/cs_domain.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
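A note on the cs_configuration module completed above: get_value() casts every value to a string and lowercases Python booleans, so the comparison in present_configuration() is always string based; quoting numeric values in plays keeps runs idempotent. A minimal sketch of ensuring a cluster-scoped setting (configuration and cluster names are illustrative):

- cs_configuration:
    name: cpu.overprovisioning.factor
    cluster: cluster01
    value: "2.0"
  delegate_to: localhost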
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cs_domain @@ -106,12 +110,6 @@ sample: example.local ''' -try: - from cs import CloudStack, CloudStackException, read_config - has_lib_cs = True -except ImportError: - has_lib_cs = False - # import cloudstack common from ansible.module_utils.cloudstack import * @@ -207,7 +205,7 @@ def update_domain(self, domain): args['id'] = domain['id'] args['networkdomain'] = self.module.params.get('network_domain') - if self._has_changed(args, domain): + if self.has_changed(args, domain): self.result['changed'] = True if not self.module.check_mode: res = self.cs.updateDomain(**args) @@ -233,34 +231,27 @@ def absent_domain(self): poll_async = self.module.params.get('poll_async') if poll_async: - res = self._poll_job(res, 'domain') + res = self.poll_job(res, 'domain') return domain def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + path = dict(required=True), + state = dict(choices=['present', 'absent'], default='present'), + network_domain = dict(default=None), + clean_up = dict(type='bool', default=False), + poll_async = dict(type='bool', default=True), + )) + module = AnsibleModule( - argument_spec = dict( - path = dict(required=True), - state = dict(choices=['present', 'absent'], default='present'), - network_domain = dict(default=None), - clean_up = dict(choices=BOOLEANS, default=False), - poll_async = dict(choices=BOOLEANS, default=True), - api_key = dict(default=None), - api_secret = dict(default=None, no_log=True), - api_url = dict(default=None), - api_http_method = dict(choices=['get', 'post'], default='get'), - api_timeout = dict(type='int', default=10), - ), - required_together = ( - ['api_key', 'api_secret', 'api_url'], - ), + argument_spec=argument_spec, + required_together=cs_required_together(), supports_check_mode=True ) - if not has_lib_cs: - module.fail_json(msg="python library cs required: pip install cs") - try: acs_dom = AnsibleCloudStackDomain(module) @@ -272,7 +263,7 @@ def main(): result = acs_dom.get_result(domain) - except CloudStackException, e: + except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) diff --git a/cloud/cloudstack/cs_facts.py b/cloud/cloudstack/cs_facts.py index 11230b4c229..6f51127df65 100644 --- a/cloud/cloudstack/cs_facts.py +++ b/cloud/cloudstack/cs_facts.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cs_facts @@ -77,12 +81,12 @@ type: string sample: 185.19.28.35 cloudstack_public_hostname: - description: public hostname of the instance. + description: public IPv4 of the router. Same as C(cloudstack_public_ipv4). returned: success type: string sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139 cloudstack_public_ipv4: - description: public IPv4 of the instance. + description: public IPv4 of the router. returned: success type: string sample: 185.19.28.35 diff --git a/cloud/cloudstack/cs_firewall.py b/cloud/cloudstack/cs_firewall.py index e52683d7a67..160e58d4723 100644 --- a/cloud/cloudstack/cs_firewall.py +++ b/cloud/cloudstack/cs_firewall.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
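As the revised cs_facts return documentation above states, cloudstack_public_hostname now carries the router's public IPv4 address, identical to cloudstack_public_ipv4. A sketch of gathering and reading the address on a CloudStack instance (surrounding play context is illustrative):

- cs_facts:

- debug:
    msg: "{{ cloudstack_public_ipv4 }}"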
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cs_firewall @@ -99,6 +103,12 @@ - Name of the project the firewall rule is related to. required: false default: null + zone: + description: + - Name of the zone the virtual machine is in. + - If not set, default zone is used. + required: false + default: null poll_async: description: - Poll async jobs until job has finished. @@ -204,12 +214,6 @@ sample: my_network ''' -try: - from cs import CloudStack, CloudStackException, read_config - has_lib_cs = True -except ImportError: - has_lib_cs = False - # import cloudstack common from ansible.module_utils.cloudstack import * @@ -228,6 +232,7 @@ def __init__(self, module): 'icmptype': 'icmp_type', } self.firewall_rule = None + self.network = None def get_firewall_rule(self): @@ -303,30 +308,6 @@ def _type_cidr_match(self, rule, cidr): return cidr == rule['cidrlist'] - def get_network(self, key=None, network=None): - if not network: - network = self.module.params.get('network') - - if not network: - return None - - args = {} - args['account'] = self.get_account('name') - args['domainid'] = self.get_domain('id') - args['projectid'] = self.get_project('id') - args['zoneid'] = self.get_zone('id') - - networks = self.cs.listNetworks(**args) - if not networks: - self.module.fail_json(msg="No networks available") - - for n in networks['network']: - if network in [ n['displaytext'], n['name'], n['id'] ]: - return self._get_by_key(key, n) - break - self.module.fail_json(msg="Network '%s' not found" % network) - - def create_firewall_rule(self): firewall_rule = self.get_firewall_rule() if not firewall_rule: @@ -354,7 +335,7 @@ def create_firewall_rule(self): poll_async = self.module.params.get('poll_async') if poll_async: - firewall_rule = self._poll_job(res, 'firewallrule') + firewall_rule = self.poll_job(res, 'firewallrule') return firewall_rule @@ -378,7 +359,7 @@ def remove_firewall_rule(self): poll_async = self.module.params.get('poll_async') if poll_async: - res = self._poll_job(res, 'firewallrule') + res = self.poll_job(res, 'firewallrule') return firewall_rule @@ -386,41 +367,42 @@ def get_result(self, firewall_rule): super(AnsibleCloudStackFirewall, self).get_result(firewall_rule) if firewall_rule: self.result['type'] = self.module.params.get('type') - if 'networkid' in firewall_rule: - self.result['network'] = self.get_network(key='displaytext', network=firewall_rule['networkid']) + if self.result['type'] == 'egress': + self.result['network'] = self.get_network(key='displaytext') return self.result def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + ip_address = dict(default=None), + network = dict(default=None), + cidr = dict(default='0.0.0.0/0'), + protocol = dict(choices=['tcp', 'udp', 'icmp', 'all'], default='tcp'), + type = dict(choices=['ingress', 'egress'], default='ingress'), + icmp_type = dict(type='int', default=None), + icmp_code = dict(type='int', default=None), + start_port = dict(type='int', aliases=['port'], default=None), + end_port = dict(type='int', default=None), + state = dict(choices=['present', 'absent'], default='present'), + zone = dict(default=None), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + poll_async = dict(type='bool', default=True), + )) + + required_together = cs_required_together() + required_together.extend([ + ['icmp_type', 'icmp_code'], + ]) + module = AnsibleModule( - argument_spec = 
dict( - ip_address = dict(default=None), - network = dict(default=None), - cidr = dict(default='0.0.0.0/0'), - protocol = dict(choices=['tcp', 'udp', 'icmp', 'all'], default='tcp'), - type = dict(choices=['ingress', 'egress'], default='ingress'), - icmp_type = dict(type='int', default=None), - icmp_code = dict(type='int', default=None), - start_port = dict(type='int', aliases=['port'], default=None), - end_port = dict(type='int', default=None), - state = dict(choices=['present', 'absent'], default='present'), - domain = dict(default=None), - account = dict(default=None), - project = dict(default=None), - poll_async = dict(choices=BOOLEANS, default=True), - api_key = dict(default=None), - api_secret = dict(default=None, no_log=True), - api_url = dict(default=None), - api_http_method = dict(choices=['get', 'post'], default='get'), - api_timeout = dict(type='int', default=10), - ), + argument_spec=argument_spec, + required_together=required_together, required_one_of = ( ['ip_address', 'network'], ), - required_together = ( - ['icmp_type', 'icmp_code'], - ['api_key', 'api_secret', 'api_url'], - ), mutually_exclusive = ( ['icmp_type', 'start_port'], ['icmp_type', 'end_port'], @@ -429,9 +411,6 @@ def main(): supports_check_mode=True ) - if not has_lib_cs: - module.fail_json(msg="python library cs required: pip install cs") - try: acs_fw = AnsibleCloudStackFirewall(module) @@ -443,7 +422,7 @@ def main(): result = acs_fw.get_result(fw_rule) - except CloudStackException, e: + except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) diff --git a/cloud/cloudstack/cs_instance.py b/cloud/cloudstack/cs_instance.py index 6f1339123d8..58c98724853 100644 --- a/cloud/cloudstack/cs_instance.py +++ b/cloud/cloudstack/cs_instance.py @@ -18,22 +18,31 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cs_instance short_description: Manages instances and virtual machines on Apache CloudStack based clouds. description: - - Deploy, start, update, scale, restart, stop and destroy instances. + - Deploy, start, update, scale, restart, restore, stop and destroy instances. version_added: '2.0' author: "René Moser (@resmo)" options: name: description: - Host name of the instance. C(name) can only contain ASCII letters. - required: true + - Name will be generated (UUID) by CloudStack if not specified and can not be changed afterwards. + - Either C(name) or C(display_name) is required. + required: false + default: null display_name: description: - Custom display name of the instances. + - Display name will be set to C(name) if not specified. + - Either C(name) or C(display_name) is required. required: false default: null group: @@ -46,13 +55,28 @@ - State of the instance. required: false default: 'present' - choices: [ 'deployed', 'started', 'stopped', 'restarted', 'destroyed', 'expunged', 'present', 'absent' ] + choices: [ 'deployed', 'started', 'stopped', 'restarted', 'restored', 'destroyed', 'expunged', 'present', 'absent' ] service_offering: description: - Name or id of the service offering of the new instance. - If not set, first found service offering is used. 
required: false default: null + cpu: + description: + - The number of CPUs to allocate to the instance, used with custom service offerings. + required: false + default: null + cpu_speed: + description: + - The clock speed/shares allocated to the instance, used with custom service offerings. + required: false + default: null + memory: + description: + - The memory allocated to the instance, used with custom service offerings. + required: false + default: null template: description: - Name or id of the template to be used for creating the new instance. @@ -67,6 +91,15 @@ - Mutually exclusive with C(template) option. required: false default: null + template_filter: + description: + - Name of the filter used to search for the template or iso. + - Used for params C(iso) or C(template) on C(state=present). + required: false + default: 'executable' + choices: [ 'featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community' ] + aliases: [ 'iso_filter' ] + version_added: '2.1' hypervisor: description: - Name the hypervisor to be used for creating the new instance. @@ -123,7 +156,7 @@ description: - List of security groups the instance to be applied to. required: false - default: [] + default: null aliases: [ 'security_group' ] domain: description: @@ -164,6 +197,12 @@ - Consider switching to HTTP_POST by using C(CLOUDSTACK_METHOD=post) to increase the HTTP_GET size limit of 2KB to 32 KB. required: false default: null + vpc: + description: + - Name of the VPC. + required: false + default: null + version_added: "2.3" force: description: - Force stop/start the instance if required to apply changes, otherwise a running instance will not be changed. @@ -175,6 +214,7 @@ - "If you want to delete all tags, set a empty list e.g. C(tags: [])." required: false default: null + aliases: [ 'tag' ] poll_async: description: - Poll async jobs until job has finished. @@ -186,8 +226,7 @@ EXAMPLES = ''' # Create a instance from an ISO # NOTE: Names of offerings and ISOs depending on the CloudStack configuration. -- local_action: - module: cs_instance +- cs_instance: name: web-vm-1 iso: Linux Debian 7 64-bit hypervisor: VMware @@ -200,45 +239,64 @@ - Server Integration - Sync Integration - Storage Integration + delegate_to: localhost # For changing a running instance, use the 'force' parameter -- local_action: - module: cs_instance +- cs_instance: name: web-vm-1 - display_name: web-vm-01.example.com + display_name: web-vm-01.example.com iso: Linux Debian 7 64-bit service_offering: 2cpu_2gb force: yes + delegate_to: localhost -# Create or update a instance on Exoscale's public cloud -- local_action: - module: cs_instance - name: web-vm-1 +# Create or update an instance on Exoscale's public cloud using display_name. +# Note: user_data can be used to kickstart the instance using cloud-init yaml config. 
+- cs_instance: + display_name: web-vm-1 + template: Linux Debian 7 64-bit + service_offering: Tiny + ssh_key: john@example.com + tags: - - { key: admin, value: john } - - { key: foo, value: bar } + - key: admin + value: john + - key: foo + value: bar + user_data: | + #cloud-config + packages: + - nginx + delegate_to: localhost # Create an instance with multiple interfaces specifying the IP addresses -- local_action: - module: cs_instance +- cs_instance: name: web-vm-1 template: Linux Debian 7 64-bit service_offering: Tiny ip_to_networks: - - {'network': NetworkA, 'ip': '10.1.1.1'} - - {'network': NetworkB, 'ip': '192.168.1.1'} - -# Ensure a instance has stopped -- local_action: cs_instance name=web-vm-1 state=stopped + - network: NetworkA + ip: 10.1.1.1 + - network: NetworkB + ip: 192.0.2.1 + delegate_to: localhost + +# Ensure an instance is stopped +- cs_instance: + name: web-vm-1 + state: stopped + delegate_to: localhost -# Ensure a instance is running -- local_action: cs_instance name=web-vm-1 state=started +# Ensure an instance is running +- cs_instance: + name: web-vm-1 + state: started + delegate_to: localhost -# Remove a instance -- local_action: cs_instance name=web-vm-1 state=absent +# Remove an instance +- cs_instance: + name: web-vm-1 + state: absent + delegate_to: localhost ''' RETURN = ''' @@ -362,12 +420,6 @@ import base64 -try: - from cs import CloudStack, CloudStackException, read_config - has_lib_cs = True -except ImportError: - has_lib_cs = False - # import cloudstack common from ansible.module_utils.cloudstack import * @@ -387,7 +439,6 @@ def __init__(self, module): 'isoname': 'iso', 'templatename': 'template', 'keypair': 'ssh_key', - 'securitygroup': 'security_group', } self.instance = None self.template = None @@ -413,7 +464,7 @@ def get_template_or_iso(self, key=None): iso = self.module.params.get('iso') if not template and not iso: - self.module.fail_json(msg="Template or ISO is required.") + return None args = {} args['account'] = self.get_account(key='name') args['domainid'] = self.get_domain(key='id') args['projectid'] = self.get_project(key='id') if self.template: return self._get_by_key(key, self.template) - args['templatefilter'] = 'executable' + args['templatefilter'] = self.module.params.get('template_filter') templates = self.cs.listTemplates(**args) if templates: for t in templates['template']: @@ -438,7 +489,7 @@ elif iso: if self.iso: return self._get_by_key(key, self.iso) - args['isofilter'] = 'executable' + args['isofilter'] = self.module.params.get('template_filter') isos = self.cs.listIsos(**args) if isos: for i in isos['iso']: @@ -465,21 +516,28 @@ def get_disk_offering_id(self): def get_instance(self): instance = self.instance if not instance: - instance_name = self.module.params.get('name') - - args = {} - args['account'] = self.get_account(key='name') - args['domainid'] = self.get_domain(key='id') - args['projectid'] = self.get_project(key='id') + instance_name = self.get_or_fallback('name', 'display_name') + vpc_id = self.get_vpc(key='id') + args = { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'vpcid': vpc_id, + } # Do not pass zoneid, as the instance name must be unique across zones. instances = self.cs.listVirtualMachines(**args) if instances: for v in instances['virtualmachine']: - if instance_name in [ v['name'], v['displayname'], v['id'] ]: + # Due to a limitation of the API, there is no easy way (yet) to get only those VMs + # not belonging to a VPC. 
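+ # As a workaround, a VM inside a VPC is skipped whenever no vpc param was given;
+ # the name matching below accepts name, display name or UUID, case-insensitively.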
+ if not vpc_id and self.is_vm_in_vpc(vm=v): + continue + if instance_name.lower() in [ v['name'].lower(), v['displayname'].lower(), v['id'] ]: self.instance = v break return self.instance + def get_iptonetwork_mappings(self): network_mappings = self.module.params.get('ip_to_networks') if network_mappings is None: @@ -495,6 +553,28 @@ def get_iptonetwork_mappings(self): res.append({'networkid': ids[i], 'ip': data['ip']}) return res + + def security_groups_has_changed(self): + security_groups = self.module.params.get('security_groups') + if security_groups is None: + return False + + security_groups = [s.lower() for s in security_groups] + instance_security_groups = self.instance.get('securitygroup',[]) + + instance_security_group_names = [] + for instance_security_group in instance_security_groups: + if instance_security_group['name'].lower() not in security_groups: + return True + else: + instance_security_group_names.append(instance_security_group['name'].lower()) + + for security_group in security_groups: + if security_group not in instance_security_group_names: + return True + return False + + def get_network_ids(self, network_names=None): if network_names is None: network_names = self.module.params.get('networks') @@ -502,12 +582,13 @@ def get_network_ids(self, network_names=None): if not network_names: return None - args = {} - args['account'] = self.get_account(key='name') - args['domainid'] = self.get_domain(key='id') - args['projectid'] = self.get_project(key='id') - args['zoneid'] = self.get_zone(key='id') - + args = { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'zoneid': self.get_zone(key='id'), + 'vpcid': self.get_vpc(key='id'), + } networks = self.cs.listNetworks(**args) if not networks: self.module.fail_json(msg="No networks available") @@ -527,27 +608,45 @@ def get_network_ids(self, network_names=None): return network_ids - def present_instance(self): + def present_instance(self, start_vm=True): instance = self.get_instance() + if not instance: - instance = self.deploy_instance() + instance = self.deploy_instance(start_vm=start_vm) else: - instance = self.update_instance(instance) + instance = self.recover_instance(instance=instance) + instance = self.update_instance(instance=instance, start_vm=start_vm) # In check mode, we do not necessarely have an instance if instance: instance = self.ensure_tags(resource=instance, resource_type='UserVm') + # refresh instance data + self.instance = instance return instance def get_user_data(self): user_data = self.module.params.get('user_data') - if user_data: - user_data = base64.b64encode(user_data) + if user_data is not None: + user_data = base64.b64encode(str(user_data)) return user_data + def get_details(self): + res = None + cpu = self.module.params.get('cpu') + cpu_speed = self.module.params.get('cpu_speed') + memory = self.module.params.get('memory') + if all([cpu, cpu_speed, memory]): + res = [{ + 'cpuNumber': cpu, + 'cpuSpeed': cpu_speed, + 'memory': memory, + }] + return res + + def deploy_instance(self, start_vm=True): self.result['changed'] = True networkids = self.get_network_ids() @@ -556,6 +655,9 @@ def deploy_instance(self, start_vm=True): args = {} args['templateid'] = self.get_template_or_iso(key='id') + if not args['templateid']: + self.module.fail_json(msg="Template or ISO is required.") + args['zoneid'] = self.get_zone(key='id') args['serviceofferingid'] = self.get_service_offering_id() args['account'] = self.get_account(key='name') 
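The cpu, cpu_speed and memory options introduced above are folded by get_details() into the details argument of the deploy call, and the argument spec further below requires all three together. A deployment sketch against a custom (unconstrained) service offering, with illustrative names:

- cs_instance:
    name: web-vm-1
    template: Linux Debian 7 64-bit
    service_offering: Custom
    cpu: 2
    cpu_speed: 2000
    memory: 4096
  delegate_to: localhost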
@@ -575,8 +677,12 @@ def deploy_instance(self, start_vm=True): args['size'] = self.module.params.get('disk_size') args['startvm'] = start_vm args['rootdisksize'] = self.module.params.get('root_disk_size') - args['securitygroupnames'] = ','.join(self.module.params.get('security_groups')) args['affinitygroupnames'] = ','.join(self.module.params.get('affinity_groups')) + args['details'] = self.get_details() + + security_groups = self.module.params.get('security_groups') + if security_groups is not None: + args['securitygroupnames'] = ','.join(security_groups) template_iso = self.get_template_or_iso() if 'hypervisor' not in template_iso: @@ -591,45 +697,60 @@ def deploy_instance(self, start_vm=True): poll_async = self.module.params.get('poll_async') if poll_async: - instance = self._poll_job(instance, 'virtualmachine') + instance = self.poll_job(instance, 'virtualmachine') return instance - def update_instance(self, instance): - args_service_offering = {} - args_service_offering['id'] = instance['id'] - args_service_offering['serviceofferingid'] = self.get_service_offering_id() - - args_instance_update = {} - args_instance_update['id'] = instance['id'] - args_instance_update['group'] = self.module.params.get('group') - args_instance_update['displayname'] = self.get_or_fallback('display_name', 'name') - args_instance_update['userdata'] = self.get_user_data() - args_instance_update['ostypeid'] = self.get_os_type(key='id') - - args_ssh_key = {} - args_ssh_key['id'] = instance['id'] - args_ssh_key['keypair'] = self.module.params.get('ssh_key') - args_ssh_key['projectid'] = self.get_project(key='id') - - if self._has_changed(args_service_offering, instance) or \ - self._has_changed(args_instance_update, instance) or \ - self._has_changed(args_ssh_key, instance): - + def update_instance(self, instance, start_vm=True): + # Service offering data + args_service_offering = {} + args_service_offering['id'] = instance['id'] + if self.module.params.get('service_offering'): + args_service_offering['serviceofferingid'] = self.get_service_offering_id() + service_offering_changed = self.has_changed(args_service_offering, instance) + + # Instance data + args_instance_update = {} + args_instance_update['id'] = instance['id'] + args_instance_update['userdata'] = self.get_user_data() + args_instance_update['ostypeid'] = self.get_os_type(key='id') + if self.module.params.get('group'): + args_instance_update['group'] = self.module.params.get('group') + if self.module.params.get('display_name'): + args_instance_update['displayname'] = self.module.params.get('display_name') + instance_changed = self.has_changed(args_instance_update, instance) + + # SSH key data + args_ssh_key = {} + args_ssh_key['id'] = instance['id'] + args_ssh_key['projectid'] = self.get_project(key='id') + if self.module.params.get('ssh_key'): + args_ssh_key['keypair'] = self.module.params.get('ssh_key') + ssh_key_changed = self.has_changed(args_ssh_key, instance) + + security_groups_changed = self.security_groups_has_changed() + + changed = [ + service_offering_changed, + instance_changed, + security_groups_changed, + ssh_key_changed, + ] + + if True in changed: force = self.module.params.get('force') instance_state = instance['state'].lower() - if instance_state == 'stopped' or force: self.result['changed'] = True if not self.module.check_mode: # Ensure VM has stopped instance = self.stop_instance() - instance = self._poll_job(instance, 'virtualmachine') + instance = self.poll_job(instance, 'virtualmachine') self.instance = instance # Change 
service offering - if self._has_changed(args_service_offering, instance): + if service_offering_changed: res = self.cs.changeServiceForVirtualMachine(**args_service_offering) if 'errortext' in res: self.module.fail_json(msg="Failed: '%s'" % res['errortext']) @@ -637,7 +758,9 @@ def update_instance(self, instance): self.instance = instance # Update VM - if self._has_changed(args_instance_update, instance): + if instance_changed or security_groups_changed: + if security_groups_changed: + args_instance_update['securitygroupnames'] = ','.join(self.module.params.get('security_groups')) res = self.cs.updateVirtualMachine(**args_instance_update) if 'errortext' in res: self.module.fail_json(msg="Failed: '%s'" % res['errortext']) @@ -645,20 +768,31 @@ self.instance = instance # Reset SSH key - if self._has_changed(args_ssh_key, instance): + if ssh_key_changed: instance = self.cs.resetSSHKeyForVirtualMachine(**args_ssh_key) if 'errortext' in instance: self.module.fail_json(msg="Failed: '%s'" % instance['errortext']) - instance = self._poll_job(instance, 'virtualmachine') + instance = self.poll_job(instance, 'virtualmachine') self.instance = instance # Start VM again if it was running before - if instance_state == 'running': + if instance_state == 'running' and start_vm: instance = self.start_instance() return instance + def recover_instance(self, instance): + if instance['state'].lower() in [ 'destroying', 'destroyed' ]: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.recoverVirtualMachine(id=instance['id']) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + instance = res['virtualmachine'] + return instance + + def absent_instance(self): instance = self.get_instance() if instance: @@ -672,7 +806,7 @@ def absent_instance(self): poll_async = self.module.params.get('poll_async') if poll_async: - instance = self._poll_job(res, 'virtualmachine') + instance = self.poll_job(res, 'virtualmachine') return instance @@ -695,79 +829,88 @@ def expunge_instance(self): poll_async = self.module.params.get('poll_async') if poll_async: - res = self._poll_job(res, 'virtualmachine') + res = self.poll_job(res, 'virtualmachine') return instance def stop_instance(self): instance = self.get_instance() + # in check mode the instance may not be instantiated + if instance: + if instance['state'].lower() in ['stopping', 'stopped']: + return instance - if not instance: - instance = self.deploy_instance(start_vm=False) - return instance - - elif instance['state'].lower() in ['stopping', 'stopped']: - return instance - - if instance['state'].lower() in ['starting', 'running']: - self.result['changed'] = True - if not self.module.check_mode: - instance = self.cs.stopVirtualMachine(id=instance['id']) + if instance['state'].lower() in ['starting', 'running']: + self.result['changed'] = True + if not self.module.check_mode: + instance = self.cs.stopVirtualMachine(id=instance['id']) - if 'errortext' in instance: - self.module.fail_json(msg="Failed: '%s'" % instance['errortext']) + if 'errortext' in instance: + self.module.fail_json(msg="Failed: '%s'" % instance['errortext']) - poll_async = self.module.params.get('poll_async') - if poll_async: - instance = self._poll_job(instance, 'virtualmachine') + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self.poll_job(instance, 'virtualmachine') return instance def start_instance(self): instance = self.get_instance() + # in check mode the instance may not be 
instantiated + if instance: + if instance['state'].lower() in ['starting', 'running']: + return instance - if not instance: - instance = self.deploy_instance() - return instance - - elif instance['state'].lower() in ['starting', 'running']: - return instance - - if instance['state'].lower() in ['stopped', 'stopping']: - self.result['changed'] = True - if not self.module.check_mode: - instance = self.cs.startVirtualMachine(id=instance['id']) + if instance['state'].lower() in ['stopped', 'stopping']: + self.result['changed'] = True + if not self.module.check_mode: + instance = self.cs.startVirtualMachine(id=instance['id']) - if 'errortext' in instance: - self.module.fail_json(msg="Failed: '%s'" % instance['errortext']) + if 'errortext' in instance: + self.module.fail_json(msg="Failed: '%s'" % instance['errortext']) - poll_async = self.module.params.get('poll_async') - if poll_async: - instance = self._poll_job(instance, 'virtualmachine') + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self.poll_job(instance, 'virtualmachine') return instance def restart_instance(self): instance = self.get_instance() + # in check mode the instance may not be instantiated + if instance: + if instance['state'].lower() in [ 'running', 'starting' ]: + self.result['changed'] = True + if not self.module.check_mode: + instance = self.cs.rebootVirtualMachine(id=instance['id']) - if not instance: - instance = self.deploy_instance() - return instance + if 'errortext' in instance: + self.module.fail_json(msg="Failed: '%s'" % instance['errortext']) - elif instance['state'].lower() in [ 'running', 'starting' ]: - self.result['changed'] = True - if not self.module.check_mode: - instance = self.cs.rebootVirtualMachine(id=instance['id']) + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self.poll_job(instance, 'virtualmachine') - if 'errortext' in instance: - self.module.fail_json(msg="Failed: '%s'" % instance['errortext']) + elif instance['state'].lower() in [ 'stopping', 'stopped' ]: + instance = self.start_instance() + return instance - poll_async = self.module.params.get('poll_async') - if poll_async: - instance = self._poll_job(instance, 'virtualmachine') - elif instance['state'].lower() in [ 'stopping', 'stopped' ]: - instance = self.start_instance() + def restore_instance(self): + instance = self.get_instance() + self.result['changed'] = True + # in check mode the instance may not be instantiated + if instance: + args = {} + args['templateid'] = self.get_template_or_iso(key='id') + args['virtualmachineid'] = instance['id'] + res = self.cs.restoreVirtualMachine(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + instance = self.poll_job(res, 'virtualmachine') return instance @@ -790,54 +933,61 @@ def get_result(self, instance): self.result['default_ip'] = nic['ipaddress'] return self.result + def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name = dict(default=None), + display_name = dict(default=None), + group = dict(default=None), + state = dict(choices=['present', 'deployed', 'started', 'stopped', 'restarted', 'restored', 'absent', 'destroyed', 'expunged'], default='present'), + service_offering = dict(default=None), + cpu = dict(default=None, type='int'), + cpu_speed = dict(default=None, type='int'), + memory = dict(default=None, type='int'), + template = dict(default=None), + iso = dict(default=None), + 
template_filter = dict(default="executable", aliases=['iso_filter'], choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']), + networks = dict(type='list', aliases=[ 'network' ], default=None), + ip_to_networks = dict(type='list', aliases=['ip_to_network'], default=None), + ip_address = dict(default=None), + ip6_address = dict(default=None), + disk_offering = dict(default=None), + disk_size = dict(type='int', default=None), + root_disk_size = dict(type='int', default=None), + keyboard = dict(choices=['de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us'], default=None), + hypervisor = dict(choices=CS_HYPERVISORS, default=None), + security_groups = dict(type='list', aliases=[ 'security_group' ], default=None), + affinity_groups = dict(type='list', aliases=[ 'affinity_group' ], default=[]), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + user_data = dict(default=None), + zone = dict(default=None), + ssh_key = dict(default=None), + force = dict(type='bool', default=False), + tags = dict(type='list', aliases=[ 'tag' ], default=None), + vpc = dict(default=None), + poll_async = dict(type='bool', default=True), + )) + + required_together = cs_required_together() + required_together.extend([ + ['cpu', 'cpu_speed', 'memory'], + ]) + module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - display_name = dict(default=None), - group = dict(default=None), - state = dict(choices=['present', 'deployed', 'started', 'stopped', 'restarted', 'absent', 'destroyed', 'expunged'], default='present'), - service_offering = dict(default=None), - template = dict(default=None), - iso = dict(default=None), - networks = dict(type='list', aliases=[ 'network' ], default=None), - ip_to_networks = dict(type='list', aliases=['ip_to_network'], default=None), - ip_address = dict(defaul=None), - ip6_address = dict(defaul=None), - disk_offering = dict(default=None), - disk_size = dict(type='int', default=None), - root_disk_size = dict(type='int', default=None), - keyboard = dict(choices=['de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us'], default=None), - hypervisor = dict(choices=['KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM', 'Simulator'], default=None), - security_groups = dict(type='list', aliases=[ 'security_group' ], default=[]), - affinity_groups = dict(type='list', aliases=[ 'affinity_group' ], default=[]), - domain = dict(default=None), - account = dict(default=None), - project = dict(default=None), - user_data = dict(default=None), - zone = dict(default=None), - ssh_key = dict(default=None), - force = dict(choices=BOOLEANS, default=False), - tags = dict(type='list', aliases=[ 'tag' ], default=None), - poll_async = dict(choices=BOOLEANS, default=True), - api_key = dict(default=None), - api_secret = dict(default=None, no_log=True), - api_url = dict(default=None), - api_http_method = dict(choices=['get', 'post'], default='get'), - api_timeout = dict(type='int', default=10), + argument_spec=argument_spec, + required_together=required_together, + required_one_of = ( + ['display_name', 'name'], ), mutually_exclusive = ( ['template', 'iso'], ), - required_together = ( - ['api_key', 'api_secret', 'api_url'], - ), supports_check_mode=True ) - if not has_lib_cs: - module.fail_json(msg="python library cs required: pip install cs") - try: acs_instance = AnsibleCloudStackInstance(module) @@ -849,16 
+999,23 @@ def main(): elif state in ['expunged']: instance = acs_instance.expunge_instance() + elif state in ['restored']: + acs_instance.present_instance() + instance = acs_instance.restore_instance() + elif state in ['present', 'deployed']: instance = acs_instance.present_instance() elif state in ['stopped']: + acs_instance.present_instance(start_vm=False) instance = acs_instance.stop_instance() elif state in ['started']: + acs_instance.present_instance() instance = acs_instance.start_instance() elif state in ['restarted']: + acs_instance.present_instance() instance = acs_instance.restart_instance() if instance and 'state' in instance and instance['state'].lower() == 'error': @@ -866,7 +1023,7 @@ def main(): result = acs_instance.get_result(instance) - except CloudStackException, e: + except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) diff --git a/cloud/cloudstack/cs_instance_facts.py b/cloud/cloudstack/cs_instance_facts.py new file mode 100644 index 00000000000..2aee631395d --- /dev/null +++ b/cloud/cloudstack/cs_instance_facts.py @@ -0,0 +1,278 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: cs_instance_facts +short_description: Gathering facts from the API of instances from Apache CloudStack based clouds. +description: + - Gathering facts from the API of an instance. +version_added: "2.1" +author: "René Moser (@resmo)" +options: + name: + description: + - Name or display name of the instance. + required: true + domain: + description: + - Domain the instance is related to. + required: false + default: null + account: + description: + - Account the instance is related to. + required: false + default: null + project: + description: + - Project the instance is related to. + required: false + default: null +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +- cs_instance_facts: + name: web-vm-1 + delegate_to: localhost + +- debug: + var: cloudstack_instance +''' + +RETURN = ''' +--- +cloudstack_instance.id: + description: UUID of the instance. + returned: success + type: string + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +cloudstack_instance.name: + description: Name of the instance. + returned: success + type: string + sample: web-01 +cloudstack_instance.display_name: + description: Display name of the instance. + returned: success + type: string + sample: web-01 +cloudstack_instance.group: + description: Group name the instance is related to. + returned: success + type: string + sample: web +cloudstack_instance.created: + description: Date the instance was created. + returned: success + type: string + sample: 2014-12-01T14:57:57+0100 +cloudstack_instance.password_enabled: + description: True if password setting is enabled. 
+ returned: success + type: boolean + sample: true +cloudstack_instance.password: + description: The password of the instance, if it exists. + returned: success + type: string + sample: Ge2oe7Do +cloudstack_instance.ssh_key: + description: Name of SSH key deployed to instance. + returned: success + type: string + sample: key@work +cloudstack_instance.domain: + description: Domain the instance is related to. + returned: success + type: string + sample: example domain +cloudstack_instance.account: + description: Account the instance is related to. + returned: success + type: string + sample: example account +cloudstack_instance.project: + description: Name of project the instance is related to. + returned: success + type: string + sample: Production +cloudstack_instance.default_ip: + description: Default IP address of the instance. + returned: success + type: string + sample: 10.23.37.42 +cloudstack_instance.public_ip: + description: Public IP address associated with the instance via static NAT rule. + returned: success + type: string + sample: 1.2.3.4 +cloudstack_instance.iso: + description: Name of ISO the instance was deployed with. + returned: success + type: string + sample: Debian-8-64bit +cloudstack_instance.template: + description: Name of template the instance was deployed with. + returned: success + type: string + sample: Debian-8-64bit +cloudstack_instance.service_offering: + description: Name of the service offering the instance has. + returned: success + type: string + sample: 2cpu_2gb +cloudstack_instance.zone: + description: Name of zone the instance is in. + returned: success + type: string + sample: ch-gva-2 +cloudstack_instance.state: + description: State of the instance. + returned: success + type: string + sample: Running +cloudstack_instance.security_groups: + description: Security groups the instance is in. + returned: success + type: list + sample: '[ "default" ]' +cloudstack_instance.affinity_groups: + description: Affinity groups the instance is in. + returned: success + type: list + sample: '[ "webservers" ]' +cloudstack_instance.tags: + description: List of resource tags associated with the instance. + returned: success + type: dict + sample: '[ { "key": "foo", "value": "bar" } ]' +cloudstack_instance.hypervisor: + description: Hypervisor related to this instance. + returned: success + type: string + sample: KVM +cloudstack_instance.instance_name: + description: Internal name of the instance (ROOT admin only). + returned: success + type: string + sample: i-44-3992-VM +''' + +import base64 + +# import cloudstack common +from ansible.module_utils.cloudstack import * + +class AnsibleCloudStackInstanceFacts(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackInstanceFacts, self).__init__(module) + self.instance = None + self.returns = { + 'group': 'group', + 'hypervisor': 'hypervisor', + 'instancename': 'instance_name', + 'publicip': 'public_ip', + 'passwordenabled': 'password_enabled', + 'password': 'password', + 'serviceofferingname': 'service_offering', + 'isoname': 'iso', + 'templatename': 'template', + 'keypair': 'ssh_key', + } + self.facts = { + 'cloudstack_instance': None, + } + + + def get_instance(self): + instance = self.instance + if not instance: + instance_name = self.module.params.get('name') + + args = {} + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') + # Do not pass zoneid, as the instance name must be unique across zones. 
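+ # The name given may be the instance name, the display name or the UUID;
+ # the matching below is case-insensitive.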
+ instances = self.cs.listVirtualMachines(**args) + if instances: + for v in instances['virtualmachine']: + if instance_name.lower() in [ v['name'].lower(), v['displayname'].lower(), v['id'] ]: + self.instance = v + break + return self.instance + + + def run(self): + instance = self.get_instance() + if not instance: + self.module.fail_json(msg="Instance not found: %s" % self.module.params.get('name')) + self.facts['cloudstack_instance'] = self.get_result(instance) + return self.facts + + + def get_result(self, instance): + super(AnsibleCloudStackInstanceFacts, self).get_result(instance) + if instance: + if 'securitygroup' in instance: + security_groups = [] + for securitygroup in instance['securitygroup']: + security_groups.append(securitygroup['name']) + self.result['security_groups'] = security_groups + if 'affinitygroup' in instance: + affinity_groups = [] + for affinitygroup in instance['affinitygroup']: + affinity_groups.append(affinitygroup['name']) + self.result['affinity_groups'] = affinity_groups + if 'nic' in instance: + for nic in instance['nic']: + if nic['isdefault'] and 'ipaddress' in nic: + self.result['default_ip'] = nic['ipaddress'] + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name = dict(required=True), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=False, + ) + + cs_instance_facts = AnsibleCloudStackInstanceFacts(module=module).run() + cs_facts_result = dict(changed=False, ansible_facts=cs_instance_facts) + module.exit_json(**cs_facts_result) + +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_instancegroup.py b/cloud/cloudstack/cs_instancegroup.py index 537d9d90b28..12b2bc7baeb 100644 --- a/cloud/cloudstack/cs_instancegroup.py +++ b/cloud/cloudstack/cs_instancegroup.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
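Because cs_instance_facts above returns its result via ansible_facts, the documented keys become available directly to subsequent tasks; a sketch with an illustrative instance name:

- cs_instance_facts:
    name: web-vm-1
  delegate_to: localhost

- debug:
    msg: "{{ cloudstack_instance.name }} is {{ cloudstack_instance.state }} on {{ cloudstack_instance.default_ip }}"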
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cs_instancegroup @@ -102,12 +106,6 @@ sample: example project ''' -try: - from cs import CloudStack, CloudStackException, read_config - has_lib_cs = True -except ImportError: - has_lib_cs = False - # import cloudstack common from ansible.module_utils.cloudstack import * @@ -170,28 +168,21 @@ def absent_instance_group(self): def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name = dict(required=True), + state = dict(default='present', choices=['present', 'absent']), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + )) + module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - state = dict(default='present', choices=['present', 'absent']), - domain = dict(default=None), - account = dict(default=None), - project = dict(default=None), - api_key = dict(default=None), - api_secret = dict(default=None, no_log=True), - api_url = dict(default=None), - api_http_method = dict(choices=['get', 'post'], default='get'), - api_timeout = dict(type='int', default=10), - ), - required_together = ( - ['api_key', 'api_secret', 'api_url'], - ), + argument_spec=argument_spec, + required_together=cs_required_together(), supports_check_mode=True ) - if not has_lib_cs: - module.fail_json(msg="python library cs required: pip install cs") - try: acs_ig = AnsibleCloudStackInstanceGroup(module) @@ -203,7 +194,7 @@ def main(): result = acs_ig.get_result(instance_group) - except CloudStackException, e: + except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) diff --git a/cloud/cloudstack/cs_ip_address.py b/cloud/cloudstack/cs_ip_address.py index e9507f855ed..233720827f1 100644 --- a/cloud/cloudstack/cs_ip_address.py +++ b/cloud/cloudstack/cs_ip_address.py @@ -2,6 +2,7 @@ # -*- coding: utf-8 -*- # # (c) 2015, Darren Worrall +# (c) 2015, René Moser # # This file is part of Ansible # @@ -18,6 +19,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cs_ip_address @@ -27,7 +32,9 @@ limitations this is not an idempotent call, so be sure to only conditionally call this when C(state=present) version_added: '2.0' -author: "Darren Worrall @dazworrall" +author: + - "Darren Worrall (@dazworrall)" + - "René Moser (@resmo)" options: ip_address: description: @@ -45,6 +52,12 @@ - Network the IP address is related to. required: false default: null + vpc: + description: + - VPC the IP address is related to. + required: false + default: null + version_added: "2.2" account: description: - Account the IP address is related to. 
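Since associating an address with cs_ip_address is not an idempotent call (see the description above), the task should be guarded by a condition in plays. A sketch acquiring an address in a network, where names and the guard variable are illustrative:

- cs_ip_address:
    state: present
    network: my_network
    zone: ch-gva-2
  register: public_ip
  when: instance_public_ip is not defined
  delegate_to: localhost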
@@ -118,13 +131,6 @@ sample: example domain ''' - -try: - from cs import CloudStack, CloudStackException, read_config - has_lib_cs = True -except ImportError: - has_lib_cs = False - # import cloudstack common from ansible.module_utils.cloudstack import * @@ -137,62 +143,34 @@ def __init__(self, module): 'ipaddress': 'ip_address', } - - #TODO: Add to parent class, duplicated in cs_network - def get_network(self, key=None, network=None): - if not network: - network = self.module.params.get('network') - - if not network: - return None - - args = {} - args['account'] = self.get_account('name') - args['domainid'] = self.get_domain('id') - args['projectid'] = self.get_project('id') - args['zoneid'] = self.get_zone('id') - - networks = self.cs.listNetworks(**args) - if not networks: - self.module.fail_json(msg="No networks available") - - for n in networks['network']: - if network in [ n['displaytext'], n['name'], n['id'] ]: - return self._get_by_key(key, n) - break - self.module.fail_json(msg="Network '%s' not found" % network) - - - #TODO: Merge changes here with parent class def get_ip_address(self, key=None): if self.ip_address: return self._get_by_key(key, self.ip_address) ip_address = self.module.params.get('ip_address') - if not ip_address: - self.module.fail_json(msg="IP address param 'ip_address' is required") - - args = {} - args['ipaddress'] = ip_address - args['account'] = self.get_account(key='name') - args['domainid'] = self.get_domain(key='id') - args['projectid'] = self.get_project(key='id') + args = { + 'ipaddress': self.module.params.get('ip_address'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'vpcid': self.get_vpc(key='id'), + } ip_addresses = self.cs.listPublicIpAddresses(**args) if ip_addresses: self.ip_address = ip_addresses['publicipaddress'][0] return self._get_by_key(key, self.ip_address) - def associate_ip_address(self): self.result['changed'] = True - args = {} - args['account'] = self.get_account(key='name') - args['domainid'] = self.get_domain(key='id') - args['projectid'] = self.get_project(key='id') - args['networkid'] = self.get_network(key='id') - args['zoneid'] = self.get_zone(key='id') - ip_address = {} + args = { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'networkid': self.get_network(key='id'), + 'zoneid': self.get_zone(key='id'), + } + ip_address = None if not self.module.check_mode: res = self.cs.associateIpAddress(**args) if 'errortext' in res: @@ -200,15 +178,13 @@ def associate_ip_address(self): poll_async = self.module.params.get('poll_async') if poll_async: - res = self._poll_job(res, 'ipaddress') - ip_address = res + ip_address = self.poll_job(res, 'ipaddress') return ip_address - def disassociate_ip_address(self): ip_address = self.get_ip_address() - if ip_address is None: - return ip_address + if not ip_address: + return None if ip_address['isstaticnat']: self.module.fail_json(msg="IP address is allocated via static nat") @@ -219,36 +195,33 @@ def disassociate_ip_address(self): self.module.fail_json(msg="Failed: '%s'" % res['errortext']) poll_async = self.module.params.get('poll_async') if poll_async: - res = self._poll_job(res, 'ipaddress') + self.poll_job(res, 'ipaddress') return ip_address def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + ip_address = dict(required=False), + state = dict(choices=['present', 'absent'], default='present'), + vpc = 
dict(default=None), + network = dict(default=None), + zone = dict(default=None), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + poll_async = dict(type='bool', default=True), + )) + module = AnsibleModule( - argument_spec = dict( - ip_address = dict(required=False), - state = dict(choices=['present', 'absent'], default='present'), - zone = dict(default=None), - domain = dict(default=None), - account = dict(default=None), - network = dict(default=None), - project = dict(default=None), - poll_async = dict(choices=BOOLEANS, default=True), - api_key = dict(default=None), - api_secret = dict(default=None, no_log=True), - api_url = dict(default=None), - api_http_method = dict(choices=['get', 'post'], default='get'), - api_timeout = dict(type='int', default=10), - ), - required_together = ( - ['api_key', 'api_secret', 'api_url'], - ), + argument_spec=argument_spec, + required_together=cs_required_together(), + required_if=[ + ('state', 'absent', ['ip_address']), + ], supports_check_mode=True ) - if not has_lib_cs: - module.fail_json(msg="python library cs required: pip install cs") - try: acs_ip_address = AnsibleCloudStackIPAddress(module) @@ -260,7 +233,7 @@ def main(): result = acs_ip_address.get_result(ip_address) - except CloudStackException, e: + except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) diff --git a/cloud/cloudstack/cs_iso.py b/cloud/cloudstack/cs_iso.py index 37f110cbe68..ee84bd22f2c 100644 --- a/cloud/cloudstack/cs_iso.py +++ b/cloud/cloudstack/cs_iso.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cs_iso @@ -197,12 +201,6 @@ sample: example project ''' -try: - from cs import CloudStack, CloudStackException, read_config - has_lib_cs = True -except ImportError: - has_lib_cs = False - # import cloudstack common from ansible.module_utils.cloudstack import * @@ -295,37 +293,30 @@ def remove_iso(self): def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name = dict(required=True), + url = dict(default=None), + os_type = dict(default=None), + zone = dict(default=None), + iso_filter = dict(default='self', choices=[ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ]), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + checksum = dict(default=None), + is_ready = dict(type='bool', default=False), + bootable = dict(type='bool', default=True), + is_featured = dict(type='bool', default=False), + is_dynamically_scalable = dict(type='bool', default=False), + state = dict(choices=['present', 'absent'], default='present'), + )) + module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - url = dict(default=None), - os_type = dict(default=None), - zone = dict(default=None), - iso_filter = dict(default='self', choices=[ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ]), - domain = dict(default=None), - account = dict(default=None), - project = dict(default=None), - checksum = dict(default=None), - is_ready = dict(choices=BOOLEANS, default=False), - bootable = dict(choices=BOOLEANS, default=True), - is_featured = dict(choices=BOOLEANS, default=False), - is_dynamically_scalable = dict(choices=BOOLEANS, 
default=False), - state = dict(choices=['present', 'absent'], default='present'), - api_key = dict(default=None), - api_secret = dict(default=None, no_log=True), - api_url = dict(default=None), - api_http_method = dict(choices=['get', 'post'], default='get'), - api_timeout = dict(type='int', default=10), - ), - required_together = ( - ['api_key', 'api_secret', 'api_url'], - ), + argument_spec=argument_spec, + required_together=cs_required_together(), supports_check_mode=True ) - if not has_lib_cs: - module.fail_json(msg="python library cs required: pip install cs") - try: acs_iso = AnsibleCloudStackIso(module) @@ -337,7 +328,7 @@ def main(): result = acs_iso.get_result(iso) - except CloudStackException, e: + except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) diff --git a/cloud/cloudstack/cs_loadbalancer_rule.py b/cloud/cloudstack/cs_loadbalancer_rule.py new file mode 100644 index 00000000000..2e5f11e415b --- /dev/null +++ b/cloud/cloudstack/cs_loadbalancer_rule.py @@ -0,0 +1,384 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, Darren Worrall +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: cs_loadbalancer_rule +short_description: Manages load balancer rules on Apache CloudStack based clouds. +description: + - Add, update and remove load balancer rules. +version_added: '2.0' +author: + - "Darren Worrall (@dazworrall)" + - "René Moser (@resmo)" +options: + name: + description: + - The name of the load balancer rule. + required: true + description: + description: + - The description of the load balancer rule. + required: false + default: null + algorithm: + description: + - Load balancer algorithm. + - Required when using C(state=present). + required: false + choices: [ 'source', 'roundrobin', 'leastconn' ] + default: 'source' + private_port: + description: + - The private port of the private IP address/virtual machine to which the network traffic will be load balanced. + - Required when using C(state=present). + - Cannot be changed once the rule exists, due to an API limitation. + required: false + default: null + public_port: + description: + - The public port from which the network traffic will be load balanced. + - Required when using C(state=present). + - Cannot be changed once the rule exists, due to an API limitation. + required: true + default: null + ip_address: + description: + - Public IP address from which the network traffic will be load balanced. + required: true + aliases: [ 'public_ip' ] + open_firewall: + description: + - Whether the firewall rule for the public port should be created while creating the new rule. + - Use M(cs_firewall) for managing firewall rules.
+ required: false + default: false + cidr: + description: + - CIDR (full notation) to be used for the firewall rule if required. + required: false + default: null + protocol: + description: + - The protocol to be used on the load balancer. + required: false + default: null + project: + description: + - Name of the project the load balancer IP address is related to. + required: false + default: null + state: + description: + - State of the rule. + required: true + default: 'present' + choices: [ 'present', 'absent' ] + domain: + description: + - Domain the rule is related to. + required: false + default: null + account: + description: + - Account the rule is related to. + required: false + default: null + zone: + description: + - Name of the zone in which the rule should be created. + - If not set, default zone is used. + required: false + default: null +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Create a load balancer rule +- local_action: + module: cs_loadbalancer_rule + name: balance_http + public_ip: 1.2.3.4 + algorithm: leastconn + public_port: 80 + private_port: 8080 + +# Update the algorithm of an existing load balancer rule +- local_action: + module: cs_loadbalancer_rule + name: balance_http + public_ip: 1.2.3.4 + algorithm: roundrobin + public_port: 80 + private_port: 8080 + +# Delete a load balancer rule +- local_action: + module: cs_loadbalancer_rule + name: balance_http + public_ip: 1.2.3.4 + state: absent +''' + +RETURN = ''' +--- +id: + description: UUID of the rule. + returned: success + type: string + sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f +zone: + description: Name of zone the rule is related to. + returned: success + type: string + sample: ch-gva-2 +project: + description: Name of project the rule is related to. + returned: success + type: string + sample: Production +account: + description: Account the rule is related to. + returned: success + type: string + sample: example account +domain: + description: Domain the rule is related to. + returned: success + type: string + sample: example domain +algorithm: + description: Load balancer algorithm used. + returned: success + type: string + sample: "source" +cidr: + description: CIDR to forward traffic from. + returned: success + type: string + sample: "" +name: + description: Name of the rule. + returned: success + type: string + sample: "http-lb" +description: + description: Description of the rule. + returned: success + type: string + sample: "http load balancer rule" +protocol: + description: Protocol of the rule. + returned: success + type: string + sample: "tcp" +public_port: + description: Public port. + returned: success + type: string + sample: 80 +private_port: + description: Private port. + returned: success + type: string + sample: 80 +public_ip: + description: Public IP address. + returned: success + type: string + sample: "1.2.3.4" +tags: + description: List of resource tags associated with the rule. + returned: success + type: dict + sample: '[ { "key": "foo", "value": "bar" } ]' +state: + description: State of the rule.
+ returned: success + type: string + sample: "Add" +''' + +# import cloudstack common +from ansible.module_utils.cloudstack import * + +class AnsibleCloudStackLBRule(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackLBRule, self).__init__(module) + self.returns = { + 'publicip': 'public_ip', + 'algorithm': 'algorithm', + 'cidrlist': 'cidr', + 'protocol': 'protocol', + } + # these values will be casted to int + self.returns_to_int = { + 'publicport': 'public_port', + 'privateport': 'private_port', + } + + + def get_rule(self, **kwargs): + rules = self.cs.listLoadBalancerRules(**kwargs) + if rules: + return rules['loadbalancerrule'][0] + + + def _get_common_args(self): + return { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'zoneid': self.get_zone(key='id'), + 'publicipid': self.get_ip_address(key='id'), + 'name': self.module.params.get('name'), + } + + + def present_lb_rule(self): + missing_params = [] + for required_params in [ + 'algorithm', + 'private_port', + 'public_port', + ]: + if not self.module.params.get(required_params): + missing_params.append(required_params) + if missing_params: + self.module.fail_json(msg="missing required arguments: %s" % ','.join(missing_params)) + + args = self._get_common_args() + rule = self.get_rule(**args) + if rule: + rule = self._update_lb_rule(rule) + else: + rule = self._create_lb_rule(rule) + + if rule: + rule = self.ensure_tags(resource=rule, resource_type='LoadBalancer') + return rule + + + def _create_lb_rule(self, rule): + self.result['changed'] = True + if not self.module.check_mode: + args = self._get_common_args() + args['algorithm'] = self.module.params.get('algorithm') + args['privateport'] = self.module.params.get('private_port') + args['publicport'] = self.module.params.get('public_port') + args['cidrlist'] = self.module.params.get('cidr') + args['description'] = self.module.params.get('description') + args['protocol'] = self.module.params.get('protocol') + res = self.cs.createLoadBalancerRule(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + rule = self.poll_job(res, 'loadbalancer') + return rule + + + def _update_lb_rule(self, rule): + args = {} + args['id'] = rule['id'] + args['algorithm'] = self.module.params.get('algorithm') + args['description'] = self.module.params.get('description') + if self.has_changed(args, rule): + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.updateLoadBalancerRule(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + rule = self.poll_job(res, 'loadbalancer') + return rule + + + def absent_lb_rule(self): + args = self._get_common_args() + rule = self.get_rule(**args) + if rule: + self.result['changed'] = True + if rule and not self.module.check_mode: + res = self.cs.deleteLoadBalancerRule(id=rule['id']) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + poll_async = self.module.params.get('poll_async') + if poll_async: + res = self.poll_job(res, 'loadbalancer') + return rule + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name = dict(required=True), + description = dict(default=None), + algorithm = dict(choices=['source', 'roundrobin', 'leastconn'], 
default='source'), + private_port = dict(type='int', default=None), + public_port = dict(type='int', default=None), + protocol = dict(default=None), + state = dict(choices=['present', 'absent'], default='present'), + ip_address = dict(required=True, aliases=['public_ip']), + cidr = dict(default=None), + project = dict(default=None), + open_firewall = dict(type='bool', default=False), + tags = dict(type='list', aliases=['tag'], default=None), + zone = dict(default=None), + domain = dict(default=None), + account = dict(default=None), + poll_async = dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + try: + acs_lb_rule = AnsibleCloudStackLBRule(module) + + state = module.params.get('state') + if state in ['absent']: + rule = acs_lb_rule.absent_lb_rule() + else: + rule = acs_lb_rule.present_lb_rule() + + result = acs_lb_rule.get_result(rule) + + except CloudStackException as e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_loadbalancer_rule_member.py b/cloud/cloudstack/cs_loadbalancer_rule_member.py new file mode 100644 index 00000000000..0695ed9be5b --- /dev/null +++ b/cloud/cloudstack/cs_loadbalancer_rule_member.py @@ -0,0 +1,364 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, Darren Worrall +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: cs_loadbalancer_rule_member +short_description: Manages load balancer rule members on Apache CloudStack based clouds. +description: + - Add and remove load balancer rule members. +version_added: '2.0' +author: + - "Darren Worrall (@dazworrall)" + - "René Moser (@resmo)" +options: + name: + description: + - The name of the load balancer rule. + required: true + ip_address: + description: + - Public IP address from which the network traffic will be load balanced. + - Only needed to find the rule if C(name) is not unique. + required: false + default: null + aliases: [ 'public_ip' ] + vms: + description: + - List of VMs to assign to or remove from the rule. + required: true + type: list + aliases: [ 'vm' ] + state: + description: + - Whether the VMs should be present in or absent from the rule. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + project: + description: + - Name of the project the rule is related to. + required: false + default: null + domain: + description: + - Domain the rule is related to. + required: false + default: null + account: + description: + - Account the rule is related to.
+ required: false + default: null + zone: + description: + - Name of the zone in which the rule should be located. + - If not set, default zone is used. + required: false + default: null +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Add VMs to an existing load balancer +- local_action: + module: cs_loadbalancer_rule_member + name: balance_http + vms: + - web01 + - web02 + +# Remove VMs from an existing load balancer +- local_action: + module: cs_loadbalancer_rule_member + name: balance_http + vms: + - web01 + - web02 + state: absent + +# Rolling upgrade of hosts +- hosts: webservers + serial: 1 + pre_tasks: + - name: Remove from load balancer + local_action: + module: cs_loadbalancer_rule_member + name: balance_http + vm: "{{ ansible_hostname }}" + state: absent + tasks: + # Perform update + post_tasks: + - name: Add to load balancer + local_action: + module: cs_loadbalancer_rule_member + name: balance_http + vm: "{{ ansible_hostname }}" + state: present +''' + +RETURN = ''' +--- +id: + description: UUID of the rule. + returned: success + type: string + sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f +zone: + description: Name of zone the rule is related to. + returned: success + type: string + sample: ch-gva-2 +project: + description: Name of project the rule is related to. + returned: success + type: string + sample: Production +account: + description: Account the rule is related to. + returned: success + type: string + sample: example account +domain: + description: Domain the rule is related to. + returned: success + type: string + sample: example domain +algorithm: + description: Load balancer algorithm used. + returned: success + type: string + sample: "source" +cidr: + description: CIDR to forward traffic from. + returned: success + type: string + sample: "" +name: + description: Name of the rule. + returned: success + type: string + sample: "http-lb" +description: + description: Description of the rule. + returned: success + type: string + sample: "http load balancer rule" +protocol: + description: Protocol of the rule. + returned: success + type: string + sample: "tcp" +public_port: + description: Public port. + returned: success + type: string + sample: 80 +private_port: + description: Private port. + returned: success + type: string + sample: 80 +public_ip: + description: Public IP address. + returned: success + type: string + sample: "1.2.3.4" +vms: + description: Rule members. + returned: success + type: list + sample: '[ "web01", "web02" ]' +tags: + description: List of resource tags associated with the rule. + returned: success + type: dict + sample: '[ { "key": "foo", "value": "bar" } ]' +state: + description: State of the rule.
+ returned: success + type: string + sample: "Add" +''' + +# import cloudstack common +from ansible.module_utils.cloudstack import * + +class AnsibleCloudStackLBRuleMember(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackLBRuleMember, self).__init__(module) + self.returns = { + 'publicip': 'public_ip', + 'algorithm': 'algorithm', + 'cidrlist': 'cidr', + 'protocol': 'protocol', + } + # these values will be casted to int + self.returns_to_int = { + 'publicport': 'public_port', + 'privateport': 'private_port', + } + + + def get_rule(self): + args = self._get_common_args() + args['name'] = self.module.params.get('name') + args['zoneid'] = self.get_zone(key='id') + if self.module.params.get('ip_address'): + args['publicipid'] = self.get_ip_address(key='id') + rules = self.cs.listLoadBalancerRules(**args) + if rules: + if len(rules['loadbalancerrule']) > 1: + self.module.fail_json(msg="More than one rule having name %s. Please pass 'ip_address' as well." % args['name']) + return rules['loadbalancerrule'][0] + return None + + + def _get_common_args(self): + return { + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + } + + + def _get_members_of_rule(self, rule): + res = self.cs.listLoadBalancerRuleInstances(id=rule['id']) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + return res.get('loadbalancerruleinstance', []) + + + def _ensure_members(self, operation): + if operation not in ['add', 'remove']: + self.module.fail_json(msg="Bad operation: %s" % operation) + + rule = self.get_rule() + if not rule: + self.module.fail_json(msg="Unknown rule: %s" % self.module.params.get('name')) + + existing = {} + for vm in self._get_members_of_rule(rule=rule): + existing[vm['name']] = vm['id'] + + wanted_names = self.module.params.get('vms') + + if operation =='add': + cs_func = self.cs.assignToLoadBalancerRule + to_change = set(wanted_names) - set(existing.keys()) + else: + cs_func = self.cs.removeFromLoadBalancerRule + to_change = set(wanted_names) & set(existing.keys()) + + if not to_change: + return rule + + args = self._get_common_args() + vms = self.cs.listVirtualMachines(**args) + to_change_ids = [] + for name in to_change: + for vm in vms.get('virtualmachine', []): + if vm['name'] == name: + to_change_ids.append(vm['id']) + break + else: + self.module.fail_json(msg="Unknown VM: %s" % name) + + if to_change_ids: + self.result['changed'] = True + + if to_change_ids and not self.module.check_mode: + res = cs_func( + id = rule['id'], + virtualmachineids = to_change_ids, + ) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res) + rule = self.get_rule() + return rule + + + def add_members(self): + return self._ensure_members('add') + + + def remove_members(self): + return self._ensure_members('remove') + + + def get_result(self, rule): + super(AnsibleCloudStackLBRuleMember, self).get_result(rule) + if rule: + self.result['vms'] = [] + for vm in self._get_members_of_rule(rule=rule): + self.result['vms'].append(vm['name']) + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name = dict(required=True), + ip_address = dict(default=None, aliases=['public_ip']), + vms = dict(required=True, aliases=['vm'], type='list'), + state = dict(choices=['present', 'absent'], default='present'), + zone = 
dict(default=None), + domain = dict(default=None), + project = dict(default=None), + account = dict(default=None), + poll_async = dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + try: + acs_lb_rule_member = AnsibleCloudStackLBRuleMember(module) + + state = module.params.get('state') + if state in ['absent']: + rule = acs_lb_rule_member.remove_members() + else: + rule = acs_lb_rule_member.add_members() + + result = acs_lb_rule_member.get_result(rule) + + except CloudStackException as e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_network.py b/cloud/cloudstack/cs_network.py index cab24bdfefe..092fbf7326e 100644 --- a/cloud/cloudstack/cs_network.py +++ b/cloud/cloudstack/cs_network.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cs_network @@ -59,14 +63,14 @@ gateway: description: - The gateway of the network. - - Required for shared networks and isolated networks when it belongs to VPC. + - Required for shared networks and isolated networks when it belongs to a VPC. - Only considered on create. required: false default: null netmask: description: - The netmask of the network. - - Required for shared networks and isolated networks when it belongs to VPC. + - Required for shared networks and isolated networks when it belongs to a VPC. - Only considered on create. required: false default: null @@ -91,7 +95,7 @@ default: null gateway_ipv6: description: - - The gateway of the IPv6 network. + - The gateway of the IPv6 network. - Required for shared networks. - Only considered on create. required: false @@ -103,12 +107,12 @@ default: null vpc: description: - - The ID or VID of the network. + - Name of the VPC of the network. required: false default: null isolated_pvlan: description: - - The isolated private vlan for this network. + - The isolated private VLAN for this network. 
required: false default: null clean_up: @@ -318,12 +322,6 @@ sample: DefaultIsolatedNetworkOfferingWithSourceNatService ''' -try: - from cs import CloudStack, CloudStackException, read_config - has_lib_cs = True -except ImportError: - has_lib_cs = False - # import cloudstack common from ansible.module_utils.cloudstack import * @@ -348,29 +346,9 @@ def __init__(self, module): 'dns1': 'dns1', 'dns2': 'dns2', } - self.network = None - def get_vpc(self, key=None): - vpc = self.module.params.get('vpc') - if not vpc: - return None - - args = {} - args['account'] = self.get_account(key='name') - args['domainid'] = self.get_domain(key='id') - args['projectid'] = self.get_project(key='id') - args['zoneid'] = self.get_zone(key='id') - - vpcs = self.cs.listVPCs(**args) - if vpcs: - for v in vpcs['vpc']: - if vpc in [ v['name'], v['displaytext'], v['id'] ]: - return self._get_by_key(key, v) - self.module.fail_json(msg="VPC '%s' not found" % vpc) - - def get_network_offering(self, key=None): network_offering = self.module.params.get('network_offering') if not network_offering: @@ -428,7 +406,7 @@ def update_network(self, network): args = self._get_args() args['id'] = network['id'] - if self._has_changed(args, network): + if self.has_changed(args, network): self.result['changed'] = True if not self.module.check_mode: network = self.cs.updateNetwork(**args) @@ -438,7 +416,7 @@ def update_network(self, network): poll_async = self.module.params.get('poll_async') if network and poll_async: - network = self._poll_job(network, 'network') + network = self.poll_job(network, 'network') return network @@ -496,7 +474,7 @@ def restart_network(self): poll_async = self.module.params.get('poll_async') if network and poll_async: - network = self._poll_job(network, 'network') + network = self.poll_job(network, 'network') return network @@ -516,54 +494,50 @@ def absent_network(self): poll_async = self.module.params.get('poll_async') if res and poll_async: - res = self._poll_job(res, 'network') + res = self.poll_job(res, 'network') return network def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name = dict(required=True), + display_text = dict(default=None), + network_offering = dict(default=None), + zone = dict(default=None), + start_ip = dict(default=None), + end_ip = dict(default=None), + gateway = dict(default=None), + netmask = dict(default=None), + start_ipv6 = dict(default=None), + end_ipv6 = dict(default=None), + cidr_ipv6 = dict(default=None), + gateway_ipv6 = dict(default=None), + vlan = dict(default=None), + vpc = dict(default=None), + isolated_pvlan = dict(default=None), + clean_up = dict(type='bool', default=False), + network_domain = dict(default=None), + state = dict(choices=['present', 'absent', 'restarted' ], default='present'), + acl_type = dict(choices=['account', 'domain'], default='account'), + project = dict(default=None), + domain = dict(default=None), + account = dict(default=None), + poll_async = dict(type='bool', default=True), + )) + required_together = cs_required_together() + required_together.extend([ + ['start_ip', 'netmask', 'gateway'], + ['start_ipv6', 'cidr_ipv6', 'gateway_ipv6'], + ]) + module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - display_text = dict(default=None), - network_offering = dict(default=None), - zone = dict(default=None), - start_ip = dict(default=None), - end_ip = dict(default=None), - gateway = dict(default=None), - netmask = dict(default=None), - start_ipv6 = dict(default=None), - end_ipv6 = 
dict(default=None), - cidr_ipv6 = dict(default=None), - gateway_ipv6 = dict(default=None), - vlan = dict(default=None), - vpc = dict(default=None), - isolated_pvlan = dict(default=None), - clean_up = dict(type='bool', choices=BOOLEANS, default=False), - network_domain = dict(default=None), - state = dict(choices=['present', 'absent', 'restarted' ], default='present'), - acl_type = dict(choices=['account', 'domain'], default='account'), - project = dict(default=None), - domain = dict(default=None), - account = dict(default=None), - poll_async = dict(type='bool', choices=BOOLEANS, default=True), - api_key = dict(default=None), - api_secret = dict(default=None, no_log=True), - api_url = dict(default=None), - api_http_method = dict(choices=['get', 'post'], default='get'), - api_timeout = dict(type='int', default=10), - ), - required_together = ( - ['api_key', 'api_secret', 'api_url'], - ['start_ip', 'netmask', 'gateway'], - ['start_ipv6', 'cidr_ipv6', 'gateway_ipv6'], - ), + argument_spec=argument_spec, + required_together=required_together, supports_check_mode=True ) - if not has_lib_cs: - module.fail_json(msg="python library cs required: pip install cs") - try: acs_network = AnsibleCloudStackNetwork(module) @@ -579,7 +553,7 @@ def main(): result = acs_network.get_result(network) - except CloudStackException, e: + except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) diff --git a/cloud/cloudstack/cs_nic.py b/cloud/cloudstack/cs_nic.py new file mode 100644 index 00000000000..a9947c266e5 --- /dev/null +++ b/cloud/cloudstack/cs_nic.py @@ -0,0 +1,297 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: cs_nic +short_description: Manages NICs and secondary IPs of an instance on Apache CloudStack based clouds. +description: + - Add and remove secondary IPs to and from a NIC. +version_added: "2.3" +author: "René Moser (@resmo)" +options: + vm: + description: + - Name of instance. + required: true + aliases: ['name'] + network: + description: + - Name of the network. + - Required to find the NIC if instance has multiple networks assigned. + required: false + default: null + vm_guest_ip: + description: + - Secondary IP address to be added to the instance NIC. + - If not set, the API always returns a new IP address, making the task non-idempotent. + required: false + default: null + aliases: ['secondary_ip'] + vpc: + description: + - Name of the VPC the C(vm) is related to. + required: false + default: null + domain: + description: + - Domain the instance is related to. + required: false + default: null + account: + description: + - Account the instance is related to.
+ required: false + default: null + project: + description: + - Name of the project the instance is deployed in. + required: false + default: null + zone: + description: + - Name of the zone in which the instance is deployed. + - If not set, default zone is used. + required: false + default: null + state: + description: + - State of the IP address. + required: false + default: "present" + choices: [ 'present', 'absent' ] + poll_async: + description: + - Poll async jobs until job has finished. + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Assign a specific IP to the default NIC of the VM
- local_action: + module: cs_nic + vm: customer_xy + vm_guest_ip: 10.10.10.10 + +# Assign an IP to the default NIC of the VM +# Note: If vm_guest_ip is not set, you will get a new IP address on every run. +- local_action: + module: cs_nic + vm: customer_xy + +# Remove a specific IP from the default NIC +- local_action: + module: cs_nic + vm: customer_xy + vm_guest_ip: 10.10.10.10 + state: absent +''' + +RETURN = ''' +--- +id: + description: UUID of the NIC. + returned: success + type: string + sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8 +vm: + description: Name of the VM. + returned: success + type: string + sample: web-01 +ip_address: + description: Primary IP of the NIC. + returned: success + type: string + sample: 10.10.10.10 +netmask: + description: Netmask of the NIC. + returned: success + type: string + sample: 255.255.255.0 +mac_address: + description: MAC address of the NIC. + returned: success + type: string + sample: 02:00:33:31:00:e4 +vm_guest_ip: + description: Secondary IP of the NIC. + returned: success + type: string + sample: 10.10.10.10 +network: + description: Name of the network if not default. + returned: success + type: string + sample: sync network +domain: + description: Domain the VM is related to. + returned: success + type: string + sample: example domain +account: + description: Account the VM is related to. + returned: success + type: string + sample: example account +project: + description: Name of project the VM is related to.
+ returned: success + type: string + sample: Production +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackNic(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackNic, self).__init__(module) + self.vm_guest_ip = self.module.params.get('vm_guest_ip') + self.nic = None + self.returns = { + 'ipaddress': 'ip_address', + 'macaddress': 'mac_address', + 'netmask': 'netmask', + } + + def get_nic(self): + if self.nic: + return self.nic + args = { + 'virtualmachineid': self.get_vm(key='id'), + 'networkid': self.get_network(key='id'), + } + nics = self.cs.listNics(**args) + if nics: + self.nic = nics['nic'][0] + return self.nic + self.module.fail_json(msg="NIC for VM %s in network %s not found" % (self.get_vm(key='name'), self.get_network(key='name'))) + + def get_secondary_ip(self): + nic = self.get_nic() + if self.vm_guest_ip: + secondary_ips = nic.get('secondaryip') or [] + for secondary_ip in secondary_ips: + if secondary_ip['ipaddress'] == self.vm_guest_ip: + return secondary_ip + return None + + def present_nic(self): + nic = self.get_nic() + if not self.get_secondary_ip(): + self.result['changed'] = True + args = { + 'nicid': nic['id'], + 'ipaddress': self.vm_guest_ip, + } + + if not self.module.check_mode: + res = self.cs.addIpToNic(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + nic = self.poll_job(res, 'nicsecondaryip') + # Save result for RETURNS + self.vm_guest_ip = nic['ipaddress'] + return nic + + def absent_nic(self): + nic = self.get_nic() + secondary_ip = self.get_secondary_ip() + if secondary_ip: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.removeIpFromNic(id=secondary_ip['id']) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'nicsecondaryip') + return nic + + def get_result(self, nic): + super(AnsibleCloudStackNic, self).get_result(nic) + if nic and not self.module.params.get('network'): + self.module.params['network'] = nic.get('networkid') + self.result['network'] = self.get_network(key='name') + self.result['vm'] = self.get_vm(key='name') + self.result['vm_guest_ip'] = self.vm_guest_ip + self.result['domain'] = self.get_domain(key='path') + self.result['account'] = self.get_account(key='name') + self.result['project'] = self.get_project(key='name') + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + vm=dict(required=True, aliases=['name']), + vm_guest_ip=dict(default=None, aliases=['secondary_ip']), + network=dict(default=None), + vpc=dict(default=None), + state=dict(choices=['present', 'absent'], default='present'), + domain=dict(default=None), + account=dict(default=None), + project=dict(default=None), + zone=dict(default=None), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True, + required_if=([ + ('state', 'absent', ['vm_guest_ip']) + ]) + ) + + try: + acs_nic = AnsibleCloudStackNic(module) + + state = module.params.get('state') + + if state == 'absent': + nic = acs_nic.absent_nic() + else: + nic = acs_nic.present_nic() + + result = acs_nic.get_result(nic) + + except CloudStackException as e: + 
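# any CloudStack API error raised above is reported back to Ansible as a module failure +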
module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_pod.py b/cloud/cloudstack/cs_pod.py new file mode 100644 index 00000000000..afccea1404a --- /dev/null +++ b/cloud/cloudstack/cs_pod.py @@ -0,0 +1,305 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: cs_pod +short_description: Manages pods on Apache CloudStack based clouds. +description: + - Create, update, delete pods. +version_added: "2.1" +author: "René Moser (@resmo)" +options: + name: + description: + - Name of the pod. + required: true + id: + description: + - UUID of the existing pod. + default: null + required: false + start_ip: + description: + - Starting IP address for the Pod. + - Required on C(state=present) + default: null + required: false + end_ip: + description: + - Ending IP address for the Pod. + default: null + required: false + netmask: + description: + - Netmask for the Pod. + - Required on C(state=present) + default: null + required: false + gateway: + description: + - Gateway for the Pod. + - Required on C(state=present) + default: null + required: false + zone: + description: + - Name of the zone the pod belongs to. + - If not set, default zone is used. + required: false + default: null + state: + description: + - State of the pod. + required: false + default: 'present' + choices: [ 'present', 'enabled', 'disabled', 'absent' ] +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Ensure a pod is present +- local_action: + module: cs_pod + name: pod1 + zone: ch-zrh-ix-01 + start_ip: 10.100.10.101 + gateway: 10.100.10.1 + netmask: 255.255.255.0 + +# Ensure a pod is disabled +- local_action: + module: cs_pod + name: pod1 + zone: ch-zrh-ix-01 + state: disabled + +# Ensure a pod is enabled +- local_action: + module: cs_pod + name: pod1 + zone: ch-zrh-ix-01 + state: enabled + +# Ensure a pod is absent +- local_action: + module: cs_pod + name: pod1 + zone: ch-zrh-ix-01 + state: absent +''' + +RETURN = ''' +--- +id: + description: UUID of the pod. + returned: success + type: string + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the pod. + returned: success + type: string + sample: pod01 +start_ip: + description: Starting IP of the pod. + returned: success + type: string + sample: 10.100.1.101 +end_ip: + description: Ending IP of the pod. + returned: success + type: string + sample: 10.100.1.254 +netmask: + description: Netmask of the pod. + returned: success + type: string + sample: 255.255.255.0 +gateway: + description: Gateway of the pod. + returned: success + type: string + sample: 10.100.1.1 +allocation_state: + description: State of the pod.
+ returned: success + type: string + sample: Enabled +zone: + description: Name of zone the pod is in. + returned: success + type: string + sample: ch-gva-2 +''' + +# import cloudstack common +from ansible.module_utils.cloudstack import * + +class AnsibleCloudStackPod(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackPod, self).__init__(module) + self.returns = { + 'endip': 'end_ip', + 'startip': 'start_ip', + 'gateway': 'gateway', + 'netmask': 'netmask', + 'allocationstate': 'allocation_state', + } + self.pod = None + + + def _get_common_pod_args(self): + args = {} + args['name'] = self.module.params.get('name') + args['zoneid'] = self.get_zone(key='id') + args['startip'] = self.module.params.get('start_ip') + args['endip'] = self.module.params.get('end_ip') + args['netmask'] = self.module.params.get('netmask') + args['gateway'] = self.module.params.get('gateway') + state = self.module.params.get('state') + if state in [ 'enabled', 'disabled']: + args['allocationstate'] = state.capitalize() + return args + + + def get_pod(self): + if not self.pod: + args = {} + + uuid = self.module.params.get('id') + if uuid: + args['id'] = uuid + args['zoneid'] = self.get_zone(key='id') + pods = self.cs.listPods(**args) + if pods: + self.pod = pods['pod'][0] + return self.pod + + args['name'] = self.module.params.get('name') + args['zoneid'] = self.get_zone(key='id') + pods = self.cs.listPods(**args) + if pods: + self.pod = pods['pod'][0] + return self.pod + + + def present_pod(self): + pod = self.get_pod() + if pod: + pod = self._update_pod() + else: + pod = self._create_pod() + return pod + + + def _create_pod(self): + required_params = [ + 'start_ip', + 'netmask', + 'gateway', + ] + self.module.fail_on_missing_params(required_params=required_params) + + pod = None + self.result['changed'] = True + args = self._get_common_pod_args() + if not self.module.check_mode: + res = self.cs.createPod(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + pod = res['pod'] + return pod + + + def _update_pod(self): + pod = self.get_pod() + args = self._get_common_pod_args() + args['id'] = pod['id'] + + if self.has_changed(args, pod): + self.result['changed'] = True + + if not self.module.check_mode: + res = self.cs.updatePod(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + pod = res['pod'] + return pod + + + def absent_pod(self): + pod = self.get_pod() + if pod: + self.result['changed'] = True + + args = {} + args['id'] = pod['id'] + + if not self.module.check_mode: + res = self.cs.deletePod(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + return pod + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + id = dict(default=None), + name = dict(required=True), + gateway = dict(default=None), + netmask = dict(default=None), + start_ip = dict(default=None), + end_ip = dict(default=None), + zone = dict(default=None), + state = dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + try: + acs_pod = AnsibleCloudStackPod(module) + state = module.params.get('state') + if state in ['absent']: + pod = acs_pod.absent_pod() + else: + pod = acs_pod.present_pod() + + result = acs_pod.get_result(pod) + + except CloudStackException as e: + 
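# a CloudStackException from any of the API calls above surfaces here as a task failure +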
module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_portforward.py b/cloud/cloudstack/cs_portforward.py index f2f87b660ef..139fa7773d3 100644 --- a/cloud/cloudstack/cs_portforward.py +++ b/cloud/cloudstack/cs_portforward.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cs_portforward @@ -135,7 +139,6 @@ public_port: 53 private_port: 53 protocol: udp - open_firewall: true # remove ssh port forwarding - local_action: @@ -204,12 +207,6 @@ sample: 10.101.65.152 ''' -try: - from cs import CloudStack, CloudStackException, read_config - has_lib_cs = True -except ImportError: - has_lib_cs = False - # import cloudstack common from ansible.module_utils.cloudstack import * @@ -231,36 +228,9 @@ def __init__(self, module): 'publicport': 'public_port', 'publicendport': 'public_end_port', 'privateport': 'private_port', - 'private_end_port': 'private_end_port', + 'privateendport': 'private_end_port', } self.portforwarding_rule = None - self.vm_default_nic = None - - - def get_vm_guest_ip(self): - vm_guest_ip = self.module.params.get('vm_guest_ip') - default_nic = self.get_vm_default_nic() - - if not vm_guest_ip: - return default_nic['ipaddress'] - - for secondary_ip in default_nic['secondaryip']: - if vm_guest_ip == secondary_ip['ipaddress']: - return vm_guest_ip - self.module.fail_json(msg="Secondary IP '%s' not assigned to VM" % vm_guest_ip) - - - def get_vm_default_nic(self): - if self.vm_default_nic: - return self.vm_default_nic - - nics = self.cs.listNics(virtualmachineid=self.get_vm(key='id')) - if nics: - for n in nics['nic']: - if n['isdefault']: - self.vm_default_nic = n - return self.vm_default_nic - self.module.fail_json(msg="No default IP address of VM '%s' found" % self.module.params.get('vm')) def get_portforwarding_rule(self): @@ -273,6 +243,8 @@ def get_portforwarding_rule(self): args = {} args['ipaddressid'] = self.get_ip_address(key='id') + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') args['projectid'] = self.get_project(key='id') portforwarding_rules = self.cs.listPortForwardingRules(**args) @@ -305,6 +277,8 @@ def create_portforwarding_rule(self): args['vmguestip'] = self.get_vm_guest_ip() args['ipaddressid'] = self.get_ip_address(key='id') args['virtualmachineid'] = self.get_vm(key='id') + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') portforwarding_rule = None self.result['changed'] = True @@ -312,7 +286,7 @@ def create_portforwarding_rule(self): portforwarding_rule = self.cs.createPortForwardingRule(**args) poll_async = self.module.params.get('poll_async') if poll_async: - portforwarding_rule = self._poll_job(portforwarding_rule, 'portforwardingrule') + portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule') return portforwarding_rule @@ -323,12 +297,11 @@ def update_portforwarding_rule(self, portforwarding_rule): args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port') args['privateport'] = self.module.params.get('private_port') args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port') - args['openfirewall'] = 
self.module.params.get('open_firewall') args['vmguestip'] = self.get_vm_guest_ip() args['ipaddressid'] = self.get_ip_address(key='id') args['virtualmachineid'] = self.get_vm(key='id') - if self._has_changed(args, portforwarding_rule): + if self.has_changed(args, portforwarding_rule): self.result['changed'] = True if not self.module.check_mode: # API broken in 4.2.1?, workaround using remove/create instead of update @@ -337,7 +310,7 @@ def update_portforwarding_rule(self, portforwarding_rule): portforwarding_rule = self.cs.createPortForwardingRule(**args) poll_async = self.module.params.get('poll_async') if poll_async: - portforwarding_rule = self._poll_job(portforwarding_rule, 'portforwardingrule') + portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule') return portforwarding_rule @@ -353,7 +326,7 @@ def absent_portforwarding_rule(self): res = self.cs.deletePortForwardingRule(**args) poll_async = self.module.params.get('poll_async') if poll_async: - self._poll_job(res, 'portforwardingrule') + self.poll_job(res, 'portforwardingrule') return portforwarding_rule @@ -368,38 +341,31 @@ def get_result(self, portforwarding_rule): def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + ip_address = dict(required=True), + protocol= dict(choices=['tcp', 'udp'], default='tcp'), + public_port = dict(type='int', required=True), + public_end_port = dict(type='int', default=None), + private_port = dict(type='int', required=True), + private_end_port = dict(type='int', default=None), + state = dict(choices=['present', 'absent'], default='present'), + open_firewall = dict(type='bool', default=False), + vm_guest_ip = dict(default=None), + vm = dict(default=None), + zone = dict(default=None), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + poll_async = dict(type='bool', default=True), + )) + module = AnsibleModule( - argument_spec = dict( - ip_address = dict(required=True), - protocol= dict(choices=['tcp', 'udp'], default='tcp'), - public_port = dict(type='int', required=True), - public_end_port = dict(type='int', default=None), - private_port = dict(type='int', required=True), - private_end_port = dict(type='int', default=None), - state = dict(choices=['present', 'absent'], default='present'), - open_firewall = dict(choices=BOOLEANS, default=False), - vm_guest_ip = dict(default=None), - vm = dict(default=None), - zone = dict(default=None), - domain = dict(default=None), - account = dict(default=None), - project = dict(default=None), - poll_async = dict(choices=BOOLEANS, default=True), - api_key = dict(default=None), - api_secret = dict(default=None, no_log=True), - api_url = dict(default=None), - api_http_method = dict(choices=['get', 'post'], default='get'), - api_timeout = dict(type='int', default=10), - ), - required_together = ( - ['api_key', 'api_secret', 'api_url'], - ), + argument_spec=argument_spec, + required_together=cs_required_together(), supports_check_mode=True ) - if not has_lib_cs: - module.fail_json(msg="python library cs required: pip install cs") - try: acs_pf = AnsibleCloudStackPortforwarding(module) state = module.params.get('state') @@ -410,7 +376,7 @@ def main(): result = acs_pf.get_result(pf_rule) - except CloudStackException, e: + except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) diff --git a/cloud/cloudstack/cs_project.py b/cloud/cloudstack/cs_project.py index 6a48956bb1c..472762b4324 100644 --- 
a/cloud/cloudstack/cs_project.py +++ b/cloud/cloudstack/cs_project.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cs_project @@ -53,6 +57,13 @@ - Account the project is related to. required: false default: null + tags: + description: + - List of tags. Tags are a list of dictionaries having keys C(key) and C(value). + - "If you want to delete all tags, set an empty list, e.g. C(tags: [])." + required: false + default: null + version_added: "2.2" poll_async: description: - Poll async jobs until job has finished. @@ -66,6 +77,9 @@ - local_action: module: cs_project name: web + tags: + - { key: admin, value: john } + - { key: foo, value: bar } # Rename a project - local_action: @@ -131,12 +145,6 @@ sample: '[ { "key": "foo", "value": "bar" } ]' ''' -try: - from cs import CloudStack, CloudStackException, read_config - has_lib_cs = True -except ImportError: - has_lib_cs = False - # import cloudstack common from ansible.module_utils.cloudstack import * @@ -167,6 +175,10 @@ def present_project(self): project = self.create_project(project) else: project = self.update_project(project) + if project: + project = self.ensure_tags(resource=project, resource_type='project') + # refresh resource + self.project = project return project @@ -175,7 +187,7 @@ def update_project(self, project): args['id'] = project['id'] args['displaytext'] = self.get_or_fallback('display_text', 'name') - if self._has_changed(args, project): + if self.has_changed(args, project): self.result['changed'] = True if not self.module.check_mode: project = self.cs.updateProject(**args) @@ -185,7 +197,7 @@ def update_project(self, project): poll_async = self.module.params.get('poll_async') if project and poll_async: - project = self._poll_job(project, 'project') + project = self.poll_job(project, 'project') return project @@ -206,15 +218,12 @@ def create_project(self, project): poll_async = self.module.params.get('poll_async') if project and poll_async: - project = self._poll_job(project, 'project') + project = self.poll_job(project, 'project') return project - def state_project(self, state=None): - project = self.get_project() - - if not project: - self.module.fail_json(msg="No project named '%s' found."
% self.module.params('name')) + def state_project(self, state='active'): + project = self.present_project() if project['state'].lower() != state: self.result['changed'] = True @@ -233,7 +242,7 @@ def state_project(self, state=None): poll_async = self.module.params.get('poll_async') if project and poll_async: - project = self._poll_job(project, 'project') + project = self.poll_job(project, 'project') return project @@ -253,35 +262,29 @@ def absent_project(self): poll_async = self.module.params.get('poll_async') if res and poll_async: - res = self._poll_job(res, 'project') + res = self.poll_job(res, 'project') return project def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name = dict(required=True), + display_text = dict(default=None), + state = dict(choices=['present', 'absent', 'active', 'suspended' ], default='present'), + domain = dict(default=None), + account = dict(default=None), + poll_async = dict(type='bool', default=True), + tags=dict(type='list', aliases=['tag'], default=None), + )) + module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - display_text = dict(default=None), - state = dict(choices=['present', 'absent', 'active', 'suspended' ], default='present'), - domain = dict(default=None), - account = dict(default=None), - poll_async = dict(type='bool', choices=BOOLEANS, default=True), - api_key = dict(default=None), - api_secret = dict(default=None, no_log=True), - api_url = dict(default=None), - api_http_method = dict(choices=['get', 'post'], default='get'), - api_timeout = dict(type='int', default=10), - ), - required_together = ( - ['api_key', 'api_secret', 'api_url'], - ), + argument_spec=argument_spec, + required_together=cs_required_together(), supports_check_mode=True ) - if not has_lib_cs: - module.fail_json(msg="python library cs required: pip install cs") - try: acs_project = AnsibleCloudStackProject(module) @@ -297,7 +300,7 @@ def main(): result = acs_project.get_result(project) - except CloudStackException, e: + except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) diff --git a/cloud/cloudstack/cs_region.py b/cloud/cloudstack/cs_region.py new file mode 100644 index 00000000000..74e4c079fa6 --- /dev/null +++ b/cloud/cloudstack/cs_region.py @@ -0,0 +1,208 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': 'preview', + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: cs_region +short_description: Manages regions on Apache CloudStack based clouds. +description: + - Add, update and remove regions. +version_added: "2.3" +author: "René Moser (@resmo)" +options: + id: + description: + - ID of the region. + - Must be a number (int). + required: true + name: + description: + - Name of the region.
+ - Required if C(state=present) + required: false + default: null + endpoint: + description: + - Endpoint URL of the region. + - Required if C(state=present) + required: false + default: null + state: + description: + - State of the region. + required: false + default: 'present' + choices: [ 'present', 'absent' ] +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# create a region +local_action: + module: cs_region + id: 2 + name: geneva + endpoint: https://cloud.gva.example.com + +# remove a region with ID 2 +local_action: + module: cs_region + id: 2 + state: absent +''' + +RETURN = ''' +--- +id: + description: ID of the region. + returned: success + type: int + sample: 1 +name: + description: Name of the region. + returned: success + type: string + sample: local +endpoint: + description: Endpoint of the region. + returned: success + type: string + sample: http://cloud.example.com +gslb_service_enabled: + description: Whether the GSLB service is enabled or not + returned: success + type: bool + sample: true +portable_ip_service_enabled: + description: Whether the portable IP service is enabled or not + returned: success + type: bool + sample: true +''' + + +from ansible.module_utils.cloudstack import * +from ansible.module_utils.basic import AnsibleModule + +class AnsibleCloudStackRegion(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackRegion, self).__init__(module) + self.returns = { + 'endpoint': 'endpoint', + 'gslbserviceenabled': 'gslb_service_enabled', + 'portableipserviceenabled': 'portable_ip_service_enabled', + } + + def get_region(self): + id = self.module.params.get('id') + regions = self.cs.listRegions(id=id) + if regions: + return regions['region'][0] + return None + + def present_region(self): + region = self.get_region() + if not region: + region = self._create_region(region=region) + else: + region = self._update_region(region=region) + return region + + def _create_region(self, region): + self.result['changed'] = True + args = { + 'id': self.module.params.get('id'), + 'name': self.module.params.get('name'), + 'endpoint': self.module.params.get('endpoint') + } + if not self.module.check_mode: + res = self.cs.addRegion(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + region = res['region'] + return region + + def _update_region(self, region): + args = { + 'id': self.module.params.get('id'), + 'name': self.module.params.get('name'), + 'endpoint': self.module.params.get('endpoint') + } + if self.has_changed(args, region): + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.updateRegion(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + region = res['region'] + return region + + def absent_region(self): + region = self.get_region() + if region: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.removeRegion(id=region['id']) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + return region + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + id=dict(required=True, type='int'), + name=dict(default=None), + endpoint=dict(default=None), + state=dict(choices=['present', 'absent'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + required_if=[ + ('state', 'present', ['name', 'endpoint']), + ], + supports_check_mode=True + ) + + try: + 
acs_region = AnsibleCloudStackRegion(module) + + state = module.params.get('state') + if state == 'absent': + region = acs_region.absent_region() + else: + region = acs_region.present_region() + + result = acs_region.get_result(region) + + except CloudStackException as e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_resourcelimit.py b/cloud/cloudstack/cs_resourcelimit.py new file mode 100644 index 00000000000..e5bfb7096e2 --- /dev/null +++ b/cloud/cloudstack/cs_resourcelimit.py @@ -0,0 +1,220 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: cs_resourcelimit +short_description: Manages resource limits on Apache CloudStack based clouds. +description: + - Manage limits of resources for domains, accounts and projects. +version_added: "2.1" +author: "René Moser (@resmo)" +options: + resource_type: + description: + - Type of the resource. + required: true + choices: + - instance + - ip_address + - volume + - snapshot + - template + - network + - vpc + - cpu + - memory + - primary_storage + - secondary_storage + aliases: [ 'type' ] + limit: + description: + - Maximum number of the resource. + - Default is unlimited C(-1). + required: false + default: -1 + aliases: [ 'max' ] + domain: + description: + - Domain the resource is related to. + required: false + default: null + account: + description: + - Account the resource is related to. + required: false + default: null + project: + description: + - Name of the project the resource is related to. + required: false + default: null +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Update a resource limit for instances of a domain +local_action: + module: cs_resourcelimit + type: instance + limit: 10 + domain: customers + +# Update a resource limit for instances of an account +local_action: + module: cs_resourcelimit + type: instance + limit: 12 + account: moserre + domain: customers +''' + +RETURN = ''' +--- +resource_type: + description: Type of the resource. + returned: success + type: string + sample: instance +limit: + description: Maximum number of the resource. + returned: success + type: int + sample: -1 +domain: + description: Domain the resource is related to. + returned: success + type: string + sample: example domain +account: + description: Account the resource is related to. + returned: success + type: string + sample: example account +project: + description: Project the resource is related to.
+ returned: success + type: string + sample: example project +''' + +# import cloudstack common +from ansible.module_utils.cloudstack import * + +RESOURCE_TYPES = { + 'instance': 0, + 'ip_address': 1, + 'volume': 2, + 'snapshot': 3, + 'template': 4, + 'network': 6, + 'vpc': 7, + 'cpu': 8, + 'memory': 9, + 'primary_storage': 10, + 'secondary_storage': 11, +} + +class AnsibleCloudStackResourceLimit(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackResourceLimit, self).__init__(module) + self.returns = { + 'max': 'limit', + } + + + def get_resource_type(self): + resource_type = self.module.params.get('resource_type') + return RESOURCE_TYPES.get(resource_type) + + + def get_resource_limit(self): + args = {} + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') + args['resourcetype'] = self.get_resource_type() + resource_limit = self.cs.listResourceLimits(**args) + if resource_limit: + return resource_limit['resourcelimit'][0] + self.module.fail_json(msg="Resource limit type '%s' not found." % self.module.params.get('resource_type')) + + + def update_resource_limit(self): + resource_limit = self.get_resource_limit() + + args = {} + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') + args['resourcetype'] = self.get_resource_type() + args['max'] = self.module.params.get('limit', -1) + + if self.has_changed(args, resource_limit): + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.updateResourceLimit(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + resource_limit = res['resourcelimit'] + return resource_limit + + + def get_result(self, resource_limit): + self.result = super(AnsibleCloudStackResourceLimit, self).get_result(resource_limit) + self.result['resource_type'] = self.module.params.get('resource_type') + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + resource_type = dict(required=True, choices=RESOURCE_TYPES.keys(), aliases=['type']), + limit = dict(default=-1, aliases=['max']), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + try: + acs_resource_limit = AnsibleCloudStackResourceLimit(module) + resource_limit = acs_resource_limit.update_resource_limit() + result = acs_resource_limit.get_result(resource_limit) + + except CloudStackException as e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_router.py b/cloud/cloudstack/cs_router.py new file mode 100644 index 00000000000..49a2dbe7b6b --- /dev/null +++ b/cloud/cloudstack/cs_router.py @@ -0,0 +1,378 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
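A note on the cs_resourcelimit flow just shown, before moving on to cs_router: CloudStack's listResourceLimits/updateResourceLimit APIs speak integer resource types, so the module translates the friendly name via RESOURCE_TYPES, compares the current limit, and calls the API only on a mismatch. A minimal self-contained sketch of that translate-compare-update pattern; list_limits and update_limit are hypothetical stand-ins for the real API calls:

# Hypothetical stand-ins for listResourceLimits / updateResourceLimit.
RESOURCE_TYPES = {'instance': 0, 'ip_address': 1, 'volume': 2, 'cpu': 8}

def ensure_limit(list_limits, update_limit, resource_type, limit=-1):
    rtype = RESOURCE_TYPES[resource_type]         # friendly name -> API int
    current = list_limits(resourcetype=rtype)[0]
    if current.get('max') == limit:
        return current, False                     # idempotent: no API call
    return update_limit(resourcetype=rtype, max=limit), True

# Demo against a fake backend:
limits = [{'resourcetype': 0, 'max': -1}]
updated, changed = ensure_limit(
    lambda **kw: limits,
    lambda **kw: {'resourcetype': kw['resourcetype'], 'max': kw['max']},
    'instance',
    limit=10,
)
assert changed and updated['max'] == 10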
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: cs_router +short_description: Manages routers on Apache CloudStack based clouds. +description: + - Start, restart, stop and destroy routers. + - C(state=present) is not able to create routers, use M(cs_network) instead. +version_added: "2.2" +author: "René Moser (@resmo)" +options: + name: + description: + - Name of the router. + required: true + service_offering: + description: + - Name or ID of the service offering of the router. + required: false + default: null + domain: + description: + - Domain the router is related to. + required: false + default: null + account: + description: + - Account the router is related to. + required: false + default: null + project: + description: + - Name of the project the router is related to. + required: false + default: null + state: + description: + - State of the router. + required: false + default: 'present' + choices: [ 'present', 'absent', 'started', 'stopped', 'restarted' ] +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Ensure the router has the desired service offering, no matter if +# the router is running or not. +- local_action: + module: cs_router + name: r-40-VM + service_offering: System Offering for Software Router + +# Ensure started +- local_action: + module: cs_router + name: r-40-VM + state: started + +# Ensure started with desired service offering. +# If the service offering changes, the router will be rebooted. +- local_action: + module: cs_router + name: r-40-VM + service_offering: System Offering for Software Router + state: started + +# Ensure stopped +- local_action: + module: cs_router + name: r-40-VM + state: stopped + +# Remove a router +- local_action: + module: cs_router + name: r-40-VM + state: absent +''' + +RETURN = ''' +--- +id: + description: UUID of the router. + returned: success + type: string + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the router. + returned: success + type: string + sample: r-40-VM +created: + description: Date the router was created. + returned: success + type: string + sample: 2014-12-01T14:57:57+0100 +template_version: + description: Version of the system VM template. + returned: success + type: string + sample: 4.5.1 +requires_upgrade: + description: Whether the router needs to be upgraded to the new template. + returned: success + type: bool + sample: false +redundant_state: + description: Redundant state of the router. + returned: success + type: string + sample: UNKNOWN +role: + description: Role of the router. + returned: success + type: string + sample: VIRTUAL_ROUTER +zone: + description: Name of zone the router is in. + returned: success + type: string + sample: ch-gva-2 +service_offering: + description: Name of the service offering the router has. + returned: success + type: string + sample: System Offering For Software Router +state: + description: State of the router. + returned: success + type: string + sample: Active +domain: + description: Domain the router is related to.
+ returned: success + type: string + sample: ROOT +account: + description: Account the router is related to. + returned: success + type: string + sample: admin +''' + +# import cloudstack common +from ansible.module_utils.cloudstack import * + +class AnsibleCloudStackRouter(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackRouter, self).__init__(module) + self.returns = { + 'serviceofferingname': 'service_offering', + 'version': 'template_version', + 'requiresupgrade': 'requires_upgrade', + 'redundantstate': 'redundant_state', + 'role': 'role' + } + self.router = None + + + def get_service_offering_id(self): + service_offering = self.module.params.get('service_offering') + if not service_offering: + return None + + args = {} + args['issystem'] = True + + service_offerings = self.cs.listServiceOfferings(**args) + if service_offerings: + for s in service_offerings['serviceoffering']: + if service_offering in [ s['name'], s['id'] ]: + return s['id'] + self.module.fail_json(msg="Service offering '%s' not found" % service_offering) + + def get_router(self): + if not self.router: + router = self.module.params.get('name') + + args = {} + args['projectid'] = self.get_project(key='id') + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + + routers = self.cs.listRouters(**args) + if routers: + for r in routers['router']: + if router.lower() in [ r['name'].lower(), r['id']]: + self.router = r + break + return self.router + + def start_router(self): + router = self.get_router() + if not router: + self.module.fail_json(msg="Router not found") + + if router['state'].lower() != "running": + self.result['changed'] = True + + args = {} + args['id'] = router['id'] + + if not self.module.check_mode: + res = self.cs.startRouter(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + router = self.poll_job(res, 'router') + return router + + def stop_router(self): + router = self.get_router() + if not router: + self.module.fail_json(msg="Router not found") + + if router['state'].lower() != "stopped": + self.result['changed'] = True + + args = {} + args['id'] = router['id'] + + if not self.module.check_mode: + res = self.cs.stopRouter(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + router = self.poll_job(res, 'router') + return router + + def reboot_router(self): + router = self.get_router() + if not router: + self.module.fail_json(msg="Router not found") + + self.result['changed'] = True + + args = {} + args['id'] = router['id'] + + if not self.module.check_mode: + res = self.cs.rebootRouter(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + router = self.poll_job(res, 'router') + return router + + def absent_router(self): + router = self.get_router() + if router: + self.result['changed'] = True + + args = {} + args['id'] = router['id'] + + if not self.module.check_mode: + res = self.cs.destroyRouter(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'router') + return router + + + def present_router(self): + router = self.get_router() + if not router: + 
self.module.fail_json(msg="Router cannot be created using the API, see cs_network.") + + args = {} + args['id'] = router['id'] + args['serviceofferingid'] = self.get_service_offering_id() + + state = self.module.params.get('state') + + if self.has_changed(args, router): + self.result['changed'] = True + + if not self.module.check_mode: + current_state = router['state'].lower() + + self.stop_router() + router = self.cs.changeServiceForRouter(**args) + + if 'errortext' in router: + self.module.fail_json(msg="Failed: '%s'" % router['errortext']) + + if state in [ 'restarted', 'started' ]: + router = self.start_router() + + # if state=present we get to the state before the service + # offering change. + elif state == "present" and current_state == "running": + router = self.start_router() + + elif state == "started": + router = self.start_router() + + elif state == "stopped": + router = self.stop_router() + + elif state == "restarted": + router = self.reboot_router() + + return router + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name = dict(required=True), + service_offering = dict(default=None), + state = dict(choices=['present', 'started', 'stopped', 'restarted', 'absent'], default="present"), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + poll_async = dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + try: + acs_router = AnsibleCloudStackRouter(module) + + state = module.params.get('state') + if state in ['absent']: + router = acs_router.absent_router() + else: + router = acs_router.present_router() + + result = acs_router.get_result(router) + + except CloudStackException as e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_securitygroup.py b/cloud/cloudstack/cs_securitygroup.py index f54de925936..c65d63c8f4d 100644 --- a/cloud/cloudstack/cs_securitygroup.py +++ b/cloud/cloudstack/cs_securitygroup.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
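Worth noting about present_router above: CloudStack only allows changing a system VM's service offering while the VM is stopped, so the module records the current state, stops the router, applies changeServiceForRouter, and starts it again only if it was running or if started/restarted was explicitly requested. A rough sketch of that stop-change-restore sequence, with the callables as illustrative stand-ins for the real API calls:

def change_offering(router, offering_id, stop, change, start, state='present'):
    # Offering changes require a stopped router.
    was_running = router['state'].lower() == 'running'
    stop(router)
    router = change(router, offering_id)  # stand-in for changeServiceForRouter
    # Restore the previous state, or honour an explicit started/restarted.
    if state in ('started', 'restarted') or (state == 'present' and was_running):
        router = start(router)
    return router

# Demo with dict-based fakes:
r = {'state': 'Running'}
out = change_offering(
    r, 'offering-uuid',
    stop=lambda x: x.update(state='Stopped'),
    change=lambda x, o: dict(x, serviceofferingid=o),
    start=lambda x: dict(x, state='Running'),
)
assert out['state'] == 'Running' and out['serviceofferingid'] == 'offering-uuid'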
+ returned: success + type: string + sample: example account ''' -try: - from cs import CloudStack, CloudStackException, read_config - has_lib_cs = True -except ImportError: - has_lib_cs = False - # import cloudstack common from ansible.module_utils.cloudstack import * @@ -102,15 +130,16 @@ def __init__(self, module): def get_security_group(self): if not self.security_group: - sg_name = self.module.params.get('name') + args = {} - args['projectid'] = self.get_project('id') + args['projectid'] = self.get_project(key='id') + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['securitygroupname'] = self.module.params.get('name') + sgs = self.cs.listSecurityGroups(**args) if sgs: - for s in sgs['securitygroup']: - if s['name'] == sg_name: - self.security_group = s - break + self.security_group = sgs['securitygroup'][0] return self.security_group @@ -121,7 +150,9 @@ def create_security_group(self): args = {} args['name'] = self.module.params.get('name') - args['projectid'] = self.get_project('id') + args['projectid'] = self.get_project(key='id') + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') args['description'] = self.module.params.get('description') if not self.module.check_mode: @@ -140,7 +171,9 @@ def remove_security_group(self): args = {} args['name'] = self.module.params.get('name') - args['projectid'] = self.get_project('id') + args['projectid'] = self.get_project(key='id') + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') if not self.module.check_mode: res = self.cs.deleteSecurityGroup(**args) @@ -152,27 +185,22 @@ def remove_security_group(self): def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name = dict(required=True), + description = dict(default=None), + state = dict(choices=['present', 'absent'], default='present'), + project = dict(default=None), + account = dict(default=None), + domain = dict(default=None), + )) + module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - description = dict(default=None), - state = dict(choices=['present', 'absent'], default='present'), - project = dict(default=None), - api_key = dict(default=None), - api_secret = dict(default=None, no_log=True), - api_url = dict(default=None), - api_http_method = dict(choices=['get', 'post'], default='get'), - api_timeout = dict(type='int', default=10), - ), - required_together = ( - ['api_key', 'api_secret', 'api_url'], - ), + argument_spec=argument_spec, + required_together=cs_required_together(), supports_check_mode=True ) - if not has_lib_cs: - module.fail_json(msg="python library cs required: pip install cs") - try: acs_sg = AnsibleCloudStackSecurityGroup(module) @@ -184,7 +212,7 @@ def main(): result = acs_sg.get_result(sg) - except CloudStackException, e: + except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) diff --git a/cloud/cloudstack/cs_securitygroup_rule.py b/cloud/cloudstack/cs_securitygroup_rule.py index c17923daca7..85617b5baac 100644 --- a/cloud/cloudstack/cs_securitygroup_rule.py +++ b/cloud/cloudstack/cs_securitygroup_rule.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
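The lookup change in cs_securitygroup above is a small but typical cleanup: instead of listing every security group and matching the name in Python, the module now passes securitygroupname (plus account, domain and project) to listSecurityGroups and takes the first hit. A hedged before/after sketch with a fake list call:

def find_group_old(list_groups, name):
    # Old approach: fetch everything, filter client-side.
    for sg in list_groups().get('securitygroup', []):
        if sg['name'] == name:
            return sg
    return None

def find_group_new(list_groups, name):
    # New approach: let the API filter; the first entry (if any) is the match.
    res = list_groups(securitygroupname=name)
    return res['securitygroup'][0] if res else None

data = {'securitygroup': [{'name': 'web'}]}
assert find_group_old(lambda **kw: data, 'web') == {'name': 'web'}
assert find_group_new(
    lambda **kw: data if kw.get('securitygroupname') == 'web' else {},
    'web') == {'name': 'web'}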
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cs_securitygroup_rule @@ -181,12 +185,6 @@ sample: 80 ''' -try: - from cs import CloudStack, CloudStackException, read_config - has_lib_cs = True -except ImportError: - has_lib_cs = False - # import cloudstack common from ansible.module_utils.cloudstack import * @@ -309,14 +307,16 @@ def add_rule(self): res = None sg_type = self.module.params.get('type') if sg_type == 'ingress': - rule = self._get_rule(security_group['ingressrule']) + if 'ingressrule' in security_group: + rule = self._get_rule(security_group['ingressrule']) if not rule: self.result['changed'] = True if not self.module.check_mode: res = self.cs.authorizeSecurityGroupIngress(**args) elif sg_type == 'egress': - rule = self._get_rule(security_group['egressrule']) + if 'egressrule' in security_group: + rule = self._get_rule(security_group['egressrule']) if not rule: self.result['changed'] = True if not self.module.check_mode: @@ -327,7 +327,7 @@ def add_rule(self): poll_async = self.module.params.get('poll_async') if res and poll_async: - security_group = self._poll_job(res, 'securitygroup') + security_group = self.poll_job(res, 'securitygroup') key = sg_type + "rule" # ingressrule / egressrule if key in security_group: rule = security_group[key][0] @@ -358,7 +358,7 @@ def remove_rule(self): poll_async = self.module.params.get('poll_async') if res and poll_async: - res = self._poll_job(res, 'securitygroup') + res = self.poll_job(res, 'securitygroup') return rule @@ -371,30 +371,29 @@ def get_result(self, security_group_rule): def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + security_group = dict(required=True), + type = dict(choices=['ingress', 'egress'], default='ingress'), + cidr = dict(default='0.0.0.0/0'), + user_security_group = dict(default=None), + protocol = dict(choices=['tcp', 'udp', 'icmp', 'ah', 'esp', 'gre'], default='tcp'), + icmp_type = dict(type='int', default=None), + icmp_code = dict(type='int', default=None), + start_port = dict(type='int', default=None, aliases=['port']), + end_port = dict(type='int', default=None), + state = dict(choices=['present', 'absent'], default='present'), + project = dict(default=None), + poll_async = dict(type='bool', default=True), + )) + required_together = cs_required_together() + required_together.extend([ + ['icmp_type', 'icmp_code'], + ]) + module = AnsibleModule( - argument_spec = dict( - security_group = dict(required=True), - type = dict(choices=['ingress', 'egress'], default='ingress'), - cidr = dict(default='0.0.0.0/0'), - user_security_group = dict(default=None), - protocol = dict(choices=['tcp', 'udp', 'icmp', 'ah', 'esp', 'gre'], default='tcp'), - icmp_type = dict(type='int', default=None), - icmp_code = dict(type='int', default=None), - start_port = dict(type='int', default=None, aliases=['port']), - end_port = dict(type='int', default=None), - state = dict(choices=['present', 'absent'], default='present'), - project = dict(default=None), - poll_async = dict(choices=BOOLEANS, default=True), - api_key = dict(default=None), - api_secret = dict(default=None, no_log=True), - api_url = dict(default=None), - api_http_method = dict(choices=['get', 'post'], default='get'), - api_timeout = dict(type='int', default=10), - ), - required_together = ( - ['icmp_type', 'icmp_code'], - ['api_key', 'api_secret', 'api_url'], - ), + argument_spec=argument_spec, + required_together=required_together, mutually_exclusive = 
( ['icmp_type', 'start_port'], ['icmp_type', 'end_port'], @@ -404,9 +403,6 @@ def main(): supports_check_mode=True ) - if not has_lib_cs: - module.fail_json(msg="python library cs required: pip install cs") - try: acs_sg_rule = AnsibleCloudStackSecurityGroupRule(module) @@ -418,7 +414,7 @@ def main(): result = acs_sg_rule.get_result(sg_rule) - except CloudStackException, e: + except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) diff --git a/cloud/cloudstack/cs_snapshot_policy.py b/cloud/cloudstack/cs_snapshot_policy.py new file mode 100644 index 00000000000..157d05e803c --- /dev/null +++ b/cloud/cloudstack/cs_snapshot_policy.py @@ -0,0 +1,387 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: cs_snapshot_policy +short_description: Manages volume snapshot policies on Apache CloudStack based clouds. +description: + - Create, update and delete volume snapshot policies. +version_added: '2.2' +author: "René Moser (@resmo)" +options: + volume: + description: + - Name of the volume. + - Either C(volume) or C(vm) is required. + required: false + default: null + volume_type: + description: + - Type of the volume. + required: false + default: null + choices: + - DATADISK + - ROOT + version_added: "2.3" + vm: + description: + - Name of the instance to select the volume from. + - Use C(volume_type) if VM has a DATADISK and ROOT volume. + - In case of C(volume_type=DATADISK), additionally use C(device_id) if VM has more than one DATADISK volume. + - Either C(volume) or C(vm) is required. + required: false + default: null + version_added: "2.3" + device_id: + description: + - ID of the device on a VM the volume is attached to. + - This will only be considered if VM has multiple DATADISK volumes. + required: false + default: null + version_added: "2.3" + vpc: + description: + - Name of the vpc the instance is deployed in. + required: false + default: null + version_added: "2.3" + interval_type: + description: + - Interval of the snapshot. + required: false + default: 'daily' + choices: [ 'hourly', 'daily', 'weekly', 'monthly' ] + aliases: [ 'interval' ] + max_snaps: + description: + - Max number of snapshots. + required: false + default: 8 + aliases: [ 'max' ] + schedule: + description: + - Time the snapshot is scheduled. Required if C(state=present). + - 'Format for C(interval_type=HOURLY): C(MM)' + - 'Format for C(interval_type=DAILY): C(MM:HH)' + - 'Format for C(interval_type=WEEKLY): C(MM:HH:DD (1-7))' + - 'Format for C(interval_type=MONTHLY): C(MM:HH:DD (1-28))' + required: false + default: null + time_zone: + description: + - Specifies a timezone for this command. 
+ required: false + default: 'UTC' + aliases: [ 'timezone' ] + state: + description: + - State of the snapshot policy. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + domain: + description: + - Domain the volume is related to. + required: false + default: null + account: + description: + - Account the volume is related to. + required: false + default: null + project: + description: + - Name of the project the volume is related to. + required: false + default: null +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Ensure a snapshot policy daily at 1h00 UTC +- local_action: + module: cs_snapshot_policy + volume: ROOT-478 + schedule: '00:1' + max_snaps: 3 + +# Ensure a snapshot policy daily at 1h00 UTC on the second DATADISK of VM web-01 +- local_action: + module: cs_snapshot_policy + vm: web-01 + volume_type: DATADISK + device_id: 2 + schedule: '00:1' + max_snaps: 3 + +# Ensure a snapshot policy hourly at minute 5 UTC +- local_action: + module: cs_snapshot_policy + volume: ROOT-478 + schedule: '5' + interval_type: hourly + max_snaps: 1 + +# Ensure a snapshot policy weekly on Sunday at 05h00, TZ Europe/Zurich +- local_action: + module: cs_snapshot_policy + volume: ROOT-478 + schedule: '00:5:1' + interval_type: weekly + max_snaps: 1 + time_zone: 'Europe/Zurich' + +# Ensure a snapshot policy is absent +- local_action: + module: cs_snapshot_policy + volume: ROOT-478 + interval_type: hourly + state: absent +''' + +RETURN = ''' +--- +id: + description: UUID of the snapshot policy. + returned: success + type: string + sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f +interval_type: + description: interval type of the snapshot policy. + returned: success + type: string + sample: daily +schedule: + description: schedule of the snapshot policy. + returned: success + type: string + sample: +max_snaps: + description: maximum number of snapshots retained. + returned: success + type: int + sample: 10 +time_zone: + description: the time zone of the snapshot policy. + returned: success + type: string + sample: Etc/UTC +volume: + description: the volume of the snapshot policy. + returned: success + type: string + sample: ROOT-478 +zone: + description: Name of zone the volume is related to. + returned: success + type: string + sample: ch-gva-2 +project: + description: Name of project the volume is related to. + returned: success + type: string + sample: Production +account: + description: Account the volume is related to. + returned: success + type: string + sample: example account +domain: + description: Domain the volume is related to.
+ returned: success + type: string + sample: example domain +''' + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackSnapshotPolicy(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackSnapshotPolicy, self).__init__(module) + self.returns = { + 'schedule': 'schedule', + 'timezone': 'time_zone', + 'maxsnaps': 'max_snaps', + } + self.interval_types = { + 'hourly': 0, + 'daily': 1, + 'weekly': 2, + 'monthly': 3, + } + self.volume = None + + def get_interval_type(self): + interval_type = self.module.params.get('interval_type') + return self.interval_types[interval_type] + + def get_volume(self, key=None): + if self.volume: + return self._get_by_key(key, self.volume) + + args = { + 'name': self.module.params.get('volume'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'virtualmachineid': self.get_vm(key='id'), + 'type': self.module.params.get('volume_type'), + } + volumes = self.cs.listVolumes(**args) + if volumes: + if volumes['count'] > 1: + device_id = self.module.params.get('device_id') + if not device_id: + self.module.fail_json(msg="Found more than 1 volume: combine params 'vm', 'volume_type', 'device_id' and/or 'volume' to select the volume") + else: + for v in volumes['volume']: + if v.get('deviceid') == device_id: + self.volume = v + return self._get_by_key(key, self.volume) + self.module.fail_json(msg="No volume found with device id %s" % device_id) + self.volume = volumes['volume'][0] + return self._get_by_key(key, self.volume) + return None + + def get_snapshot_policy(self): + args = { + 'volumeid': self.get_volume(key='id') + } + policies = self.cs.listSnapshotPolicies(**args) + if policies: + for policy in policies['snapshotpolicy']: + if policy['intervaltype'] == self.get_interval_type(): + return policy + return None + + def present_snapshot_policy(self): + required_params = [ + 'schedule', + ] + self.module.fail_on_missing_params(required_params=required_params) + + policy = self.get_snapshot_policy() + args = { + 'id': policy.get('id') if policy else None, + 'intervaltype': self.module.params.get('interval_type'), + 'schedule': self.module.params.get('schedule'), + 'maxsnaps': self.module.params.get('max_snaps'), + 'timezone': self.module.params.get('time_zone'), + 'volumeid': self.get_volume(key='id') + } + if not policy or (policy and self.has_changed(policy, args, only_keys=['schedule', 'maxsnaps', 'timezone'])): + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.createSnapshotPolicy(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + policy = res['snapshotpolicy'] + return policy + + def absent_snapshot_policy(self): + policy = self.get_snapshot_policy() + if policy: + self.result['changed'] = True + args = { + 'id': policy['id'] + } + if not self.module.check_mode: + res = self.cs.deleteSnapshotPolicies(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + return policy + + def get_result(self, policy): + super(AnsibleCloudStackSnapshotPolicy, self).get_result(policy) + if policy and 'intervaltype' in policy: + for key, value in self.interval_types.items(): + if value == policy['intervaltype']: + self.result['interval_type'] = key + break + volume = self.get_volume() + if volume: + volume_results = { + 'volume': volume.get('name'), + 'zone': volume.get('zonename'), + 'project':
volume.get('project'), + 'account': volume.get('account'), + 'domain': volume.get('domain'), + } + self.result.update(volume_results) + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + volume=dict(default=None), + volume_type=dict(choices=['DATADISK', 'ROOT'], default=None), + vm=dict(default=None), + device_id=dict(type='int', default=None), + vpc=dict(default=None), + interval_type=dict(default='daily', choices=['hourly', 'daily', 'weekly', 'monthly'], aliases=['interval']), + schedule=dict(default=None), + time_zone=dict(default='UTC', aliases=['timezone']), + max_snaps=dict(type='int', default=8, aliases=['max']), + state=dict(choices=['present', 'absent'], default='present'), + domain=dict(default=None), + account=dict(default=None), + project=dict(default=None), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + required_one_of = ( + ['vm', 'volume'], + ), + supports_check_mode=True + ) + + try: + acs_snapshot_policy = AnsibleCloudStackSnapshotPolicy(module) + + state = module.params.get('state') + if state in ['absent']: + policy = acs_snapshot_policy.absent_snapshot_policy() + else: + policy = acs_snapshot_policy.present_snapshot_policy() + + result = acs_snapshot_policy.get_result(policy) + + except CloudStackException as e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_sshkeypair.py b/cloud/cloudstack/cs_sshkeypair.py index ebd906f7d5c..2724c58c71d 100644 --- a/cloud/cloudstack/cs_sshkeypair.py +++ b/cloud/cloudstack/cs_sshkeypair.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
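Volume selection in cs_snapshot_policy (see get_volume above) follows a clear precedence: with a single matching volume it is used directly; with several, device_id must disambiguate, and a missing or unmatched device_id is an error. A self-contained sketch of just that selection rule:

def pick_volume(volumes, device_id=None):
    if len(volumes) == 1:
        return volumes[0]
    if device_id is None:
        raise ValueError("multiple volumes found: specify device_id "
                         "(or narrow with vm/volume_type/volume)")
    for vol in volumes:
        if vol.get('deviceid') == device_id:
            return vol
    raise ValueError("no volume with device id %s" % device_id)

vols = [{'name': 'ROOT-478', 'deviceid': 0},
        {'name': 'DATADISK-1', 'deviceid': 2}]
assert pick_volume(vols, device_id=2)['name'] == 'DATADISK-1'
assert pick_volume(vols[:1])['name'] == 'ROOT-478'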
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cs_sshkeypair @@ -64,15 +68,24 @@ EXAMPLES = ''' # create a new private / public key pair: -- local_action: cs_sshkeypair name=linus@example.com +- cs_sshkeypair: + name: linus@example.com + delegate_to: localhost register: key -- debug: msg='private key is {{ key.private_key }}' +- debug: + msg: 'Private key is {{ key.private_key }}' # remove a public key by its name: -- local_action: cs_sshkeypair name=linus@example.com state=absent +- cs_sshkeypair: + name: linus@example.com + state: absent + delegate_to: localhost # register your existing local public key: -- local_action: cs_sshkeypair name=linus@example.com public_key='{{ lookup('file', '~/.ssh/id_rsa.pub') }}' +- cs_sshkeypair: + name: linus@example.com + public_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}" + delegate_to: localhost ''' RETURN = ''' @@ -99,13 +112,6 @@ sample: "-----BEGIN RSA PRIVATE KEY-----\nMIICXQIBAAKBgQCkeFYjI+4k8bWfIRMzp4pCzhlopNydbbwRu824P5ilD4ATWMUG\nvEtuCQ2Mp5k5Bma30CdYHgh2/SbxC5RxXSUKTUJtTKpoJUy8PAhb1nn9dnfkC2oU\naRVi9NRUgypTIZxMpgooHOxvAzWxbZCyh1W+91Ld3FNaGxTLqTgeevY84wIDAQAB\nAoGAcwQwgLyUwsNB1vmjWwE0QEmvHS4FlhZyahhi4hGfZvbzAxSWHIK7YUT1c8KU\n9XsThEIN8aJ3GvcoL3OAqNKRnoNb14neejVHkYRadhxqc0GVN6AUIyCqoEMpvhFI\nQrinM572ORzv5ffRjCTbvZcYlW+sqFKNo5e8pYIB8TigpFECQQDu7bg9vkvg8xPs\nkP1K+EH0vsR6vUfy+m3euXjnbJtiP7RoTkZk0JQMOmexgy1qQhISWT0e451wd62v\nJ7M0trl5AkEAsDivJnMIlCCCypwPN4tdNUYpe9dtidR1zLmb3SA7wXk5xMUgLZI9\ncWPjBCMt0KKShdDhQ+hjXAyKQLF7iAPuOwJABjdHCMwvmy2XwhrPjCjDRoPEBtFv\n0sFzJE08+QBZVogDwIbwy+SlRWArnHGmN9J6N+H8dhZD3U4vxZPJ1MBAOQJBAJxO\nCv1dt1Q76gbwmYa49LnWO+F+2cgRTVODpr5iYt5fOmBQQRRqzFkRMkFvOqn+KVzM\nQ6LKM6dn8BEl295vLhUCQQCVDWzoSk3GjL3sOjfAUTyAj8VAXM69llaptxWWySPM\nE9pA+8rYmHfohYFx7FD5/KWCO+sfmxTNB48X0uwyE8tO\n-----END RSA PRIVATE KEY-----\n" ''' - -try: - from cs import CloudStack, CloudStackException, read_config - has_lib_cs = True -except ImportError: - has_lib_cs = False - try: import sshpubkeys has_lib_sshpubkeys = True @@ -205,29 +211,22 @@ def _get_ssh_fingerprint(self, public_key): def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name = dict(required=True), + public_key = dict(default=None), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + state = dict(choices=['present', 'absent'], default='present'), + )) + module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - public_key = dict(default=None), - domain = dict(default=None), - account = dict(default=None), - project = dict(default=None), - state = dict(choices=['present', 'absent'], default='present'), - api_key = dict(default=None), - api_secret = dict(default=None, no_log=True), - api_url = dict(default=None), - api_http_method = dict(choices=['get', 'post'], default='get'), - api_timeout = dict(type='int', default=10), - ), - required_together = ( - ['api_key', 'api_secret', 'api_url'], - ), + argument_spec=argument_spec, + required_together=cs_required_together(), supports_check_mode=True ) - if not has_lib_cs: - module.fail_json(msg="python library cs required: pip install cs") - if not has_lib_sshpubkeys: module.fail_json(msg="python library sshpubkeys required: pip install sshpubkeys") @@ -245,7 +244,7 @@ def main(): result = acs_sshkey.get_result(ssh_key) - except CloudStackException, e: + except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result)
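cs_sshkeypair's idempotency hinges on fingerprints: CloudStack stores only the key's fingerprint, so the module compares the fingerprint of the desired public key, computed locally via the sshpubkeys library (hence the hard dependency check above), against the registered one. A hedged sketch of that comparison; the exact hash_md5() output format varies between sshpubkeys versions, so treat the normalization as an assumption:

import sshpubkeys  # pip install sshpubkeys

def fingerprints_match(public_key, registered_fingerprint):
    key = sshpubkeys.SSHKey(public_key)
    key.parse()  # raises an InvalidKeyException on malformed input
    # Newer sshpubkeys versions prefix the digest with 'MD5:'; strip it so
    # the comparison works against CloudStack's plain colon-hex form
    # (an assumption about both formats, verify for your versions).
    local = key.hash_md5().replace('MD5:', '')
    return local == registered_fingerprint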
diff --git a/cloud/cloudstack/cs_staticnat.py b/cloud/cloudstack/cs_staticnat.py index 4b73d86e32b..a805a1c8bb5 100644 --- a/cloud/cloudstack/cs_staticnat.py +++ b/cloud/cloudstack/cs_staticnat.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cs_staticnat @@ -42,6 +46,18 @@ - VM guest NIC secondary IP address for the static NAT. required: false default: false + network: + description: + - Network the IP address is related to. + required: false + default: null + version_added: "2.2" + vpc: + description: + - Name of the VPC. + required: false + default: null + version_added: "2.3" state: description: - State of the static NAT. @@ -140,13 +156,6 @@ sample: example domain ''' - -try: - from cs import CloudStack, CloudStackException, read_config - has_lib_cs = True -except ImportError: - has_lib_cs = False - # import cloudstack common from ansible.module_utils.cloudstack import * @@ -161,35 +170,6 @@ def __init__(self, module): 'ipaddress': 'ip_address', 'vmipaddress': 'vm_guest_ip', } - self.vm_default_nic = None - - -# TODO: move it to cloudstack utils, also used in cs_portforward - def get_vm_guest_ip(self): - vm_guest_ip = self.module.params.get('vm_guest_ip') - default_nic = self.get_vm_default_nic() - - if not vm_guest_ip: - return default_nic['ipaddress'] - - for secondary_ip in default_nic['secondaryip']: - if vm_guest_ip == secondary_ip['ipaddress']: - return vm_guest_ip - self.module.fail_json(msg="Secondary IP '%s' not assigned to VM" % vm_guest_ip) - - -# TODO: move it to cloudstack utils, also used in cs_portforward - def get_vm_default_nic(self): - if self.vm_default_nic: - return self.vm_default_nic - - nics = self.cs.listNics(virtualmachineid=self.get_vm(key='id')) - if nics: - for n in nics['nic']: - if n['isdefault']: - self.vm_default_nic = n - return self.vm_default_nic - self.module.fail_json(msg="No default IP address of VM '%s' found" % self.module.params.get('vm')) def create_static_nat(self, ip_address): @@ -198,6 +178,7 @@ def create_static_nat(self, ip_address): args['virtualmachineid'] = self.get_vm(key='id') args['ipaddressid'] = ip_address['id'] args['vmguestip'] = self.get_vm_guest_ip() + args['networkid'] = self.get_network(key='id') if not self.module.check_mode: res = self.cs.enableStaticNat(**args) if 'errortext' in res: @@ -217,13 +198,13 @@ def update_static_nat(self, ip_address): # make an alias, so we can use _has_changed() ip_address['vmguestip'] = ip_address['vmipaddress'] - if self._has_changed(args, ip_address): + if self.has_changed(args, ip_address, ['vmguestip', 'virtualmachineid']): self.result['changed'] = True if not self.module.check_mode: res = self.cs.disableStaticNat(ipaddressid=ip_address['id']) if 'errortext' in res: self.module.fail_json(msg="Failed: '%s'" % res['errortext']) - res = self._poll_job(res, 'staticnat') + self.poll_job(res, 'staticnat') res = self.cs.enableStaticNat(**args) if 'errortext' in res: self.module.fail_json(msg="Failed: '%s'" % res['errortext']) @@ -253,38 +234,32 @@ def absent_static_nat(self): self.module.fail_json(msg="Failed: '%s'" % res['errortext']) poll_async = self.module.params.get('poll_async') if poll_async: - res = self._poll_job(res, 'staticnat') + self.poll_job(res, 'staticnat') return ip_address - def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + ip_address = 
dict(required=True), + vm = dict(default=None), + vm_guest_ip = dict(default=None), + network = dict(default=None), + vpc = dict(default=None), + state = dict(choices=['present', 'absent'], default='present'), + zone = dict(default=None), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + poll_async = dict(type='bool', default=True), + )) + module = AnsibleModule( - argument_spec = dict( - ip_address = dict(required=True), - vm = dict(default=None), - vm_guest_ip = dict(default=None), - state = dict(choices=['present', 'absent'], default='present'), - zone = dict(default=None), - domain = dict(default=None), - account = dict(default=None), - project = dict(default=None), - poll_async = dict(choices=BOOLEANS, default=True), - api_key = dict(default=None), - api_secret = dict(default=None, no_log=True), - api_url = dict(default=None), - api_http_method = dict(choices=['get', 'post'], default='get'), - api_timeout = dict(type='int', default=10), - ), - required_together = ( - ['api_key', 'api_secret', 'api_url'], - ), + argument_spec=argument_spec, + required_together=cs_required_together(), supports_check_mode=True ) - if not has_lib_cs: - module.fail_json(msg="python library cs required: pip install cs") - try: acs_static_nat = AnsibleCloudStackStaticNat(module) @@ -296,7 +271,7 @@ def main(): result = acs_static_nat.get_result(ip_address) - except CloudStackException, e: + except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) diff --git a/cloud/cloudstack/cs_template.py b/cloud/cloudstack/cs_template.py index c6c482f9c0f..7e6d74e9c65 100644 --- a/cloud/cloudstack/cs_template.py +++ b/cloud/cloudstack/cs_template.py @@ -18,12 +18,16 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: cs_template short_description: Manages templates on Apache CloudStack based clouds. description: - - Register a template from URL, create a template from a ROOT volume of a stopped VM or its snapshot and delete templates. + - Register a template from URL, create a template from a ROOT volume of a stopped VM or its snapshot, extract and delete templates. version_added: '2.0' author: "René Moser (@resmo)" options: @@ -33,7 +37,8 @@ required: true url: description: - - URL of where the template is hosted. + - URL of where the template is hosted on C(state=present). + - URL to which the template would be extracted on C(state=extracted). - Mutually exclusive with C(vm). required: false default: null @@ -88,10 +93,27 @@ default: false cross_zones: description: - - Whether the template should be syned across zones. - - Only used if C(state) is present. + - Whether the template should be synced or removed across zones. + - Only used if C(state) is present or absent. required: false default: false + mode: + description: + - Mode for the template extraction. + - Only used if C(state=extracted). + required: false + default: 'http_download' + choices: [ 'http_download', 'ftp_upload' ] + domain: + description: + - Domain the template, snapshot or VM is related to. + required: false + default: null + account: + description: + - Account the template, snapshot or VM is related to. + required: false + default: null project: description: - Name of the project the template to be registered in. 
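One subtlety in the cs_template options documented above: url changes meaning with state. For state=present it is the source the template is registered from; for the new state=extracted it is the destination the template is extracted to (subject to mode). The module's top-level dispatch therefore looks roughly like this sketch; the action names mirror the CloudStack API calls used in the code further down:

def pick_action(state, params):
    if state == 'absent':
        return 'deleteTemplate'
    if state == 'extracted':
        # mode: 'http_download' or 'ftp_upload'; url is the destination here.
        return 'extractTemplate'
    # state == 'present': register from a URL, else create from VM/snapshot.
    return 'registerTemplate' if params.get('url') else 'createTemplate'

assert pick_action('extracted', {'mode': 'http_download'}) == 'extractTemplate'
assert pick_action('present', {'url': 'http://example.com/t.ova'}) == 'registerTemplate'
assert pick_action('present', {'vm': 'web-01'}) == 'createTemplate'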
@@ -114,7 +136,7 @@ - Name the hypervisor to be used for creating the new template. - Relevant when using C(state=present). required: false - default: none + default: null choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ] requires_hvm: description: @@ -167,14 +189,14 @@ display_text: description: - Display text of the template. - required: true + required: false default: null state: description: - State of the template. required: false default: 'present' - choices: [ 'present', 'absent' ] + choices: [ 'present', 'absent', 'extracted' ] poll_async: description: - Poll async jobs until job has finished. @@ -219,6 +241,7 @@ - local_action: module: cs_template name: systemvm-4.2 + cross_zones: yes state: absent ''' @@ -314,6 +337,21 @@ returned: success type: string sample: VMware +mode: + description: Mode of extraction. + returned: success + type: string + sample: http_download +state: + description: State of the extracted template. + returned: success + type: string + sample: DOWNLOAD_URL_CREATED +url: + description: URL the template is extracted to. + returned: success + type: string + sample: "http://1.2.3.4/userdata/eb307f13-4aca-45e8-b157-a414a14e6b04.ova" tags: description: List of resource tags associated with the template. returned: success @@ -341,12 +379,6 @@ sample: Production ''' -try: - from cs import CloudStack, CloudStackException, read_config - has_lib_cs = True -except ImportError: - has_lib_cs = False - # import cloudstack common from ansible.module_utils.cloudstack import * @@ -370,6 +402,9 @@ def __init__(self, module): 'ispublic': 'is_public', 'format': 'format', 'hypervisor': 'hypervisor', + 'url': 'url', + 'extractMode': 'mode', + 'state': 'state', } @@ -445,11 +480,17 @@ def create_template(self): poll_async = self.module.params.get('poll_async') if poll_async: - template = self._poll_job(template, 'template') + template = self.poll_job(template, 'template') return template def register_template(self): + required_params = [ + 'format', + 'url', + 'hypervisor', + ] + self.module.fail_on_missing_params(required_params=required_params) template = self.get_template() if not template: self.result['changed'] = True @@ -501,11 +542,36 @@ def get_template(self): return templates['template'][0] else: for i in templates['template']: - if i['checksum'] == checksum: + if 'checksum' in i and i['checksum'] == checksum: return i return None + def extract_template(self): + template = self.get_template() + if not template: + self.module.fail_json(msg="Failed: template not found") + + args = {} + args['id'] = template['id'] + args['url'] = self.module.params.get('url') + args['mode'] = self.module.params.get('mode') + args['zoneid'] = self.get_zone(key='id') + + self.result['changed'] = True + + if not self.module.check_mode: + template = self.cs.extractTemplate(**args) + + if 'errortext' in template: + self.module.fail_json(msg="Failed: '%s'" % template['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + template = self.poll_job(template, 'template') + return template + + + def remove_template(self): template = self.get_template() if template: @@ -513,7 +579,9 @@ def remove_template(self): args = {} args['id'] = template['id'] - args['zoneid'] = self.get_zone(key='id') + + if not self.module.params.get('cross_zones'): + args['zoneid'] = self.get_zone(key='id') if not self.module.check_mode: res = self.cs.deleteTemplate(**args) @@ -523,68 +591,66 @@ poll_async =
self.module.params.get('poll_async') if poll_async: - res = self._poll_job(res, 'template') + res = self.poll_job(res, 'template') return template def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name = dict(required=True), + display_text = dict(default=None), + url = dict(default=None), + vm = dict(default=None), + snapshot = dict(default=None), + os_type = dict(default=None), + is_ready = dict(type='bool', default=False), + is_public = dict(type='bool', default=True), + is_featured = dict(type='bool', default=False), + is_dynamically_scalable = dict(type='bool', default=False), + is_extractable = dict(type='bool', default=False), + is_routing = dict(type='bool', default=False), + checksum = dict(default=None), + template_filter = dict(default='self', choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']), + hypervisor = dict(choices=CS_HYPERVISORS, default=None), + requires_hvm = dict(type='bool', default=False), + password_enabled = dict(type='bool', default=False), + template_tag = dict(default=None), + sshkey_enabled = dict(type='bool', default=False), + format = dict(choices=['QCOW2', 'RAW', 'VHD', 'OVA'], default=None), + details = dict(default=None), + bits = dict(type='int', choices=[ 32, 64 ], default=64), + state = dict(choices=['present', 'absent', 'extracted'], default='present'), + cross_zones = dict(type='bool', default=False), + mode = dict(choices=['http_download', 'ftp_upload'], default='http_download'), + zone = dict(default=None), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + poll_async = dict(type='bool', default=True), + )) + module = AnsibleModule( - argument_spec = dict( - name = dict(required=True), - display_text = dict(default=None), - url = dict(default=None), - vm = dict(default=None), - snapshot = dict(default=None), - os_type = dict(default=None), - is_ready = dict(type='bool', choices=BOOLEANS, default=False), - is_public = dict(type='bool', choices=BOOLEANS, default=True), - is_featured = dict(type='bool', choices=BOOLEANS, default=False), - is_dynamically_scalable = dict(type='bool', choices=BOOLEANS, default=False), - is_extractable = dict(type='bool', choices=BOOLEANS, default=False), - is_routing = dict(type='bool', choices=BOOLEANS, default=False), - checksum = dict(default=None), - template_filter = dict(default='self', choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']), - hypervisor = dict(choices=['KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM', 'Simulator'], default=None), - requires_hvm = dict(type='bool', choices=BOOLEANS, default=False), - password_enabled = dict(type='bool', choices=BOOLEANS, default=False), - template_tag = dict(default=None), - sshkey_enabled = dict(type='bool', choices=BOOLEANS, default=False), - format = dict(choices=['QCOW2', 'RAW', 'VHD', 'OVA'], default=None), - details = dict(default=None), - bits = dict(type='int', choices=[ 32, 64 ], default=64), - state = dict(choices=['present', 'absent'], default='present'), - cross_zones = dict(type='bool', choices=BOOLEANS, default=False), - zone = dict(default=None), - domain = dict(default=None), - account = dict(default=None), - project = dict(default=None), - poll_async = dict(type='bool', choices=BOOLEANS, default=True), - api_key = dict(default=None), - api_secret = dict(default=None), - api_url = dict(default=None), - api_http_method = dict(choices=['get', 'post'], default='get'), - 
api_timeout = dict(type='int', default=10), - ), + argument_spec=argument_spec, + required_together=cs_required_together(), mutually_exclusive = ( ['url', 'vm'], - ), - required_together = ( - ['api_key', 'api_secret', 'api_url'], - ['format', 'url', 'hypervisor'], + ['zone', 'cross_zones'], ), supports_check_mode=True ) - if not has_lib_cs: - module.fail_json(msg="python library cs required: pip install cs") - try: acs_tpl = AnsibleCloudStackTemplate(module) state = module.params.get('state') if state in ['absent']: tpl = acs_tpl.remove_template() + + elif state in ['extracted']: + tpl = acs_tpl.extract_template() + else: if module.params.get('url'): tpl = acs_tpl.register_template() @@ -595,7 +661,7 @@ def main(): result = acs_tpl.get_result(tpl) - except CloudStackException, e: + except CloudStackException as e: module.fail_json(msg='CloudStackException: %s' % str(e)) module.exit_json(**result) diff --git a/cloud/cloudstack/cs_user.py b/cloud/cloudstack/cs_user.py new file mode 100644 index 00000000000..f9f43322e47 --- /dev/null +++ b/cloud/cloudstack/cs_user.py @@ -0,0 +1,455 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: cs_user +short_description: Manages users on Apache CloudStack based clouds. +description: + - Create, update, disable, lock, enable and remove users. +version_added: '2.0' +author: "René Moser (@resmo)" +options: + username: + description: + - Username of the user. + required: true + account: + description: + - Account the user will be created under. + - Required on C(state=present). + required: false + default: null + password: + description: + - Password of the user to be created. + - Required on C(state=present). + - Only considered on creation and will not be updated if user exists. + required: false + default: null + first_name: + description: + - First name of the user. + - Required on C(state=present). + required: false + default: null + last_name: + description: + - Last name of the user. + - Required on C(state=present). + required: false + default: null + email: + description: + - Email of the user. + - Required on C(state=present). + required: false + default: null + timezone: + description: + - Timezone of the user. + required: false + default: null + domain: + description: + - Domain the user is related to. + required: false + default: 'ROOT' + state: + description: + - State of the user. + - C(unlocked) is an alias for C(enabled). + required: false + default: 'present' + choices: [ 'present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked' ] + poll_async: + description: + - Poll async jobs until job has finished. 
+    required: false
+    default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create a user in domain 'CUSTOMERS'
+local_action:
+  module: cs_user
+  account: developers
+  username: johndoe
+  password: S3Cur3
+  last_name: Doe
+  first_name: John
+  email: john.doe@example.com
+  domain: CUSTOMERS
+
+# Lock an existing user in domain 'CUSTOMERS'
+local_action:
+  module: cs_user
+  username: johndoe
+  domain: CUSTOMERS
+  state: locked
+
+# Disable an existing user in domain 'CUSTOMERS'
+local_action:
+  module: cs_user
+  username: johndoe
+  domain: CUSTOMERS
+  state: disabled
+
+# Enable/unlock an existing user in domain 'CUSTOMERS'
+local_action:
+  module: cs_user
+  username: johndoe
+  domain: CUSTOMERS
+  state: enabled
+
+# Remove a user in domain 'CUSTOMERS'
+local_action:
+  module: cs_user
+  username: customer_xy
+  domain: CUSTOMERS
+  state: absent
+'''
+
+RETURN = '''
+---
+id:
+  description: UUID of the user.
+  returned: success
+  type: string
+  sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
+username:
+  description: Username of the user.
+  returned: success
+  type: string
+  sample: johndoe
+first_name:
+  description: First name of the user.
+  returned: success
+  type: string
+  sample: John
+last_name:
+  description: Last name of the user.
+  returned: success
+  type: string
+  sample: Doe
+email:
+  description: Email of the user.
+  returned: success
+  type: string
+  sample: john.doe@example.com
+api_key:
+  description: API key of the user.
+  returned: success
+  type: string
+  sample: JLhcg8VWi8DoFqL2sSLZMXmGojcLnFrOBTipvBHJjySODcV4mCOo29W2duzPv5cALaZnXj5QxDx3xQfaQt3DKg
+api_secret:
+  description: API secret of the user.
+  returned: success
+  type: string
+  sample: FUELo3LB9fa1UopjTLPdqLv_6OXQMJZv9g9N4B_Ao3HFz8d6IGFCV9MbPFNM8mwz00wbMevja1DoUNDvI8C9-g
+account:
+  description: Account name of the user.
+  returned: success
+  type: string
+  sample: developers
+account_type:
+  description: Type of the account.
+  returned: success
+  type: string
+  sample: user
+timezone:
+  description: Timezone of the user.
+  returned: success
+  type: string
+  sample: America/New_York
+created:
+  description: Date the user was created.
+  returned: success
+  type: string
+  sample: 2014-12-01T14:57:57+0100
+state:
+  description: State of the user.
+  returned: success
+  type: string
+  sample: enabled
+domain:
+  description: Domain the user is related to.
+ returned: success + type: string + sample: ROOT +''' + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackUser(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackUser, self).__init__(module) + self.returns = { + 'username': 'username', + 'firstname': 'first_name', + 'lastname': 'last_name', + 'email': 'email', + 'secretkey': 'api_secret', + 'apikey': 'api_key', + 'timezone': 'timezone', + } + self.account_types = { + 'user': 0, + 'root_admin': 1, + 'domain_admin': 2, + } + self.user = None + + + def get_account_type(self): + account_type = self.module.params.get('account_type') + return self.account_types[account_type] + + + def get_user(self): + if not self.user: + args = {} + args['domainid'] = self.get_domain('id') + users = self.cs.listUsers(**args) + if users: + user_name = self.module.params.get('username') + for u in users['user']: + if user_name.lower() == u['username'].lower(): + self.user = u + break + return self.user + + + def enable_user(self): + user = self.get_user() + if not user: + user = self.present_user() + + if user['state'].lower() != 'enabled': + self.result['changed'] = True + args = {} + args['id'] = user['id'] + if not self.module.check_mode: + res = self.cs.enableUser(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + user = res['user'] + return user + + + def lock_user(self): + user = self.get_user() + if not user: + user = self.present_user() + + # we need to enable the user to lock it. + if user['state'].lower() == 'disabled': + user = self.enable_user() + + if user['state'].lower() != 'locked': + self.result['changed'] = True + args = {} + args['id'] = user['id'] + if not self.module.check_mode: + res = self.cs.lockUser(**args) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + user = res['user'] + return user + + + def disable_user(self): + user = self.get_user() + if not user: + user = self.present_user() + + if user['state'].lower() != 'disabled': + self.result['changed'] = True + args = {} + args['id'] = user['id'] + if not self.module.check_mode: + user = self.cs.disableUser(**args) + if 'errortext' in user: + self.module.fail_json(msg="Failed: '%s'" % user['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + user = self.poll_job(user, 'user') + return user + + + def present_user(self): + missing_params = [] + for required_params in [ + 'account', + 'email', + 'password', + 'first_name', + 'last_name', + ]: + if not self.module.params.get(required_params): + missing_params.append(required_params) + if missing_params: + self.module.fail_json(msg="missing required arguments: %s" % ','.join(missing_params)) + + user = self.get_user() + if user: + user = self._update_user(user) + else: + user = self._create_user(user) + return user + + + def _create_user(self, user): + self.result['changed'] = True + + args = {} + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain('id') + args['username'] = self.module.params.get('username') + args['password'] = self.module.params.get('password') + args['firstname'] = self.module.params.get('first_name') + args['lastname'] = self.module.params.get('last_name') + args['email'] = self.module.params.get('email') + args['timezone'] = self.module.params.get('timezone') + if not self.module.check_mode: + res = self.cs.createUser(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: 
'%s'" % res['errortext']) + user = res['user'] + # register user api keys + res = self.cs.registerUserKeys(id=user['id']) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + user.update(res['userkeys']) + return user + + + def _update_user(self, user): + args = {} + args['id'] = user['id'] + args['firstname'] = self.module.params.get('first_name') + args['lastname'] = self.module.params.get('last_name') + args['email'] = self.module.params.get('email') + args['timezone'] = self.module.params.get('timezone') + if self.has_changed(args, user): + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.updateUser(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + user = res['user'] + # register user api keys + if 'apikey' not in user: + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.registerUserKeys(id=user['id']) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + user.update(res['userkeys']) + return user + + + def absent_user(self): + user = self.get_user() + if user: + self.result['changed'] = True + + if not self.module.check_mode: + res = self.cs.deleteUser(id=user['id']) + + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + return user + + + def get_result(self, user): + super(AnsibleCloudStackUser, self).get_result(user) + if user: + if 'accounttype' in user: + for key,value in self.account_types.items(): + if value == user['accounttype']: + self.result['account_type'] = key + break + return self.result + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + username = dict(required=True), + account = dict(default=None), + state = dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked'], default='present'), + domain = dict(default='ROOT'), + email = dict(default=None), + first_name = dict(default=None), + last_name = dict(default=None), + password = dict(default=None, no_log=True), + timezone = dict(default=None), + poll_async = dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + try: + acs_acc = AnsibleCloudStackUser(module) + + state = module.params.get('state') + + if state in ['absent']: + user = acs_acc.absent_user() + + elif state in ['enabled', 'unlocked']: + user = acs_acc.enable_user() + + elif state in ['disabled']: + user = acs_acc.disable_user() + + elif state in ['locked']: + user = acs_acc.lock_user() + + else: + user = acs_acc.present_user() + + result = acs_acc.get_result(user) + + except CloudStackException as e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_vmsnapshot.py b/cloud/cloudstack/cs_vmsnapshot.py index c9e815e4730..e3b43820a56 100644 --- a/cloud/cloudstack/cs_vmsnapshot.py +++ b/cloud/cloudstack/cs_vmsnapshot.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: cs_vmsnapshot
@@ -162,12 +166,6 @@
   sample: Production
 '''
 
-try:
-    from cs import CloudStack, CloudStackException, read_config
-    has_lib_cs = True
-except ImportError:
-    has_lib_cs = False
-
 # import cloudstack common
 from ansible.module_utils.cloudstack import *
 
@@ -215,7 +213,7 @@ def create_snapshot(self):
             poll_async = self.module.params.get('poll_async')
             if res and poll_async:
-                snapshot = self._poll_job(res, 'vmsnapshot')
+                snapshot = self.poll_job(res, 'vmsnapshot')
         return snapshot
 
@@ -232,7 +230,7 @@ def remove_snapshot(self):
             poll_async = self.module.params.get('poll_async')
             if res and poll_async:
-                res = self._poll_job(res, 'vmsnapshot')
+                res = self.poll_job(res, 'vmsnapshot')
         return snapshot
 
@@ -249,7 +247,7 @@ def revert_vm_to_snapshot(self):
             poll_async = self.module.params.get('poll_async')
             if res and poll_async:
-                res = self._poll_job(res, 'vmsnapshot')
+                res = self.poll_job(res, 'vmsnapshot')
             return snapshot
 
         self.module.fail_json(msg="snapshot not found, could not revert VM")
@@ -257,34 +255,31 @@
 
 def main():
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        name = dict(required=True, aliases=['display_name']),
+        vm = dict(required=True),
+        description = dict(default=None),
+        zone = dict(default=None),
+        snapshot_memory = dict(type='bool', default=False),
+        state = dict(choices=['present', 'absent', 'revert'], default='present'),
+        domain = dict(default=None),
+        account = dict(default=None),
+        project = dict(default=None),
+        poll_async = dict(type='bool', default=True),
+    ))
+
+    required_together = cs_required_together()
+
     module = AnsibleModule(
-        argument_spec = dict(
-            name = dict(required=True, aliases=['display_name']),
-            vm = dict(required=True),
-            description = dict(default=None),
-            zone = dict(default=None),
-            snapshot_memory = dict(choices=BOOLEANS, default=False),
-            state = dict(choices=['present', 'absent', 'revert'], default='present'),
-            domain = dict(default=None),
-            account = dict(default=None),
-            project = dict(default=None),
-            poll_async = dict(choices=BOOLEANS, default=True),
-            api_key = dict(default=None),
-            api_secret = dict(default=None, no_log=True),
-            api_url = dict(default=None),
-            api_http_method = dict(choices=['get', 'post'], default='get'),
-            api_timeout = dict(type='int', default=10),
-        ),
-        required_together = (
-            ['icmp_type', 'icmp_code'],
-            ['api_key', 'api_secret', 'api_url'],
-        ),
+        argument_spec=argument_spec,
+        required_together=required_together,
         supports_check_mode=True
     )
 
-    if not has_lib_cs:
-        module.fail_json(msg="python library cs required: pip install cs")
-
     try:
         acs_vmsnapshot = AnsibleCloudStackVmSnapshot(module)
 
@@ -298,7 +293,7 @@ def main():
 
         result = acs_vmsnapshot.get_result(snapshot)
 
-    except CloudStackException, e:
+    except CloudStackException as e:
         module.fail_json(msg='CloudStackException: %s' % str(e))
 
     module.exit_json(**result)
diff --git a/cloud/cloudstack/cs_volume.py b/cloud/cloudstack/cs_volume.py
new file mode 100644
index 00000000000..36071e0d78a
--- /dev/null
+++ b/cloud/cloudstack/cs_volume.py
@@ -0,0 +1,496 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Jefferson Girão
+# (c) 2015, René Moser
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_volume
+short_description: Manages volumes on Apache CloudStack based clouds.
+description:
+  - Create, destroy, attach, detach volumes.
+version_added: "2.1"
+author:
+  - "Jefferson Girão (@jeffersongirao)"
+  - "René Moser (@resmo)"
+options:
+  name:
+    description:
+      - Name of the volume.
+      - C(name) can only contain ASCII letters.
+    required: true
+  account:
+    description:
+      - Account the volume is related to.
+    required: false
+    default: null
+  custom_id:
+    description:
+      - Custom ID of the resource.
+      - Allowed to Root Admins only.
+    required: false
+    default: null
+  disk_offering:
+    description:
+      - Name of the disk offering to be used.
+      - One of C(disk_offering) or C(snapshot) is required if the volume is not already C(state=present).
+    required: false
+    default: null
+  display_volume:
+    description:
+      - Whether to display the volume to the end user or not.
+      - Allowed to Root Admins only.
+    required: false
+    default: true
+  domain:
+    description:
+      - Name of the domain the volume is to be deployed in.
+    required: false
+    default: null
+  max_iops:
+    description:
+      - Maximum IOPS of the volume.
+    required: false
+    default: null
+  min_iops:
+    description:
+      - Minimum IOPS of the volume.
+    required: false
+    default: null
+  project:
+    description:
+      - Name of the project the volume is to be deployed in.
+    required: false
+    default: null
+  size:
+    description:
+      - Size of the disk in GB.
+    required: false
+    default: null
+  snapshot:
+    description:
+      - The snapshot name for the disk volume.
+      - One of C(disk_offering) or C(snapshot) is required if the volume is not already C(state=present).
+    required: false
+    default: null
+  force:
+    description:
+      - Force removal of the volume even if it is attached to a VM.
+      - Considered on C(state=absent) only.
+    required: false
+    default: false
+  shrink_ok:
+    description:
+      - Whether shrinking the volume is allowed.
+    required: false
+    default: false
+  vm:
+    description:
+      - Name of the virtual machine to attach the volume to.
+    required: false
+    default: null
+  zone:
+    description:
+      - Name of the zone in which the volume should be deployed.
+      - If not set, default zone is used.
+    required: false
+    default: null
+  state:
+    description:
+      - State of the volume.
+    required: false
+    default: 'present'
+    choices: [ 'present', 'absent', 'attached', 'detached' ]
+  poll_async:
+    description:
+      - Poll async jobs until job has finished.
+ required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Create volume within project, zone with specified storage options +- local_action: + module: cs_volume + name: web-vm-1-volume + project: Integration + zone: ch-zrh-ix-01 + disk_offering: PerfPlus Storage + size: 20 + +# Create/attach volume to instance +- local_action: + module: cs_volume + name: web-vm-1-volume + disk_offering: PerfPlus Storage + size: 20 + vm: web-vm-1 + state: attached + +# Detach volume +- local_action: + module: cs_volume + name: web-vm-1-volume + state: detached + +# Remove volume +- local_action: + module: cs_volume + name: web-vm-1-volume + state: absent +''' + +RETURN = ''' +id: + description: ID of the volume. + returned: success + type: string + sample: +name: + description: Name of the volume. + returned: success + type: string + sample: web-volume-01 +display_name: + description: Display name of the volume. + returned: success + type: string + sample: web-volume-01 +group: + description: Group the volume belongs to + returned: success + type: string + sample: web +domain: + description: Domain the volume belongs to + returned: success + type: string + sample: example domain +project: + description: Project the volume belongs to + returned: success + type: string + sample: Production +zone: + description: Name of zone the volume is in. + returned: success + type: string + sample: ch-gva-2 +created: + description: Date of the volume was created. + returned: success + type: string + sample: 2014-12-01T14:57:57+0100 +attached: + description: Date of the volume was attached. + returned: success + type: string + sample: 2014-12-01T14:57:57+0100 +type: + description: Disk volume type. + returned: success + type: string + sample: DATADISK +size: + description: Size of disk volume. + returned: success + type: string + sample: 20 +vm: + description: Name of the vm the volume is attached to (not returned when detached) + returned: success + type: string + sample: web-01 +state: + description: State of the volume + returned: success + type: string + sample: Attached +device_id: + description: Id of the device on user vm the volume is attached to (not returned when detached) + returned: success + type: string + sample: 1 +''' + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackVolume(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackVolume, self).__init__(module) + self.returns = { + 'group': 'group', + 'attached': 'attached', + 'vmname': 'vm', + 'deviceid': 'device_id', + 'type': 'type', + 'size': 'size', + } + self.volume = None + + #TODO implement in cloudstack utils + def get_disk_offering(self, key=None): + disk_offering = self.module.params.get('disk_offering') + if not disk_offering: + return None + + # Do not add domain filter for disk offering listing. 
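+        # listDiskOfferings is deliberately left unscoped here, so the
+        # offering in the loop below is matched by display text, name,
+        # or UUID alone.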
+ disk_offerings = self.cs.listDiskOfferings() + if disk_offerings: + for d in disk_offerings['diskoffering']: + if disk_offering in [d['displaytext'], d['name'], d['id']]: + return self._get_by_key(key, d) + self.module.fail_json(msg="Disk offering '%s' not found" % disk_offering) + + + def get_volume(self): + if not self.volume: + args = {} + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') + args['zoneid'] = self.get_zone(key='id') + args['displayvolume'] = self.module.params.get('display_volume') + args['type'] = 'DATADISK' + + volumes = self.cs.listVolumes(**args) + if volumes: + volume_name = self.module.params.get('name') + for v in volumes['volume']: + if volume_name.lower() == v['name'].lower(): + self.volume = v + break + return self.volume + + + def get_snapshot(self, key=None): + snapshot = self.module.params.get('snapshot') + if not snapshot: + return None + + args = {} + args['name'] = snapshot + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') + + snapshots = self.cs.listSnapshots(**args) + if snapshots: + return self._get_by_key(key, snapshots['snapshot'][0]) + self.module.fail_json(msg="Snapshot with name %s not found" % snapshot) + + + def present_volume(self): + volume = self.get_volume() + if volume: + volume = self.update_volume(volume) + else: + disk_offering_id = self.get_disk_offering(key='id') + snapshot_id = self.get_snapshot(key='id') + + if not disk_offering_id and not snapshot_id: + self.module.fail_json(msg="Required one of: disk_offering,snapshot") + + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['diskofferingid'] = disk_offering_id + args['displayvolume'] = self.module.params.get('display_volume') + args['maxiops'] = self.module.params.get('max_iops') + args['miniops'] = self.module.params.get('min_iops') + args['projectid'] = self.get_project(key='id') + args['size'] = self.module.params.get('size') + args['snapshotid'] = snapshot_id + args['zoneid'] = self.get_zone(key='id') + + if not self.module.check_mode: + res = self.cs.createVolume(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + poll_async = self.module.params.get('poll_async') + if poll_async: + volume = self.poll_job(res, 'volume') + return volume + + + def attached_volume(self): + volume = self.present_volume() + + if volume: + if volume.get('virtualmachineid') != self.get_vm(key='id'): + self.result['changed'] = True + + if not self.module.check_mode: + volume = self.detached_volume() + + if 'attached' not in volume: + self.result['changed'] = True + + args = {} + args['id'] = volume['id'] + args['virtualmachineid'] = self.get_vm(key='id') + args['deviceid'] = self.module.params.get('device_id') + + if not self.module.check_mode: + res = self.cs.attachVolume(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + poll_async = self.module.params.get('poll_async') + if poll_async: + volume = self.poll_job(res, 'volume') + return volume + + + def detached_volume(self): + volume = self.present_volume() + + if volume: + if 'attached' not in volume: + return volume + + self.result['changed'] = True + + if not self.module.check_mode: + res = self.cs.detachVolume(id=volume['id']) + if 
'errortext' in res:
+                self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+            poll_async = self.module.params.get('poll_async')
+            if poll_async:
+                volume = self.poll_job(res, 'volume')
+        return volume
+
+
+    def absent_volume(self):
+        volume = self.get_volume()
+
+        if volume:
+            if 'attached' in volume and not self.module.params.get('force'):
+                self.module.fail_json(msg="Volume '%s' is attached, use force=true for detaching and removing the volume." % volume.get('name'))
+
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                volume = self.detached_volume()
+
+                res = self.cs.deleteVolume(id=volume['id'])
+                if 'errortext' in res:
+                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+                poll_async = self.module.params.get('poll_async')
+                if poll_async:
+                    res = self.poll_job(res, 'volume')
+
+        return volume
+
+
+    def update_volume(self, volume):
+        args_resize = {}
+        args_resize['id'] = volume['id']
+        args_resize['diskofferingid'] = self.get_disk_offering(key='id')
+        args_resize['maxiops'] = self.module.params.get('max_iops')
+        args_resize['miniops'] = self.module.params.get('min_iops')
+        args_resize['size'] = self.module.params.get('size')
+
+        # change unit from bytes to gigabytes to compare with args
+        volume_copy = volume.copy()
+        volume_copy['size'] = volume_copy['size'] / (2**30)
+
+        if self.has_changed(args_resize, volume_copy):
+
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                args_resize['shrinkok'] = self.module.params.get('shrink_ok')
+                res = self.cs.resizeVolume(**args_resize)
+                if 'errortext' in res:
+                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+                poll_async = self.module.params.get('poll_async')
+                if poll_async:
+                    volume = self.poll_job(res, 'volume')
+                self.volume = volume
+
+        return volume
+
+
+def main():
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        name = dict(required=True),
+        disk_offering = dict(default=None),
+        display_volume = dict(type='bool', default=None),
+        max_iops = dict(type='int', default=None),
+        min_iops = dict(type='int', default=None),
+        size = dict(type='int', default=None),
+        snapshot = dict(default=None),
+        vm = dict(default=None),
+        device_id = dict(type='int', default=None),
+        custom_id = dict(default=None),
+        force = dict(type='bool', default=False),
+        shrink_ok = dict(type='bool', default=False),
+        state = dict(choices=['present', 'absent', 'attached', 'detached'], default='present'),
+        zone = dict(default=None),
+        domain = dict(default=None),
+        account = dict(default=None),
+        project = dict(default=None),
+        poll_async = dict(type='bool', default=True),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=cs_required_together(),
+        mutually_exclusive = (
+            ['snapshot', 'disk_offering'],
+        ),
+        supports_check_mode=True
+    )
+
+    try:
+        acs_vol = AnsibleCloudStackVolume(module)
+
+        state = module.params.get('state')
+
+        if state in ['absent']:
+            volume = acs_vol.absent_volume()
+        elif state in ['attached']:
+            volume = acs_vol.attached_volume()
+        elif state in ['detached']:
+            volume = acs_vol.detached_volume()
+        else:
+            volume = acs_vol.present_volume()
+
+        result = acs_vol.get_result(volume)
+
+    except CloudStackException as e:
+        module.fail_json(msg='CloudStackException: %s' % str(e))
+
+    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/cloudstack/cs_vpc.py b/cloud/cloudstack/cs_vpc.py
new file mode 100644
index 
00000000000..1495b865500 --- /dev/null +++ b/cloud/cloudstack/cs_vpc.py @@ -0,0 +1,391 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it an/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: cs_vpc +short_description: "Manages VPCs on Apache CloudStack based clouds." +description: + - "Create, update and delete VPCs." +version_added: "2.3" +author: "René Moser (@resmo)" +options: + name: + description: + - "Name of the VPC." + required: true + display_text: + description: + - "Display text of the VPC." + - "If not set, C(name) will be used for creating." + required: false + default: null + cidr: + description: + - "CIDR of the VPC, e.g. 10.1.0.0/16" + - "All VPC guest networks' CIDRs must be within this CIDR." + - "Required on C(state=present)." + required: false + default: null + network_domain: + description: + - "Network domain for the VPC." + - "All networks inside the VPC will belong to this domain." + required: false + default: null + vpc_offering: + description: + - "Name of the VPC offering." + - "If not set, default VPC offering is used." + required: false + default: null + state: + description: + - "State of the VPC." + required: false + default: present + choices: + - present + - absent + - restarted + domain: + description: + - "Domain the VPC is related to." + required: false + default: null + account: + description: + - "Account the VPC is related to." + required: false + default: null + project: + description: + - "Name of the project the VPC is related to." + required: false + default: null + zone: + description: + - "Name of the zone." + - "If not set, default zone is used." + required: false + default: null + tags: + description: + - "List of tags. Tags are a list of dictionaries having keys C(key) and C(value)." + - "For deleting all tags, set an empty list e.g. C(tags: [])." + required: false + default: null + aliases: + - tag + poll_async: + description: + - "Poll async jobs until job has finished." + required: false + default: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +# Ensure a VPC is present +- local_action: + module: cs_vpc + name: my_vpc + display_text: My example VPC + cidr: 10.10.0.0/16 + +# Ensure a VPC is absent +- local_action: + module: cs_vpc + name: my_vpc + state: absent + +# Ensure a VPC is restarted +- local_action: + module: cs_vpc + name: my_vpc + state: restarted +''' + +RETURN = ''' +--- +id: + description: "UUID of the VPC." + returned: success + type: string + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: "Name of the VPC." + returned: success + type: string + sample: my_vpc +display_text: + description: "Display text of the VPC." + returned: success + type: string + sample: My example VPC +cidr: + description: "CIDR of the VPC." 
+  returned: success
+  type: string
+  sample: 10.10.0.0/16
+network_domain:
+  description: "Network domain of the VPC."
+  returned: success
+  type: string
+  sample: example.com
+region_level_vpc:
+  description: "Whether the VPC is region level or not."
+  returned: success
+  type: boolean
+  sample: true
+restart_required:
+  description: "Whether the VPC router needs a restart or not."
+  returned: success
+  type: boolean
+  sample: true
+distributed_vpc_router:
+  description: "Whether the VPC uses distributed router or not."
+  returned: success
+  type: boolean
+  sample: true
+redundant_vpc_router:
+  description: "Whether the VPC has redundant routers or not."
+  returned: success
+  type: boolean
+  sample: true
+domain:
+  description: "Domain the VPC is related to."
+  returned: success
+  type: string
+  sample: example domain
+account:
+  description: "Account the VPC is related to."
+  returned: success
+  type: string
+  sample: example account
+project:
+  description: "Name of project the VPC is related to."
+  returned: success
+  type: string
+  sample: Production
+zone:
+  description: "Name of zone the VPC is in."
+  returned: success
+  type: string
+  sample: ch-gva-2
+state:
+  description: "State of the VPC."
+  returned: success
+  type: string
+  sample: Enabled
+tags:
+  description: "List of resource tags associated with the VPC."
+  returned: success
+  type: dict
+  sample: '[ { "key": "foo", "value": "bar" } ]'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackVpc(AnsibleCloudStack):
+
+    def __init__(self, module):
+        super(AnsibleCloudStackVpc, self).__init__(module)
+        self.returns = {
+            'cidr': 'cidr',
+            'networkdomain': 'network_domain',
+            'redundantvpcrouter': 'redundant_vpc_router',
+            'distributedvpcrouter': 'distributed_vpc_router',
+            'regionlevelvpc': 'region_level_vpc',
+            'restartrequired': 'restart_required',
+        }
+        self.vpc = None
+        self.vpc_offering = None
+
+    def get_vpc_offering(self, key=None):
+        if self.vpc_offering:
+            return self._get_by_key(key, self.vpc_offering)
+
+        vpc_offering = self.module.params.get('vpc_offering')
+        args = {}
+        if vpc_offering:
+            args['name'] = vpc_offering
+        else:
+            args['isdefault'] = True
+
+        vpc_offerings = self.cs.listVPCOfferings(**args)
+        if vpc_offerings:
+            self.vpc_offering = vpc_offerings['vpcoffering'][0]
+            return self._get_by_key(key, self.vpc_offering)
+        self.module.fail_json(msg="VPC offering '%s' not found" % vpc_offering)
+
+    def get_vpc(self):
+        if self.vpc:
+            return self.vpc
+        args = {
+            'account': self.get_account(key='name'),
+            'domainid': self.get_domain(key='id'),
+            'projectid': self.get_project(key='id'),
+            'zoneid': self.get_zone(key='id'),
+        }
+        vpcs = self.cs.listVPCs(**args)
+        if vpcs:
+            vpc_name = self.module.params.get('name')
+            for v in vpcs['vpc']:
+                if vpc_name.lower() in [v['name'].lower(), v['id']]:
+                    self.vpc = v
+                    break
+        return self.vpc
+
+    def restart_vpc(self):
+        self.result['changed'] = True
+        vpc = self.get_vpc()
+        if vpc and not self.module.check_mode:
+            args = {
+                'id': vpc['id'],
+            }
+            res = self.cs.restartVPC(**args)
+            if 'errortext' in res:
+                self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+            poll_async = self.module.params.get('poll_async')
+            if poll_async:
+                self.poll_job(res, 'vpc')
+        return vpc
+
+    def present_vpc(self):
+        vpc = self.get_vpc()
+        if not vpc:
+            vpc = self._create_vpc(vpc)
+        else:
+            vpc = self._update_vpc(vpc)
+
+        if vpc:
+            vpc = self.ensure_tags(resource=vpc, resource_type='Vpc')
+        return vpc
+
+    def
_create_vpc(self, vpc): + self.result['changed'] = True + args = { + 'name': self.module.params.get('name'), + 'displaytext': self.get_or_fallback('display_text', 'name'), + 'vpcofferingid': self.get_vpc_offering(key='id'), + 'cidr': self.module.params.get('cidr'), + 'account': self.get_account(key='name'), + 'domainid': self.get_domain(key='id'), + 'projectid': self.get_project(key='id'), + 'zoneid': self.get_zone(key='id'), + } + self.result['diff']['after'] = args + if not self.module.check_mode: + res = self.cs.createVPC(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + vpc = self.poll_job(res, 'vpc') + return vpc + + def _update_vpc(self, vpc): + args = { + 'id': vpc['id'], + 'displaytext': self.module.params.get('display_text'), + } + if self.has_changed(args, vpc): + self.result['changed'] = True + if not self.module.check_mode: + res = self.cs.updateVPC(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + vpc = self.poll_job(res, 'vpc') + return vpc + + def absent_vpc(self): + vpc = self.get_vpc() + if vpc: + self.result['changed'] = True + self.result['diff']['before'] = vpc + if not self.module.check_mode: + res = self.cs.deleteVPC(id=vpc['id']) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + + poll_async = self.module.params.get('poll_async') + if poll_async: + self.poll_job(res, 'vpc') + return vpc + + +def main(): + argument_spec=cs_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + cidr=dict(default=None), + display_text=dict(default=None), + vpc_offering=dict(default=None), + network_domain=dict(default=None), + state=dict(choices=['present', 'absent', 'restarted'], default='present'), + domain=dict(default=None), + account=dict(default=None), + project=dict(default=None), + zone=dict(default=None), + tags=dict(type='list', aliases=['tag'], default=None), + poll_async=dict(type='bool', default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + required_if=[ + ('state', 'present', ['cidr']), + ], + supports_check_mode=True, + ) + + try: + acs_vpc = AnsibleCloudStackVpc(module) + + state = module.params.get('state') + if state == 'absent': + vpc = acs_vpc.absent_vpc() + elif state == 'restarted': + vpc = acs_vpc.restart_vpc() + else: + vpc = acs_vpc.present_vpc() + + result = acs_vpc.get_result(vpc) + + except CloudStackException as e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_zone.py b/cloud/cloudstack/cs_zone.py new file mode 100644 index 00000000000..1dd5dd64221 --- /dev/null +++ b/cloud/cloudstack/cs_zone.py @@ -0,0 +1,406 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_zone
+short_description: Manages zones on Apache CloudStack based clouds.
+description:
+  - Create, update and remove zones.
+version_added: "2.1"
+author: "René Moser (@resmo)"
+options:
+  name:
+    description:
+      - Name of the zone.
+    required: true
+  id:
+    description:
+      - UUID of the existing zone.
+    default: null
+    required: false
+  state:
+    description:
+      - State of the zone.
+    required: false
+    default: 'present'
+    choices: [ 'present', 'enabled', 'disabled', 'absent' ]
+  domain:
+    description:
+      - Domain the zone is related to.
+      - Zone is a public zone if not set.
+    required: false
+    default: null
+  network_domain:
+    description:
+      - Network domain for the zone.
+    required: false
+    default: null
+  network_type:
+    description:
+      - Network type of the zone.
+    required: false
+    default: basic
+    choices: [ 'basic', 'advanced' ]
+  dns1:
+    description:
+      - First DNS for the zone.
+      - Required if C(state=present).
+    required: false
+    default: null
+  dns2:
+    description:
+      - Second DNS for the zone.
+    required: false
+    default: null
+  internal_dns1:
+    description:
+      - First internal DNS for the zone.
+      - If not set C(dns1) will be used on C(state=present).
+    required: false
+    default: null
+  internal_dns2:
+    description:
+      - Second internal DNS for the zone.
+    required: false
+    default: null
+  dns1_ipv6:
+    description:
+      - First DNS for IPv6 for the zone.
+    required: false
+    default: null
+  dns2_ipv6:
+    description:
+      - Second DNS for IPv6 for the zone.
+    required: false
+    default: null
+  guest_cidr_address:
+    description:
+      - Guest CIDR address for the zone.
+    required: false
+    default: null
+  dhcp_provider:
+    description:
+      - DHCP provider for the zone.
+    required: false
+    default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Ensure a zone is present
+- local_action:
+    module: cs_zone
+    name: ch-zrh-ix-01
+    dns1: 8.8.8.8
+    dns2: 8.8.4.4
+    network_type: basic
+
+# Ensure a zone is disabled
+- local_action:
+    module: cs_zone
+    name: ch-zrh-ix-01
+    state: disabled
+
+# Ensure a zone is enabled
+- local_action:
+    module: cs_zone
+    name: ch-zrh-ix-01
+    state: enabled
+
+# Ensure a zone is absent
+- local_action:
+    module: cs_zone
+    name: ch-zrh-ix-01
+    state: absent
+'''
+
+RETURN = '''
+---
+id:
+  description: UUID of the zone.
+  returned: success
+  type: string
+  sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+  description: Name of the zone.
+  returned: success
+  type: string
+  sample: zone01
+dns1:
+  description: First DNS for the zone.
+  returned: success
+  type: string
+  sample: 8.8.8.8
+dns2:
+  description: Second DNS for the zone.
+  returned: success
+  type: string
+  sample: 8.8.4.4
+internal_dns1:
+  description: First internal DNS for the zone.
+  returned: success
+  type: string
+  sample: 8.8.8.8
+internal_dns2:
+  description: Second internal DNS for the zone.
+  returned: success
+  type: string
+  sample: 8.8.4.4
+dns1_ipv6:
+  description: First IPv6 DNS for the zone.
+  returned: success
+  type: string
+  sample: "2001:4860:4860::8888"
+dns2_ipv6:
+  description: Second IPv6 DNS for the zone.
+  returned: success
+  type: string
+  sample: "2001:4860:4860::8844"
+allocation_state:
+  description: State of the zone.
+ returned: success + type: string + sample: Enabled +domain: + description: Domain the zone is related to. + returned: success + type: string + sample: ROOT +network_domain: + description: Network domain for the zone. + returned: success + type: string + sample: example.com +network_type: + description: Network type for the zone. + returned: success + type: string + sample: basic +local_storage_enabled: + description: Local storage offering enabled. + returned: success + type: bool + sample: false +securitygroups_enabled: + description: Security groups support is enabled. + returned: success + type: bool + sample: false +guest_cidr_address: + description: Guest CIDR address for the zone + returned: success + type: string + sample: 10.1.1.0/24 +dhcp_provider: + description: DHCP provider for the zone + returned: success + type: string + sample: VirtualRouter +zone_token: + description: Zone token + returned: success + type: string + sample: ccb0a60c-79c8-3230-ab8b-8bdbe8c45bb7 +tags: + description: List of resource tags associated with the zone. + returned: success + type: dict + sample: [ { "key": "foo", "value": "bar" } ] +''' + +# import cloudstack common +from ansible.module_utils.cloudstack import * + +class AnsibleCloudStackZone(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackZone, self).__init__(module) + self.returns = { + 'dns1': 'dns1', + 'dns2': 'dns2', + 'internaldns1': 'internal_dns1', + 'internaldns2': 'internal_dns2', + 'ipv6dns1': 'dns1_ipv6', + 'ipv6dns2': 'dns2_ipv6', + 'domain': 'network_domain', + 'networktype': 'network_type', + 'securitygroupsenabled': 'securitygroups_enabled', + 'localstorageenabled': 'local_storage_enabled', + 'guestcidraddress': 'guest_cidr_address', + 'dhcpprovider': 'dhcp_provider', + 'allocationstate': 'allocation_state', + 'zonetoken': 'zone_token', + } + self.zone = None + + + def _get_common_zone_args(self): + args = {} + args['name'] = self.module.params.get('name') + args['dns1'] = self.module.params.get('dns1') + args['dns2'] = self.module.params.get('dns2') + args['internaldns1'] = self.get_or_fallback('internal_dns1', 'dns1') + args['internaldns2'] = self.get_or_fallback('internal_dns2', 'dns2') + args['ipv6dns1'] = self.module.params.get('dns1_ipv6') + args['ipv6dns2'] = self.module.params.get('dns2_ipv6') + args['networktype'] = self.module.params.get('network_type') + args['domain'] = self.module.params.get('network_domain') + args['localstorageenabled'] = self.module.params.get('local_storage_enabled') + args['guestcidraddress'] = self.module.params.get('guest_cidr_address') + args['dhcpprovider'] = self.module.params.get('dhcp_provider') + state = self.module.params.get('state') + if state in [ 'enabled', 'disabled']: + args['allocationstate'] = state.capitalize() + return args + + + def get_zone(self): + if not self.zone: + args = {} + + uuid = self.module.params.get('id') + if uuid: + args['id'] = uuid + zones = self.cs.listZones(**args) + if zones: + self.zone = zones['zone'][0] + return self.zone + + args['name'] = self.module.params.get('name') + zones = self.cs.listZones(**args) + if zones: + self.zone = zones['zone'][0] + return self.zone + + + def present_zone(self): + zone = self.get_zone() + if zone: + zone = self._update_zone() + else: + zone = self._create_zone() + return zone + + + def _create_zone(self): + required_params = [ + 'dns1', + ] + self.module.fail_on_missing_params(required_params=required_params) + + self.result['changed'] = True + + args = self._get_common_zone_args() + 
args['domainid'] = self.get_domain(key='id') + args['securitygroupenabled'] = self.module.params.get('securitygroups_enabled') + + zone = None + if not self.module.check_mode: + res = self.cs.createZone(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + zone = res['zone'] + return zone + + + def _update_zone(self): + zone = self.get_zone() + + args = self._get_common_zone_args() + args['id'] = zone['id'] + + if self.has_changed(args, zone): + self.result['changed'] = True + + if not self.module.check_mode: + res = self.cs.updateZone(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + zone = res['zone'] + return zone + + + def absent_zone(self): + zone = self.get_zone() + if zone: + self.result['changed'] = True + + args = {} + args['id'] = zone['id'] + + if not self.module.check_mode: + res = self.cs.deleteZone(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + return zone + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + id = dict(default=None), + name = dict(required=True), + dns1 = dict(default=None), + dns2 = dict(default=None), + internal_dns1 = dict(default=None), + internal_dns2 = dict(default=None), + dns1_ipv6 = dict(default=None), + dns2_ipv6 = dict(default=None), + network_type = dict(default='basic', choices=['Basic', 'basic', 'Advanced', 'advanced']), + network_domain = dict(default=None), + guest_cidr_address = dict(default=None), + dhcp_provider = dict(default=None), + local_storage_enabled = dict(default=None), + securitygroups_enabled = dict(default=None), + state = dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'), + domain = dict(default=None), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + try: + acs_zone = AnsibleCloudStackZone(module) + + state = module.params.get('state') + if state in ['absent']: + zone = acs_zone.absent_zone() + else: + zone = acs_zone.present_zone() + + result = acs_zone.get_result(zone) + + except CloudStackException as e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/cloud/cloudstack/cs_zone_facts.py b/cloud/cloudstack/cs_zone_facts.py new file mode 100644 index 00000000000..74894b7494e --- /dev/null +++ b/cloud/cloudstack/cs_zone_facts.py @@ -0,0 +1,205 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: cs_zone_facts +short_description: Gathering facts of zones from Apache CloudStack based clouds. 
+description: + - Gathering facts from the API of a zone. +version_added: "2.1" +author: "René Moser (@resmo)" +options: + name: + description: + - Name of the zone. + required: true +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +- cs_zone_facts: + name: ch-gva-1 + delegate_to: localhost + +- debug: + var: cloudstack_zone +''' + +RETURN = ''' +--- +cloudstack_zone.id: + description: UUID of the zone. + returned: success + type: string + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +cloudstack_zone.name: + description: Name of the zone. + returned: success + type: string + sample: zone01 +cloudstack_zone.dns1: + description: First DNS for the zone. + returned: success + type: string + sample: 8.8.8.8 +cloudstack_zone.dns2: + description: Second DNS for the zone. + returned: success + type: string + sample: 8.8.4.4 +cloudstack_zone.internal_dns1: + description: First internal DNS for the zone. + returned: success + type: string + sample: 8.8.8.8 +cloudstack_zone.internal_dns2: + description: Second internal DNS for the zone. + returned: success + type: string + sample: 8.8.4.4 +cloudstack_zone.dns1_ipv6: + description: First IPv6 DNS for the zone. + returned: success + type: string + sample: "2001:4860:4860::8888" +cloudstack_zone.dns2_ipv6: + description: Second IPv6 DNS for the zone. + returned: success + type: string + sample: "2001:4860:4860::8844" +cloudstack_zone.allocation_state: + description: State of the zone. + returned: success + type: string + sample: Enabled +cloudstack_zone.domain: + description: Domain the zone is related to. + returned: success + type: string + sample: ROOT +cloudstack_zone.network_domain: + description: Network domain for the zone. + returned: success + type: string + sample: example.com +cloudstack_zone.network_type: + description: Network type for the zone. + returned: success + type: string + sample: basic +cloudstack_zone.local_storage_enabled: + description: Local storage offering enabled. + returned: success + type: bool + sample: false +cloudstack_zone.securitygroups_enabled: + description: Security groups support is enabled. + returned: success + type: bool + sample: false +cloudstack_zone.guest_cidr_address: + description: Guest CIDR address for the zone + returned: success + type: string + sample: 10.1.1.0/24 +cloudstack_zone.dhcp_provider: + description: DHCP provider for the zone + returned: success + type: string + sample: VirtualRouter +cloudstack_zone.zone_token: + description: Zone token + returned: success + type: string + sample: ccb0a60c-79c8-3230-ab8b-8bdbe8c45bb7 +cloudstack_zone.tags: + description: List of resource tags associated with the zone. 
+ returned: success + type: dict + sample: [ { "key": "foo", "value": "bar" } ] +''' + +import base64 + +# import cloudstack common +from ansible.module_utils.cloudstack import * + +class AnsibleCloudStackZoneFacts(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackZoneFacts, self).__init__(module) + self.returns = { + 'dns1': 'dns1', + 'dns2': 'dns2', + 'internaldns1': 'internal_dns1', + 'internaldns2': 'internal_dns2', + 'ipv6dns1': 'dns1_ipv6', + 'ipv6dns2': 'dns2_ipv6', + 'domain': 'network_domain', + 'networktype': 'network_type', + 'securitygroupsenabled': 'securitygroups_enabled', + 'localstorageenabled': 'local_storage_enabled', + 'guestcidraddress': 'guest_cidr_address', + 'dhcpprovider': 'dhcp_provider', + 'allocationstate': 'allocation_state', + 'zonetoken': 'zone_token', + } + self.facts = { + 'cloudstack_zone': None, + } + + + def get_zone(self): + if not self.zone: + # TODO: add param key signature in get_zone() + self.module.params['zone'] = self.module.params.get('name') + super(AnsibleCloudStackZoneFacts, self).get_zone() + return self.zone + + + def run(self): + zone = self.get_zone() + self.facts['cloudstack_zone'] = self.get_result(zone) + return self.facts + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name = dict(required=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=False, + ) + + cs_zone_facts = AnsibleCloudStackZoneFacts(module=module).run() + cs_facts_result = dict(changed=False, ansible_facts=cs_zone_facts) + module.exit_json(**cs_facts_result) + +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/cloud/google/gcdns_record.py b/cloud/google/gcdns_record.py new file mode 100644 index 00000000000..7c209c5cbad --- /dev/null +++ b/cloud/google/gcdns_record.py @@ -0,0 +1,794 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2015 CallFire Inc. +# +# This file is part of Ansible. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: gcdns_record +short_description: Creates or removes resource records in Google Cloud DNS +description: + - Creates or removes resource records in Google Cloud DNS. +version_added: "2.2" +author: "William Albert (@walbert947)" +requirements: + - "python >= 2.6" + - "apache-libcloud >= 0.19.0" +options: + state: + description: + - Whether the given resource record should or should not be present. + required: false + choices: ["present", "absent"] + default: "present" + record: + description: + - The fully-qualified domain name of the resource record. 
+    required: true
+    aliases: ['name']
+  zone:
+    description:
+      - The DNS domain name of the zone (e.g., example.com).
+      - One of either I(zone) or I(zone_id) must be specified as an option, or the module will fail.
+      - If both I(zone) and I(zone_id) are specified, I(zone_id) will be used.
+    required: false
+  zone_id:
+    description:
+      - The Google Cloud ID of the zone (e.g., example-com).
+      - One of either I(zone) or I(zone_id) must be specified as an option, or the module will fail.
+      - These usually take the form of domain names with the dots replaced with dashes. A zone ID will never have any dots in it.
+      - I(zone_id) can be faster than I(zone) in projects with a large number of zones.
+      - If both I(zone) and I(zone_id) are specified, I(zone_id) will be used.
+    required: false
+  type:
+    description:
+      - The type of resource record to add.
+    required: true
+    choices: [ 'A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR' ]
+  record_data:
+    description:
+      - The record_data to use for the resource record.
+      - I(record_data) must be specified if I(state) is C(present) or I(overwrite) is C(True), or the module will fail.
+      - Valid record_data vary based on the record's I(type). In addition, resource records that contain a DNS domain name in the value field (e.g., CNAME, PTR, SRV, etc.) MUST include a trailing dot in the value.
+      - Individual string record_data for TXT records must be enclosed in double quotes.
+      - For resource records that have the same name but different record_data (e.g., multiple A records), they must be defined as multiple list entries in a single record.
+    required: false
+    aliases: ['value']
+  ttl:
+    description:
+      - The amount of time in seconds that a resource record will remain cached by a caching resolver.
+    required: false
+    default: 300
+  overwrite:
+    description:
+      - Whether an attempt to overwrite an existing record should succeed or fail. The behavior of this option depends on I(state).
+      - If I(state) is C(present) and I(overwrite) is C(True), this module will replace an existing resource record of the same name with the provided I(record_data). If I(state) is C(present) and I(overwrite) is C(False), this module will fail if there is an existing resource record with the same name and type, but different resource data.
+      - If I(state) is C(absent) and I(overwrite) is C(True), this module will remove the given resource record unconditionally. If I(state) is C(absent) and I(overwrite) is C(False), this module will fail if the provided record_data do not match exactly with the existing resource record's record_data.
+    required: false
+    choices: [True, False]
+    default: False
+  service_account_email:
+    description:
+      - The e-mail address for a service account with access to Google Cloud DNS.
+    required: false
+    default: null
+  pem_file:
+    description:
+      - The path to the PEM file associated with the service account email.
+      - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+    required: false
+    default: null
+  credentials_file:
+    description:
+      - The path to the JSON file associated with the service account email.
+    required: false
+    default: null
+  project_id:
+    description:
+      - The Google Cloud Platform project ID to use.
+    required: false
+    default: null
+notes:
+  - See also M(gcdns_zone).
+  - This module's underlying library does not support in-place updates for
Instead, resource records are quickly deleted and + recreated. + - SOA records are technically supported, but their functionality is limited + to verifying that a zone's existing SOA record matches a pre-determined + value. The SOA record cannot be updated. + - Root NS records cannot be updated. + - NAPTR records are not supported. +''' + +EXAMPLES = ''' +# Create an A record. +- gcdns_record: + record: 'www1.example.com' + zone: 'example.com' + type: A + value: '1.2.3.4' + +# Update an existing record. +- gcdns_record: + record: 'www1.example.com' + zone: 'example.com' + type: A + overwrite: true + value: '5.6.7.8' + +# Remove an A record. +- gcdns_record: + record: 'www1.example.com' + zone_id: 'example-com' + state: absent + type: A + value: '5.6.7.8' + +# Create a CNAME record. +- gcdns_record: + record: 'www.example.com' + zone_id: 'example-com' + type: CNAME + value: 'www.example.com.' # Note the trailing dot + +# Create an MX record with a custom TTL. +- gcdns_record: + record: 'example.com' + zone: 'example.com' + type: MX + ttl: 3600 + value: '10 mail.example.com.' # Note the trailing dot + +# Create multiple A records with the same name. +- gcdns_record: + record: 'api.example.com' + zone_id: 'example-com' + type: A + record_data: + - '192.0.2.23' + - '10.4.5.6' + - '198.51.100.5' + - '203.0.113.10' + +# Change the value of an existing record with multiple record_data. +- gcdns_record: + record: 'api.example.com' + zone: 'example.com' + type: A + overwrite: true + record_data: # WARNING: All values in a record will be replaced + - '192.0.2.23' + - '192.0.2.42' # The changed record + - '198.51.100.5' + - '203.0.113.10' + +# Safely remove a multi-line record. +- gcdns_record: + record: 'api.example.com' + zone_id: 'example-com' + state: absent + type: A + record_data: # NOTE: All of the values must match exactly + - '192.0.2.23' + - '192.0.2.42' + - '198.51.100.5' + - '203.0.113.10' + +# Unconditionally remove a record. +- gcdns_record: + record: 'api.example.com' + zone_id: 'example-com' + state: absent + overwrite: true # overwrite is true, so no values are needed + type: A + +# Create an AAAA record +- gcdns_record: + record: 'www1.example.com' + zone: 'example.com' + type: AAAA + value: 'fd00:db8::1' + +# Create a PTR record +- gcdns_record: + record: '10.5.168.192.in-addr.arpa' + zone: '5.168.192.in-addr.arpa' + type: PTR + value: 'api.example.com.' # Note the trailing dot. + +# Create an NS record +- gcdns_record: + record: 'subdomain.example.com' + zone: 'example.com' + type: NS + ttl: 21600 + record_data: + - 'ns-cloud-d1.googledomains.com.' # Note the trailing dots on values + - 'ns-cloud-d2.googledomains.com.' + - 'ns-cloud-d3.googledomains.com.' + - 'ns-cloud-d4.googledomains.com.' + +# Create a TXT record +- gcdns_record: + record: 'example.com' + zone_id: 'example-com' + type: TXT + record_data: + - '"v=spf1 include:_spf.google.com -all"' # A single-string TXT value + - '"hello " "world"' # A multi-string TXT value +''' + +RETURN = ''' +overwrite: + description: Whether to the module was allowed to overwrite the record + returned: success + type: boolean + sample: True +record: + description: Fully-qualified domain name of the resource record + returned: success + type: string + sample: mail.example.com. 
+state: + description: Whether the record is present or absent + returned: success + type: string + sample: present +ttl: + description: The time-to-live of the resource record + returned: success + type: int + sample: 300 +type: + description: The type of the resource record + returned: success + type: string + sample: A +record_data: + description: The resource record values + returned: success + type: list + sample: ['5.6.7.8', '9.10.11.12'] +zone: + description: The dns name of the zone + returned: success + type: string + sample: example.com. +zone_id: + description: The Google Cloud DNS ID of the zone + returned: success + type: string + sample: example-com +''' + + +################################################################################ +# Imports +################################################################################ + +import socket +from distutils.version import LooseVersion + +try: + from libcloud import __version__ as LIBCLOUD_VERSION + from libcloud.common.google import InvalidRequestError + from libcloud.common.types import LibcloudError + from libcloud.dns.types import Provider + from libcloud.dns.types import RecordDoesNotExistError + from libcloud.dns.types import ZoneDoesNotExistError + HAS_LIBCLOUD = True +except ImportError: + HAS_LIBCLOUD = False + + +################################################################################ +# Constants +################################################################################ + +# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS +# v1 API. Earlier versions contained the beta v1 API, which has since been +# deprecated and decommissioned. +MINIMUM_LIBCLOUD_VERSION = '0.19.0' + +# The libcloud Google Cloud DNS provider. +PROVIDER = Provider.GOOGLE + +# The records that libcloud's Google Cloud DNS provider supports. +# +# Libcloud has a RECORD_TYPE_MAP dictionary in the provider that also contains +# this information and is the authoritative source on which records are +# supported, but accessing the dictionary requires creating a Google Cloud DNS +# driver object, which is done in a helper module. +# +# I'm hard-coding the supported record types here, because they (hopefully!) +# shouldn't change much, and it allows me to use it as a "choices" parameter +# in an AnsibleModule argument_spec. +SUPPORTED_RECORD_TYPES = [ 'A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR' ] + + +################################################################################ +# Functions +################################################################################ + +def create_record(module, gcdns, zone, record): + """Creates or overwrites a resource record.""" + + overwrite = module.boolean(module.params['overwrite']) + record_name = module.params['record'] + record_type = module.params['type'] + ttl = module.params['ttl'] + record_data = module.params['record_data'] + data = dict(ttl=ttl, rrdatas=record_data) + + # Google Cloud DNS wants the trailing dot on all DNS names. + if record_name[-1] != '.': + record_name = record_name + '.' + + # If we found a record, we need to check if the values match. + if record is not None: + # If the record matches, we obviously don't have to change anything. + if _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data): + return False + + # The record doesn't match, so we need to check if we can overwrite it. 
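+        # (Overwrite protection matters here because a replacement is
+        # destructive: libcloud cannot update a record in place, so an
+        # overwrite below is a delete followed by a re-create.)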
+        if not overwrite:
+            module.fail_json(
+                msg = 'cannot overwrite existing record, overwrite protection enabled',
+                changed = False
+            )
+
+    # The record either doesn't exist, or it exists and we can overwrite it.
+    if record is None and not module.check_mode:
+        # There's no existing record, so we'll just create it.
+        try:
+            gcdns.create_record(record_name, zone, record_type, data)
+        except InvalidRequestError as error:
+            if error.code == 'invalid':
+                # The resource record name and type are valid by themselves, but
+                # not when combined (e.g., an 'A' record with "www.example.com"
+                # as its value).
+                module.fail_json(
+                    msg = 'value is invalid for the given type: ' +
+                        "%s, got value: %s" % (record_type, record_data),
+                    changed = False
+                )
+
+            elif error.code == 'cnameResourceRecordSetConflict':
+                # We're attempting to create a CNAME resource record when we
+                # already have another type of resource record with the same
+                # domain name.
+                module.fail_json(
+                    msg = "non-CNAME resource record already exists: %s" % record_name,
+                    changed = False
+                )
+
+            else:
+                # The error is something else that we don't know how to handle,
+                # so we'll just re-raise the exception.
+                raise
+
+    elif record is not None and not module.check_mode:
+        # The Google provider in libcloud doesn't support updating a record in
+        # place, so if the record already exists, we need to delete it and
+        # recreate it using the new information.
+        gcdns.delete_record(record)
+
+        try:
+            gcdns.create_record(record_name, zone, record_type, data)
+        except InvalidRequestError:
+            # Something blew up when creating the record. This will usually be a
+            # result of invalid value data in the new record. Unfortunately, we
+            # already changed the state of the record by deleting the old one,
+            # so we'll try to roll back before failing out.
+            try:
+                gcdns.create_record(record.name, record.zone, record.type, record.data)
+                module.fail_json(
+                    msg = 'error updating record, the original record was restored',
+                    changed = False
+                )
+            except LibcloudError:
+                # We deleted the old record, couldn't create the new record, and
+                # couldn't roll back. That really sucks. We'll dump the original
+                # record to the failure output so the user can restore it if
+                # necessary.
+                module.fail_json(
+                    msg = 'error updating record, and could not restore original record, ' +
+                        "original name: %s " % record.name +
+                        "original zone: %s " % record.zone +
+                        "original type: %s " % record.type +
+                        "original data: %s" % record.data,
+                    changed = True)
+
+    return True
+
+
+def remove_record(module, gcdns, record):
+    """Remove a resource record."""
+
+    overwrite = module.boolean(module.params['overwrite'])
+    ttl = module.params['ttl']
+    record_data = module.params['record_data']
+
+    # If there is no record, we're obviously done.
+    if record is None:
+        return False
+
+    # If there is an existing record, do our values match the values of the
+    # existing record?
+    if not overwrite:
+        if not _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
+            module.fail_json(
+                msg = 'cannot delete due to non-matching ttl or record_data: ' +
+                    "ttl: %d, record_data: %s " % (ttl, record_data) +
+                    "original ttl: %d, original record_data: %s" % (record.data['ttl'], record.data['rrdatas']),
+                changed = False
+            )
+
+    # If we got to this point, we're okay to delete the record.
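+    # (In check mode the API call below is skipped, but changed=True is
+    # still returned, matching what a real run would report.)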
+ if not module.check_mode: + gcdns.delete_record(record) + + return True + + +def _get_record(gcdns, zone, record_type, record_name): + """Gets the record object for a given FQDN.""" + + # The record ID is a combination of its type and FQDN. For example, the + # ID of an A record for www.example.com would be 'A:www.example.com.' + record_id = "%s:%s" % (record_type, record_name) + + try: + return gcdns.get_record(zone.id, record_id) + except RecordDoesNotExistError: + return None + + +def _get_zone(gcdns, zone_name, zone_id): + """Gets the zone object for a given domain name.""" + + if zone_id is not None: + try: + return gcdns.get_zone(zone_id) + except ZoneDoesNotExistError: + return None + + # To create a zone, we need to supply a domain name. However, to delete a + # zone, we need to supply a zone ID. Zone ID's are often based on domain + # names, but that's not guaranteed, so we'll iterate through the list of + # zones to see if we can find a matching domain name. + available_zones = gcdns.iterate_zones() + found_zone = None + + for zone in available_zones: + if zone.domain == zone_name: + found_zone = zone + break + + return found_zone + + +def _records_match(old_ttl, old_record_data, new_ttl, new_record_data): + """Checks to see if original and new TTL and values match.""" + + matches = True + + if old_ttl != new_ttl: + matches = False + if old_record_data != new_record_data: + matches = False + + return matches + + +def _sanity_check(module): + """Run sanity checks that don't depend on info from the zone/record.""" + + overwrite = module.params['overwrite'] + record_name = module.params['record'] + record_type = module.params['type'] + state = module.params['state'] + ttl = module.params['ttl'] + record_data = module.params['record_data'] + + # Apache libcloud needs to be installed and at least the minimum version. + if not HAS_LIBCLOUD: + module.fail_json( + msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION, + changed = False + ) + elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION: + module.fail_json( + msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION, + changed = False + ) + + # A negative TTL is not permitted (how would they even work?!). + if ttl < 0: + module.fail_json( + msg = 'TTL cannot be less than zero, got: %d' % ttl, + changed = False + ) + + # Deleting SOA records is not permitted. + if record_type == 'SOA' and state == 'absent': + module.fail_json(msg='cannot delete SOA records', changed=False) + + # Updating SOA records is not permitted. + if record_type == 'SOA' and state == 'present' and overwrite: + module.fail_json(msg='cannot update SOA records', changed=False) + + # Some sanity checks depend on what value was supplied. + if record_data is not None and (state == 'present' or not overwrite): + # A records must contain valid IPv4 addresses. + if record_type == 'A': + for value in record_data: + try: + socket.inet_aton(value) + except socket.error: + module.fail_json( + msg = 'invalid A record value, got: %s' % value, + changed = False + ) + + # AAAA records must contain valid IPv6 addresses. + if record_type == 'AAAA': + for value in record_data: + try: + socket.inet_pton(socket.AF_INET6, value) + except socket.error: + module.fail_json( + msg = 'invalid AAAA record value, got: %s' % value, + changed = False + ) + + # CNAME and SOA records can't have multiple values. 
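+        # (e.g., a record_data list with two CNAME targets for one name
+        # would be ambiguous, so anything longer than one entry is
+        # rejected below.)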
+ if record_type in ['CNAME', 'SOA'] and len(record_data) > 1: + module.fail_json( + msg = 'CNAME or SOA records cannot have more than one value, ' + + "got: %s" % record_data, + changed = False + ) + + # Google Cloud DNS does not support wildcard NS records. + if record_type == 'NS' and record_name[0] == '*': + module.fail_json( + msg = "wildcard NS records not allowed, got: %s" % record_name, + changed = False + ) + + # Values for txt records must begin and end with a double quote. + if record_type == 'TXT': + for value in record_data: + if value[0] != '"' and value[-1] != '"': + module.fail_json( + msg = 'TXT record_data must be enclosed in double quotes, ' + + 'got: %s' % value, + changed = False + ) + + +def _additional_sanity_checks(module, zone): + """Run input sanity checks that depend on info from the zone/record.""" + + overwrite = module.params['overwrite'] + record_name = module.params['record'] + record_type = module.params['type'] + state = module.params['state'] + + # CNAME records are not allowed to have the same name as the root domain. + if record_type == 'CNAME' and record_name == zone.domain: + module.fail_json( + msg = 'CNAME records cannot match the zone name', + changed = False + ) + + # The root domain must always have an NS record. + if record_type == 'NS' and record_name == zone.domain and state == 'absent': + module.fail_json( + msg = 'cannot delete root NS records', + changed = False + ) + + # Updating NS records with the name as the root domain is not allowed + # because libcloud does not support in-place updates and root domain NS + # records cannot be removed. + if record_type == 'NS' and record_name == zone.domain and overwrite: + module.fail_json( + msg = 'cannot update existing root NS records', + changed = False + ) + + # SOA records with names that don't match the root domain are not permitted + # (and wouldn't make sense anyway). 
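+    # (e.g., an SOA record named www.example.com. inside the example.com.
+    # zone would be rejected here.)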
+ if record_type == 'SOA' and record_name != zone.domain: + module.fail_json( + msg = 'non-root SOA records are not permitted, got: %s' % record_name, + changed = False + ) + + +################################################################################ +# Main +################################################################################ + +def main(): + """Main function""" + + module = AnsibleModule( + argument_spec = dict( + state = dict(default='present', choices=['present', 'absent'], type='str'), + record = dict(required=True, aliases=['name'], type='str'), + zone = dict(type='str'), + zone_id = dict(type='str'), + type = dict(required=True, choices=SUPPORTED_RECORD_TYPES, type='str'), + record_data = dict(aliases=['value'], type='list'), + ttl = dict(default=300, type='int'), + overwrite = dict(default=False, type='bool'), + service_account_email = dict(type='str'), + pem_file = dict(type='path'), + credentials_file = dict(type='path'), + project_id = dict(type='str') + ), + required_if = [ + ('state', 'present', ['record_data']), + ('overwrite', False, ['record_data']) + ], + required_one_of = [['zone', 'zone_id']], + supports_check_mode = True + ) + + _sanity_check(module) + + record_name = module.params['record'] + record_type = module.params['type'] + state = module.params['state'] + ttl = module.params['ttl'] + zone_name = module.params['zone'] + zone_id = module.params['zone_id'] + + json_output = dict( + state = state, + record = record_name, + zone = zone_name, + zone_id = zone_id, + type = record_type, + record_data = module.params['record_data'], + ttl = ttl, + overwrite = module.boolean(module.params['overwrite']) + ) + + # Google Cloud DNS wants the trailing dot on all DNS names. + if zone_name is not None and zone_name[-1] != '.': + zone_name = zone_name + '.' + if record_name[-1] != '.': + record_name = record_name + '.' + + # Build a connection object that we can use to connect with Google Cloud + # DNS. + gcdns = gcdns_connect(module, provider=PROVIDER) + + # We need to check that the zone we're creating a record for actually + # exists. + zone = _get_zone(gcdns, zone_name, zone_id) + if zone is None and zone_name is not None: + module.fail_json( + msg = 'zone name was not found: %s' % zone_name, + changed = False + ) + elif zone is None and zone_id is not None: + module.fail_json( + msg = 'zone id was not found: %s' % zone_id, + changed = False + ) + + # Populate the returns with the actual zone information. + json_output['zone'] = zone.domain + json_output['zone_id'] = zone.id + + # We also need to check if the record we want to create or remove actually + # exists. + try: + record = _get_record(gcdns, zone, record_type, record_name) + except InvalidRequestError: + # We gave Google Cloud DNS an invalid DNS record name. + module.fail_json( + msg = 'record name is invalid: %s' % record_name, + changed = False + ) + + _additional_sanity_checks(module, zone) + + diff = dict() + + # Build the 'before' diff + if record is None: + diff['before'] = '' + diff['before_header'] = '' + else: + diff['before'] = dict( + record = record.data['name'], + type = record.data['type'], + record_data = record.data['rrdatas'], + ttl = record.data['ttl'] + ) + diff['before_header'] = "%s:%s" % (record_type, record_name) + + # Create, remove, or modify the record. 
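+    # (The 'after' side of the diff is built from the requested parameters
+    # rather than from a fresh API read, so it is available even in check
+    # mode.)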
+ if state == 'present': + diff['after'] = dict( + record = record_name, + type = record_type, + record_data = module.params['record_data'], + ttl = ttl + ) + diff['after_header'] = "%s:%s" % (record_type, record_name) + + changed = create_record(module, gcdns, zone, record) + + elif state == 'absent': + diff['after'] = '' + diff['after_header'] = '' + + changed = remove_record(module, gcdns, record) + + module.exit_json(changed=changed, diff=diff, **json_output) + + +from ansible.module_utils.basic import * +from ansible.module_utils.gcdns import * + +if __name__ == '__main__': + main() diff --git a/cloud/google/gcdns_zone.py b/cloud/google/gcdns_zone.py new file mode 100644 index 00000000000..683cb881899 --- /dev/null +++ b/cloud/google/gcdns_zone.py @@ -0,0 +1,385 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2015 CallFire Inc. +# +# This file is part of Ansible. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +################################################################################ +# Documentation +################################################################################ + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: gcdns_zone +short_description: Creates or removes zones in Google Cloud DNS +description: + - Creates or removes managed zones in Google Cloud DNS. +version_added: "2.2" +author: "William Albert (@walbert947)" +requirements: + - "python >= 2.6" + - "apache-libcloud >= 0.19.0" +options: + state: + description: + - Whether the given zone should or should not be present. + required: false + choices: ["present", "absent"] + default: "present" + zone: + description: + - The DNS domain name of the zone. + - This is NOT the Google Cloud DNS zone ID (e.g., example-com). If + you attempt to specify a zone ID, this module will attempt to + create a TLD and will fail. + required: true + aliases: ['name'] + description: + description: + - An arbitrary text string to use for the zone description. + required: false + default: "" + service_account_email: + description: + - The e-mail address for a service account with access to Google + Cloud DNS. + required: false + default: null + pem_file: + description: + - The path to the PEM file associated with the service account + email. + - This option is deprecated and may be removed in a future release. + Use I(credentials_file) instead. + required: false + default: null + credentials_file: + description: + - The path to the JSON file associated with the service account + email. + required: false + default: null + project_id: + description: + - The Google Cloud Platform project ID to use. + required: false + default: null +notes: + - See also M(gcdns_record). + - Zones that are newly created must still be set up with a domain registrar + before they can be used. +''' + +EXAMPLES = ''' +# Basic zone creation example. 
+- name: Create a basic zone with the minimum number of parameters. + gcdns_zone: zone=example.com + +# Zone removal example. +- name: Remove a zone. + gcdns_zone: zone=example.com state=absent + +# Zone creation with description +- name: Creating a zone with a description + gcdns_zone: zone=example.com description="This is an awesome zone" +''' + +RETURN = ''' +description: + description: The zone's description + returned: success + type: string + sample: This is an awesome zone +state: + description: Whether the zone is present or absent + returned: success + type: string + sample: present +zone: + description: The zone's DNS name + returned: success + type: string + sample: example.com. +''' + + +################################################################################ +# Imports +################################################################################ + +from distutils.version import LooseVersion + +try: + from libcloud import __version__ as LIBCLOUD_VERSION + from libcloud.common.google import InvalidRequestError + from libcloud.common.google import ResourceExistsError + from libcloud.common.google import ResourceNotFoundError + from libcloud.dns.types import Provider + HAS_LIBCLOUD = True +except ImportError: + HAS_LIBCLOUD = False + + +################################################################################ +# Constants +################################################################################ + +# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS +# v1 API. Earlier versions contained the beta v1 API, which has since been +# deprecated and decommissioned. +MINIMUM_LIBCLOUD_VERSION = '0.19.0' + +# The libcloud Google Cloud DNS provider. +PROVIDER = Provider.GOOGLE + +# The URL used to verify ownership of a zone in Google Cloud DNS. +ZONE_VERIFICATION_URL= 'https://www.google.com/webmasters/verification/' + +################################################################################ +# Functions +################################################################################ + +def create_zone(module, gcdns, zone): + """Creates a new Google Cloud DNS zone.""" + + description = module.params['description'] + extra = dict(description = description) + zone_name = module.params['zone'] + + # Google Cloud DNS wants the trailing dot on the domain name. + if zone_name[-1] != '.': + zone_name = zone_name + '.' + + # If we got a zone back, then the domain exists. + if zone is not None: + return False + + # The zone doesn't exist yet. + try: + if not module.check_mode: + gcdns.create_zone(domain=zone_name, extra=extra) + return True + + except ResourceExistsError: + # The zone already exists. We checked for this already, so either + # Google is lying, or someone was a ninja and created the zone + # within milliseconds of us checking for its existence. In any case, + # the zone has already been created, so we have nothing more to do. + return False + + except InvalidRequestError as error: + if error.code == 'invalid': + # The zone name or a parameter might be completely invalid. This is + # typically caused by an illegal DNS name (e.g. foo..com). + module.fail_json( + msg = "zone name is not a valid DNS name: %s" % zone_name, + changed = False + ) + + elif error.code == 'managedZoneDnsNameNotAvailable': + # Google Cloud DNS will refuse to create zones with certain domain + # names, such as TLDs, ccTLDs, or special domain names such as + # example.com. 
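+                # (For instance, a request to create a zone for example.com
+                # itself would land in this branch rather than raising an
+                # unhandled error.)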
+ module.fail_json( + msg = "zone name is reserved or already in use: %s" % zone_name, + changed = False + ) + + elif error.code == 'verifyManagedZoneDnsNameOwnership': + # This domain name needs to be verified before Google will create + # it. This occurs when a user attempts to create a zone which shares + # a domain name with a zone hosted elsewhere in Google Cloud DNS. + module.fail_json( + msg = "ownership of zone %s needs to be verified at %s" % (zone_name, ZONE_VERIFICATION_URL), + changed = False + ) + + else: + # The error is something else that we don't know how to handle, + # so we'll just re-raise the exception. + raise + + +def remove_zone(module, gcdns, zone): + """Removes an existing Google Cloud DNS zone.""" + + # If there's no zone, then we're obviously done. + if zone is None: + return False + + # An empty zone will have two resource records: + # 1. An NS record with a list of authoritative name servers + # 2. An SOA record + # If any additional resource records are present, Google Cloud DNS will + # refuse to remove the zone. + if len(zone.list_records()) > 2: + module.fail_json( + msg = "zone is not empty and cannot be removed: %s" % zone.domain, + changed = False + ) + + try: + if not module.check_mode: + gcdns.delete_zone(zone) + return True + + except ResourceNotFoundError: + # When we performed our check, the zone existed. It may have been + # deleted by something else. It's gone, so whatever. + return False + + except InvalidRequestError as error: + if error.code == 'containerNotEmpty': + # When we performed our check, the zone existed and was empty. In + # the milliseconds between the check and the removal command, + # records were added to the zone. + module.fail_json( + msg = "zone is not empty and cannot be removed: %s" % zone.domain, + changed = False + ) + + else: + # The error is something else that we don't know how to handle, + # so we'll just re-raise the exception. + raise + + +def _get_zone(gcdns, zone_name): + """Gets the zone object for a given domain name.""" + + # To create a zone, we need to supply a zone name. However, to delete a + # zone, we need to supply a zone ID. Zone ID's are often based on zone + # names, but that's not guaranteed, so we'll iterate through the list of + # zones to see if we can find a matching name. + available_zones = gcdns.iterate_zones() + found_zone = None + + for zone in available_zones: + if zone.domain == zone_name: + found_zone = zone + break + + return found_zone + +def _sanity_check(module): + """Run module sanity checks.""" + + zone_name = module.params['zone'] + + # Apache libcloud needs to be installed and at least the minimum version. + if not HAS_LIBCLOUD: + module.fail_json( + msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION, + changed = False + ) + elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION: + module.fail_json( + msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION, + changed = False + ) + + # Google Cloud DNS does not support the creation of TLDs. + if '.' 
not in zone_name or len([label for label in zone_name.split('.') if label]) == 1: + module.fail_json( + msg = 'cannot create top-level domain: %s' % zone_name, + changed = False + ) + +################################################################################ +# Main +################################################################################ + +def main(): + """Main function""" + + module = AnsibleModule( + argument_spec = dict( + state = dict(default='present', choices=['present', 'absent'], type='str'), + zone = dict(required=True, aliases=['name'], type='str'), + description = dict(default='', type='str'), + service_account_email = dict(type='str'), + pem_file = dict(type='path'), + credentials_file = dict(type='path'), + project_id = dict(type='str') + ), + supports_check_mode = True + ) + + _sanity_check(module) + + zone_name = module.params['zone'] + state = module.params['state'] + + # Google Cloud DNS wants the trailing dot on the domain name. + if zone_name[-1] != '.': + zone_name = zone_name + '.' + + json_output = dict( + state = state, + zone = zone_name, + description = module.params['description'] + ) + + # Build a connection object that was can use to connect with Google + # Cloud DNS. + gcdns = gcdns_connect(module, provider=PROVIDER) + + # We need to check if the zone we're attempting to create already exists. + zone = _get_zone(gcdns, zone_name) + + diff = dict() + + # Build the 'before' diff + if zone is None: + diff['before'] = '' + diff['before_header'] = '' + else: + diff['before'] = dict( + zone = zone.domain, + description = zone.extra['description'] + ) + diff['before_header'] = zone_name + + # Create or remove the zone. + if state == 'present': + diff['after'] = dict( + zone = zone_name, + description = module.params['description'] + ) + diff['after_header'] = zone_name + + changed = create_zone(module, gcdns, zone) + + elif state == 'absent': + diff['after'] = '' + diff['after_header'] = '' + + changed = remove_zone(module, gcdns, zone) + + module.exit_json(changed=changed, diff=diff, **json_output) + + +from ansible.module_utils.basic import * +from ansible.module_utils.gcdns import * + +if __name__ == '__main__': + main() diff --git a/cloud/google/gce_img.py b/cloud/google/gce_img.py index 5775a94794d..e340808539a 100644 --- a/cloud/google/gce_img.py +++ b/cloud/google/gce_img.py @@ -18,6 +18,10 @@ """An Ansible module to utilize GCE image resources.""" +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: gce_img @@ -33,55 +37,58 @@ - the name of the image to create or delete required: true default: null - aliases: [] description: description: - an optional description required: false default: null - aliases: [] + family: + description: + - an optional family name + required: false + default: null + version_added: "2.2" source: description: - the source disk or the Google Cloud Storage URI to create the image from required: false default: null - aliases: [] state: description: - desired state of the image required: false default: "present" choices: ["present", "absent"] - aliases: [] zone: description: - the zone of the disk specified by source required: false default: "us-central1-a" - aliases: [] + timeout: + description: + - timeout for the operation + required: false + default: 180 + version_added: "2.0" service_account_email: description: - service account email required: false default: null - aliases: [] pem_file: description: - path to the pem file associated with the 
service account email required: false default: null - aliases: [] project_id: description: - your GCE project ID required: false default: null - aliases: [] - requirements: - "python >= 2.6" - "apache-libcloud" -author: "Peter Tan (@tanpeter)" +author: "Tom Melendez (supertom)" ''' EXAMPLES = ''' @@ -108,9 +115,9 @@ state: absent ''' -import sys try: + import libcloud from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver from libcloud.common.google import GoogleBaseError @@ -121,6 +128,9 @@ except ImportError: has_libcloud = False +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.gce import gce_connect + GCS_URI = 'https://storage.googleapis.com/' @@ -130,6 +140,8 @@ def create_image(gce, name, module): source = module.params.get('source') zone = module.params.get('zone') desc = module.params.get('description') + timeout = module.params.get('timeout') + family = module.params.get('family') if not source: module.fail_json(msg='Must supply a source', changed=False) @@ -146,16 +158,24 @@ def create_image(gce, name, module): except ResourceNotFoundError: module.fail_json(msg='Disk %s not found in zone %s' % (source, zone), changed=False) - except GoogleBaseError, e: + except GoogleBaseError as e: module.fail_json(msg=str(e), changed=False) + gce_extra_args = {} + if family is not None: + gce_extra_args['family'] = family + + old_timeout = gce.connection.timeout try: - gce.ex_create_image(name, volume, desc, False) + gce.connection.timeout = timeout + gce.ex_create_image(name, volume, desc, use_existing=False, **gce_extra_args) return True except ResourceExistsError: return False - except GoogleBaseError, e: + except GoogleBaseError as e: module.fail_json(msg=str(e), changed=False) + finally: + gce.connection.timeout = old_timeout def delete_image(gce, name, module): @@ -165,7 +185,7 @@ def delete_image(gce, name, module): return True except ResourceNotFoundError: return False - except GoogleBaseError, e: + except GoogleBaseError as e: module.fail_json(msg=str(e), changed=False) @@ -173,13 +193,15 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), + family=dict(), description=dict(), source=dict(), state=dict(default='present', choices=['present', 'absent']), zone=dict(default='us-central1-a'), service_account_email=dict(), - pem_file=dict(), + pem_file=dict(type='path'), project_id=dict(), + timeout=dict(type='int', default=180) ) ) @@ -190,8 +212,13 @@ def main(): name = module.params.get('name') state = module.params.get('state') + family = module.params.get('family') changed = False + if family is not None and hasattr(libcloud, '__version__') and libcloud.__version__ <= '0.20.1': + module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'family' option", + changed=False) + # user wants to create an image. if state == 'present': changed = create_image(gce, name, module) @@ -201,10 +228,6 @@ def main(): changed = delete_image(gce, name, module) module.exit_json(changed=changed, name=name) - sys.exit(0) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.gce import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/google/gce_tag.py b/cloud/google/gce_tag.py index 186f570b3f1..7122a2398a0 100644 --- a/cloud/google/gce_tag.py +++ b/cloud/google/gce_tag.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. 
If not, see +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: gce_tag @@ -100,6 +104,9 @@ except ImportError: HAS_LIBCLOUD = False +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.gce import gce_connect + def add_tags(gce, module, instance_name, tags): """Add tags to instance.""" @@ -117,7 +124,7 @@ def add_tags(gce, module, instance_name, tags): node = gce.ex_get_node(instance_name, zone=zone) except ResourceNotFoundError: module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False) - except GoogleBaseError, e: + except GoogleBaseError as e: module.fail_json(msg=str(e), changed=False) node_tags = node.extra['tags'] @@ -156,7 +163,7 @@ def remove_tags(gce, module, instance_name, tags): node = gce.ex_get_node(instance_name, zone=zone) except ResourceNotFoundError: module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False) - except GoogleBaseError, e: + except GoogleBaseError as e: module.fail_json(msg=str(e), changed=False) node_tags = node.extra['tags'] @@ -188,7 +195,7 @@ def main(): state=dict(default='present', choices=['present', 'absent']), zone=dict(default='us-central1-a'), service_account_email=dict(), - pem_file=dict(), + pem_file=dict(type='path'), project_id=dict(), ) ) @@ -219,12 +226,7 @@ def main(): changed, tags_changed = remove_tags(gce, module, instance_name, tags) module.exit_json(changed=changed, instance_name=instance_name, tags=tags_changed, zone=zone) - sys.exit(0) -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.gce import * if __name__ == '__main__': main() - diff --git a/cloud/lxc/lxc_container.py b/cloud/lxc/lxc_container.py index adb9637acf9..d3b6804ce50 100644 --- a/cloud/lxc/lxc_container.py +++ b/cloud/lxc/lxc_container.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ --- module: lxc_container @@ -57,7 +61,7 @@ description: - Path to the LXC configuration file. required: false - default: /etc/lxc/default.conf + default: null lv_name: description: - Name of the logical volume, defaults to the container name. @@ -144,7 +148,7 @@ description: - Path the save the archived container. If the path does not exist the archive method will attempt to create it. - default: /tmp + default: null archive_compression: choices: - gzip @@ -268,7 +272,8 @@ register: lvm_container_info - name: Debug info on container "test-container-lvm" - debug: var=lvm_container_info + debug: + var: lvm_container_info - name: Run a command in a container and ensure its in a "stopped" state. 
lxc_container: @@ -334,7 +339,8 @@ register: clone_container_info - name: debug info on container "test-container" - debug: var=clone_container_info + debug: + var: clone_container_info - name: Clone a container using snapshot lxc_container: @@ -364,7 +370,7 @@ - name: Destroy a container lxc_container: - name: "{{ item }}" + name: '{{ item }}' state: absent with_items: - test-container-stopped @@ -381,6 +387,50 @@ - test-container-new-archive-destroyed-clone """ +RETURN=""" +lxc_container: + description: container information + returned: success + type: list + contains: + name: + description: name of the lxc container + returned: success + type: string + sample: test_host + init_pid: + description: pid of the lxc init process + returned: success + type: int + sample: 19786 + interfaces: + description: list of the container's network interfaces + returned: success + type: list + sample: [ "eth0", "lo" ] + ips: + description: list of ips + returned: success + type: list + sample: [ "10.0.3.3" ] + state: + description: resulting state of the container + returned: success + type: string + sample: "running" + archive: + description: resulting state of the container + returned: success, when archive is true + type: string + sample: "/tmp/test-container-config.tar" + clone: + description: if the container was cloned + returned: success, when clone_name is specified + type: boolean + sample: True +""" + +import re try: import lxc @@ -515,13 +565,8 @@ def create_script(command): import subprocess import tempfile - # Ensure that the directory /opt exists. - if not path.isdir('/opt'): - os.mkdir('/opt') - - # Create the script. - script_file = path.join('/opt', '.lxc-attach-script') - f = open(script_file, 'wb') + (fd, script_file) = tempfile.mkstemp(prefix='lxc-attach-script') + f = os.fdopen(fd, 'wb') try: f.write(ATTACH_TEMPLATE % {'container_command': command}) f.flush() @@ -529,16 +574,13 @@ def create_script(command): f.close() # Ensure the script is executable. - os.chmod(script_file, 1755) - - # Get temporary directory. - tempdir = tempfile.gettempdir() + os.chmod(script_file, int('0700',8)) # Output log file. - stdout_file = open(path.join(tempdir, 'lxc-attach-script.log'), 'ab') + stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab') # Error log file. - stderr_file = open(path.join(tempdir, 'lxc-attach-script.err'), 'ab') + stderr_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-err')[0], 'ab') # Execute the script command. try: @@ -567,6 +609,7 @@ def __init__(self, module): self.state = self.module.params.get('state', None) self.state_change = False self.lxc_vg = None + self.lxc_path = self.module.params.get('lxc_path', None) self.container_name = self.module.params['name'] self.container = self.get_container_bind() self.archive_info = None @@ -591,7 +634,7 @@ def _roundup(num): return num @staticmethod - def _container_exists(container_name): + def _container_exists(container_name, lxc_path=None): """Check if a container exists. :param container_name: Name of the container. @@ -599,7 +642,7 @@ def _container_exists(container_name): :returns: True or False if the container is found. 
:rtype: ``bol`` """ - if [i for i in lxc.list_containers() if i == container_name]: + if [i for i in lxc.list_containers(config_path=lxc_path) if i == container_name]: return True else: return False @@ -637,52 +680,31 @@ def _get_vars(self, variables): variables.pop(v, None) return_dict = dict() + false_values = [None, ''] + BOOLEANS_FALSE for k, v in variables.items(): _var = self.module.params.get(k) - if not [i for i in [None, ''] + BOOLEANS_FALSE if i == _var]: + if _var not in false_values: return_dict[v] = _var else: return return_dict - def _run_command(self, build_command, unsafe_shell=False, timeout=600): + def _run_command(self, build_command, unsafe_shell=False): """Return information from running an Ansible Command. This will squash the build command list into a string and then execute the command via Ansible. The output is returned to the method. This output is returned as `return_code`, `stdout`, `stderr`. - Prior to running the command the method will look to see if the LXC - lockfile is present. If the lockfile "/var/lock/subsys/lxc" the method - will wait upto 10 minutes for it to be gone; polling every 5 seconds. - :param build_command: Used for the command and all options. :type build_command: ``list`` :param unsafe_shell: Enable or Disable unsafe sell commands. :type unsafe_shell: ``bol`` - :param timeout: Time before the container create process quites. - :type timeout: ``int`` """ - lockfile = '/var/lock/subsys/lxc' - - for _ in xrange(timeout): - if os.path.exists(lockfile): - time.sleep(1) - else: - return self.module.run_command( - ' '.join(build_command), - use_unsafe_shell=unsafe_shell - ) - else: - message = ( - 'The LXC subsystem is locked and after 5 minutes it never' - ' became unlocked. Lockfile [ %s ]' % lockfile - ) - self.failure( - error='LXC subsystem locked', - rc=0, - msg=message - ) + return self.module.run_command( + ' '.join(build_command), + use_unsafe_shell=unsafe_shell + ) def _config(self): """Configure an LXC container. 
@@ -710,10 +732,13 @@ def _config(self): config_change = False for key, value in parsed_options: + key = key.strip() + value = value.strip() new_entry = '%s = %s\n' % (key, value) + keyre = re.compile(r'%s(\s+)?=' % key) for option_line in container_config: # Look for key in config - if option_line.startswith(key): + if keyre.match(option_line): _, _value = option_line.split('=', 1) config_value = ' '.join(_value.split()) line_index = container_config.index(option_line) @@ -879,7 +904,8 @@ def _container_data(self): 'interfaces': self.container.get_interfaces(), 'ips': self.container.get_ips(), 'state': self._get_state(), - 'init_pid': int(self.container.init_pid) + 'init_pid': int(self.container.init_pid), + 'name' : self.container_name, } def _unfreeze(self): @@ -903,7 +929,7 @@ def _get_state(self): :rtype: ``str`` """ - if self._container_exists(container_name=self.container_name): + if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): return str(self.container.state).lower() else: return str('absent') @@ -968,7 +994,7 @@ def _check_clone(self): clone_name = self.module.params.get('clone_name') if clone_name: - if not self._container_exists(container_name=clone_name): + if not self._container_exists(container_name=clone_name, lxc_path=self.lxc_path): self.clone_info = { 'cloned': self._container_create_clone() } @@ -985,7 +1011,7 @@ def _destroyed(self, timeout=60): """ for _ in xrange(timeout): - if not self._container_exists(container_name=self.container_name): + if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): break # Check if the container needs to have an archive created. @@ -1024,7 +1050,7 @@ def _frozen(self, count=0): """ self.check_count(count=count, method='frozen') - if self._container_exists(container_name=self.container_name): + if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): self._execute_command() # Perform any configuration updates @@ -1061,7 +1087,7 @@ def _restarted(self, count=0): """ self.check_count(count=count, method='restart') - if self._container_exists(container_name=self.container_name): + if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): self._execute_command() # Perform any configuration updates @@ -1094,7 +1120,7 @@ def _stopped(self, count=0): """ self.check_count(count=count, method='stop') - if self._container_exists(container_name=self.container_name): + if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): self._execute_command() # Perform any configuration updates @@ -1124,7 +1150,7 @@ def _started(self, count=0): """ self.check_count(count=count, method='start') - if self._container_exists(container_name=self.container_name): + if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path): container_state = self._get_state() if container_state == 'running': pass @@ -1330,6 +1356,8 @@ def _create_tar(self, source_dir): :type source_dir: ``str`` """ + old_umask = os.umask(int('0077',8)) + archive_path = self.module.params.get('archive_path') if not os.path.isdir(archive_path): os.makedirs(archive_path) @@ -1360,6 +1388,9 @@ def _create_tar(self, source_dir): build_command=build_command, unsafe_shell=True ) + + os.umask(old_umask) + if rc != 0: self.failure( err=err, @@ -1642,8 +1673,7 @@ def main(): type='str' ), config=dict( - type='str', - default='/etc/lxc/default.conf' + type='path', ), vg_name=dict( type='str', @@ -1661,7 +1691,7 @@ def 
main(): default='5G' ), directory=dict( - type='str' + type='path' ), zfs_root=dict( type='str' @@ -1670,7 +1700,7 @@ def main(): type='str' ), lxc_path=dict( - type='str' + type='path' ), state=dict( choices=LXC_ANSIBLE_STATES.keys(), @@ -1683,7 +1713,7 @@ def main(): type='str' ), container_log=dict( - choices=BOOLEANS, + type='bool', default='false' ), container_log_level=dict( @@ -1695,16 +1725,15 @@ def main(): required=False ), clone_snapshot=dict( - choices=BOOLEANS, + type='bool', default='false' ), archive=dict( - choices=BOOLEANS, + type='bool', default='false' ), archive_path=dict( - type='str', - default='/tmp' + type='path', ), archive_compression=dict( choices=LXC_COMPRESSION_MAP.keys(), @@ -1712,6 +1741,9 @@ def main(): ) ), supports_check_mode=False, + required_if = ([ + ('archive', True, ['archive_path']) + ]), ) if not HAS_LXC: @@ -1729,4 +1761,5 @@ def main(): # import module bits from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/lxd/__init__.py b/cloud/lxd/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/lxd/lxd_container.py b/cloud/lxd/lxd_container.py new file mode 100644 index 00000000000..b4eaa5739a7 --- /dev/null +++ b/cloud/lxd/lxd_container.py @@ -0,0 +1,615 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Hiroaki Nakamura +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: lxd_container +short_description: Manage LXD Containers +version_added: "2.2" +description: + - Management of LXD containers +author: "Hiroaki Nakamura (@hnakamur)" +options: + name: + description: + - Name of a container. + required: true + architecture: + description: + - The archiecture for the container (e.g. "x86_64" or "i686"). + See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1) + required: false + config: + description: + - 'The config for the container (e.g. {"limits.cpu": "2"}). + See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)' + - If the container already exists and its "config" value in metadata + obtained from + GET /1.0/containers/ + U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersname) + are different, they this module tries to apply the configurations. + - The key starts with 'volatile.' are ignored for this comparison. + - Not all config values are supported to apply the existing container. + Maybe you need to delete and recreate a container. + required: false + devices: + description: + - 'The devices for the container + (e.g. { "rootfs": { "path": "/dev/kvm", "type": "unix-char" }). + See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)' + required: false + ephemeral: + description: + - Whether or not the container is ephemeral (e.g. 
true or false).
+        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
+    required: false
+  source:
+    description:
+      - 'The source for the container
+        (e.g. { "type": "image",
+                "mode": "pull",
+                "server": "https://images.linuxcontainers.org",
+                "protocol": "lxd",
+                "alias": "ubuntu/xenial/amd64" }).
+        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+    required: false
+  state:
+    choices:
+      - started
+      - stopped
+      - restarted
+      - absent
+      - frozen
+    description:
+      - Define the state of a container.
+    required: false
+    default: started
+  timeout:
+    description:
+      - A timeout for changing the state of the container.
+      - This is also used as a timeout for waiting until IPv4 addresses
+        are set on all network interfaces in the container after
+        starting or restarting.
+    required: false
+    default: 30
+  wait_for_ipv4_addresses:
+    description:
+      - If this is true, the M(lxd_container) module waits until IPv4
+        addresses are set on all network interfaces in the container
+        after starting or restarting.
+    required: false
+    default: false
+  force_stop:
+    description:
+      - If this is true, the M(lxd_container) module forces the container
+        to stop when it stops or restarts the container.
+    required: false
+    default: false
+  url:
+    description:
+      - The unix domain socket path or the https URL for the LXD server.
+    required: false
+    default: unix:/var/lib/lxd/unix.socket
+  key_file:
+    description:
+      - The client certificate key file path.
+    required: false
+    default: '"{}/.config/lxc/client.key" .format(os.environ["HOME"])'
+  cert_file:
+    description:
+      - The client certificate file path.
+    required: false
+    default: '"{}/.config/lxc/client.crt" .format(os.environ["HOME"])'
+  trust_password:
+    description:
+      - The client trusted password.
+      - You need to set this password on the LXD server before
+        running this module using the following command.
+        lxc config set core.trust_password
+        See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
+      - If trust_password is set, this module sends a request for
+        authentication before sending any requests.
+    required: false
+notes:
+  - Containers must have a unique name. If you attempt to create a container
+    with a name that already exists in the user's namespace, the module will
+    simply return as "unchanged".
+  - There are two ways to run commands in containers: using the command
+    module or using the ansible lxd connection plugin bundled in Ansible >=
+    2.1. The latter requires Python to be installed in the container, which
+    can be done with the command module.
+  - You can copy a file from the host to the container
+    with the Ansible M(copy) and M(template) modules and the `lxd` connection plugin.
+    See the example below.
+  - You can copy a file from the created container to localhost
+    with `command=lxc file pull container_name/dir/filename filename`.
+    See the first example below.
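+  - The default url connects through a local unix socket, so by default the
+    module normally has to run on the LXD host itself.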
+''' + +EXAMPLES = ''' +# An example for creating a Ubuntu container and install python +- hosts: localhost + connection: local + tasks: + - name: Create a started container + lxd_container: + name: mycontainer + state: started + source: + type: image + mode: pull + server: https://images.linuxcontainers.org + protocol: lxd + alias: ubuntu/xenial/amd64 + profiles: ["default"] + wait_for_ipv4_addresses: true + timeout: 600 + + - name: check python is installed in container + delegate_to: mycontainer + raw: dpkg -s python + register: python_install_check + failed_when: python_install_check.rc not in [0, 1] + changed_when: false + + - name: install python in container + delegate_to: mycontainer + raw: apt-get install -y python + when: python_install_check.rc == 1 + +# An example for deleting a container +- hosts: localhost + connection: local + tasks: + - name: Delete a container + lxd_container: + name: mycontainer + state: absent + +# An example for restarting a container +- hosts: localhost + connection: local + tasks: + - name: Restart a container + lxd_container: + name: mycontainer + state: restarted + +# An example for restarting a container using https to connect to the LXD server +- hosts: localhost + connection: local + tasks: + - name: Restart a container + lxd_container: + url: https://127.0.0.1:8443 + # These cert_file and key_file values are equal to the default values. + #cert_file: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" + #key_file: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" + trust_password: mypassword + name: mycontainer + state: restarted + +# Note your container must be in the inventory for the below example. +# +# [containers] +# mycontainer ansible_connection=lxd +# +- hosts: + - mycontainer + tasks: + - name: copy /etc/hosts in the created container to localhost with name "mycontainer-hosts" + fetch: + src: /etc/hosts + dest: /tmp/mycontainer-hosts + flat: true +''' + +RETURN=''' +addresses: + description: Mapping from the network device name to a list of IPv4 addresses in the container + returned: when state is started or restarted + type: object + sample: {"eth0": ["10.155.92.191"]} +old_state: + description: The old state of the container + returned: when state is started or restarted + type: string + sample: "stopped" +logs: + description: The logs of requests and responses. + returned: when ansible-playbook is invoked with -vvvv. + type: list + sample: "(too long to be placed here)" +actions: + description: List of actions performed for the container. + returned: success + type: list + sample: '["create", "start"]' +''' + +import os +from ansible.module_utils.lxd import LXDClient, LXDClientException + +# LXD_ANSIBLE_STATES is a map of states that contain values of methods used +# when a particular state is evoked. +LXD_ANSIBLE_STATES = { + 'started': '_started', + 'stopped': '_stopped', + 'restarted': '_restarted', + 'absent': '_destroyed', + 'frozen': '_frozen' +} + +# ANSIBLE_LXD_STATES is a map of states of lxd containers to the Ansible +# lxc_container module state parameter value. +ANSIBLE_LXD_STATES = { + 'Running': 'started', + 'Stopped': 'stopped', + 'Frozen': 'frozen', +} + +# CONFIG_PARAMS is a list of config attribute names. 
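+# Each parameter named here, when set by the user, is copied verbatim into
+# the request body sent to the LXD REST API (e.g., POST /1.0/containers on
+# create).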
+# CONFIG_PARAMS is a list of config attribute names.
+CONFIG_PARAMS = [
+    'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source'
+]
+
+try:
+    callable(all)
+except NameError:
+    # For python <2.5
+    # This definition is copied from https://docs.python.org/2/library/functions.html#all
+    def all(iterable):
+        for element in iterable:
+            if not element:
+                return False
+        return True
+
+class LXDContainerManagement(object):
+    def __init__(self, module):
+        """Management of LXC containers via Ansible.
+
+        :param module: Processed Ansible Module.
+        :type module: ``object``
+        """
+        self.module = module
+        self.name = self.module.params['name']
+        self._build_config()
+
+        self.state = self.module.params['state']
+
+        self.timeout = self.module.params['timeout']
+        self.wait_for_ipv4_addresses = self.module.params['wait_for_ipv4_addresses']
+        self.force_stop = self.module.params['force_stop']
+        self.addresses = None
+
+        self.url = self.module.params['url']
+        self.key_file = self.module.params.get('key_file', None)
+        self.cert_file = self.module.params.get('cert_file', None)
+        self.debug = self.module._verbosity >= 4
+        try:
+            self.client = LXDClient(
+                self.url, key_file=self.key_file, cert_file=self.cert_file,
+                debug=self.debug
+            )
+        except LXDClientException as e:
+            self.module.fail_json(msg=e.msg)
+        self.trust_password = self.module.params.get('trust_password', None)
+        self.actions = []
+
+    def _build_config(self):
+        self.config = {}
+        for attr in CONFIG_PARAMS:
+            param_val = self.module.params.get(attr, None)
+            if param_val is not None:
+                self.config[attr] = param_val
+
+    def _get_container_json(self):
+        return self.client.do(
+            'GET', '/1.0/containers/{0}'.format(self.name),
+            ok_error_codes=[404]
+        )
+
+    def _get_container_state_json(self):
+        return self.client.do(
+            'GET', '/1.0/containers/{0}/state'.format(self.name),
+            ok_error_codes=[404]
+        )
+
+    @staticmethod
+    def _container_json_to_module_state(resp_json):
+        if resp_json['type'] == 'error':
+            return 'absent'
+        return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
+
+    def _change_state(self, action, force_stop=False):
+        body_json = {'action': action, 'timeout': self.timeout}
+        if force_stop:
+            body_json['force'] = True
+        return self.client.do('PUT', '/1.0/containers/{0}/state'.format(self.name), body_json=body_json)
+
+    def _create_container(self):
+        config = self.config.copy()
+        config['name'] = self.name
+        self.client.do('POST', '/1.0/containers', config)
+        self.actions.append('create')
+
+    def _start_container(self):
+        self._change_state('start')
+        self.actions.append('start')
+
+    def _stop_container(self):
+        self._change_state('stop', self.force_stop)
+        self.actions.append('stop')
+
+    def _restart_container(self):
+        self._change_state('restart', self.force_stop)
+        self.actions.append('restart')
+
+    def _delete_container(self):
+        self.client.do('DELETE', '/1.0/containers/{0}'.format(self.name))
+        self.actions.append('delete')
+
+    def _freeze_container(self):
+        self._change_state('freeze')
+        self.actions.append('freeze')
+
+    def _unfreeze_container(self):
+        self._change_state('unfreeze')
+        self.actions.append('unfreeze')
+
+    def _container_ipv4_addresses(self, ignore_devices=['lo']):
+        resp_json = self._get_container_state_json()
+        network = resp_json['metadata']['network'] or {}
+        network = dict((k, v) for k, v in network.items() if k not in ignore_devices) or {}
+        addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) or {}
+        return addresses
+
+    @staticmethod
+    def _has_all_ipv4_addresses(addresses):
+        return
len(addresses) > 0 and all([len(v) > 0 for v in addresses.itervalues()]) + + def _get_addresses(self): + try: + due = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout) + while datetime.datetime.now() < due: + time.sleep(1) + addresses = self._container_ipv4_addresses() + if self._has_all_ipv4_addresses(addresses): + self.addresses = addresses + return + except LXDClientException as e: + e.msg = 'timeout for getting IPv4 addresses' + raise + + def _started(self): + if self.old_state == 'absent': + self._create_container() + self._start_container() + else: + if self.old_state == 'frozen': + self._unfreeze_container() + elif self.old_state == 'stopped': + self._start_container() + if self._needs_to_apply_container_configs(): + self._apply_container_configs() + if self.wait_for_ipv4_addresses: + self._get_addresses() + + def _stopped(self): + if self.old_state == 'absent': + self._create_container() + else: + if self.old_state == 'stopped': + if self._needs_to_apply_container_configs(): + self._start_container() + self._apply_container_configs() + self._stop_container() + else: + if self.old_state == 'frozen': + self._unfreeze_container() + if self._needs_to_apply_container_configs(): + self._apply_container_configs() + self._stop_container() + + def _restarted(self): + if self.old_state == 'absent': + self._create_container() + self._start_container() + else: + if self.old_state == 'frozen': + self._unfreeze_container() + if self._needs_to_apply_container_configs(): + self._apply_container_configs() + self._restart_container() + if self.wait_for_ipv4_addresses: + self._get_addresses() + + def _destroyed(self): + if self.old_state != 'absent': + if self.old_state == 'frozen': + self._unfreeze_container() + if self.old_state != 'stopped': + self._stop_container() + self._delete_container() + + def _frozen(self): + if self.old_state == 'absent': + self._create_container() + self._start_container() + self._freeze_container() + else: + if self.old_state == 'stopped': + self._start_container() + if self._needs_to_apply_container_configs(): + self._apply_container_configs() + self._freeze_container() + + def _needs_to_change_container_config(self, key): + if key not in self.config: + return False + if key == 'config': + old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items() if not k.startswith('volatile.')) + else: + old_configs = self.old_container_json['metadata'][key] + return self.config[key] != old_configs + + def _needs_to_apply_container_configs(self): + return ( + self._needs_to_change_container_config('architecture') or + self._needs_to_change_container_config('config') or + self._needs_to_change_container_config('ephemeral') or + self._needs_to_change_container_config('devices') or + self._needs_to_change_container_config('profiles') + ) + + def _apply_container_configs(self): + old_metadata = self.old_container_json['metadata'] + body_json = { + 'architecture': old_metadata['architecture'], + 'config': old_metadata['config'], + 'devices': old_metadata['devices'], + 'profiles': old_metadata['profiles'] + } + if self._needs_to_change_container_config('architecture'): + body_json['architecture'] = self.config['architecture'] + if self._needs_to_change_container_config('config'): + for k, v in self.config['config'].items(): + body_json['config'][k] = v + if self._needs_to_change_container_config('ephemeral'): + body_json['ephemeral'] = self.config['ephemeral'] + if self._needs_to_change_container_config('devices'): + body_json['devices'] = 
self.config['devices'] + if self._needs_to_change_container_config('profiles'): + body_json['profiles'] = self.config['profiles'] + self.client.do('PUT', '/1.0/containers/{0}'.format(self.name), body_json=body_json) + self.actions.append('apply_container_configs') + + def run(self): + """Run the main method.""" + + try: + if self.trust_password is not None: + self.client.authenticate(self.trust_password) + + self.old_container_json = self._get_container_json() + self.old_state = self._container_json_to_module_state(self.old_container_json) + action = getattr(self, LXD_ANSIBLE_STATES[self.state]) + action() + + state_changed = len(self.actions) > 0 + result_json = { + 'log_verbosity': self.module._verbosity, + 'changed': state_changed, + 'old_state': self.old_state, + 'actions': self.actions + } + if self.client.debug: + result_json['logs'] = self.client.logs + if self.addresses is not None: + result_json['addresses'] = self.addresses + self.module.exit_json(**result_json) + except LXDClientException as e: + state_changed = len(self.actions) > 0 + fail_params = { + 'msg': e.msg, + 'changed': state_changed, + 'actions': self.actions + } + if self.client.debug: + fail_params['logs'] = e.kwargs['logs'] + self.module.fail_json(**fail_params) + +def main(): + """Ansible Main module.""" + + module = AnsibleModule( + argument_spec=dict( + name=dict( + type='str', + required=True + ), + architecture=dict( + type='str', + ), + config=dict( + type='dict', + ), + description=dict( + type='str', + ), + devices=dict( + type='dict', + ), + ephemeral=dict( + type='bool', + ), + profiles=dict( + type='list', + ), + source=dict( + type='dict', + ), + state=dict( + choices=LXD_ANSIBLE_STATES.keys(), + default='started' + ), + timeout=dict( + type='int', + default=30 + ), + wait_for_ipv4_addresses=dict( + type='bool', + default=False + ), + force_stop=dict( + type='bool', + default=False + ), + url=dict( + type='str', + default='unix:/var/lib/lxd/unix.socket' + ), + key_file=dict( + type='str', + default='{}/.config/lxc/client.key'.format(os.environ['HOME']) + ), + cert_file=dict( + type='str', + default='{}/.config/lxc/client.crt'.format(os.environ['HOME']) + ), + trust_password=dict( + type='str', + ) + ), + supports_check_mode=False, + ) + + lxd_manage = LXDContainerManagement(module=module) + lxd_manage.run() + +# import module bits +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/cloud/lxd/lxd_profile.py b/cloud/lxd/lxd_profile.py new file mode 100644 index 00000000000..546d0c09ea4 --- /dev/null +++ b/cloud/lxd/lxd_profile.py @@ -0,0 +1,378 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Hiroaki Nakamura +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
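
Both lxd_container above and the lxd_profile module below delegate transport
to LXDClient in ansible.module_utils.lxd, which speaks HTTP over LXD's unix
socket (or HTTPS, as shown earlier). A minimal sketch of the underlying
mechanism, assuming Python 2 and the default socket path; this is an
illustration, not the actual LXDClient implementation:

    import json
    import socket
    from httplib import HTTPConnection

    class UnixHTTPConnection(HTTPConnection):
        """An HTTPConnection that connects to a unix domain socket."""
        def __init__(self, path):
            HTTPConnection.__init__(self, 'localhost')
            self.path = path

        def connect(self):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(self.path)
            self.sock = sock

    conn = UnixHTTPConnection('/var/lib/lxd/unix.socket')
    conn.request('GET', '/1.0/containers/mycontainer/state')
    resp = json.loads(conn.getresponse().read())
    print(resp['metadata']['status'])   # e.g. "Running"
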
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: lxd_profile
+short_description: Manage LXD profiles
+version_added: "2.2"
+description:
+  - Management of LXD profiles
+author: "Hiroaki Nakamura (@hnakamur)"
+options:
+  name:
+    description:
+      - Name of a profile.
+    required: true
+  config:
+    description:
+      - 'The config for the profile (e.g. {"limits.memory": "4GB"}).
+        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+      - If the profile already exists and its "config" value in metadata
+        obtained from
+        GET /1.0/profiles/<name>
+        U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19)
+        is different, then this module tries to apply the configurations.
+      - Not all config values can be applied to an existing profile;
+        you may need to delete and recreate the profile.
+    required: false
+  devices:
+    description:
+      - 'The devices for the profile
+        (e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}}).
+        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+    required: false
+  new_name:
+    description:
+      - A new name for the profile.
+      - If this parameter is specified, the profile will be renamed to this name.
+        See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11)
+    required: false
+  state:
+    choices:
+      - present
+      - absent
+    description:
+      - Define the state of a profile.
+    required: false
+    default: present
+  url:
+    description:
+      - The unix domain socket path or the https URL for the LXD server.
+    required: false
+    default: unix:/var/lib/lxd/unix.socket
+  key_file:
+    description:
+      - The client certificate key file path.
+    required: false
+    default: '"{}/.config/lxc/client.key" .format(os.environ["HOME"])'
+  cert_file:
+    description:
+      - The client certificate file path.
+    required: false
+    default: '"{}/.config/lxc/client.crt" .format(os.environ["HOME"])'
+  trust_password:
+    description:
+      - The client trusted password.
+      - You need to set this password on the LXD server before
+        running this module using the following command.
+        lxc config set core.trust_password <some random password>
+        See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
+      - If trust_password is set, this module sends a request for
+        authentication before sending any requests.
+    required: false
+notes:
+  - Profiles must have a unique name. If you attempt to create a profile
+    with a name that already exists in the user's namespace, the module
+    simply returns as "unchanged".
+'''
+
+EXAMPLES = '''
+# An example for creating a profile
+- hosts: localhost
+  connection: local
+  tasks:
+    - name: Create a profile
+      lxd_profile:
+        name: macvlan
+        state: present
+        config: {}
+        description: my macvlan profile
+        devices:
+          eth0:
+            nictype: macvlan
+            parent: br0
+            type: nic
+
+# An example for creating a profile via https connection
+- hosts: localhost
+  connection: local
+  tasks:
+    - name: create macvlan profile
+      lxd_profile:
+        url: https://127.0.0.1:8443
+        # These cert_file and key_file values are equal to the default values.
+ #cert_file: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt" + #key_file: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key" + trust_password: mypassword + name: macvlan + state: present + config: {} + description: my macvlan profile + devices: + eth0: + nictype: macvlan + parent: br0 + type: nic + +# An example for deleting a profile +- hosts: localhost + connection: local + tasks: + - name: Delete a profile + lxd_profile: + name: macvlan + state: absent + +# An example for renaming a profile +- hosts: localhost + connection: local + tasks: + - name: Rename a profile + lxd_profile: + name: macvlan + new_name: macvlan2 + state: present +''' + +RETURN=''' +old_state: + description: The old state of the profile + returned: success + type: string + sample: "absent" +logs: + description: The logs of requests and responses. + returned: when ansible-playbook is invoked with -vvvv. + type: list + sample: "(too long to be placed here)" +actions: + description: List of actions performed for the profile. + returned: success + type: list + sample: '["create"]' +''' + +import os +from ansible.module_utils.lxd import LXDClient, LXDClientException + +# PROFILE_STATES is a list for states supported +PROFILES_STATES = [ + 'present', 'absent' +] + +# CONFIG_PARAMS is a list of config attribute names. +CONFIG_PARAMS = [ + 'config', 'description', 'devices' +] + +class LXDProfileManagement(object): + def __init__(self, module): + """Management of LXC containers via Ansible. + + :param module: Processed Ansible Module. + :type module: ``object`` + """ + self.module = module + self.name = self.module.params['name'] + self._build_config() + self.state = self.module.params['state'] + self.new_name = self.module.params.get('new_name', None) + + self.url = self.module.params['url'] + self.key_file = self.module.params.get('key_file', None) + self.cert_file = self.module.params.get('cert_file', None) + self.debug = self.module._verbosity >= 4 + try: + self.client = LXDClient( + self.url, key_file=self.key_file, cert_file=self.cert_file, + debug=self.debug + ) + except LXDClientException as e: + self.module.fail_json(msg=e.msg) + self.trust_password = self.module.params.get('trust_password', None) + self.actions = [] + + def _build_config(self): + self.config = {} + for attr in CONFIG_PARAMS: + param_val = self.module.params.get(attr, None) + if param_val is not None: + self.config[attr] = param_val + + def _get_profile_json(self): + return self.client.do( + 'GET', '/1.0/profiles/{0}'.format(self.name), + ok_error_codes=[404] + ) + + @staticmethod + def _profile_json_to_module_state(resp_json): + if resp_json['type'] == 'error': + return 'absent' + return 'present' + + def _update_profile(self): + if self.state == 'present': + if self.old_state == 'absent': + if self.new_name is None: + self._create_profile() + else: + self.module.fail_json( + msg='new_name must not be set when the profile does not exist and the specified state is present', + changed=False) + else: + if self.new_name is not None and self.new_name != self.name: + self._rename_profile() + if self._needs_to_apply_profile_configs(): + self._apply_profile_configs() + elif self.state == 'absent': + if self.old_state == 'present': + if self.new_name is None: + self._delete_profile() + else: + self.module.fail_json( + msg='new_name must not be set when the profile exists and the specified state is absent', + changed=False) + + def _create_profile(self): + config = self.config.copy() + config['name'] = self.name + self.client.do('POST', 
'/1.0/profiles', config) + self.actions.append('create') + + def _rename_profile(self): + config = {'name': self.new_name} + self.client.do('POST', '/1.0/profiles/{}'.format(self.name), config) + self.actions.append('rename') + self.name = self.new_name + + def _needs_to_change_profile_config(self, key): + if key not in self.config: + return False + old_configs = self.old_profile_json['metadata'].get(key, None) + return self.config[key] != old_configs + + def _needs_to_apply_profile_configs(self): + return ( + self._needs_to_change_profile_config('config') or + self._needs_to_change_profile_config('description') or + self._needs_to_change_profile_config('devices') + ) + + def _apply_profile_configs(self): + config = self.old_profile_json.copy() + for k, v in self.config.iteritems(): + config[k] = v + self.client.do('PUT', '/1.0/profiles/{}'.format(self.name), config) + self.actions.append('apply_profile_configs') + + def _delete_profile(self): + self.client.do('DELETE', '/1.0/profiles/{}'.format(self.name)) + self.actions.append('delete') + + def run(self): + """Run the main method.""" + + try: + if self.trust_password is not None: + self.client.authenticate(self.trust_password) + + self.old_profile_json = self._get_profile_json() + self.old_state = self._profile_json_to_module_state(self.old_profile_json) + self._update_profile() + + state_changed = len(self.actions) > 0 + result_json = { + 'changed': state_changed, + 'old_state': self.old_state, + 'actions': self.actions + } + if self.client.debug: + result_json['logs'] = self.client.logs + self.module.exit_json(**result_json) + except LXDClientException as e: + state_changed = len(self.actions) > 0 + fail_params = { + 'msg': e.msg, + 'changed': state_changed, + 'actions': self.actions + } + if self.client.debug: + fail_params['logs'] = e.kwargs['logs'] + self.module.fail_json(**fail_params) + + +def main(): + """Ansible Main module.""" + + module = AnsibleModule( + argument_spec=dict( + name=dict( + type='str', + required=True + ), + new_name=dict( + type='str', + ), + config=dict( + type='dict', + ), + description=dict( + type='str', + ), + devices=dict( + type='dict', + ), + state=dict( + choices=PROFILES_STATES, + default='present' + ), + url=dict( + type='str', + default='unix:/var/lib/lxd/unix.socket' + ), + key_file=dict( + type='str', + default='{}/.config/lxc/client.key'.format(os.environ['HOME']) + ), + cert_file=dict( + type='str', + default='{}/.config/lxc/client.crt'.format(os.environ['HOME']) + ), + trust_password=dict( + type='str', + ) + ), + supports_check_mode=False, + ) + + lxd_manage = LXDProfileManagement(module=module) + lxd_manage.run() + +# import module bits +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/cloud/misc/ovirt.py b/cloud/misc/ovirt.py index 6e8f3281dc5..af89998258c 100644 --- a/cloud/misc/ovirt.py +++ b/cloud/misc/ovirt.py @@ -17,6 +17,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
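
The _needs_to_change_profile_config()/_needs_to_apply_profile_configs() pair
above implements the usual desired-versus-actual comparison: only keys the
user actually supplied are compared against the server's copy, so omitted
options never trigger a change. A minimal sketch of the same rule (the
needs_update helper is hypothetical, not the module's code):

    def needs_update(desired, actual):
        # Compare only the user-supplied keys against the server state.
        return any(actual.get(k) != v for k, v in desired.items())

    actual = {'description': 'old', 'devices': {'eth0': {'type': 'nic'}}}
    print(needs_update({'description': 'new'}, actual))   # True
    print(needs_update({'description': 'old'}, actual))   # False
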
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: ovirt
@@ -144,6 +148,48 @@
     default: null
     required: false
     aliases: []
+  instance_dns:
+    description:
+      - define the instance's Primary DNS server
+    required: false
+    aliases: [ dns ]
+    version_added: "2.1"
+  instance_domain:
+    description:
+      - define the instance's Domain
+    required: false
+    aliases: [ domain ]
+    version_added: "2.1"
+  instance_hostname:
+    description:
+      - define the instance's Hostname
+    required: false
+    aliases: [ hostname ]
+    version_added: "2.1"
+  instance_ip:
+    description:
+      - define the instance's IP
+    required: false
+    aliases: [ ip ]
+    version_added: "2.1"
+  instance_netmask:
+    description:
+      - define the instance's Netmask
+    required: false
+    aliases: [ netmask ]
+    version_added: "2.1"
+  instance_rootpw:
+    description:
+      - define the instance's Root password
+    required: false
+    aliases: [ rootpw ]
+    version_added: "2.1"
+  instance_key:
+    description:
+      - define the instance's Authorized key
+    required: false
+    aliases: [ key ]
+    version_added: "2.1"
   state:
     description:
       - create, terminate or remove instances
@@ -159,62 +205,74 @@
 EXAMPLES = '''
 # Basic example provisioning from image.
-action: ovirt >
-    user=admin@internal
-    url=https://ovirt.example.com
-    instance_name=ansiblevm04
-    password=secret
-    image=centos_64
-    zone=cluster01
-    resource_type=template"
+ovirt:
+    user: admin@internal
+    url: https://ovirt.example.com
+    instance_name: ansiblevm04
+    password: secret
+    image: centos_64
+    zone: cluster01
+    resource_type: template
 
 # Full example to create new instance from scratch
-action: ovirt >
-    instance_name=testansible
-    resource_type=new
-    instance_type=server
-    user=admin@internal
-    password=secret
-    url=https://ovirt.example.com
-    instance_disksize=10
-    zone=cluster01
-    region=datacenter1
-    instance_cpus=1
-    instance_nic=nic1
-    instance_network=rhevm
-    instance_mem=1000
-    disk_alloc=thin
-    sdomain=FIBER01
-    instance_cores=1
-    instance_os=rhel_6x64
-    disk_int=virtio"
+ovirt:
+    instance_name: testansible
+    resource_type: new
+    instance_type: server
+    user: admin@internal
+    password: secret
+    url: https://ovirt.example.com
+    instance_disksize: 10
+    zone: cluster01
+    region: datacenter1
+    instance_cpus: 1
+    instance_nic: nic1
+    instance_network: rhevm
+    instance_mem: 1000
+    disk_alloc: thin
+    sdomain: FIBER01
+    instance_cores: 1
+    instance_os: rhel_6x64
+    disk_int: virtio
 
 # stopping an instance
-action: ovirt >
-    instance_name=testansible
-    state=stopped
-    user=admin@internal
-    password=secret
-    url=https://ovirt.example.com
+ovirt:
+    instance_name: testansible
+    state: stopped
+    user: admin@internal
+    password: secret
+    url: https://ovirt.example.com
 
 # starting an instance
-action: ovirt >
-    instance_name=testansible
-    state=started
-    user=admin@internal
-    password=secret
-    url=https://ovirt.example.com
-
+ovirt:
+    instance_name: testansible
+    state: started
+    user: admin@internal
+    password: secret
+    url: https://ovirt.example.com
+
+# starting an instance with cloud init information
+ovirt:
+    instance_name: testansible
+    state: started
+    user: admin@internal
+    password: secret
+    url: https://ovirt.example.com
+    hostname: testansible
+    domain: ansible.local
+    ip: 192.0.2.100
+    netmask: 255.255.255.0
+    gateway: 192.0.2.1
+    rootpw: bigsecret
 '''
 
-import sys
 try:
     from ovirtsdk.api import API
     from ovirtsdk.xml import params
+    HAS_OVIRTSDK = True
 except ImportError:
-    print "failed=True msg='ovirtsdk required for this module'"
-
sys.exit(1) + HAS_OVIRTSDK = False # ------------------------------------------------------------------- # # create connection with API @@ -224,8 +282,7 @@ def conn(url, user, password): try: value = api.test() except: - print "error connecting to the oVirt API" - sys.exit(1) + raise Exception("error connecting to the oVirt API") return api # ------------------------------------------------------------------- # @@ -253,17 +310,16 @@ def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, try: conn.vms.add(vmparams) except: - print "Error creating VM with specified parameters" - sys.exit(1) + raise Exception("Error creating VM with specified parameters") vm = conn.vms.get(name=vmname) try: vm.disks.add(vmdisk) except: - print "Error attaching disk" + raise Exception("Error attaching disk") try: vm.nics.add(nic_net1) except: - print "Error adding nic" + raise Exception("Error adding nic") # create an instance from a template @@ -272,14 +328,28 @@ def create_vm_template(conn, vmname, image, zone): try: conn.vms.add(vmparams) except: - print 'error adding template %s' % image - sys.exit(1) + raise Exception('error adding template %s' % image) # start instance -def vm_start(conn, vmname): +def vm_start(conn, vmname, hostname=None, ip=None, netmask=None, gateway=None, + domain=None, dns=None, rootpw=None, key=None): vm = conn.vms.get(name=vmname) - vm.start() + use_cloud_init = False + nics = None + nic = None + if hostname or ip or netmask or gateway or domain or dns or rootpw or key: + use_cloud_init = True + if ip and netmask and gateway: + ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway) + nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC', ip=ipinfo, on_boot=True) + nics = params.Nics() + nics = params.GuestNicsConfiguration(nic_configuration=[nic]) + initialization=params.Initialization(regenerate_ssh_keys=True, host_name=hostname, domain=domain, user_name='root', + root_password=rootpw, nic_configurations=nics, dns_servers=dns, + authorized_ssh_keys=key) + action = params.Action(use_cloud_init=use_cloud_init, vm=params.VM(initialization=initialization)) + vm.start(action=action) # Stop instance def vm_stop(conn, vmname): @@ -306,7 +376,6 @@ def vm_remove(conn, vmname): # Get the VMs status def vm_status(conn, vmname): status = conn.vms.get(name=vmname).status.state - print "vm status is : %s" % status return status @@ -315,10 +384,8 @@ def get_vm(conn, vmname): vm = conn.vms.get(name=vmname) if vm == None: name = "empty" - print "vmname: %s" % name else: name = vm.get_name() - print "vmname: %s" % name return name # ------------------------------------------------------------------- # @@ -337,7 +404,7 @@ def main(): user = dict(required=True), url = dict(required=True), instance_name = dict(required=True, aliases=['vmname']), - password = dict(required=True), + password = dict(required=True, no_log=True), image = dict(), resource_type = dict(choices=['new', 'template']), zone = dict(), @@ -351,11 +418,22 @@ def main(): disk_int = dict(default='virtio', choices=['virtio', 'ide']), instance_os = dict(aliases=['vmos']), instance_cores = dict(default=1, aliases=['vmcores']), + instance_hostname = dict(aliases=['hostname']), + instance_ip = dict(aliases=['ip']), + instance_netmask = dict(aliases=['netmask']), + instance_gateway = dict(aliases=['gateway']), + instance_domain = dict(aliases=['domain']), + instance_dns = dict(aliases=['dns']), + instance_rootpw = dict(aliases=['rootpw']), + instance_key = dict(aliases=['key']), sdomain 
= dict(), region = dict(), ) ) + if not HAS_OVIRTSDK: + module.fail_json(msg='ovirtsdk required for this module') + state = module.params['state'] user = module.params['user'] url = module.params['url'] @@ -376,17 +454,34 @@ def main(): vmcores = module.params['instance_cores'] # number of cores sdomain = module.params['sdomain'] # storage domain to store disk on region = module.params['region'] # oVirt Datacenter + hostname = module.params['instance_hostname'] + ip = module.params['instance_ip'] + netmask = module.params['instance_netmask'] + gateway = module.params['instance_gateway'] + domain = module.params['instance_domain'] + dns = module.params['instance_dns'] + rootpw = module.params['instance_rootpw'] + key = module.params['instance_key'] #initialize connection - c = conn(url+"/api", user, password) + try: + c = conn(url+"/api", user, password) + except Exception as e: + module.fail_json(msg='%s' % e) if state == 'present': if get_vm(c, vmname) == "empty": if resource_type == 'template': - create_vm_template(c, vmname, image, zone) + try: + create_vm_template(c, vmname, image, zone) + except Exception as e: + module.fail_json(msg='%s' % e) module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname,image)) elif resource_type == 'new': # FIXME: refactor, use keyword args. - create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int) + try: + create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int) + except Exception as e: + module.fail_json(msg='%s' % e) module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname) else: module.exit_json(changed=False, msg="You did not specify a resource type") @@ -397,7 +492,8 @@ def main(): if vm_status(c, vmname) == 'up': module.exit_json(changed=False, msg="VM %s is already running" % vmname) else: - vm_start(c, vmname) + #vm_start(c, vmname) + vm_start(c, vmname, hostname, ip, netmask, gateway, domain, dns, rootpw, key) module.exit_json(changed=True, msg="VM %s started" % vmname) if state == 'shutdown': @@ -426,4 +522,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/cloud/misc/proxmox.py b/cloud/misc/proxmox.py index 7be4361edbe..c404519d499 100644 --- a/cloud/misc/proxmox.py +++ b/cloud/misc/proxmox.py @@ -14,12 +14,17 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: proxmox short_description: management of instances in Proxmox VE cluster description: - allows you to create/delete/stop instances in Proxmox VE cluster + - Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older) version_added: "2.0" options: api_host: @@ -39,8 +44,10 @@ vmid: description: - the instance id + - if not set, the next available VM ID will be fetched from ProxmoxAPI. 
+    - if not set, it will be fetched from the Proxmox API based on the hostname
     default: null
-    required: true
+    required: false
   validate_certs:
     description:
       - enable / disable https certificate verification
@@ -54,6 +61,12 @@
       - for another states will be autodiscovered
     default: null
     required: false
+  pool:
+    description:
+      - Proxmox VE resource pool
+    default: null
+    required: false
+    version_added: "2.3"
   password:
     description:
       - the instance root password
@@ -64,6 +77,7 @@
     description:
       - the instance hostname
       - required only for C(state=present)
+      - must be unique if vmid is not passed
     default: null
     required: false
   ostemplate:
@@ -97,7 +111,14 @@
       - specifies network interfaces for the container
     default: null
     required: false
-    type: string
+    type: A hash/dictionary defining interfaces
+  mounts:
+    description:
+      - specifies additional mounts (separate disks) for the container
+    default: null
+    required: false
+    type: A hash/dictionary defining mount points
+    version_added: "2.2"
   ip_address:
     description:
       - specifies the address the container will be assigned
@@ -156,34 +177,123 @@
   default: present
 notes:
   - Requires proxmoxer and requests modules on host. This modules can be installed with pip.
-requirements: [ "proxmoxer", "requests" ]
+requirements: [ "proxmoxer", "python >= 2.7", "requests" ]
 author: "Sergei Antipov @UnderGreen"
 '''
 
 EXAMPLES = '''
 # Create new container with minimal options
-- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+- proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
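
The automatic vmid selection shown in the next example maps onto a single
Proxmox API call, the same one the module's get_nextvmid() helper wraps
further below. A minimal sketch with proxmoxer, using the placeholder
credentials from these examples:

    from proxmoxer import ProxmoxAPI

    proxmox = ProxmoxAPI('node1', user='root@pam', password='1q2w3e',
                         verify_ssl=False)
    print(proxmox.cluster.nextid.get())   # next free VM ID, e.g. 101

    # The hostname-based lookup used when only a hostname is known:
    vmids = [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm')
             if vm['name'] == 'example.org']
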
+# Create new container automatically selecting the next available vmid.
+- proxmox: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
 
 # Create new container with minimal options with force(it will rewrite existing container)
-- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' force=yes
+- proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+    force: yes
 
 # Create new container with minimal options use environment PROXMOX_PASSWORD variable(you should export it before)
-- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+- proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+# Create new container with minimal options defining network interface with dhcp
+- proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+    netif: '{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}'
+
+# Create new container with minimal options defining network interface with static ip
+- proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+    netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}'
+
+# Create new container with minimal options defining a mount
+- proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+    mounts: '{"mp0":"local:8,mp=/mnt/test/"}'
 
 # Start container
-- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=started
+- proxmox:
+    vmid: 100
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    state: started
 
 # Stop container
-- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=stopped
+- proxmox:
+    vmid: 100
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    state: stopped
 
 # Stop container with force
-- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' force=yes state=stopped
+- proxmox:
+    vmid: 100
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    force: yes
+    state: stopped
 
 # Restart container(stopped or mounted container you can't restart)
-- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=stopped
+- proxmox:
+    vmid: 100
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    state: restarted
 
 # Remove container
-- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=absent
+- proxmox:
+    vmid: 100
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    state: absent
 '''
 
 import os
@@ -195,19 +305,43 @@
 except ImportError:
     HAS_PROXMOXER = False
 
+VZ_TYPE = None
+
+def get_nextvmid(proxmox):
+    try:
+        vmid = proxmox.cluster.nextid.get()
+        return vmid
+    except Exception as e:
module.fail_json(msg="Unable to get next vmid. Failed with exception: %s") + +def get_vmid(proxmox, hostname): + return [ vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if vm['name'] == hostname ] + def get_instance(proxmox, vmid): return [ vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid) ] -def content_check(proxmox, node, ostemplate, storage): - return [ True for cnt in proxmox.nodes(node).storage(storage).content.get() if cnt['volid'] == ostemplate ] +def content_check(proxmox, node, ostemplate, template_store): + return [ True for cnt in proxmox.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate ] def node_check(proxmox, node): return [ True for nd in proxmox.nodes.get() if nd['node'] == node ] def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs): proxmox_node = proxmox.nodes(node) - taskid = proxmox_node.openvz.create(vmid=vmid, storage=storage, memory=memory, swap=swap, - cpus=cpus, disk=disk, **kwargs) + kwargs = dict((k,v) for k, v in kwargs.iteritems() if v is not None) + if VZ_TYPE =='lxc': + kwargs['cpulimit']=cpus + kwargs['rootfs']=disk + if 'netif' in kwargs: + kwargs.update(kwargs['netif']) + del kwargs['netif'] + if 'mounts' in kwargs: + kwargs.update(kwargs['mounts']) + del kwargs['mounts'] + else: + kwargs['cpus']=cpus + kwargs['disk']=disk + taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs) while timeout: if ( proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' @@ -222,7 +356,7 @@ def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, sw return False def start_instance(module, proxmox, vm, vmid, timeout): - taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.start.post() + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post() while timeout: if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ): @@ -237,9 +371,9 @@ def start_instance(module, proxmox, vm, vmid, timeout): def stop_instance(module, proxmox, vm, vmid, timeout, force): if force: - taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.shutdown.post(forceStop=1) + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1) else: - taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.shutdown.post() + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post() while timeout: if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ): @@ -253,7 +387,7 @@ def stop_instance(module, proxmox, vm, vmid, timeout, force): return False def umount_instance(module, proxmox, vm, vmid, timeout): - taskid = proxmox.nodes(vm[0]['node']).openvz(vmid).status.umount.post() + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.umount.post() while timeout: if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ): @@ -272,25 +406,27 @@ def main(): api_host = dict(required=True), api_user = dict(required=True), api_password = dict(no_log=True), - vmid = dict(required=True), - validate_certs = dict(type='bool', choices=BOOLEANS, default='no'), + vmid = 
dict(required=False), + validate_certs = dict(type='bool', default='no'), node = dict(), + pool = dict(), password = dict(no_log=True), hostname = dict(), ostemplate = dict(), - disk = dict(type='int', default=3), + disk = dict(type='str', default='3'), cpus = dict(type='int', default=1), memory = dict(type='int', default=512), swap = dict(type='int', default=0), - netif = dict(), + netif = dict(type='dict'), + mounts = dict(type='dict'), ip_address = dict(), - onboot = dict(type='bool', choices=BOOLEANS, default='no'), + onboot = dict(type='bool', default='no'), storage = dict(default='local'), cpuunits = dict(type='int', default=1000), nameserver = dict(), searchdomain = dict(), timeout = dict(type='int', default=30), - force = dict(type='bool', choices=BOOLEANS, default='no'), + force = dict(type='bool', default='no'), state = dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']), ) ) @@ -310,37 +446,57 @@ def main(): memory = module.params['memory'] swap = module.params['swap'] storage = module.params['storage'] + hostname = module.params['hostname'] + if module.params['ostemplate'] is not None: + template_store = module.params['ostemplate'].split(":")[0] timeout = module.params['timeout'] # If password not set get it from PROXMOX_PASSWORD env if not api_password: try: api_password = os.environ['PROXMOX_PASSWORD'] - except KeyError, e: + except KeyError as e: module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') try: proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs) - except Exception, e: + global VZ_TYPE + VZ_TYPE = 'openvz' if float(proxmox.version.get()['version']) < 4.0 else 'lxc' + + except Exception as e: module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) + # If vmid not set get the Next VM id from ProxmoxAPI + # If hostname is set get the VM id from ProxmoxAPI + if not vmid and state == 'present': + vmid = get_nextvmid(proxmox) + elif not vmid and hostname: + vmid = get_vmid(proxmox, hostname)[0] + elif not vmid: + module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state) + if state == 'present': try: if get_instance(proxmox, vmid) and not module.params['force']: module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid) + # If no vmid was passed, there cannot be another VM named 'hostname' + if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']: + module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0])) elif not (node, module.params['hostname'] and module.params['password'] and module.params['ostemplate']): module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm') elif not node_check(proxmox, node): module.fail_json(msg="node '%s' not exists in cluster" % node) - elif not content_check(proxmox, node, module.params['ostemplate'], storage): + elif not content_check(proxmox, node, module.params['ostemplate'], template_store): module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s" - % (module.params['ostemplate'], node, storage)) + % (module.params['ostemplate'], node, template_store)) create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, + pool = module.params['pool'], password = module.params['password'], hostname = 
module.params['hostname'], ostemplate = module.params['ostemplate'], netif = module.params['netif'], + mounts = module.params['mounts'], ip_address = module.params['ip_address'], onboot = int(module.params['onboot']), cpuunits = module.params['cpuunits'], @@ -349,20 +505,20 @@ def main(): force = int(module.params['force'])) module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate'])) - except Exception, e: - module.fail_json(msg="creation of VM %s failed with exception: %s" % ( vmid, e )) + except Exception as e: + module.fail_json(msg="creation of %s VM %s failed with exception: %s" % ( VZ_TYPE, vmid, e )) elif state == 'started': try: vm = get_instance(proxmox, vmid) if not vm: module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) - if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'running': + if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': module.exit_json(changed=False, msg="VM %s is already running" % vmid) if start_instance(module, proxmox, vm, vmid, timeout): module.exit_json(changed=True, msg="VM %s started" % vmid) - except Exception, e: + except Exception as e: module.fail_json(msg="starting of VM %s failed with exception: %s" % ( vmid, e )) elif state == 'stopped': @@ -371,7 +527,7 @@ def main(): if not vm: module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) - if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'mounted': + if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted': if module.params['force']: if umount_instance(module, proxmox, vm, vmid, timeout): module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) @@ -379,12 +535,12 @@ def main(): module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. 
" "You can use force option to umount it.") % vmid) - if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'stopped': + if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped': module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid) if stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']): module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) - except Exception, e: + except Exception as e: module.fail_json(msg="stopping of VM %s failed with exception: %s" % ( vmid, e )) elif state == 'restarted': @@ -392,14 +548,14 @@ def main(): vm = get_instance(proxmox, vmid) if not vm: module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid) - if ( proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'stopped' - or proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'mounted' ): + if ( getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped' + or getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted' ): module.exit_json(changed=False, msg="VM %s is not running" % vmid) if ( stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']) and start_instance(module, proxmox, vm, vmid, timeout) ): module.exit_json(changed=True, msg="VM %s is restarted" % vmid) - except Exception, e: + except Exception as e: module.fail_json(msg="restarting of VM %s failed with exception: %s" % ( vmid, e )) elif state == 'absent': @@ -408,13 +564,13 @@ def main(): if not vm: module.exit_json(changed=False, msg="VM %s does not exist" % vmid) - if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'running': + if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid) - if proxmox.nodes(vm[0]['node']).openvz(vmid).status.current.get()['status'] == 'mounted': + if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted': module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid) - taskid = proxmox.nodes(vm[0]['node']).openvz.delete(vmid) + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid) while timeout: if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ): @@ -425,9 +581,11 @@ def main(): % proxmox_node.tasks(taskid).log.get()[:1]) time.sleep(1) - except Exception, e: + except Exception as e: module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e )) # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/cloud/misc/proxmox_kvm.py b/cloud/misc/proxmox_kvm.py new file mode 100644 index 00000000000..e77f266b42a --- /dev/null +++ b/cloud/misc/proxmox_kvm.py @@ -0,0 +1,1058 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Abdoul Bah (@helldorado) + +""" +Ansible module to manage Qemu(KVM) instance in Proxmox VE cluster. +This module is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. 
+This software is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. +You should have received a copy of the GNU General Public License +along with this software. If not, see . +""" + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: proxmox_kvm +short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster. +description: + - Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster. +version_added: "2.3" +author: "Abdoul Bah (@helldorado) " +options: + acpi: + description: + - Specify if ACPI should be enables/disabled. + required: false + default: "yes" + choices: [ "yes", "no" ] + type: boolean + agent: + description: + - Specify if the QEMU GuestAgent should be enabled/disabled. + required: false + default: null + choices: [ "yes", "no" ] + type: boolean + args: + description: + - Pass arbitrary arguments to kvm. + - This option is for experts only! + default: "-serial unix:/var/run/qemu-server/VMID.serial,server,nowait" + required: false + type: string + api_host: + description: + - Specify the target host of the Proxmox VE cluster. + required: true + api_user: + description: + - Specify the user to authenticate with. + required: true + api_password: + description: + - Specify the password to authenticate with. + - You can use C(PROXMOX_PASSWORD) environment variable. + default: null + required: false + autostart: + description: + - Specify, if the VM should be automatically restarted after crash (currently ignored in PVE API). + required: false + default: "no" + choices: [ "yes", "no" ] + type: boolean + balloon: + description: + - Specify the amount of RAM for the VM in MB. + - Using zero disables the balloon driver. + required: false + default: 0 + type: integer + bios: + description: + - Specify the BIOS implementation. + choices: ['seabios', 'ovmf'] + required: false + default: null + type: string + boot: + description: + - Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n). + - You can combine to set order. + required: false + default: cnd + type: string + bootdisk: + description: + - Enable booting from specified disk. C((ide|sata|scsi|virtio)\d+) + required: false + default: null + type: string + cores: + description: + - Specify number of cores per socket. + required: false + default: 1 + type: integer + cpu: + description: + - Specify emulated CPU type. + required: false + default: kvm64 + type: string + cpulimit: + description: + - Specify if CPU usage will be limited. Value 0 indicates no CPU limit. + - If the computer has 2 CPUs, it has total of '2' CPU time + required: false + default: null + type: integer + cpuunits: + description: + - Specify CPU weight for a VM. + - You can disable fair-scheduler configuration by setting this to 0 + default: 1000 + required: false + type: integer + delete: + description: + - Specify a list of settings you want to delete. + required: false + default: null + type: string + description: + description: + - Specify the description for the VM. Only used on the configuration web interface. + - This is saved as comment inside the configuration file. + required: false + default: null + type: string + digest: + description: + - Specify if to prevent changes if current configuration file has different SHA1 digest. 
+ - This can be used to prevent concurrent modifications. + required: false + default: null + type: string + force: + description: + - Allow to force stop VM. + - Can be used only with states C(stopped), C(restarted). + default: null + choices: [ "yes", "no" ] + required: false + type: boolean + freeze: + description: + - Specify if PVE should freeze CPU at startup (use 'c' monitor command to start execution). + required: false + default: null + choices: [ "yes", "no" ] + type: boolean + hostpci: + description: + - Specify a hash/dictionary of map host pci devices into guest. C(hostpci='{"key":"value", "key":"value"}'). + - Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N. + - Values allowed are - C("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0""). + - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is C(bus:dev.func) (hexadecimal numbers). + - C(pcie=boolean) I(default=0) Choose the PCI-express bus (needs the q35 machine model). + - C(rombar=boolean) I(default=1) Specify whether or not the device’s ROM will be visible in the guest’s memory map. + - C(x-vga=boolean) I(default=0) Enable vfio-vga device support. + - /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care. + required: false + default: null + type: A hash/dictionary defining host pci devices + hotplug: + description: + - Selectively enable hotplug features. + - This is a comma separated list of hotplug features C('network', 'disk', 'cpu', 'memory' and 'usb'). + - Value 0 disables hotplug completely and value 1 is an alias for the default C('network,disk,usb'). + required: false + default: null + type: string + hugepages: + description: + - Enable/disable hugepages memory. + choices: ['any', '2', '1024'] + required: false + default: null + type: string + ide: + description: + - A hash/dictionary of volume used as IDE hard disk or CD-ROM. C(ide='{"key":"value", "key":"value"}'). + - Keys allowed are - C(ide[n]) where 0 ≤ n ≤ 3. + - Values allowed are - C("storage:size,format=value"). + - C(storage) is the storage identifier where to create the disk. + - C(size) is the size of the disk in GB. + - C(format) is the drive’s backing file’s data format. C(qcow2|raw|subvol). + required: false + default: null + type: A hash/dictionary defining ide + keyboard: + description: + - Sets the keyboard layout for VNC server. + required: false + default: null + type: string + kvm: + description: + - Enable/disable KVM hardware virtualization. + required: false + default: "yes" + choices: [ "yes", "no" ] + type: boolean + localtime: + description: + - Sets the real time clock to local time. + - This is enabled by default if ostype indicates a Microsoft OS. + required: false + default: null + choices: [ "yes", "no" ] + type: boolean + lock: + description: + - Lock/unlock the VM. + choices: ['migrate', 'backup', 'snapshot', 'rollback'] + required: false + default: null + type: string + machine: + description: + - Specifies the Qemu machine type. + - type => C((pc|pc(-i440fx)?-\d+\.\d+(\.pxe)?|q35|pc-q35-\d+\.\d+(\.pxe)?)) + required: false + default: null + type: string + memory: + description: + - Memory size in MB for instance. + required: false + default: 512 + type: integer + migrate_downtime: + description: + - Sets maximum tolerated downtime (in seconds) for migrations. + required: false + default: null + type: integer + migrate_speed: + description: + - Sets maximum speed (in MB/s) for migrations. 
+ - A value of 0 is no limit. + required: false + default: null + type: integer + name: + description: + - Specifies the VM name. Only used on the configuration web interface. + - Required only for C(state=present). + default: null + required: false + net: + description: + - A hash/dictionary of network interfaces for the VM. C(net='{"key":"value", "key":"value"}'). + - Keys allowed are - C(net[n]) where 0 ≤ n ≤ N. + - Values allowed are - C("model="XX:XX:XX:XX:XX:XX",brigde="value",rate="value",tag="value",firewall="1|0",trunks="vlanid""). + - Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3). + - C(XX:XX:XX:XX:XX:XX) should be an unique MAC address. This is automatically generated if not specified. + - The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called 'vmbr0'. + - Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as floating point number, unit is 'Megabytes per second'. + - If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services. + default: null + required: false + type: A hash/dictionary defining interfaces + node: + description: + - Proxmox VE node, where the new VM will be created. + - Only required for C(state=present). + - For other states, it will be autodiscovered. + default: null + required: false + numa: + description: + - A hash/dictionaries of NUMA topology. C(numa='{"key":"value", "key":"value"}'). + - Keys allowed are - C(numa[n]) where 0 ≤ n ≤ N. + - Values allowed are - C("cpu="",hostnodes="",memory="number",policy="(bind|interleave|preferred)""). + - C(cpus) CPUs accessing this NUMA node. + - C(hostnodes) Host NUMA nodes to use. + - C(memory) Amount of memory this NUMA node provides. + - C(policy) NUMA allocation policy. + default: null + required: false + type: A hash/dictionary defining NUMA topology + onboot: + description: + - Specifies whether a VM will be started during system bootup. + default: "yes" + choices: [ "yes", "no" ] + required: false + type: boolean + ostype: + description: + - Specifies guest operating system. This is used to enable special optimization/features for specific operating systems. + - The l26 is Linux 2.6/3.X Kernel. + choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'l24', 'l26', 'solaris'] + default: l26 + required: false + type: string + parallel: + description: + - A hash/dictionary of map host parallel devices. C(parallel='{"key":"value", "key":"value"}'). + - Keys allowed are - (parallel[n]) where 0 ≤ n ≤ 2. + - Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+"). + default: null + required: false + type: A hash/dictionary defining host parallel devices + protection: + description: + - Enable/disable the protection flag of the VM. This will enable/disable the remove VM and remove disk operations. + default: null + choices: [ "yes", "no" ] + required: false + type: boolean + reboot: + description: + - Allow reboot. If set to yes, the VM exit on reboot. + default: null + choices: [ "yes", "no" ] + required: false + type: boolean + revert: + description: + - Revert a pending change. + default: null + required: false + type: string + sata: + description: + - A hash/dictionary of volume used as sata hard disk or CD-ROM. C(sata='{"key":"value", "key":"value"}'). + - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5. 
+      - Values allowed are - C("storage:size,format=value").
+      - C(storage) is the storage identifier where to create the disk.
+      - C(size) is the size of the disk in GB.
+      - C(format) is the drive’s backing file’s data format. C(qcow2|raw|subvol).
+    default: null
+    required: false
+    type: A hash/dictionary defining sata
+  scsi:
+    description:
+      - A hash/dictionary of volume used as SCSI hard disk or CD-ROM. C(scsi='{"key":"value", "key":"value"}').
+      - Keys allowed are - C(scsi[n]) where 0 ≤ n ≤ 13.
+      - Values allowed are - C("storage:size,format=value").
+      - C(storage) is the storage identifier where to create the disk.
+      - C(size) is the size of the disk in GB.
+      - C(format) is the drive’s backing file’s data format. C(qcow2|raw|subvol).
+    default: null
+    required: false
+    type: A hash/dictionary defining scsi
+  scsihw:
+    description:
+      - Specifies the SCSI controller model.
+    choices: ['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']
+    required: false
+    default: null
+    type: string
+  serial:
+    description:
+      - A hash/dictionary of serial devices to create inside the VM. C('{"key":"value", "key":"value"}').
+      - Keys allowed are - serial[n](str; required) where 0 ≤ n ≤ 3.
+      - Values allowed are - C((/dev/.+|socket)).
+      - /!\ If you pass through a host serial device, it is no longer possible to migrate such machines - use with special care.
+    default: null
+    required: false
+    type: A hash/dictionary defining serial
+  shares:
+    description:
+      - Sets the amount of memory shares for auto-ballooning. (0 - 50000).
+      - The larger the number is, the more memory this VM gets.
+      - The number is relative to the weights of all other running VMs.
+      - Using 0 disables auto-ballooning, which means no limit.
+    required: false
+    default: null
+    type: integer
+  skiplock:
+    description:
+      - Ignore locks.
+      - Only root is allowed to use this option.
+    required: false
+    default: null
+    choices: [ "yes", "no" ]
+    type: boolean
+  smbios:
+    description:
+      - Specifies SMBIOS type 1 fields.
+    required: false
+    default: null
+    type: string
+  sockets:
+    description:
+      - Sets the number of CPU sockets. (1 - N).
+    required: false
+    default: 1
+    type: integer
+  startdate:
+    description:
+      - Sets the initial date of the real time clock.
+      - Valid formats for date are C('now'), C('2016-09-25T16:01:21') or C('2016-09-25').
+    required: false
+    default: null
+    type: string
+  startup:
+    description:
+      - Startup and shutdown behavior. C([[order=]\d+] [,up=\d+] [,down=\d+]).
+      - Order is a non-negative number defining the general startup order.
+      - Shutdown is done with reverse ordering.
+    required: false
+    default: null
+    type: string
+  state:
+    description:
+      - Indicates the desired state of the instance.
+      - If C(current), the current state of the VM will be fetched. You can access it with C(results.status).
+    choices: ['present', 'started', 'absent', 'stopped', 'restarted', 'current']
+    required: false
+    default: present
+  tablet:
+    description:
+      - Enables/disables the USB tablet device.
+    required: false
+    choices: [ "yes", "no" ]
+    default: "no"
+    type: boolean
+  tdf:
+    description:
+      - Enables/disables time drift fix.
+    required: false
+    default: null
+    choices: [ "yes", "no" ]
+    type: boolean
+  template:
+    description:
+      - Enables/disables the template.
+    required: false
+    default: "no"
+    choices: [ "yes", "no" ]
+    type: boolean
+  timeout:
+    description:
+      - Timeout for operations.
+    default: 30
+    required: false
+    type: integer
+  validate_certs:
+    description:
+      - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
+    default: "no"
+    choices: [ "yes", "no" ]
+    required: false
+    type: boolean
+  vcpus:
+    description:
+      - Sets the number of hotplugged vcpus.
+    required: false
+    default: null
+    type: integer
+  vga:
+    description:
+      - Select the VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option 'std' or 'vmware'.
+    choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']
+    required: false
+    default: std
+  virtio:
+    description:
+      - A hash/dictionary of volume used as VIRTIO hard disk. C(virtio='{"key":"value", "key":"value"}').
+      - Keys allowed are - C(virtio[n]) where 0 ≤ n ≤ 15.
+      - Values allowed are - C("storage:size,format=value").
+      - C(storage) is the storage identifier where to create the disk.
+      - C(size) is the size of the disk in GB.
+      - C(format) is the drive’s backing file’s data format. C(qcow2|raw|subvol).
+    required: false
+    default: null
+    type: A hash/dictionary defining virtio
+  vmid:
+    description:
+      - Specifies the VM ID. You can use the I(name) parameter instead.
+      - If vmid is not set, the next available VM ID will be fetched from ProxmoxAPI.
+    default: null
+    required: false
+  watchdog:
+    description:
+      - Creates a virtual hardware watchdog device.
+    required: false
+    default: null
+    type: string
+Notes:
+  - Requires the proxmoxer and requests modules on the host. These modules can be installed with pip.
+requirements: [ "proxmoxer", "requests" ]
+'''
+
+EXAMPLES = '''
+# Create new VM with minimal options
+- proxmox_kvm:
+    api_user    : root@pam
+    api_password: secret
+    api_host    : helldorado
+    name        : spynal
+    node        : sabrewulf
+
+# Create new VM with minimal options and given vmid
+- proxmox_kvm:
+    api_user    : root@pam
+    api_password: secret
+    api_host    : helldorado
+    name        : spynal
+    node        : sabrewulf
+    vmid        : 100
+
+# Create new VM with two network interfaces
+- proxmox_kvm:
+    api_user    : root@pam
+    api_password: secret
+    api_host    : helldorado
+    name        : spynal
+    node        : sabrewulf
+    net         : '{"net0":"virtio,bridge=vmbr1,rate=200", "net1":"e1000,bridge=vmbr2,"}'
+
+# Create new VM with one network interface, three virtio hard disks, 4 cores, and 2 vcpus
+- proxmox_kvm:
+    api_user    : root@pam
+    api_password: secret
+    api_host    : helldorado
+    name        : spynal
+    node        : sabrewulf
+    net         : '{"net0":"virtio,bridge=vmbr1,rate=200"}'
+    virtio      : '{"virtio0":"VMs_LVM:10", "virtio1":"VMs:2,format=qcow2", "virtio2":"VMs:5,format=raw"}'
+    cores       : 4
+    vcpus       : 2
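+
+# Illustrative variant (not from the original example set): create a VM with a
+# SATA disk, using the C(sata) value format documented above. The storage
+# identifier 'VMs_LVM' is a placeholder.
+- proxmox_kvm:
+    api_user    : root@pam
+    api_password: secret
+    api_host    : helldorado
+    name        : spynal
+    node        : sabrewulf
+    sata        : '{"sata0":"VMs_LVM:10,format=raw"}'
+    onboot      : yes
+
+# Create new VM and lock it for snapshot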
+- proxmox_kvm:
+    api_user    : root@pam
+    api_password: secret
+    api_host    : helldorado
+    name        : spynal
+    node        : sabrewulf
+    lock        : snapshot
+
+# Create new VM and set protection to disable the remove VM and remove disk operations
+- proxmox_kvm:
+    api_user    : root@pam
+    api_password: secret
+    api_host    : helldorado
+    name        : spynal
+    node        : sabrewulf
+    protection  : yes
+
+# Start VM
+- proxmox_kvm:
+    api_user    : root@pam
+    api_password: secret
+    api_host    : helldorado
+    name        : spynal
+    node        : sabrewulf
+    state       : started
+
+# Stop VM
+- proxmox_kvm:
+    api_user    : root@pam
+    api_password: secret
+    api_host    : helldorado
+    name        : spynal
+    node        : sabrewulf
+    state       : stopped
+
+# Stop VM with force
+- proxmox_kvm:
+    api_user    : root@pam
+    api_password: secret
+    api_host    : helldorado
+    name        : spynal
+    node        : sabrewulf
+    state       : stopped
+    force       : yes
+
+# Restart VM
+- proxmox_kvm:
+    api_user    : root@pam
+    api_password: secret
+    api_host    : helldorado
+    name        : spynal
+    node        : sabrewulf
+    state       : restarted
+
+# Remove VM
+- proxmox_kvm:
+    api_user    : root@pam
+    api_password: secret
+    api_host    : helldorado
+    name        : spynal
+    node        : sabrewulf
+    state       : absent
+
+# Get VM current state
+- proxmox_kvm:
+    api_user    : root@pam
+    api_password: secret
+    api_host    : helldorado
+    name        : spynal
+    node        : sabrewulf
+    state       : current
+'''
+
+RETURN = '''
+devices:
+    description: The list of devices created or used.
+    returned: success
+    type: dict
+    sample: '
+      {
+        "ide0": "VMS_LVM:vm-115-disk-1",
+        "ide1": "VMs:115/vm-115-disk-3.raw",
+        "virtio0": "VMS_LVM:vm-115-disk-2",
+        "virtio1": "VMs:115/vm-115-disk-1.qcow2",
+        "virtio2": "VMs:115/vm-115-disk-2.raw"
+      }'
+mac:
+    description: List of MAC addresses created and the C(net[n]) they are attached to. Useful when you want to use provisioning systems like Foreman via PXE.
+    returned: success
+    type: dict
+    sample: '
+      {
+        "net0": "3E:6E:97:D2:31:9F",
+        "net1": "B6:A1:FC:EF:78:A4"
+      }'
+vmid:
+    description: The VM vmid.
+    returned: success
+    type: int
+    sample: 115
+status:
+    description:
+      - The current virtual machine status.
+      - Returned only when C(state=current).
+    returned: success
+    type: dict
+    sample: '{
+      "changed": false,
+      "msg": "VM kropta with vmid = 110 is running",
+      "status": "running"
+    }'
+'''
+
+import os
+import re
+import time
+
+
+try:
+    from proxmoxer import ProxmoxAPI
+    HAS_PROXMOXER = True
+except ImportError:
+    HAS_PROXMOXER = False
+
+VZ_TYPE='qemu'
+
+def get_nextvmid(module, proxmox):
+    try:
+        vmid = proxmox.cluster.nextid.get()
+        return vmid
+    except Exception as e:
+        module.fail_json(msg="Unable to get next vmid. Failed with exception: %s" % e)
+
+def get_vmid(proxmox, name):
+    return [ vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if vm['name'] == name ]
+
+def get_vm(proxmox, vmid):
+    return [ vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid) ]
+
+def node_check(proxmox, node):
+    return [ True for nd in proxmox.nodes.get() if nd['node'] == node ]
+
+def get_vminfo(module, proxmox, node, vmid, **kwargs):
+    global results
+    results = {}
+    mac = {}
+    devices = {}
+    try:
+        vm = proxmox.nodes(node).qemu(vmid).config.get()
+    except Exception as e:
+        module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e))
+
+    # Sanitize kwargs. Remove undefined args and ensure True and False are converted to int.
+    kwargs = dict((k,v) for k, v in kwargs.iteritems() if v is not None)
+
+    # Convert all dict in kwargs to elements.
For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n] + for k in kwargs.keys(): + if isinstance(kwargs[k], dict): + kwargs.update(kwargs[k]) + del kwargs[k] + + # Split information by type + for k, v in kwargs.iteritems(): + if re.match(r'net[0-9]', k) is not None: + interface = k + k = vm[k] + k = re.search('=(.*?),', k).group(1) + mac[interface] = k + if re.match(r'virtio[0-9]', k) is not None or re.match(r'ide[0-9]', k) is not None or re.match(r'scsi[0-9]', k) is not None or re.match(r'sata[0-9]', k) is not None: + device = k + k = vm[k] + k = re.search('(.*?),', k).group(1) + devices[device] = k + + results['mac'] = mac + results['devices'] = devices + results['vmid'] = int(vmid) + +def create_vm(module, proxmox, vmid, node, name, memory, cpu, cores, sockets, timeout, **kwargs): + # Available only in PVE 4 + only_v4 = ['force','protection','skiplock'] + # Default args for vm. Note: -args option is for experts only. It allows you to pass arbitrary arguments to kvm. + vm_args = "-serial unix:/var/run/qemu-server/{}.serial,server,nowait".format(vmid) + + proxmox_node = proxmox.nodes(node) + + # Sanitize kwargs. Remove not defined args and ensure True and False converted to int. + kwargs = dict((k,v) for k, v in kwargs.iteritems() if v is not None) + kwargs.update(dict([k, int(v)] for k, v in kwargs.iteritems() if isinstance(v, bool))) + + # The features work only on PVE 4 + if PVE_MAJOR_VERSION < 4: + for p in only_v4: + if p in kwargs: + del kwargs[p] + + # Convert all dict in kwargs to elements. For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n] + for k in kwargs.keys(): + if isinstance(kwargs[k], dict): + kwargs.update(kwargs[k]) + del kwargs[k] + + # -args and skiplock require root@pam user + if module.params['api_user'] == "root@pam" and module.params['args'] is None: + kwargs['args'] = vm_args + elif module.params['api_user'] == "root@pam" and module.params['args'] is not None: + kwargs['args'] = module.params['args'] + elif module.params['api_user'] != "root@pam" and module.params['args'] is not None: + module.fail_json(msg='args parameter require root@pam user. ') + + if module.params['api_user'] != "root@pam" and module.params['skiplock'] is not None: + module.fail_json(msg='skiplock parameter require root@pam user. ') + + taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) + + while timeout: + if ( proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' + and proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK' ): + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' + % proxmox_node.tasks(taskid).log.get()[:1]) + time.sleep(1) + return False + +def start_vm(module, proxmox, vm, vmid, timeout): + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post() + while timeout: + if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' + and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ): + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for starting VM. 
Last line in task before timeout: %s' + % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + +def stop_vm(module, proxmox, vm, vmid, timeout, force): + if force: + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1) + else: + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post() + while timeout: + if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' + and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ): + return True + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' + % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1]) + + time.sleep(1) + return False + +def main(): + module = AnsibleModule( + argument_spec = dict( + acpi = dict(type='bool', default='yes'), + agent = dict(type='bool'), + args = dict(type='str', default=None), + api_host = dict(required=True), + api_user = dict(required=True), + api_password = dict(no_log=True), + autostart = dict(type='bool', default='no'), + balloon = dict(type='int',default=0), + bios = dict(choices=['seabios', 'ovmf']), + boot = dict(type='str', default='cnd'), + bootdisk = dict(type='str'), + cores = dict(type='int', default=1), + cpu = dict(type='str', default='kvm64'), + cpulimit = dict(type='int'), + cpuunits = dict(type='int', default=1000), + delete = dict(type='str'), + description = dict(type='str'), + digest = dict(type='str'), + force = dict(type='bool', default=None), + freeze = dict(type='bool'), + hostpci = dict(type='dict'), + hotplug = dict(type='str'), + hugepages = dict(choices=['any', '2', '1024']), + ide = dict(type='dict', default=None), + keyboard = dict(type='str'), + kvm = dict(type='bool', default='yes'), + localtime = dict(type='bool'), + lock = dict(choices=['migrate', 'backup', 'snapshot', 'rollback']), + machine = dict(type='str'), + memory = dict(type='int', default=512), + migrate_downtime = dict(type='int'), + migrate_speed = dict(type='int'), + name = dict(type='str'), + net = dict(type='dict'), + node = dict(), + numa = dict(type='dict'), + onboot = dict(type='bool', default='yes'), + ostype = dict(default='l26', choices=['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'l24', 'l26', 'solaris']), + parallel = dict(type='dict'), + protection = dict(type='bool'), + reboot = dict(type='bool'), + revert = dict(), + sata = dict(type='dict'), + scsi = dict(type='dict'), + scsihw = dict(choices=['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']), + serial = dict(type='dict'), + shares = dict(type='int'), + skiplock = dict(type='bool'), + smbios = dict(type='str'), + sockets = dict(type='int', default=1), + startdate = dict(type='str'), + startup = dict(), + state = dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']), + tablet = dict(type='bool', default='no'), + tdf = dict(type='bool'), + template = dict(type='bool', default='no'), + timeout = dict(type='int', default=30), + validate_certs = dict(type='bool', default='no'), + vcpus = dict(type='int', default=None), + vga = dict(default='std', choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']), + virtio = dict(type='dict', default=None), + vmid = dict(type='int', default=None), + watchdog = dict(), + ) + ) + + if not HAS_PROXMOXER: + 
module.fail_json(msg='proxmoxer required for this module')
+
+    api_user = module.params['api_user']
+    api_host = module.params['api_host']
+    api_password = module.params['api_password']
+    cpu = module.params['cpu']
+    cores = module.params['cores']
+    memory = module.params['memory']
+    name = module.params['name']
+    node = module.params['node']
+    sockets = module.params['sockets']
+    state = module.params['state']
+    timeout = module.params['timeout']
+    validate_certs = module.params['validate_certs']
+
+    # If password not set get it from PROXMOX_PASSWORD env
+    if not api_password:
+        try:
+            api_password = os.environ['PROXMOX_PASSWORD']
+        except KeyError as e:
+            module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
+
+    try:
+        proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
+        global VZ_TYPE
+        global PVE_MAJOR_VERSION
+        PVE_MAJOR_VERSION = 3 if float(proxmox.version.get()['version']) < 4.0 else 4
+    except Exception as e:
+        module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
+
+
+    # If vmid is not set, get the next VM ID from ProxmoxAPI
+    # If the VM name is set, get the VM ID from ProxmoxAPI
+    if module.params['vmid'] is not None:
+        vmid = module.params['vmid']
+    elif state == 'present':
+        vmid = get_nextvmid(module, proxmox)
+    elif module.params['name'] is not None:
+        vmid = get_vmid(proxmox, name)[0]
+
+    if state == 'present':
+        try:
+            if get_vm(proxmox, vmid) and not module.params['force']:
+                module.exit_json(changed=False, msg="VM with vmid <%s> already exists" % vmid)
+            elif get_vmid(proxmox, name) and not module.params['force']:
+                module.exit_json(changed=False, msg="VM with name <%s> already exists" % name)
+            elif not node or not module.params['name']:
+                module.fail_json(msg='node and name are mandatory for creating a VM')
+            elif not node_check(proxmox, node):
+                module.fail_json(msg="node '%s' does not exist in cluster" % node)
+
+            create_vm(module, proxmox, vmid, node, name, memory, cpu, cores, sockets, timeout,
+                      acpi = module.params['acpi'],
+                      agent = module.params['agent'],
+                      autostart = module.params['autostart'],
+                      balloon = module.params['balloon'],
+                      bios = module.params['bios'],
+                      boot = module.params['boot'],
+                      bootdisk = module.params['bootdisk'],
+                      cpulimit = module.params['cpulimit'],
+                      cpuunits = module.params['cpuunits'],
+                      delete = module.params['delete'],
+                      description = module.params['description'],
+                      digest = module.params['digest'],
+                      force = module.params['force'],
+                      freeze = module.params['freeze'],
+                      hostpci = module.params['hostpci'],
+                      hotplug = module.params['hotplug'],
+                      hugepages = module.params['hugepages'],
+                      ide = module.params['ide'],
+                      keyboard = module.params['keyboard'],
+                      kvm = module.params['kvm'],
+                      localtime = module.params['localtime'],
+                      lock = module.params['lock'],
+                      machine = module.params['machine'],
+                      migrate_downtime = module.params['migrate_downtime'],
+                      migrate_speed = module.params['migrate_speed'],
+                      net = module.params['net'],
+                      numa = module.params['numa'],
+                      onboot = module.params['onboot'],
+                      ostype = module.params['ostype'],
+                      parallel = module.params['parallel'],
+                      protection = module.params['protection'],
+                      reboot = module.params['reboot'],
+                      revert = module.params['revert'],
+                      sata = module.params['sata'],
+                      scsi = module.params['scsi'],
+                      scsihw = module.params['scsihw'],
+                      serial = module.params['serial'],
+                      shares = module.params['shares'],
+                      skiplock = module.params['skiplock'],
+                      # The module option is named 'smbios', but the Proxmox API field is 'smbios1'.
+                      smbios1 = module.params['smbios'],
+
startdate = module.params['startdate'], + startup = module.params['startup'], + tablet = module.params['tablet'], + tdf = module.params['tdf'], + template = module.params['template'], + vcpus = module.params['vcpus'], + vga = module.params['vga'], + virtio = module.params['virtio'], + watchdog = module.params['watchdog']) + + get_vminfo(module, proxmox, node, vmid, + ide = module.params['ide'], + net = module.params['net'], + sata = module.params['sata'], + scsi = module.params['scsi'], + virtio = module.params['virtio']) + module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results) + except Exception as e: + module.fail_json(msg="creation of %s VM %s with vmid %s failed with exception: %s" % ( VZ_TYPE, name, vmid, e )) + + elif state == 'started': + try: + vm = get_vm(proxmox, vmid) + if not vm: + module.fail_json(msg='VM with vmid <%s> does not exist in cluster' % vmid) + if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': + module.exit_json(changed=False, msg="VM %s is already running" % vmid) + + if start_vm(module, proxmox, vm, vmid, timeout): + module.exit_json(changed=True, msg="VM %s started" % vmid) + except Exception as e: + module.fail_json(msg="starting of VM %s failed with exception: %s" % ( vmid, e )) + + elif state == 'stopped': + try: + vm = get_vm(proxmox, vmid) + if not vm: + module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid) + + if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped': + module.exit_json(changed=False, msg="VM %s is already stopped" % vmid) + + if stop_vm(module, proxmox, vm, vmid, timeout, force = module.params['force']): + module.exit_json(changed=True, msg="VM %s is shutting down" % vmid) + except Exception as e: + module.fail_json(msg="stopping of VM %s failed with exception: %s" % ( vmid, e )) + + elif state == 'restarted': + try: + vm = get_vm(proxmox, vmid) + if not vm: + module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid) + if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped': + module.exit_json(changed=False, msg="VM %s is not running" % vmid) + + if ( stop_vm(module, proxmox, vm, vmid, timeout, force = module.params['force']) and + start_vm(module, proxmox, vm, vmid, timeout) ): + module.exit_json(changed=True, msg="VM %s is restarted" % vmid) + except Exception as e: + module.fail_json(msg="restarting of VM %s failed with exception: %s" % ( vmid, e )) + + elif state == 'absent': + try: + vm = get_vm(proxmox, vmid) + if not vm: + module.exit_json(changed=False, msg="VM %s does not exist" % vmid) + + if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running': + module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid) + + taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid) + while timeout: + if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' + and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ): + module.exit_json(changed=True, msg="VM %s removed" % vmid) + timeout = timeout - 1 + if timeout == 0: + module.fail_json(msg='Reached timeout while waiting for removing VM. 
Last line in task before timeout: %s' + % proxmox_node.tasks(taskid).log.get()[:1]) + + time.sleep(1) + except Exception as e: + module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e )) + + elif state == 'current': + status = {} + try: + vm = get_vm(proxmox, vmid) + if not vm: + module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid) + current = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] + status['status'] = current + if status: + module.exit_json(changed=False, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status) + except Exception as e: + module.fail_json(msg="Unable to get vm {} with vmid = {} status: ".format(name, vmid) + str(e)) + +# import module snippets +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/cloud/misc/proxmox_template.py b/cloud/misc/proxmox_template.py index 7fed47f7260..64c9b96cb62 100644 --- a/cloud/misc/proxmox_template.py +++ b/cloud/misc/proxmox_template.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: proxmox_template @@ -98,16 +102,39 @@ EXAMPLES = ''' # Upload new openvz template with minimal options -- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' src='~/ubuntu-14.04-x86_64.tar.gz' +- proxmox_template: + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + src: ~/ubuntu-14.04-x86_64.tar.gz # Upload new openvz template with minimal options use environment PROXMOX_PASSWORD variable(you should export it before) -- proxmox_template: node='uk-mc02' api_user='root@pam' api_host='node1' src='~/ubuntu-14.04-x86_64.tar.gz' +- proxmox_template: + node: uk-mc02 + api_user: root@pam + api_host: node1 + src: ~/ubuntu-14.04-x86_64.tar.gz # Upload new openvz template with all options and force overwrite -- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' storage='local' content_type='vztmpl' src='~/ubuntu-14.04-x86_64.tar.gz' force=yes +- proxmox_template: + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + storage: local + content_type: vztmpl + src: ~/ubuntu-14.04-x86_64.tar.gz + force: yes # Delete template with minimal options -- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' template='ubuntu-14.04-x86_64.tar.gz' state=absent +- proxmox_template: + node: uk-mc02 + api_user: root@pam + api_password: 1q2w3e + api_host: node1 + template: ubuntu-14.04-x86_64.tar.gz + state: absent ''' import os @@ -156,14 +183,14 @@ def main(): api_host = dict(required=True), api_user = dict(required=True), api_password = dict(no_log=True), - validate_certs = dict(type='bool', choices=BOOLEANS, default='no'), + validate_certs = dict(type='bool', default='no'), node = dict(), src = dict(), template = dict(), content_type = dict(default='vztmpl', choices=['vztmpl','iso']), storage = dict(default='local'), timeout = dict(type='int', default=30), - force = dict(type='bool', choices=BOOLEANS, default='no'), + force = dict(type='bool', default='no'), state = dict(default='present', choices=['present', 'absent']), ) ) @@ -184,12 +211,12 @@ def main(): if not api_password: try: api_password = os.environ['PROXMOX_PASSWORD'] - except KeyError, e: + except KeyError as e: 
module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable') try: proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs) - except Exception, e: + except Exception as e: module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e) if state == 'present': @@ -209,7 +236,7 @@ def main(): if upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout): module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template)) - except Exception, e: + except Exception as e: module.fail_json(msg="uploading of template %s failed with exception: %s" % ( template, e )) elif state == 'absent': @@ -224,9 +251,11 @@ def main(): if delete_template(module, proxmox, node, storage, content_type, template, timeout): module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template)) - except Exception, e: + except Exception as e: module.fail_json(msg="deleting of template %s failed with exception: %s" % ( template, e )) # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/cloud/misc/rhevm.py b/cloud/misc/rhevm.py new file mode 100644 index 00000000000..8789e880281 --- /dev/null +++ b/cloud/misc/rhevm.py @@ -0,0 +1,1534 @@ +#!/usr/bin/python + +# (c) 2016, Timothy Vandenbrande +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: rhevm +author: Timothy Vandenbrande +short_description: RHEV/oVirt automation +description: + - Allows you to create/remove/update or powermanage virtual machines on a RHEV/oVirt platform. +version_added: "2.2" +requirements: + - ovirtsdk +options: + user: + description: + - The user to authenticate with. + default: "admin@internal" + required: false + server: + description: + - The name/ip of your RHEV-m/oVirt instance. + default: "127.0.0.1" + required: false + port: + description: + - The port on which the API is reacheable. + default: "443" + required: false + insecure_api: + description: + - A boolean switch to make a secure or insecure connection to the server. + default: false + required: false + name: + description: + - The name of the VM. + cluster: + description: + - The rhev/ovirt cluster in which you want you VM to start. + required: false + datacenter: + description: + - The rhev/ovirt datacenter in which you want you VM to start. + required: false + default: "Default" + state: + description: + - This serves to create/remove/update or powermanage your VM. + default: "present" + required: false + choices: ['ping', 'present', 'absent', 'up', 'down', 'restarted', 'cd', 'info'] + image: + description: + - The template to use for the VM. 
+ default: null + required: false + type: + description: + - To define if the VM is a server or desktop. + default: server + required: false + choices: [ 'server', 'desktop', 'host' ] + vmhost: + description: + - The host you wish your VM to run on. + required: false + vmcpu: + description: + - The number of CPUs you want in your VM. + default: "2" + required: false + cpu_share: + description: + - This parameter is used to configure the cpu share. + default: "0" + required: false + vmmem: + description: + - The amount of memory you want your VM to use (in GB). + default: "1" + required: false + osver: + description: + - The operationsystem option in RHEV/oVirt. + default: "rhel_6x64" + required: false + mempol: + description: + - The minimum amount of memory you wish to reserve for this system. + default: "1" + required: false + vm_ha: + description: + - To make your VM High Available. + default: true + required: false + disks: + description: + - This option uses complex arguments and is a list of disks with the options name, size and domain. + required: false + ifaces: + description: + - This option uses complex arguments and is a list of interfaces with the options name and vlan. + aliases: ['nics', 'interfaces'] + required: false + boot_order: + description: + - This option uses complex arguments and is a list of items that specify the bootorder. + default: ["network","hd"] + required: false + del_prot: + description: + - This option sets the delete protection checkbox. + default: true + required: false + cd_drive: + description: + - The CD you wish to have mounted on the VM when I(state = 'CD'). + default: null + required: false + timeout: + description: + - The timeout you wish to define for power actions. + - When I(state = 'up') + - When I(state = 'down') + - When I(state = 'restarted') + default: null + required: false +''' + +RETURN = ''' +vm: + description: Returns all of the VMs variables and execution. 
+ returned: always + type: dict + sample: '{ + "boot_order": [ + "hd", + "network" + ], + "changed": true, + "changes": [ + "Delete Protection" + ], + "cluster": "C1", + "cpu_share": "0", + "created": false, + "datacenter": "Default", + "del_prot": true, + "disks": [ + { + "domain": "ssd-san", + "name": "OS", + "size": 40 + } + ], + "eth0": "00:00:5E:00:53:00", + "eth1": "00:00:5E:00:53:01", + "eth2": "00:00:5E:00:53:02", + "exists": true, + "failed": false, + "ifaces": [ + { + "name": "eth0", + "vlan": "Management" + }, + { + "name": "eth1", + "vlan": "Internal" + }, + { + "name": "eth2", + "vlan": "External" + } + ], + "image": false, + "mempol": "0", + "msg": [ + "VM exists", + "cpu_share was already set to 0", + "VM high availability was already set to True", + "The boot order has already been set", + "VM delete protection has been set to True", + "Disk web2_Disk0_OS already exists", + "The VM starting host was already set to host416" + ], + "name": "web2", + "type": "server", + "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b", + "vm_ha": true, + "vmcpu": "4", + "vmhost": "host416", + "vmmem": "16" + }' +''' + +EXAMPLES = ''' +# basic get info from VM + action: rhevm + args: + name: "demo" + user: "{{ rhev.admin.name }}" + password: "{{ rhev.admin.pass }}" + server: "rhevm01" + state: "info" + +# basic create example from image + action: rhevm + args: + name: "demo" + user: "{{ rhev.admin.name }}" + password: "{{ rhev.admin.pass }}" + server: "rhevm01" + state: "present" + image: "centos7_x64" + cluster: "centos" + +# power management + action: rhevm + args: + name: "uptime_server" + user: "{{ rhev.admin.name }}" + password: "{{ rhev.admin.pass }}" + server: "rhevm01" + cluster: "RH" + state: "down" + image: "centos7_x64" + cluster: "centos + +# multi disk, multi nic create example + action: rhevm + args: + name: "server007" + user: "{{ rhev.admin.name }}" + password: "{{ rhev.admin.pass }}" + server: "rhevm01" + cluster: "RH" + state: "present" + type: "server" + vmcpu: 4 + vmmem: 2 + ifaces: + - name: "eth0" + vlan: "vlan2202" + - name: "eth1" + vlan: "vlan36" + - name: "eth2" + vlan: "vlan38" + - name: "eth3" + vlan: "vlan2202" + disks: + - name: "root" + size: 10 + domain: "ssd-san" + - name: "swap" + size: 10 + domain: "15kiscsi-san" + - name: "opt" + size: 10 + domain: "15kiscsi-san" + - name: "var" + size: 10 + domain: "10kiscsi-san" + - name: "home" + size: 10 + domain: "sata-san" + boot_order: + - "network" + - "hd" + +# add a CD to the disk cd_drive + action: rhevm + args: + name: 'server007' + user: "{{ rhev.admin.name }}" + password: "{{ rhev.admin.pass }}" + state: 'cd' + cd_drive: 'rhev-tools-setup.iso' + +# new host deployment + host network configuration + action: rhevm + args: + name: "ovirt_node007" + password: "{{ rhevm.admin.pass }}" + type: "host" + state: present + cluster: "rhevm01" + ifaces: + - name: em1 + - name: em2 + - name: p3p1 + ip: '172.31.224.200' + netmask: '255.255.254.0' + - name: p3p2 + ip: '172.31.225.200' + netmask: '255.255.254.0' + - name: bond0 + bond: + - em1 + - em2 + network: 'rhevm' + ip: '172.31.222.200' + netmask: '255.255.255.0' + management: True + - name: bond0.36 + network: 'vlan36' + ip: '10.2.36.200' + netmask: '255.255.254.0' + gateway: '10.2.36.254' + - name: bond0.2202 + network: 'vlan2202' + - name: bond0.38 + network: 'vlan38' +''' + +import time +import sys +import traceback +import json + +try: + from ovirtsdk.api import API + from ovirtsdk.xml import params + HAS_SDK = True +except ImportError: + HAS_SDK = False + +RHEV_FAILED 
= 1 +RHEV_SUCCESS = 0 +RHEV_UNAVAILABLE = 2 + +RHEV_TYPE_OPTS = ['server', 'desktop', 'host'] +STATE_OPTS = ['ping', 'present', 'absent', 'up', 'down', 'restart', 'cd', 'info'] + +global msg, changed, failed +msg = [] +changed = False +failed = False + + +class RHEVConn(object): + 'Connection to RHEV-M' + def __init__(self, module): + self.module = module + + user = module.params.get('user') + password = module.params.get('password') + server = module.params.get('server') + port = module.params.get('port') + insecure_api = module.params.get('insecure_api') + + url = "https://%s:%s" % (server, port) + + try: + api = API(url=url, username=user, password=password, insecure=str(insecure_api)) + api.test() + self.conn = api + except: + raise Exception("Failed to connect to RHEV-M.") + + def __del__(self): + self.conn.disconnect() + + def createVMimage(self, name, cluster, template): + try: + vmparams = params.VM( + name=name, + cluster=self.conn.clusters.get(name=cluster), + template=self.conn.templates.get(name=template), + disks=params.Disks(clone=True) + ) + self.conn.vms.add(vmparams) + setMsg("VM is created") + setChanged() + return True + except Exception as e: + setMsg("Failed to create VM") + setMsg(str(e)) + setFailed() + return False + + def createVM(self, name, cluster, os, actiontype): + try: + vmparams = params.VM( + name=name, + cluster=self.conn.clusters.get(name=cluster), + os=params.OperatingSystem(type_=os), + template=self.conn.templates.get(name="Blank"), + type_=actiontype + ) + self.conn.vms.add(vmparams) + setMsg("VM is created") + setChanged() + return True + except Exception as e: + setMsg("Failed to create VM") + setMsg(str(e)) + setFailed() + return False + + def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot): + VM = self.get_VM(vmname) + + newdisk = params.Disk( + name=diskname, + size=1024 * 1024 * 1024 * int(disksize), + wipe_after_delete=True, + sparse=diskallocationtype, + interface=diskinterface, + format=diskformat, + bootable=diskboot, + storage_domains=params.StorageDomains( + storage_domain=[self.get_domain(diskdomain)] + ) + ) + + try: + VM.disks.add(newdisk) + VM.update() + setMsg("Successfully added disk " + diskname) + setChanged() + except Exception as e: + setFailed() + setMsg("Error attaching " + diskname + "disk, please recheck and remove any leftover configuration.") + setMsg(str(e)) + return False + + try: + currentdisk = VM.disks.get(name=diskname) + attempt = 1 + while currentdisk.status.state != 'ok': + currentdisk = VM.disks.get(name=diskname) + if attempt == 100: + setMsg("Error, disk %s, state %s" % (diskname, str(currentdisk.status.state))) + raise + else: + attempt += 1 + time.sleep(2) + setMsg("The disk " + diskname + " is ready.") + except Exception as e: + setFailed() + setMsg("Error getting the state of " + diskname + ".") + setMsg(str(e)) + return False + return True + + def createNIC(self, vmname, nicname, vlan, interface): + VM = self.get_VM(vmname) + CLUSTER = self.get_cluster_byid(VM.cluster.id) + DC = self.get_DC_byid(CLUSTER.data_center.id) + newnic = params.NIC( + name=nicname, + network=DC.networks.get(name=vlan), + interface=interface + ) + + try: + VM.nics.add(newnic) + VM.update() + setMsg("Successfully added iface " + nicname) + setChanged() + except Exception as e: + setFailed() + setMsg("Error attaching " + nicname + " iface, please recheck and remove any leftover configuration.") + setMsg(str(e)) + return False + + try: + currentnic = 
VM.nics.get(name=nicname) + attempt = 1 + while currentnic.active is not True: + currentnic = VM.nics.get(name=nicname) + if attempt == 100: + setMsg("Error, iface %s, state %s" % (nicname, str(currentnic.active))) + raise + else: + attempt += 1 + time.sleep(2) + setMsg("The iface " + nicname + " is ready.") + except Exception as e: + setFailed() + setMsg("Error getting the state of " + nicname + ".") + setMsg(str(e)) + return False + return True + + def get_DC(self, dc_name): + return self.conn.datacenters.get(name=dc_name) + + def get_DC_byid(self, dc_id): + return self.conn.datacenters.get(id=dc_id) + + def get_VM(self, vm_name): + return self.conn.vms.get(name=vm_name) + + def get_cluster_byid(self, cluster_id): + return self.conn.clusters.get(id=cluster_id) + + def get_cluster(self, cluster_name): + return self.conn.clusters.get(name=cluster_name) + + def get_domain_byid(self, dom_id): + return self.conn.storagedomains.get(id=dom_id) + + def get_domain(self, domain_name): + return self.conn.storagedomains.get(name=domain_name) + + def get_disk(self, disk): + return self.conn.disks.get(disk) + + def get_network(self, dc_name, network_name): + return self.get_DC(dc_name).networks.get(network_name) + + def get_network_byid(self, network_id): + return self.conn.networks.get(id=network_id) + + def get_NIC(self, vm_name, nic_name): + return self.get_VM(vm_name).nics.get(nic_name) + + def get_Host(self, host_name): + return self.conn.hosts.get(name=host_name) + + def get_Host_byid(self, host_id): + return self.conn.hosts.get(id=host_id) + + def set_Memory(self, name, memory): + VM = self.get_VM(name) + VM.memory = int(int(memory) * 1024 * 1024 * 1024) + try: + VM.update() + setMsg("The Memory has been updated.") + setChanged() + return True + except Exception as e: + setMsg("Failed to update memory.") + setMsg(str(e)) + setFailed() + return False + + def set_Memory_Policy(self, name, memory_policy): + VM = self.get_VM(name) + VM.memory_policy.guaranteed = int(int(memory_policy) * 1024 * 1024 * 1024) + try: + VM.update() + setMsg("The memory policy has been updated.") + setChanged() + return True + except Exception as e: + setMsg("Failed to update memory policy.") + setMsg(str(e)) + setFailed() + return False + + def set_CPU(self, name, cpu): + VM = self.get_VM(name) + VM.cpu.topology.cores = int(cpu) + try: + VM.update() + setMsg("The number of CPUs has been updated.") + setChanged() + return True + except Exception as e: + setMsg("Failed to update the number of CPUs.") + setMsg(str(e)) + setFailed() + return False + + def set_CPU_share(self, name, cpu_share): + VM = self.get_VM(name) + VM.cpu_shares = int(cpu_share) + try: + VM.update() + setMsg("The CPU share has been updated.") + setChanged() + return True + except Exception as e: + setMsg("Failed to update the CPU share.") + setMsg(str(e)) + setFailed() + return False + + def set_Disk(self, diskname, disksize, diskinterface, diskboot): + DISK = self.get_disk(diskname) + setMsg("Checking disk " + diskname) + if DISK.get_bootable() != diskboot: + try: + DISK.set_bootable(diskboot) + setMsg("Updated the boot option on the disk.") + setChanged() + except Exception as e: + setMsg("Failed to set the boot option on the disk.") + setMsg(str(e)) + setFailed() + return False + else: + setMsg("The boot option of the disk is correct") + if int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)): + try: + DISK.size = (1024 * 1024 * 1024 * int(disksize)) + setMsg("Updated the size of the disk.") + setChanged() + except Exception as e: + setMsg("Failed 
to update the size of the disk.") + setMsg(str(e)) + setFailed() + return False + elif int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)): + setMsg("Shrinking disks is not supported") + setMsg(str(e)) + setFailed() + return False + else: + setMsg("The size of the disk is correct") + if str(DISK.interface) != str(diskinterface): + try: + DISK.interface = diskinterface + setMsg("Updated the interface of the disk.") + setChanged() + except Exception as e: + setMsg("Failed to update the interface of the disk.") + setMsg(str(e)) + setFailed() + return False + else: + setMsg("The interface of the disk is correct") + return True + + def set_NIC(self, vmname, nicname, newname, vlan, interface): + NIC = self.get_NIC(vmname, nicname) + VM = self.get_VM(vmname) + CLUSTER = self.get_cluster_byid(VM.cluster.id) + DC = self.get_DC_byid(CLUSTER.data_center.id) + NETWORK = self.get_network(str(DC.name), vlan) + checkFail() + if NIC.name != newname: + NIC.name = newname + setMsg('Updating iface name to ' + newname) + setChanged() + if str(NIC.network.id) != str(NETWORK.id): + NIC.set_network(NETWORK) + setMsg('Updating iface network to ' + vlan) + setChanged() + if NIC.interface != interface: + NIC.interface = interface + setMsg('Updating iface interface to ' + interface) + setChanged() + try: + NIC.update() + setMsg('iface has succesfully been updated.') + except Exception as e: + setMsg("Failed to update the iface.") + setMsg(str(e)) + setFailed() + return False + return True + + def set_DeleteProtection(self, vmname, del_prot): + VM = self.get_VM(vmname) + VM.delete_protected = del_prot + try: + VM.update() + setChanged() + except Exception as e: + setMsg("Failed to update delete protection.") + setMsg(str(e)) + setFailed() + return False + return True + + def set_BootOrder(self, vmname, boot_order): + VM = self.get_VM(vmname) + bootorder = [] + for device in boot_order: + bootorder.append(params.Boot(dev=device)) + VM.os.boot = bootorder + + try: + VM.update() + setChanged() + except Exception as e: + setMsg("Failed to update the boot order.") + setMsg(str(e)) + setFailed() + return False + return True + + def set_Host(self, host_name, cluster, ifaces): + HOST = self.get_Host(host_name) + CLUSTER = self.get_cluster(cluster) + + if HOST is None: + setMsg("Host does not exist.") + ifacelist = dict() + networklist = [] + manageip = '' + + try: + for iface in ifaces: + try: + setMsg('creating host interface ' + iface['name']) + if 'management' in iface: + manageip = iface['ip'] + if 'boot_protocol' not in iface: + if 'ip' in iface: + iface['boot_protocol'] = 'static' + else: + iface['boot_protocol'] = 'none' + if 'ip' not in iface: + iface['ip'] = '' + if 'netmask' not in iface: + iface['netmask'] = '' + if 'gateway' not in iface: + iface['gateway'] = '' + + if 'network' in iface: + if 'bond' in iface: + bond = [] + for slave in iface['bond']: + bond.append(ifacelist[slave]) + try: + tmpiface = params.Bonding( + slaves = params.Slaves(host_nic = bond), + options = params.Options( + option = [ + params.Option(name = 'miimon', value = '100'), + params.Option(name = 'mode', value = '4') + ] + ) + ) + except Exception as e: + setMsg('Failed to create the bond for ' + iface['name']) + setFailed() + setMsg(str(e)) + return False + try: + tmpnetwork = params.HostNIC( + network = params.Network(name = iface['network']), + name = iface['name'], + boot_protocol = iface['boot_protocol'], + ip = params.IP( + address = iface['ip'], + netmask = iface['netmask'], + gateway = iface['gateway'] + ), + 
override_configuration = True, + bonding = tmpiface) + networklist.append(tmpnetwork) + setMsg('Applying network ' + iface['name']) + except Exception as e: + setMsg('Failed to set' + iface['name'] + ' as network interface') + setFailed() + setMsg(str(e)) + return False + else: + tmpnetwork = params.HostNIC( + network = params.Network(name = iface['network']), + name = iface['name'], + boot_protocol = iface['boot_protocol'], + ip = params.IP( + address = iface['ip'], + netmask = iface['netmask'], + gateway = iface['gateway'] + )) + networklist.append(tmpnetwork) + setMsg('Applying network ' + iface['name']) + else: + tmpiface = params.HostNIC( + name=iface['name'], + network=params.Network(), + boot_protocol=iface['boot_protocol'], + ip=params.IP( + address=iface['ip'], + netmask=iface['netmask'], + gateway=iface['gateway'] + )) + ifacelist[iface['name']] = tmpiface + except Exception as e: + setMsg('Failed to set ' + iface['name']) + setFailed() + setMsg(str(e)) + return False + except Exception as e: + setMsg('Failed to set networks') + setMsg(str(e)) + setFailed() + return False + + if manageip == '': + setMsg('No management network is defined') + setFailed() + return False + + try: + HOST = params.Host(name=host_name, address=manageip, cluster=CLUSTER, ssh=params.SSH(authentication_method='publickey')) + if self.conn.hosts.add(HOST): + setChanged() + HOST = self.get_Host(host_name) + state = HOST.status.state + while (state != 'non_operational' and state != 'up'): + HOST = self.get_Host(host_name) + state = HOST.status.state + time.sleep(1) + if state == 'non_responsive': + setMsg('Failed to add host to RHEVM') + setFailed() + return False + + setMsg('status host: up') + time.sleep(5) + + HOST = self.get_Host(host_name) + state = HOST.status.state + setMsg('State before setting to maintenance: ' + str(state)) + HOST.deactivate() + while state != 'maintenance': + HOST = self.get_Host(host_name) + state = HOST.status.state + time.sleep(1) + setMsg('status host: maintenance') + + try: + HOST.nics.setupnetworks(params.Action( + force=True, + check_connectivity = False, + host_nics = params.HostNics(host_nic = networklist) + )) + setMsg('nics are set') + except Exception as e: + setMsg('Failed to apply networkconfig') + setFailed() + setMsg(str(e)) + return False + + try: + HOST.commitnetconfig() + setMsg('Network config is saved') + except Exception as e: + setMsg('Failed to save networkconfig') + setFailed() + setMsg(str(e)) + return False + except Exception as e: + if 'The Host name is already in use' in str(e): + setMsg("Host already exists") + else: + setMsg("Failed to add host") + setFailed() + setMsg(str(e)) + return False + + HOST.activate() + while state != 'up': + HOST = self.get_Host(host_name) + state = HOST.status.state + time.sleep(1) + if state == 'non_responsive': + setMsg('Failed to apply networkconfig.') + setFailed() + return False + setMsg('status host: up') + else: + setMsg("Host exists.") + + return True + + def del_NIC(self, vmname, nicname): + return self.get_NIC(vmname, nicname).delete() + + def remove_VM(self, vmname): + VM = self.get_VM(vmname) + try: + VM.delete() + except Exception as e: + setMsg("Failed to remove VM.") + setMsg(str(e)) + setFailed() + return False + return True + + def start_VM(self, vmname, timeout): + VM = self.get_VM(vmname) + try: + VM.start() + except Exception as e: + setMsg("Failed to start VM.") + setMsg(str(e)) + setFailed() + return False + return self.wait_VM(vmname, "up", timeout) + + def wait_VM(self, vmname, state, timeout): + VM 
= self.get_VM(vmname)
+        while VM.status.state != state:
+            VM = self.get_VM(vmname)
+            time.sleep(10)
+            if timeout is not False:
+                timeout -= 10
+                if timeout <= 0:
+                    setMsg("Timeout expired")
+                    setFailed()
+                    return False
+        return True
+
+    def stop_VM(self, vmname, timeout):
+        VM = self.get_VM(vmname)
+        try:
+            VM.stop()
+        except Exception as e:
+            setMsg("Failed to stop VM.")
+            setMsg(str(e))
+            setFailed()
+            return False
+        return self.wait_VM(vmname, "down", timeout)
+
+    def set_CD(self, vmname, cd_drive):
+        VM = self.get_VM(vmname)
+        try:
+            if str(VM.status.state) == 'down':
+                cdrom = params.CdRom(file=cd_drive)
+                VM.cdroms.add(cdrom)
+                setMsg("Attached the image.")
+                setChanged()
+            else:
+                cdrom = VM.cdroms.get(id="00000000-0000-0000-0000-000000000000")
+                cdrom.set_file(cd_drive)
+                cdrom.update(current=True)
+                setMsg("Attached the image.")
+                setChanged()
+        except Exception as e:
+            setMsg("Failed to attach image.")
+            setMsg(str(e))
+            setFailed()
+            return False
+        return True
+
+    def set_VM_Host(self, vmname, vmhost):
+        VM = self.get_VM(vmname)
+        HOST = self.get_Host(vmhost)
+        try:
+            VM.placement_policy.host = HOST
+            VM.update()
+            setMsg("Set startup host to " + vmhost)
+            setChanged()
+        except Exception as e:
+            setMsg("Failed to set startup host.")
+            setMsg(str(e))
+            setFailed()
+            return False
+        return True
+
+    def migrate_VM(self, vmname, vmhost):
+        VM = self.get_VM(vmname)
+
+        HOST = self.get_Host_byid(VM.host.id)
+        if str(HOST.name) != vmhost:
+            try:
+                VM.migrate(
+                    action=params.Action(
+                        host=params.Host(
+                            name=vmhost,
+                        )
+                    ),
+                )
+                setChanged()
+                setMsg("VM migrated to " + vmhost)
+            except Exception as e:
+                setMsg("Failed to migrate VM.")
+                setMsg(str(e))
+                setFailed()
+                return False
+        return True
+
+    def remove_CD(self, vmname):
+        VM = self.get_VM(vmname)
+        try:
+            VM.cdroms.get(id="00000000-0000-0000-0000-000000000000").delete()
+            setMsg("Removed the image.")
+            setChanged()
+        except Exception as e:
+            setMsg("Failed to remove the image.")
+            setMsg(str(e))
+            setFailed()
+            return False
+        return True
+
+
+class RHEV(object):
+    def __init__(self, module):
+        self.module = module
+
+    def __get_conn(self):
+        self.conn = RHEVConn(self.module)
+        return self.conn
+
+    def test(self):
+        self.__get_conn()
+        return "OK"
+
+    def getVM(self, name):
+        self.__get_conn()
+        VM = self.conn.get_VM(name)
+        if VM:
+            vminfo = dict()
+            vminfo['uuid'] = VM.id
+            vminfo['name'] = VM.name
+            vminfo['status'] = VM.status.state
+            vminfo['cpu_cores'] = VM.cpu.topology.cores
+            vminfo['cpu_sockets'] = VM.cpu.topology.sockets
+            vminfo['cpu_shares'] = VM.cpu_shares
+            vminfo['memory'] = (int(VM.memory) / 1024 / 1024 / 1024)
+            vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) / 1024 / 1024 / 1024)
+            vminfo['os'] = VM.get_os().type_
+            vminfo['del_prot'] = VM.delete_protected
+            try:
+                vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name)
+            except Exception as e:
+                vminfo['host'] = None
+            vminfo['boot_order'] = []
+            for boot_dev in VM.os.get_boot():
+                vminfo['boot_order'].append(str(boot_dev.dev))
+            vminfo['disks'] = []
+            for DISK in VM.disks.list():
+                disk = dict()
+                disk['name'] = DISK.name
+                disk['size'] = (int(DISK.size) / 1024 / 1024 / 1024)
+                disk['domain'] = str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name)
+                disk['interface'] = DISK.interface
+                vminfo['disks'].append(disk)
+            # Each NIC is exposed both in the ifaces list and as a top-level <name>: <mac> shortcut below.
+            vminfo['ifaces'] = []
+            for NIC in VM.nics.list():
+                iface = dict()
+                iface['name'] = str(NIC.name)
+                iface['vlan'] = str(self.conn.get_network_byid(NIC.get_network().id).name)
+
iface['interface'] = NIC.interface + iface['mac'] = NIC.mac.address + vminfo['ifaces'].append(iface) + vminfo[str(NIC.name)] = NIC.mac.address + CLUSTER = self.conn.get_cluster_byid(VM.cluster.id) + if CLUSTER: + vminfo['cluster'] = CLUSTER.name + else: + vminfo = False + return vminfo + + def createVMimage(self, name, cluster, template, disks): + self.__get_conn() + return self.conn.createVMimage(name, cluster, template, disks) + + def createVM(self, name, cluster, os, actiontype): + self.__get_conn() + return self.conn.createVM(name, cluster, os, actiontype) + + def setMemory(self, name, memory): + self.__get_conn() + return self.conn.set_Memory(name, memory) + + def setMemoryPolicy(self, name, memory_policy): + self.__get_conn() + return self.conn.set_Memory_Policy(name, memory_policy) + + def setCPU(self, name, cpu): + self.__get_conn() + return self.conn.set_CPU(name, cpu) + + def setCPUShare(self, name, cpu_share): + self.__get_conn() + return self.conn.set_CPU_share(name, cpu_share) + + def setDisks(self, name, disks): + self.__get_conn() + counter = 0 + bootselect = False + for disk in disks: + if 'bootable' in disk: + if disk['bootable'] is True: + bootselect = True + + for disk in disks: + diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_') + disksize = disk.get('size', 1) + diskdomain = disk.get('domain', None) + if diskdomain is None: + setMsg("`domain` is a required disk key.") + setFailed() + return False + diskinterface = disk.get('interface', 'virtio') + diskformat = disk.get('format', 'raw') + diskallocationtype = disk.get('thin', False) + diskboot = disk.get('bootable', False) + + if bootselect is False and counter == 0: + diskboot = True + + DISK = self.conn.get_disk(diskname) + + if DISK is None: + self.conn.createDisk(name, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot) + else: + self.conn.set_Disk(diskname, disksize, diskinterface, diskboot) + checkFail() + counter += 1 + + return True + + def setNetworks(self, vmname, ifaces): + self.__get_conn() + VM = self.conn.get_VM(vmname) + + counter = 0 + length = len(ifaces) + + for NIC in VM.nics.list(): + if counter < length: + iface = ifaces[counter] + name = iface.get('name', None) + if name is None: + setMsg("`name` is a required iface key.") + setFailed() + elif str(name) != str(NIC.name): + setMsg("ifaces are in the wrong order, rebuilding everything.") + for NIC in VM.nics.list(): + self.conn.del_NIC(vmname, NIC.name) + self.setNetworks(vmname, ifaces) + checkFail() + return True + vlan = iface.get('vlan', None) + if vlan is None: + setMsg("`vlan` is a required iface key.") + setFailed() + checkFail() + interface = iface.get('interface', 'virtio') + self.conn.set_NIC(vmname, str(NIC.name), name, vlan, interface) + else: + self.conn.del_NIC(vmname, NIC.name) + counter += 1 + checkFail() + + while counter < length: + iface = ifaces[counter] + name = iface.get('name', None) + if name is None: + setMsg("`name` is a required iface key.") + setFailed() + vlan = iface.get('vlan', None) + if vlan is None: + setMsg("`vlan` is a required iface key.") + setFailed() + if failed is True: + return False + interface = iface.get('interface', 'virtio') + self.conn.createNIC(vmname, name, vlan, interface) + + counter += 1 + checkFail() + return True + + def setDeleteProtection(self, vmname, del_prot): + self.__get_conn() + VM = self.conn.get_VM(vmname) + if bool(VM.delete_protected) != bool(del_prot): + self.conn.set_DeleteProtection(vmname, del_prot) + 
+            checkFail()
+            setMsg("`delete protection` has been updated.")
+        else:
+            setMsg("`delete protection` already has the right value.")
+        return True
+
+    def setBootOrder(self, vmname, boot_order):
+        self.__get_conn()
+        VM = self.conn.get_VM(vmname)
+        bootorder = []
+        for boot_dev in VM.os.get_boot():
+            bootorder.append(str(boot_dev.dev))
+
+        if boot_order != bootorder:
+            self.conn.set_BootOrder(vmname, boot_order)
+            setMsg('The boot order has been set')
+        else:
+            setMsg('The boot order has already been set')
+        return True
+
+    def removeVM(self, vmname):
+        self.__get_conn()
+        self.setPower(vmname, "down", 300)
+        return self.conn.remove_VM(vmname)
+
+    def setPower(self, vmname, state, timeout):
+        self.__get_conn()
+        VM = self.conn.get_VM(vmname)
+        if VM is None:
+            setMsg("VM does not exist.")
+            setFailed()
+            return False
+
+        if state == VM.status.state:
+            setMsg("VM state was already " + state)
+        else:
+            if state == "up":
+                setMsg("VM is going to start")
+                self.conn.start_VM(vmname, timeout)
+                setChanged()
+            elif state == "down":
+                setMsg("VM is going to stop")
+                self.conn.stop_VM(vmname, timeout)
+                setChanged()
+            elif state == "restarted":
+                self.setPower(vmname, "down", timeout)
+                checkFail()
+                self.setPower(vmname, "up", timeout)
+                checkFail()
+        setMsg("the vm state is set to " + state)
+        return True
+
+    def setCD(self, vmname, cd_drive):
+        self.__get_conn()
+        if cd_drive:
+            return self.conn.set_CD(vmname, cd_drive)
+        else:
+            return self.conn.remove_CD(vmname)
+
+    def setVMHost(self, vmname, vmhost):
+        self.__get_conn()
+        VM = self.conn.get_VM(vmname)
+        HOST = self.conn.get_Host(vmhost)
+
+        if VM.placement_policy.host is None:
+            self.conn.set_VM_Host(vmname, vmhost)
+        elif str(VM.placement_policy.host.id) != str(HOST.id):
+            self.conn.set_VM_Host(vmname, vmhost)
+        else:
+            setMsg("VM's startup host was already set to " + vmhost)
+        checkFail()
+
+        if str(VM.status.state) == "up":
+            self.conn.migrate_VM(vmname, vmhost)
+        checkFail()
+
+        return True
+
+    def setHost(self, hostname, cluster, ifaces):
+        self.__get_conn()
+        return self.conn.set_Host(hostname, cluster, ifaces)
+
+
+def checkFail():
+    if failed:
+        module.fail_json(msg=msg)
+    else:
+        return True
+
+
+def setFailed():
+    global failed
+    failed = True
+
+
+def setChanged():
+    global changed
+    changed = True
+
+
+def setMsg(message):
+    global msg
+    msg.append(message)
+
+
+def core(module):
+
+    r = RHEV(module)
+
+    state = module.params.get('state', 'present')
+
+    if state == 'ping':
+        r.test()
+        return RHEV_SUCCESS, {"ping": "pong"}
+    elif state == 'info':
+        name = module.params.get('name')
+        if not name:
+            setMsg("`name` is a required argument.")
+            return RHEV_FAILED, msg
+        vminfo = r.getVM(name)
+        return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+    elif state == 'present':
+        created = False
+        name = module.params.get('name')
+        if not name:
+            setMsg("`name` is a required argument.")
+            return RHEV_FAILED, msg
+        actiontype = module.params.get('type')
+        if actiontype == 'server' or actiontype == 'desktop':
+            vminfo = r.getVM(name)
+            if vminfo:
+                setMsg('VM exists')
+            else:
+                # Create VM
+                cluster = module.params.get('cluster')
+                if cluster is None:
+                    setMsg("cluster is a required argument.")
+                    setFailed()
+                template = module.params.get('image')
+                if template:
+                    disks = module.params.get('disks')
+                    if disks is None:
+                        setMsg("disks is a required argument.")
+                        setFailed()
+                    checkFail()
+                    if r.createVMimage(name, cluster, template, disks) is False:
+                        return RHEV_FAILED, vminfo
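+                # no image was given: fall through and create a blank VM for the requested OS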
+                else:
+                    os = module.params.get('osver')
+                    if os is None:
+                        setMsg("osver is a required argument.")
+                        setFailed()
+                    checkFail()
+                    if r.createVM(name, cluster, os, actiontype) is False:
+                        return RHEV_FAILED, vminfo
+                created = True
+
+            # Set MEMORY and MEMORY POLICY
+            vminfo = r.getVM(name)
+            memory = module.params.get('vmmem')
+            if memory is not None:
+                memory_policy = module.params.get('mempol')
+                if int(memory_policy) == 0:
+                    memory_policy = memory
+                mem_pol_nok = True
+                if int(vminfo['mem_pol']) == int(memory_policy):
+                    setMsg("Memory policy is correct")
+                    mem_pol_nok = False
+
+                mem_nok = True
+                if int(vminfo['memory']) == int(memory):
+                    setMsg("Memory is correct")
+                    mem_nok = False
+
+                if int(memory_policy) > int(memory):
+                    setMsg('memory_policy cannot have a higher value than memory.')
+                    return RHEV_FAILED, msg
+
+                if mem_nok and mem_pol_nok:
+                    if int(memory_policy) > int(vminfo['memory']):
+                        r.setMemory(vminfo['name'], memory)
+                        r.setMemoryPolicy(vminfo['name'], memory_policy)
+                    else:
+                        r.setMemoryPolicy(vminfo['name'], memory_policy)
+                        r.setMemory(vminfo['name'], memory)
+                elif mem_nok:
+                    r.setMemory(vminfo['name'], memory)
+                elif mem_pol_nok:
+                    r.setMemoryPolicy(vminfo['name'], memory_policy)
+                checkFail()
+
+            # Set CPU
+            cpu = module.params.get('vmcpu')
+            if int(vminfo['cpu_cores']) == int(cpu):
+                setMsg("Number of CPUs is correct")
+            else:
+                if r.setCPU(vminfo['name'], cpu) is False:
+                    return RHEV_FAILED, msg
+
+            # Set CPU SHARE
+            cpu_share = module.params.get('cpu_share')
+            if cpu_share is not None:
+                if int(vminfo['cpu_shares']) == int(cpu_share):
+                    setMsg("CPU share is correct.")
+                else:
+                    if r.setCPUShare(vminfo['name'], cpu_share) is False:
+                        return RHEV_FAILED, msg
+
+            # Set DISKS
+            disks = module.params.get('disks')
+            if disks is not None:
+                if r.setDisks(vminfo['name'], disks) is False:
+                    return RHEV_FAILED, msg
+
+            # Set NETWORKS
+            ifaces = module.params.get('ifaces', None)
+            if ifaces is not None:
+                if r.setNetworks(vminfo['name'], ifaces) is False:
+                    return RHEV_FAILED, msg
+
+            # Set Delete Protection
+            del_prot = module.params.get('del_prot')
+            if r.setDeleteProtection(vminfo['name'], del_prot) is False:
+                return RHEV_FAILED, msg
+
+            # Set Boot Order
+            boot_order = module.params.get('boot_order')
+            if r.setBootOrder(vminfo['name'], boot_order) is False:
+                return RHEV_FAILED, msg
+
+            # Set VM Host
+            vmhost = module.params.get('vmhost')
+            if vmhost is not False and vmhost != "False":
+                if r.setVMHost(vminfo['name'], vmhost) is False:
+                    return RHEV_FAILED, msg
+
+            vminfo = r.getVM(name)
+            vminfo['created'] = created
+            return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+        if actiontype == 'host':
+            cluster = module.params.get('cluster')
+            if cluster is None:
+                setMsg("cluster is a required argument.")
+                setFailed()
+            ifaces = module.params.get('ifaces')
+            if ifaces is None:
+                setMsg("ifaces is a required argument.")
+                setFailed()
+            if r.setHost(name, cluster, ifaces) is False:
+                return RHEV_FAILED, msg
+            return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
+
+    elif state == 'absent':
+        name = module.params.get('name')
+        if not name:
+            setMsg("`name` is a required argument.")
+            return RHEV_FAILED, msg
+        actiontype = module.params.get('type')
+        if actiontype == 'server' or actiontype == 'desktop':
+            vminfo = r.getVM(name)
+            if vminfo:
+                setMsg('VM exists')
+
+                # Set Delete Protection
+                del_prot = module.params.get('del_prot')
+                if r.setDeleteProtection(vminfo['name'], del_prot) is False:
+                    return RHEV_FAILED, msg
+
+                # Remove VM
+                if r.removeVM(vminfo['name']) is False:
+                    return RHEV_FAILED, msg
+                setMsg('VM has been removed.')
+                vminfo['state'] = 'DELETED'
+            else:
+                setMsg('VM was already removed.')
+            return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+    elif state == 'up' or state == 'down' or state == 'restarted':
+        name = module.params.get('name')
+        if not name:
+            setMsg("`name` is a required argument.")
+            return RHEV_FAILED, msg
+        timeout = module.params.get('timeout')
+        if r.setPower(name, state, timeout) is False:
+            return RHEV_FAILED, msg
+        vminfo = r.getVM(name)
+        return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+    elif state == 'cd':
+        name = module.params.get('name')
+        cd_drive = module.params.get('cd_drive')
+        if r.setCD(name, cd_drive) is False:
+            return RHEV_FAILED, msg
+        return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
+
+
+def main():
+    global module
+    module = AnsibleModule(
+        argument_spec = dict(
+            state = dict(default='present', choices=['ping', 'present', 'absent', 'up', 'down', 'restarted', 'cd', 'info']),
+            user = dict(default="admin@internal"),
+            password = dict(required=True, no_log=True),
+            server = dict(default="127.0.0.1"),
+            port = dict(default="443"),
+            insecure_api = dict(default=False, type='bool'),
+            name = dict(),
+            image = dict(default=False),
+            datacenter = dict(default="Default"),
+            type = dict(default="server", choices=['server', 'desktop', 'host']),
+            cluster = dict(default=''),
+            vmhost = dict(default=False),
+            vmcpu = dict(default="2"),
+            vmmem = dict(default="1"),
+            disks = dict(),
+            osver = dict(default="rhel_6x64"),
+            ifaces = dict(aliases=['nics', 'interfaces']),
+            timeout = dict(default=False),
+            mempol = dict(default="1"),
+            vm_ha = dict(default=True),
+            cpu_share = dict(default="0"),
+            boot_order = dict(default=["network", "hd"]),
+            del_prot = dict(default=True, type="bool"),
+            cd_drive = dict(default=False)
+        ),
+    )
+
+    if not HAS_SDK:
+        module.fail_json(
+            msg='The `ovirtsdk` module is not importable. Check the requirements.'
+        )
+
+    rc = RHEV_SUCCESS
+    try:
+        rc, result = core(module)
+    except Exception as e:
+        module.fail_json(msg=str(e))
+
+    if rc != 0: # something went wrong emit the msg
+        module.fail_json(rc=rc, msg=result)
+    else:
+        module.exit_json(**result)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/misc/virt.py b/cloud/misc/virt.py
index b59c7ed3de3..3e9c098f3d3 100644
--- a/cloud/misc/virt.py
+++ b/cloud/misc/virt.py
@@ -15,6 +15,10 @@
     along with this program. If not, see <http://www.gnu.org/licenses/>.
""" +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: virt @@ -66,7 +70,9 @@ EXAMPLES = ''' # a playbook task line: -- virt: name=alpha state=running +- virt: + name: alpha + state: running # /usr/bin/ansible invocations ansible host -m virt -a "name=alpha command=status" @@ -76,14 +82,35 @@ # a playbook example of defining and launching an LXC guest tasks: - name: define vm - virt: name=foo - command=define - xml="{{ lookup('template', 'container-template.xml.j2') }}" - uri=lxc:/// + virt: + name: foo + command: define + xml: '{{ lookup('template', 'container-template.xml.j2') }}' + uri: 'lxc:///' - name: start vm - virt: name=foo state=running uri=lxc:/// + virt: + name: foo + state: running + uri: 'lxc:///' ''' +RETURN = ''' +# for list_vms command +list_vms: + description: The list of vms defined on the remote system + type: dictionary + returned: success + sample: [ + "build.example.org", + "dev.example.org" + ] +# for status command +status: + description: The status of the VM, among running, crashed, paused and shutdown + type: string + sample: "success" + returned: success +''' VIRT_FAILED = 1 VIRT_SUCCESS = 0 VIRT_UNAVAILABLE=2 @@ -128,6 +155,9 @@ def __init__(self, uri, module): if "xen" in stdout: conn = libvirt.open(None) + elif "esx" in uri: + auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], [], None] + conn = libvirt.openAuth(uri, auth) else: conn = libvirt.open(uri) @@ -410,7 +440,7 @@ def core(module): if state and command=='list_vms': res = v.list_vms(state=state) - if type(res) != dict: + if not isinstance(res, dict): res = { command: res } return VIRT_SUCCESS, res @@ -457,13 +487,13 @@ def core(module): res = {'changed': True, 'created': guest} return VIRT_SUCCESS, res res = getattr(v, command)(guest) - if type(res) != dict: + if not isinstance(res, dict): res = { command: res } return VIRT_SUCCESS, res elif hasattr(v, command): res = getattr(v, command)() - if type(res) != dict: + if not isinstance(res, dict): res = { command: res } return VIRT_SUCCESS, res @@ -490,7 +520,8 @@ def main(): rc = VIRT_SUCCESS try: rc, result = core(module) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg=str(e)) if rc != 0: # something went wrong emit the msg @@ -501,4 +532,7 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/cloud/misc/virt_net.py b/cloud/misc/virt_net.py old mode 100755 new mode 100644 index 21cdca5fbd7..a37c7ca9e38 --- a/cloud/misc/virt_net.py +++ b/cloud/misc/virt_net.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: virt_net @@ -47,10 +51,11 @@ required: false choices: [ "define", "create", "start", "stop", "destroy", "undefine", "get_xml", "list_nets", "facts", - "info", "status"] + "info", "status", "modify"] description: - in addition to state management, various non-idempotent commands are available. See examples. 
+ Modify was added in version 2.1 autostart: required: false choices: ["yes", "no"] @@ -73,50 +78,72 @@ EXAMPLES = ''' # Define a new network -- virt_net: command=define name=br_nat xml='{{ lookup("template", "network/bridge.xml.j2") }}' +- virt_net: + command: define + name: br_nat + xml: '{{ lookup("template", "network/bridge.xml.j2") }}' # Start a network -- virt_net: command=create name=br_nat +- virt_net: + command: create + name: br_nat # List available networks -- virt_net: command=list_nets +- virt_net: + command: list_nets # Get XML data of a specified network -- virt_net: command=get_xml name=br_nat +- virt_net: + command: get_xml + name: br_nat # Stop a network -- virt_net: command=destroy name=br_nat +- virt_net: + command: destroy + name: br_nat # Undefine a network -- virt_net: command=undefine name=br_nat +- virt_net: + command: undefine + name: br_nat # Gather facts about networks # Facts will be available as 'ansible_libvirt_networks' -- virt_net: command=facts +- virt_net: + command: facts # Gather information about network managed by 'libvirt' remotely using uri -- virt_net: command=info uri='{{ item }}' - with_items: libvirt_uris +- virt_net: + command: info + uri: '{{ item }}' + with_items: '{{ libvirt_uris }}' register: networks # Ensure that a network is active (needs to be defined and built first) -- virt_net: state=active name=br_nat +- virt_net: + state: active + name: br_nat # Ensure that a network is inactive -- virt_net: state=inactive name=br_nat +- virt_net: + state: inactive + name: br_nat # Ensure that a given network will be started at boot -- virt_net: autostart=yes name=br_nat +- virt_net: + autostart: yes + name: br_nat # Disable autostart for a given network -- virt_net: autostart=no name=br_nat +- virt_net: + autostart: no + name: br_nat ''' VIRT_FAILED = 1 VIRT_SUCCESS = 0 VIRT_UNAVAILABLE=2 -import sys try: import libvirt @@ -132,9 +159,13 @@ else: HAS_XML = True +from ansible.module_utils.basic import AnsibleModule + + ALL_COMMANDS = [] ENTRY_COMMANDS = ['create', 'status', 'start', 'stop', - 'undefine', 'destroy', 'get_xml', 'define'] + 'undefine', 'destroy', 'get_xml', 'define', + 'modify' ] HOST_COMMANDS = [ 'list_nets', 'facts', 'info' ] ALL_COMMANDS.extend(ENTRY_COMMANDS) ALL_COMMANDS.extend(HOST_COMMANDS) @@ -206,6 +237,48 @@ def create(self, entryid): if not state: return self.module.exit_json(changed=True) + def modify(self, entryid, xml): + network = self.find_entry(entryid) + # identify what type of entry is given in the xml + new_data = etree.fromstring(xml) + old_data = etree.fromstring(network.XMLDesc(0)) + if new_data.tag == 'host': + mac_addr = new_data.get('mac') + hosts = old_data.xpath('/network/ip/dhcp/host') + # find the one mac we're looking for + host = None + for h in hosts: + if h.get('mac') == mac_addr: + host = h + break + if host is None: + # add the host + if not self.module.check_mode: + res = network.update (libvirt.VIR_NETWORK_UPDATE_COMMAND_ADD_LAST, + libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST, + -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT) + else: + # pretend there was a change + res = 0 + if res == 0: + return True + else: + # change the host + if host.get('name') == new_data.get('name') and host.get('ip') == new_data.get('ip'): + return False + else: + if not self.module.check_mode: + res = network.update (libvirt.VIR_NETWORK_UPDATE_COMMAND_MODIFY, + libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST, + -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT) + else: + # pretend there was a change + res = 0 + if res == 0: 
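+                        # libvirt's update() returns 0 on success (check mode fakes the same result above)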
+ return True + # command, section, parentIndex, xml, flags=0 + self.module.fail_json(msg='updating this is not supported yet '+unicode(xml)) + def destroy(self, entryid): if not self.module.check_mode: return self.find_entry(entryid).destroy() @@ -301,7 +374,7 @@ def define_from_xml(self, entryid, xml): return self.conn.networkDefineXML(xml) else: try: - state = self.find_entry(entryid) + self.find_entry(entryid) except: return self.module.exit_json(changed=True) @@ -344,6 +417,9 @@ def set_autostart(self, entryid, state): def create(self, entryid): return self.conn.create(entryid) + + def modify(self, entryid, xml): + return self.conn.modify(entryid, xml) def start(self, entryid): return self.conn.create(entryid) @@ -381,17 +457,17 @@ def facts(self, facts_mode='facts'): try: results[entry]["forward_mode"] = self.conn.get_forward(entry) - except ValueError as e: + except ValueError: pass try: results[entry]["domain"] = self.conn.get_domain(entry) - except ValueError as e: + except ValueError: pass try: results[entry]["macaddress"] = self.conn.get_macaddress(entry) - except ValueError as e: + except ValueError: pass facts = dict() @@ -417,7 +493,7 @@ def core(module): if state and command == 'list_nets': res = v.list_nets(state=state) - if type(res) != dict: + if not isinstance(res, dict): res = { command: res } return VIRT_SUCCESS, res @@ -460,39 +536,43 @@ def core(module): if command in ENTRY_COMMANDS: if not name: module.fail_json(msg = "%s requires 1 argument: name" % command) - if command == 'define': + if command in ('define', 'modify'): if not xml: - module.fail_json(msg = "define requires xml argument") + module.fail_json(msg = command+" requires xml argument") try: v.get_net(name) except EntryNotFound: v.define(name, xml) res = {'changed': True, 'created': name} + else: + if command == 'modify': + mod = v.modify(name, xml) + res = {'changed': mod, 'modified': name} return VIRT_SUCCESS, res res = getattr(v, command)(name) - if type(res) != dict: + if not isinstance(res, dict): res = { command: res } return VIRT_SUCCESS, res elif hasattr(v, command): res = getattr(v, command)() - if type(res) != dict: + if not isinstance(res, dict): res = { command: res } return VIRT_SUCCESS, res else: - module.fail_json(msg="Command %s not recognized" % basecmd) + module.fail_json(msg="Command %s not recognized" % command) - if autostart: + if autostart is not None: if not name: module.fail_json(msg = "state change requires a specified name") res['changed'] = False - if autostart == 'yes': + if autostart: if not v.get_autostart(name): res['changed'] = True res['msg'] = v.set_autostart(name, True) - elif autostart == 'no': + else: if v.get_autostart(name): res['changed'] = True res['msg'] = v.set_autostart(name, False) @@ -511,7 +591,7 @@ def main(): command = dict(choices=ALL_COMMANDS), uri = dict(default='qemu:///system'), xml = dict(), - autostart = dict(choices=['yes', 'no']) + autostart = dict(type='bool') ), supports_check_mode = True ) @@ -529,7 +609,7 @@ def main(): rc = VIRT_SUCCESS try: rc, result = core(module) - except Exception, e: + except Exception as e: module.fail_json(msg=str(e)) if rc != 0: # something went wrong emit the msg @@ -538,6 +618,5 @@ def main(): module.exit_json(**result) -# import module snippets -from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/misc/virt_pool.py b/cloud/misc/virt_pool.py old mode 100755 new mode 100644 index 1089269fc84..4a24dffee08 --- a/cloud/misc/virt_pool.py +++ b/cloud/misc/virt_pool.py 
@@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: virt_pool @@ -79,57 +83,82 @@ EXAMPLES = ''' # Define a new storage pool -- virt_pool: command=define name=vms xml='{{ lookup("template", "pool/dir.xml.j2") }}' +- virt_pool: + command: define + name: vms + xml: '{{ lookup("template", "pool/dir.xml.j2") }}' # Build a storage pool if it does not exist -- virt_pool: command=build name=vms +- virt_pool: + command: build + name: vms # Start a storage pool -- virt_pool: command=create name=vms +- virt_pool: + command: create + name: vms # List available pools -- virt_pool: command=list_pools +- virt_pool: + command: list_pools # Get XML data of a specified pool -- virt_pool: command=get_xml name=vms +- virt_pool: + command: get_xml + name: vms # Stop a storage pool -- virt_pool: command=destroy name=vms +- virt_pool: + command: destroy + name: vms # Delete a storage pool (destroys contents) -- virt_pool: command=delete name=vms +- virt_pool: + command: delete + name: vms # Undefine a storage pool -- virt_pool: command=undefine name=vms +- virt_pool: + command: undefine + name: vms # Gather facts about storage pools # Facts will be available as 'ansible_libvirt_pools' -- virt_pool: command=facts +- virt_pool: + command: facts # Gather information about pools managed by 'libvirt' remotely using uri -- virt_pool: command=info uri='{{ item }}' - with_items: libvirt_uris +- virt_pool: + command: info + uri: '{{ item }}' + with_items: '{{ libvirt_uris }}' register: storage_pools # Ensure that a pool is active (needs to be defined and built first) -- virt_pool: state=active name=vms +- virt_pool: + state: active + name: vms # Ensure that a pool is inactive -- virt_pool: state=inactive name=vms +- virt_pool: + state: inactive + name: vms # Ensure that a given pool will be started at boot -- virt_pool: autostart=yes name=vms +- virt_pool: + autostart: yes + name: vms # Disable autostart for a given pool -- virt_pool: autostart=no name=vms +- virt_pool: + autostart: no + name: vms ''' VIRT_FAILED = 1 VIRT_SUCCESS = 0 VIRT_UNAVAILABLE=2 -import sys - try: import libvirt except ImportError: @@ -144,6 +173,9 @@ else: HAS_XML = True +from ansible.module_utils.basic import AnsibleModule + + ALL_COMMANDS = [] ENTRY_COMMANDS = ['create', 'status', 'start', 'stop', 'build', 'delete', 'undefine', 'destroy', 'get_xml', 'define', 'refresh'] @@ -389,7 +421,7 @@ def define_from_xml(self, entryid, xml): return self.conn.storagePoolDefineXML(xml) else: try: - state = self.find_entry(entryid) + self.find_entry(entryid) except: return self.module.exit_json(changed=True) @@ -499,23 +531,23 @@ def facts(self, facts_mode='facts'): try: results[entry]["host"] = self.conn.get_host(entry) - except ValueError as e: + except ValueError: pass try: results[entry]["source_path"] = self.conn.get_source_path(entry) - except ValueError as e: + except ValueError: pass try: results[entry]["format"] = self.conn.get_format(entry) - except ValueError as e: + except ValueError: pass try: devices = self.conn.get_devices(entry) results[entry]["devices"] = devices - except ValueError as e: + except ValueError: pass else: @@ -545,7 +577,7 @@ def core(module): if state and command == 'list_pools': res = v.list_pools(state=state) - if type(res) != dict: + if not isinstance(res, dict): res = { command: res } return VIRT_SUCCESS, res @@ -607,38 
+639,38 @@ def core(module): return VIRT_SUCCESS, res elif command == 'build': res = v.build(name, mode) - if type(res) != dict: + if not isinstance(res, dict): res = { 'changed': True, command: res } return VIRT_SUCCESS, res elif command == 'delete': res = v.delete(name, mode) - if type(res) != dict: + if not isinstance(res, dict): res = { 'changed': True, command: res } return VIRT_SUCCESS, res res = getattr(v, command)(name) - if type(res) != dict: + if not isinstance(res, dict): res = { command: res } return VIRT_SUCCESS, res elif hasattr(v, command): res = getattr(v, command)() - if type(res) != dict: + if not isinstance(res, dict): res = { command: res } return VIRT_SUCCESS, res else: - module.fail_json(msg="Command %s not recognized" % basecmd) + module.fail_json(msg="Command %s not recognized" % command) - if autostart: + if autostart is not None: if not name: module.fail_json(msg = "state change requires a specified name") res['changed'] = False - if autostart == 'yes': + if autostart: if not v.get_autostart(name): res['changed'] = True res['msg'] = v.set_autostart(name, True) - elif autostart == 'no': + else: if v.get_autostart(name): res['changed'] = True res['msg'] = v.set_autostart(name, False) @@ -657,7 +689,7 @@ def main(): command = dict(choices=ALL_COMMANDS), uri = dict(default='qemu:///system'), xml = dict(), - autostart = dict(choices=['yes', 'no']), + autostart = dict(type='bool'), mode = dict(choices=ALL_MODES), ), supports_check_mode = True @@ -676,7 +708,7 @@ def main(): rc = VIRT_SUCCESS try: rc, result = core(module) - except Exception, e: + except Exception as e: module.fail_json(msg=str(e)) if rc != 0: # something went wrong emit the msg @@ -685,6 +717,5 @@ def main(): module.exit_json(**result) -# import module snippets -from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/openstack/__init__.py b/cloud/openstack/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/openstack/os_flavor_facts.py b/cloud/openstack/os_flavor_facts.py new file mode 100644 index 00000000000..c6e938b63b1 --- /dev/null +++ b/cloud/openstack/os_flavor_facts.py @@ -0,0 +1,249 @@ +#!/usr/bin/python + +# Copyright (c) 2015 IBM +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +import re + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +from distutils.version import StrictVersion + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: os_flavor_facts +short_description: Retrieve facts about one or more flavors +author: "David Shrewsbury (@Shrews)" +version_added: "2.1" +description: + - Retrieve facts about available OpenStack instance flavors. By default, + facts about ALL flavors are retrieved. Filters can be applied to get + facts for only matching flavors. 
For example, you can filter on the + amount of RAM available to the flavor, or the number of virtual CPUs + available to the flavor, or both. When specifying multiple filters, + *ALL* filters must match on a flavor before that flavor is returned as + a fact. +notes: + - This module creates a new top-level C(openstack_flavors) fact, which + contains a list of unsorted flavors. +requirements: + - "python >= 2.6" + - "shade" +options: + name: + description: + - A flavor name. Cannot be used with I(ram) or I(vcpus) or I(ephemeral). + required: false + default: None + ram: + description: + - "A string used for filtering flavors based on the amount of RAM + (in MB) desired. This string accepts the following special values: + 'MIN' (return flavors with the minimum amount of RAM), and 'MAX' + (return flavors with the maximum amount of RAM)." + + - "A specific amount of RAM may also be specified. Any flavors with this + exact amount of RAM will be returned." + + - "A range of acceptable RAM may be given using a special syntax. Simply + prefix the amount of RAM with one of these acceptable range values: + '<', '>', '<=', '>='. These values represent less than, greater than, + less than or equal to, and greater than or equal to, respectively." + required: false + default: false + vcpus: + description: + - A string used for filtering flavors based on the number of virtual + CPUs desired. Format is the same as the I(ram) parameter. + required: false + default: false + limit: + description: + - Limits the number of flavors returned. All matching flavors are + returned by default. + required: false + default: None + ephemeral: + description: + - A string used for filtering flavors based on the amount of ephemeral + storage. Format is the same as the I(ram) parameter + required: false + default: false + version_added: "2.3" +extends_documentation_fragment: openstack +''' + +EXAMPLES = ''' +# Gather facts about all available flavors +- os_flavor_facts: + cloud: mycloud + +# Gather facts for the flavor named "xlarge-flavor" +- os_flavor_facts: + cloud: mycloud + name: "xlarge-flavor" + +# Get all flavors that have exactly 512 MB of RAM. +- os_flavor_facts: + cloud: mycloud + ram: "512" + +# Get all flavors that have 1024 MB or more of RAM. +- os_flavor_facts: + cloud: mycloud + ram: ">=1024" + +# Get a single flavor that has the minimum amount of RAM. Using the 'limit' +# option will guarantee only a single flavor is returned. +- os_flavor_facts: + cloud: mycloud + ram: "MIN" + limit: 1 + +# Get all flavors with 1024 MB of RAM or more, AND exactly 2 virtual CPUs. +- os_flavor_facts: + cloud: mycloud + ram: ">=1024" + vcpus: "2" + +# Get all flavors with 1024 MB of RAM or more, exactly 2 virtual CPUs, and +# less than 30gb of ephemeral storage. +- os_flavor_facts: + cloud: mycloud + ram: ">=1024" + vcpus: "2" + ephemeral: "<30" +''' + + +RETURN = ''' +openstack_flavors: + description: Dictionary describing the flavors. + returned: On success. + type: dictionary + contains: + id: + description: Flavor ID. + returned: success + type: string + sample: "515256b8-7027-4d73-aa54-4e30a4a4a339" + name: + description: Flavor name. + returned: success + type: string + sample: "tiny" + disk: + description: Size of local disk, in GB. + returned: success + type: int + sample: 10 + ephemeral: + description: Ephemeral space size, in GB. + returned: success + type: int + sample: 10 + ram: + description: Amount of memory, in MB. + returned: success + type: int + sample: 1024 + swap: + description: Swap space size, in MB. 
+ returned: success + type: int + sample: 100 + vcpus: + description: Number of virtual CPUs. + returned: success + type: int + sample: 2 + is_public: + description: Make flavor accessible to the public. + returned: success + type: bool + sample: true +''' + + +def main(): + argument_spec = openstack_full_argument_spec( + name=dict(required=False, default=None), + ram=dict(required=False, default=None), + vcpus=dict(required=False, default=None), + limit=dict(required=False, default=None, type='int'), + ephemeral=dict(required=False, default=None), + ) + module_kwargs = openstack_module_kwargs( + mutually_exclusive=[ + ['name', 'ram'], + ['name', 'vcpus'], + ['name', 'ephemeral'] + ] + ) + module = AnsibleModule(argument_spec, **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + name = module.params['name'] + vcpus = module.params['vcpus'] + ram = module.params['ram'] + ephemeral = module.params['ephemeral'] + limit = module.params['limit'] + + try: + cloud = shade.openstack_cloud(**module.params) + if name: + flavors = cloud.search_flavors(filters={'name': name}) + + else: + flavors = cloud.list_flavors() + filters = {} + if vcpus: + filters['vcpus'] = vcpus + if ram: + filters['ram'] = ram + if ephemeral: + filters['ephemeral'] = ephemeral + if filters: + # Range search added in 1.5.0 + if StrictVersion(shade.__version__) < StrictVersion('1.5.0'): + module.fail_json(msg="Shade >= 1.5.0 needed for this functionality") + flavors = cloud.range_search(flavors, filters) + + if limit is not None: + flavors = flavors[:limit] + + module.exit_json(changed=False, + ansible_facts=dict(openstack_flavors=flavors)) + + except shade.OpenStackCloudException as e: + module.fail_json(msg=str(e)) + + +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * + +if __name__ == '__main__': + main() diff --git a/cloud/openstack/os_group.py b/cloud/openstack/os_group.py new file mode 100644 index 00000000000..2347efb483f --- /dev/null +++ b/cloud/openstack/os_group.py @@ -0,0 +1,171 @@ +#!/usr/bin/python +# Copyright (c) 2016 IBM +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: os_group +short_description: Manage OpenStack Identity Groups +extends_documentation_fragment: openstack +version_added: "2.1" +author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)" +description: + - Manage OpenStack Identity Groups. Groups can be created, deleted or + updated. Only the I(description) value can be updated. +options: + name: + description: + - Group name + required: true + description: + description: + - Group description + required: false + default: None + state: + description: + - Should the resource be present or absent. 
+ choices: [present, absent] + default: present +requirements: + - "python >= 2.6" + - "shade" +''' + +EXAMPLES = ''' +# Create a group named "demo" +- os_group: + cloud: mycloud + state: present + name: demo + description: "Demo Group" + +# Update the description on existing "demo" group +- os_group: + cloud: mycloud + state: present + name: demo + description: "Something else" + +# Delete group named "demo" +- os_group: + cloud: mycloud + state: absent + name: demo +''' + +RETURN = ''' +group: + description: Dictionary describing the group. + returned: On success when I(state) is 'present'. + type: dictionary + contains: + id: + description: Unique group ID + type: string + sample: "ee6156ff04c645f481a6738311aea0b0" + name: + description: Group name + type: string + sample: "demo" + description: + description: Group description + type: string + sample: "Demo Group" + domain_id: + description: Domain for the group + type: string + sample: "default" +''' + + +def _system_state_change(state, description, group): + if state == 'present' and not group: + return True + if state == 'present' and description is not None and group.description != description: + return True + if state == 'absent' and group: + return True + return False + + +def main(): + argument_spec = openstack_full_argument_spec( + name=dict(required=True), + description=dict(required=False, default=None), + state=dict(default='present', choices=['absent', 'present']), + ) + + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, + supports_check_mode=True, + **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + name = module.params.pop('name') + description = module.params.pop('description') + state = module.params.pop('state') + + try: + cloud = shade.operator_cloud(**module.params) + group = cloud.get_group(name) + + if module.check_mode: + module.exit_json(changed=_system_state_change(state, description, group)) + + if state == 'present': + if group is None: + group = cloud.create_group( + name=name, description=description) + changed = True + else: + if description is not None and group.description != description: + group = cloud.update_group( + group.id, description=description) + changed = True + else: + changed = False + module.exit_json(changed=changed, group=group) + + elif state == 'absent': + if group is None: + changed=False + else: + cloud.delete_group(group.id) + changed=True + module.exit_json(changed=changed) + + except shade.OpenStackCloudException as e: + module.fail_json(msg=str(e)) + + +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * + +if __name__ == '__main__': + main() diff --git a/cloud/openstack/os_ironic_inspect.py b/cloud/openstack/os_ironic_inspect.py new file mode 100644 index 00000000000..b436f7f0429 --- /dev/null +++ b/cloud/openstack/os_ironic_inspect.py @@ -0,0 +1,173 @@ +#!/usr/bin/python +# coding: utf-8 -*- + +# (c) 2015-2016, Hewlett Packard Enterprise Development Company LP +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+    import shade
+    HAS_SHADE = True
+except ImportError:
+    HAS_SHADE = False
+
+from distutils.version import StrictVersion
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_ironic_inspect
+short_description: Explicitly triggers baremetal node introspection in ironic.
+extends_documentation_fragment: openstack
+author: "Julia Kreger (@juliakreger)"
+version_added: "2.1"
+description:
+    - Requests Ironic to set a node into inspect state in order to collect metadata regarding the node.
+      This command may be out of band or in-band depending on the ironic driver configuration.
+      This is only possible on nodes in 'manageable' and 'available' state.
+options:
+    mac:
+      description:
+        - unique mac address that is used to attempt to identify the host.
+      required: false
+      default: None
+    uuid:
+      description:
+        - globally unique identifier (UUID) to identify the host.
+      required: false
+      default: None
+    name:
+      description:
+        - unique name identifier to identify the host in Ironic.
+      required: false
+      default: None
+    ironic_url:
+      description:
+        - If noauth mode is utilized, this is required to be set to the endpoint URL for the Ironic API.
+          Use with "auth" and "auth_type" settings set to None.
+      required: false
+      default: None
+    timeout:
+      description:
+        - A timeout in seconds to tell the role to wait for the node to complete introspection if wait is set to True.
+      required: false
+      default: 1200
+
+requirements: ["shade"]
+'''
+
+RETURN = '''
+ansible_facts:
+    description: Dictionary of new facts representing discovered properties of the node.
+    returned: changed
+    type: dictionary
+    contains:
+        memory_mb:
+            description: Amount of node memory as updated in the node properties
+            type: string
+            sample: "1024"
+        cpu_arch:
+            description: Detected CPU architecture type
+            type: string
+            sample: "x86_64"
+        local_gb:
+            description: Total size of local disk storage as updated in node properties.
+            type: string
+            sample: "10"
+        cpus:
+            description: Count of cpu cores defined in the updated node properties.
+ type: string + sample: "1" +''' + +EXAMPLES = ''' +# Invoke node inspection +- os_ironic_inspect: + name: "testnode1" +''' + + +def _choose_id_value(module): + if module.params['uuid']: + return module.params['uuid'] + if module.params['name']: + return module.params['name'] + return None + + +def main(): + argument_spec = openstack_full_argument_spec( + auth_type=dict(required=False), + uuid=dict(required=False), + name=dict(required=False), + mac=dict(required=False), + ironic_url=dict(required=False), + timeout=dict(default=1200, type='int', required=False), + ) + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + if StrictVersion(shade.__version__) < StrictVersion('1.0.0'): + module.fail_json(msg="To utilize this module, the installed version of" + "the shade library MUST be >=1.0.0") + + if (module.params['auth_type'] in [None, 'None'] and + module.params['ironic_url'] is None): + module.fail_json(msg="Authentication appears to be disabled, " + "Please define an ironic_url parameter") + + if (module.params['ironic_url'] and + module.params['auth_type'] in [None, 'None']): + module.params['auth'] = dict( + endpoint=module.params['ironic_url'] + ) + + try: + cloud = shade.operator_cloud(**module.params) + + if module.params['name'] or module.params['uuid']: + server = cloud.get_machine(_choose_id_value(module)) + elif module.params['mac']: + server = cloud.get_machine_by_mac(module.params['mac']) + else: + module.fail_json(msg="The worlds did not align, " + "the host was not found as " + "no name, uuid, or mac was " + "defined.") + if server: + cloud.inspect_machine(server['uuid'], module.params['wait']) + # TODO(TheJulia): diff properties, ?and ports? and determine + # if a change occured. In theory, the node is always changed + # if introspection is able to update the record. + module.exit_json(changed=True, + ansible_facts=server['properties']) + + else: + module.fail_json(msg="node not found.") + + except shade.OpenStackCloudException as e: + module.fail_json(msg=str(e)) + + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * + +if __name__ == "__main__": + main() diff --git a/cloud/openstack/os_keystone_domain.py b/cloud/openstack/os_keystone_domain.py new file mode 100644 index 00000000000..b355971e8b5 --- /dev/null +++ b/cloud/openstack/os_keystone_domain.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
+ + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: os_keystone_domain +short_description: Manage OpenStack Identity Domains +extends_documentation_fragment: openstack +version_added: "2.1" +description: + - Create, update, or delete OpenStack Identity domains. If a domain + with the supplied name already exists, it will be updated with the + new description and enabled attributes. +options: + name: + description: + - Name that has to be given to the instance + required: true + description: + description: + - Description of the domain + required: false + default: None + enabled: + description: + - Is the domain enabled + required: false + default: True + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + default: present +requirements: + - "python >= 2.6" + - "shade" +''' + +EXAMPLES = ''' +# Create a domain +- os_keystone_domain: + cloud: mycloud + state: present + name: demo + description: Demo Domain + +# Delete a domain +- os_keystone_domain: + cloud: mycloud + state: absent + name: demo +''' + +RETURN = ''' +domain: + description: Dictionary describing the domain. + returned: On success when I(state) is 'present' + type: dictionary + contains: + id: + description: Domain ID. + type: string + sample: "474acfe5-be34-494c-b339-50f06aa143e4" + name: + description: Domain name. + type: string + sample: "demo" + description: + description: Domain description. + type: string + sample: "Demo Domain" + enabled: + description: Domain description. + type: boolean + sample: True + +id: + description: The domain ID. + returned: On success when I(state) is 'present' + type: string + sample: "474acfe5-be34-494c-b339-50f06aa143e4" +''' + +def _needs_update(module, domain): + if domain.description != module.params['description']: + return True + if domain.enabled != module.params['enabled']: + return True + return False + +def _system_state_change(module, domain): + state = module.params['state'] + if state == 'absent' and domain: + return True + + if state == 'present': + if domain is None: + return True + return _needs_update(module, domain) + + return False + +def main(): + + argument_spec = openstack_full_argument_spec( + name=dict(required=True), + description=dict(default=None), + enabled=dict(default=True, type='bool'), + state=dict(default='present', choices=['absent', 'present']), + ) + + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, + supports_check_mode=True, + **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + name = module.params['name'] + description = module.params['description'] + enabled = module.params['enabled'] + state = module.params['state'] + + try: + cloud = shade.operator_cloud(**module.params) + + domains = cloud.search_domains(filters=dict(name=name)) + + if len(domains) > 1: + module.fail_json(msg='Domain name %s is not unique' % name) + elif len(domains) == 1: + domain = domains[0] + else: + domain = None + + if module.check_mode: + module.exit_json(changed=_system_state_change(module, domain)) + + if state == 'present': + if domain is None: + domain = cloud.create_domain( + name=name, description=description, enabled=enabled) + changed = True + else: + if _needs_update(module, domain): + domain = cloud.update_domain( + domain.id, name=name, description=description, + 
enabled=enabled)
+                changed = True
+            else:
+                changed = False
+        module.exit_json(changed=changed, domain=domain, id=domain.id)
+
+    elif state == 'absent':
+        if domain is None:
+            changed = False
+        else:
+            cloud.delete_domain(domain.id)
+            changed = True
+        module.exit_json(changed=changed)
+
+    except shade.OpenStackCloudException as e:
+        module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/openstack/os_keystone_domain_facts.py b/cloud/openstack/os_keystone_domain_facts.py
new file mode 100644
index 00000000000..9e363415210
--- /dev/null
+++ b/cloud/openstack/os_keystone_domain_facts.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+    import shade
+    HAS_SHADE = True
+except ImportError:
+    HAS_SHADE = False
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_keystone_domain_facts
+short_description: Retrieve facts about one or more OpenStack domains
+extends_documentation_fragment: openstack
+version_added: "2.1"
+author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
+description:
+    - Retrieve facts about one or more OpenStack domains
+requirements:
+    - "python >= 2.6"
+    - "shade"
+options:
+   name:
+     description:
+        - Name or ID of the domain
+     required: false
+   filters:
+     description:
+        - A dictionary of meta data to use for further filtering. Elements of
+          this dictionary may be additional dictionaries.
+     required: false
+     default: None
+'''
+
+EXAMPLES = '''
+# Gather facts about previously created domain
+- os_keystone_domain_facts:
+    cloud: awesomecloud
+- debug:
+    var: openstack_domains
+
+# Gather facts about a previously created domain by name
+- os_keystone_domain_facts:
+    cloud: awesomecloud
+    name: demodomain
+- debug:
+    var: openstack_domains
+
+# Gather facts about a previously created domain with filter
+- os_keystone_domain_facts:
+    cloud: awesomecloud
+    name: demodomain
+    filters:
+      enabled: False
+- debug:
+    var: openstack_domains
+'''
+
+
+RETURN = '''
+openstack_domains:
+    description: has all the OpenStack facts about domains
+    returned: always, but can be null
+    type: complex
+    contains:
+        id:
+            description: Unique UUID.
+            returned: success
+            type: string
+        name:
+            description: Name given to the domain.
+            returned: success
+            type: string
+        description:
+            description: Description of the domain.
+            returned: success
+            type: string
+        enabled:
+            description: Flag to indicate if the domain is enabled.
+            returned: success
+            type: bool
+'''
+
+def main():
+
+    argument_spec = openstack_full_argument_spec(
+        name=dict(required=False, default=None),
+        filters=dict(required=False, type='dict', default=None),
+    )
+    module_kwargs = openstack_module_kwargs(
+        mutually_exclusive=[
+            ['name', 'filters'],
+        ]
+    )
+    module = AnsibleModule(argument_spec, **module_kwargs)
+
+    if not HAS_SHADE:
+        module.fail_json(msg='shade is required for this module')
+
+    try:
+        name = module.params['name']
+        filters = module.params['filters']
+
+        opcloud = shade.operator_cloud(**module.params)
+
+        if name:
+            # Let's suppose user is passing domain ID
+            try:
+                domains = opcloud.get_domain(name)
+            except:
+                domains = opcloud.search_domains(filters={'name': name})
+
+        else:
+            domains = opcloud.search_domains(filters)
+
+        module.exit_json(changed=False, ansible_facts=dict(
+            openstack_domains=domains))
+
+    except shade.OpenStackCloudException as e:
+        module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/openstack/os_keystone_role.py b/cloud/openstack/os_keystone_role.py
new file mode 100644
index 00000000000..db5b0027c05
--- /dev/null
+++ b/cloud/openstack/os_keystone_role.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# Copyright (c) 2016 IBM
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+    import shade
+    HAS_SHADE = True
+except ImportError:
+    HAS_SHADE = False
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_keystone_role
+short_description: Manage OpenStack Identity Roles
+extends_documentation_fragment: openstack
+version_added: "2.1"
+author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)"
+description:
+    - Manage OpenStack Identity Roles.
+options:
+   name:
+     description:
+        - Role Name
+     required: true
+   state:
+     description:
+       - Should the resource be present or absent.
+     choices: [present, absent]
+     default: present
+requirements:
+    - "python >= 2.6"
+    - "shade"
+'''
+
+EXAMPLES = '''
+# Create a role named "demo"
+- os_keystone_role:
+    cloud: mycloud
+    state: present
+    name: demo
+
+# Delete the role named "demo"
+- os_keystone_role:
+    cloud: mycloud
+    state: absent
+    name: demo
+'''
+
+RETURN = '''
+role:
+    description: Dictionary describing the role.
+    returned: On success when I(state) is 'present'.
+    type: dictionary
+    contains:
+        id:
+            description: Unique role ID.
+            type: string
+            sample: "677bfab34c844a01b88a217aa12ec4c2"
+        name:
+            description: Role name.
+ type: string + sample: "demo" +''' + + +def _system_state_change(state, role): + if state == 'present' and not role: + return True + if state == 'absent' and role: + return True + return False + + +def main(): + argument_spec = openstack_full_argument_spec( + name=dict(required=True), + state=dict(default='present', choices=['absent', 'present']), + ) + + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, + supports_check_mode=True, + **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + name = module.params.pop('name') + state = module.params.pop('state') + + try: + cloud = shade.operator_cloud(**module.params) + + role = cloud.get_role(name) + + if module.check_mode: + module.exit_json(changed=_system_state_change(state, role)) + + if state == 'present': + if role is None: + role = cloud.create_role(name) + changed = True + else: + changed = False + module.exit_json(changed=changed, role=role) + elif state == 'absent': + if role is None: + changed=False + else: + cloud.delete_role(name) + changed=True + module.exit_json(changed=changed) + + except shade.OpenStackCloudException as e: + module.fail_json(msg=str(e)) + + +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * + + +if __name__ == '__main__': + main() diff --git a/cloud/openstack/os_keystone_service.py b/cloud/openstack/os_keystone_service.py new file mode 100644 index 00000000000..d23f2881621 --- /dev/null +++ b/cloud/openstack/os_keystone_service.py @@ -0,0 +1,214 @@ +#!/usr/bin/python +# Copyright 2016 Sam Yaple +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +from distutils.version import StrictVersion + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: os_keystone_service +short_description: Manage OpenStack Identity services +extends_documentation_fragment: openstack +author: "Sam Yaple (@SamYaple)" +version_added: "2.2" +description: + - Create, update, or delete OpenStack Identity service. If a service + with the supplied name already exists, it will be updated with the + new description and enabled attributes. +options: + name: + description: + - Name of the service + required: true + description: + description: + - Description of the service + required: false + default: None + enabled: + description: + - Is the service enabled + required: false + default: True + service_type: + description: + - The type of service + required: true + state: + description: + - Should the resource be present or absent. 
+ choices: [present, absent] + default: present +requirements: + - "python >= 2.6" + - "shade" +''' + +EXAMPLES = ''' +# Create a service for glance +- os_keystone_service: + cloud: mycloud + state: present + name: glance + service_type: image + description: OpenStack Image Service +# Delete a service +- os_keystone_service: + cloud: mycloud + state: absent + name: glance + service_type: image +''' + +RETURN = ''' +service: + description: Dictionary describing the service. + returned: On success when I(state) is 'present' + type: dictionary + contains: + id: + description: Service ID. + type: string + sample: "3292f020780b4d5baf27ff7e1d224c44" + name: + description: Service name. + type: string + sample: "glance" + service_type: + description: Service type. + type: string + sample: "image" + description: + description: Service description. + type: string + sample: "OpenStack Image Service" + enabled: + description: Service status. + type: boolean + sample: True +id: + description: The service ID. + returned: On success when I(state) is 'present' + type: string + sample: "3292f020780b4d5baf27ff7e1d224c44" +''' + + +def _needs_update(module, service): + if service.enabled != module.params['enabled']: + return True + if service.description is not None and \ + service.description != module.params['description']: + return True + return False + + +def _system_state_change(module, service): + state = module.params['state'] + if state == 'absent' and service: + return True + + if state == 'present': + if service is None: + return True + return _needs_update(module, service) + + return False + + +def main(): + argument_spec = openstack_full_argument_spec( + description=dict(default=None), + enabled=dict(default=True, type='bool'), + name=dict(required=True), + service_type=dict(required=True), + state=dict(default='present', choices=['absent', 'present']), + ) + + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, + supports_check_mode=True, + **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + if StrictVersion(shade.__version__) < StrictVersion('1.6.0'): + module.fail_json(msg="To utilize this module, the installed version of" + "the shade library MUST be >=1.6.0") + + description = module.params['description'] + enabled = module.params['enabled'] + name = module.params['name'] + state = module.params['state'] + service_type = module.params['service_type'] + + try: + cloud = shade.operator_cloud(**module.params) + + services = cloud.search_services(name_or_id=name, + filters=dict(type=service_type)) + + if len(services) > 1: + module.fail_json(msg='Service name %s and type %s are not unique' % + (name, service_type)) + elif len(services) == 1: + service = services[0] + else: + service = None + + if module.check_mode: + module.exit_json(changed=_system_state_change(module, service)) + + if state == 'present': + if service is None: + service = cloud.create_service(name=name, + description=description, type=service_type, enabled=True) + changed = True + else: + if _needs_update(module, service): + service = cloud.update_service( + service.id, name=name, type=service_type, enabled=enabled, + description=description) + changed = True + else: + changed = False + module.exit_json(changed=changed, service=service, id=service.id) + + elif state == 'absent': + if service is None: + changed=False + else: + cloud.delete_service(service.id) + changed=True + module.exit_json(changed=changed) + + except shade.OpenStackCloudException 
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+if __name__ == '__main__':
+    main()
diff --git a/cloud/openstack/os_port_facts.py b/cloud/openstack/os_port_facts.py
new file mode 100644
index 00000000000..0da37d88ef4
--- /dev/null
+++ b/cloud/openstack/os_port_facts.py
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+
+# Copyright (c) 2016 IBM
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+    import shade
+    HAS_SHADE = True
+except ImportError:
+    HAS_SHADE = False
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+module: os_port_facts
+short_description: Retrieve facts about ports within OpenStack.
+version_added: "2.1"
+author: "David Shrewsbury (@Shrews)"
+description:
+    - Retrieve facts about ports from OpenStack.
+notes:
+    - Facts are placed in the C(openstack_ports) variable.
+requirements:
+    - "python >= 2.6"
+    - "shade"
+options:
+    port:
+        description:
+            - Unique name or ID of a port.
+        required: false
+        default: null
+    filters:
+        description:
+            - A dictionary of meta data to use for further filtering. Elements
+              of this dictionary will be matched against the returned port
+              dictionaries. Matching is currently limited to strings within
+              the port dictionary, or strings within nested dictionaries.
+        required: false
+        default: null
+extends_documentation_fragment: openstack
+'''

+EXAMPLES = '''
+# Gather facts about all ports
+- os_port_facts:
+    cloud: mycloud

+# Gather facts about a single port
+- os_port_facts:
+    cloud: mycloud
+    port: 6140317d-e676-31e1-8a4a-b1913814a471

+# Gather facts about all ports that have device_id set to a specific value
+# and with a status of ACTIVE.
+- os_port_facts:
+    cloud: mycloud
+    filters:
+      device_id: 1038a010-3a37-4a9d-82ea-652f1da36597
+      status: ACTIVE
+'''

+RETURN = '''
+openstack_ports:
+    description: List of port dictionaries. A subset of the dictionary keys
+                 listed below may be returned, depending on your cloud
+                 provider.
+    returned: always, but can be null
+    type: complex
+    contains:
+        admin_state_up:
+            description: The administrative state of the router, which is
+                         up (true) or down (false).
+            returned: success
+            type: boolean
+            sample: true
+        allowed_address_pairs:
+            description: A set of zero or more allowed address pairs. An
+                         address pair consists of an IP address and MAC
+                         address.
+            returned: success
+            type: list
+            sample: []
+        "binding:host_id":
+            description: The UUID of the host where the port is allocated.
+            returned: success
+            type: string
+            sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759"
+        "binding:profile":
+            description: A dictionary that enables the application running on
+                         the host to pass and receive VIF port-specific
+                         information to the plug-in.
+            returned: success
+            type: dict
+            sample: {}
+        "binding:vif_details":
+            description: A dictionary that enables the application to pass
+                         information about functions that the Networking API
+                         provides.
+            returned: success
+            type: dict
+            sample: {"port_filter": true}
+        "binding:vif_type":
+            description: The VIF type for the port.
+            returned: success
+            type: string
+            sample: "ovs"
+        "binding:vnic_type":
+            description: The virtual network interface card (vNIC) type that is
+                         bound to the neutron port.
+            returned: success
+            type: string
+            sample: "normal"
+        device_id:
+            description: The UUID of the device that uses this port.
+            returned: success
+            type: string
+            sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759"
+        device_owner:
+            description: The entity that uses this port (for example,
+                         network:router_interface).
+            returned: success
+            type: string
+            sample: "network:router_interface"
+        dns_assignment:
+            description: DNS assignment information.
+            returned: success
+            type: list
+        dns_name:
+            description: DNS name
+            returned: success
+            type: string
+            sample: ""
+        extra_dhcp_opts:
+            description: A set of zero or more extra DHCP option pairs.
+                         An option pair consists of an option value and name.
+            returned: success
+            type: list
+            sample: []
+        fixed_ips:
+            description: The IP addresses for the port. Includes the IP address
+                         and UUID of the subnet.
+            returned: success
+            type: list
+        id:
+            description: The UUID of the port.
+            returned: success
+            type: string
+            sample: "3ec25c97-7052-4ab8-a8ba-92faf84148de"
+        ip_address:
+            description: The IP address.
+            returned: success
+            type: string
+            sample: "127.0.0.1"
+        mac_address:
+            description: The MAC address.
+            returned: success
+            type: string
+            sample: "00:00:5E:00:53:42"
+        name:
+            description: The port name.
+            returned: success
+            type: string
+            sample: "port_name"
+        network_id:
+            description: The UUID of the attached network.
+            returned: success
+            type: string
+            sample: "dd1ede4f-3952-4131-aab6-3b8902268c7d"
+        port_security_enabled:
+            description: The port security status. The status is enabled (true) or disabled (false).
+            returned: success
+            type: boolean
+            sample: false
+        security_groups:
+            description: The UUIDs of any attached security groups.
+            returned: success
+            type: list
+        status:
+            description: The port status.
+            returned: success
+            type: string
+            sample: "ACTIVE"
+        tenant_id:
+            description: The UUID of the tenant who owns the network.
+ returned: success + type: string + sample: "51fce036d7984ba6af4f6c849f65ef00" +''' + + +def main(): + argument_spec = openstack_full_argument_spec( + port=dict(required=False), + filters=dict(type='dict', required=False), + ) + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + port = module.params.pop('port') + filters = module.params.pop('filters') + + try: + cloud = shade.openstack_cloud(**module.params) + ports = cloud.search_ports(port, filters) + module.exit_json(changed=False, ansible_facts=dict( + openstack_ports=ports)) + + except shade.OpenStackCloudException as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * + +if __name__ == '__main__': + main() diff --git a/cloud/openstack/os_project.py b/cloud/openstack/os_project.py new file mode 100644 index 00000000000..22f50107558 --- /dev/null +++ b/cloud/openstack/os_project.py @@ -0,0 +1,232 @@ +#!/usr/bin/python +# Copyright (c) 2015 IBM Corporation +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +from distutils.version import StrictVersion + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: os_project +short_description: Manage OpenStack Projects +extends_documentation_fragment: openstack +version_added: "2.0" +author: "Alberto Gireud (@agireud)" +description: + - Manage OpenStack Projects. Projects can be created, + updated or deleted using this module. A project will be updated + if I(name) matches an existing project and I(state) is present. + The value for I(name) cannot be updated without deleting and + re-creating the project. +options: + name: + description: + - Name for the project + required: true + description: + description: + - Description for the project + required: false + default: None + domain_id: + description: + - Domain id to create the project in if the cloud supports domains. + The domain_id parameter requires shade >= 1.8.0 + required: false + default: None + aliases: ['domain'] + enabled: + description: + - Is the project enabled + required: false + default: True + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + default: present +requirements: + - "python >= 2.6" + - "shade" +''' + +EXAMPLES = ''' +# Create a project +- os_project: + cloud: mycloud + state: present + name: demoproject + description: demodescription + domain_id: demoid + enabled: True + +# Delete a project +- os_project: + cloud: mycloud + state: absent + name: demoproject +''' + + +RETURN = ''' +project: + description: Dictionary describing the project. 
+    returned: On success when I(state) is 'present'
+    type: dictionary
+    contains:
+        id:
+            description: Project ID
+            type: string
+            sample: "f59382db809c43139982ca4189404650"
+        name:
+            description: Project name
+            type: string
+            sample: "demoproject"
+        description:
+            description: Project description
+            type: string
+            sample: "demodescription"
+        enabled:
+            description: Boolean to indicate if project is enabled
+            type: bool
+            sample: True
+'''
+
+def _needs_update(module, project):
+    keys = ('description', 'enabled')
+    for key in keys:
+        if module.params[key] is not None and module.params[key] != project.get(key):
+            return True
+
+    return False
+
+def _system_state_change(module, project):
+    state = module.params['state']
+    if state == 'present':
+        if project is None:
+            changed = True
+        else:
+            if _needs_update(module, project):
+                changed = True
+            else:
+                changed = False
+
+    elif state == 'absent':
+        if project is None:
+            changed = False
+        else:
+            changed = True
+
+    return changed
+
+def main():
+
+    argument_spec = openstack_full_argument_spec(
+        name=dict(required=True),
+        description=dict(required=False, default=None),
+        domain_id=dict(required=False, default=None, aliases=['domain']),
+        enabled=dict(default=True, type='bool'),
+        state=dict(default='present', choices=['absent', 'present'])
+    )
+
+    module_kwargs = openstack_module_kwargs()
+    module = AnsibleModule(
+        argument_spec,
+        supports_check_mode=True,
+        **module_kwargs
+    )
+
+    if not HAS_SHADE:
+        module.fail_json(msg='shade is required for this module')
+
+    name = module.params['name']
+    description = module.params['description']
+    domain = module.params.pop('domain_id')
+    enabled = module.params['enabled']
+    state = module.params['state']
+
+    if domain and StrictVersion(shade.__version__) < StrictVersion('1.8.0'):
+        module.fail_json(msg="The domain argument requires shade >=1.8.0")
+
+    try:
+        if domain:
+            opcloud = shade.operator_cloud(**module.params)
+            try:
+                # We assume admin is passing domain id
+                dom = opcloud.get_domain(domain)['id']
+                domain = dom
+            except Exception:
+                # If we fail, maybe admin is passing a domain name.
+                # Note that domains have unique names, just like id.
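+                # Lookup order: (1) treat the value as a domain ID; (2) fall
+                # back to a unique-name search; (3) if both fail, pass the
+                # raw value through and let keystone validate it.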
+                try:
+                    dom = opcloud.search_domains(filters={'name': domain})[0]['id']
+                    domain = dom
+                except Exception:
+                    # Ok, let's hope the user is non-admin and passing a sane id
+                    pass
+
+        cloud = shade.openstack_cloud(**module.params)
+
+        if domain:
+            project = cloud.get_project(name, domain_id=domain)
+        else:
+            project = cloud.get_project(name)
+
+        if module.check_mode:
+            module.exit_json(changed=_system_state_change(module, project))
+
+        if state == 'present':
+            if project is None:
+                project = cloud.create_project(
+                    name=name, description=description,
+                    domain_id=domain,
+                    enabled=enabled)
+                changed = True
+            else:
+                if _needs_update(module, project):
+                    project = cloud.update_project(
+                        project['id'], description=description,
+                        enabled=enabled)
+                    changed = True
+                else:
+                    changed = False
+            module.exit_json(changed=changed, project=project)
+
+        elif state == 'absent':
+            if project is None:
+                changed = False
+            else:
+                cloud.delete_project(project['id'])
+                changed = True
+            module.exit_json(changed=changed)
+
+    except shade.OpenStackCloudException as e:
+        module.fail_json(msg=e.message, extra_data=e.extra_data)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/openstack/os_project_facts.py b/cloud/openstack/os_project_facts.py
new file mode 100644
index 00000000000..856b6304ce7
--- /dev/null
+++ b/cloud/openstack/os_project_facts.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+    import shade
+    HAS_SHADE = True
+except ImportError:
+    HAS_SHADE = False
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_project_facts
+short_description: Retrieve facts about one or more OpenStack projects
+extends_documentation_fragment: openstack
+version_added: "2.1"
+author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
+description:
+    - Retrieve facts about one or more OpenStack projects
+requirements:
+    - "python >= 2.6"
+    - "shade"
+options:
+   name:
+      description:
+        - Name or ID of the project
+      required: false
+      default: None
+   domain:
+      description:
+        - Name or ID of the domain containing the project if the cloud supports domains
+      required: false
+      default: None
+   filters:
+      description:
+        - A dictionary of meta data to use for further filtering. Elements of
+          this dictionary may be additional dictionaries.
+      required: false
+      default: None
+'''

+EXAMPLES = '''
+# Gather facts about previously created projects
+- os_project_facts:
+    cloud: awesomecloud
+- debug:
+    var: openstack_projects

+# Gather facts about a previously created project by name
+- os_project_facts:
+    cloud: awesomecloud
+    name: demoproject
+- debug:
+    var: openstack_projects

+# Gather facts about a previously created project in a specific domain
+- os_project_facts:
+    cloud: awesomecloud
+    name: demoproject
+    domain: admindomain
+- debug:
+    var: openstack_projects

+# Gather facts about a previously created project in a specific domain
+# with filter
+- os_project_facts:
+    cloud: awesomecloud
+    name: demoproject
+    domain: admindomain
+    filters:
+      enabled: False
+- debug:
+    var: openstack_projects
+'''
+
+
+RETURN = '''
+openstack_projects:
+    description: has all the OpenStack facts about projects
+    returned: always, but can be null
+    type: complex
+    contains:
+        id:
+            description: Unique UUID.
+            returned: success
+            type: string
+        name:
+            description: Name given to the project.
+            returned: success
+            type: string
+        description:
+            description: Description of the project
+            returned: success
+            type: string
+        enabled:
+            description: Flag to indicate if the project is enabled
+            returned: success
+            type: bool
+        domain_id:
+            description: Domain ID containing the project (keystone v3 clouds only)
+            returned: success
+            type: string
+'''
+
+def main():
+
+    argument_spec = openstack_full_argument_spec(
+        name=dict(required=False, default=None),
+        domain=dict(required=False, default=None),
+        filters=dict(required=False, type='dict', default=None),
+    )
+
+    module = AnsibleModule(argument_spec)
+
+    if not HAS_SHADE:
+        module.fail_json(msg='shade is required for this module')
+
+    try:
+        name = module.params['name']
+        domain = module.params['domain']
+        filters = module.params['filters']
+
+        opcloud = shade.operator_cloud(**module.params)
+
+        if domain:
+            try:
+                # We assume admin is passing domain id
+                dom = opcloud.get_domain(domain)['id']
+                domain = dom
+            except Exception:
+                # If we fail, maybe admin is passing a domain name.
+                # Note that domains have unique names, just like id.
+                dom = opcloud.search_domains(filters={'name': domain})
+                if dom:
+                    domain = dom[0]['id']
+                else:
+                    module.fail_json(msg='Domain name or ID does not exist')
+
+        if not filters:
+            filters = {}
+
+        filters['domain_id'] = domain
+
+        projects = opcloud.search_projects(name, filters)
+        module.exit_json(changed=False, ansible_facts=dict(
+            openstack_projects=projects))
+
+    except shade.OpenStackCloudException as e:
+        module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/openstack/os_recordset.py b/cloud/openstack/os_recordset.py
new file mode 100644
index 00000000000..62fa8564102
--- /dev/null
+++ b/cloud/openstack/os_recordset.py
@@ -0,0 +1,246 @@
+#!/usr/bin/python
+# Copyright (c) 2016 Hewlett-Packard Enterprise
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+    import shade
+    HAS_SHADE = True
+except ImportError:
+    HAS_SHADE = False
+
+from distutils.version import StrictVersion
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_recordset
+short_description: Manage OpenStack DNS recordsets
+extends_documentation_fragment: openstack
+version_added: "2.2"
+author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
+description:
+    - Manage OpenStack DNS recordsets. Recordsets can be created, deleted or
+      updated. Only the I(records), I(description), and I(ttl) values
+      can be updated.
+options:
+   zone:
+      description:
+        - Zone managing the recordset
+      required: true
+   name:
+      description:
+        - Name of the recordset
+      required: true
+   recordset_type:
+      description:
+        - Recordset type
+      required: true
+   records:
+      description:
+        - List of recordset definitions
+      required: true
+   description:
+      description:
+        - Description of the recordset
+      required: false
+      default: None
+   ttl:
+      description:
+        - TTL (Time To Live) value in seconds
+      required: false
+      default: None
+   state:
+      description:
+        - Should the resource be present or absent.
+      choices: [present, absent]
+      default: present
+requirements:
+    - "python >= 2.6"
+    - "shade"
+'''

+EXAMPLES = '''
+# Create a recordset named "www.example.net."
+- os_recordset:
+    cloud: mycloud
+    state: present
+    zone: example.net.
+    name: www
+    recordset_type: a
+    records: ['10.1.1.1']
+    description: test recordset
+    ttl: 3600

+# Update the TTL on existing "www.example.net." recordset
+- os_recordset:
+    cloud: mycloud
+    state: present
+    zone: example.net.
+    name: www
+    ttl: 7200

+# Delete recordset named "www.example.net."
+- os_recordset:
+    cloud: mycloud
+    state: absent
+    zone: example.net.
+    name: www
+'''

+RETURN = '''
+recordset:
+    description: Dictionary describing the recordset.
+    returned: On success when I(state) is 'present'.
+    type: dictionary
+    contains:
+        id:
+            description: Unique recordset ID
+            type: string
+            sample: "c1c530a3-3619-46f3-b0f6-236927b2618c"
+        name:
+            description: Recordset name
+            type: string
+            sample: "www.example.net."
+        zone_id:
+            description: Zone ID
+            type: string
+            sample: 9508e177-41d8-434e-962c-6fe6ca880af7
+        type:
+            description: Recordset type
+            type: string
+            sample: "A"
+        description:
+            description: Recordset description
+            type: string
+            sample: "Test description"
+        ttl:
+            description: Zone TTL value
+            type: int
+            sample: 3600
+        records:
+            description: Recordset records
+            type: list
+            sample: ['10.0.0.1']
+'''
+
+
+def _system_state_change(state, records, description, ttl, zone, recordset):
+    if state == 'present':
+        if recordset is None:
+            return True
+        if records is not None and recordset.records != records:
+            return True
+        if description is not None and recordset.description != description:
+            return True
+        if ttl is not None and recordset.ttl != ttl:
+            return True
+    if state == 'absent' and recordset:
+        return True
+    return False
+
+def main():
+    argument_spec = openstack_full_argument_spec(
+        zone=dict(required=True),
+        name=dict(required=True),
+        recordset_type=dict(required=False),
+        records=dict(required=False, type='list'),
+        description=dict(required=False, default=None),
+        ttl=dict(required=False, default=None, type='int'),
+        state=dict(default='present', choices=['absent', 'present']),
+    )
+
+    module_kwargs = openstack_module_kwargs()
+    module = AnsibleModule(argument_spec,
+                           required_if=[
+                               ('state', 'present',
+                                ['recordset_type', 'records'])],
+                           supports_check_mode=True,
+                           **module_kwargs)
+
+    if not HAS_SHADE:
+        module.fail_json(msg='shade is required for this module')
+    if StrictVersion(shade.__version__) <= StrictVersion('1.8.0'):
+        module.fail_json(msg="To utilize this module, the installed version of "
+                             "the shade library MUST be >1.8.0")
+
+    zone = module.params.get('zone')
+    name = module.params.get('name')
+    state = module.params.get('state')
+
+    try:
+        cloud = shade.openstack_cloud(**module.params)
+        recordset = cloud.get_recordset(zone, name + '.' + zone)
+
+        if state == 'present':
+            recordset_type = module.params.get('recordset_type')
+            records = module.params.get('records')
+            description = module.params.get('description')
+            ttl = module.params.get('ttl')
+
+            if module.check_mode:
+                module.exit_json(changed=_system_state_change(state,
+                                                              records, description,
+                                                              ttl, zone,
+                                                              recordset))
+
+            if recordset is None:
+                recordset = cloud.create_recordset(
+                    zone=zone, name=name, recordset_type=recordset_type,
+                    records=records, description=description, ttl=ttl)
+                changed = True
+            else:
+                if records is None:
+                    records = []
+
+                pre_update_recordset = recordset
+                changed = _system_state_change(state, records,
+                                               description, ttl,
+                                               zone, pre_update_recordset)
+                if changed:
+                    recordset = cloud.update_recordset(
+                        zone, name + '.' + zone,
+                        records=records,
+                        description=description,
+                        ttl=ttl)
+            module.exit_json(changed=changed, recordset=recordset)
+
+        elif state == 'absent':
+            if module.check_mode:
+                module.exit_json(changed=_system_state_change(state,
+                                                              None, None,
+                                                              None,
+                                                              None, recordset))
+
+            if recordset is None:
+                changed = False
+            else:
+                cloud.delete_recordset(zone, name + '.'
+ zone) + changed=True + module.exit_json(changed=changed) + + except shade.OpenStackCloudException as e: + module.fail_json(msg=str(e)) + + +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * + +if __name__ == '__main__': + main() diff --git a/cloud/openstack/os_server_group.py b/cloud/openstack/os_server_group.py new file mode 100644 index 00000000000..0103fef8670 --- /dev/null +++ b/cloud/openstack/os_server_group.py @@ -0,0 +1,186 @@ +#!/usr/bin/python + +# Copyright (c) 2016 Catalyst IT Limited +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: os_server_group +short_description: Manage OpenStack server groups +extends_documentation_fragment: openstack +version_added: "2.2" +author: "Lingxian Kong (@kong)" +description: + - Add or remove server groups from OpenStack. +options: + state: + description: + - Indicate desired state of the resource. When I(state) is 'present', + then I(policies) is required. + choices: ['present', 'absent'] + required: false + default: present + name: + description: + - Server group name. + required: true + policies: + description: + - A list of one or more policy names to associate with the server + group. The list must contain at least one policy name. The current + valid policy names are anti-affinity, affinity, soft-anti-affinity + and soft-affinity. + required: false +requirements: + - "python >= 2.6" + - "shade" +''' + +EXAMPLES = ''' +# Create a server group with 'affinity' policy. +- os_server_group: + state: present + auth: + auth_url: https://api.cloud.catalyst.net.nz:5000/v2.0 + username: admin + password: admin + project_name: admin + name: my_server_group + policies: + - affinity + +# Delete 'my_server_group' server group. +- os_server_group: + state: absent + auth: + auth_url: https://api.cloud.catalyst.net.nz:5000/v2.0 + username: admin + password: admin + project_name: admin + name: my_server_group +''' + +RETURN = ''' +id: + description: Unique UUID. + returned: success + type: string +name: + description: The name of the server group. + returned: success + type: string +policies: + description: A list of one or more policy names of the server group. + returned: success + type: list of strings +members: + description: A list of members in the server group. + returned: success + type: list of strings +metadata: + description: Metadata key and value pairs. + returned: success + type: dict +project_id: + description: The project ID who owns the server group. + returned: success + type: string +user_id: + description: The user ID who owns the server group. 
+ returned: success + type: string +''' + + +def _system_state_change(state, server_group): + if state == 'present' and not server_group: + return True + if state == 'absent' and server_group: + return True + + return False + + +def main(): + argument_spec = openstack_full_argument_spec( + name=dict(required=True), + policies=dict(required=False, type='list'), + state=dict(default='present', choices=['absent', 'present']), + ) + module_kwargs = openstack_module_kwargs() + module = AnsibleModule( + argument_spec, + supports_check_mode=True, + **module_kwargs + ) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + name = module.params['name'] + policies = module.params['policies'] + state = module.params['state'] + + try: + cloud = shade.openstack_cloud(**module.params) + server_group = cloud.get_server_group(name) + + if module.check_mode: + module.exit_json( + changed=_system_state_change(state, server_group) + ) + + changed = False + if state == 'present': + if not server_group: + if not policies: + module.fail_json( + msg="Parameter 'policies' is required in Server Group " + "Create" + ) + server_group = cloud.create_server_group(name, policies) + changed = True + + module.exit_json( + changed=changed, + id=server_group['id'], + server_group=server_group + ) + if state == 'absent': + if server_group: + cloud.delete_server_group(server_group['id']) + changed = True + module.exit_json(changed=changed) + except shade.OpenStackCloudException as e: + module.fail_json(msg=str(e), extra_data=e.extra_data) + + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * + +if __name__ == '__main__': + main() diff --git a/cloud/openstack/os_stack.py b/cloud/openstack/os_stack.py new file mode 100644 index 00000000000..fc42b62112e --- /dev/null +++ b/cloud/openstack/os_stack.py @@ -0,0 +1,267 @@ +#!/usr/bin/python +#coding: utf-8 -*- + +# (c) 2016, Mathieu Bultel +# (c) 2016, Steve Baker +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
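+#
+# Module flow: look up the stack by name, then create it when absent or
+# update it in place when present; both paths wait for Heat to reach a
+# *_COMPLETE status before a result is returned.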
+
+from distutils.version import StrictVersion
+try:
+    import shade
+    HAS_SHADE = True
+except ImportError:
+    HAS_SHADE = False
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_stack
+short_description: Add/Remove Heat Stack
+extends_documentation_fragment: openstack
+version_added: "2.2"
+author: "Mathieu Bultel (matbu), Steve Baker (steveb)"
+description:
+    - Add or remove a Heat stack in an OpenStack cloud
+options:
+    state:
+      description:
+        - Indicate desired state of the resource
+      choices: ['present', 'absent']
+      required: false
+      default: present
+    name:
+      description:
+        - Name of the stack that should be created. The name may contain only
+          letters and digits, with no spaces.
+      required: true
+    template:
+      description:
+        - Path of the template file to use for the stack creation
+      required: false
+      default: None
+    environment:
+      description:
+        - List of environment files that should be used for the stack creation
+      required: false
+      default: None
+    parameters:
+      description:
+        - Dictionary of parameters for the stack creation
+      required: false
+      default: None
+    rollback:
+      description:
+        - Rollback stack creation
+      required: false
+      default: false
+    timeout:
+      description:
+        - Maximum number of seconds to wait for the stack creation
+      required: false
+      default: 3600
+requirements:
+    - "python >= 2.6"
+    - "shade"
+'''
+EXAMPLES = '''
+---
+- name: create stack
+  ignore_errors: True
+  register: stack_create
+  os_stack:
+    name: "{{ stack_name }}"
+    state: present
+    template: "/path/to/my_stack.yaml"
+    environment:
+      - /path/to/resource-registry.yaml
+      - /path/to/environment.yaml
+    parameters:
+      bmc_flavor: m1.medium
+      bmc_image: CentOS
+      key_name: default
+      private_net: "{{ private_net_param }}"
+      node_count: 2
+      name: undercloud
+      image: CentOS
+      my_flavor: m1.large
+      external_net: "{{ external_net_param }}"
+'''

+RETURN = '''
+id:
+    description: Stack ID.
+    type: string
+    sample: "97a3f543-8136-4570-920e-fd7605c989d6"

+stack:
+    action:
+        description: Action, could be Create or Update.
+        type: string
+        sample: "CREATE"
+    creation_time:
+        description: Time when the action has been made.
+        type: string
+        sample: "2016-07-05T17:38:12Z"
+    description:
+        description: Description of the Stack provided in the heat template.
+        type: string
+        sample: "HOT template to create a new instance and networks"
+    id:
+        description: Stack ID.
+        type: string
+        sample: "97a3f543-8136-4570-920e-fd7605c989d6"
+    name:
+        description: Name of the Stack
+        type: string
+        sample: "test-stack"
+    identifier:
+        description: Identifier of the current Stack action.
+        type: string
+        sample: "test-stack/97a3f543-8136-4570-920e-fd7605c989d6"
+    links:
+        description: Links to the current Stack.
+        type: list of dict
+        sample: "[{'href': 'http://foo:8004/v1/7f6a/stacks/test-stack/97a3f543-8136-4570-920e-fd7605c989d6'}]"
+    outputs:
+        description: Output returned by the Stack.
+        type: list of dict
+        sample: "[{'description': 'IP address of server1 in private network',
+                   'output_key': 'server1_private_ip',
+                   'output_value': '10.1.10.103'}]"
+    parameters:
+        description: Parameters of the current Stack
+        type: dict
+        sample: "{'OS::project_id': '7f6a3a3e01164a4eb4eecb2ab7742101',
+                  'OS::stack_id': '97a3f543-8136-4570-920e-fd7605c989d6',
+                  'OS::stack_name': 'test-stack',
+                  'stack_status': 'CREATE_COMPLETE',
+                  'stack_status_reason': 'Stack CREATE completed successfully',
+                  'status': 'COMPLETE',
+                  'template_description': 'HOT template to create a new instance and networks',
+                  'timeout_mins': 60,
+                  'updated_time': null}"
+'''
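+
+# module.params['parameters'] is splatted into create_stack/update_stack
+# below; the shade API this module targets (1.8.x) accepts arbitrary extra
+# keyword arguments on those calls and forwards them to Heat as stack
+# parameters.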
+
+def _create_stack(module, stack, cloud):
+    try:
+        stack = cloud.create_stack(module.params['name'],
+                                   template_file=module.params['template'],
+                                   environment_files=module.params['environment'],
+                                   timeout=module.params['timeout'],
+                                   wait=True,
+                                   rollback=module.params['rollback'],
+                                   **module.params['parameters'])
+
+        stack = cloud.get_stack(stack.id, None)
+        if stack.stack_status == 'CREATE_COMPLETE':
+            return stack
+        else:
+            module.fail_json(msg="Failure in creating stack: {0}".format(stack))
+    except shade.OpenStackCloudException as e:
+        module.fail_json(msg=str(e))
+
+def _update_stack(module, stack, cloud):
+    try:
+        stack = cloud.update_stack(
+            module.params['name'],
+            template_file=module.params['template'],
+            environment_files=module.params['environment'],
+            timeout=module.params['timeout'],
+            rollback=module.params['rollback'],
+            wait=module.params['wait'],
+            **module.params['parameters'])
+
+        if stack['stack_status'] == 'UPDATE_COMPLETE':
+            return stack
+        else:
+            module.fail_json(msg="Failure in updating stack: %s" %
+                             stack['stack_status_reason'])
+    except shade.OpenStackCloudException as e:
+        module.fail_json(msg=str(e))
+
+def _system_state_change(module, stack, cloud):
+    state = module.params['state']
+    if state == 'present':
+        if not stack:
+            return True
+    if state == 'absent' and stack:
+        return True
+    return False
+
+def main():
+
+    argument_spec = openstack_full_argument_spec(
+        name=dict(required=True),
+        template=dict(default=None),
+        environment=dict(default=None, type='list'),
+        parameters=dict(default={}, type='dict'),
+        rollback=dict(default=False, type='bool'),
+        timeout=dict(default=3600, type='int'),
+        state=dict(default='present', choices=['absent', 'present']),
+    )
+
+    module_kwargs = openstack_module_kwargs()
+    module = AnsibleModule(argument_spec,
+                           supports_check_mode=True,
+                           **module_kwargs)
+
+    # stack API introduced in 1.8.0
+    if not HAS_SHADE or (StrictVersion(shade.__version__) < StrictVersion('1.8.0')):
+        module.fail_json(msg='shade 1.8.0 or higher is required for this module')
+
+    state = module.params['state']
+    name = module.params['name']
+    # Check for required parameters when state == 'present'
+    if state == 'present':
+        for p in ['template']:
+            if not module.params[p]:
+                module.fail_json(msg='%s required with present state' % p)
+
+    try:
+        cloud = shade.openstack_cloud(**module.params)
+        stack = cloud.get_stack(name)
+
+        if module.check_mode:
+            module.exit_json(changed=_system_state_change(module, stack,
+                                                          cloud))
+
+        if state == 'present':
+            if not stack:
+                stack = _create_stack(module, stack, cloud)
+            else:
+                stack = _update_stack(module, stack, cloud)
+            changed = True
+            module.exit_json(changed=changed,
+                             stack=stack,
+                             id=stack.id)
+        elif state == 'absent':
+            if not stack:
+                changed = False
+            else:
+                changed = True
+                if not cloud.delete_stack(name, wait=module.params['wait']):
+                    module.fail_json(msg='delete stack failed for stack: %s' % name)
+            module.exit_json(changed=changed)
+    except shade.OpenStackCloudException as e:
+        module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+if __name__ == '__main__':
+    main()
diff --git a/cloud/openstack/os_user_facts.py b/cloud/openstack/os_user_facts.py
new file mode 100644
index 00000000000..52af5b8e621
--- /dev/null
+++ b/cloud/openstack/os_user_facts.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+    import shade
+    HAS_SHADE = True
+except ImportError:
+    HAS_SHADE = False
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_user_facts
+short_description: Retrieve facts about one or more OpenStack users
+extends_documentation_fragment: openstack
+version_added: "2.1"
+author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
+description:
+    - Retrieve facts about one or more OpenStack users
+requirements:
+    - "python >= 2.6"
+    - "shade"
+options:
+   name:
+      description:
+        - Name or ID of the user
+      required: false
+      default: None
+   domain:
+      description:
+        - Name or ID of the domain containing the user if the cloud supports domains
+      required: false
+      default: None
+   filters:
+      description:
+        - A dictionary of meta data to use for further filtering. Elements of
+          this dictionary may be additional dictionaries.
+      required: false
+      default: None
+'''

+EXAMPLES = '''
+# Gather facts about previously created users
+- os_user_facts:
+    cloud: awesomecloud
+- debug:
+    var: openstack_users

+# Gather facts about a previously created user by name
+- os_user_facts:
+    cloud: awesomecloud
+    name: demouser
+- debug:
+    var: openstack_users

+# Gather facts about a previously created user in a specific domain
+- os_user_facts:
+    cloud: awesomecloud
+    name: demouser
+    domain: admindomain
+- debug:
+    var: openstack_users

+# Gather facts about a previously created user in a specific domain
+# with filter
+- os_user_facts:
+    cloud: awesomecloud
+    name: demouser
+    domain: admindomain
+    filters:
+      enabled: False
+- debug:
+    var: openstack_users
+'''
+
+
+RETURN = '''
+openstack_users:
+    description: has all the OpenStack facts about users
+    returned: always, but can be null
+    type: complex
+    contains:
+        id:
+            description: Unique UUID.
+            returned: success
+            type: string
+        name:
+            description: Name given to the user.
+ returned: success + type: string + enabled: + description: Flag to indicate if the user is enabled + returned: success + type: bool + domain_id: + description: Domain ID containing the user + returned: success + type: string + default_project_id: + description: Default project ID of the user + returned: success + type: string + email: + description: Email of the user + returned: success + type: string + username: + description: Username of the user + returned: success + type: string +''' + +def main(): + + argument_spec = openstack_full_argument_spec( + name=dict(required=False, default=None), + domain=dict(required=False, default=None), + filters=dict(required=False, type='dict', default=None), + ) + + module = AnsibleModule(argument_spec) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + try: + name = module.params['name'] + domain = module.params['domain'] + filters = module.params['filters'] + + opcloud = shade.operator_cloud(**module.params) + + if domain: + try: + # We assume admin is passing domain id + dom = opcloud.get_domain(domain)['id'] + domain = dom + except: + # If we fail, maybe admin is passing a domain name. + # Note that domains have unique names, just like id. + dom = opcloud.search_domains(filters={'name': domain}) + if dom: + domain = dom[0]['id'] + else: + module.fail_json(msg='Domain name or ID does not exist') + + if not filters: + filters = {} + + filters['domain_id'] = domain + + users = opcloud.search_users(name, + filters) + module.exit_json(changed=False, ansible_facts=dict( + openstack_users=users)) + + except shade.OpenStackCloudException as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * + +if __name__ == '__main__': + main() diff --git a/cloud/openstack/os_user_role.py b/cloud/openstack/os_user_role.py new file mode 100644 index 00000000000..41b0b73e075 --- /dev/null +++ b/cloud/openstack/os_user_role.py @@ -0,0 +1,216 @@ +#!/usr/bin/python +# Copyright (c) 2016 IBM +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +from distutils.version import StrictVersion + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: os_user_role +short_description: Associate OpenStack Identity users and roles +extends_documentation_fragment: openstack +author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)" +version_added: "2.1" +description: + - Grant and revoke roles in either project or domain context for + OpenStack Identity Users. +options: + role: + description: + - Name or ID for the role. + required: true + user: + description: + - Name or ID for the user. If I(user) is not specified, then + I(group) is required. Both may not be specified. + required: false + default: null + group: + description: + - Name or ID for the group. 
Valid only with keystone version 3.
+          If I(group) is not specified, then I(user) is required. Both
+          may not be specified.
+      required: false
+      default: null
+   project:
+      description:
+        - Name or ID of the project to scope the role association to.
+          If you are using keystone version 2, then this value is required.
+      required: false
+      default: null
+   domain:
+      description:
+        - ID of the domain to scope the role association to. Valid only with
+          keystone version 3, and required if I(project) is not specified.
+      required: false
+      default: null
+   state:
+      description:
+        - Should the roles be present or absent on the user.
+      choices: [present, absent]
+      default: present
+requirements:
+    - "python >= 2.6"
+    - "shade"
+'''

+EXAMPLES = '''
+# Grant an admin role on the user admin in the project project1
+- os_user_role:
+    cloud: mycloud
+    user: admin
+    role: admin
+    project: project1

+# Revoke the admin role from the user barney in the newyork domain
+- os_user_role:
+    cloud: mycloud
+    state: absent
+    user: barney
+    role: admin
+    domain: newyork
+'''

+RETURN = '''
+#
+'''
+
+def _system_state_change(state, assignment):
+    if state == 'present' and not assignment:
+        return True
+    elif state == 'absent' and assignment:
+        return True
+    return False
+
+
+def _build_kwargs(user, group, project, domain):
+    kwargs = {}
+    if user:
+        kwargs['user'] = user
+    if group:
+        kwargs['group'] = group
+    if project:
+        kwargs['project'] = project
+    if domain:
+        kwargs['domain'] = domain
+    return kwargs
+
+
+def main():
+    argument_spec = openstack_full_argument_spec(
+        role=dict(required=True),
+        user=dict(required=False),
+        group=dict(required=False),
+        project=dict(required=False),
+        domain=dict(required=False),
+        state=dict(default='present', choices=['absent', 'present']),
+    )
+
+    module_kwargs = openstack_module_kwargs(
+        required_one_of=[
+            ['user', 'group']
+        ])
+    module = AnsibleModule(argument_spec,
+                           supports_check_mode=True,
+                           **module_kwargs)
+
+    # role grant/revoke API introduced in 1.5.0
+    if not HAS_SHADE or (StrictVersion(shade.__version__) < StrictVersion('1.5.0')):
+        module.fail_json(msg='shade 1.5.0 or higher is required for this module')
+
+    role = module.params.pop('role')
+    user = module.params.pop('user')
+    group = module.params.pop('group')
+    project = module.params.pop('project')
+    domain = module.params.pop('domain')
+    state = module.params.pop('state')
+
+    try:
+        cloud = shade.operator_cloud(**module.params)
+
+        filters = {}
+
+        r = cloud.get_role(role)
+        if r is None:
+            module.fail_json(msg="Role %s is not valid" % role)
+        filters['role'] = r['id']
+
+        if user:
+            u = cloud.get_user(user)
+            if u is None:
+                module.fail_json(msg="User %s is not valid" % user)
+            filters['user'] = u['id']
+        if group:
+            g = cloud.get_group(group)
+            if g is None:
+                module.fail_json(msg="Group %s is not valid" % group)
+            filters['group'] = g['id']
+        if domain:
+            d = cloud.get_domain(domain)
+            if d is None:
+                module.fail_json(msg="Domain %s is not valid" % domain)
+            filters['domain'] = d['id']
+        if project:
+            if domain:
+                p = cloud.get_project(project, domain_id=filters['domain'])
+            else:
+                p = cloud.get_project(project)
+
+            if p is None:
+                module.fail_json(msg="Project %s is not valid" % project)
+            filters['project'] = p['id']
+
+        assignment = cloud.list_role_assignments(filters=filters)
+
+        if module.check_mode:
+            module.exit_json(changed=_system_state_change(state, assignment))
+
+        changed = False
+
+        if state == 'present':
+            if not assignment:
+                kwargs = _build_kwargs(user, group, project, domain)
+
cloud.grant_role(role, **kwargs) + changed = True + + elif state == 'absent': + if assignment: + kwargs = _build_kwargs(user, group, project, domain) + cloud.revoke_role(role, **kwargs) + changed=True + + module.exit_json(changed=changed) + + except shade.OpenStackCloudException as e: + module.fail_json(msg=str(e)) + + +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * + +if __name__ == '__main__': + main() diff --git a/cloud/openstack/os_zone.py b/cloud/openstack/os_zone.py new file mode 100644 index 00000000000..a733d80ab22 --- /dev/null +++ b/cloud/openstack/os_zone.py @@ -0,0 +1,241 @@ +#!/usr/bin/python +# Copyright (c) 2016 Hewlett-Packard Enterprise +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +from distutils.version import StrictVersion + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: os_zone +short_description: Manage OpenStack DNS zones +extends_documentation_fragment: openstack +version_added: "2.2" +author: "Ricardo Carrillo Cruz (@rcarrillocruz)" +description: + - Manage OpenStack DNS zones. Zones can be created, deleted or + updated. Only the I(email), I(description), I(ttl) and I(masters) values + can be updated. +options: + name: + description: + - Zone name + required: true + zone_type: + description: + - Zone type + choices: [primary, secondary] + default: None + email: + description: + - Email of the zone owner (only applies if zone_type is primary) + required: false + description: + description: + - Zone description + required: false + default: None + ttl: + description: + - TTL (Time To Live) value in seconds + required: false + default: None + masters: + description: + - Master nameservers (only applies if zone_type is secondary) + required: false + default: None + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + default: present +requirements: + - "python >= 2.6" + - "shade" +''' + +EXAMPLES = ''' +# Create a zone named "example.net" +- os_zone: + cloud: mycloud + state: present + name: example.net. + zone_type: primary + email: test@example.net + description: Test zone + ttl: 3600 + +# Update the TTL on existing "example.net." zone +- os_zone: + cloud: mycloud + state: present + name: example.net. + ttl: 7200 + +# Delete zone named "example.net." +- os_zone: + cloud: mycloud + state: absent + name: example.net. +''' + +RETURN = ''' +zone: + description: Dictionary describing the zone. + returned: On success when I(state) is 'present'. + type: dictionary + contains: + id: + description: Unique zone ID + type: string + sample: "c1c530a3-3619-46f3-b0f6-236927b2618c" + name: + description: Zone name + type: string + sample: "example.net." 
+        type:
+            description: Zone type
+            type: string
+            sample: "PRIMARY"
+        email:
+            description: Zone owner email
+            type: string
+            sample: "test@example.net"
+        description:
+            description: Zone description
+            type: string
+            sample: "Test description"
+        ttl:
+            description: Zone TTL value
+            type: int
+            sample: 3600
+        masters:
+            description: Zone master nameservers
+            type: list
+            sample: []
+'''
+
+
+def _system_state_change(state, email, description, ttl, masters, zone):
+    if state == 'present':
+        if not zone:
+            return True
+        if email is not None and zone.email != email:
+            return True
+        if description is not None and zone.description != description:
+            return True
+        if ttl is not None and zone.ttl != ttl:
+            return True
+        if masters is not None and zone.masters != masters:
+            return True
+    if state == 'absent' and zone:
+        return True
+    return False
+
+def main():
+    argument_spec = openstack_full_argument_spec(
+        name=dict(required=True),
+        zone_type=dict(required=False, choices=['primary', 'secondary']),
+        email=dict(required=False, default=None),
+        description=dict(required=False, default=None),
+        ttl=dict(required=False, default=None, type='int'),
+        masters=dict(required=False, default=None, type='list'),
+        state=dict(default='present', choices=['absent', 'present']),
+    )
+
+    module_kwargs = openstack_module_kwargs()
+    module = AnsibleModule(argument_spec,
+                           supports_check_mode=True,
+                           **module_kwargs)
+
+    if not HAS_SHADE:
+        module.fail_json(msg='shade is required for this module')
+    if StrictVersion(shade.__version__) < StrictVersion('1.8.0'):
+        module.fail_json(msg="To utilize this module, the installed version of "
+                             "the shade library MUST be >=1.8.0")
+
+    name = module.params.get('name')
+    state = module.params.get('state')
+
+    try:
+        cloud = shade.openstack_cloud(**module.params)
+        zone = cloud.get_zone(name)
+
+        if state == 'present':
+            zone_type = module.params.get('zone_type')
+            email = module.params.get('email')
+            description = module.params.get('description')
+            ttl = module.params.get('ttl')
+            masters = module.params.get('masters')
+
+            if module.check_mode:
+                module.exit_json(changed=_system_state_change(state, email,
+                                                              description, ttl,
+                                                              masters, zone))
+
+            if zone is None:
+                zone = cloud.create_zone(
+                    name=name, zone_type=zone_type, email=email,
+                    description=description, ttl=ttl, masters=masters)
+                changed = True
+            else:
+                if masters is None:
+                    masters = []
+
+                pre_update_zone = zone
+                changed = _system_state_change(state, email,
+                                               description, ttl,
+                                               masters, pre_update_zone)
+                if changed:
+                    zone = cloud.update_zone(
+                        name, email=email,
+                        description=description,
+                        ttl=ttl, masters=masters)
+            module.exit_json(changed=changed, zone=zone)
+
+        elif state == 'absent':
+            if module.check_mode:
+                module.exit_json(changed=_system_state_change(state, None,
+                                                              None, None,
+                                                              None, zone))
+
+            if zone is None:
+                changed = False
+            else:
+                cloud.delete_zone(name)
+                changed = True
+            module.exit_json(changed=changed)
+
+    except shade.OpenStackCloudException as e:
+        module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/ovh/__init__.py b/cloud/ovh/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/cloud/ovh/ovh_ip_loadbalancing_backend.py b/cloud/ovh/ovh_ip_loadbalancing_backend.py
new file mode 100644
index 00000000000..3499e73a92f
--- /dev/null
+++ b/cloud/ovh/ovh_ip_loadbalancing_backend.py
@@ -0,0 +1,316 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}

+DOCUMENTATION = '''
+---
+module: ovh_ip_loadbalancing_backend
+short_description: Manage OVH IP LoadBalancing backends
+description:
+    - Manage OVH (French European hosting provider) LoadBalancing IP backends
+version_added: "2.2"
+author: Pascal HERAUD @pascalheraud
+notes:
+    - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
+      You have to create an application (a key and secret) with a consumer
+      key as described into U(https://eu.api.ovh.com/g934.first_step_with_api)
+requirements:
+    - ovh > 0.3.5
+options:
+    name:
+        required: true
+        description:
+            - Internal name of the LoadBalancing IP (ip-X.X.X.X)
+    backend:
+        required: true
+        description:
+            - The IP address of the backend to update / modify / delete
+    state:
+        required: false
+        default: present
+        choices: ['present', 'absent']
+        description:
+            - Determines whether the backend is to be created/modified
+              or deleted
+    probe:
+        required: false
+        default: none
+        choices: ['none', 'http', 'icmp', 'oco']
+        description:
+            - Determines the type of probe to use for this backend
+    weight:
+        required: false
+        default: 8
+        description:
+            - Determines the weight for this backend
+    endpoint:
+        required: true
+        description:
+            - The endpoint to use (for instance ovh-eu)
+    application_key:
+        required: true
+        description:
+            - The application key to use
+    application_secret:
+        required: true
+        description:
+            - The application secret to use
+    consumer_key:
+        required: true
+        description:
+            - The consumer key to use
+    timeout:
+        required: false
+        type: "int"
+        default: 120
+        description:
+            - The timeout in seconds used to wait for a task to be
+              completed.
+'''
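+
+# The application_key, application_secret and consumer_key options are
+# declared with no_log=True in the argument spec below, so credential
+# values are kept out of Ansible logs.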
+
+EXAMPLES = '''
+# Adds or modify the backend '212.1.1.1' to a
+# loadbalancing 'ip-1.1.1.1'
+- ovh_ip_loadbalancing:
+    name: ip-1.1.1.1
+    backend: 212.1.1.1
+    state: present
+    probe: none
+    weight: 8
+    endpoint: ovh-eu
+    application_key: yourkey
+    application_secret: yoursecret
+    consumer_key: yourconsumerkey

+# Removes a backend '212.1.1.1' from a loadbalancing 'ip-1.1.1.1'
+- ovh_ip_loadbalancing:
+    name: ip-1.1.1.1
+    backend: 212.1.1.1
+    state: absent
+    endpoint: ovh-eu
+    application_key: yourkey
+    application_secret: yoursecret
+    consumer_key: yourconsumerkey
+'''

+RETURN = '''
+'''

+import time
+try:
+    import ovh
+    import ovh.exceptions
+    from ovh.exceptions import APIError
+    HAS_OVH = True
+except ImportError:
+    HAS_OVH = False
+
+def getOvhClient(ansibleModule):
+    endpoint = ansibleModule.params.get('endpoint')
+    application_key = ansibleModule.params.get('application_key')
+    application_secret = ansibleModule.params.get('application_secret')
+    consumer_key = ansibleModule.params.get('consumer_key')
+
+    return ovh.Client(
+        endpoint=endpoint,
+        application_key=application_key,
+        application_secret=application_secret,
+        consumer_key=consumer_key
+    )
+
+
+def waitForNoTask(client, name, timeout):
+    currentTimeout = timeout
+    while len(client.get('/ip/loadBalancing/{0}/task'.format(name))) > 0:
+        time.sleep(1)  # Delay for 1 sec
+        currentTimeout -= 1
+        if currentTimeout < 0:
+            return False
+    return True
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=True),
+            backend=dict(required=True),
+            weight=dict(default=8, type='int'),
+            probe=dict(default='none',
+                       choices=['none', 'http', 'icmp', 'oco']),
+            state=dict(default='present', choices=['present', 'absent']),
+            endpoint=dict(required=True),
+            application_key=dict(required=True, no_log=True),
+            application_secret=dict(required=True, no_log=True),
+            consumer_key=dict(required=True, no_log=True),
+            timeout=dict(default=120, type='int')
+        )
+    )
+
+    if not HAS_OVH:
+        module.fail_json(msg='ovh-api python module '
+                             'is required to run this module')
+
+    # Get parameters
+    name = module.params.get('name')
+    state = module.params.get('state')
+    backend = module.params.get('backend')
+    weight = module.params.get('weight')
+    probe = module.params.get('probe')
+    timeout = module.params.get('timeout')
+
+    # Connect to OVH API
+    client = getOvhClient(module)
+
+    # Check that the load balancing exists
+    try:
+        loadBalancings = client.get('/ip/loadBalancing')
+    except APIError as apiError:
+        module.fail_json(
+            msg='Unable to call OVH api for getting the list of loadBalancing, '
+                'check application key, secret, consumerkey and parameters. '
+                'Error returned by OVH api was : {0}'.format(apiError))
+
+    if name not in loadBalancings:
+        module.fail_json(msg='IP LoadBalancing {0} does not exist'.format(name))
+
+    # Check that no task is pending before going on
+    try:
+        if not waitForNoTask(client, name, timeout):
+            module.fail_json(
+                msg='Timeout of {0} seconds while waiting for no pending '
+                    'tasks before executing the module '.format(timeout))
+    except APIError as apiError:
+        module.fail_json(
+            msg='Unable to call OVH api for getting the list of pending tasks '
+                'of the loadBalancing, check application key, secret, consumerkey '
+                'and parameters. Error returned by OVH api was : {0}'
+            .format(apiError))
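+
+    # From here on, reconcile the desired state: fetch the current backend
+    # list, then delete, update (weight/probe) or create the backend.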
+ + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + backend=dict(required=True), + weight=dict(default=8, type='int'), + probe=dict(default='none', + choices=['none', 'http', 'icmp', 'oco']), + state=dict(default='present', choices=['present', 'absent']), + endpoint=dict(required=True), + application_key=dict(required=True, no_log=True), + application_secret=dict(required=True, no_log=True), + consumer_key=dict(required=True, no_log=True), + timeout=dict(default=120, type='int') + ) + ) + + if not HAS_OVH: + module.fail_json(msg='ovh-api python module ' + 'is required to run this module') + + # Get parameters + name = module.params.get('name') + state = module.params.get('state') + backend = module.params.get('backend') + weight = int(module.params.get('weight')) + probe = module.params.get('probe') + timeout = module.params.get('timeout') + + # Connect to OVH API + client = getOvhClient(module) + + # Check that the load balancing exists + try: + loadBalancings = client.get('/ip/loadBalancing') + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH API for getting the list of loadBalancing, ' + 'check application key, secret, consumer key and parameters. ' + 'Error returned by OVH API was: {0}'.format(apiError)) + + if name not in loadBalancings: + module.fail_json(msg='IP LoadBalancing {0} does not exist'.format(name)) + + # Check that no task is pending before going on + try: + if not waitForNoTask(client, name, timeout): + module.fail_json( + msg='Timeout of {0} seconds while waiting for no pending ' + 'tasks before executing the module'.format(timeout)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH API for getting the list of pending tasks ' + 'of the loadBalancing, check application key, secret, consumer key ' + 'and parameters. Error returned by OVH API was: {0}' + .format(apiError)) + + try: + backends = client.get('/ip/loadBalancing/{0}/backend'.format(name)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH API for getting the list of backends ' + 'of the loadBalancing, check application key, secret, consumer key ' + 'and parameters. Error returned by OVH API was: {0}' + .format(apiError)) + + backendExists = backend in backends + moduleChanged = False + if state == "absent": + if backendExists: + # Remove backend + try: + client.delete( + '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend)) + if not waitForNoTask(client, name, timeout): + module.fail_json( + msg='Timeout of {0} seconds while waiting for completion ' + 'of removing backend task'.format(timeout)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH API for deleting the backend, ' + 'check application key, secret, consumer key and ' + 'parameters. Error returned by OVH API was: {0}' + .format(apiError)) + moduleChanged = True + else: + if backendExists: + # Get properties + try: + backendProperties = client.get( + '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH API for getting the backend properties, ' + 'check application key, secret, consumer key and ' + 'parameters. Error returned by OVH API was: {0}' + .format(apiError)) + + if (backendProperties['weight'] != weight): + # Change weight + try: + client.post( + '/ip/loadBalancing/{0}/backend/{1}/setWeight' + .format(name, backend), weight=weight) + if not waitForNoTask(client, name, timeout): + module.fail_json( + msg='Timeout of {0} seconds while waiting for completion ' + 'of setWeight to backend task' + .format(timeout)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH API for updating the weight of the ' + 'backend, check application key, secret, consumer key ' + 'and parameters. Error returned by OVH API was: {0}' + .format(apiError)) + moduleChanged = True + + if (backendProperties['probe'] != probe): + # Change probe + backendProperties['probe'] = probe + try: + client.put( + '/ip/loadBalancing/{0}/backend/{1}' + .format(name, backend), probe=probe) + if not waitForNoTask(client, name, timeout): + module.fail_json( + msg='Timeout of {0} seconds while waiting for completion of ' + 'setProbe to backend task' + .format(timeout)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH API for updating the probe of ' + 'the backend, check application key, secret, ' + 'consumer key and parameters. Error returned by OVH API ' + 'was: {0}' + .format(apiError)) + moduleChanged = True + + else: + # Create the backend + try: + try: + client.post('/ip/loadBalancing/{0}/backend'.format(name), + ipBackend=backend, probe=probe, weight=weight) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH API for creating the backend, check ' + 'application key, secret, consumer key and parameters. ' + 'Error returned by OVH API was: {0}' + .format(apiError)) + + if not waitForNoTask(client, name, timeout): + module.fail_json( + msg='Timeout of {0} seconds while waiting for completion of ' + 'backend creation task'.format(timeout)) + except APIError as apiError: + module.fail_json( + msg='Unable to call OVH API for creating the backend, check ' + 'application key, secret, consumer key and parameters. ' + 'Error returned by OVH API was: {0}'.format(apiError))
+ moduleChanged = True + + module.exit_json(changed=moduleChanged) + +# import module snippets +from ansible.module_utils.basic import AnsibleModule + +if __name__ == '__main__': + main() diff --git a/cloud/ovirt/__init__.py b/cloud/ovirt/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/ovirt/ovirt_affinity_labels.py b/cloud/ovirt/ovirt_affinity_labels.py new file mode 100644 index 00000000000..5a680f92976 --- /dev/null +++ b/cloud/ovirt/ovirt_affinity_labels.py @@ -0,0 +1,207 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +import traceback + +try: + import ovirtsdk4.types as otypes +except ImportError: + pass + +from collections import defaultdict +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ovirt import ( + BaseModule, + check_sdk, + create_connection, + ovirt_full_argument_spec, +) + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ovirt_affinity_labels +short_description: Module to manage affinity labels in oVirt +version_added: "2.3" +author: "Ondra Machacek (@machacekondra)" +description: + - "This module manages affinity labels in oVirt. It can also manage assignments + of those labels to hosts and VMs." +options: + name: + description: + - "Name of the affinity label to manage." + required: true + state: + description: + - "Should the affinity label be present or absent." + choices: ['present', 'absent'] + default: present + cluster: + description: + - "Name of the cluster where the VMs and hosts reside." + vms: + description: + - "List of the VM names to which this affinity label should be assigned." + hosts: + description: + - "List of the host names to which this affinity label should be assigned." +extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Create (if not present) an affinity label and assign it to +# VMs vm1 and vm2 and to host host1 +- ovirt_affinity_labels: + name: mylabel + cluster: mycluster + vms: + - vm1 + - vm2 + hosts: + - host1 + +# To detach all VMs from label +- ovirt_affinity_labels: + name: mylabel + cluster: mycluster + vms: [] + +# Remove affinity label +- ovirt_affinity_labels: + state: absent + name: mylabel +''' + +RETURN = ''' +id: + description: ID of the affinity label which is managed + returned: On success if affinity label is found. + type: str + sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c +affinity_label: + description: "Dictionary of all the affinity label attributes. Affinity label attributes can be found on your oVirt instance + at following url: https://ovirt.example.com/ovirt-engine/api/model#types/affinity_label."
+ returned: On success if affinity label is found. +''' + + +class AffinityLabelsModule(BaseModule): + + def build_entity(self): + return otypes.AffinityLabel(name=self._module.params['name']) + + def post_create(self, entity): + self.update_check(entity) + + def pre_remove(self, entity): + self._module.params['vms'] = [] + self._module.params['hosts'] = [] + self.update_check(entity) + + def _update_label_assignments(self, entity, name, label_obj_type): + objs_service = getattr(self._connection.system_service(), '%s_service' % name)() + if self._module.params[name] is not None: + objs = self._connection.follow_link(getattr(entity, name)) + objs_names = defaultdict(list) + for obj in objs: + labeled_entity = objs_service.service(obj.id).get() + if self._module.params['cluster'] is None: + objs_names[labeled_entity.name].append(obj.id) + elif self._connection.follow_link(labeled_entity.cluster).name == self._module.params['cluster']: + objs_names[labeled_entity.name].append(obj.id) + + for obj in self._module.params[name]: + if obj not in objs_names: + for obj_id in objs_service.list( + search='name=%s and cluster=%s' % (obj, self._module.params['cluster']) + ): + label_service = getattr(self._service.service(entity.id), '%s_service' % name)() + if not self._module.check_mode: + label_service.add(**{ + name[:-1]: label_obj_type(id=obj_id.id) + }) + self.changed = True + + for obj in objs_names: + if obj not in self._module.params[name]: + label_service = getattr(self._service.service(entity.id), '%s_service' % name)() + if not self._module.check_mode: + for obj_id in objs_names[obj]: + label_service.service(obj_id).remove() + self.changed = True + + def update_check(self, entity): + self._update_label_assignments(entity, 'vms', otypes.Vm) + self._update_label_assignments(entity, 'hosts', otypes.Host) + return True + + +def main(): + argument_spec = ovirt_full_argument_spec( + state=dict( + choices=['present', 'absent'], + default='present', + ), + cluster=dict(default=None), + name=dict(default=None, required=True), + vms=dict(default=None, type='list'), + hosts=dict(default=None, type='list'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=[ + ('state', 'present', ['cluster']), + ], + ) + check_sdk(module) + + try: + connection = create_connection(module.params.pop('auth')) + affinity_labels_service = connection.system_service().affinity_labels_service() + affinity_labels_module = AffinityLabelsModule( + connection=connection, + module=module, + service=affinity_labels_service, + ) + + state = module.params['state'] + if state == 'present': + ret = affinity_labels_module.create() + elif state == 'absent': + ret = affinity_labels_module.remove() + + module.exit_json(**ret) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=False) + + +if __name__ == "__main__": + main() diff --git a/cloud/ovirt/ovirt_affinity_labels_facts.py b/cloud/ovirt/ovirt_affinity_labels_facts.py new file mode 100644 index 00000000000..0708b7d880b --- /dev/null +++ b/cloud/ovirt/ovirt_affinity_labels_facts.py @@ -0,0 +1,158 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +import fnmatch +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_full_argument_spec, +) + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ovirt_affinity_labels_facts +short_description: Retrieve facts about one or more oVirt affinity labels +author: "Ondra Machacek (@machacekondra)" +version_added: "2.3" +description: + - "Retrieve facts about one or more oVirt affinity labels." +notes: + - "This module creates a new top-level C(affinity_labels) fact, which + contains a list of affinity labels." +options: + name: + description: + - "Name of the affinity labels which should be listed." + vm: + description: + - "Name of the VM whose affinity labels should be listed." + host: + description: + - "Name of the host whose affinity labels should be listed." +extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather facts about all affinity labels whose names start with C(label): +- ovirt_affinity_labels_facts: + name: label* +- debug: + var: affinity_labels + +# Gather facts about all affinity labels which are assigned to VMs +# whose names start with C(postgres): +- ovirt_affinity_labels_facts: + vm: postgres* +- debug: + var: affinity_labels + +# Gather facts about all affinity labels which are assigned to hosts +# whose names start with C(west): +- ovirt_affinity_labels_facts: + host: west* +- debug: + var: affinity_labels + +# Gather facts about all affinity labels which are assigned to hosts +# whose names start with C(west) or to VMs whose names start with C(postgres): +- ovirt_affinity_labels_facts: + host: west* + vm: postgres* +- debug: + var: affinity_labels +''' + +RETURN = ''' +affinity_labels: + description: "List of dictionaries describing the affinity labels. Affinity label attributes are mapped to dictionary keys, + all affinity labels attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/affinity_label." + returned: On success.
+ type: list +''' + + +def main(): + argument_spec = ovirt_full_argument_spec( + name=dict(default=None), + host=dict(default=None), + vm=dict(default=None), + ) + module = AnsibleModule(argument_spec) + check_sdk(module) + + try: + connection = create_connection(module.params.pop('auth')) + affinity_labels_service = connection.system_service().affinity_labels_service() + labels = [] + all_labels = affinity_labels_service.list() + if module.params['name']: + labels.extend([ + l for l in all_labels + if fnmatch.fnmatch(l.name, module.params['name']) + ]) + if module.params['host']: + hosts_service = connection.system_service().hosts_service() + labels.extend([ + label + for label in all_labels + for host in connection.follow_link(label.hosts) + if fnmatch.fnmatch(hosts_service.service(host.id).get().name, module.params['host']) + ]) + if module.params['vm']: + vms_service = connection.system_service().vms_service() + labels.extend([ + label + for label in all_labels + for vm in connection.follow_link(label.vms) + if fnmatch.fnmatch(vms_service.service(vm.id).get().name, module.params['vm']) + ]) + + if not (module.params['vm'] or module.params['host'] or module.params['name']): + labels = all_labels + + module.exit_json( + changed=False, + ansible_facts=dict( + affinity_labels=[ + get_dict_of_struct(l) for l in labels + ], + ), + ) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=False) + + +if __name__ == '__main__': + main() diff --git a/cloud/ovirt/ovirt_auth.py b/cloud/ovirt/ovirt_auth.py new file mode 100644 index 00000000000..6f43fe8d029 --- /dev/null +++ b/cloud/ovirt/ovirt_auth.py @@ -0,0 +1,234 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +try: + import ovirtsdk4 as sdk +except ImportError: + pass + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ovirt_auth +short_description: "Module to manage authentication to oVirt." +author: "Ondra Machacek (@machacekondra)" +version_added: "2.2" +description: + - "This module authenticates to the oVirt engine and creates an SSO token, which should then be used in + all other oVirt modules, so the other modules don't need to perform login and logout. + This module returns an Ansible fact called I(ovirt_auth). Every module can use this + fact as the C(auth) parameter, to perform authentication." +options: + state: + default: present + choices: ['present', 'absent'] + description: + - "Specifies if a token should be created or revoked." + username: + required: True + description: + - "The name of the user. For example: I(admin@internal)." + password: + required: True + description: + - "The password of the user." + url: + required: True + description: + - "A string containing the base URL of the server.
+ For example: I(https://server.example.com/ovirt-engine/api)." + insecure: + required: False + description: + - "A boolean flag that indicates if the server TLS certificate and host name should be checked." + ca_file: + required: False + description: + - "A PEM file containing the trusted CA certificates. The + certificate presented by the server will be verified using these CA + certificates. If the C(ca_file) parameter is not set, the system-wide + CA certificate store is used." + timeout: + required: False + description: + - "The maximum total time to wait for the response, in + seconds. A value of zero (the default) means wait forever. If + the timeout expires before the response is received, an exception + is raised." + compress: + required: False + description: + - "A boolean flag indicating if the SDK should ask + the server to send compressed responses. The default is I(True). + Note that this is a hint for the server, and that it may return + uncompressed data even when this parameter is set to I(True)." + kerberos: + required: False + description: + - "A boolean flag indicating if Kerberos authentication + should be used instead of the default basic authentication." +notes: + - "Every time you use the ovirt_auth module to obtain a ticket, you also need to revoke the ticket + when you no longer need it; otherwise the ticket is only revoked by the engine when it expires. + For an example of how to achieve that, please take a look at the I(examples) section." +''' + +EXAMPLES = ''' +tasks: + - block: + # Create a vault with `ovirt_password` variable which stores your + # oVirt user's password, and include that yaml file with variable: + - include_vars: ovirt_password.yml + + - name: Obtain SSO token using username/password credentials + ovirt_auth: + url: https://ovirt.example.com/ovirt-engine/api + username: admin@internal + ca_file: ca.pem + password: "{{ ovirt_password }}" + + # Previous task generated I(ovirt_auth) fact, which you can later use + # in different modules as follows: + - ovirt_vms: + auth: "{{ ovirt_auth }}" + state: absent + name: myvm + + always: + - name: Always revoke the SSO token + ovirt_auth: + state: absent + ovirt_auth: "{{ ovirt_auth }}" +''' + +RETURN = ''' +ovirt_auth: + description: Authentication facts, needed to perform authentication to oVirt. + returned: success + type: dictionary + contains: + token: + description: SSO token which is used for connection to oVirt engine. + returned: success + type: string + sample: "kdfVWp9ZgeewBXV-iq3Js1-xQJZPSEQ334FLb3eksoEPRaab07DhZ8ED8ghz9lJd-MQ2GqtRIeqhvhCkrUWQPw" + url: + description: URL of the oVirt engine API endpoint. + returned: success + type: string + sample: "https://ovirt.example.com/ovirt-engine/api" + ca_file: + description: CA file, which is used to verify SSL/TLS connection. + returned: success + type: string + sample: "ca.pem" + insecure: + description: Flag indicating if insecure connection is used. + returned: success + type: bool + sample: False + timeout: + description: Number of seconds to wait for response. + returned: success + type: int + sample: 0 + compress: + description: Flag indicating if compression is used for connection. + returned: success + type: bool + sample: True + kerberos: + description: Flag indicating if Kerberos is used for authentication.
+ returned: success + type: bool + sample: False +''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + url=dict(default=None), + username=dict(default=None), + password=dict(default=None, no_log=True), + ca_file=dict(default=None, type='path'), + insecure=dict(required=False, type='bool', default=False), + timeout=dict(required=False, type='int', default=0), + compress=dict(required=False, type='bool', default=True), + kerberos=dict(required=False, type='bool', default=False), + state=dict(default='present', choices=['present', 'absent']), + ovirt_auth=dict(required=False, type='dict'), + ), + required_if=[ + ('state', 'absent', ['ovirt_auth']), + ('state', 'present', ['username', 'password', 'url']), + ], + ) + check_sdk(module) + + state = module.params.get('state') + if state == 'present': + params = module.params + elif state == 'absent': + params = module.params['ovirt_auth'] + + connection = sdk.Connection( + url=params.get('url'), + username=params.get('username'), + password=params.get('password'), + ca_file=params.get('ca_file'), + insecure=params.get('insecure'), + timeout=params.get('timeout'), + compress=params.get('compress'), + kerberos=params.get('kerberos'), + token=params.get('token'), + ) + try: + token = connection.authenticate() + module.exit_json( + changed=False, + ansible_facts=dict( + ovirt_auth=dict( + token=token, + url=params.get('url'), + ca_file=params.get('ca_file'), + insecure=params.get('insecure'), + timeout=params.get('timeout'), + compress=params.get('compress'), + kerberos=params.get('kerberos'), + ) if state == 'present' else dict() + ) + ) + except Exception as e: + module.fail_json(msg="Error: %s" % e) + finally: + # Close the connection; the token is revoked only when state is absent + connection.close(logout=state == 'absent') + + +from ansible.module_utils.basic import * +from ansible.module_utils.ovirt import * +if __name__ == "__main__": + main() diff --git a/cloud/ovirt/ovirt_clusters.py b/cloud/ovirt/ovirt_clusters.py new file mode 100644 index 00000000000..c40ffcddd8f --- /dev/null +++ b/cloud/ovirt/ovirt_clusters.py @@ -0,0 +1,564 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +import traceback + +try: + import ovirtsdk4.types as otypes +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ovirt import ( + BaseModule, + check_sdk, + create_connection, + equal, + ovirt_full_argument_spec, + search_by_name, +) + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ovirt_clusters +short_description: Module to manage clusters in oVirt +version_added: "2.3" +author: "Ondra Machacek (@machacekondra)" +description: + - "Module to manage clusters in oVirt" +options: + name: + description: + - "Name of the cluster to manage."
+ required: true + state: + description: + - "Should the cluster be present or absent" + choices: ['present', 'absent'] + default: present + datacenter: + description: + - "Datacenter name where the cluster resides." + description: + description: + - "Description of the cluster." + comment: + description: + - "Comment of the cluster." + network: + description: + - "Management network of cluster to access cluster hosts." + ballooning: + description: + - "If I(True), enables memory balloon optimization. Memory balloon is used to + re-distribute / reclaim the host memory based on VM needs + in a dynamic way." + virt: + description: + - "If I(True), hosts in this cluster will be used to run virtual machines." + gluster: + description: + - "If I(True), hosts in this cluster will be used as Gluster Storage + server nodes, and not for running virtual machines." + - "By default the cluster is created for virtual machine hosts." + threads_as_cores: + description: + - "If I(True), the exposed host threads will be treated as cores + which can be utilized by virtual machines." + ksm: + description: + - "If I(True), MoM enables Kernel Same-page Merging (KSM) when + necessary and when it can yield a memory saving benefit that + outweighs its CPU cost." + ksm_numa: + description: + - "If I(True), enables KSM C(ksm) for best performance inside NUMA nodes." + ha_reservation: + description: + - "If I(True), enables oVirt to monitor cluster capacity for highly + available virtual machines." + trusted_service: + description: + - "If I(True), enables integration with an OpenAttestation server." + vm_reason: + description: + - "If I(True), enables an optional reason field when a virtual machine + is shut down from the Manager, allowing the administrator to + provide an explanation for the maintenance." + host_reason: + description: + - "If I(True), enables an optional reason field when a host is placed + into maintenance mode from the Manager, allowing the administrator + to provide an explanation for the maintenance." + memory_policy: + description: + - "I(disabled) - Disables memory page sharing." + - "I(server) - Sets the memory page sharing threshold to 150% of the system memory on each host." + - "I(desktop) - Sets the memory page sharing threshold to 200% of the system memory on each host." + choices: ['disabled', 'server', 'desktop']
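+# Reviewer note (derived from _get_memory_policy() in this module): these +# choices map to memory over-commit percentages of 100% (disabled), +# 150% (server) and 200% (desktop).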
+ - "Following values are supported:" + - "C(do_not_migrate) - Prevents virtual machines from being migrated. " + - "C(migrate) - Migrates all virtual machines in order of their defined priority." + - "C(migrate_highly_available) - Migrates only highly available virtual machines to prevent overloading other hosts." + choices: ['do_not_migrate', 'migrate', 'migrate_highly_available'] + migration_bandwidth: + description: + - "The bandwidth settings define the maximum bandwidth of both outgoing and incoming migrations per host." + - "Following bandwith options are supported:" + - "C(auto) - Bandwidth is copied from the I(rate limit) [Mbps] setting in the data center host network QoS." + - "C(hypervisor_default) - Bandwidth is controlled by local VDSM setting on sending host." + - "C(custom) - Defined by user (in Mbps)." + choices: ['auto', 'hypervisor_default', 'custom'] + migration_bandwidth_limit: + description: + - "Set the I(custom) migration bandwidth limit." + - "This parameter is used only when C(migration_bandwidth) is I(custom)." + migration_auto_converge: + description: + - "If (True) auto-convergence is used during live migration of virtual machines." + - "Used only when C(migration_policy) is set to I(legacy)." + - "Following options are supported:" + - "C(true) - Override the global setting to I(true)." + - "C(false) - Override the global setting to I(false)." + - "C(inherit) - Use value which is set globally." + choices: ['true', 'false', 'inherit'] + migration_compressed: + description: + - "If (True) compression is used during live migration of the virtual machine." + - "Used only when C(migration_policy) is set to I(legacy)." + - "Following options are supported:" + - "C(true) - Override the global setting to I(true)." + - "C(false) - Override the global setting to I(false)." + - "C(inherit) - Use value which is set globally." + choices: ['true', 'false', 'inherit'] + migration_policy: + description: + - "A migration policy defines the conditions for live migrating + virtual machines in the event of host failure." + - "Following policies are supported:" + - "C(legacy) - Legacy behavior of 3.6 version." + - "C(minimal_downtime) - Virtual machines should not experience any significant downtime." + - "C(suspend_workload) - Virtual machines may experience a more significant downtime." + choices: ['legacy', 'minimal_downtime', 'suspend_workload'] + serial_policy: + description: + - "Specify a serial number policy for the virtual machines in the cluster." + - "Following options are supported:" + - "C(vm) - Sets the virtual machine's UUID as its serial number." + - "C(host) - Sets the host's UUID as the virtual machine's serial number." + - "C(custom) - Allows you to specify a custom serial number in C(serial_policy_value)." + serial_policy_value: + description: + - "Allows you to specify a custom serial number." + - "This parameter is used only when C(serial_policy) is I(custom)." + scheduling_policy: + description: + - "Name of the scheduling policy to be used for cluster." + cpu_arch: + description: + - "CPU architecture of cluster." + choices: ['x86_64', 'ppc64', 'undefined'] + cpu_type: + description: + - "CPU codename. For example I(Intel SandyBridge Family)." + switch_type: + description: + - "Type of switch to be used by all networks in given cluster. + Either I(legacy) which is using linux brigde or I(ovs) using + Open vSwitch." + choices: ['legacy', 'ovs'] + compatibility_version: + description: + - "The compatibility version of the cluster. 
All hosts in this + cluster must support at least this compatibility version." +extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Create cluster +- ovirt_clusters: + datacenter: mydatacenter + name: mycluster + cpu_type: Intel SandyBridge Family + description: mycluster + compatibility_version: 4.0 + +# Create virt service cluster: +- ovirt_clusters: + datacenter: mydatacenter + name: mycluster + cpu_type: Intel Nehalem Family + description: mycluster + switch_type: legacy + compatibility_version: 4.0 + ballooning: true + gluster: false + threads_as_cores: true + ha_reservation: true + trusted_service: false + host_reason: false + vm_reason: true + ksm_numa: true + memory_policy: server + rng_sources: + - hwrng + - random + +# Remove cluster +- ovirt_clusters: + state: absent + name: mycluster +''' + +RETURN = ''' +id: + description: ID of the cluster which is managed + returned: On success if cluster is found. + type: str + sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c +cluster: + description: "Dictionary of all the cluster attributes. Cluster attributes can be found on your oVirt instance + at following url: https://ovirt.example.com/ovirt-engine/api/model#types/cluster." + returned: On success if cluster is found. +''' + + +class ClustersModule(BaseModule): + + def __get_major(self, full_version): + if full_version is None: + return None + if isinstance(full_version, otypes.Version): + return full_version.major + return int(full_version.split('.')[0]) + + def __get_minor(self, full_version): + if full_version is None: + return None + if isinstance(full_version, otypes.Version): + return full_version.minor + return int(full_version.split('.')[1]) + + def param(self, name, default=None): + return self._module.params.get(name, default) + + def _get_memory_policy(self): + memory_policy = self.param('memory_policy') + if memory_policy == 'desktop': + return 200 + elif memory_policy == 'server': + return 150 + elif memory_policy == 'disabled': + return 100 + + def _get_policy_id(self): + # These are hardcoded IDs, once there is API, please fix this. 
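+        # (Reviewer note: the UUIDs below are the engine's built-in migration +        # policy IDs; they are assumed to be stable, since, as the comment above +        # says, there is no public API to look them up yet.)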
+ # legacy - 00000000-0000-0000-0000-000000000000 + # minimal downtime - 80554327-0569-496b-bdeb-fcbbf52b827b + # suspend workload if needed - 80554327-0569-496b-bdeb-fcbbf52b827c + migration_policy = self.param('migration_policy') + if migration_policy == 'legacy': + return '00000000-0000-0000-0000-000000000000' + elif migration_policy == 'minimal_downtime': + return '80554327-0569-496b-bdeb-fcbbf52b827b' + elif migration_policy == 'suspend_workload': + return '80554327-0569-496b-bdeb-fcbbf52b827c' + + def _get_sched_policy(self): + sched_policy = None + if self.param('scheduling_policy'): + sched_policies_service = self._connection.system_service().scheduling_policies_service() + sched_policy = search_by_name(sched_policies_service, self.param('scheduling_policy')) + if not sched_policy: + raise Exception("Scheduling policy '%s' was not found" % self.param('scheduling_policy')) + + return sched_policy + + def build_entity(self): + sched_policy = self._get_sched_policy() + return otypes.Cluster( + name=self.param('name'), + comment=self.param('comment'), + description=self.param('description'), + ballooning_enabled=self.param('ballooning'), + gluster_service=self.param('gluster'), + virt_service=self.param('virt'), + threads_as_cores=self.param('threads_as_cores'), + ha_reservation=self.param('ha_reservation'), + trusted_service=self.param('trusted_service'), + optional_reason=self.param('vm_reason'), + maintenance_reason_required=self.param('host_reason'), + scheduling_policy=otypes.SchedulingPolicy( + id=sched_policy.id, + ) if sched_policy else None, + serial_number=otypes.SerialNumber( + policy=otypes.SerialNumberPolicy(self.param('serial_policy')), + value=self.param('serial_policy_value'), + ) if ( + self.param('serial_policy') is not None or + self.param('serial_policy_value') is not None + ) else None, + migration=otypes.MigrationOptions( + auto_converge=otypes.InheritableBoolean( + self.param('migration_auto_converge'), + ) if self.param('migration_auto_converge') else None, + bandwidth=otypes.MigrationBandwidth( + assignment_method=otypes.MigrationBandwidthAssignmentMethod( + self.param('migration_bandwidth'), + ) if self.param('migration_bandwidth') else None, + custom_value=self.param('migration_bandwidth_limit'), + ) if ( + self.param('migration_bandwidth') or + self.param('migration_bandwidth_limit') + ) else None, + compressed=otypes.InheritableBoolean( + self.param('migration_compressed'), + ) if self.param('migration_compressed') else None, + policy=otypes.MigrationPolicy( + id=self._get_policy_id() + ) if self.param('migration_policy') else None, + ) if ( + self.param('migration_bandwidth') is not None or + self.param('migration_bandwidth_limit') is not None or + self.param('migration_auto_converge') is not None or + self.param('migration_compressed') is not None or + self.param('migration_policy') is not None + ) else None, + error_handling=otypes.ErrorHandling( + on_error=otypes.MigrateOnError( + self.param('resilience_policy') + ), + ) if self.param('resilience_policy') else None, + fencing_policy=otypes.FencingPolicy( + enabled=( + self.param('fence_enabled') or + self.param('fence_skip_if_connectivity_broken') or + self.param('fence_skip_if_sd_active') + ), + skip_if_connectivity_broken=otypes.SkipIfConnectivityBroken( + enabled=self.param('fence_skip_if_connectivity_broken'), + threshold=self.param('fence_connectivity_threshold'), + ) if ( + self.param('fence_skip_if_connectivity_broken') is not None or + self.param('fence_connectivity_threshold') is not None + ) else
None, + skip_if_sd_active=otypes.SkipIfSdActive( + enabled=self.param('fence_skip_if_sd_active'), + ) if self.param('fence_skip_if_sd_active') else None, + ) if ( + self.param('fence_enabled') is not None or + self.param('fence_skip_if_sd_active') is not None or + self.param('fence_skip_if_connectivity_broken') is not None or + self.param('fence_connectivity_threshold') is not None + ) else None, + display=otypes.Display( + proxy=self.param('spice_proxy'), + ) if self.param('spice_proxy') else None, + required_rng_sources=[ + otypes.RngSource(rng) for rng in self.param('rng_sources') + ] if self.param('rng_sources') else None, + memory_policy=otypes.MemoryPolicy( + over_commit=otypes.MemoryOverCommit( + percent=self._get_memory_policy(), + ), + ) if self.param('memory_policy') else None, + ksm=otypes.Ksm( + enabled=self.param('ksm') or self.param('ksm_numa'), + merge_across_nodes=not self.param('ksm_numa'), + ) if ( + self.param('ksm_numa') is not None or + self.param('ksm') is not None + ) else None, + data_center=otypes.DataCenter( + name=self.param('datacenter'), + ) if self.param('datacenter') else None, + management_network=otypes.Network( + name=self.param('network'), + ) if self.param('network') else None, + cpu=otypes.Cpu( + architecture=self.param('cpu_arch'), + type=self.param('cpu_type'), + ) if ( + self.param('cpu_arch') or self.param('cpu_type') + ) else None, + version=otypes.Version( + major=self.__get_major(self.param('compatibility_version')), + minor=self.__get_minor(self.param('compatibility_version')), + ) if self.param('compatibility_version') else None, + switch_type=otypes.SwitchType( + self.param('switch_type') + ) if self.param('switch_type') else None, + ) + + def update_check(self, entity): + return ( + equal(self.param('comment'), entity.comment) and + equal(self.param('description'), entity.description) and + equal(self.param('switch_type'), str(entity.switch_type)) and + equal(self.param('cpu_arch'), str(entity.cpu.architecture)) and + equal(self.param('cpu_type'), entity.cpu.type) and + equal(self.param('ballooning'), entity.ballooning_enabled) and + equal(self.param('gluster'), entity.gluster_service) and + equal(self.param('virt'), entity.virt_service) and + equal(self.param('threads_as_cores'), entity.threads_as_cores) and + equal(self.param('ksm_numa'), not entity.ksm.merge_across_nodes and entity.ksm.enabled) and + equal(self.param('ksm'), entity.ksm.merge_across_nodes and entity.ksm.enabled) and + equal(self.param('ha_reservation'), entity.ha_reservation) and + equal(self.param('trusted_service'), entity.trusted_service) and + equal(self.param('host_reason'), entity.maintenance_reason_required) and + equal(self.param('vm_reason'), entity.optional_reason) and + equal(self.param('spice_proxy'), getattr(entity.display, 'proxy', None)) and + equal(self.param('fence_enabled'), entity.fencing_policy.enabled) and + equal(self.param('fence_skip_if_sd_active'), entity.fencing_policy.skip_if_sd_active.enabled) and + equal(self.param('fence_skip_if_connectivity_broken'), entity.fencing_policy.skip_if_connectivity_broken.enabled) and + equal(self.param('fence_connectivity_threshold'), entity.fencing_policy.skip_if_connectivity_broken.threshold) and + equal(self.param('resilience_policy'), str(entity.error_handling.on_error)) and + equal(self.param('migration_bandwidth'), str(entity.migration.bandwidth.assignment_method)) and + equal(self.param('migration_auto_converge'), str(entity.migration.auto_converge)) and + equal(self.param('migration_compressed'), 
str(entity.migration.compressed)) and + equal(self.param('serial_policy'), str(entity.serial_number.policy)) and + equal(self.param('serial_policy_value'), entity.serial_number.value) and + equal(self.param('scheduling_policy'), getattr(self._get_sched_policy(), 'name', None)) and + equal(self._get_policy_id(), entity.migration.policy.id) and + equal(self._get_memory_policy(), entity.memory_policy.over_commit.percent) and + equal(self.__get_minor(self.param('compatibility_version')), self.__get_minor(entity.version)) and + equal(self.__get_major(self.param('compatibility_version')), self.__get_major(entity.version)) and + equal( + self.param('migration_bandwidth_limit') if self.param('migration_bandwidth') == 'custom' else None, + entity.migration.bandwidth.custom_value + ) and + equal( + sorted(self.param('rng_sources')) if self.param('rng_sources') else None, + sorted([ + str(source) for source in entity.required_rng_sources + ]) + ) + ) + + +def main(): + argument_spec = ovirt_full_argument_spec( + state=dict( + choices=['present', 'absent'], + default='present', + ), + name=dict(default=None, required=True), + ballooning=dict(default=None, type='bool', aliases=['balloon']), + gluster=dict(default=None, type='bool'), + virt=dict(default=None, type='bool'), + threads_as_cores=dict(default=None, type='bool'), + ksm_numa=dict(default=None, type='bool'), + ksm=dict(default=None, type='bool'), + ha_reservation=dict(default=None, type='bool'), + trusted_service=dict(default=None, type='bool'), + vm_reason=dict(default=None, type='bool'), + host_reason=dict(default=None, type='bool'), + memory_policy=dict(default=None, choices=['disabled', 'server', 'desktop']), + rng_sources=dict(default=None, type='list'), + spice_proxy=dict(default=None), + fence_enabled=dict(default=None, type='bool'), + fence_skip_if_sd_active=dict(default=None, type='bool'), + fence_skip_if_connectivity_broken=dict(default=None, type='bool'), + fence_connectivity_threshold=dict(default=None, type='int'), + resilience_policy=dict(default=None, choices=['migrate_highly_available', 'migrate', 'do_not_migrate']), + migration_bandwidth=dict(default=None, choices=['auto', 'hypervisor_default', 'custom']), + migration_bandwidth_limit=dict(default=None, type='int'), + migration_auto_converge=dict(default=None, choices=['true', 'false', 'inherit']), + migration_compressed=dict(default=None, choices=['true', 'false', 'inherit']), + migration_policy=dict(default=None, choices=['legacy', 'minimal_downtime', 'suspend_workload']), + serial_policy=dict(default=None, choices=['vm', 'host', 'custom']), + serial_policy_value=dict(default=None), + scheduling_policy=dict(default=None), + datacenter=dict(default=None), + description=dict(default=None), + comment=dict(default=None), + network=dict(default=None), + cpu_arch=dict(default=None, choices=['ppc64', 'undefined', 'x86_64']), + cpu_type=dict(default=None), + switch_type=dict(default=None, choices=['legacy', 'ovs']), + compatibility_version=dict(default=None), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + check_sdk(module) + + try: + connection = create_connection(module.params.pop('auth')) + clusters_service = connection.system_service().clusters_service() + clusters_module = ClustersModule( + connection=connection, + module=module, + service=clusters_service, + ) + + state = module.params['state'] + if state == 'present': + ret = clusters_module.create() + elif state == 'absent': + ret = clusters_module.remove() + + module.exit_json(**ret) + except
Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=False) + + +if __name__ == "__main__": + main() diff --git a/cloud/ovirt/ovirt_clusters_facts.py b/cloud/ovirt/ovirt_clusters_facts.py new file mode 100644 index 00000000000..edcf680bee6 --- /dev/null +++ b/cloud/ovirt/ovirt_clusters_facts.py @@ -0,0 +1,103 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_full_argument_spec, +) + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ovirt_clusters_facts +short_description: Retrieve facts about one or more oVirt clusters +author: "Ondra Machacek (@machacekondra)" +version_added: "2.3" +description: + - "Retrieve facts about one or more oVirt clusters." +notes: + - "This module creates a new top-level C(ovirt_clusters) fact, which + contains a list of clusters." +options: + pattern: + description: + - "Search term which is accepted by oVirt search backend." + - "For example to search cluster X from datacenter Y use the following pattern: + name=X and datacenter=Y" +extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather facts about all clusters whose names start with C(production): +- ovirt_clusters_facts: + pattern: name=production* +- debug: + var: ovirt_clusters +''' + +RETURN = ''' +ovirt_clusters: + description: "List of dictionaries describing the clusters. Cluster attributes are mapped to dictionary keys, + all clusters attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/cluster." + returned: On success.
+ type: list +''' + + +def main(): + argument_spec = ovirt_full_argument_spec( + pattern=dict(default='', required=False), + ) + module = AnsibleModule(argument_spec) + check_sdk(module) + + try: + connection = create_connection(module.params.pop('auth')) + clusters_service = connection.system_service().clusters_service() + clusters = clusters_service.list(search=module.params['pattern']) + module.exit_json( + changed=False, + ansible_facts=dict( + ovirt_clusters=[ + get_dict_of_struct(c) for c in clusters + ], + ), + ) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=False) + + +if __name__ == '__main__': + main() diff --git a/cloud/ovirt/ovirt_datacenters.py b/cloud/ovirt/ovirt_datacenters.py new file mode 100644 index 00000000000..ef63709a5c9 --- /dev/null +++ b/cloud/ovirt/ovirt_datacenters.py @@ -0,0 +1,221 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +import traceback + +try: + import ovirtsdk4.types as otypes +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ovirt import ( + BaseModule, + check_sdk, + check_params, + create_connection, + equal, + ovirt_full_argument_spec, + search_by_name, +) + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ovirt_datacenters +short_description: Module to manage data centers in oVirt +version_added: "2.3" +author: "Ondra Machacek (@machacekondra)" +description: + - "Module to manage data centers in oVirt" +options: + name: + description: + - "Name of the data center to manage." + required: true + state: + description: + - "Should the data center be present or absent" + choices: ['present', 'absent'] + default: present + description: + description: + - "Description of the data center." + comment: + description: + - "Comment of the data center." + local: + description: + - "I(True) if the data center should be local, I(False) if it should be shared." + - "Default value is set by engine." + compatibility_version: + description: + - "Compatibility version of the data center." + quota_mode: + description: + - "Quota mode of the data center. One of I(disabled), I(audit) or I(enabled)" + choices: ['disabled', 'audit', 'enabled'] + mac_pool: + description: + - "MAC pool to be used by this datacenter." + - "IMPORTANT: This option is deprecated in oVirt 4.1. You should + use C(mac_pool) in C(ovirt_clusters) module, as MAC pools are + set per cluster since 4.1."
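+# Reviewer note (a sketch based on __get_major/__get_minor below): +# C(compatibility_version) is parsed as '<major>.<minor>', so a value such as +# '4.0' becomes otypes.Version(major=4, minor=0); a value without a dot would +# raise an error, so always pass both components.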
+extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Create datacenter +- ovirt_datacenters: + name: mydatacenter + local: True + compatibility_version: 4.0 + quota_mode: enabled + +# Remove datacenter +- ovirt_datacenters: + state: absent + name: mydatacenter +''' + +RETURN = ''' +id: + description: "ID of the managed datacenter" + returned: "On success if datacenter is found." + type: str + sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c +data_center: + description: "Dictionary of all the datacenter attributes. Datacenter attributes can be found on your oVirt instance + at following url: https://ovirt.example.com/ovirt-engine/api/model#types/datacenter." + returned: "On success if datacenter is found." +''' + + +class DatacentersModule(BaseModule): + + def __get_major(self, full_version): + if full_version is None: + return None + if isinstance(full_version, otypes.Version): + return full_version.major + return int(full_version.split('.')[0]) + + def __get_minor(self, full_version): + if full_version is None: + return None + if isinstance(full_version, otypes.Version): + return full_version.minor + return int(full_version.split('.')[1]) + + def _get_mac_pool(self): + mac_pool = None + if self._module.params.get('mac_pool'): + mac_pool = search_by_name( + self._connection.system_service().mac_pools_service(), + self._module.params.get('mac_pool'), + ) + + return mac_pool + + def build_entity(self): + return otypes.DataCenter( + name=self._module.params['name'], + comment=self._module.params['comment'], + description=self._module.params['description'], + mac_pool=otypes.MacPool( + id=getattr(self._get_mac_pool(), 'id', None), + ) if self._module.params.get('mac_pool') else None, + quota_mode=otypes.QuotaModeType( + self._module.params['quota_mode'] + ) if self._module.params['quota_mode'] else None, + local=self._module.params['local'], + version=otypes.Version( + major=self.__get_major(self._module.params['compatibility_version']), + minor=self.__get_minor(self._module.params['compatibility_version']), + ) if self._module.params['compatibility_version'] else None, + ) + + def update_check(self, entity): + minor = self.__get_minor(self._module.params.get('compatibility_version')) + major = self.__get_major(self._module.params.get('compatibility_version')) + return ( + equal(getattr(self._get_mac_pool(), 'id', None), getattr(entity.mac_pool, 'id', None)) and + equal(self._module.params.get('comment'), entity.comment) and + equal(self._module.params.get('description'), entity.description) and + equal(self._module.params.get('quota_mode'), str(entity.quota_mode)) and + equal(self._module.params.get('local'), entity.local) and + equal(minor, self.__get_minor(entity.version)) and + equal(major, self.__get_major(entity.version)) + ) + + +def main(): + argument_spec = ovirt_full_argument_spec( + state=dict( + choices=['present', 'absent'], + default='present', + ), + name=dict(default=None, required=True), + description=dict(default=None), + local=dict(type='bool'), + compatibility_version=dict(default=None), + quota_mode=dict(choices=['disabled', 'audit', 'enabled']), + comment=dict(default=None), + mac_pool=dict(default=None), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + check_sdk(module) + check_params(module) + + try: + connection = create_connection(module.params.pop('auth')) + data_centers_service = 
connection.system_service().data_centers_service() + datacenters_module = DatacentersModule( + connection=connection, + module=module, + service=data_centers_service, + ) + + state = module.params['state'] + if state == 'present': + ret = datacenters_module.create() + elif state == 'absent': + ret = datacenters_module.remove() + + module.exit_json(**ret) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=False) + + +if __name__ == "__main__": + main() diff --git a/cloud/ovirt/ovirt_datacenters_facts.py b/cloud/ovirt/ovirt_datacenters_facts.py new file mode 100644 index 00000000000..6f812951584 --- /dev/null +++ b/cloud/ovirt/ovirt_datacenters_facts.py @@ -0,0 +1,102 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_full_argument_spec, +) + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ovirt_datacenters_facts +short_description: Retrieve facts about one or more oVirt datacenters +author: "Ondra Machacek (@machacekondra)" +version_added: "2.3" +description: + - "Retrieve facts about one or more oVirt datacenters." +notes: + - "This module creates a new top-level C(ovirt_datacenters) fact, which + contains a list of datacenters." +options: + pattern: + description: + - "Search term which is accepted by oVirt search backend." + - "For example to search datacenter I(X) use the following pattern: I(name=X)" +extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather facts about all data centers whose names start with C(production): +- ovirt_datacenters_facts: + pattern: name=production* +- debug: + var: ovirt_datacenters +''' + +RETURN = ''' +ovirt_datacenters: + description: "List of dictionaries describing the datacenters. Datacenter attributes are mapped to dictionary keys, + all datacenters attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/data_center." + returned: On success.
+ type: list +''' + + +def main(): + argument_spec = ovirt_full_argument_spec( + pattern=dict(default='', required=False), + ) + module = AnsibleModule(argument_spec) + check_sdk(module) + + try: + connection = create_connection(module.params.pop('auth')) + datacenters_service = connection.system_service().data_centers_service() + datacenters = datacenters_service.list(search=module.params['pattern']) + module.exit_json( + changed=False, + ansible_facts=dict( + ovirt_datacenters=[ + get_dict_of_struct(c) for c in datacenters + ], + ), + ) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=False) + + +if __name__ == '__main__': + main() diff --git a/cloud/ovirt/ovirt_disks.py b/cloud/ovirt/ovirt_disks.py new file mode 100644 index 00000000000..7730242afbf --- /dev/null +++ b/cloud/ovirt/ovirt_disks.py @@ -0,0 +1,322 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +try: + import ovirtsdk4 as sdk + import ovirtsdk4.types as otypes +except ImportError: + pass + +from ansible.module_utils.ovirt import * + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ovirt_disks +short_description: "Module to manage Virtual Machine and floating disks in oVirt." +version_added: "2.2" +author: "Ondra Machacek (@machacekondra)" +description: + - "Module to manage Virtual Machine and floating disks in oVirt." +options: + id: + description: + - "ID of the disk to manage. Either C(id) or C(name) is required." + name: + description: + - "Name of the disk to manage. Either C(id) or C(name)/C(alias) is required." + aliases: ['alias'] + vm_name: + description: + - "Name of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)." + vm_id: + description: + - "ID of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)." + state: + description: + - "Should the Virtual Machine disk be present/absent/attached/detached." + choices: ['present', 'absent', 'attached', 'detached'] + default: 'present' + size: + description: + - "Size of the disk. Size should be specified using IEC standard units. For example 10GiB, 1024MiB, etc." + interface: + description: + - "Driver of the storage interface." + choices: ['virtio', 'ide', 'virtio_scsi'] + default: 'virtio' + format: + description: + - Specify format of the disk. + - If I(cow) format is used, the disk will be created as sparse, so space will be allocated for the volume as needed, also known as I(thin provision). + - If I(raw) format is used, disk storage will be allocated right away, also known as I(preallocated). + - Note that this option isn't idempotent as it's not currently possible to change format of the disk via API. 
+ choices: ['raw', 'cow'] + storage_domain: + description: + - "Storage domain name where disk should be created. By default storage is chosen by oVirt engine." + profile: + description: + - "Disk profile name to be attached to disk. By default profile is chosen by oVirt engine." + bootable: + description: + - "I(True) if the disk should be bootable. By default when disk is created it isn't bootable." + shareable: + description: + - "I(True) if the disk should be shareable. By default when disk is created it isn't shareable." + logical_unit: + description: + - "Dictionary which describes LUN to be directly attached to VM:" + - "C(address) - Address of the storage server. Used by iSCSI." + - "C(port) - Port of the storage server. Used by iSCSI." + - "C(target) - iSCSI target." + - "C(lun_id) - LUN id." + - "C(username) - CHAP Username to be used to access storage server. Used by iSCSI." + - "C(password) - CHAP Password of the user to be used to access storage server. Used by iSCSI." + - "C(storage_type) - Storage type either I(fcp) or I(iscsi)." +extends_documentation_fragment: ovirt +''' + + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Create and attach new disk to VM +- ovirt_disks: + name: myvm_disk + vm_name: rhel7 + size: 10GiB + format: cow + interface: virtio + +# Attach logical unit to VM rhel7 +- ovirt_disks: + vm_name: rhel7 + logical_unit: + target: iqn.2016-08-09.brq.str-01:omachace + id: 1IET_000d0001 + address: 10.34.63.204 + interface: virtio + +# Detach disk from VM +- ovirt_disks: + state: detached + name: myvm_disk + vm_name: rhel7 + size: 10GiB + format: cow + interface: virtio +''' + + +RETURN = ''' +id: + description: "ID of the managed disk" + returned: "On success if disk is found." + type: str + sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c +disk: + description: "Dictionary of all the disk attributes. Disk attributes can be found on your oVirt instance + at following url: https://ovirt.example.com/ovirt-engine/api/model#types/disk." + returned: "On success if disk is found and C(vm_id) or C(vm_name) wasn't passed." + +disk_attachment: + description: "Dictionary of all the disk attachment attributes. Disk attachment attributes can be found + on your oVirt instance at following url: + https://ovirt.example.com/ovirt-engine/api/model#types/disk_attachment." + returned: "On success if disk is found and C(vm_id) or C(vm_name) was passed and VM was found." +''' + + + +def _search_by_lun(disks_service, lun_id): + """ + Find disk by LUN ID. 
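+ + Illustrative usage (a sketch; assumes a connected 'disks_service', and the + LUN id shown is a hypothetical value taken from the examples above): + + disk = _search_by_lun(disks_service, '1IET_000d0001') + if disk is not None: + print(disk.id)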
+ """ + res = [ + disk for disk in disks_service.list(search='disk_type=lun') if ( + disk.lun_storage.id == lun_id + ) + ] + return res[0] if res else None + + +class DisksModule(BaseModule): + + def build_entity(self): + logical_unit = self._module.params.get('logical_unit') + return otypes.Disk( + id=self._module.params.get('id'), + name=self._module.params.get('name'), + description=self._module.params.get('description'), + format=otypes.DiskFormat( + self._module.params.get('format') + ) if self._module.params.get('format') else None, + sparse=False if self._module.params.get('format') == 'raw' else True, + provisioned_size=convert_to_bytes( + self._module.params.get('size') + ), + storage_domains=[ + otypes.StorageDomain( + name=self._module.params.get('storage_domain'), + ), + ], + shareable=self._module.params.get('shareable'), + lun_storage=otypes.HostStorage( + type=otypes.StorageType( + logical_unit.get('storage_type', 'iscsi') + ), + logical_units=[ + otypes.LogicalUnit( + address=logical_unit.get('address'), + port=logical_unit.get('port', 3260), + target=logical_unit.get('target'), + id=logical_unit.get('id'), + username=logical_unit.get('username'), + password=logical_unit.get('password'), + ) + ], + ) if logical_unit else None, + ) + + def update_check(self, entity): + return ( + equal(self._module.params.get('description'), entity.description) and + equal(convert_to_bytes(self._module.params.get('size')), entity.provisioned_size) and + equal(self._module.params.get('shareable'), entity.shareable) + ) + + +class DiskAttachmentsModule(DisksModule): + + def build_entity(self): + return otypes.DiskAttachment( + disk=super(DiskAttachmentsModule, self).build_entity(), + interface=otypes.DiskInterface( + self._module.params.get('interface') + ) if self._module.params.get('interface') else None, + bootable=self._module.params.get('bootable'), + active=True, + ) + + def update_check(self, entity): + return ( + equal(self._module.params.get('interface'), str(entity.interface)) and + equal(self._module.params.get('bootable'), entity.bootable) + ) + + +def main(): + argument_spec = ovirt_full_argument_spec( + state=dict( + choices=['present', 'absent', 'attached', 'detached'], + default='present' + ), + id=dict(default=None), + name=dict(default=None, aliases=['alias']), + vm_name=dict(default=None), + vm_id=dict(default=None), + size=dict(default=None), + interface=dict(default=None,), + storage_domain=dict(default=None), + profile=dict(default=None), + format=dict(default=None, choices=['raw', 'cow']), + bootable=dict(default=None, type='bool'), + shareable=dict(default=None, type='bool'), + logical_unit=dict(default=None, type='dict'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + check_sdk(module) + check_params(module) + + try: + disk = None + state = module.params['state'] + connection = create_connection(module.params.pop('auth')) + disks_service = connection.system_service().disks_service() + disks_module = DisksModule( + connection=connection, + module=module, + service=disks_service, + ) + + lun = module.params.get('logical_unit') + if lun: + disk = _search_by_lun(disks_service, lun.get('id')) + + ret = None + # First take care of creating the VM, if needed: + if state == 'present' or state == 'detached' or state == 'attached': + ret = disks_module.create( + entity=disk, + result_state=otypes.DiskStatus.OK if lun is None else None, + ) + # We need to pass ID to the module, so in case we want detach/attach disk + # we have this 
ID specified to attach/detach method: + module.params['id'] = ret['id'] if disk is None else disk.id + elif state == 'absent': + ret = disks_module.remove() + + # If a VM was passed, attach/detach disks to/from the VM (the parentheses + # matter: either VM identifier may be given, but never when state is 'absent'): + if (module.params.get('vm_id') is not None or module.params.get('vm_name') is not None) and state != 'absent': + vms_service = connection.system_service().vms_service() + + # If `vm_id` isn't specified, find VM by name: + vm_id = module.params['vm_id'] + if vm_id is None: + vm_id = getattr(search_by_name(vms_service, module.params['vm_name']), 'id', None) + + if vm_id is None: + module.fail_json( + msg="VM doesn't exist, please create it first." + ) + + disk_attachments_service = vms_service.vm_service(vm_id).disk_attachments_service() + disk_attachments_module = DiskAttachmentsModule( + connection=connection, + module=module, + service=disk_attachments_service, + changed=ret['changed'] if ret else False, + ) + + if state == 'present' or state == 'attached': + ret = disk_attachments_module.create() + elif state == 'detached': + ret = disk_attachments_module.remove() + + module.exit_json(**ret) + except Exception as e: + module.fail_json(msg=str(e)) + finally: + connection.close(logout=False) + + +from ansible.module_utils.basic import * +if __name__ == "__main__": + main() diff --git a/cloud/ovirt/ovirt_external_providers.py b/cloud/ovirt/ovirt_external_providers.py new file mode 100644 index 00000000000..9bcb38a78f3 --- /dev/null +++ b/cloud/ovirt/ovirt_external_providers.py @@ -0,0 +1,248 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +import traceback + +try: + import ovirtsdk4.types as otypes +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ovirt import ( + BaseModule, + check_params, + check_sdk, + create_connection, + equal, + ovirt_full_argument_spec, +) + + +ANSIBLE_METADATA = {'status': 'preview', + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ovirt_external_providers +short_description: Module to manage external providers in oVirt +version_added: "2.3" +author: "Ondra Machacek (@machacekondra)" +description: + - "Module to manage external providers in oVirt." +options: + name: + description: + - "Name of the external provider to manage." + state: + description: + - "Should the external provider be present or absent." + choices: ['present', 'absent'] + default: present + description: + description: + - "Description of the external provider." + type: + description: + - "Type of the external provider." + choices: ['os_image', 'os_network', 'os_volume', 'foreman'] + url: + description: + - "URL where the external provider is hosted." + - "Applicable for those types: I(os_image), I(os_volume), I(os_network) and I(foreman)." + username: + description: + - "Username to be used for login to the external provider."
+ - "Applicable for all types." + password: + description: + - "Password of the user specified in C(username) parameter." + - "Applicable for all types." + tenant_name: + description: + - "Name of the tenant." + - "Applicable for those types: I(os_image), I(os_volume) and I(os_network)." + aliases: ['tenant'] + authentication_url: + description: + - "Keystone authentication URL of the openstack provider." + - "Applicable for those types: I(os_image), I(os_volume) and I(os_network)." + aliases: ['auth_url'] + data_center: + description: + - "Name of the data center where provider should be attached." + - "Applicable for those type: I(os_volume)." +extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Add image external provider: +- ovirt_external_providers: + name: image_provider + type: os_image + url: http://10.34.63.71:9292 + username: admin + password: 123456 + tenant: admin + auth_url: http://10.34.63.71:35357/v2.0/ + +# Add foreman provider: +- ovirt_external_providers: + name: foreman_provider + type: foreman + url: https://foreman.example.com + username: admin + password: 123456 + +# Remove image external provider: +- ovirt_external_providers: + state: absent + name: image_provider + type: os_image +''' + +RETURN = ''' +id: + description: ID of the external provider which is managed + returned: On success if external provider is found. + type: str + sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c +external_host_provider: + description: "Dictionary of all the external_host_provider attributes. External provider attributes can be found on your oVirt instance + at following url: https://ovirt.example.com/ovirt-engine/api/model#types/external_host_provider." + returned: "On success and if parameter 'type: foreman' is used." + type: dictionary +openstack_image_provider: + description: "Dictionary of all the openstack_image_provider attributes. External provider attributes can be found on your oVirt instance + at following url: https://ovirt.example.com/ovirt-engine/api/model#types/openstack_image_provider." + returned: "On success and if parameter 'type: os_image' is used." + type: dictionary +openstack_volume_provider: + description: "Dictionary of all the openstack_volume_provider attributes. External provider attributes can be found on your oVirt instance + at following url: https://ovirt.example.com/ovirt-engine/api/model#types/openstack_volume_provider." + returned: "On success and if parameter 'type: os_volume' is used." + type: dictionary +openstack_network_provider: + description: "Dictionary of all the openstack_network_provider attributes. External provider attributes can be found on your oVirt instance + at following url: https://ovirt.example.com/ovirt-engine/api/model#types/openstack_network_provider." + returned: "On success and if parameter 'type: os_network' is used." 
+ type: dictionary +''' + + +class ExternalProviderModule(BaseModule): + + def provider_type(self, provider_type): + self._provider_type = provider_type + + def build_entity(self): + provider_type = self._provider_type( + # module.params always contains the 'username' key, so check its + # value rather than mere key presence: + requires_authentication=self._module.params.get('username') is not None, + ) + for key, value in self._module.params.items(): + if hasattr(provider_type, key): + setattr(provider_type, key, value) + + return provider_type + + def update_check(self, entity): + return ( + equal(self._module.params.get('description'), entity.description) and + equal(self._module.params.get('url'), entity.url) and + equal(self._module.params.get('authentication_url'), entity.authentication_url) and + equal(self._module.params.get('tenant_name'), getattr(entity, 'tenant_name', None)) and + equal(self._module.params.get('username'), entity.username) + ) + + +def _external_provider_service(provider_type, system_service): + if provider_type == 'os_image': + return otypes.OpenStackImageProvider, system_service.openstack_image_providers_service() + elif provider_type == 'os_network': + return otypes.OpenStackNetworkProvider, system_service.openstack_network_providers_service() + elif provider_type == 'os_volume': + return otypes.OpenStackVolumeProvider, system_service.openstack_volume_providers_service() + elif provider_type == 'foreman': + return otypes.ExternalHostProvider, system_service.external_host_providers_service() + + +def main(): + argument_spec = ovirt_full_argument_spec( + state=dict( + choices=['present', 'absent'], + default='present', + ), + name=dict(default=None), + description=dict(default=None), + type=dict( + default=None, + required=True, + choices=[ + 'os_image', 'os_network', 'os_volume', 'foreman', + ], + aliases=['provider'], + ), + url=dict(default=None), + username=dict(default=None), + password=dict(default=None, no_log=True), + tenant_name=dict(default=None, aliases=['tenant']), + authentication_url=dict(default=None, aliases=['auth_url']), + data_center=dict(default=None), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + check_sdk(module) + check_params(module) + + try: + connection = create_connection(module.params.pop('auth')) + provider_type, external_providers_service = _external_provider_service( + provider_type=module.params.pop('type'), + system_service=connection.system_service(), + ) + external_providers_module = ExternalProviderModule( + connection=connection, + module=module, + service=external_providers_service, + ) + external_providers_module.provider_type(provider_type) + + state = module.params.pop('state') + if state == 'absent': + ret = external_providers_module.remove() + elif state == 'present': + ret = external_providers_module.create() + + module.exit_json(**ret) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=False) + + +if __name__ == "__main__": + main()
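+ +# Usage sketch (hypothetical values; auth omitted - see the ovirt_auth module +# for how to provide credentials). Attaching a Cinder-backed volume provider to +# a data center exercises the 'data_center' option documented above: +# +# - ovirt_external_providers: +# name: cinder_provider +# type: os_volume +# url: http://10.34.63.71:8776 +# username: admin +# password: 123456 +# tenant: admin +# auth_url: http://10.34.63.71:35357/v2.0/ +# data_center: mydc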
diff --git a/cloud/ovirt/ovirt_external_providers_facts.py b/cloud/ovirt/ovirt_external_providers_facts.py new file mode 100644 index 00000000000..b67ec4d89d8 --- /dev/null +++ b/cloud/ovirt/ovirt_external_providers_facts.py @@ -0,0 +1,151 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +import fnmatch +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_full_argument_spec, +) + + +ANSIBLE_METADATA = {'status': 'preview', + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ovirt_external_providers_facts +short_description: Retrieve facts about one or more oVirt external providers +version_added: "2.3" +description: + - "Retrieve facts about one or more oVirt external providers." +notes: + - "This module creates a new top-level C(ovirt_external_providers) fact, which + contains a list of external providers." +options: + type: + description: + - "Type of the external provider." + choices: ['os_image', 'os_network', 'os_volume', 'foreman'] + required: true + name: + description: + - "Name of the external provider, can be used as a glob expression." +extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather facts about all image external providers named C(glance): +- ovirt_external_providers_facts: + type: os_image + name: glance +- debug: + var: ovirt_external_providers +''' + +RETURN = ''' +external_host_providers: + description: "List of dictionaries of all the external_host_provider attributes. External provider attributes can be found on your oVirt instance + at the following URL: https://ovirt.example.com/ovirt-engine/api/model#types/external_host_provider." + returned: "On success and if parameter 'type: foreman' is used." + type: list +openstack_image_providers: + description: "List of dictionaries of all the openstack_image_provider attributes. External provider attributes can be found on your oVirt instance + at the following URL: https://ovirt.example.com/ovirt-engine/api/model#types/openstack_image_provider." + returned: "On success and if parameter 'type: os_image' is used." + type: list +openstack_volume_providers: + description: "List of dictionaries of all the openstack_volume_provider attributes. External provider attributes can be found on your oVirt instance + at the following URL: https://ovirt.example.com/ovirt-engine/api/model#types/openstack_volume_provider." + returned: "On success and if parameter 'type: os_volume' is used." + type: list +openstack_network_providers: + description: "List of dictionaries of all the openstack_network_provider attributes. External provider attributes can be found on your oVirt instance + at the following URL: https://ovirt.example.com/ovirt-engine/api/model#types/openstack_network_provider." + returned: "On success and if parameter 'type: os_network' is used." + type: list +'''
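+ + +# The optional 'name' parameter is matched with shell-style globbing via the +# fnmatch module, e.g. (hypothetical names): +# +# fnmatch.fnmatch('glance-internal', 'glance*') # True +# fnmatch.fnmatch('cinder', 'glance*') # False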
+ + +def _external_provider_service(provider_type, system_service): + if provider_type == 'os_image': + return system_service.openstack_image_providers_service() + elif provider_type == 'os_network': + return system_service.openstack_network_providers_service() + elif provider_type == 'os_volume': + return system_service.openstack_volume_providers_service() + elif provider_type == 'foreman': + return system_service.external_host_providers_service() + + +def main(): + argument_spec = ovirt_full_argument_spec( + name=dict(default=None, required=False), + type=dict( + default=None, + required=True, + choices=[ + 'os_image', 'os_network', 'os_volume', 'foreman', + ], + aliases=['provider'], + ), + ) + module = AnsibleModule(argument_spec) + check_sdk(module) + + try: + connection = create_connection(module.params.pop('auth')) + external_providers_service = _external_provider_service( + provider_type=module.params.pop('type'), + system_service=connection.system_service(), + ) + if module.params['name']: + external_providers = [ + e for e in external_providers_service.list() + if fnmatch.fnmatch(e.name, module.params['name']) + ] + else: + external_providers = external_providers_service.list() + + module.exit_json( + changed=False, + ansible_facts=dict( + ovirt_external_providers=[ + get_dict_of_struct(c) for c in external_providers + ], + ), + ) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=False) + + +if __name__ == '__main__': + main() diff --git a/cloud/ovirt/ovirt_groups.py b/cloud/ovirt/ovirt_groups.py new file mode 100644 index 00000000000..34f326e64ba --- /dev/null +++ b/cloud/ovirt/ovirt_groups.py @@ -0,0 +1,182 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +import traceback + +try: + import ovirtsdk4.types as otypes +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ovirt import ( + BaseModule, + check_sdk, + check_params, + create_connection, + equal, + ovirt_full_argument_spec, +) + + +ANSIBLE_METADATA = {'status': 'preview', + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ovirt_groups +short_description: Module to manage groups in oVirt +version_added: "2.3" +author: "Ondra Machacek (@machacekondra)" +description: + - "Module to manage groups in oVirt." +options: + name: + description: + - "Name of the group to manage." + required: true + state: + description: + - "Should the group be present/absent." + choices: ['present', 'absent'] + default: present + authz_name: + description: + - "Authorization provider of the group. In previous versions of oVirt this was known as domain." + required: true + aliases: ['domain'] + namespace: + description: + - "Namespace of the authorization provider, where the group resides."
+ required: false +extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Add group group1 from authorization provider example.com-authz +ovirt_groups: + name: group1 + domain: example.com-authz + +# Add group group1 from authorization provider example.com-authz +# In case of multi-domain Active Directory setup, you should pass +# also namespace, so it adds correct group: +ovirt_groups: + name: group1 + namespace: dc=ad2,dc=example,dc=com + domain: example.com-authz + +# Remove group group1 with authorization provider example.com-authz +ovirt_groups: + state: absent + name: group1 + domain: example.com-authz +''' + +RETURN = ''' +id: + description: ID of the group which is managed + returned: On success if group is found. + type: str + sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c +group: + description: "Dictionary of all the group attributes. Group attributes can be found on your oVirt instance + at following url: https://ovirt.example.com/ovirt-engine/api/model#types/group." + returned: On success if group is found. +''' + + +def _group(connection, module): + groups = connection.system_service().groups_service().list( + search="name={name}".format( + name=module.params['name'], + ) + ) + + # If found more groups, filter them by namespace and authz name: + # (filtering here, as oVirt backend doesn't support it) + if len(groups) > 1: + groups = [ + g for g in groups if ( + equal(module.params['namespace'], g.namespace) and + equal(module.params['authz_name'], g.domain.name) + ) + ] + return groups[0] if groups else None + + +class GroupsModule(BaseModule): + + def build_entity(self): + return otypes.Group( + domain=otypes.Domain( + name=self._module.params['authz_name'] + ), + name=self._module.params['name'], + namespace=self._module.params['namespace'], + ) + + +def main(): + argument_spec = ovirt_full_argument_spec( + state=dict( + choices=['present', 'absent'], + default='present', + ), + name=dict(required=True), + authz_name=dict(required=True, aliases=['domain']), + namespace=dict(default=None), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + check_sdk(module) + check_params(module) + + try: + connection = create_connection(module.params.pop('auth')) + groups_service = connection.system_service().groups_service() + groups_module = GroupsModule( + connection=connection, + module=module, + service=groups_service, + ) + group = _group(connection, module) + state = module.params['state'] + if state == 'present': + ret = groups_module.create(entity=group) + elif state == 'absent': + ret = groups_module.remove(entity=group) + + module.exit_json(**ret) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=False) + + +if __name__ == "__main__": + main() diff --git a/cloud/ovirt/ovirt_groups_facts.py b/cloud/ovirt/ovirt_groups_facts.py new file mode 100644 index 00000000000..ab4252ffc93 --- /dev/null +++ b/cloud/ovirt/ovirt_groups_facts.py @@ -0,0 +1,102 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_full_argument_spec, +) + + +ANSIBLE_METADATA = {'status': 'preview', + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ovirt_groups_facts +short_description: Retrieve facts about one or more oVirt groups +author: "Ondra Machacek (@machacekondra)" +version_added: "2.3" +description: + - "Retrieve facts about one or more oVirt groups." +notes: + - "This module creates a new top-level C(ovirt_groups) fact, which + contains a list of groups." +options: + pattern: + description: + - "Search term which is accepted by oVirt search backend." + - "For example to search group X use following pattern: name=X" +extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather facts about all groups which names start with C(admin): +- ovirt_groups_facts: + pattern: name=admin* +- debug: + var: ovirt_groups +''' + +RETURN = ''' +ovirt_groups: + description: "List of dictionaries describing the groups. Group attribues are mapped to dictionary keys, + all groups attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/group." + returned: On success. + type: list +''' + + +def main(): + argument_spec = ovirt_full_argument_spec( + pattern=dict(default='', required=False), + ) + module = AnsibleModule(argument_spec) + check_sdk(module) + + try: + connection = create_connection(module.params.pop('auth')) + groups_service = connection.system_service().groups_service() + groups = groups_service.list(search=module.params['pattern']) + module.exit_json( + changed=False, + ansible_facts=dict( + ovirt_groups=[ + get_dict_of_struct(c) for c in groups + ], + ), + ) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=False) + + +if __name__ == '__main__': + main() diff --git a/cloud/ovirt/ovirt_host_networks.py b/cloud/ovirt/ovirt_host_networks.py new file mode 100644 index 00000000000..edf6d3c3789 --- /dev/null +++ b/cloud/ovirt/ovirt_host_networks.py @@ -0,0 +1,368 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + + try: + import ovirtsdk4 as sdk + import ovirtsdk4.types as otypes +except ImportError: + pass + +from ansible.module_utils.ovirt import * + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ovirt_host_networks +short_description: Module to manage host networks in oVirt +version_added: "2.3" +author: "Ondra Machacek (@machacekondra)" +description: + - "Module to manage host networks in oVirt." +options: + name: + description: + - "Name of the host to manage networks for." + required: true + state: + description: + - "Should the host networks be present/absent." + choices: ['present', 'absent'] + default: present + bond: + description: + - "Dictionary describing the network bond:" + - "C(name) - Bond name." + - "C(mode) - Bonding mode." + - "C(interfaces) - List of interfaces to create a bond." + interface: + description: + - "Name of the network interface where the logical network should be attached." + networks: + description: + - "List of dictionaries describing networks to be attached to the interface or bond:" + - "C(name) - Name of the logical network to be assigned to the bond or interface." + - "C(boot_protocol) - Boot protocol, one of I(none), I(static) or I(dhcp)." + - "C(address) - IP address in case the I(static) boot protocol is used." + - "C(prefix) - Routing prefix in case the I(static) boot protocol is used." + - "C(gateway) - Gateway in case the I(static) boot protocol is used." + - "C(version) - IP version. Either v4 or v6." + labels: + description: + - "List of names of the network labels to be assigned to the bond or interface." + check: + description: + - "If I(true), verify connectivity between the host and the engine." + - "Network configuration changes will be rolled back if connectivity between + engine and the host is lost after changing network configuration." + save: + description: + - "If I(true), the network configuration will be persistent; by default it is temporary." +extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Create bond on eth1 and eth2 interfaces, and put 'myvlan' network on top of it: +- name: Bonds + ovirt_host_networks: + name: myhost + bond: + name: bond0 + mode: 2 + interfaces: + - eth1 + - eth2 + networks: + - name: myvlan + boot_protocol: static + address: 1.2.3.4 + prefix: 24 + gateway: 1.2.3.4 + version: v4 + +# Remove bond0 bond from host interfaces: +- ovirt_host_networks: + state: absent + name: myhost + bond: + name: bond0 + +# Assign myvlan1 and myvlan2 vlans to host eth0 interface: +- ovirt_host_networks: + name: myhost + interface: eth0 + networks: + - name: myvlan1 + - name: myvlan2 + +# Remove myvlan2 vlan from host eth0 interface: +- ovirt_host_networks: + state: absent + name: myhost + interface: eth0 + networks: + - name: myvlan2 + +# Remove all networks/vlans from host eth0 interface: +- ovirt_host_networks: + state: absent + name: myhost + interface: eth0 +''' + +RETURN = ''' +id: + description: ID of the host NIC which is managed + returned: On success if host NIC is found. + type: str + sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c +host_nic: + description: "Dictionary of all the host NIC attributes. Host NIC attributes can be found on your oVirt instance + at the following URL: https://ovirt.example.com/ovirt-engine/api/model#types/host_nic." + returned: On success if host NIC is found. +'''
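+ + +# For orientation: 'networks' is a list of dicts as described in DOCUMENTATION. +# A minimal static IPv4 assignment looks like this (values are hypothetical): +# +# networks = [{ +# 'name': 'myvlan', +# 'boot_protocol': 'static', +# 'address': '1.2.3.4', +# 'prefix': 24, +# 'version': 'v4', +# }]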
+ + +class HostNetworksModule(BaseModule): + + def build_entity(self): + return otypes.Host() + + def update_address(self, attachments_service, attachment, network): + # Check if there is any change in address assignments and + # update them if needed: + for ip in attachment.ip_address_assignments: + if str(ip.ip.version) == network.get('version'): + changed = False + if not equal(network.get('boot_protocol'), str(ip.assignment_method)): + ip.assignment_method = otypes.BootProtocol(network.get('boot_protocol')) + changed = True + if not equal(network.get('address'), ip.ip.address): + ip.ip.address = network.get('address') + changed = True + if not equal(network.get('gateway'), ip.ip.gateway): + ip.ip.gateway = network.get('gateway') + changed = True + if not equal(network.get('prefix'), int(ip.ip.netmask)): + ip.ip.netmask = str(network.get('prefix')) + changed = True + + if changed: + attachments_service.service(attachment.id).update(attachment) + self.changed = True + break + + def has_update(self, nic_service): + update = False + bond = self._module.params['bond'] + networks = self._module.params['networks'] + nic = nic_service.get() + + if nic is None: + return update + + # Check if bond configuration should be updated: + if bond: + update = not ( + equal(str(bond.get('mode')), nic.bonding.options[0].value) and + equal( + sorted(bond.get('interfaces')) if bond.get('interfaces') else None, + sorted(get_link_name(self._connection, s) for s in nic.bonding.slaves) + ) + ) + + if not networks: + return update + + # Check if network attachments configuration should be updated: + attachments_service = nic_service.network_attachments_service() + network_names = [network.get('name') for network in networks] + + attachments = {} + for attachment in attachments_service.list(): + name = get_link_name(self._connection, attachment.network) + if name in network_names: + attachments[name] = attachment + + for network in networks: + attachment = attachments.get(network.get('name')) + # If the attachment doesn't exist, we need to create it: + if attachment is None: + return True + + self.update_address(attachments_service, attachment, network) + + return update + + def _action_save_configuration(self, entity): + if self._module.params['save']: + if not self._module.check_mode: + self._service.service(entity.id).commit_net_config() + self.changed = True + + +def main(): + argument_spec = ovirt_full_argument_spec( + state=dict( + choices=['present', 'absent'], + default='present', + ), + name=dict(default=None, aliases=['host'], required=True), + bond=dict(default=None, type='dict'), + interface=dict(default=None), + networks=dict(default=None, type='list'), + labels=dict(default=None, type='list'), + check=dict(default=None, type='bool'), + save=dict(default=None, type='bool'), + ) + module = AnsibleModule(argument_spec=argument_spec) + check_sdk(module) + + try: + connection = create_connection(module.params.pop('auth')) + hosts_service = connection.system_service().hosts_service() + host_networks_module = HostNetworksModule( + connection=connection, + module=module, + service=hosts_service, + ) + + host = host_networks_module.search_entity() + if host is None: + raise Exception("Host '%s' was not found."
% module.params['name']) + + bond = module.params['bond'] + interface = module.params['interface'] + networks = module.params['networks'] + labels = module.params['labels'] + nic_name = bond.get('name') if bond else module.params['interface'] + + nics_service = hosts_service.host_service(host.id).nics_service() + nic = search_by_name(nics_service, nic_name) + + state = module.params['state'] + if ( + state == 'present' and + (nic is None or host_networks_module.has_update(nics_service.service(nic.id))) + ): + host_networks_module.action( + entity=host, + action='setup_networks', + post_action=host_networks_module._action_save_configuration, + check_connectivity=module.params['check'], + modified_bonds=[ + otypes.HostNic( + name=bond.get('name'), + bonding=otypes.Bonding( + options=[ + otypes.Option( + name="mode", + value=str(bond.get('mode')), + ) + ], + slaves=[ + otypes.HostNic(name=i) for i in bond.get('interfaces', []) + ], + ), + ), + ] if bond else None, + modified_labels=[ + otypes.NetworkLabel( + name=str(name), + host_nic=otypes.HostNic( + name=bond.get('name') if bond else interface + ), + ) for name in labels + ] if labels else None, + modified_network_attachments=[ + otypes.NetworkAttachment( + network=otypes.Network( + name=network['name'] + ) if network['name'] else None, + host_nic=otypes.HostNic( + name=bond.get('name') if bond else interface + ), + ip_address_assignments=[ + otypes.IpAddressAssignment( + assignment_method=otypes.BootProtocol( + network.get('boot_protocol', 'none') + ), + ip=otypes.Ip( + address=network.get('address'), + gateway=network.get('gateway'), + netmask=network.get('netmask'), + version=otypes.IpVersion( + network.get('version') + ) if network.get('version') else None, + ), + ), + ], + ) for network in networks + ] if networks else None, + ) + elif state == 'absent' and nic: + attachments_service = nics_service.nic_service(nic.id).network_attachments_service() + attachments = attachments_service.list() + if networks: + network_names = [network['name'] for network in networks] + attachments = [ + attachment for attachment in attachments + if get_link_name(connection, attachment.network) in network_names + ] + if labels or bond or attachments: + host_networks_module.action( + entity=host, + action='setup_networks', + post_action=host_networks_module._action_save_configuration, + check_connectivity=module.params['check'], + removed_bonds=[ + otypes.HostNic( + name=bond.get('name'), + ), + ] if bond else None, + removed_labels=[ + otypes.NetworkLabel( + name=str(name), + ) for name in labels + ] if labels else None, + removed_network_attachments=list(attachments), + ) + + nic = search_by_name(nics_service, nic_name) + module.exit_json(**{ + 'changed': host_networks_module.changed, + 'id': nic.id if nic else None, + 'host_nic': get_dict_of_struct(nic), + }) + except Exception as e: + module.fail_json(msg=str(e)) + finally: + connection.close(logout=False) + +from ansible.module_utils.basic import * +if __name__ == "__main__": + main() diff --git a/cloud/ovirt/ovirt_host_pm.py b/cloud/ovirt/ovirt_host_pm.py new file mode 100644 index 00000000000..41475ad7bb7 --- /dev/null +++ b/cloud/ovirt/ovirt_host_pm.py @@ -0,0 +1,236 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. 
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +try: + import ovirtsdk4 as sdk + import ovirtsdk4.types as otypes +except ImportError: + pass + +from ansible.module_utils.ovirt import * + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ovirt_host_pm +short_description: Module to manage power management of hosts in oVirt +version_added: "2.3" +author: "Ondra Machacek (@machacekondra)" +description: + - "Module to manage power management of hosts in oVirt." +options: + name: + description: + - "Name of the host to manage." + required: true + aliases: ['host'] + state: + description: + - "Should the host power management (fence agent) be present/absent." + choices: ['present', 'absent'] + default: present + address: + description: + - "Address of the power management interface." + username: + description: + - "Username to be used to connect to the power management interface." + password: + description: + - "Password of the user specified in C(username) parameter." + type: + description: + - "Type of the power management. oVirt predefined values are I(drac5), I(ipmilan), I(rsa), + I(bladecenter), I(alom), I(apc), I(apc_snmp), I(eps), I(wti), I(rsb), I(cisco_ucs), + I(drac7), I(hpblade), I(ilo), I(ilo2), I(ilo3), I(ilo4), I(ilo_ssh), + but the user can also have a custom type defined." + port: + description: + - "Power management interface port." + slot: + description: + - "Power management slot." + options: + description: + - "Dictionary of additional fence agent options." + - "Additional information about options can be found at U(https://fedorahosted.org/cluster/wiki/FenceArguments)." + encrypt_options: + description: + - "If I(true), options will be encrypted when sent to the agent." + aliases: ['encrypt'] + order: + description: + - "Integer value specifying the order of the fence agent; by default it is added at the end." +extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Add fence agent to host 'myhost' +- ovirt_host_pm: + name: myhost + address: 1.2.3.4 + options: + myoption1: x + myoption2: y + username: admin + password: admin + port: 3333 + type: ipmilan + +# Remove ipmilan fence agent with address 1.2.3.4 on host 'myhost' +- ovirt_host_pm: + state: absent + name: myhost + address: 1.2.3.4 + type: ipmilan +''' + +RETURN = ''' +id: + description: ID of the agent which is managed + returned: On success if agent is found. + type: str + sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c +agent: + description: "Dictionary of all the agent attributes. Agent attributes can be found on your oVirt instance + at the following URL: https://ovirt.example.com/ovirt-engine/api/model#types/agent." + returned: On success if agent is found. +'''
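+ + +# Two helper classes cooperate below: HostPmModule manages the fence agent +# itself, while HostModule only flips the host's power_management.enabled flag, +# so that adding the first agent also enables power management on the host.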
+ + +class HostModule(BaseModule): + def build_entity(self): + return otypes.Host( + power_management=otypes.PowerManagement( + enabled=True, + ), + ) + + def update_check(self, entity): + return equal(True, entity.power_management.enabled) + + +class HostPmModule(BaseModule): + + def build_entity(self): + return otypes.Agent( + address=self._module.params['address'], + encrypt_options=self._module.params['encrypt_options'], + options=[ + otypes.Option( + name=name, + value=value, + ) for name, value in self._module.params['options'].items() + ] if self._module.params['options'] else None, + password=self._module.params['password'], + port=self._module.params['port'], + type=self._module.params['type'], + username=self._module.params['username'], + # 'order' is accepted in the argument spec below; None lets the + # engine append the agent at the end: + order=self._module.params['order'], + ) + + def update_check(self, entity): + return ( + equal(self._module.params.get('address'), entity.address) and + equal(self._module.params.get('encrypt_options'), entity.encrypt_options) and + equal(self._module.params.get('password'), entity.password) and + equal(self._module.params.get('username'), entity.username) and + equal(self._module.params.get('port'), entity.port) and + equal(self._module.params.get('type'), entity.type) + ) + + +def main(): + argument_spec = ovirt_full_argument_spec( + state=dict( + choices=['present', 'absent'], + default='present', + ), + name=dict(default=None, required=True, aliases=['host']), + address=dict(default=None), + username=dict(default=None), + password=dict(default=None, no_log=True), + type=dict(default=None), + port=dict(default=None, type='int'), + slot=dict(default=None), + options=dict(default=None, type='dict'), + encrypt_options=dict(default=None, type='bool', aliases=['encrypt']), + order=dict(default=None, type='int'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + check_sdk(module) + + try: + connection = create_connection(module.params.pop('auth')) + hosts_service = connection.system_service().hosts_service() + host = search_by_name(hosts_service, module.params['name']) + if host is None: + raise Exception("Host '%s' was not found." % module.params['name']) + fence_agents_service = hosts_service.host_service(host.id).fence_agents_service() + + host_pm_module = HostPmModule( + connection=connection, + module=module, + service=fence_agents_service, + ) + host_module = HostModule( + connection=connection, + module=module, + service=hosts_service, + ) + + state = module.params['state'] + if state == 'present': + agent = host_pm_module.search_entity( + search_params={ + 'address': module.params['address'], + 'type': module.params['type'], + } + ) + ret = host_pm_module.create(entity=agent) + + # Enable Power Management, if it's not enabled: + host_module.create(entity=host) + elif state == 'absent': + agent = host_pm_module.search_entity( + search_params={ + 'address': module.params['address'], + 'type': module.params['type'], + } + ) + ret = host_pm_module.remove(entity=agent) + + module.exit_json(**ret) + except Exception as e: + module.fail_json(msg=str(e)) + finally: + connection.close(logout=False) + +from ansible.module_utils.basic import * +if __name__ == "__main__": + main() diff --git a/cloud/ovirt/ovirt_hosts.py b/cloud/ovirt/ovirt_hosts.py new file mode 100644 index 00000000000..1394692f8c9 --- /dev/null +++ b/cloud/ovirt/ovirt_hosts.py @@ -0,0 +1,326 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc.
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +try: + import ovirtsdk4 as sdk + import ovirtsdk4.types as otypes + + from ovirtsdk4.types import HostStatus as hoststate +except ImportError: + pass + +from ansible.module_utils.ovirt import * + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ovirt_hosts +short_description: Module to manage hosts in oVirt +version_added: "2.3" +author: "Ondra Machacek (@machacekondra)" +description: + - "Module to manage hosts in oVirt." +options: + name: + description: + - "Name of the host to manage." + required: true + state: + description: + - "State the host should be in after successful completion." + choices: ['present', 'absent', 'maintenance', 'upgraded', 'started', 'restarted', 'stopped'] + default: present + comment: + description: + - "Description of the host." + cluster: + description: + - "Name of the cluster, where the host should be created." + address: + description: + - "Host address. It can be either an FQDN (preferred) or an IP address." + password: + description: + - "Password of the root user. It's required in case C(public_key) is set to I(False)." + public_key: + description: + - "I(True) if the public key should be used to authenticate to the host." + - "It's required in case C(password) is not set." + default: False + aliases: ['ssh_public_key'] + kdump_integration: + description: + - "Specify whether the host should have Kdump integration enabled." + choices: ['enabled', 'disabled'] + default: enabled + spm_priority: + description: + - "SPM priority of the host. Integer value from 1 to 10, where a higher number means higher priority." + override_iptables: + description: + - "If I(true), the host iptables rules will be overridden by the host deploy script." + force: + description: + - "If I(true), the host will be forcibly moved to the desired state." + default: False +extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Add host with username/password +- ovirt_hosts: + cluster: Default + name: myhost + address: 10.34.61.145 + password: secret + +# Add host using public key +- ovirt_hosts: + public_key: true + cluster: Default + name: myhost2 + address: 10.34.61.145 + +# Maintenance +- ovirt_hosts: + state: maintenance + name: myhost + +# Restart host using power management: +- ovirt_hosts: + state: restarted + name: myhost + +# Upgrade host +- ovirt_hosts: + state: upgraded + name: myhost + +# Remove host +- ovirt_hosts: + state: absent + name: myhost + force: True +''' + +RETURN = ''' +id: + description: ID of the host which is managed + returned: On success if host is found. + type: str + sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c +host: + description: "Dictionary of all the host attributes.
Host attributes can be found on your oVirt instance + at following url: https://ovirt.example.com/ovirt-engine/api/model#types/host." + returned: On success if host is found. +''' + + +class HostsModule(BaseModule): + + def build_entity(self): + return otypes.Host( + name=self._module.params['name'], + cluster=otypes.Cluster( + name=self._module.params['cluster'] + ) if self._module.params['cluster'] else None, + comment=self._module.params['comment'], + address=self._module.params['address'], + root_password=self._module.params['password'], + ssh=otypes.Ssh( + authentication_method='publickey', + ) if self._module.params['public_key'] else None, + kdump_status=otypes.KdumpStatus( + self._module.params['kdump_integration'] + ) if self._module.params['kdump_integration'] else None, + spm=otypes.Spm( + priority=self._module.params['spm_priority'], + ) if self._module.params['spm_priority'] else None, + override_iptables=self._module.params['override_iptables'], + ) + + def update_check(self, entity): + return ( + equal(self._module.params.get('comment'), entity.comment) and + equal(self._module.params.get('kdump_integration'), entity.kdump_status) and + equal(self._module.params.get('spm_priority'), entity.spm.priority) + ) + + def pre_remove(self, entity): + self.action( + entity=entity, + action='deactivate', + action_condition=lambda h: h.status != hoststate.MAINTENANCE, + wait_condition=lambda h: h.status == hoststate.MAINTENANCE, + ) + + def post_update(self, entity): + if entity.status != hoststate.UP: + if not self._module.check_mode: + self._service.host_service(entity.id).activate() + self.changed = True + + +def failed_state(host): + return host.status in [ + hoststate.ERROR, + hoststate.INSTALL_FAILED, + hoststate.NON_RESPONSIVE, + hoststate.NON_OPERATIONAL, + ] + + +def control_state(host_module): + host = host_module.search_entity() + if host is None: + return + + state = host_module._module.params['state'] + host_service = host_module._service.service(host.id) + if failed_state(host): + raise Exception("Not possible to manage host '%s'." 
% host.name) + elif host.status in [ + hoststate.REBOOT, + hoststate.CONNECTING, + hoststate.INITIALIZING, + hoststate.INSTALLING, + hoststate.INSTALLING_OS, + ]: + wait( + service=host_service, + condition=lambda host: host.status == hoststate.UP, + fail_condition=failed_state, + ) + elif host.status == hoststate.PREPARING_FOR_MAINTENANCE: + wait( + service=host_service, + condition=lambda host: host.status == hoststate.MAINTENANCE, + fail_condition=failed_state, + ) + + +def main(): + argument_spec = ovirt_full_argument_spec( + state=dict( + choices=['present', 'absent', 'maintenance', 'upgraded', 'started', 'restarted', 'stopped'], + default='present', + ), + name=dict(required=True), + comment=dict(default=None), + cluster=dict(default=None), + address=dict(default=None), + password=dict(default=None), + public_key=dict(default=False, type='bool', aliases=['ssh_public_key']), + kdump_integration=dict(default=None, choices=['enabled', 'disabled']), + spm_priority=dict(default=None, type='int'), + override_iptables=dict(default=None, type='bool'), + force=dict(default=False, type='bool'), + timeout=dict(default=600, type='int'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + check_sdk(module) + + try: + connection = create_connection(module.params.pop('auth')) + hosts_service = connection.system_service().hosts_service() + hosts_module = HostsModule( + connection=connection, + module=module, + service=hosts_service, + ) + + state = module.params['state'] + control_state(hosts_module) + if state == 'present': + ret = hosts_module.create() + hosts_module.action( + action='activate', + action_condition=lambda h: h.status == hoststate.MAINTENANCE, + wait_condition=lambda h: h.status == hoststate.UP, + fail_condition=failed_state, + ) + elif state == 'absent': + ret = hosts_module.remove() + elif state == 'maintenance': + ret = hosts_module.action( + action='deactivate', + action_condition=lambda h: h.status != hoststate.MAINTENANCE, + wait_condition=lambda h: h.status == hoststate.MAINTENANCE, + fail_condition=failed_state, + ) + elif state == 'upgraded': + ret = hosts_module.action( + action='upgrade', + action_condition=lambda h: h.update_available, + wait_condition=lambda h: h.status == hoststate.UP, + fail_condition=failed_state, + ) + elif state == 'started': + ret = hosts_module.action( + action='fence', + action_condition=lambda h: h.status == hoststate.DOWN, + wait_condition=lambda h: h.status in [hoststate.UP, hoststate.MAINTENANCE], + fail_condition=failed_state, + fence_type='start', + ) + elif state == 'stopped': + hosts_module.action( + action='deactivate', + action_condition=lambda h: h.status not in [hoststate.MAINTENANCE, hoststate.DOWN], + wait_condition=lambda h: h.status in [hoststate.MAINTENANCE, hoststate.DOWN], + fail_condition=failed_state, + ) + ret = hosts_module.action( + action='fence', + action_condition=lambda h: h.status != hoststate.DOWN, + wait_condition=lambda h: h.status == hoststate.DOWN, + fail_condition=failed_state, + fence_type='stop', + ) + elif state == 'restarted': + ret = hosts_module.action( + action='fence', + wait_condition=lambda h: h.status == hoststate.UP, + fail_condition=failed_state, + fence_type='restart', + ) + + + module.exit_json(**ret) + except Exception as e: + module.fail_json(msg=str(e)) + finally: + connection.close(logout=False) + + +from ansible.module_utils.basic import * +if __name__ == "__main__": + main() diff --git a/cloud/ovirt/ovirt_hosts_facts.py 
b/cloud/ovirt/ovirt_hosts_facts.py new file mode 100644 index 00000000000..ad1945e538c --- /dev/null +++ b/cloud/ovirt/ovirt_hosts_facts.py @@ -0,0 +1,99 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +try: + import ovirtsdk4 as sdk +except ImportError: + pass + +from ansible.module_utils.ovirt import * + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ovirt_hosts_facts +short_description: Retrieve facts about one or more oVirt hosts +author: "Ondra Machacek (@machacekondra)" +version_added: "2.3" +description: + - "Retrieve facts about one or more oVirt hosts." +notes: + - "This module creates a new top-level C(ovirt_hosts) fact, which + contains a list of hosts." +options: + pattern: + description: + - "Search term which is accepted by oVirt search backend." + - "For example to search host X from datacenter Y use following pattern: + name=X and datacenter=Y" +extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather facts about all hosts which names start with C(host) and +# belong to data center C(west): +- ovirt_hosts_facts: + pattern: name=host* and datacenter=west +- debug: + var: ovirt_hosts +''' + +RETURN = ''' +ovirt_hosts: + description: "List of dictionaries describing the hosts. Host attribues are mapped to dictionary keys, + all hosts attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/host." + returned: On success. + type: list +''' + + +def main(): + argument_spec = ovirt_full_argument_spec( + pattern=dict(default='', required=False), + ) + module = AnsibleModule(argument_spec) + check_sdk(module) + + try: + connection = create_connection(module.params.pop('auth')) + hosts_service = connection.system_service().hosts_service() + hosts = hosts_service.list(search=module.params['pattern']) + module.exit_json( + changed=False, + ansible_facts=dict( + ovirt_hosts=[ + get_dict_of_struct(c) for c in hosts + ], + ), + ) + except Exception as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/cloud/ovirt/ovirt_mac_pools.py b/cloud/ovirt/ovirt_mac_pools.py new file mode 100644 index 00000000000..622f57d89d7 --- /dev/null +++ b/cloud/ovirt/ovirt_mac_pools.py @@ -0,0 +1,180 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+try:
+    import ovirtsdk4.types as otypes
+except ImportError:
+    pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+    BaseModule,
+    check_sdk,
+    equal,
+    create_connection,
+    ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_mac_pools
+short_description: Module to manage MAC pools in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+    - "This module manages MAC pools in oVirt."
+options:
+    name:
+        description:
+            - "Name of the MAC pool to manage."
+        required: true
+    description:
+        description:
+            - "Description of the MAC pool."
+    state:
+        description:
+            - "Should the MAC pool be present or absent."
+        choices: ['present', 'absent']
+        default: present
+    allow_duplicates:
+        description:
+            - "If I(true), allow a MAC address to be used multiple times in a pool."
+            - "Default value is set by oVirt engine to I(false)."
+    ranges:
+        description:
+            - "List of MAC ranges. The from and to addresses should be separated by a comma."
+            - "For example: 00:1a:4a:16:01:51,00:1a:4a:16:01:61"
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create MAC pool:
+- ovirt_mac_pools:
+    name: mymacpool
+    allow_duplicates: false
+    ranges:
+      - 00:1a:4a:16:01:51,00:1a:4a:16:01:61
+      - 00:1a:4a:16:02:51,00:1a:4a:16:02:61
+
+# Remove MAC pool:
+- ovirt_mac_pools:
+    state: absent
+    name: mymacpool
+'''
+
+RETURN = '''
+id:
+    description: ID of the MAC pool which is managed
+    returned: On success if MAC pool is found.
+    type: str
+    sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+mac_pool:
+    description: "Dictionary of all the MAC pool attributes. MAC pool attributes can be found on your oVirt instance
+                  at following url: https://ovirt.example.com/ovirt-engine/api/model#types/mac_pool."
+    returned: On success if MAC pool is found.
+''' + + +class MACPoolModule(BaseModule): + + def build_entity(self): + return otypes.MacPool( + name=self._module.params['name'], + allow_duplicates=self._module.params['allow_duplicates'], + description=self._module.params['description'], + ranges=[ + otypes.Range( + from_=mac_range.split(',')[0], + to=mac_range.split(',')[1], + ) + for mac_range in self._module.params['ranges'] + ], + ) + + def _compare_ranges(self, entity): + if self._module.params['ranges'] is not None: + ranges = sorted([ + '%s,%s' % (mac_range.from_, mac_range.to) + for mac_range in entity.ranges + ]) + return equal(sorted(self._module.params['ranges']), ranges) + + return True + + def update_check(self, entity): + return ( + self._compare_ranges(entity) and + equal(self._module.params['allow_duplicates'], entity.allow_duplicates) and + equal(self._module.params['description'], entity.description) + ) + + +def main(): + argument_spec = ovirt_full_argument_spec( + state=dict( + choices=['present', 'absent'], + default='present', + ), + name=dict(default=None, required=True), + allow_duplicates=dict(default=None, type='bool'), + description=dict(default=None), + ranges=dict(default=None, type='list'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + check_sdk(module) + + try: + connection = create_connection(module.params.pop('auth')) + mac_pools_service = connection.system_service().mac_pools_service() + mac_pools_module = MACPoolModule( + connection=connection, + module=module, + service=mac_pools_service, + ) + + state = module.params['state'] + if state == 'present': + ret = mac_pools_module.create() + elif state == 'absent': + ret = mac_pools_module.remove() + + module.exit_json(**ret) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=False) + + +if __name__ == "__main__": + main() diff --git a/cloud/ovirt/ovirt_networks.py b/cloud/ovirt/ovirt_networks.py new file mode 100644 index 00000000000..047a24d3880 --- /dev/null +++ b/cloud/ovirt/ovirt_networks.py @@ -0,0 +1,268 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+#
+
+import traceback
+
+try:
+    import ovirtsdk4.types as otypes
+except ImportError:
+    pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+    BaseModule,
+    check_sdk,
+    check_params,
+    create_connection,
+    equal,
+    ovirt_full_argument_spec,
+    search_by_name,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_networks
+short_description: Module to manage logical networks in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+    - "Module to manage logical networks in oVirt."
+options:
+    name:
+        description:
+            - "Name of the network to manage."
+        required: true
+    state:
+        description:
+            - "Should the network be present or absent."
+        choices: ['present', 'absent']
+        default: present
+    datacenter:
+        description:
+            - "Datacenter name where the network resides."
+    description:
+        description:
+            - "Description of the network."
+    comment:
+        description:
+            - "Comment of the network."
+    vlan_tag:
+        description:
+            - "Specify VLAN tag."
+    vm_network:
+        description:
+            - "If I(true), the network will be marked as a VM network."
+            - "A VM network carries traffic relevant to the virtual machine."
+    mtu:
+        description:
+            - "Maximum transmission unit (MTU) of the network."
+    clusters:
+        description:
+            - "List of dictionaries describing how the network is managed in a specific cluster."
+            - "C(name) - Cluster name."
+            - "C(assigned) - I(true) if the network should be assigned to the cluster. Default is I(true)."
+            - "C(required) - I(true) if the network must remain operational for all hosts associated with this network."
+            - "C(display) - I(true) if the network should be marked as a display network."
+            - "C(migration) - I(true) if the network should be marked as a migration network."
+            - "C(gluster) - I(true) if the network should be marked as a gluster network."
+
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create network
+- ovirt_networks:
+    datacenter: mydatacenter
+    name: mynetwork
+    vlan_tag: 1
+    vm_network: true
+
+# Remove network
+- ovirt_networks:
+    state: absent
+    name: mynetwork
+'''
+
+RETURN = '''
+id:
+    description: "ID of the managed network"
+    returned: "On success if network is found."
+    type: str
+    sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+network:
+    description: "Dictionary of all the network attributes. Network attributes can be found on your oVirt instance
+                  at following url: https://ovirt.example.com/ovirt-engine/api/model#types/network."
+    returned: "On success if network is found."
+''' + + +class NetworksModule(BaseModule): + + def build_entity(self): + return otypes.Network( + name=self._module.params['name'], + comment=self._module.params['comment'], + description=self._module.params['description'], + data_center=otypes.DataCenter( + name=self._module.params['datacenter'], + ) if self._module.params['datacenter'] else None, + vlan=otypes.Vlan( + self._module.params['vlan_tag'], + ) if self._module.params['vlan_tag'] else None, + usages=[ + otypes.NetworkUsage.VM if self._module.params['vm_network'] else None + ] if self._module.params['vm_network'] is not None else None, + mtu=self._module.params['mtu'], + ) + + def update_check(self, entity): + return ( + equal(self._module.params.get('comment'), entity.comment) and + equal(self._module.params.get('description'), entity.description) and + equal(self._module.params.get('vlan_tag'), getattr(entity.vlan, 'id', None)) and + equal(self._module.params.get('vm_network'), True if entity.usages else False) and + equal(self._module.params.get('mtu'), entity.mtu) + ) + + +class ClusterNetworksModule(BaseModule): + + def __init__(self, network_id, cluster_network, *args, **kwargs): + super(ClusterNetworksModule, self).__init__(*args, **kwargs) + self._network_id = network_id + self._cluster_network = cluster_network + + def build_entity(self): + return otypes.Network( + id=self._network_id, + name=self._module.params['name'], + required=self._cluster_network.get('required'), + display=self._cluster_network.get('display'), + usages=[ + otypes.NetworkUsage(usage) + for usage in ['display', 'gluster', 'migration'] + if self._cluster_network.get(usage, False) + ] if ( + self._cluster_network.get('display') is not None or + self._cluster_network.get('gluster') is not None or + self._cluster_network.get('migration') is not None + ) else None, + ) + + def update_check(self, entity): + return ( + equal(self._cluster_network.get('required'), entity.required) and + equal(self._cluster_network.get('display'), entity.display) and + equal( + sorted([ + usage + for usage in ['display', 'gluster', 'migration'] + if self._cluster_network.get(usage, False) + ]), + sorted([ + str(usage) + for usage in getattr(entity, 'usages', []) + # VM + MANAGEMENT is part of root network + if usage != otypes.NetworkUsage.VM and usage != otypes.NetworkUsage.MANAGEMENT + ]), + ) + ) + + +def main(): + argument_spec = ovirt_full_argument_spec( + state=dict( + choices=['present', 'absent'], + default='present', + ), + datacenter=dict(default=None, required=True), + name=dict(default=None, required=True), + description=dict(default=None), + comment=dict(default=None), + vlan_tag=dict(default=None, type='int'), + vm_network=dict(default=None, type='bool'), + mtu=dict(default=None, type='int'), + clusters=dict(default=None, type='list'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + check_sdk(module) + check_params(module) + + try: + connection = create_connection(module.params.pop('auth')) + clusters_service = connection.system_service().clusters_service() + networks_service = connection.system_service().networks_service() + networks_module = NetworksModule( + connection=connection, + module=module, + service=networks_service, + ) + state = module.params['state'] + network = networks_module.search_entity( + search_params={ + 'name': module.params['name'], + 'datacenter': module.params['datacenter'], + }, + ) + if state == 'present': + ret = networks_module.create(entity=network) + + # Update clusters networks: + 
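+            # Each item of C(clusters) controls whether the network is assigned
+            # to that cluster (C(assigned), default I(true)) and which usage
+            # flags it carries there (required/display/migration/gluster):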
+            for param_cluster in module.params.get('clusters', []):
+                cluster = search_by_name(clusters_service, param_cluster.get('name', None))
+                if cluster is None:
+                    raise Exception("Cluster '%s' was not found." % param_cluster.get('name'))
+                cluster_networks_service = clusters_service.service(cluster.id).networks_service()
+                cluster_networks_module = ClusterNetworksModule(
+                    network_id=ret['id'],
+                    cluster_network=param_cluster,
+                    connection=connection,
+                    module=module,
+                    service=cluster_networks_service,
+                )
+                if param_cluster.get('assigned', True):
+                    ret = cluster_networks_module.create()
+                else:
+                    ret = cluster_networks_module.remove()
+
+        elif state == 'absent':
+            ret = networks_module.remove(entity=network)
+
+        module.exit_json(**ret)
+    except Exception as e:
+        module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        connection.close(logout=False)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/cloud/ovirt/ovirt_networks_facts.py b/cloud/ovirt/ovirt_networks_facts.py
new file mode 100644
index 00000000000..974acbf95d8
--- /dev/null
+++ b/cloud/ovirt/ovirt_networks_facts.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+    check_sdk,
+    create_connection,
+    get_dict_of_struct,
+    ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_networks_facts
+short_description: Retrieve facts about one or more oVirt networks
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+    - "Retrieve facts about one or more oVirt networks."
+notes:
+    - "This module creates a new top-level C(ovirt_networks) fact, which
+       contains a list of networks."
+options:
+    pattern:
+      description:
+        - "Search term which is accepted by the oVirt search backend."
+        - "For example, to search for networks whose names start with vlan1 use: name=vlan1*"
+extends_documentation_fragment: ovirt
+'''
+
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all networks whose names start with C(vlan1):
+- ovirt_networks_facts:
+    pattern: name=vlan1*
+- debug:
+    var: ovirt_networks
+'''
+
+
+RETURN = '''
+ovirt_networks:
+    description: "List of dictionaries describing the networks. Network attributes are mapped to dictionary keys,
+                  all network attributes can be found at the following url: https://ovirt.example.com/ovirt-engine/api/model#types/network."
+    returned: On success.
+    type: list
+'''
+
+
+def main():
+    argument_spec = ovirt_full_argument_spec(
+        pattern=dict(default='', required=False),
+    )
+    module = AnsibleModule(argument_spec)
+    check_sdk(module)
+
+    try:
+        connection = create_connection(module.params.pop('auth'))
+        networks_service = connection.system_service().networks_service()
+        networks = networks_service.list(search=module.params['pattern'])
+        module.exit_json(
+            changed=False,
+            ansible_facts=dict(
+                ovirt_networks=[
+                    get_dict_of_struct(c) for c in networks
+                ],
+            ),
+        )
+    except Exception as e:
+        module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        connection.close(logout=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/ovirt/ovirt_nics.py b/cloud/ovirt/ovirt_nics.py
new file mode 100644
index 00000000000..f0513503a9b
--- /dev/null
+++ b/cloud/ovirt/ovirt_nics.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+    import ovirtsdk4.types as otypes
+except ImportError:
+    pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+    BaseModule,
+    check_sdk,
+    create_connection,
+    equal,
+    get_link_name,
+    ovirt_full_argument_spec,
+    search_by_name,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_nics
+short_description: Module to manage network interfaces of Virtual Machines in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+    - "Module to manage network interfaces of Virtual Machines in oVirt."
+options:
+    name:
+        description:
+            - "Name of the network interface to manage."
+        required: true
+    vm:
+        description:
+            - "Name of the Virtual Machine to manage."
+        required: true
+    state:
+        description:
+            - "Should the Virtual Machine NIC be present/absent/plugged/unplugged."
+        choices: ['present', 'absent', 'plugged', 'unplugged']
+        default: present
+    network:
+        description:
+            - "Logical network which the VM network interface should use.
+               By default, an empty network is used if the network is not specified."
+    profile:
+        description:
+            - "Virtual network interface profile to be attached to the VM network interface."
+    interface:
+        description:
+            - "Type of the network interface."
+        choices: ['virtio', 'e1000', 'rtl8139', 'pci_passthrough', 'rtl8139_virtio', 'spapr_vlan']
+        default: 'virtio'
+    mac_address:
+        description:
+            - "Custom MAC address of the network interface; by default it's obtained from the MAC pool."
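+            - "For example: 00:1a:4a:16:01:56 (see the example below)."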
+extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Add NIC to VM +- ovirt_nics: + state: present + vm: myvm + name: mynic + interface: e1000 + mac_address: 00:1a:4a:16:01:56 + profile: ovirtmgmt + network: ovirtmgmt + +# Plug NIC to VM +- ovirt_nics: + state: plugged + vm: myvm + name: mynic + +# Unplug NIC from VM +- ovirt_nics: + state: unplugged + vm: myvm + name: mynic + +# Remove NIC from VM +- ovirt_nics: + state: absent + vm: myvm + name: mynic +''' + +RETURN = ''' +id: + description: ID of the network interface which is managed + returned: On success if network interface is found. + type: str + sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c +nic: + description: "Dictionary of all the network interface attributes. Network interface attributes can be found on your oVirt instance + at following url: https://ovirt.example.com/ovirt-engine/api/model#types/nic." + returned: On success if network interface is found. +''' + + +class VmNicsModule(BaseModule): + + def __init__(self, *args, **kwargs): + super(VmNicsModule, self).__init__(*args, **kwargs) + self.vnic_id = None + + @property + def vnic_id(self): + return self._vnic_id + + @vnic_id.setter + def vnic_id(self, vnic_id): + self._vnic_id = vnic_id + + def build_entity(self): + return otypes.Nic( + name=self._module.params.get('name'), + interface=otypes.NicInterface( + self._module.params.get('interface') + ) if self._module.params.get('interface') else None, + vnic_profile=otypes.VnicProfile( + id=self.vnic_id, + ) if self.vnic_id else None, + mac=otypes.Mac( + address=self._module.params.get('mac_address') + ) if self._module.params.get('mac_address') else None, + ) + + def update_check(self, entity): + return ( + equal(self._module.params.get('interface'), str(entity.interface)) and + equal(self._module.params.get('profile'), get_link_name(self._connection, entity.vnic_profile)) and + equal(self._module.params.get('mac_address'), entity.mac.address) + ) + + +def main(): + argument_spec = ovirt_full_argument_spec( + state=dict( + choices=['present', 'absent', 'plugged', 'unplugged'], + default='present' + ), + vm=dict(required=True), + name=dict(required=True), + interface=dict(default=None), + profile=dict(default=None), + network=dict(default=None), + mac_address=dict(default=None), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + check_sdk(module) + + try: + # Locate the service that manages the virtual machines and use it to + # search for the NIC: + connection = create_connection(module.params.pop('auth')) + vms_service = connection.system_service().vms_service() + + # Locate the VM, where we will manage NICs: + vm_name = module.params.get('vm') + vm = search_by_name(vms_service, vm_name) + if vm is None: + raise Exception("VM '%s' was not found." 
% vm_name) + + # Locate the service that manages the virtual machines NICs: + vm_service = vms_service.vm_service(vm.id) + nics_service = vm_service.nics_service() + vmnics_module = VmNicsModule( + connection=connection, + module=module, + service=nics_service, + ) + + # Find vNIC id of the network interface (if any): + profile = module.params.get('profile') + if profile and module.params['network']: + cluster_name = get_link_name(connection, vm.cluster) + dcs_service = connection.system_service().data_centers_service() + dc = dcs_service.list(search='Clusters.name=%s' % cluster_name)[0] + networks_service = dcs_service.service(dc.id).networks_service() + network = search_by_name(networks_service, module.params['network']) + for vnic in connection.system_service().vnic_profiles_service().list(): + if vnic.name == profile and vnic.network.id == network.id: + vmnics_module.vnic_id = vnic.id + + # Handle appropriate action: + state = module.params['state'] + if state == 'present': + ret = vmnics_module.create() + elif state == 'absent': + ret = vmnics_module.remove() + elif state == 'plugged': + vmnics_module.create() + ret = vmnics_module.action( + action='activate', + action_condition=lambda nic: not nic.plugged, + wait_condition=lambda nic: nic.plugged, + ) + elif state == 'unplugged': + vmnics_module.create() + ret = vmnics_module.action( + action='deactivate', + action_condition=lambda nic: nic.plugged, + wait_condition=lambda nic: not nic.plugged, + ) + + module.exit_json(**ret) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=False) + +if __name__ == "__main__": + main() diff --git a/cloud/ovirt/ovirt_nics_facts.py b/cloud/ovirt/ovirt_nics_facts.py new file mode 100644 index 00000000000..ab5fcdad721 --- /dev/null +++ b/cloud/ovirt/ovirt_nics_facts.py @@ -0,0 +1,122 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +import fnmatch +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ovirt import ( + check_sdk, + create_connection, + get_dict_of_struct, + ovirt_full_argument_spec, + search_by_name, +) + + +ANSIBLE_METADATA = {'status': 'preview', + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ovirt_nics_facts +short_description: Retrieve facts about one or more oVirt virtual machine network interfaces +author: "Ondra Machacek (@machacekondra)" +version_added: "2.3" +description: + - "Retrieve facts about one or more oVirt virtual machine network interfaces." +notes: + - "This module creates a new top-level C(ovirt_nics) fact, which + contains a list of NICs." +options: + vm: + description: + - "Name of the VM where NIC is attached." + required: true + name: + description: + - "Name of the NIC, can be used as glob expression." 
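+            - "For example, to match all NICs whose names start with eth, use: eth*"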
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all NICs whose names start with C(eth) for VM named C(centos7):
+- ovirt_nics_facts:
+    vm: centos7
+    name: eth*
+- debug:
+    var: ovirt_nics
+'''
+
+RETURN = '''
+ovirt_nics:
+    description: "List of dictionaries describing the network interfaces. NIC attributes are mapped to dictionary keys,
+                  all NIC attributes can be found at the following url: https://ovirt.example.com/ovirt-engine/api/model#types/nic."
+    returned: On success.
+    type: list
+'''
+
+
+def main():
+    argument_spec = ovirt_full_argument_spec(
+        vm=dict(required=True),
+        name=dict(default=None),
+    )
+    module = AnsibleModule(argument_spec)
+    check_sdk(module)
+
+    try:
+        connection = create_connection(module.params.pop('auth'))
+        vms_service = connection.system_service().vms_service()
+        vm_name = module.params['vm']
+        vm = search_by_name(vms_service, vm_name)
+        if vm is None:
+            raise Exception("VM '%s' was not found." % vm_name)
+
+        nics_service = vms_service.service(vm.id).nics_service()
+        if module.params['name']:
+            nics = [
+                e for e in nics_service.list()
+                if fnmatch.fnmatch(e.name, module.params['name'])
+            ]
+        else:
+            nics = nics_service.list()
+
+        module.exit_json(
+            changed=False,
+            ansible_facts=dict(
+                ovirt_nics=[
+                    get_dict_of_struct(c) for c in nics
+                ],
+            ),
+        )
+    except Exception as e:
+        module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        connection.close(logout=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/ovirt/ovirt_permissions.py b/cloud/ovirt/ovirt_permissions.py
new file mode 100644
index 00000000000..6ea833599a0
--- /dev/null
+++ b/cloud/ovirt/ovirt_permissions.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+    import ovirtsdk4.types as otypes
+except ImportError:
+    pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+    BaseModule,
+    check_sdk,
+    create_connection,
+    equal,
+    follow_link,
+    get_link_name,
+    ovirt_full_argument_spec,
+    search_by_attributes,
+    search_by_name,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_permissions
+short_description: "Module to manage permissions of users/groups in oVirt"
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+    - "Module to manage permissions of users/groups in oVirt."
+options:
+    role:
+        description:
+            - "Name of the role to be assigned to the user/group on a specific object."
+        default: UserRole
+    state:
+        description:
+            - "Should the permission be present/absent."
+        choices: ['present', 'absent']
+        default: present
+    object_id:
+        description:
+            - "ID of the object where the permissions should be managed."
+    object_name:
+        description:
+            - "Name of the object where the permissions should be managed."
+    object_type:
+        description:
+            - "Type of the object where the permissions should be managed."
+        default: 'vm'
+        choices: [
+            'data_center',
+            'cluster',
+            'host',
+            'storage_domain',
+            'network',
+            'disk',
+            'vm',
+            'vm_pool',
+            'template',
+        ]
+    user_name:
+        description:
+            - "Username of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
+    group_name:
+        description:
+            - "Name of the group to manage."
+    authz_name:
+        description:
+            - "Authorization provider of the user/group. In previous versions of oVirt known as domain."
+        required: true
+        aliases: ['domain']
+    namespace:
+        description:
+            - "Namespace of the authorization provider, where the user/group resides."
+        required: false
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add user user1 from authorization provider example.com-authz
+- ovirt_permissions:
+    user_name: user1
+    authz_name: example.com-authz
+    object_type: vm
+    object_name: myvm
+    role: UserVmManager
+
+# Remove permission from user
+- ovirt_permissions:
+    state: absent
+    user_name: user1
+    authz_name: example.com-authz
+    object_type: cluster
+    object_name: mycluster
+    role: ClusterAdmin
+'''
+
+RETURN = '''
+id:
+    description: ID of the permission which is managed
+    returned: On success if permission is found.
+    type: str
+    sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+permission:
+    description: "Dictionary of all the permission attributes. Permission attributes can be found on your oVirt instance
+                  at following url: https://ovirt.example.com/ovirt-engine/api/model#types/permission."
+    returned: On success if permission is found.
+'''
+
+
+def _objects_service(connection, object_type):
+    return getattr(
+        connection.system_service(),
+        '%ss_service' % object_type,
+        None,
+    )()
+
+
+def _object_service(connection, module):
+    object_type = module.params['object_type']
+    objects_service = _objects_service(connection, object_type)
+
+    object_id = module.params['object_id']
+    if object_id is None:
+        sdk_object = search_by_name(objects_service, module.params['object_name'])
+        if sdk_object is None:
+            raise Exception(
+                "'%s' object '%s' was not found." % (
+                    module.params['object_type'],
+                    module.params['object_name']
+                )
+            )
+        object_id = sdk_object.id
+
+    return objects_service.service(object_id)
+
+
+def _permission(module, permissions_service, connection):
+    for permission in permissions_service.list():
+        user = follow_link(connection, permission.user)
+        if (
+            equal(module.params['user_name'], user.principal if user else None) and
+            equal(module.params['group_name'], get_link_name(connection, permission.group)) and
+            equal(module.params['role'], get_link_name(connection, permission.role))
+        ):
+            return permission
+
+
+class PermissionsModule(BaseModule):
+
+    def _user(self):
+        # 'usrname' (not 'username') is the attribute name expected by the
+        # oVirt search backend:
+        user = search_by_attributes(
+            self._connection.system_service().users_service(),
+            usrname="{name}@{authz_name}".format(
+                name=self._module.params['user_name'],
+                authz_name=self._module.params['authz_name'],
+            ),
+        )
+        if user is None:
+            raise Exception("User '%s' was not found." % self._module.params['user_name'])
+        return user
+
+    def _group(self):
+        groups = self._connection.system_service().groups_service().list(
+            search="name={name}".format(
+                name=self._module.params['group_name'],
+            )
+        )
+
+        # If more groups were found, filter them by namespace and authz name
+        # (filtering here, as the oVirt backend doesn't support it):
+        if len(groups) > 1:
+            groups = [
+                g for g in groups if (
+                    equal(self._module.params['namespace'], g.namespace) and
+                    equal(self._module.params['authz_name'], g.domain.name)
+                )
+            ]
+        if not groups:
+            raise Exception("Group '%s' was not found." % self._module.params['group_name'])
+        return groups[0]
+
+    def build_entity(self):
+        entity = self._group() if self._module.params['group_name'] else self._user()
+
+        return otypes.Permission(
+            user=otypes.User(
+                id=entity.id
+            ) if self._module.params['user_name'] else None,
+            group=otypes.Group(
+                id=entity.id
+            ) if self._module.params['group_name'] else None,
+            role=otypes.Role(
+                name=self._module.params['role']
+            ),
+        )
+
+
+def main():
+    argument_spec = ovirt_full_argument_spec(
+        state=dict(
+            choices=['present', 'absent'],
+            default='present',
+        ),
+        role=dict(default='UserRole'),
+        object_type=dict(
+            default='vm',
+            choices=[
+                'data_center',
+                'cluster',
+                'host',
+                'storage_domain',
+                'network',
+                'disk',
+                'vm',
+                'vm_pool',
+                'template',
+            ]
+        ),
+        authz_name=dict(required=True, aliases=['domain']),
+        object_id=dict(default=None),
+        object_name=dict(default=None),
+        user_name=dict(default=None),
+        group_name=dict(default=None),
+        namespace=dict(default=None),
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+    check_sdk(module)
+
+    if module.params['object_name'] is None and module.params['object_id'] is None:
+        module.fail_json(msg='"object_name" or "object_id" is required')
+
+    if module.params['user_name'] is None and module.params['group_name'] is None:
+        module.fail_json(msg='"user_name" or "group_name" is required')
+
+    try:
+        connection = create_connection(module.params.pop('auth'))
+        permissions_service = _object_service(connection, module).permissions_service()
+        permissions_module = PermissionsModule(
+            connection=connection,
+            module=module,
+            service=permissions_service,
+        )
+
+        permission = _permission(module, permissions_service, connection)
+        state = module.params['state']
+        if state == 'present':
+            ret = permissions_module.create(entity=permission)
+        elif state == 'absent':
+            ret = permissions_module.remove(entity=permission)
+
+        module.exit_json(**ret)
+    except Exception as e:
+        module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        connection.close(logout=False)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/cloud/ovirt/ovirt_permissions_facts.py b/cloud/ovirt/ovirt_permissions_facts.py
new file mode 100644
index 00000000000..6c855f6296d
--- /dev/null
+++ b/cloud/ovirt/ovirt_permissions_facts.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+try:
+    import ovirtsdk4 as sdk
+except ImportError:
+    pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+    check_sdk,
+    create_connection,
+    get_link_name,
+    ovirt_full_argument_spec,
+    search_by_name,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_permissions_facts
+short_description: Retrieve facts about one or more oVirt permissions
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+    - "Retrieve facts about one or more oVirt permissions."
+notes:
+    - "This module creates a new top-level C(ovirt_permissions) fact, which
+       contains a list of permissions."
+options:
+    user_name:
+        description:
+            - "Username of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
+    group_name:
+        description:
+            - "Name of the group to manage."
+    authz_name:
+        description:
+            - "Authorization provider of the user/group. In previous versions of oVirt known as domain."
+        required: true
+        aliases: ['domain']
+    namespace:
+        description:
+            - "Namespace of the authorization provider, where the user/group resides."
+        required: false
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all permissions of user with username C(john):
+- ovirt_permissions_facts:
+    user_name: john
+    authz_name: example.com-authz
+- debug:
+    var: ovirt_permissions
+'''
+
+RETURN = '''
+ovirt_permissions:
+    description: "List of dictionaries describing the permissions. Permission attributes are mapped to dictionary keys,
+                  all permission attributes can be found at the following url: https://ovirt.example.com/ovirt-engine/api/model#types/permission."
+    returned: On success.
+    type: list
+'''
+
+
+def _permissions_service(connection, module):
+    if module.params['user_name']:
+        service = connection.system_service().users_service()
+        entity = search_by_name(service, module.params['user_name'])
+    else:
+        service = connection.system_service().groups_service()
+        entity = search_by_name(service, module.params['group_name'])
+
+    if entity is None:
+        raise Exception("User/Group wasn't found.")
+
+    return service.service(entity.id).permissions_service()
+
+
+def main():
+    argument_spec = ovirt_full_argument_spec(
+        authz_name=dict(required=True, aliases=['domain']),
+        user_name=dict(default=None),
+        group_name=dict(default=None),
+        namespace=dict(default=None),
+    )
+    module = AnsibleModule(argument_spec)
+    check_sdk(module)
+
+    try:
+        connection = create_connection(module.params.pop('auth'))
+        permissions_service = _permissions_service(connection, module)
+        permissions = []
+        for p in permissions_service.list():
+            newperm = dict()
+            for key, value in p.__dict__.items():
+                if value and isinstance(value, sdk.Struct):
+                    newperm[key[1:]] = get_link_name(connection, value)
+            permissions.append(newperm)
+
+        module.exit_json(
+            changed=False,
+            ansible_facts=dict(ovirt_permissions=permissions),
+        )
+    except Exception as e:
+        module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        connection.close(logout=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/ovirt/ovirt_quotas.py b/cloud/ovirt/ovirt_quotas.py
new file mode 100644
index 00000000000..d9b94afa202
--- /dev/null
+++ b/cloud/ovirt/ovirt_quotas.py
@@ -0,0 +1,298 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+    import ovirtsdk4.types as otypes
+except ImportError:
+    pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+    BaseModule,
+    check_sdk,
+    create_connection,
+    equal,
+    get_link_name,
+    ovirt_full_argument_spec,
+    search_by_name,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_quotas
+short_description: Module to manage datacenter quotas in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+    - "Module to manage datacenter quotas in oVirt."
+options:
+    name:
+        description:
+            - "Name of the quota to manage."
+        required: true
+    state:
+        description:
+            - "Should the quota be present/absent."
+        choices: ['present', 'absent']
+        default: present
+    datacenter:
+        description:
+            - "Name of the datacenter where the quota should be managed."
+        required: true
+    description:
+        description:
+            - "Description of the quota to manage."
+    cluster_threshold:
+        description:
+            - "Cluster threshold (soft limit), defined as a percentage (0-100)."
+    cluster_grace:
+        description:
+            - "Cluster grace (hard limit), defined as a percentage (1-100)."
+    storage_threshold:
+        description:
+            - "Storage threshold (soft limit), defined as a percentage (0-100)."
+    storage_grace:
+        description:
+            - "Storage grace (hard limit), defined as a percentage (1-100)."
+    clusters:
+        description:
+            - "List of dictionaries describing cluster limits; each limit applies to the specified cluster."
+            - "If no cluster is specified, the limit applies to all clusters in the system:"
+            - "C(name) - Name of the cluster."
+            - "C(memory) - Memory limit (in GiB)."
+            - "C(cpu) - CPU limit."
+    storages:
+        description:
+            - "List of dictionaries describing storage limits; each limit applies to the specified storage."
+            - "If no storage is specified, the limit applies to all storages in the system:"
+            - "C(name) - Name of the storage."
+            - "C(size) - Size limit (in GiB)."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add cluster quota to cluster cluster1 with memory limit 20GiB and CPU limit to 10:
+ovirt_quotas:
+    name: quota1
+    datacenter: dcX
+    clusters:
+        - name: cluster1
+          memory: 20
+          cpu: 10
+
+# Add cluster quota to all clusters with memory limit 30GiB and CPU limit to 15:
+ovirt_quotas:
+    name: quota2
+    datacenter: dcX
+    clusters:
+        - memory: 30
+          cpu: 15
+
+# Add storage quota to storage data1 with size limit to 100GiB
+ovirt_quotas:
+    name: quota3
+    datacenter: dcX
+    storage_grace: 40
+    storage_threshold: 60
+    storages:
+        - name: data1
+          size: 100
+
+# Remove quota quota1 (Note the quota must not be assigned to any VM/disk):
+ovirt_quotas:
+    state: absent
+    datacenter: dcX
+    name: quota1
+'''
+
+RETURN = '''
+id:
+    description: ID of the quota which is managed
+    returned: On success if quota is found.
+    type: str
+    sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+quota:
+    description: "Dictionary of all the quota attributes. Quota attributes can be found on your oVirt instance
+                  at following url: https://ovirt.example.com/ovirt-engine/api/model#types/quota."
+    returned: On success if quota is found.
+'''
+
+
+class QuotasModule(BaseModule):
+
+    def build_entity(self):
+        return otypes.Quota(
+            description=self._module.params['description'],
+            name=self._module.params['name'],
+            storage_hard_limit_pct=self._module.params.get('storage_grace'),
+            storage_soft_limit_pct=self._module.params.get('storage_threshold'),
+            cluster_hard_limit_pct=self._module.params.get('cluster_grace'),
+            cluster_soft_limit_pct=self._module.params.get('cluster_threshold'),
+        )
+
+    def update_storage_limits(self, entity):
+        new_limits = {}
+        for storage in self._module.params.get('storages'):
+            new_limits[storage.get('name', '')] = {
+                'size': storage.get('size'),
+            }
+
+        old_limits = {}
+        sd_limit_service = self._service.service(entity.id).quota_storage_limits_service()
+        for limit in sd_limit_service.list():
+            storage = get_link_name(self._connection, limit.storage_domain) if limit.storage_domain else ''
+            old_limits[storage] = {
+                'size': limit.limit,
+            }
+            sd_limit_service.service(limit.id).remove()
+
+        return new_limits == old_limits
+
+    def update_cluster_limits(self, entity):
+        new_limits = {}
+        for cluster in self._module.params.get('clusters'):
+            new_limits[cluster.get('name', '')] = {
+                'cpu': cluster.get('cpu'),
+                'memory': float(cluster.get('memory')),
+            }
+
+        old_limits = {}
+        cl_limit_service = self._service.service(entity.id).quota_cluster_limits_service()
+        for limit in cl_limit_service.list():
+            cluster = get_link_name(self._connection, limit.cluster) if limit.cluster else ''
+            old_limits[cluster] = {
+                'cpu': limit.vcpu_limit,
+                'memory': limit.memory_limit,
+            }
+            cl_limit_service.service(limit.id).remove()
+
+        return new_limits == old_limits
+
+    def update_check(self, entity):
+        # -- FIXME --
+        # Note that we always remove all cluster/storage limits here, because
+        # it's not currently possible to update them in place, and then
+        # re-create the limits appropriately. This shouldn't have any side
+        # effects, but it's not considered a correct approach.
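+        # (The limits removed by the update_*_limits methods above are
+        # re-added in main() right after create() returns.)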
+ # This feature is tracked here: https://bugzilla.redhat.com/show_bug.cgi?id=1398576 + # + + return ( + self.update_storage_limits(entity) and + self.update_cluster_limits(entity) and + equal(self._module.params.get('description'), entity.description) and + equal(self._module.params.get('storage_grace'), entity.storage_hard_limit_pct) and + equal(self._module.params.get('storage_threshold'), entity.storage_soft_limit_pct) and + equal(self._module.params.get('cluster_grace'), entity.cluster_hard_limit_pct) and + equal(self._module.params.get('cluster_threshold'), entity.cluster_soft_limit_pct) + ) + + +def main(): + argument_spec = ovirt_full_argument_spec( + state=dict( + choices=['present', 'absent'], + default='present', + ), + name=dict(required=True), + datacenter=dict(required=True), + description=dict(default=None), + cluster_threshold=dict(default=None, type='int', aliases=['cluster_soft_limit']), + cluster_grace=dict(default=None, type='int', aliases=['cluster_hard_limit']), + storage_threshold=dict(default=None, type='int', aliases=['storage_soft_limit']), + storage_grace=dict(default=None, type='int', aliases=['storage_hard_limit']), + clusters=dict(default=[], type='list'), + storages=dict(default=[], type='list'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + check_sdk(module) + + try: + connection = create_connection(module.params.pop('auth')) + datacenters_service = connection.system_service().data_centers_service() + dc_name = module.params['datacenter'] + dc_id = getattr(search_by_name(datacenters_service, dc_name), 'id', None) + if dc_id is None: + raise Exception("Datacenter '%s' was not found." % dc_name) + + quotas_service = datacenters_service.service(dc_id).quotas_service() + quotas_module = QuotasModule( + connection=connection, + module=module, + service=quotas_service, + ) + + state = module.params['state'] + if state == 'present': + ret = quotas_module.create() + + # Manage cluster limits: + cl_limit_service = quotas_service.service(ret['id']).quota_cluster_limits_service() + for cluster in module.params.get('clusters'): + cl_limit_service.add( + limit=otypes.QuotaClusterLimit( + memory_limit=float(cluster.get('memory')), + vcpu_limit=cluster.get('cpu'), + cluster=search_by_name( + connection.system_service().clusters_service(), + cluster.get('name') + ), + ), + ) + + # Manage storage limits: + sd_limit_service = quotas_service.service(ret['id']).quota_storage_limits_service() + for storage in module.params.get('storages'): + sd_limit_service.add( + limit=otypes.QuotaStorageLimit( + limit=storage.get('size'), + storage_domain=search_by_name( + connection.system_service().storage_domains_service(), + storage.get('name') + ), + ) + ) + + elif state == 'absent': + ret = quotas_module.remove() + + module.exit_json(**ret) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=False) + + +if __name__ == "__main__": + main() diff --git a/cloud/ovirt/ovirt_quotas_facts.py b/cloud/ovirt/ovirt_quotas_facts.py new file mode 100644 index 00000000000..4553f64d394 --- /dev/null +++ b/cloud/ovirt/ovirt_quotas_facts.py @@ -0,0 +1,121 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. 
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+    check_sdk,
+    create_connection,
+    get_dict_of_struct,
+    ovirt_full_argument_spec,
+    search_by_name,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_quotas_facts
+short_description: Retrieve facts about one or more oVirt quotas
+version_added: "2.3"
+description:
+    - "Retrieve facts about one or more oVirt quotas."
+notes:
+    - "This module creates a new top-level C(ovirt_quotas) fact, which
+       contains a list of quotas."
+options:
+    datacenter:
+        description:
+            - "Name of the datacenter where the quota resides."
+        required: true
+    name:
+        description:
+            - "Name of the quota, can be used as glob expression."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about the quota named C(myquota) in the Default datacenter:
+- ovirt_quotas_facts:
+    datacenter: Default
+    name: myquota
+- debug:
+    var: ovirt_quotas
+'''
+
+RETURN = '''
+ovirt_quotas:
+    description: "List of dictionaries describing the quotas. Quota attributes are mapped to dictionary keys,
+                  all quota attributes can be found at the following url: https://ovirt.example.com/ovirt-engine/api/model#types/quota."
+    returned: On success.
+    type: list
+'''
+
+
+def main():
+    argument_spec = ovirt_full_argument_spec(
+        datacenter=dict(required=True),
+        name=dict(default=None),
+    )
+    module = AnsibleModule(argument_spec)
+    check_sdk(module)
+
+    try:
+        connection = create_connection(module.params.pop('auth'))
+        datacenters_service = connection.system_service().data_centers_service()
+        dc_name = module.params['datacenter']
+        dc = search_by_name(datacenters_service, dc_name)
+        if dc is None:
+            raise Exception("Datacenter '%s' was not found." % dc_name)
+
+        quotas_service = datacenters_service.service(dc.id).quotas_service()
+        if module.params['name']:
+            quotas = [
+                e for e in quotas_service.list()
+                if fnmatch.fnmatch(e.name, module.params['name'])
+            ]
+        else:
+            quotas = quotas_service.list()
+
+        module.exit_json(
+            changed=False,
+            ansible_facts=dict(
+                ovirt_quotas=[
+                    get_dict_of_struct(c) for c in quotas
+                ],
+            ),
+        )
+    except Exception as e:
+        module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        connection.close(logout=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/ovirt/ovirt_storage_domains.py b/cloud/ovirt/ovirt_storage_domains.py
new file mode 100644
index 00000000000..cfdd1230386
--- /dev/null
+++ b/cloud/ovirt/ovirt_storage_domains.py
@@ -0,0 +1,444 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+    import ovirtsdk4.types as otypes
+
+    from ovirtsdk4.types import StorageDomainStatus as sdstate
+except ImportError:
+    pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+    BaseModule,
+    check_sdk,
+    create_connection,
+    ovirt_full_argument_spec,
+    search_by_name,
+    wait,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_domains
+short_description: Module to manage storage domains in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+    - "Module to manage storage domains in oVirt."
+options:
+    name:
+        description:
+            - "Name of the storage domain to manage."
+    state:
+        description:
+            - "Should the storage domain be present/absent/maintenance/unattached."
+        choices: ['present', 'absent', 'maintenance', 'unattached']
+        default: present
+    description:
+        description:
+            - "Description of the storage domain."
+    comment:
+        description:
+            - "Comment of the storage domain."
+    data_center:
+        description:
+            - "Data center name where the storage domain should be attached."
+    domain_function:
+        description:
+            - "Function of the storage domain."
+        choices: ['data', 'iso', 'export']
+        default: 'data'
+        aliases: ['type']
+    host:
+        description:
+            - "Host to be used to mount storage."
+    nfs:
+        description:
+            - "Dictionary with values for NFS storage type:"
+            - "C(address) - Address of the NFS server. E.g.: myserver.mydomain.com"
+            - "C(path) - Path of the mount point. E.g.: /path/to/my/data"
+    iscsi:
+        description:
+            - "Dictionary with values for iSCSI storage type:"
+            - "C(address) - Address of the iSCSI storage server."
+            - "C(port) - Port of the iSCSI storage server."
+            - "C(target) - iSCSI target."
+            - "C(lun_id) - LUN id."
+            - "C(username) - Username to be used to access the storage server."
+            - "C(password) - Password of the user to be used to access the storage server."
+    posixfs:
+        description:
+            - "Dictionary with values for PosixFS storage type:"
+            - "C(path) - Path of the mount point. E.g.: /path/to/my/data"
+            - "C(vfs_type) - Virtual File System type."
+            - "C(mount_options) - Options which will be passed when mounting the storage."
+    glusterfs:
+        description:
+            - "Dictionary with values for GlusterFS storage type:"
+            - "C(address) - Address of the Gluster server. E.g.: myserver.mydomain.com"
+            - "C(path) - Path of the mount point. E.g.: /path/to/my/data"
+            - "C(mount_options) - Options which will be passed when mounting the storage."
+    fcp:
+        description:
+            - "Dictionary with values for fibre channel storage type:"
+            - "C(address) - Address of the fibre channel storage server."
+            - "C(port) - Port of the fibre channel storage server."
+            - "C(lun_id) - LUN id."
+    destroy:
+        description:
+            - "If I(true), the storage domain metadata won't be cleaned, and the user has to clean it manually."
+            - "This parameter is relevant only when C(state) is I(absent)."
+    format:
+        description:
+            - "If I(true), the storage domain will be formatted after removing it from oVirt."
+            - "This parameter is relevant only when C(state) is I(absent)."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add data NFS storage domain
+- ovirt_storage_domains:
+    name: data_nfs
+    host: myhost
+    data_center: mydatacenter
+    nfs:
+      address: 10.34.63.199
+      path: /path/data
+
+# Add data iSCSI storage domain:
+- ovirt_storage_domains:
+    name: data_iscsi
+    host: myhost
+    data_center: mydatacenter
+    iscsi:
+      target: iqn.2016-08-09.domain-01:nickname
+      lun_id: 1IET_000d0002
+      address: 10.34.63.204
+
+# Import export NFS storage domain:
+- ovirt_storage_domains:
+    domain_function: export
+    host: myhost
+    data_center: mydatacenter
+    nfs:
+      address: 10.34.63.199
+      path: /path/export
+
+# Create ISO NFS storage domain
+- ovirt_storage_domains:
+    name: myiso
+    domain_function: iso
+    host: myhost
+    data_center: mydatacenter
+    nfs:
+      address: 10.34.63.199
+      path: /path/iso
+
+# Remove storage domain
+- ovirt_storage_domains:
+    state: absent
+    name: mystorage_domain
+    format: true
+'''
+
+RETURN = '''
+id:
+    description: ID of the storage domain which is managed
+    returned: On success if storage domain is found.
+    type: str
+    sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+storage_domain:
+    description: "Dictionary of all the storage domain attributes. Storage domain attributes can be found on your oVirt instance
+                  at following url: https://ovirt.example.com/ovirt-engine/api/model#types/storage_domain."
+    returned: On success if storage domain is found.
+'''
+
+
+class StorageDomainModule(BaseModule):
+
+    def _get_storage_type(self):
+        for sd_type in ['nfs', 'iscsi', 'posixfs', 'glusterfs', 'fcp']:
+            if self._module.params.get(sd_type) is not None:
+                return sd_type
+
+    def _get_storage(self):
+        for sd_type in ['nfs', 'iscsi', 'posixfs', 'glusterfs', 'fcp']:
+            if self._module.params.get(sd_type) is not None:
+                return self._module.params.get(sd_type)
+
+    def _login(self, storage_type, storage):
+        if storage_type == 'iscsi':
+            hosts_service = self._connection.system_service().hosts_service()
+            host = search_by_name(hosts_service, self._module.params['host'])
+            hosts_service.host_service(host.id).iscsi_login(
+                iscsi=otypes.IscsiDetails(
+                    username=storage.get('username'),
+                    password=storage.get('password'),
+                    address=storage.get('address'),
+                    target=storage.get('target'),
+                ),
+            )
+
+    def build_entity(self):
+        storage_type = self._get_storage_type()
+        storage = self._get_storage()
+        self._login(storage_type, storage)
+
+        return otypes.StorageDomain(
+            name=self._module.params['name'],
+            description=self._module.params['description'],
+            comment=self._module.params['comment'],
+            type=otypes.StorageDomainType(
+                self._module.params['domain_function']
+            ),
+            host=otypes.Host(
+                name=self._module.params['host'],
+            ),
+            storage=otypes.HostStorage(
+                type=otypes.StorageType(storage_type),
+                logical_units=[
+                    otypes.LogicalUnit(
+                        id=storage.get('lun_id'),
+                        address=storage.get('address'),
+                        port=storage.get('port', 3260),
+                        target=storage.get('target'),
+                        username=storage.get('username'),
+                        password=storage.get('password'),
+                    ),
+                ] if storage_type in ['iscsi', 'fcp'] else None,
+                mount_options=storage.get('mount_options'),
+                vfs_type=storage.get('vfs_type'),
+                address=storage.get('address'),
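+                # The mount path below applies to file-based storage types
+                # (NFS, PosixFS, GlusterFS); iSCSI/FCP use logical_units above: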
+                path=storage.get('path'),
+            )
+        )
+
+    def _attached_sds_service(self):
+        # Get data center object of the storage domain:
+        dcs_service = self._connection.system_service().data_centers_service()
+        dc = search_by_name(dcs_service, self._module.params['data_center'])
+        if dc is None:
+            return
+
+        dc_service = dcs_service.data_center_service(dc.id)
+        return dc_service.storage_domains_service()
+
+    def _maintenance(self, storage_domain):
+        attached_sds_service = self._attached_sds_service()
+        if attached_sds_service is None:
+            return
+
+        attached_sd_service = attached_sds_service.storage_domain_service(storage_domain.id)
+        attached_sd = attached_sd_service.get()
+
+        if attached_sd and attached_sd.status != sdstate.MAINTENANCE:
+            if not self._module.check_mode:
+                attached_sd_service.deactivate()
+            self.changed = True
+
+            wait(
+                service=attached_sd_service,
+                condition=lambda sd: sd.status == sdstate.MAINTENANCE,
+                wait=self._module.params['wait'],
+                timeout=self._module.params['timeout'],
+            )
+
+    def _unattach(self, storage_domain):
+        attached_sds_service = self._attached_sds_service()
+        if attached_sds_service is None:
+            return
+
+        attached_sd_service = attached_sds_service.storage_domain_service(storage_domain.id)
+        attached_sd = attached_sd_service.get()
+
+        if attached_sd and attached_sd.status == sdstate.MAINTENANCE:
+            if not self._module.check_mode:
+                # Detach the storage domain:
+                attached_sd_service.remove()
+            self.changed = True
+            # Wait until storage domain is detached:
+            wait(
+                service=attached_sd_service,
+                condition=lambda sd: sd is None,
+                wait=self._module.params['wait'],
+                timeout=self._module.params['timeout'],
+            )
+
+    def pre_remove(self, storage_domain):
+        # Before removing storage domain we need to put it into maintenance state:
+        self._maintenance(storage_domain)
+
+        # Before removing storage domain we need to detach it from data center:
+        self._unattach(storage_domain)
+
+    def post_create_check(self, sd_id):
+        storage_domain = self._service.service(sd_id).get()
+        self._service = self._attached_sds_service()
+
+        # If storage domain isn't attached, attach it:
+        attached_sd_service = self._service.service(storage_domain.id)
+        if attached_sd_service.get() is None:
+            self._service.add(
+                otypes.StorageDomain(
+                    id=storage_domain.id,
+                ),
+            )
+            self.changed = True
+            # Wait until the storage domain is active:
+            wait(
+                service=attached_sd_service,
+                condition=lambda sd: sd.status == sdstate.ACTIVE,
+                wait=self._module.params['wait'],
+                timeout=self._module.params['timeout'],
+            )
+
+    def unattached_pre_action(self, storage_domain):
+        self._service = self._attached_sds_service()
+        self._maintenance(storage_domain)
+
+
+def failed_state(sd):
+    return sd.status in [sdstate.UNKNOWN, sdstate.INACTIVE]
+
+
+def control_state(sd_module):
+    sd = sd_module.search_entity()
+    if sd is None:
+        return
+
+    sd_service = sd_module._service.service(sd.id)
+    if sd.status == sdstate.LOCKED:
+        wait(
+            service=sd_service,
+            condition=lambda sd: sd.status != sdstate.LOCKED,
+            fail_condition=failed_state,
+        )
+
+        if failed_state(sd):
+            raise Exception("Not possible to manage storage domain '%s'."
% sd.name) + elif sd.status == sdstate.ACTIVATING: + wait( + service=sd_service, + condition=lambda sd: sd.status == sdstate.ACTIVE, + fail_condition=failed_state, + ) + elif sd.status == sdstate.DETACHING: + wait( + service=sd_service, + condition=lambda sd: sd.status == sdstate.UNATTACHED, + fail_condition=failed_state, + ) + elif sd.status == sdstate.PREPARING_FOR_MAINTENANCE: + wait( + service=sd_service, + condition=lambda sd: sd.status == sdstate.MAINTENANCE, + fail_condition=failed_state, + ) + + +def main(): + argument_spec = ovirt_full_argument_spec( + state=dict( + choices=['present', 'absent', 'maintenance', 'unattached'], + default='present', + ), + name=dict(required=True), + description=dict(default=None), + comment=dict(default=None), + data_center=dict(required=True), + domain_function=dict(choices=['data', 'iso', 'export'], default='data', aliases=['type']), + host=dict(default=None), + nfs=dict(default=None, type='dict'), + iscsi=dict(default=None, type='dict'), + posixfs=dict(default=None, type='dict'), + glusterfs=dict(default=None, type='dict'), + fcp=dict(default=None, type='dict'), + destroy=dict(type='bool', default=False), + format=dict(type='bool', default=False), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + check_sdk(module) + + try: + connection = create_connection(module.params.pop('auth')) + storage_domains_service = connection.system_service().storage_domains_service() + storage_domains_module = StorageDomainModule( + connection=connection, + module=module, + service=storage_domains_service, + ) + + state = module.params['state'] + control_state(storage_domains_module) + if state == 'absent': + ret = storage_domains_module.remove( + destroy=module.params['destroy'], + format=module.params['format'], + host=module.params['host'], + ) + elif state == 'present': + sd_id = storage_domains_module.create()['id'] + storage_domains_module.post_create_check(sd_id) + ret = storage_domains_module.action( + action='activate', + action_condition=lambda s: s.status == sdstate.MAINTENANCE, + wait_condition=lambda s: s.status == sdstate.ACTIVE, + fail_condition=failed_state, + ) + elif state == 'maintenance': + sd_id = storage_domains_module.create()['id'] + storage_domains_module.post_create_check(sd_id) + ret = storage_domains_module.action( + action='deactivate', + action_condition=lambda s: s.status == sdstate.ACTIVE, + wait_condition=lambda s: s.status == sdstate.MAINTENANCE, + fail_condition=failed_state, + ) + elif state == 'unattached': + ret = storage_domains_module.create() + storage_domains_module.pre_remove( + storage_domain=storage_domains_service.service(ret['id']).get() + ) + ret['changed'] = storage_domains_module.changed + + module.exit_json(**ret) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=False) + + +if __name__ == "__main__": + main() diff --git a/cloud/ovirt/ovirt_storage_domains_facts.py b/cloud/ovirt/ovirt_storage_domains_facts.py new file mode 100644 index 00000000000..23431ead50a --- /dev/null +++ b/cloud/ovirt/ovirt_storage_domains_facts.py @@ -0,0 +1,104 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. 
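+#
+# Like the other ovirt_*_facts modules in this series, this module follows one
+# pattern: run a search against the collection service, convert each returned
+# SDK object into a plain dict with get_dict_of_struct(), and publish the list
+# as an Ansible fact, roughly:
+#
+#     domains = storage_domains_service.list(search=module.params['pattern'])
+#     facts = [get_dict_of_struct(sd) for sd in domains]
+#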
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+    check_sdk,
+    create_connection,
+    get_dict_of_struct,
+    ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_domains_facts
+short_description: Retrieve facts about one or more oVirt storage domains
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+    - "Retrieve facts about one or more oVirt storage domains."
+notes:
+    - "This module creates a new top-level C(ovirt_storage_domains) fact, which
+       contains a list of storage domains."
+options:
+    pattern:
+        description:
+            - "Search term which is accepted by oVirt search backend."
+            - "For example, to search for storage domain X from datacenter Y use the following pattern:
+               name=X and datacenter=Y"
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all storage domains whose names start with C(data) and
+# belong to data center C(west):
+- ovirt_storage_domains_facts:
+    pattern: name=data* and datacenter=west
+- debug:
+    var: ovirt_storage_domains
+'''
+
+RETURN = '''
+ovirt_storage_domains:
+    description: "List of dictionaries describing the storage domains. Storage domain attributes are mapped to dictionary keys,
+                  all storage domains attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/storage_domain."
+    returned: On success.
+    type: list
+'''
+
+
+def main():
+    argument_spec = ovirt_full_argument_spec(
+        pattern=dict(default='', required=False),
+    )
+    module = AnsibleModule(argument_spec)
+    check_sdk(module)
+
+    try:
+        connection = create_connection(module.params.pop('auth'))
+        storage_domains_service = connection.system_service().storage_domains_service()
+        storage_domains = storage_domains_service.list(search=module.params['pattern'])
+        module.exit_json(
+            changed=False,
+            ansible_facts=dict(
+                ovirt_storage_domains=[
+                    get_dict_of_struct(c) for c in storage_domains
+                ],
+            ),
+        )
+    except Exception as e:
+        module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        connection.close(logout=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/ovirt/ovirt_templates.py b/cloud/ovirt/ovirt_templates.py
new file mode 100644
index 00000000000..831ab906c08
--- /dev/null
+++ b/cloud/ovirt/ovirt_templates.py
@@ -0,0 +1,314 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
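+#
+# Beyond create/remove, this module also models the export/import lifecycle of
+# a template: state=exported copies the template into an export storage domain,
+# and state=imported brings it back from an export domain or from a Glance
+# image provider; in both flows the module repoints self._service at the
+# appropriate templates_service so searching and waiting happen in that scope.
+#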
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import time
+import traceback
+
+try:
+    import ovirtsdk4.types as otypes
+except ImportError:
+    pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+    BaseModule,
+    check_sdk,
+    create_connection,
+    equal,
+    get_dict_of_struct,
+    get_link_name,
+    ovirt_full_argument_spec,
+    search_by_attributes,
+    search_by_name,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_templates
+short_description: Module to manage virtual machine templates in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+    - "Module to manage virtual machine templates in oVirt."
+options:
+    name:
+        description:
+            - "Name of the template to manage."
+        required: true
+    state:
+        description:
+            - "Should the template be present/absent/exported/imported."
+        choices: ['present', 'absent', 'exported', 'imported']
+        default: present
+    vm:
+        description:
+            - "Name of the VM, which will be used to create the template."
+    description:
+        description:
+            - "Description of the template."
+    cpu_profile:
+        description:
+            - "CPU profile to be set to the template."
+    cluster:
+        description:
+            - "Name of the cluster, where the template should be created/imported."
+    exclusive:
+        description:
+            - "When C(state) is I(exported) this parameter indicates if the existing templates with the
+               same name should be overwritten."
+    export_domain:
+        description:
+            - "When C(state) is I(exported) or I(imported) this parameter specifies the name of the
+               export storage domain."
+    image_provider:
+        description:
+            - "When C(state) is I(imported) this parameter specifies the name of the image provider to be used."
+    image_disk:
+        description:
+            - "When C(state) is I(imported) and C(image_provider) is used this parameter specifies the name of disk
+               to be imported as template."
+    storage_domain:
+        description:
+            - "When C(state) is I(imported) this parameter specifies the name of the destination data storage domain."
+    clone_permissions:
+        description:
+            - "If I(True) then the permissions of the VM (only the direct ones, not the inherited ones)
+               will be copied to the created template."
+            - "This parameter is used only when C(state) is I(present)."
+ default: False +extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Create template from vm +- ovirt_templates: + cluster: Default + name: mytemplate + vm: rhel7 + cpu_profile: Default + description: Test + +# Import template +- ovirt_templates: + state: imported + name: mytemplate + export_domain: myexport + storage_domain: mystorage + cluster: mycluster + +# Remove template +- ovirt_templates: + state: absent + name: mytemplate +''' + +RETURN = ''' +id: + description: ID of the template which is managed + returned: On success if template is found. + type: str + sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c +template: + description: "Dictionary of all the template attributes. Template attributes can be found on your oVirt instance + at following url: https://ovirt.example.com/ovirt-engine/api/model#types/template." + returned: On success if template is found. +''' + + +class TemplatesModule(BaseModule): + + def build_entity(self): + return otypes.Template( + name=self._module.params['name'], + cluster=otypes.Cluster( + name=self._module.params['cluster'] + ) if self._module.params['cluster'] else None, + vm=otypes.Vm( + name=self._module.params['vm'] + ) if self._module.params['vm'] else None, + description=self._module.params['description'], + cpu_profile=otypes.CpuProfile( + id=search_by_name( + self._connection.system_service().cpu_profiles_service(), + self._module.params['cpu_profile'], + ).id + ) if self._module.params['cpu_profile'] else None, + ) + + def update_check(self, entity): + return ( + equal(self._module.params.get('cluster'), get_link_name(self._connection, entity.cluster)) and + equal(self._module.params.get('description'), entity.description) and + equal(self._module.params.get('cpu_profile'), get_link_name(self._connection, entity.cpu_profile)) + ) + + def _get_export_domain_service(self): + provider_name = self._module.params['export_domain'] or self._module.params['image_provider'] + export_sds_service = self._connection.system_service().storage_domains_service() + export_sd = search_by_name(export_sds_service, provider_name) + if export_sd is None: + raise ValueError( + "Export storage domain/Image Provider '%s' wasn't found." 
% provider_name + ) + + return export_sds_service.service(export_sd.id) + + def post_export_action(self, entity): + self._service = self._get_export_domain_service().templates_service() + + def post_import_action(self, entity): + self._service = self._connection.system_service().templates_service() + + +def wait_for_import(module, templates_service): + if module.params['wait']: + start = time.time() + timeout = module.params['timeout'] + poll_interval = module.params['poll_interval'] + while time.time() < start + timeout: + template = search_by_name(templates_service, module.params['name']) + if template: + return template + time.sleep(poll_interval) + + +def main(): + argument_spec = ovirt_full_argument_spec( + state=dict( + choices=['present', 'absent', 'exported', 'imported'], + default='present', + ), + name=dict(default=None, required=True), + vm=dict(default=None), + description=dict(default=None), + cluster=dict(default=None), + cpu_profile=dict(default=None), + disks=dict(default=[], type='list'), + clone_permissions=dict(type='bool'), + export_domain=dict(default=None), + storage_domain=dict(default=None), + exclusive=dict(type='bool'), + image_provider=dict(default=None), + image_disk=dict(default=None), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + check_sdk(module) + + try: + connection = create_connection(module.params.pop('auth')) + templates_service = connection.system_service().templates_service() + templates_module = TemplatesModule( + connection=connection, + module=module, + service=templates_service, + ) + + state = module.params['state'] + if state == 'present': + ret = templates_module.create( + result_state=otypes.TemplateStatus.OK, + clone_permissions=module.params['clone_permissions'], + ) + elif state == 'absent': + ret = templates_module.remove() + elif state == 'exported': + template = templates_module.search_entity() + export_service = templates_module._get_export_domain_service() + export_template = search_by_attributes(export_service.templates_service(), id=template.id) + + ret = templates_module.action( + entity=template, + action='export', + action_condition=lambda t: export_template is None, + wait_condition=lambda t: t is not None, + post_action=templates_module.post_export_action, + storage_domain=otypes.StorageDomain(id=export_service.get().id), + exclusive=module.params['exclusive'], + ) + elif state == 'imported': + template = templates_module.search_entity() + if template: + ret = templates_module.create( + result_state=otypes.TemplateStatus.OK, + ) + else: + kwargs = {} + if module.params['image_provider']: + kwargs.update( + disk=otypes.Disk( + name=module.params['image_disk'] + ), + template=otypes.Template( + name=module.params['name'], + ), + import_as_template=True, + ) + + if module.params['image_disk']: + # We need to refresh storage domain to get list of images: + templates_module._get_export_domain_service().images_service().list() + + glance_service = connection.system_service().openstack_image_providers_service() + image_provider = search_by_name(glance_service, module.params['image_provider']) + images_service = glance_service.service(image_provider.id).images_service() + else: + images_service = templates_module._get_export_domain_service().templates_service() + template_name = module.params['image_disk'] or module.params['name'] + entity = search_by_name(images_service, template_name) + if entity is None: + raise Exception("Image/template '%s' was not found." 
+                        % template_name
+                    )
+
+                images_service.service(entity.id).import_(
+                    storage_domain=otypes.StorageDomain(
+                        name=module.params['storage_domain']
+                    ) if module.params['storage_domain'] else None,
+                    cluster=otypes.Cluster(
+                        name=module.params['cluster']
+                    ) if module.params['cluster'] else None,
+                    **kwargs
+                )
+                template = wait_for_import(module, templates_service)
+                ret = {
+                    'changed': True,
+                    'id': template.id,
+                    'template': get_dict_of_struct(template),
+                }
+
+        module.exit_json(**ret)
+    except Exception as e:
+        module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        connection.close(logout=False)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/cloud/ovirt/ovirt_templates_facts.py b/cloud/ovirt/ovirt_templates_facts.py
new file mode 100644
index 00000000000..4a2c7c0d00f
--- /dev/null
+++ b/cloud/ovirt/ovirt_templates_facts.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+    check_sdk,
+    create_connection,
+    get_dict_of_struct,
+    ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_templates_facts
+short_description: Retrieve facts about one or more oVirt templates
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+    - "Retrieve facts about one or more oVirt templates."
+notes:
+    - "This module creates a new top-level C(ovirt_templates) fact, which
+       contains a list of templates."
+options:
+    pattern:
+        description:
+            - "Search term which is accepted by oVirt search backend."
+            - "For example, to search for template X from datacenter Y use the following pattern:
+               name=X and datacenter=Y"
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all templates whose names start with C(centos) and
+# belong to data center C(west):
+- ovirt_templates_facts:
+    pattern: name=centos* and datacenter=west
+- debug:
+    var: ovirt_templates
+'''
+
+RETURN = '''
+ovirt_templates:
+    description: "List of dictionaries describing the templates. Template attributes are mapped to dictionary keys,
+                  all templates attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/template."
+    returned: On success.
+    type: list
+'''
+
+
+def main():
+    argument_spec = ovirt_full_argument_spec(
+        pattern=dict(default='', required=False),
+    )
+    module = AnsibleModule(argument_spec)
+    check_sdk(module)
+
+    try:
+        connection = create_connection(module.params.pop('auth'))
+        templates_service = connection.system_service().templates_service()
+        templates = templates_service.list(search=module.params['pattern'])
+        module.exit_json(
+            changed=False,
+            ansible_facts=dict(
+                ovirt_templates=[
+                    get_dict_of_struct(c) for c in templates
+                ],
+            ),
+        )
+    except Exception as e:
+        module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        connection.close(logout=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/ovirt/ovirt_users.py b/cloud/ovirt/ovirt_users.py
new file mode 100644
index 00000000000..4fb47122256
--- /dev/null
+++ b/cloud/ovirt/ovirt_users.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+try:
+    import ovirtsdk4.types as otypes
+except ImportError:
+    pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+    BaseModule,
+    check_sdk,
+    check_params,
+    create_connection,
+    ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_users
+short_description: Module to manage users in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+    - "Module to manage users in oVirt."
+options:
+    name:
+        description:
+            - "Name of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
+        required: true
+    state:
+        description:
+            - "Should the user be present/absent."
+        choices: ['present', 'absent']
+        default: present
+    authz_name:
+        description:
+            - "Authorization provider of the user. In previous versions of oVirt known as domain."
+        required: true
+        aliases: ['domain']
+    namespace:
+        description:
+            - "Namespace of the authorization provider, where the user resides."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add user user1 from authorization provider example.com-authz
+ovirt_users:
+    name: user1
+    domain: example.com-authz
+
+# Add user user1 from authorization provider example.com-authz
+# In case of Active Directory specify UPN:
+ovirt_users:
+    name: user1@ad2.example.com
+    domain: example.com-authz
+
+# Remove user user1 with authorization provider example.com-authz
+ovirt_users:
+    state: absent
+    name: user1
+    authz_name: example.com-authz
+'''
+
+RETURN = '''
+id:
+    description: ID of the user which is managed
+    returned: On success if user is found.
+    type: str
+    sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+user:
+    description: "Dictionary of all the user attributes. User attributes can be found on your oVirt instance
+                  at following url: https://ovirt.example.com/ovirt-engine/api/model#types/user."
+    returned: On success if user is found.
+'''
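+
+# Note: the oVirt search backend identifies a user as '<name>@<authz_name>',
+# and its search attribute for the user name is spelled 'usrname' — that is
+# the attribute name the engine expects, not a typo; see the search_params
+# passed to create()/remove() in main() below.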
+
+
+def username(module):
+    return '{}@{}'.format(module.params['name'], module.params['authz_name'])
+
+
+class UsersModule(BaseModule):
+
+    def build_entity(self):
+        return otypes.User(
+            domain=otypes.Domain(
+                name=self._module.params['authz_name']
+            ),
+            user_name=username(self._module),
+            principal=self._module.params['name'],
+            namespace=self._module.params['namespace'],
+        )
+
+
+def main():
+    argument_spec = ovirt_full_argument_spec(
+        state=dict(
+            choices=['present', 'absent'],
+            default='present',
+        ),
+        name=dict(required=True),
+        authz_name=dict(required=True, aliases=['domain']),
+        namespace=dict(default=None),
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+    check_sdk(module)
+    check_params(module)
+
+    try:
+        connection = create_connection(module.params.pop('auth'))
+        users_service = connection.system_service().users_service()
+        users_module = UsersModule(
+            connection=connection,
+            module=module,
+            service=users_service,
+        )
+
+        state = module.params['state']
+        if state == 'present':
+            ret = users_module.create(
+                search_params={
+                    'usrname': username(module),
+                }
+            )
+        elif state == 'absent':
+            ret = users_module.remove(
+                search_params={
+                    'usrname': username(module),
+                }
+            )
+
+        module.exit_json(**ret)
+    except Exception as e:
+        module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        connection.close(logout=False)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/cloud/ovirt/ovirt_users_facts.py b/cloud/ovirt/ovirt_users_facts.py
new file mode 100644
index 00000000000..7d2b04f1fb1
--- /dev/null
+++ b/cloud/ovirt/ovirt_users_facts.py
@@ -0,0 +1,102 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+    check_sdk,
+    create_connection,
+    get_dict_of_struct,
+    ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_users_facts
+short_description: Retrieve facts about one or more oVirt users
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+    - "Retrieve facts about one or more oVirt users."
+notes:
+    - "This module creates a new top-level C(ovirt_users) fact, which
+       contains a list of users."
+options:
+    pattern:
+        description:
+            - "Search term which is accepted by oVirt search backend."
+            - "For example, to search for user X use the following pattern: name=X"
+extends_documentation_fragment: ovirt
+'''
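+
+# The pattern is handed verbatim to the oVirt search backend (the same query
+# syntax the engine's search bar accepts); under the hood the module simply
+# runs something like:
+#
+#     users_service.list(search='name=john*')
+#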
+ - "For example to search user X use following pattern: name=X" +extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Gather facts about all users which first names start with C(john): +- ovirt_users_facts: + pattern: name=john* +- debug: + var: ovirt_users +''' + +RETURN = ''' +ovirt_users: + description: "List of dictionaries describing the users. User attribues are mapped to dictionary keys, + all users attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/user." + returned: On success. + type: list +''' + + +def main(): + argument_spec = ovirt_full_argument_spec( + pattern=dict(default='', required=False), + ) + module = AnsibleModule(argument_spec) + check_sdk(module) + + try: + connection = create_connection(module.params.pop('auth')) + users_service = connection.system_service().users_service() + users = users_service.list(search=module.params['pattern']) + module.exit_json( + changed=False, + ansible_facts=dict( + ovirt_users=[ + get_dict_of_struct(c) for c in users + ], + ), + ) + except Exception as e: + module.fail_json(msg=str(e), exception=traceback.format_exc()) + finally: + connection.close(logout=False) + + +if __name__ == '__main__': + main() diff --git a/cloud/ovirt/ovirt_vmpools.py b/cloud/ovirt/ovirt_vmpools.py new file mode 100644 index 00000000000..82e76d91dc1 --- /dev/null +++ b/cloud/ovirt/ovirt_vmpools.py @@ -0,0 +1,220 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +try: + import ovirtsdk4.types as otypes +except ImportError: + pass + +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ovirt import ( + BaseModule, + check_params, + check_sdk, + create_connection, + equal, + get_link_name, + ovirt_full_argument_spec, + wait, +) + + +ANSIBLE_METADATA = {'status': 'preview', + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ovirt_vmpools +short_description: Module to manage VM pools in oVirt +version_added: "2.3" +author: "Ondra Machacek (@machacekondra)" +description: + - "Module to manage VM pools in oVirt." +options: + name: + description: + - "Name of the the VM pool to manage." + required: true + state: + description: + - "Should the VM pool be present/absent." + - "Note that when C(state) is I(absent) all VMs in VM pool are stopped and removed." + choices: ['present', 'absent'] + default: present + template: + description: + - "Name of the template, which will be used to create VM pool." + description: + description: + - "Description of the VM pool." + cluster: + description: + - "Name of the cluster, where VM pool should be created." + type: + description: + - "Type of the VM pool. Either manual or automatic." 
+ - "C(manual) - The administrator is responsible for explicitly returning the virtual machine to the pool. + The virtual machine reverts to the original base image after the administrator returns it to the pool." + - "C(Automatic) - When the virtual machine is shut down, it automatically reverts to its base image and + is returned to the virtual machine pool." + - "Default value is set by engine." + choices: ['manual', 'automatic'] + vm_per_user: + description: + - "Maximum number of VMs a single user can attach to from this pool." + - "Default value is set by engine." + prestarted: + description: + - "Number of pre-started VMs defines the number of VMs in run state, that are waiting + to be attached to Users." + - "Default value is set by engine." + vm_count: + description: + - "Number of VMs in the pool." + - "Default value is set by engine." +extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Create VM pool from template +- ovirt_vmpools: + cluster: mycluster + name: myvmpool + template: rhel7 + vm_count: 2 + prestarted: 2 + vm_per_user: 1 + +# Remove vmpool, note that all VMs in pool will be stopped and removed: +- ovirt_vmpools: + state: absent + name: myvmpool +''' + +RETURN = ''' +id: + description: ID of the VM pool which is managed + returned: On success if VM pool is found. + type: str + sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c +vm_pool: + description: "Dictionary of all the VM pool attributes. VM pool attributes can be found on your oVirt instance + at following url: https://ovirt.example.com/ovirt-engine/api/model#types/vm_pool." + returned: On success if VM pool is found. +''' + + +class VmPoolsModule(BaseModule): + + def build_entity(self): + return otypes.VmPool( + name=self._module.params['name'], + description=self._module.params['description'], + comment=self._module.params['comment'], + cluster=otypes.Cluster( + name=self._module.params['cluster'] + ) if self._module.params['cluster'] else None, + template=otypes.Template( + name=self._module.params['template'] + ) if self._module.params['template'] else None, + max_user_vms=self._module.params['vm_per_user'], + prestarted_vms=self._module.params['prestarted'], + size=self._module.params['vm_count'], + type=otypes.VmPoolType( + self._module.params['type'] + ) if self._module.params['type'] else None, + ) + + def update_check(self, entity): + return ( + equal(self._module.params.get('cluster'), get_link_name(self._connection, entity.cluster)) and + equal(self._module.params.get('description'), entity.description) and + equal(self._module.params.get('comment'), entity.comment) and + equal(self._module.params.get('vm_per_user'), entity.max_user_vms) and + equal(self._module.params.get('prestarted'), entity.prestarted_vms) and + equal(self._module.params.get('vm_count'), entity.size) + ) + + +def main(): + argument_spec = ovirt_full_argument_spec( + state=dict( + choices=['present', 'absent'], + default='present', + ), + name=dict(default=None, required=True), + template=dict(default=None), + cluster=dict(default=None), + description=dict(default=None), + comment=dict(default=None), + vm_per_user=dict(default=None, type='int'), + prestarted=dict(default=None, type='int'), + vm_count=dict(default=None, type='int'), + type=dict(default=None, choices=['automatic', 'manual']), + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + check_sdk(module) + 
+    check_params(module)
+
+    try:
+        connection = create_connection(module.params.pop('auth'))
+        vm_pools_service = connection.system_service().vm_pools_service()
+        vm_pools_module = VmPoolsModule(
+            connection=connection,
+            module=module,
+            service=vm_pools_service,
+        )
+
+        state = module.params['state']
+        if state == 'present':
+            ret = vm_pools_module.create()
+
+            # Wait for all VM pool VMs to be created:
+            if module.params['wait']:
+                vms_service = connection.system_service().vms_service()
+                for vm in vms_service.list(search='pool=%s' % module.params['name']):
+                    wait(
+                        service=vms_service.service(vm.id),
+                        condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
+                        timeout=module.params['timeout'],
+                    )
+
+        elif state == 'absent':
+            ret = vm_pools_module.remove()
+
+        module.exit_json(**ret)
+    except Exception as e:
+        module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        connection.close(logout=False)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/cloud/ovirt/ovirt_vmpools_facts.py b/cloud/ovirt/ovirt_vmpools_facts.py
new file mode 100644
index 00000000000..fb20a12f833
--- /dev/null
+++ b/cloud/ovirt/ovirt_vmpools_facts.py
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+    check_sdk,
+    create_connection,
+    get_dict_of_struct,
+    ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_vmpools_facts
+short_description: Retrieve facts about one or more oVirt vmpools
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+    - "Retrieve facts about one or more oVirt vmpools."
+notes:
+    - "This module creates a new top-level C(ovirt_vmpools) fact, which
+       contains a list of vmpools."
+options:
+    pattern:
+        description:
+            - "Search term which is accepted by oVirt search backend."
+            - "For example, to search for vmpool X use the following pattern: name=X"
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all vm pools whose names start with C(centos):
+- ovirt_vmpools_facts:
+    pattern: name=centos*
+- debug:
+    var: ovirt_vmpools
+'''
+
+RETURN = '''
+ovirt_vm_pools:
+    description: "List of dictionaries describing the vmpools. VM pool attributes are mapped to dictionary keys,
+                  all vmpools attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/vm_pool."
+    returned: On success.
+    type: list
+'''
+
+
+def main():
+    argument_spec = ovirt_full_argument_spec(
+        pattern=dict(default='', required=False),
+    )
+    module = AnsibleModule(argument_spec)
+    check_sdk(module)
+
+    try:
+        connection = create_connection(module.params.pop('auth'))
+        vmpools_service = connection.system_service().vm_pools_service()
+        vmpools = vmpools_service.list(search=module.params['pattern'])
+        module.exit_json(
+            changed=False,
+            ansible_facts=dict(
+                ovirt_vm_pools=[
+                    get_dict_of_struct(c) for c in vmpools
+                ],
+            ),
+        )
+    except Exception as e:
+        module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        connection.close(logout=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/ovirt/ovirt_vms.py b/cloud/ovirt/ovirt_vms.py
new file mode 100644
index 00000000000..4edfe0aa596
--- /dev/null
+++ b/cloud/ovirt/ovirt_vms.py
@@ -0,0 +1,887 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+    import ovirtsdk4 as sdk
+    import ovirtsdk4.types as otypes
+except ImportError:
+    pass
+
+from ansible.module_utils.ovirt import *
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_vms
+short_description: "Module to manage Virtual Machines in oVirt."
+version_added: "2.2"
+author: "Ondra Machacek (@machacekondra)"
+description:
+    - "This module manages the whole lifecycle of the Virtual Machine (VM) in oVirt. Since a VM can hold many states
+       in oVirt, please see the notes for how the states of the VM are handled."
+options:
+    name:
+        description:
+            - "Name of the Virtual Machine to manage. If the VM doesn't exist, C(name) is required.
+               Otherwise C(id) or C(name) can be used."
+    id:
+        description:
+            - "ID of the Virtual Machine to manage."
+    state:
+        description:
+            - "Should the Virtual Machine be running/stopped/present/absent/suspended/next_run."
+            - "I(present) and I(running) are equal states."
+            - "I(next_run) state updates the VM and if the VM has next run configuration it will be rebooted."
+            - "Please check I(notes) for a more detailed description of the states."
+        choices: ['running', 'stopped', 'present', 'absent', 'suspended', 'next_run']
+        default: present
+    cluster:
+        description:
+            - "Name of the cluster, where Virtual Machine should be created. Required if creating VM."
+    template:
+        description:
+            - "Name of the template, which should be used to create Virtual Machine. Required if creating VM."
+            - "If template is not specified and VM doesn't exist, VM will be created from I(Blank) template."
+    template_version:
+        description:
+            - "Version number of the template to be used for VM."
+            - "By default the latest available version of the template is used."
+        version_added: "2.3"
+    use_latest_template_version:
+        description:
+            - "Specify if the latest template version should be used when running a stateless VM."
+ - "If this parameter is set to I(true) stateless VM is created." + version_added: "2.3" + memory: + description: + - "Amount of memory of the Virtual Machine. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB)." + - "Default value is set by engine." + memory_guaranteed: + description: + - "Amount of minimal guaranteed memory of the Virtual Machine. + Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB)." + - "C(memory_guaranteed) parameter can't be lower than C(memory) parameter. Default value is set by engine." + cpu_shares: + description: + - "Set a CPU shares for this Virtual Machine. Default value is set by oVirt engine." + cpu_cores: + description: + - "Number of virtual CPUs cores of the Virtual Machine. Default value is set by oVirt engine." + cpu_sockets: + description: + - "Number of virtual CPUs sockets of the Virtual Machine. Default value is set by oVirt engine." + type: + description: + - "Type of the Virtual Machine. Default value is set by oVirt engine." + choices: [server, desktop] + operating_system: + description: + - "Operating system of the Virtual Machine. Default value is set by oVirt engine." + choices: [ + rhel_6_ppc64, other, freebsd, windows_2003x64, windows_10, rhel_6x64, rhel_4x64, windows_2008x64, + windows_2008R2x64, debian_7, windows_2012x64, ubuntu_14_04, ubuntu_12_04, ubuntu_13_10, windows_8x64, + other_linux_ppc64, windows_2003, other_linux, windows_10x64, windows_2008, rhel_3, rhel_5, rhel_4, + other_ppc64, sles_11, rhel_6, windows_xp, rhel_7x64, freebsdx64, rhel_7_ppc64, windows_7, rhel_5x64, + ubuntu_14_04_ppc64, sles_11_ppc64, windows_8, windows_2012R2x64, windows_2008r2x64, ubuntu_13_04, + ubuntu_12_10, windows_7x64 + ] + boot_devices: + description: + - "List of boot devices which should be used to boot. Choices I(network), I(hd) and I(cdrom)." + - "For example: ['cdrom', 'hd']. Default value is set by oVirt engine." + host: + description: + - "Specify host where Virtual Machine should be running. By default the host is chosen by engine scheduler." + - "This parameter is used only when C(state) is I(running) or I(present)." + high_availability: + description: + - "If I(True) Virtual Machine will be set as highly available." + - "If I(False) Virtual Machine won't be set as highly available." + - "If no value is passed, default value is set by oVirt engine." + delete_protected: + description: + - "If I(True) Virtual Machine will be set as delete protected." + - "If I(False) Virtual Machine won't be set as delete protected." + - "If no value is passed, default value is set by oVirt engine." + stateless: + description: + - "If I(True) Virtual Machine will be set as stateless." + - "If I(False) Virtual Machine will be unset as stateless." + - "If no value is passed, default value is set by oVirt engine." + clone: + description: + - "If I(True) then the disks of the created virtual machine will be cloned and independent of the template." + - "This parameter is used only when C(state) is I(running) or I(present) and VM didn't exist before." + default: False + clone_permissions: + description: + - "If I(True) then the permissions of the template (only the direct ones, not the inherited ones) + will be copied to the created virtual machine." + - "This parameter is used only when C(state) is I(running) or I(present) and VM didn't exist before." + default: False + cd_iso: + description: + - "ISO file from ISO storage domain which should be attached to Virtual Machine." + - "If you pass empty string the CD will be ejected from VM." 
+ - "If used with C(state) I(running) or I(present) and VM is running the CD will be attached to VM." + - "If used with C(state) I(running) or I(present) and VM is down the CD will be attached to VM persistently." + force: + description: + - "Please check to I(Synopsis) to more detailed description of force parameter, it can behave differently + in different situations." + default: False + nics: + description: + - "List of NICs, which should be attached to Virtual Machine. NIC is described by following dictionary:" + - "C(name) - Name of the NIC." + - "C(profile_name) - Profile name where NIC should be attached." + - "C(interface) - Type of the network interface. One of following: I(virtio), I(e1000), I(rtl8139), default is I(virtio)." + - "C(mac_address) - Custom MAC address of the network interface, by default it's obtained from MAC pool." + - "C(Note:)" + - "This parameter is used only when C(state) is I(running) or I(present) and is able to only create NICs. + To manage NICs of the VM in more depth please use M(ovirt_nics) module instead." + disks: + description: + - "List of disks, which should be attached to Virtual Machine. Disk is described by following dictionary:" + - "C(name) - Name of the disk. Either C(name) or C(id) is reuqired." + - "C(id) - ID of the disk. Either C(name) or C(id) is reuqired." + - "C(interface) - Interface of the disk, either I(virtio) or I(IDE), default is I(virtio)." + - "C(bootable) - I(True) if the disk should be bootable, default is non bootable." + - "C(activate) - I(True) if the disk should be activated, default is activated." + - "C(Note:)" + - "This parameter is used only when C(state) is I(running) or I(present) and is able to only attach disks. + To manage disks of the VM in more depth please use M(ovirt_disks) module instead." + sysprep: + description: + - "Dictionary with values for Windows Virtual Machine initialization using sysprep:" + - "C(host_name) - Hostname to be set to Virtual Machine when deployed." + - "C(active_directory_ou) - Active Directory Organizational Unit, to be used for login of user." + - "C(org_name) - Organization name to be set to Windows Virtual Machine." + - "C(domain) - Domain to be set to Windows Virtual Machine." + - "C(timezone) - Timezone to be set to Windows Virtual Machine." + - "C(ui_language) - UI language of the Windows Virtual Machine." + - "C(system_locale) - System localization of the Windows Virtual Machine." + - "C(input_locale) - Input localization of the Windows Virtual Machine." + - "C(windows_license_key) - License key to be set to Windows Virtual Machine." + - "C(user_name) - Username to be used for set password to Windows Virtual Machine." + - "C(root_password) - Password to be set for username to Windows Virtual Machine." + cloud_init: + description: + - "Dictionary with values for Unix-like Virtual Machine initialization using cloud init:" + - "C(host_name) - Hostname to be set to Virtual Machine when deployed." + - "C(timezone) - Timezone to be set to Virtual Machine when deployed." + - "C(user_name) - Username to be used to set password to Virtual Machine when deployed." + - "C(root_password) - Password to be set for user specified by C(user_name) parameter." + - "C(authorized_ssh_keys) - Use this SSH keys to login to Virtual Machine." + - "C(regenerate_ssh_keys) - If I(True) SSH keys will be regenerated on Virtual Machine." + - "C(custom_script) - Cloud-init script which will be executed on Virtual Machine when deployed." + - "C(dns_servers) - DNS servers to be configured on Virtual Machine." 
+ - "C(dns_search) - DNS search domains to be configured on Virtual Machine." + - "C(nic_boot_protocol) - Set boot protocol of the network interface of Virtual Machine. Can be one of none, dhcp or static." + - "C(nic_ip_address) - If boot protocol is static, set this IP address to network interface of Virtual Machine." + - "C(nic_netmask) - If boot protocol is static, set this netmask to network interface of Virtual Machine." + - "C(nic_gateway) - If boot protocol is static, set this gateway to network interface of Virtual Machine." + - "C(nic_name) - Set name to network interface of Virtual Machine." + - "C(nic_on_boot) - If I(True) network interface will be set to start on boot." + cloud_init_nics: + description: + - "List of dictionaries representing network interafaces to be setup by cloud init." + - "This option is used, when user needs to setup more network interfaces via cloud init." + - "If one network interface is enough, user should use C(cloud_init) I(nic_*) parameters. C(cloud_init) I(nic_*) parameters + are merged with C(cloud_init_nics) parameters." + - "Dictionary can contain following values:" + - "C(nic_boot_protocol) - Set boot protocol of the network interface of Virtual Machine. Can be one of none, dhcp or static." + - "C(nic_ip_address) - If boot protocol is static, set this IP address to network interface of Virtual Machine." + - "C(nic_netmask) - If boot protocol is static, set this netmask to network interface of Virtual Machine." + - "C(nic_gateway) - If boot protocol is static, set this gateway to network interface of Virtual Machine." + - "C(nic_name) - Set name to network interface of Virtual Machine." + - "C(nic_on_boot) - If I(True) network interface will be set to start on boot." + version_added: "2.3" +notes: + - "If VM is in I(UNASSIGNED) or I(UNKNOWN) state before any operation, the module will fail. + If VM is in I(IMAGE_LOCKED) state before any operation, we try to wait for VM to be I(DOWN). + If VM is in I(SAVING_STATE) state before any operation, we try to wait for VM to be I(SUSPENDED). + If VM is in I(POWERING_DOWN) state before any operation, we try to wait for VM to be I(UP) or I(DOWN). VM can + get into I(UP) state from I(POWERING_DOWN) state, when there is no ACPI or guest agent running inside VM, or + if the shutdown operation fails. + When user specify I(UP) C(state), we always wait to VM to be in I(UP) state in case VM is I(MIGRATING), + I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE), I(WAIT_FOR_LAUNCH). In other states we run start operation on VM. + When user specify I(stopped) C(state), and If user pass C(force) parameter set to I(true) we forcibly stop the VM in + any state. If user don't pass C(force) parameter, we always wait to VM to be in UP state in case VM is + I(MIGRATING), I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE), I(WAIT_FOR_LAUNCH). If VM is in I(PAUSED) or + I(SUSPENDED) state, we start the VM. Then we gracefully shutdown the VM. + When user specify I(suspended) C(state), we always wait to VM to be in UP state in case VM is I(MIGRATING), + I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE), I(WAIT_FOR_LAUNCH). If VM is in I(PAUSED) or I(DOWN) state, + we start the VM. Then we suspend the VM. + When user specify I(absent) C(state), we forcibly stop the VM in any state and remove it." 
+extends_documentation_fragment: ovirt +''' + +EXAMPLES = ''' +# Examples don't contain auth parameter for simplicity, +# look at ovirt_auth module to see how to reuse authentication: + +# Creates a new Virtual Machine from template named 'rhel7_template' +ovirt_vms: + state: present + name: myvm + template: rhel7_template + +# Creates a stateless VM which will always use latest template version: +ovirt_vms: + name: myvm + template: rhel7 + cluster: mycluster + use_latest_template_version: true + +# Creates a new server rhel7 Virtual Machine from Blank template +# on brq01 cluster with 2GiB memory and 2 vcpu cores/sockets +# and attach bootable disk with name rhel7_disk and attach virtio NIC +ovirt_vms: + state: present + cluster: brq01 + name: myvm + memory: 2GiB + cpu_cores: 2 + cpu_sockets: 2 + cpu_shares: 1024 + type: server + operating_system: rhel_7x64 + disks: + - name: rhel7_disk + bootable: True + nics: + - name: nic1 + +# Run VM with cloud init: +ovirt_vms: + name: rhel7 + template: rhel7 + cluster: Default + memory: 1GiB + high_availability: true + cloud_init: + nic_boot_protocol: static + nic_ip_address: 10.34.60.86 + nic_netmask: 255.255.252.0 + nic_gateway: 10.34.63.254 + nic_name: eth1 + nic_on_boot: true + host_name: example.com + custom_script: | + write_files: + - content: | + Hello, world! + path: /tmp/greeting.txt + permissions: '0644' + user_name: root + root_password: super_password + +# Run VM with cloud init, with multiple network interfaces: +ovirt_vms: + name: rhel7_4 + template: rhel7 + cluster: mycluster + cloud_init_nics: + - nic_name: eth0 + nic_boot_protocol: dhcp + nic_on_boot: true + - nic_name: eth1 + nic_boot_protocol: static + nic_ip_address: 10.34.60.86 + nic_netmask: 255.255.252.0 + nic_gateway: 10.34.63.254 + nic_on_boot: true + +# Run VM with sysprep: +ovirt_vms: + name: windows2012R2_AD + template: windows2012R2 + cluster: Default + memory: 3GiB + high_availability: true + sysprep: + host_name: windowsad.example.com + user_name: Administrator + root_password: SuperPassword123 + +# Migrate/Run VM to/on host named 'host1' +ovirt_vms: + state: running + name: myvm + host: host1 + +# Change Vm's CD: +ovirt_vms: + name: myvm + cd_iso: drivers.iso + +# Eject Vm's CD: +ovirt_vms: + name: myvm + cd_iso: '' + +# Boot VM from CD: +ovirt_vms: + name: myvm + cd_iso: centos7_x64.iso + boot_devices: + - cdrom + +# Stop vm: +ovirt_vms: + state: stopped + name: myvm + +# Upgrade memory to already created VM: +ovirt_vms: + name: myvm + memory: 4GiB + +# Hot plug memory to already created and running VM: +# (VM won't be restarted) +ovirt_vms: + name: myvm + memory: 4GiB + +# When change on the VM needs restart of the VM, use next_run state, +# The VM will be updated and rebooted if there are any changes. +# If present state would be used, VM won't be restarted. +ovirt_vms: + state: next_run + name: myvm + boot_devices: + - network + +# Remove VM, if VM is running it will be stopped: +ovirt_vms: + state: absent + name: myvm +''' + + +RETURN = ''' +id: + description: ID of the VM which is managed + returned: On success if VM is found. + type: str + sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c +vm: + description: "Dictionary of all the VM attributes. VM attributes can be found on your oVirt instance + at following url: https://ovirt.example.com/ovirt-engine/api/model#types/vm." + returned: On success if VM is found. 
+'''
+
+
+class VmsModule(BaseModule):
+
+    def __get_template_with_version(self):
+        """
+        oVirt in version 4.1 doesn't support search by template+version_number,
+        so we need to list all templates with a specific name and then iterate
+        through their versions until we find the one we are looking for.
+        """
+        template = None
+        if self._module.params['template']:
+            templates_service = self._connection.system_service().templates_service()
+            templates = templates_service.list(search='name=%s' % self._module.params['template'])
+            if self._module.params['template_version']:
+                templates = [
+                    t for t in templates
+                    if t.version.version_number == self._module.params['template_version']
+                ]
+            if templates:
+                template = templates[0]
+
+        return template
+
+    def build_entity(self):
+        template = self.__get_template_with_version()
+        return otypes.Vm(
+            name=self._module.params['name'],
+            cluster=otypes.Cluster(
+                name=self._module.params['cluster']
+            ) if self._module.params['cluster'] else None,
+            template=otypes.Template(
+                id=template.id,
+            ) if template else None,
+            use_latest_template_version=self._module.params['use_latest_template_version'],
+            stateless=self._module.params['stateless'] or self._module.params['use_latest_template_version'],
+            delete_protected=self._module.params['delete_protected'],
+            high_availability=otypes.HighAvailability(
+                enabled=self._module.params['high_availability']
+            ) if self._module.params['high_availability'] is not None else None,
+            cpu=otypes.Cpu(
+                topology=otypes.CpuTopology(
+                    cores=self._module.params['cpu_cores'],
+                    sockets=self._module.params['cpu_sockets'],
+                )
+            ) if (
+                self._module.params['cpu_cores'] or self._module.params['cpu_sockets']
+            ) else None,
+            cpu_shares=self._module.params['cpu_shares'],
+            os=otypes.OperatingSystem(
+                type=self._module.params['operating_system'],
+                boot=otypes.Boot(
+                    devices=[
+                        otypes.BootDevice(dev) for dev in self._module.params['boot_devices']
+                    ],
+                ) if self._module.params['boot_devices'] else None,
+            ) if (
+                self._module.params['operating_system'] or self._module.params['boot_devices']
+            ) else None,
+            type=otypes.VmType(
+                self._module.params['type']
+            ) if self._module.params['type'] else None,
+            memory=convert_to_bytes(
+                self._module.params['memory']
+            ) if self._module.params['memory'] else None,
+            memory_policy=otypes.MemoryPolicy(
+                guaranteed=convert_to_bytes(self._module.params['memory_guaranteed']),
+            ) if self._module.params['memory_guaranteed'] else None,
+        )
+
+    def update_check(self, entity):
+        return (
+            equal(self._module.params.get('cluster'), get_link_name(self._connection, entity.cluster)) and
+            equal(convert_to_bytes(self._module.params['memory']), entity.memory) and
+            equal(convert_to_bytes(self._module.params['memory_guaranteed']), entity.memory_policy.guaranteed) and
+            equal(self._module.params.get('cpu_cores'), entity.cpu.topology.cores) and
+            equal(self._module.params.get('cpu_sockets'), entity.cpu.topology.sockets) and
+            equal(self._module.params.get('type'), str(entity.type)) and
+            equal(self._module.params.get('operating_system'), str(entity.os.type)) and
+            equal(self._module.params.get('high_availability'), entity.high_availability.enabled) and
+            equal(self._module.params.get('stateless'), entity.stateless) and
+            equal(self._module.params.get('cpu_shares'), entity.cpu_shares) and
+            equal(self._module.params.get('delete_protected'), entity.delete_protected) and
+            equal(self._module.params.get('use_latest_template_version'), entity.use_latest_template_version) and
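+            # equal() (from module_utils.ovirt) treats a None module parameter
+            # as "no change requested", so only explicitly set parameters take
+            # part in this idempotency check; boot devices are read back from
+            # entity.os.boot, where build_entity() above sets them.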
+            equal(self._module.params.get('boot_devices'), [str(dev) for dev in getattr(entity.os, 'devices', [])])
+        )
+
+    def pre_create(self, entity):
+        # If the VM doesn't exist and no template is specified, default to the Blank template:
+        if entity is None:
+            if self._module.params.get('template') is None:
+                self._module.params['template'] = 'Blank'
+
+    def post_update(self, entity):
+        self.post_create(entity)
+
+    def post_create(self, entity):
+        # After creation of the VM, attach disks and NICs:
+        self.changed = self.__attach_disks(entity)
+        self.changed = self.__attach_nics(entity)
+
+    def pre_remove(self, entity):
+        # Forcibly stop the VM if it's not in the DOWN state:
+        if entity.status != otypes.VmStatus.DOWN:
+            if not self._module.check_mode:
+                self.changed = self.action(
+                    action='stop',
+                    action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
+                    wait_condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
+                )['changed']
+
+    def __suspend_shutdown_common(self, vm_service):
+        if vm_service.get().status in [
+            otypes.VmStatus.MIGRATING,
+            otypes.VmStatus.POWERING_UP,
+            otypes.VmStatus.REBOOT_IN_PROGRESS,
+            otypes.VmStatus.WAIT_FOR_LAUNCH,
+            otypes.VmStatus.UP,
+            otypes.VmStatus.RESTORING_STATE,
+        ]:
+            self._wait_for_UP(vm_service)
+
+    def _pre_shutdown_action(self, entity):
+        vm_service = self._service.vm_service(entity.id)
+        self.__suspend_shutdown_common(vm_service)
+        if entity.status in [otypes.VmStatus.SUSPENDED, otypes.VmStatus.PAUSED]:
+            vm_service.start()
+            self._wait_for_UP(vm_service)
+        return vm_service.get()
+
+    def _pre_suspend_action(self, entity):
+        vm_service = self._service.vm_service(entity.id)
+        self.__suspend_shutdown_common(vm_service)
+        if entity.status in [otypes.VmStatus.PAUSED, otypes.VmStatus.DOWN]:
+            vm_service.start()
+            self._wait_for_UP(vm_service)
+        return vm_service.get()
+
+    def _post_start_action(self, entity):
+        vm_service = self._service.service(entity.id)
+        self._wait_for_UP(vm_service)
+        self._attach_cd(vm_service.get())
+        self._migrate_vm(vm_service.get())
+
+    def _attach_cd(self, entity):
+        cd_iso = self._module.params['cd_iso']
+        if cd_iso is not None:
+            vm_service = self._service.service(entity.id)
+            current = vm_service.get().status == otypes.VmStatus.UP
+            cdroms_service = vm_service.cdroms_service()
+            cdrom_device = cdroms_service.list()[0]
+            cdrom_service = cdroms_service.cdrom_service(cdrom_device.id)
+            cdrom = cdrom_service.get(current=current)
+            if getattr(cdrom.file, 'id', '') != cd_iso:
+                if not self._module.check_mode:
+                    cdrom_service.update(
+                        cdrom=otypes.Cdrom(
+                            file=otypes.File(id=cd_iso)
+                        ),
+                        current=current,
+                    )
+                self.changed = True
+
+        return entity
+
+    def _migrate_vm(self, entity):
+        vm_host = self._module.params['host']
+        vm_service = self._service.vm_service(entity.id)
+        if vm_host is not None:
+            # If the VM is UP, check whether it already runs on the requested host
+            # and migrate it if it doesn't:
+            if entity.status == otypes.VmStatus.UP:
+                hosts_service = self._connection.system_service().hosts_service()
+                current_vm_host = hosts_service.host_service(entity.host.id).get().name
+                if vm_host != current_vm_host:
+                    if not self._module.check_mode:
+                        vm_service.migrate(host=otypes.Host(name=vm_host))
+                        self._wait_for_UP(vm_service)
+                    self.changed = True
+
+        return entity
+
+    def _wait_for_UP(self, vm_service):
+        wait(
+            service=vm_service,
+            condition=lambda vm: vm.status == otypes.VmStatus.UP,
+            wait=self._module.params['wait'],
+            timeout=self._module.params['timeout'],
+        )
+
+    def __attach_disks(self, entity):
+        disks_service = self._connection.system_service().disks_service()
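+
+        # Each disk in the `disks` parameter may reference an existing disk
+        # by `id`; when no id is given, the disk is looked up by `name` below.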
+        for disk in self._module.params['disks']:
+            # If disk ID is not specified, find the disk by name:
+            disk_id = disk.get('id')
+            if disk_id is None:
+                disk_id = getattr(
+                    search_by_name(
+                        service=disks_service,
+                        name=disk.get('name')
+                    ),
+                    'id',
+                    None
+                )
+
+            # Attach the disk to the VM:
+            disk_attachments_service = self._service.service(entity.id).disk_attachments_service()
+            if disk_attachments_service.attachment_service(disk_id).get() is None:
+                if not self._module.check_mode:
+                    disk_attachments_service.add(
+                        otypes.DiskAttachment(
+                            disk=otypes.Disk(
+                                id=disk_id,
+                            ),
+                            active=disk.get('activate', True),
+                            interface=otypes.DiskInterface(
+                                disk.get('interface', 'virtio')
+                            ),
+                            bootable=disk.get('bootable', False),
+                        )
+                    )
+                self.changed = True
+
+    def __attach_nics(self, entity):
+        # Attach NICs to the VM, if specified:
+        vnic_profiles_service = self._connection.system_service().vnic_profiles_service()
+        nics_service = self._service.service(entity.id).nics_service()
+        for nic in self._module.params['nics']:
+            if search_by_name(nics_service, nic.get('name')) is None:
+                if not self._module.check_mode:
+                    nics_service.add(
+                        otypes.Nic(
+                            name=nic.get('name'),
+                            interface=otypes.NicInterface(
+                                nic.get('interface', 'virtio')
+                            ),
+                            vnic_profile=otypes.VnicProfile(
+                                id=search_by_name(
+                                    vnic_profiles_service,
+                                    nic.get('profile_name'),
+                                ).id
+                            ) if nic.get('profile_name') else None,
+                            mac=otypes.Mac(
+                                address=nic.get('mac_address')
+                            ) if nic.get('mac_address') else None,
+                        )
+                    )
+                self.changed = True
+
+
+def _get_initialization(sysprep, cloud_init, cloud_init_nics):
+    initialization = None
+    if cloud_init or cloud_init_nics:
+        initialization = otypes.Initialization(
+            nic_configurations=[
+                otypes.NicConfiguration(
+                    boot_protocol=otypes.BootProtocol(
+                        nic.pop('nic_boot_protocol').lower()
+                    ) if nic.get('nic_boot_protocol') else None,
+                    name=nic.pop('nic_name', None),
+                    on_boot=nic.pop('nic_on_boot', None),
+                    ip=otypes.Ip(
+                        address=nic.pop('nic_ip_address', None),
+                        netmask=nic.pop('nic_netmask', None),
+                        gateway=nic.pop('nic_gateway', None),
+                    ) if (
+                        nic.get('nic_gateway') is not None or
+                        nic.get('nic_netmask') is not None or
+                        nic.get('nic_ip_address') is not None
+                    ) else None,
+                )
+                for nic in cloud_init_nics
+                if (
+                    nic.get('nic_gateway') is not None or
+                    nic.get('nic_netmask') is not None or
+                    nic.get('nic_ip_address') is not None or
+                    nic.get('nic_boot_protocol') is not None or
+                    nic.get('nic_on_boot') is not None
+                )
+            ] if cloud_init_nics else None,
+            **(cloud_init or {})
+        )
+    elif sysprep:
+        initialization = otypes.Initialization(
+            **sysprep
+        )
+    return initialization
+
+
+def control_state(vm, vms_service, module):
+    if vm is None:
+        return
+
+    force = module.params['force']
+    state = module.params['state']
+
+    vm_service = vms_service.vm_service(vm.id)
+    if vm.status == otypes.VmStatus.IMAGE_LOCKED:
+        wait(
+            service=vm_service,
+            condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
+        )
+    elif vm.status == otypes.VmStatus.SAVING_STATE:
+        # The resulting state is SUSPENDED, so wait until the VM is suspended:
+        wait(
+            service=vm_service,
+            condition=lambda vm: vm.status == otypes.VmStatus.SUSPENDED,
+        )
+    elif (
+        vm.status == otypes.VmStatus.UNASSIGNED or
+        vm.status == otypes.VmStatus.UNKNOWN
+    ):
+        # Invalid states:
+        module.fail_json(msg="Not possible to control VM if it's in '{}' status".format(vm.status))
+    elif vm.status == otypes.VmStatus.POWERING_DOWN:
+        if (force and state == 'stopped') or state == 'absent':
+            vm_service.stop()
+            wait(
+                service=vm_service,
+                condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
+            )
+        else:
+            # If the VM is powering down, wait for it to reach DOWN or UP.
+            # The VM can end up in the UP state when there is no guest agent
+            # or ACPI support on the VM, or when the shutdown operation failed:
+            wait(
+                service=vm_service,
+                condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
+            )
+
+
+def main():
+    argument_spec = ovirt_full_argument_spec(
+        state=dict(
+            choices=['running', 'stopped', 'present', 'absent', 'suspended', 'next_run'],
+            default='present',
+        ),
+        name=dict(default=None),
+        id=dict(default=None),
+        cluster=dict(default=None),
+        template=dict(default=None),
+        template_version=dict(default=None, type='int'),
+        use_latest_template_version=dict(default=None, type='bool'),
+        disks=dict(default=[], type='list'),
+        memory=dict(default=None),
+        memory_guaranteed=dict(default=None),
+        cpu_sockets=dict(default=None, type='int'),
+        cpu_cores=dict(default=None, type='int'),
+        cpu_shares=dict(default=None, type='int'),
+        type=dict(choices=['server', 'desktop']),
+        operating_system=dict(
+            default=None,
+            choices=[
+                'rhel_6_ppc64', 'other', 'freebsd', 'windows_2003x64', 'windows_10',
+                'rhel_6x64', 'rhel_4x64', 'windows_2008x64', 'windows_2008R2x64',
+                'debian_7', 'windows_2012x64', 'ubuntu_14_04', 'ubuntu_12_04',
+                'ubuntu_13_10', 'windows_8x64', 'other_linux_ppc64', 'windows_2003',
+                'other_linux', 'windows_10x64', 'windows_2008', 'rhel_3', 'rhel_5',
+                'rhel_4', 'other_ppc64', 'sles_11', 'rhel_6', 'windows_xp', 'rhel_7x64',
+                'freebsdx64', 'rhel_7_ppc64', 'windows_7', 'rhel_5x64',
+                'ubuntu_14_04_ppc64', 'sles_11_ppc64', 'windows_8',
+                'windows_2012R2x64', 'windows_2008r2x64', 'ubuntu_13_04',
+                'ubuntu_12_10', 'windows_7x64',
+            ],
+        ),
+        cd_iso=dict(default=None),
+        boot_devices=dict(default=None, type='list'),
+        high_availability=dict(type='bool'),
+        stateless=dict(type='bool'),
+        delete_protected=dict(type='bool'),
+        force=dict(type='bool', default=False),
+        nics=dict(default=[], type='list'),
+        cloud_init=dict(type='dict'),
+        cloud_init_nics=dict(default=[], type='list'),
+        sysprep=dict(type='dict'),
+        host=dict(default=None),
+        clone=dict(type='bool', default=False),
+        clone_permissions=dict(type='bool', default=False),
+    )
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+    check_sdk(module)
+    check_params(module)
+
+    try:
+        state = module.params['state']
+        connection = create_connection(module.params.pop('auth'))
+        vms_service = connection.system_service().vms_service()
+        vms_module = VmsModule(
+            connection=connection,
+            module=module,
+            service=vms_service,
+        )
+        vm = vms_module.search_entity()
+
+        control_state(vm, vms_service, module)
+        if state == 'present' or state == 'running' or state == 'next_run':
+            sysprep = module.params['sysprep']
+            cloud_init = module.params['cloud_init']
+            cloud_init_nics = module.params['cloud_init_nics']
+            if cloud_init is not None:
+                cloud_init_nics.append(cloud_init)
+
+            # If the VM doesn't exist, wait for the DOWN state after creation;
+            # otherwise don't wait for any state, just update the VM:
+            vms_module.create(
+                entity=vm,
+                result_state=otypes.VmStatus.DOWN if vm is None else None,
+                clone=module.params['clone'],
+                clone_permissions=module.params['clone_permissions'],
+            )
+            ret = vms_module.action(
+                action='start',
+                post_action=vms_module._post_start_action,
+                action_condition=lambda vm: (
+                    vm.status not in [
+                        otypes.VmStatus.MIGRATING,
+                        otypes.VmStatus.POWERING_UP,
+                        otypes.VmStatus.REBOOT_IN_PROGRESS,
+                        otypes.VmStatus.WAIT_FOR_LAUNCH,
+                        otypes.VmStatus.UP,
+                        otypes.VmStatus.RESTORING_STATE,
+                    ]
+                ),
+                wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+                # Start action kwargs:
+                use_cloud_init=cloud_init is not None or len(cloud_init_nics) > 0,
+                use_sysprep=sysprep is not None,
+                vm=otypes.Vm(
+                    placement_policy=otypes.VmPlacementPolicy(
+                        hosts=[otypes.Host(name=module.params['host'])]
+                    ) if module.params['host'] else None,
+                    initialization=_get_initialization(sysprep, cloud_init, cloud_init_nics),
+                ),
+            )
+
+            if state == 'next_run':
+                # Apply the next-run configuration, if needed:
+                vm = vms_service.vm_service(ret['id']).get()
+                if vm.next_run_configuration_exists:
+                    ret = vms_module.action(
+                        action='reboot',
+                        entity=vm,
+                        action_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+                        wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
+                    )
+        elif state == 'stopped':
+            vms_module.create(
+                result_state=otypes.VmStatus.DOWN if vm is None else None,
+                clone=module.params['clone'],
+                clone_permissions=module.params['clone_permissions'],
+            )
+            if module.params['force']:
+                ret = vms_module.action(
+                    action='stop',
+                    post_action=vms_module._attach_cd,
+                    action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
+                    wait_condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
+                )
+            else:
+                ret = vms_module.action(
+                    action='shutdown',
+                    pre_action=vms_module._pre_shutdown_action,
+                    post_action=vms_module._attach_cd,
+                    action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
+                    wait_condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
+                )
+        elif state == 'suspended':
+            vms_module.create(
+                result_state=otypes.VmStatus.DOWN if vm is None else None,
+                clone=module.params['clone'],
+                clone_permissions=module.params['clone_permissions'],
+            )
+            ret = vms_module.action(
+                action='suspend',
+                pre_action=vms_module._pre_suspend_action,
+                action_condition=lambda vm: vm.status != otypes.VmStatus.SUSPENDED,
+                wait_condition=lambda vm: vm.status == otypes.VmStatus.SUSPENDED,
+            )
+        elif state == 'absent':
+            ret = vms_module.remove()
+
+        module.exit_json(**ret)
+    except Exception as e:
+        module.fail_json(msg=str(e))
+    finally:
+        connection.close(logout=False)
+
+from ansible.module_utils.basic import *
+if __name__ == "__main__":
+    main()
diff --git a/cloud/ovirt/ovirt_vms_facts.py b/cloud/ovirt/ovirt_vms_facts.py
new file mode 100644
index 00000000000..2a11ad75280
--- /dev/null
+++ b/cloud/ovirt/ovirt_vms_facts.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+    check_sdk,
+    create_connection,
+    get_dict_of_struct,
+    ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_vms_facts
+short_description: Retrieve facts about one or more oVirt virtual machines
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+    - "Retrieve facts about one or more oVirt virtual machines."
+notes:
+    - "This module creates a new top-level C(ovirt_vms) fact, which
+       contains a list of virtual machines."
+options:
+    pattern:
+      description:
+        - "Search term which is accepted by the oVirt search backend."
+        - "For example, to search for VM X from cluster Y use the following pattern:
+           name=X and cluster=Y"
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all VMs whose names start with C(centos) and
+# which belong to cluster C(west):
+- ovirt_vms_facts:
+    pattern: name=centos* and cluster=west
+- debug:
+    var: ovirt_vms
+'''
+
+RETURN = '''
+ovirt_vms:
+    description: "List of dictionaries describing the VMs. VM attributes are mapped to dictionary keys;
+                  all VM attributes can be found at the following url: https://ovirt.example.com/ovirt-engine/api/model#types/vm."
+    returned: On success.
+    type: list
+'''
+
+
+def main():
+    argument_spec = ovirt_full_argument_spec(
+        pattern=dict(default='', required=False),
+    )
+    module = AnsibleModule(argument_spec)
+    check_sdk(module)
+
+    try:
+        connection = create_connection(module.params.pop('auth'))
+        vms_service = connection.system_service().vms_service()
+        vms = vms_service.list(search=module.params['pattern'])
+        module.exit_json(
+            changed=False,
+            ansible_facts=dict(
+                ovirt_vms=[
+                    get_dict_of_struct(c) for c in vms
+                ],
+            ),
+        )
+    except Exception as e:
+        module.fail_json(msg=str(e), exception=traceback.format_exc())
+    finally:
+        connection.close(logout=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/profitbricks/profitbricks.py b/cloud/profitbricks/profitbricks.py
index 556c652828e..cfafc8e0a46 100644
--- a/cloud/profitbricks/profitbricks.py
+++ b/cloud/profitbricks/profitbricks.py
@@ -14,6 +14,10 @@
 # You should have received a copy of the GNU General Public License
 # along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
 
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: profitbricks
@@ -31,13 +35,23 @@
     description:
       - The name of the virtual machine.
     required: true
-  image:
+  image:
     description:
       - The system image ID for creating the virtual machine, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8.
     required: true
+  image_password:
+    description:
+      - Password set for the administrative user.
+    required: false
+    version_added: '2.2'
+  ssh_keys:
+    description:
+      - Public SSH keys allowing access to the virtual machine.
+    required: false
+    version_added: '2.2'
   datacenter:
     description:
-      - The Datacenter to provision this virtual machine.
+      - The datacenter to provision this virtual machine.
     required: false
     default: null
   cores:
@@ -50,6 +64,13 @@
       - The amount of memory to allocate to the virtual machine.
     required: false
     default: 2048
+  cpu_family:
+    description:
+      - The CPU family type to allocate to the virtual machine.
+ required: false + default: AMD_OPTERON + choices: [ "AMD_OPTERON", "INTEL_XEON" ] + version_added: '2.2' volume_size: description: - The size in GB of the boot volume. @@ -72,10 +93,10 @@ default: 1 location: description: - - The datacenter location. Use only if you want to create the Datacenter or else this value is ignored. + - The datacenter location. Use only if you want to create the Datacenter or else this value is ignored. required: false default: us/las - choices: [ "us/las", "us/lasdev", "de/fra", "de/fkb" ] + choices: [ "us/las", "de/fra", "de/fkb" ] assign_public_ip: description: - This will assign the machine to the public LAN. If no LAN exists with public Internet access it is created. @@ -129,7 +150,7 @@ # Note: These examples do not set authentication details, see the AWS Guide for details. -# Provisioning example. This will create three servers and enumerate their names. +# Provisioning example. This will create three servers and enumerate their names. - profitbricks: datacenter: Tardis One @@ -137,6 +158,7 @@ cores: 4 ram: 2048 volume_size: 50 + cpu_family: INTEL_XEON image: a3eae284-a2fe-11e4-b187-5f1f641608c8 location: us/las count: 3 @@ -188,10 +210,13 @@ except ImportError: HAS_PB_SDK = False +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception + + LOCATIONS = ['us/las', 'de/fra', - 'de/fkb', - 'us/lasdev'] + 'de/fkb'] uuid_match = re.compile( '[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) @@ -218,11 +243,15 @@ def _wait_for_completion(profitbricks, promise, wait_timeout, msg): promise['requestId'] ) + '" to complete.') + def _create_machine(module, profitbricks, datacenter, name): - image = module.params.get('image') cores = module.params.get('cores') ram = module.params.get('ram') + cpu_family = module.params.get('cpu_family') volume_size = module.params.get('volume_size') + disk_type = module.params.get('disk_type') + image_password = module.params.get('image_password') + ssh_keys = module.params.get('ssh_keys') bus = module.params.get('bus') lan = module.params.get('lan') assign_public_ip = module.params.get('assign_public_ip') @@ -234,26 +263,6 @@ def _create_machine(module, profitbricks, datacenter, name): wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') - try: - # Generate name, but grab first 10 chars so we don't - # screw up the uuid match routine. - v = Volume( - name=str(uuid.uuid4()).replace('-','')[:10], - size=volume_size, - image=image, - bus=bus) - - volume_response = profitbricks.create_volume( - datacenter_id=datacenter, volume=v) - - # We're forced to wait on the volume creation since - # server create relies upon this existing. 
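+    # Note: the standalone volume-creation step that used to live here was
+    # dropped; the boot volume is now created together with the server via
+    # the `create_volumes` argument of Server() below.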
- - _wait_for_completion(profitbricks, volume_response, - wait_timeout, "create_volume") - except Exception as e: - module.fail_json(msg="failed to create the new volume: %s" % str(e)) - if assign_public_ip: public_found = False @@ -269,80 +278,63 @@ def _create_machine(module, profitbricks, datacenter, name): public=True) lan_response = profitbricks.create_lan(datacenter, i) - - lan = lan_response['id'] - _wait_for_completion(profitbricks, lan_response, wait_timeout, "_create_machine") + lan = lan_response['id'] - try: - n = NIC( - lan=int(lan) - ) - - nics = [n] + v = Volume( + name=str(uuid.uuid4()).replace('-', '')[:10], + size=volume_size, + image=image, + image_password=image_password, + ssh_keys=ssh_keys, + disk_type=disk_type, + bus=bus) + + n = NIC( + lan=int(lan) + ) - s = Server( - name=name, - ram=ram, - cores=cores, - nics=nics, - boot_volume_id=volume_response['id'] - ) + s = Server( + name=name, + ram=ram, + cores=cores, + cpu_family=cpu_family, + create_volumes=[v], + nics=[n], + ) - server_response = profitbricks.create_server( + try: + create_server_response = profitbricks.create_server( datacenter_id=datacenter, server=s) - if wait: - _wait_for_completion(profitbricks, server_response, - wait_timeout, "create_virtual_machine") - + _wait_for_completion(profitbricks, create_server_response, + wait_timeout, "create_virtual_machine") - return (server_response) + server_response = profitbricks.get_server( + datacenter_id=datacenter, + server_id=create_server_response['id'], + depth=3 + ) except Exception as e: module.fail_json(msg="failed to create the new server: %s" % str(e)) + else: + return server_response -def _remove_machine(module, profitbricks, datacenter, name): - remove_boot_volume = module.params.get('remove_boot_volume') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - changed = False - - # User provided the actual UUID instead of the name. - try: - if remove_boot_volume: - # Collect information needed for later. 
- server = profitbricks.get_server(datacenter, name) - volume_id = server['properties']['bootVolume']['href'].split('/')[7] - - server_response = profitbricks.delete_server(datacenter, name) - changed = True - - except Exception as e: - module.fail_json(msg="failed to terminate the virtual server: %s" % str(e)) - - # Remove the bootVolume - if remove_boot_volume: - try: - volume_response = profitbricks.delete_volume(datacenter, volume_id) - - except Exception as e: - module.fail_json(msg="failed to remove the virtual server's bootvolume: %s" % str(e)) - - return changed -def _startstop_machine(module, profitbricks, datacenter, name): +def _startstop_machine(module, profitbricks, datacenter_id, server_id): state = module.params.get('state') try: if state == 'running': - profitbricks.start_server(datacenter, name) + profitbricks.start_server(datacenter_id, server_id) else: - profitbricks.stop_server(datacenter, name) + profitbricks.stop_server(datacenter_id, server_id) return True except Exception as e: - module.fail_json(msg="failed to start or stop the virtual machine %s: %s" % (name, str(e))) + module.fail_json(msg="failed to start or stop the virtual machine %s at %s: %s" % (server_id, datacenter_id, str(e))) + def _create_datacenter(module, profitbricks): datacenter = module.params.get('datacenter') @@ -364,6 +356,7 @@ def _create_datacenter(module, profitbricks): except Exception as e: module.fail_json(msg="failed to create the new server(s): %s" % str(e)) + def create_virtual_machine(module, profitbricks): """ Create new virtual machine @@ -386,19 +379,15 @@ def create_virtual_machine(module, profitbricks): virtual_machines = [] virtual_machine_ids = [] - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - datacenter_found = True - break + # Locate UUID for datacenter if referenced by name. + datacenter_list = profitbricks.list_datacenters() + datacenter_id = _get_datacenter_id(datacenter_list, datacenter) + if datacenter_id: + datacenter_found = True if not datacenter_found: datacenter_response = _create_datacenter(module, profitbricks) - datacenter = datacenter_response['id'] + datacenter_id = datacenter_response['id'] _wait_for_completion(profitbricks, datacenter_response, wait_timeout, "create_virtual_machine") @@ -409,30 +398,38 @@ def create_virtual_machine(module, profitbricks): try: name % 0 - except TypeError, e: + except TypeError: + e = get_exception() if e.message.startswith('not all'): name = '%s%%d' % name else: module.fail_json(msg=e.message) - number_range = xrange(count_offset,count_offset + count + len(numbers)) + number_range = xrange(count_offset, count_offset + count + len(numbers)) available_numbers = list(set(number_range).difference(numbers)) names = [] numbers_to_use = available_numbers[:count] for number in numbers_to_use: names.append(name % number) else: - names = [name] * count + names = [name] + + # Prefetch a list of servers for later comparison. + server_list = profitbricks.list_servers(datacenter_id) + for name in names: + # Skip server creation if the server already exists. 
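+        # (This keeps repeated runs idempotent: existing servers are left
+        # untouched instead of being created again.)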
+ if _get_server_id(server_list, name): + continue - for name in names: - create_response = _create_machine(module, profitbricks, str(datacenter), name) - nics = profitbricks.list_nics(datacenter,create_response['id']) + create_response = _create_machine(module, profitbricks, str(datacenter_id), name) + nics = profitbricks.list_nics(datacenter_id, create_response['id']) for n in nics['items']: if lan == n['properties']['lan']: - create_response.update({ 'public_ip': n['properties']['ips'][0] }) + create_response.update({'public_ip': n['properties']['ips'][0]}) virtual_machines.append(create_response) - failed = False + + failed = False results = { 'failed': failed, @@ -445,9 +442,10 @@ def create_virtual_machine(module, profitbricks): return results + def remove_virtual_machine(module, profitbricks): """ - Removes a virtual machine. + Removes a virtual machine. This will remove the virtual machine along with the bootVolume. @@ -459,36 +457,58 @@ def remove_virtual_machine(module, profitbricks): Returns: True if a new virtual server was deleted, false otherwise """ + datacenter = module.params.get('datacenter') + instance_ids = module.params.get('instance_ids') + remove_boot_volume = module.params.get('remove_boot_volume') + changed = False + if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1: module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting') - datacenter = module.params.get('datacenter') - instance_ids = module.params.get('instance_ids') + # Locate UUID for datacenter if referenced by name. + datacenter_list = profitbricks.list_datacenters() + datacenter_id = _get_datacenter_id(datacenter_list, datacenter) + if not datacenter_id: + module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter)) + + # Prefetch server list for later comparison. + server_list = profitbricks.list_servers(datacenter_id) + for instance in instance_ids: + # Locate UUID for server if referenced by name. 
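+        # _get_server_id() accepts either the server's name or its UUID.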
+ server_id = _get_server_id(server_list, instance) + if server_id: + # Remove the server's boot volume + if remove_boot_volume: + _remove_boot_volume(module, profitbricks, datacenter_id, server_id) + + # Remove the server + try: + server_response = profitbricks.delete_server(datacenter_id, server_id) + except Exception: + e = get_exception() + module.fail_json(msg="failed to terminate the virtual server: %s" % str(e)) + else: + changed = True - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break + return changed - for n in instance_ids: - if(uuid_match.match(n)): - _remove_machine(module, profitbricks, d['id'], n) - else: - servers = profitbricks.list_servers(d['id']) - for s in servers['items']: - if n == s['properties']['name']: - server_id = s['id'] +def _remove_boot_volume(module, profitbricks, datacenter_id, server_id): + """ + Remove the boot volume from the server + """ + try: + server = profitbricks.get_server(datacenter_id, server_id) + volume_id = server['properties']['bootVolume']['id'] + volume_response = profitbricks.delete_volume(datacenter_id, volume_id) + except Exception: + e = get_exception() + module.fail_json(msg="failed to remove the server's boot volume: %s" % str(e)) - _remove_machine(module, profitbricks, datacenter, server_id) def startstop_machine(module, profitbricks, state): """ - Starts or Stops a virtual machine. + Starts or Stops a virtual machine. module : AnsibleModule object profitbricks: authenticated profitbricks object. @@ -506,41 +526,32 @@ def startstop_machine(module, profitbricks, state): datacenter = module.params.get('datacenter') instance_ids = module.params.get('instance_ids') - # Locate UUID for Datacenter - if not (uuid_match.match(datacenter)): - datacenter_list = profitbricks.list_datacenters() - for d in datacenter_list['items']: - dc = profitbricks.get_datacenter(d['id']) - if datacenter == dc['properties']['name']: - datacenter = d['id'] - break - - for n in instance_ids: - if(uuid_match.match(n)): - _startstop_machine(module, profitbricks, datacenter, n) - + # Locate UUID for datacenter if referenced by name. + datacenter_list = profitbricks.list_datacenters() + datacenter_id = _get_datacenter_id(datacenter_list, datacenter) + if not datacenter_id: + module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter)) + + # Prefetch server list for later comparison. + server_list = profitbricks.list_servers(datacenter_id) + for instance in instance_ids: + # Locate UUID of server if referenced by name. 
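+        # Instances that cannot be found in this datacenter are skipped silently.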
+ server_id = _get_server_id(server_list, instance) + if server_id: + _startstop_machine(module, profitbricks, datacenter_id, server_id) changed = True - else: - servers = profitbricks.list_servers(d['id']) - - for s in servers['items']: - if n == s['properties']['name']: - server_id = s['id'] - _startstop_machine(module, profitbricks, datacenter, server_id) - - changed = True if wait: wait_timeout = time.time() + wait_timeout while wait_timeout > time.time(): matched_instances = [] - for res in profitbricks.list_servers(datacenter)['items']: + for res in profitbricks.list_servers(datacenter_id)['items']: if state == 'running': if res['properties']['vmState'].lower() == state: matched_instances.append(res) elif state == 'stopped': if res['properties']['vmState'].lower() == 'shutoff': - matched_instances.append(res) + matched_instances.append(res) if len(matched_instances) < len(instance_ids): time.sleep(5) @@ -549,24 +560,50 @@ def startstop_machine(module, profitbricks, state): if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg = "wait for virtual machine state timeout on %s" % time.asctime()) + module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime()) return (changed) + +def _get_datacenter_id(datacenters, identity): + """ + Fetch and return datacenter UUID by datacenter name if found. + """ + for datacenter in datacenters['items']: + if identity in (datacenter['properties']['name'], datacenter['id']): + return datacenter['id'] + return None + + +def _get_server_id(servers, identity): + """ + Fetch and return server UUID by server name if found. + """ + for server in servers['items']: + if identity in (server['properties']['name'], server['id']): + return server['id'] + return None + + def main(): module = AnsibleModule( argument_spec=dict( datacenter=dict(), name=dict(), image=dict(), - cores=dict(default=2), - ram=dict(default=2048), - volume_size=dict(default=10), - bus=dict(default='VIRTIO'), - lan=dict(default=1), - count=dict(default=1), + cores=dict(type='int', default=2), + ram=dict(type='int', default=2048), + cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'], + default='AMD_OPTERON'), + volume_size=dict(type='int', default=10), + disk_type=dict(choices=['HDD', 'SSD'], default='HDD'), + image_password=dict(default=None), + ssh_keys=dict(type='list', default=[]), + bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'), + lan=dict(type='int', default=1), + count=dict(type='int', default=1), auto_increment=dict(type='bool', default=True), - instance_ids=dict(), + instance_ids=dict(type='list', default=[]), subscription_user=dict(), subscription_password=dict(), location=dict(choices=LOCATIONS, default='us/las'), @@ -583,8 +620,6 @@ def main(): subscription_user = module.params.get('subscription_user') subscription_password = module.params.get('subscription_password') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') profitbricks = ProfitBricksService( username=subscription_user, @@ -594,23 +629,25 @@ def main(): if state == 'absent': if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required ' + + module.fail_json(msg='datacenter parameter is required ' + 'for running or stopping machines.') try: (changed) = remove_virtual_machine(module, profitbricks) module.exit_json(changed=changed) - except Exception as e: + except Exception: + e = get_exception() module.fail_json(msg='failed to set instance state: %s' % str(e)) elif state in ('running', 
'stopped'): if not module.params.get('datacenter'): - module.fail_json(msg='datacenter parameter is required for ' + + module.fail_json(msg='datacenter parameter is required for ' + 'running or stopping machines.') try: (changed) = startstop_machine(module, profitbricks, state) module.exit_json(changed=changed) - except Exception as e: + except Exception: + e = get_exception() module.fail_json(msg='failed to set instance state: %s' % str(e)) elif state == 'present': @@ -619,19 +656,19 @@ def main(): if not module.params.get('image'): module.fail_json(msg='image parameter is required for new instance') if not module.params.get('subscription_user'): - module.fail_json(msg='subscription_user parameter is ' + + module.fail_json(msg='subscription_user parameter is ' + 'required for new instance') if not module.params.get('subscription_password'): - module.fail_json(msg='subscription_password parameter is ' + + module.fail_json(msg='subscription_password parameter is ' + 'required for new instance') try: (machine_dict_array) = create_virtual_machine(module, profitbricks) module.exit_json(**machine_dict_array) - except Exception as e: + except Exception: + e = get_exception() module.fail_json(msg='failed to set instance state: %s' % str(e)) -from ansible.module_utils.basic import * - -main() +if __name__ == '__main__': + main() diff --git a/cloud/profitbricks/profitbricks_datacenter.py b/cloud/profitbricks/profitbricks_datacenter.py index cd0e38ee383..b6ce2371653 100644 --- a/cloud/profitbricks/profitbricks_datacenter.py +++ b/cloud/profitbricks/profitbricks_datacenter.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: profitbricks_datacenter @@ -35,7 +39,7 @@ - The datacenter location. required: false default: us/las - choices: [ "us/las", "us/lasdev", "de/fra", "de/fkb" ] + choices: [ "us/las", "de/fra", "de/fkb" ] subscription_user: description: - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environement variable. @@ -94,8 +98,7 @@ LOCATIONS = ['us/las', 'de/fra', - 'de/fkb', - 'us/lasdev'] + 'de/fkb'] uuid_match = re.compile( '[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) @@ -206,7 +209,7 @@ def main(): argument_spec=dict( name=dict(), description=dict(), - location=dict(choices=LOCATIONS, default='us/lasdev'), + location=dict(choices=LOCATIONS, default='us/las'), subscription_user=dict(), subscription_password=dict(), wait=dict(type='bool', default=True), @@ -256,4 +259,5 @@ def main(): from ansible.module_utils.basic import * -main() \ No newline at end of file +if __name__ == '__main__': + main() diff --git a/cloud/profitbricks/profitbricks_nic.py b/cloud/profitbricks/profitbricks_nic.py index 902d5266843..01377a338b3 100644 --- a/cloud/profitbricks/profitbricks_nic.py +++ b/cloud/profitbricks/profitbricks_nic.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: profitbricks_nic @@ -287,4 +291,5 @@ def main(): from ansible.module_utils.basic import * -main() \ No newline at end of file +if __name__ == '__main__': + main() diff --git a/cloud/profitbricks/profitbricks_volume.py b/cloud/profitbricks/profitbricks_volume.py index 89a69d5e61a..caed8579aa7 100644 --- a/cloud/profitbricks/profitbricks_volume.py +++ b/cloud/profitbricks/profitbricks_volume.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: profitbricks_volume @@ -41,24 +45,35 @@ required: false default: VIRTIO choices: [ "IDE", "VIRTIO"] - image: + image: description: - The system image ID for the volume, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. This can also be a snapshot image ID. required: true + image_password: + description: + - Password set for the administrative user. + required: false + version_added: '2.2' + ssh_keys: + description: + - Public SSH keys allowing access to the virtual machine. + required: false + version_added: '2.2' disk_type: description: - - The disk type. Currently only HDD. + - The disk type of the volume. required: false default: HDD + choices: [ "HDD", "SSD" ] licence_type: description: - - The licence type for the volume. This is used when the image is non-standard. + - The licence type for the volume. This is used when the image is non-standard. required: false default: UNKNOWN choices: ["LINUX", "WINDOWS", "UNKNOWN" , "OTHER"] count: description: - - The number of volumes you wish to create. + - The number of volumes you wish to create. required: false default: 1 auto_increment: @@ -124,7 +139,6 @@ ''' import re -import uuid import time HAS_PB_SDK = True @@ -134,6 +148,10 @@ except ImportError: HAS_PB_SDK = False +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception + + uuid_match = re.compile( '[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I) @@ -159,10 +177,13 @@ def _wait_for_completion(profitbricks, promise, wait_timeout, msg): promise['requestId'] ) + '" to complete.') + def _create_volume(module, profitbricks, datacenter, name): size = module.params.get('size') bus = module.params.get('bus') image = module.params.get('image') + image_password = module.params.get('image_password') + ssh_keys = module.params.get('ssh_keys') disk_type = module.params.get('disk_type') licence_type = module.params.get('licence_type') wait_timeout = module.params.get('wait_timeout') @@ -174,6 +195,8 @@ def _create_volume(module, profitbricks, datacenter, name): size=size, bus=bus, image=image, + image_password=image_password, + ssh_keys=ssh_keys, disk_type=disk_type, licence_type=licence_type ) @@ -186,20 +209,22 @@ def _create_volume(module, profitbricks, datacenter, name): except Exception as e: module.fail_json(msg="failed to create the volume: %s" % str(e)) - + return volume_response + def _delete_volume(module, profitbricks, datacenter, volume): try: profitbricks.delete_volume(datacenter, volume) except Exception as e: module.fail_json(msg="failed to remove the volume: %s" % str(e)) + def create_volume(module, profitbricks): """ Creates a volume. - This will create a volume in a datacenter. + This will create a volume in a datacenter. 
     module : AnsibleModule object
     profitbricks: authenticated profitbricks object.
@@ -235,13 +260,14 @@ def create_volume(module, profitbricks):
 
         try:
             name % 0
-        except TypeError, e:
+        except TypeError:
+            e = get_exception()
             if e.message.startswith('not all'):
                 name = '%s%%d' % name
             else:
                 module.fail_json(msg=e.message)
 
-        number_range = xrange(count_offset,count_offset + count + len(numbers))
+        number_range = xrange(count_offset, count_offset + count + len(numbers))
         available_numbers = list(set(number_range).difference(numbers))
         names = []
         numbers_to_use = available_numbers[:count]
@@ -250,9 +276,10 @@
     else:
         names = [name] * count
 
-    for name in names:
+    for name in names:
         create_response = _create_volume(module, profitbricks, str(datacenter), name)
         volumes.append(create_response)
+        _attach_volume(module, profitbricks, datacenter, create_response['id'])
 
     failed = False
 
     results = {
@@ -266,11 +293,12 @@
     return results
 
+
 def delete_volume(module, profitbricks):
     """
-    Removes a volume.
+    Removes a volume.
 
-    This will create a volume in a datacenter.
+    This will remove a volume from a datacenter.
 
     module : AnsibleModule object
     profitbricks: authenticated profitbricks object.
@@ -308,19 +336,53 @@ def delete_volume(module, profitbricks):
 
     return changed
 
+
+def _attach_volume(module, profitbricks, datacenter, volume):
+    """
+    Attaches a volume.
+
+    This will attach a volume to the server.
+
+    module : AnsibleModule object
+    profitbricks: authenticated profitbricks object.
+
+    Returns:
+        the attach volume API response, or fails the module on error
+    """
+    server = module.params.get('server')
+
+    # Locate UUID for Server
+    if server:
+        if not (uuid_match.match(server)):
+            server_list = profitbricks.list_servers(datacenter)
+            for s in server_list['items']:
+                if server == s['properties']['name']:
+                    server = s['id']
+                    break
+
+    try:
+        return profitbricks.attach_volume(datacenter, server, volume)
+    except Exception:
+        e = get_exception()
+        module.fail_json(msg='failed to attach volume: %s' % str(e))
+
+
 def main():
     module = AnsibleModule(
         argument_spec=dict(
             datacenter=dict(),
+            server=dict(),
             name=dict(),
-            size=dict(default=10),
-            bus=dict(default='VIRTIO'),
+            size=dict(type='int', default=10),
+            bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
             image=dict(),
-            disk_type=dict(default='HDD'),
+            image_password=dict(default=None),
+            ssh_keys=dict(type='list', default=[]),
+            disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
             licence_type=dict(default='UNKNOWN'),
-            count=dict(default=1),
+            count=dict(type='int', default=1),
             auto_increment=dict(type='bool', default=True),
-            instance_ids=dict(),
+            instance_ids=dict(type='list', default=[]),
             subscription_user=dict(),
             subscription_password=dict(),
             wait=dict(type='bool', default=True),
@@ -350,7 +412,8 @@ def main():
         try:
             (changed) = delete_volume(module, profitbricks)
             module.exit_json(changed=changed)
-        except Exception as e:
+        except Exception:
+            e = get_exception()
             module.fail_json(msg='failed to set volume state: %s' % str(e))
 
     elif state == 'present':
@@ -360,11 +423,12 @@
             module.fail_json(msg='name parameter is required for new instance')
 
         try:
-            (failed, volume_dict_array) = create_volume(module, profitbricks)
-            module.exit_json(failed=failed, volumes=volume_dict_array)
-        except Exception as e:
+            (volume_dict_array) = create_volume(module, profitbricks)
+            module.exit_json(**volume_dict_array)
+        except Exception:
+            e = get_exception()
             module.fail_json(msg='failed to set volume state: %s' % str(e))
-from ansible.module_utils.basic import * -main() \ No newline at end of file +if __name__ == '__main__': + main() diff --git a/cloud/profitbricks/profitbricks_volume_attachments.py b/cloud/profitbricks/profitbricks_volume_attachments.py index fe87594fddc..1904c470a55 100644 --- a/cloud/profitbricks/profitbricks_volume_attachments.py +++ b/cloud/profitbricks/profitbricks_volume_attachments.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: profitbricks_volume_attachments @@ -259,4 +263,5 @@ def main(): from ansible.module_utils.basic import * -main() \ No newline at end of file +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_clb_ssl.py b/cloud/rackspace/rax_clb_ssl.py index 2013b8c4d81..37c35b32de6 100644 --- a/cloud/rackspace/rax_clb_ssl.py +++ b/cloud/rackspace/rax_clb_ssl.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION=''' module: rax_clb_ssl short_description: Manage SSL termination for a Rackspace Cloud Load Balancer. @@ -156,7 +160,7 @@ def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key, if needs_change: try: balancer.add_ssl_termination(**ssl_attrs) - except pyrax.exceptions.PyraxException, e: + except pyrax.exceptions.PyraxException as e: module.fail_json(msg='%s' % e.message) changed = True elif state == 'absent': @@ -164,7 +168,7 @@ def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key, if existing_ssl: try: balancer.delete_ssl_termination() - except pyrax.exceptions.PyraxException, e: + except pyrax.exceptions.PyraxException as e: module.fail_json(msg='%s' % e.message) changed = True @@ -176,7 +180,7 @@ def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key, try: balancer.update(httpsRedirect=https_redirect) - except pyrax.exceptions.PyraxException, e: + except pyrax.exceptions.PyraxException as e: module.fail_json(msg='%s' % e.message) changed = True @@ -266,4 +270,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.rax import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_mon_alarm.py b/cloud/rackspace/rax_mon_alarm.py index a3f29e22f50..0df4fad3401 100644 --- a/cloud/rackspace/rax_mon_alarm.py +++ b/cloud/rackspace/rax_mon_alarm.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_mon_alarm @@ -224,4 +228,5 @@ def main(): from ansible.module_utils.rax import * # Invoke the module. 
-main() +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_mon_check.py b/cloud/rackspace/rax_mon_check.py index 14b86864e2f..c8bcfcd569a 100644 --- a/cloud/rackspace/rax_mon_check.py +++ b/cloud/rackspace/rax_mon_check.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_mon_check @@ -310,4 +314,5 @@ def main(): from ansible.module_utils.rax import * # Invoke the module. -main() +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_mon_entity.py b/cloud/rackspace/rax_mon_entity.py index f5f142d2165..fae58309652 100644 --- a/cloud/rackspace/rax_mon_entity.py +++ b/cloud/rackspace/rax_mon_entity.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_mon_entity @@ -68,8 +72,8 @@ state: present label: my_entity named_ip_addresses: - web_box: 192.168.0.10 - db_box: 192.168.0.11 + web_box: 192.0.2.4 + db_box: 192.0.2.5 meta: hurf: durf register: the_entity @@ -189,4 +193,5 @@ def main(): from ansible.module_utils.rax import * # Invoke the module. -main() +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_mon_notification.py b/cloud/rackspace/rax_mon_notification.py index d7b6692dc2c..21396e7cb06 100644 --- a/cloud/rackspace/rax_mon_notification.py +++ b/cloud/rackspace/rax_mon_notification.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_mon_notification @@ -173,4 +177,5 @@ def main(): from ansible.module_utils.rax import * # Invoke the module. -main() +if __name__ == '__main__': + main() diff --git a/cloud/rackspace/rax_mon_notification_plan.py b/cloud/rackspace/rax_mon_notification_plan.py index 5bb3fa1652a..a0b283884ff 100644 --- a/cloud/rackspace/rax_mon_notification_plan.py +++ b/cloud/rackspace/rax_mon_notification_plan.py @@ -16,6 +16,10 @@ # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rax_mon_notification_plan @@ -178,4 +182,5 @@ def main(): from ansible.module_utils.rax import * # Invoke the module. -main() +if __name__ == '__main__': + main() diff --git a/cloud/serverless.py b/cloud/serverless.py new file mode 100644 index 00000000000..a075a2b49b0 --- /dev/null +++ b/cloud/serverless.py @@ -0,0 +1,191 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Ryan Scott Brown +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: serverless
+short_description: Manages a Serverless Framework project
+description:
+  - Provides support for managing Serverless Framework (https://serverless.com/) project deployments and stacks.
+version_added: "2.3"
+options:
+  state:
+    choices: ['present', 'absent']
+    description:
+      - Goal state of the given stage/project.
+    required: false
+    default: present
+  service_path:
+    description:
+      - The path to the root of the Serverless Service to be operated on.
+    required: true
+  functions:
+    description:
+      - A list of specific functions to deploy. If this is not provided, all functions in the service will be deployed.
+    required: false
+    default: []
+  region:
+    description:
+      - AWS region to deploy the service to.
+    required: false
+    default: us-east-1
+  deploy:
+    description:
+      - Whether or not to deploy artifacts after building them. When this option is `false` all the functions will be built, but no stack update will be run to send them out. This is mostly useful for generating artifacts to be stored/deployed elsewhere.
+    required: false
+    default: true
+notes:
+  - Currently, the `serverless` command must be in the path of the node executing the task. In the future this may be a flag.
+requirements: [ "serverless" ]
+author: "Ryan Scott Brown (@ryansb)"
+'''
+
+EXAMPLES = """
+# Basic deploy of a service
+- serverless:
+    service_path: '{{ project_dir }}'
+    state: present
+
+# Deploy specific functions
+- serverless:
+    service_path: '{{ project_dir }}'
+    functions:
+      - my_func_one
+      - my_func_two
+
+# Deploy a project, then pull its resource list back into Ansible
+- serverless:
+    stage: dev
+    region: us-east-1
+    service_path: '{{ project_dir }}'
+  register: sls
+# The CloudFormation stack is always named the same as the full service, so the
+# cloudformation_facts module can get a full list of the stack resources, as
+# well as stack events and outputs
+- cloudformation_facts:
+    region: us-east-1
+    stack_name: '{{ sls.service_name }}'
+    stack_resources: true
+"""
+
+RETURN = """
+service_name:
+  type: string
+  description: The service name, as read from serverless.yml, suffixed with the deployed stage.
+  returned: always
+  sample: my-fancy-service-dev
+state:
+  type: string
+  description: Whether the stack for the serverless project is present/absent.
+  returned: always
+command:
+  type: string
+  description: Full `serverless` command run by this module, in case you want to re-run the command outside the module.
+  returned: always
+  sample: serverless deploy --stage production
+"""
+
+
+import os
+import traceback
+import yaml
+
+
+def read_serverless_config(module):
+    path = os.path.expanduser(module.params.get('service_path'))
+
+    try:
+        with open(os.path.join(path, 'serverless.yml')) as sls_config:
+            config = yaml.safe_load(sls_config.read())
+            return config
+    except IOError as e:
+        module.fail_json(msg="Could not open serverless.yml in {}. err: {}".format(path, str(e)), exception=traceback.format_exc())
+
+    module.fail_json(msg="Failed to open serverless config at {}".format(
+        os.path.join(path, 'serverless.yml')))
+
+
+def get_service_name(module, stage):
+    config = read_serverless_config(module)
+    if config.get('service') is None:
+        module.fail_json(msg="Could not read `service` key from serverless.yml file")
+
+    if stage:
+        return "{}-{}".format(config['service'], stage)
+
+    return "{}-{}".format(config['service'], config.get('stage', 'dev'))
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            service_path=dict(required=True),
+            state=dict(default='present', choices=['present', 'absent'], required=False),
+            functions=dict(type='list', required=False),
+            region=dict(default='', required=False),
+            stage=dict(default='', required=False),
+            deploy=dict(default=True, type='bool', required=False),
+        ),
+    )
+
+    service_path = os.path.expanduser(module.params.get('service_path'))
+    state = module.params.get('state')
+    functions = module.params.get('functions')
+    region = module.params.get('region')
+    stage = module.params.get('stage')
+    deploy = module.params.get('deploy', True)
+
+    command = "serverless "
+    if state == 'present':
+        command += 'deploy '
+    elif state == 'absent':
+        command += 'remove '
+    else:
+        module.fail_json(msg="State must either be 'present' or 'absent'. Received: {}".format(state))
+
+    if not deploy and state == 'present':
+        command += '--noDeploy '
+    if region:
+        command += '--region {} '.format(region)
+    if stage:
+        command += '--stage {} '.format(stage)
+
+    rc, out, err = module.run_command(command, cwd=service_path)
+    if rc != 0:
+        if state == 'absent' and "-{}' does not exist".format(stage) in out:
+            module.exit_json(changed=False, state='absent', command=command,
+                             out=out, service_name=get_service_name(module, stage))
+
+        module.fail_json(msg="Failure when executing Serverless command. Exited {}.\nstdout: {}\nstderr: {}".format(rc, out, err))
+
+    # gather some facts about the deployment
+    module.exit_json(changed=True, state='present', out=out, command=command,
+                     service_name=get_service_name(module, stage))
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/smartos/__init__.py b/cloud/smartos/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/cloud/smartos/smartos_image_facts.py b/cloud/smartos/smartos_image_facts.py
new file mode 100644
index 00000000000..487aa3f648c
--- /dev/null
+++ b/cloud/smartos/smartos_image_facts.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Števko
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: smartos_image_facts
+short_description: Get SmartOS image details.
+description:
+    - Retrieve facts about all installed images on SmartOS. Facts will be
+      inserted into the ansible_facts key.
+version_added: "2.2"
+author: Adam Števko (@xen0l)
+options:
+    filters:
+        description:
+            - Criteria for selecting image. Can be any value from image
+              manifest and 'published_date', 'published', 'source', 'clones',
+              and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm)
+              under 'imgadm list'.
+        required: false
+        default: None
+'''
+
+EXAMPLES = '''
+# Return facts about all installed images.
+smartos_image_facts:
+
+# Return all private active Linux images.
+smartos_image_facts: filters="os=linux state=active public=false"
+
+# Show how many clones each image has.
+smartos_image_facts:
+
+debug: msg="{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }}
+            has {{ smartos_images[item]['clones'] }} VM(s)"
+with_items: "{{ smartos_images.keys() }}"
+'''
+
+RETURN = '''
+# this module returns ansible_facts
+'''
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+
+
+class ImageFacts(object):
+
+    def __init__(self, module):
+        self.module = module
+
+        self.filters = module.params['filters']
+
+    def return_all_installed_images(self):
+        cmd = [self.module.get_bin_path('imgadm')]
+
+        cmd.append('list')
+        cmd.append('-j')
+
+        if self.filters:
+            cmd.append(self.filters)
+
+        (rc, out, err) = self.module.run_command(cmd)
+
+        if rc != 0:
+            self.module.fail_json(
+                msg='Failed to get all installed images', stderr=err)
+
+        images = json.loads(out)
+
+        result = {}
+        for image in images:
+            result[image['manifest']['uuid']] = image['manifest']
+            # Merge additional attributes with the image manifest.
+            for attrib in ['clones', 'source', 'zpool']:
+                result[image['manifest']['uuid']][attrib] = image[attrib]
+
+        return result
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            filters=dict(default=None),
+        ),
+        supports_check_mode=False,
+    )
+
+    image_facts = ImageFacts(module)
+
+    data = {}
+    data['smartos_images'] = image_facts.return_all_installed_images()
+
+    module.exit_json(ansible_facts=data)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/softlayer/__init__.py b/cloud/softlayer/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/cloud/softlayer/sl_vm.py b/cloud/softlayer/sl_vm.py
new file mode 100644
index 00000000000..b24c0f06fac
--- /dev/null
+++ b/cloud/softlayer/sl_vm.py
@@ -0,0 +1,364 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: sl_vm
+short_description: create or cancel a virtual instance in SoftLayer
+description:
+  - Creates or cancels SoftLayer instances. When created, it can optionally wait for the instance to be 'running'.
+version_added: "2.1"
+options:
+  instance_id:
+    description:
+      - Instance ID of the virtual instance on which to perform the action.
+    required: false
+    default: null
+  hostname:
+    description:
+      - Hostname to be provided to a virtual instance.
+    required: false
+    default: null
+  domain:
+    description:
+      - Domain name to be provided to a virtual instance.
+    required: false
+    default: null
+  datacenter:
+    description:
+      - Datacenter for the virtual instance to be deployed.
+    required: false
+    default: null
+  tags:
+    description:
+      - Tag or list of tags to be provided to a virtual instance.
+    required: false
+    default: null
+  hourly:
+    description:
+      - Flag to determine if the instance should be hourly billed.
+    required: false
+    default: true
+  private:
+    description:
+      - Flag to determine if the instance should be private only.
+    required: false
+    default: false
+  dedicated:
+    description:
+      - Flag to determine if the instance should be deployed in dedicated space.
+    required: false
+    default: false
+  local_disk:
+    description:
+      - Flag to determine if local disk should be used for the new instance.
+    required: false
+    default: true
+  cpus:
+    description:
+      - Count of CPUs to be assigned to the new virtual instance.
+    required: true
+    default: null
+  memory:
+    description:
+      - Amount of memory to be assigned to the new virtual instance.
+    required: true
+    default: null
+  disks:
+    description:
+      - List of disk sizes to be assigned to the new virtual instance.
+    required: true
+    default: [25]
+  os_code:
+    description:
+      - OS Code to be used for the new virtual instance.
+    required: false
+    default: null
+  image_id:
+    description:
+      - Image Template to be used for the new virtual instance.
+    required: false
+    default: null
+  nic_speed:
+    description:
+      - NIC Speed to be assigned to the new virtual instance.
+    required: false
+    default: 10
+  public_vlan:
+    description:
+      - VLAN (by its ID) to be assigned to the public NIC.
+    required: false
+    default: null
+  private_vlan:
+    description:
+      - VLAN (by its ID) to be assigned to the private NIC.
+    required: false
+    default: null
+  ssh_keys:
+    description:
+      - List of ssh keys (by their IDs) to be assigned to a virtual instance.
+    required: false
+    default: null
+  post_uri:
+    description:
+      - URL of a post provisioning script to be loaded and executed on the virtual instance.
+    required: false
+    default: null
+  state:
+    description:
+      - Create or cancel a virtual instance. Specify "present" for create, "absent" to cancel.
+ required: false + default: 'present' + wait: + description: + - Flag used to wait for active status before returning + required: false + default: true + wait_timeout: + description: + - time in seconds before wait returns + required: false + default: 600 + +requirements: + - "python >= 2.6" + - "softlayer >= 4.1.1" +author: "Matt Colton (@mcltn)" +''' + +EXAMPLES = ''' +- name: Build instance + hosts: localhost + gather_facts: False + tasks: + - name: Build instance request + local_action: + module: sl_vm + hostname: instance-1 + domain: anydomain.com + datacenter: dal09 + tags: ansible-module-test + hourly: True + private: False + dedicated: False + local_disk: True + cpus: 1 + memory: 1024 + disks: [25] + os_code: UBUNTU_LATEST + wait: False + +- name: Build additional instances + hosts: localhost + gather_facts: False + tasks: + - name: Build instances request + local_action: + module: sl_vm + hostname: "{{ item.hostname }}" + domain: "{{ item.domain }}" + datacenter: "{{ item.datacenter }}" + tags: "{{ item.tags }}" + hourly: "{{ item.hourly }}" + private: "{{ item.private }}" + dedicated: "{{ item.dedicated }}" + local_disk: "{{ item.local_disk }}" + cpus: "{{ item.cpus }}" + memory: "{{ item.memory }}" + disks: "{{ item.disks }}" + os_code: "{{ item.os_code }}" + ssh_keys: "{{ item.ssh_keys }}" + wait: "{{ item.wait }}" + with_items: + - { hostname: 'instance-2', domain: 'anydomain.com', datacenter: 'dal09', tags: ['ansible-module-test', 'ansible-module-test-slaves'], hourly: True, private: False, dedicated: False, local_disk: True, cpus: 1, memory: 1024, disks: [25,100], os_code: 'UBUNTU_LATEST', ssh_keys: [], wait: True } + - { hostname: 'instance-3', domain: 'anydomain.com', datacenter: 'dal09', tags: ['ansible-module-test', 'ansible-module-test-slaves'], hourly: True, private: False, dedicated: False, local_disk: True, cpus: 1, memory: 1024, disks: [25,100], os_code: 'UBUNTU_LATEST', ssh_keys: [], wait: True } + + +- name: Cancel instances + hosts: localhost + gather_facts: False + tasks: + - name: Cancel by tag + local_action: + module: sl_vm + state: absent + tags: ansible-module-test +''' + +# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed. 
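+# A tag-driven cancel (state=absent with only `tags` set) can match several
+# instances at once. To preview the affected set before running the module,
+# the softlayer client can be queried directly -- a minimal sketch, assuming
+# the same environment-based credentials that create_client_from_env() below
+# relies on:
+#
+#     import SoftLayer
+#     vs = SoftLayer.VSManager(SoftLayer.create_client_from_env())
+#     for inst in vs.list_instances(tags=['ansible-module-test']):
+#         print(inst['id'], inst.get('fullyQualifiedDomainName'))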
+RETURN = '''# '''
+
+import json
+import time
+
+#TODO: get this info from API
+STATES = ['present', 'absent']
+DATACENTERS = ['ams01','ams03','che01','dal01','dal05','dal06','dal09','dal10','fra02','hkg02','hou02','lon02','mel01','mex01','mil01','mon01','osl01','par01','sjc01','sjc03','sao01','sea01','sng01','syd01','tok02','tor01','wdc01','wdc04']
+CPU_SIZES = [1,2,4,8,16,32,56]
+MEMORY_SIZES = [1024,2048,4096,6144,8192,12288,16384,32768,49152,65536,131072,247808]
+INITIALDISK_SIZES = [25,100]
+LOCALDISK_SIZES = [25,100,150,200,300]
+SANDISK_SIZES = [10,20,25,30,40,50,75,100,125,150,175,200,250,300,350,400,500,750,1000,1500,2000]
+NIC_SPEEDS = [10,100,1000]
+
+try:
+    import SoftLayer
+    from SoftLayer import VSManager
+
+    HAS_SL = True
+    vsManager = VSManager(SoftLayer.create_client_from_env())
+except ImportError:
+    HAS_SL = False
+
+
+def create_virtual_instance(module):
+
+    instances = vsManager.list_instances(
+        hostname = module.params.get('hostname'),
+        domain = module.params.get('domain'),
+        datacenter = module.params.get('datacenter')
+    )
+
+    if instances:
+        return False, None
+
+    # Check if OS or Image Template is provided (Can't be both, defaults to OS)
+    if (module.params.get('os_code') is not None and module.params.get('os_code') != ''):
+        module.params['image_id'] = ''
+    elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''):
+        module.params['os_code'] = ''
+        module.params['disks'] = []  # Blank out disks since it will use the template
+    else:
+        return False, None
+
+    tags = module.params.get('tags')
+    if isinstance(tags, list):
+        tags = ','.join(map(str, module.params.get('tags')))
+
+    instance = vsManager.create_instance(
+        hostname = module.params.get('hostname'),
+        domain = module.params.get('domain'),
+        cpus = module.params.get('cpus'),
+        memory = module.params.get('memory'),
+        hourly = module.params.get('hourly'),
+        datacenter = module.params.get('datacenter'),
+        os_code = module.params.get('os_code'),
+        image_id = module.params.get('image_id'),
+        local_disk = module.params.get('local_disk'),
+        disks = module.params.get('disks'),
+        ssh_keys = module.params.get('ssh_keys'),
+        nic_speed = module.params.get('nic_speed'),
+        private = module.params.get('private'),
+        public_vlan = module.params.get('public_vlan'),
+        private_vlan = module.params.get('private_vlan'),
+        dedicated = module.params.get('dedicated'),
+        post_uri = module.params.get('post_uri'),
+        tags = tags)
+
+    if instance is not None and instance['id'] > 0:
+        return True, instance
+    else:
+        return False, None
+
+
+def wait_for_instance(module, id):
+    instance = None
+    completed = False
+    deadline = time.time() + module.params.get('wait_timeout')
+    while not completed and deadline > time.time():
+        try:
+            completed = vsManager.wait_for_ready(id, 10, 2)
+            if completed:
+                instance = vsManager.get_instance(id)
+        except Exception:
+            completed = False
+
+    return completed, instance
+
+
+def cancel_instance(module):
+    canceled = True
+    if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')):
+        tags = module.params.get('tags')
+        if isinstance(tags, basestring):
+            tags = [module.params.get('tags')]
+        instances = vsManager.list_instances(tags = tags, hostname = module.params.get('hostname'), domain = module.params.get('domain'))
+        for instance in instances:
+            try:
+                vsManager.cancel_instance(instance['id'])
+            except Exception:
+                canceled = False
+    elif module.params.get('instance_id') and module.params.get('instance_id') != 0:
+        try:
+            # cancel the single instance identified by the instance_id parameter
+            vsManager.cancel_instance(module.params.get('instance_id'))
+        except Exception:
+            canceled = False
+    else:
+        return False, None
+
+    return canceled, None
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            instance_id=dict(),
+            hostname=dict(),
+            domain=dict(),
+            datacenter=dict(choices=DATACENTERS),
+            tags=dict(),
+            hourly=dict(type='bool', default=True),
+            private=dict(type='bool', default=False),
+            dedicated=dict(type='bool', default=False),
+            local_disk=dict(type='bool', default=True),
+            cpus=dict(type='int', choices=CPU_SIZES),
+            memory=dict(type='int', choices=MEMORY_SIZES),
+            disks=dict(type='list', default=[25]),
+            os_code=dict(),
+            image_id=dict(),
+            nic_speed=dict(type='int', choices=NIC_SPEEDS),
+            public_vlan=dict(),
+            private_vlan=dict(),
+            ssh_keys=dict(type='list', default=[]),
+            post_uri=dict(),
+            state=dict(default='present', choices=STATES),
+            wait=dict(type='bool', default=True),
+            wait_timeout=dict(type='int', default=600)
+        )
+    )
+
+    if not HAS_SL:
+        module.fail_json(msg='softlayer python library required for this module')
+
+    if module.params.get('state') == 'absent':
+        (changed, instance) = cancel_instance(module)
+
+    elif module.params.get('state') == 'present':
+        (changed, instance) = create_virtual_instance(module)
+        if module.params.get('wait') and instance:
+            (changed, instance) = wait_for_instance(module, instance['id'])
+
+    module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__)))
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
diff --git a/cloud/vmware/vca_fw.py b/cloud/vmware/vca_fw.py
index 45ed78ef608..78cebbb012e 100644
--- a/cloud/vmware/vca_fw.py
+++ b/cloud/vmware/vca_fw.py
@@ -18,6 +18,10 @@
 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
 
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: vca_fw
@@ -25,70 +29,14 @@
 description:
   - Adds or removes firewall rules from a gateway in a vca environment
 version_added: "2.0"
+author: Peter Sprygada (@privateip)
 options:
-    username:
-        description:
-            - The vca username or email address, if not set the environment variable VCA_USER is checked for the username.
-        required: false
-        default: None
-    password:
-        description:
-            - The vca password, if not set the environment variable VCA_PASS is checked for the password
-        required: false
-        default: None
-    org:
-        description:
-            - The org to login to for creating vapp, mostly set when the service_type is vdc.
-        required: false
-        default: None
-    service_id:
-        description:
-            - The service id in a vchs environment to be used for creating the vapp
-        required: false
-        default: None
-    host:
-        description:
-            - The authentication host to be used when service type is vcd.
-        required: false
-        default: None
-    api_version:
-        description:
-            - The api version to be used with the vca
-        required: false
-        default: "5.7"
-    service_type:
-        description:
-            - The type of service we are authenticating against
-        required: false
-        default: vca
-        choices: [ "vca", "vchs", "vcd" ]
-    state:
-        description:
-            - if the object should be added or removed
-        required: false
-        default: present
-        choices: [ "present", "absent" ]
-    verify_certs:
-        description:
-            - If the certificates of the authentication is to be verified
-        required: false
-        default: True
-    vdc_name:
-        description:
-            - The name of the vdc where the gateway is located.
- required: false - default: None - gateway_name: - description: - - The name of the gateway of the vdc where the rule should be added - required: false - default: gateway fw_rules: description: - A list of firewall rules to be added to the gateway, Please see examples on valid entries required: True default: false - +extends_documentation_fragment: vca.documentation ''' EXAMPLES = ''' @@ -102,15 +50,15 @@ instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282' vdc_name: 'benz_ansible' state: 'absent' - fw_rules: + fw_rules: - description: "ben testing" - source_ip: "Any" - dest_ip: 192.168.2.11 + source_ip: "Any" + dest_ip: 192.0.2.23 - description: "ben testing 2" - source_ip: 192.168.2.100 + source_ip: 192.0.2.50 source_port: "Any" dest_port: "22" - dest_ip: 192.168.2.13 + dest_ip: 192.0.2.101 is_enable: "true" enable_logging: "false" protocol: "Tcp" @@ -118,235 +66,184 @@ ''' - - -import time, json, xmltodict -HAS_PYVCLOUD = False try: - from pyvcloud.vcloudair import VCA - from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import ProtocolsType - HAS_PYVCLOUD = True + from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import FirewallRuleType + from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import ProtocolsType except ImportError: + # normally set a flag here but it will be caught when testing for + # the existence of pyvcloud (see module_utils/vca.py). This just + # protects against generating an exception at runtime pass -SERVICE_MAP = {'vca': 'ondemand', 'vchs': 'subscription', 'vcd': 'vcd'} -LOGIN_HOST = {} -LOGIN_HOST['vca'] = 'vca.vmware.com' -LOGIN_HOST['vchs'] = 'vchs.vmware.com' -VALID_RULE_KEYS = ['policy', 'is_enable', 'enable_logging', 'description', 'dest_ip', 'dest_port', 'source_ip', 'source_port', 'protocol'] - -def vca_login(module=None): - service_type = module.params.get('service_type') - username = module.params.get('username') - password = module.params.get('password') - instance = module.params.get('instance_id') - org = module.params.get('org') - service = module.params.get('service_id') - vdc_name = module.params.get('vdc_name') - version = module.params.get('api_version') - verify = module.params.get('verify_certs') - if not vdc_name: - if service_type == 'vchs': - vdc_name = module.params.get('service_id') - if not org: - if service_type == 'vchs': - if vdc_name: - org = vdc_name - else: - org = service - if service_type == 'vcd': - host = module.params.get('host') - else: - host = LOGIN_HOST[service_type] - - if not username: - if 'VCA_USER' in os.environ: - username = os.environ['VCA_USER'] - if not password: - if 'VCA_PASS' in os.environ: - password = os.environ['VCA_PASS'] - if not username or not password: - module.fail_json(msg = "Either the username or password is not set, please check") - - if service_type == 'vchs': - version = '5.6' - if service_type == 'vcd': - if not version: - version == '5.6' - - - vca = VCA(host=host, username=username, service_type=SERVICE_MAP[service_type], version=version, verify=verify) - - if service_type == 'vca': - if not vca.login(password=password): - module.fail_json(msg = "Login Failed: Please check username or password", error=vca.response.content) - if not vca.login_to_instance(password=password, instance=instance, token=None, org_url=None): - s_json = serialize_instances(vca.instances) - module.fail_json(msg = "Login to Instance failed: Seems like instance_id provided is wrong .. 
Please check",\ - valid_instances=s_json) - if not vca.login_to_instance(instance=instance, password=None, token=vca.vcloud_session.token, - org_url=vca.vcloud_session.org_url): - module.fail_json(msg = "Error logging into org for the instance", error=vca.response.content) - return vca - - if service_type == 'vchs': - if not vca.login(password=password): - module.fail_json(msg = "Login Failed: Please check username or password", error=vca.response.content) - if not vca.login(token=vca.token): - module.fail_json(msg = "Failed to get the token", error=vca.response.content) - if not vca.login_to_org(service, org): - module.fail_json(msg = "Failed to login to org, Please check the orgname", error=vca.response.content) - return vca - - if service_type == 'vcd': - if not vca.login(password=password, org=org): - module.fail_json(msg = "Login Failed: Please check username or password or host parameters") - if not vca.login(password=password, org=org): - module.fail_json(msg = "Failed to get the token", error=vca.response.content) - if not vca.login(token=vca.token, org=org, org_url=vca.vcloud_session.org_url): - module.fail_json(msg = "Failed to login to org", error=vca.response.content) - return vca - -def validate_fw_rules(module=None, fw_rules=None): - VALID_PROTO = ['Tcp', 'Udp', 'Icmp', 'Any'] +VALID_PROTO = ['Tcp', 'Udp', 'Icmp', 'Other', 'Any'] +VALID_RULE_KEYS = ['policy', 'is_enable', 'enable_logging', 'description', + 'dest_ip', 'dest_port', 'source_ip', 'source_port', + 'protocol'] + +def protocol_to_tuple(protocol): + return (protocol.get_Tcp(), + protocol.get_Udp(), + protocol.get_Icmp(), + protocol.get_Other(), + protocol.get_Any()) + +def protocol_to_string(protocol): + protocol = protocol_to_tuple(protocol) + if protocol[0] is True: + return 'Tcp' + elif protocol[1] is True: + return 'Udp' + elif protocol[2] is True: + return 'Icmp' + elif protocol[3] is True: + return 'Other' + elif protocol[4] is True: + return 'Any' + +def protocol_to_type(protocol): + try: + protocols = ProtocolsType() + setattr(protocols, protocol, True) + return protocols + except AttributeError: + raise VcaError("The value in protocol is not valid") + +def validate_fw_rules(fw_rules): for rule in fw_rules: - if not isinstance(rule, dict): - module.fail_json(msg="Firewall rules must be a list of dictionaries, Please check", valid_keys=VALID_RULE_KEYS) for k in rule.keys(): if k not in VALID_RULE_KEYS: - module.fail_json(msg="%s is not a valid key in fw rules, Please check above.." %k, valid_keys=VALID_RULE_KEYS) - rule['dest_port'] = rule.get('dest_port', 'Any') - rule['dest_ip'] = rule.get('dest_ip', 'Any') - rule['source_port'] = rule.get('source_port', 'Any') - rule['source_ip'] = rule.get('source_ip', 'Any') - rule['protocol'] = rule.get('protocol', 'Any') - rule['policy'] = rule.get('policy', 'allow') - rule['is_enable'] = rule.get('is_enable', 'true') - rule['enable_logging'] = rule.get('enable_logging', 'false') - rule['description'] = rule.get('description', 'rule added by Ansible') - if not rule['protocol'] in VALID_PROTO: - module.fail_json(msg="the value in protocol is not valid, valid values are as above", valid_proto=VALID_PROTO) + raise VcaError("%s is not a valid key in fw rules, please " + "check above.." 
% k, valid_keys=VALID_RULE_KEYS) + + rule['dest_port'] = str(rule.get('dest_port', 'Any')).lower() + rule['dest_ip'] = rule.get('dest_ip', 'Any').lower() + rule['source_port'] = str(rule.get('source_port', 'Any')).lower() + rule['source_ip'] = rule.get('source_ip', 'Any').lower() + rule['protocol'] = rule.get('protocol', 'Any').lower() + rule['policy'] = rule.get('policy', 'allow').lower() + rule['is_enable'] = rule.get('is_enable', True) + rule['enable_logging'] = rule.get('enable_logging', False) + rule['description'] = rule.get('description', 'rule added by Ansible') + return fw_rules -def create_protocol_list(protocol): - plist = [] - plist.append(protocol.get_Tcp()) - plist.append(protocol.get_Any()) - plist.append(protocol.get_Tcp()) - plist.append(protocol.get_Udp()) - plist.append(protocol.get_Icmp()) - plist.append(protocol.get_Other()) - return plist +def fw_rules_to_dict(rules): + fw_rules = list() + for rule in rules: + fw_rules.append( + dict( + dest_port=rule.get_DestinationPortRange().lower(), + dest_ip=rule.get_DestinationIp().lower().lower(), + source_port=rule.get_SourcePortRange().lower(), + source_ip=rule.get_SourceIp().lower(), + protocol=protocol_to_string(rule.get_Protocols()).lower(), + policy=rule.get_Policy().lower(), + is_enable=rule.get_IsEnabled(), + enable_logging=rule.get_EnableLogging(), + description=rule.get_Description() + ) + ) + return fw_rules +def create_fw_rule(is_enable, description, policy, protocol, dest_port, + dest_ip, source_port, source_ip, enable_logging): -def create_protocols_type(protocol): - all_protocols = {"Tcp": None, "Udp": None, "Icmp": None, "Any": None} - all_protocols[protocol] = True - return ProtocolsType(**all_protocols) + return FirewallRuleType(IsEnabled=is_enable, + Description=description, + Policy=policy, + Protocols=protocol_to_type(protocol), + DestinationPortRange=dest_port, + DestinationIp=dest_ip, + SourcePortRange=source_port, + SourceIp=source_ip, + EnableLogging=enable_logging) def main(): - module = AnsibleModule( - argument_spec=dict( - username = dict(default=None), - password = dict(default=None), - org = dict(default=None), - service_id = dict(default=None), - instance_id = dict(default=None), - host = dict(default=None), - api_version = dict(default='5.7'), - service_type = dict(default='vca', choices=['vchs', 'vca', 'vcd']), - state = dict(default='present', choices = ['present', 'absent']), - vdc_name = dict(default=None), - gateway_name = dict(default='gateway'), - fw_rules = dict(required=True, default=None, type='list'), + argument_spec = vca_argument_spec() + argument_spec.update( + dict( + fw_rules = dict(required=True, type='list'), + gateway_name = dict(default='gateway'), + state = dict(default='present', choices=['present', 'absent']) ) ) + module = AnsibleModule(argument_spec, supports_check_mode=True) + + fw_rules = module.params.get('fw_rules') + gateway_name = module.params.get('gateway_name') + vdc_name = module.params['vdc_name'] - vdc_name = module.params.get('vdc_name') - org = module.params.get('org') - service = module.params.get('service_id') - state = module.params.get('state') - service_type = module.params.get('service_type') - host = module.params.get('host') - instance_id = module.params.get('instance_id') - fw_rules = module.params.get('fw_rules') - gateway_name = module.params.get('gateway_name') - verify_certs = dict(default=True, type='bool'), - - if not HAS_PYVCLOUD: - module.fail_json(msg="python module pyvcloud is needed for this module") - if service_type == 'vca': - if not 
instance_id: - module.fail_json(msg="When service type is vca the instance_id parameter is mandatory") - if not vdc_name: - module.fail_json(msg="When service type is vca the vdc_name parameter is mandatory") - - if service_type == 'vchs': - if not service: - module.fail_json(msg="When service type vchs the service_id parameter is mandatory") - if not org: - org = service - if not vdc_name: - vdc_name = service - if service_type == 'vcd': - if not host: - module.fail_json(msg="When service type is vcd host parameter is mandatory") - vca = vca_login(module) - vdc = vca.get_vdc(vdc_name) - if not vdc: - module.fail_json(msg = "Error getting the vdc, Please check the vdc name") - mod_rules = validate_fw_rules(module, fw_rules) gateway = vca.get_gateway(vdc_name, gateway_name) if not gateway: - module.fail_json(msg="Not able to find the gateway %s, please check the gateway_name param" %gateway_name) + module.fail_json(msg="Not able to find the gateway %s, please check " + "the gateway_name param" % gateway_name) + + fwservice = gateway._getFirewallService() + rules = gateway.get_fw_rules() - existing_rules = [] - del_rules = [] - for rule in rules: - current_trait = (create_protocol_list(rule.get_Protocols()), - rule.get_DestinationPortRange(), - rule.get_DestinationIp(), - rule.get_SourcePortRange(), - rule.get_SourceIp()) - for idx, val in enumerate(mod_rules): - trait = (create_protocol_list(create_protocols_type(val['protocol'])), - val['dest_port'], val['dest_ip'], val['source_port'], val['source_ip']) - if current_trait == trait: - del_rules.append(mod_rules[idx]) - mod_rules.pop(idx) - existing_rules.append(current_trait) - - if state == 'absent': - if len(del_rules) < 1: - module.exit_json(changed=False, msg="Nothing to delete", delete_rules=mod_rules) - else: - for i in del_rules: - gateway.delete_fw_rule(i['protocol'], i['dest_port'], i['dest_ip'], i['source_port'], i['source_ip']) - task = gateway.save_services_configuration() - if not task: - module.fail_json(msg="Unable to Delete Rule, please check above error", error=gateway.response.content) - if not vca.block_until_completed(task): - module.fail_json(msg="Error while waiting to remove Rule, please check above error", error=gateway.response.content) - module.exit_json(changed=True, msg="Rules Deleted", deleted_rules=del_rules) - - if len(mod_rules) < 1: - module.exit_json(changed=False, rules=existing_rules) - if len(mod_rules) >= 1: - for i in mod_rules: - gateway.add_fw_rule(i['is_enable'], i['description'], i['policy'], i['protocol'], i['dest_port'], i['dest_ip'], - i['source_port'], i['source_ip'], i['enable_logging']) - task = gateway.save_services_configuration() - if not task: - module.fail_json(msg="Unable to Add Rule, please check above error", error=gateway.response.content) - if not vca.block_until_completed(task): - module.fail_json(msg="Failure in waiting for adding firewall rule", error=gateway.response.content) - module.exit_json(changed=True, rules=mod_rules) - - + current_rules = fw_rules_to_dict(rules) + + try: + desired_rules = validate_fw_rules(fw_rules) + except VcaError as e: + module.fail_json(msg=e.message) + + result = dict(changed=False) + result['current_rules'] = current_rules + result['desired_rules'] = desired_rules + + updates = list() + additions = list() + deletions = list() + + for (index, rule) in enumerate(desired_rules): + try: + if rule != current_rules[index]: + updates.append((index, rule)) + except IndexError: + additions.append(rule) + + eol = len(current_rules) > len(desired_rules) + if 
eol:
+        # desired_rules is authoritative: anything in current_rules past its
+        # length is no longer wanted and gets queued for deletion
+        for rule in current_rules[len(desired_rules):]:
+            deletions.append(rule)
+
+    for rule in additions:
+        if not module.check_mode:
+            rule['protocol'] = rule['protocol'].capitalize()
+            gateway.add_fw_rule(**rule)
+        result['changed'] = True
+
+    for index, rule in updates:
+        if not module.check_mode:
+            rule = create_fw_rule(**rule)
+            fwservice.replace_FirewallRule_at(index, rule)
+        result['changed'] = True
+
+    keys = ['protocol', 'dest_port', 'dest_ip', 'source_port', 'source_ip']
+    for rule in deletions:
+        if not module.check_mode:
+            kwargs = dict([(k, v) for k, v in rule.items() if k in keys])
+            kwargs['protocol'] = protocol_to_string(kwargs['protocol'])
+            gateway.delete_fw_rule(**kwargs)
+        result['changed'] = True
+
+    if not module.check_mode and result['changed']:
+        task = gateway.save_services_configuration()
+        if task:
+            vca.block_until_completed(task)
+
+    result['rules_updated'] = len(updates)
+    result['rules_added'] = len(additions)
+    result['rules_deleted'] = len(deletions)
+
+    return module.exit_json(**result)
+
 # import module snippets
 from ansible.module_utils.basic import *
+from ansible.module_utils.vca import *
 
 if __name__ == '__main__':
     main()
diff --git a/cloud/vmware/vca_nat.py b/cloud/vmware/vca_nat.py
index c948605ce48..64771da6928 100644
--- a/cloud/vmware/vca_nat.py
+++ b/cloud/vmware/vca_nat.py
@@ -18,6 +18,10 @@
 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
 
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: vca_nat
@@ -25,64 +29,8 @@
 description:
   - Adds or removes nat rules from a gateway in a vca environment
 version_added: "2.0"
+author: Peter Sprygada (@privateip)
 options:
-    username:
-        description:
-            - The vca username or email address, if not set the environment variable VCA_USER is checked for the username.
-        required: false
-        default: None
-    password:
-        description:
-            - The vca password, if not set the environment variable VCA_PASS is checked for the password
-        required: false
-        default: None
-    org:
-        description:
-            - The org to login to for creating vapp, mostly set when the service_type is vdc.
-        required: false
-        default: None
-    service_id:
-        description:
-            - The service id in a vchs environment to be used for creating the vapp
-        required: false
-        default: None
-    host:
-        description:
-            - The authentication host to be used when service type is vcd.
-        required: false
-        default: None
-    api_version:
-        description:
-            - The api version to be used with the vca
-        required: false
-        default: "5.7"
-    service_type:
-        description:
-            - The type of service we are authenticating against
-        required: false
-        default: vca
-        choices: [ "vca", "vchs", "vcd" ]
-    state:
-        description:
-            - if the object should be added or removed
-        required: false
-        default: present
-        choices: [ "present", "absent" ]
-    verify_certs:
-        description:
-            - If the certificates of the authentication is to be verified
-        required: false
-        default: True
-    vdc_name:
-        description:
-            - The name of the vdc where the gateway is located.
-        required: false
-        default: None
-    gateway_name:
-        description:
-            - The name of the gateway of the vdc where the rule should be added
-        required: false
-        default: gateway
     purge_rules:
         description:
            - If set to true, it will delete all rules in the gateway that are not given as paramter to this module.
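Both modules reconcile a desired rule set against the gateway's current one: vca_nat below computes additions and deletions by membership tests over normalized dicts (vca_fw above does a positional comparison instead). A minimal standalone sketch of the membership-based diff, using hypothetical rule dicts and no pyvcloud dependency:

    desired = [{'rule_type': 'snat', 'original_ip': '192.0.2.42',
                'translated_ip': '203.0.113.23'}]
    current = [{'rule_type': 'dnat', 'original_ip': '203.0.113.23',
                'translated_ip': '192.0.2.42'}]

    additions = [r for r in desired if r not in current]  # rules to create
    deletions = [r for r in current if r not in desired]  # rules to remove
    changed = bool(additions or deletions)

Because both sides are normalized (lowercased, defaults filled in) before the comparison, plain `in` membership tests are enough; no field-by-field comparison logic is needed.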
@@ -93,8 +41,7 @@ - A list of rules to be added to the gateway, Please see examples on valid entries required: True default: false - - +extends_documentation_fragment: vca.documentation ''' EXAMPLES = ''' @@ -108,10 +55,10 @@ instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282' vdc_name: 'benz_ansible' state: 'present' - nat_rules: + nat_rules: - rule_type: SNAT - original_ip: 192.168.2.10 - translated_ip: 107.189.95.208 + original_ip: 192.0.2.42 + translated_ip: 203.0.113.23 #example for a DNAT - hosts: localhost @@ -121,255 +68,152 @@ instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282' vdc_name: 'benz_ansible' state: 'present' - nat_rules: + nat_rules: - rule_type: DNAT - original_ip: 107.189.95.208 + original_ip: 203.0.113.23 original_port: 22 - translated_ip: 192.168.2.10 + translated_ip: 192.0.2.42 translated_port: 22 ''' -import time, json, xmltodict - -HAS_PYVCLOUD = False -try: - from pyvcloud.vcloudair import VCA - HAS_PYVCLOUD = True -except ImportError: - pass - -SERVICE_MAP = {'vca': 'ondemand', 'vchs': 'subscription', 'vcd': 'vcd'} -LOGIN_HOST = {} -LOGIN_HOST['vca'] = 'vca.vmware.com' -LOGIN_HOST['vchs'] = 'vchs.vmware.com' -VALID_RULE_KEYS = ['rule_type', 'original_ip', 'original_port', 'translated_ip', 'translated_port', 'protocol'] - -def vca_login(module=None): - service_type = module.params.get('service_type') - username = module.params.get('username') - password = module.params.get('password') - instance = module.params.get('instance_id') - org = module.params.get('org') - service = module.params.get('service_id') - vdc_name = module.params.get('vdc_name') - version = module.params.get('api_version') - verify = module.params.get('verify_certs') - if not vdc_name: - if service_type == 'vchs': - vdc_name = module.params.get('service_id') - if not org: - if service_type == 'vchs': - if vdc_name: - org = vdc_name - else: - org = service - if service_type == 'vcd': - host = module.params.get('host') - else: - host = LOGIN_HOST[service_type] - - if not username: - if 'VCA_USER' in os.environ: - username = os.environ['VCA_USER'] - if not password: - if 'VCA_PASS' in os.environ: - password = os.environ['VCA_PASS'] - if not username or not password: - module.fail_json(msg = "Either the username or password is not set, please check") - - if service_type == 'vchs': - version = '5.6' - if service_type == 'vcd': - if not version: - version == '5.6' - - - vca = VCA(host=host, username=username, service_type=SERVICE_MAP[service_type], version=version, verify=verify) - - if service_type == 'vca': - if not vca.login(password=password): - module.fail_json(msg = "Login Failed: Please check username or password", error=vca.response.content) - if not vca.login_to_instance(password=password, instance=instance, token=None, org_url=None): - s_json = serialize_instances(vca.instances) - module.fail_json(msg = "Login to Instance failed: Seems like instance_id provided is wrong .. 
Please check",\ - valid_instances=s_json) - if not vca.login_to_instance(instance=instance, password=None, token=vca.vcloud_session.token, - org_url=vca.vcloud_session.org_url): - module.fail_json(msg = "Error logging into org for the instance", error=vca.response.content) - return vca - - if service_type == 'vchs': - if not vca.login(password=password): - module.fail_json(msg = "Login Failed: Please check username or password", error=vca.response.content) - if not vca.login(token=vca.token): - module.fail_json(msg = "Failed to get the token", error=vca.response.content) - if not vca.login_to_org(service, org): - module.fail_json(msg = "Failed to login to org, Please check the orgname", error=vca.response.content) - return vca - - if service_type == 'vcd': - if not vca.login(password=password, org=org): - module.fail_json(msg = "Login Failed: Please check username or password or host parameters") - if not vca.login(password=password, org=org): - module.fail_json(msg = "Failed to get the token", error=vca.response.content) - if not vca.login(token=vca.token, org=org, org_url=vca.vcloud_session.org_url): - module.fail_json(msg = "Failed to login to org", error=vca.response.content) - return vca - -def validate_nat_rules(module=None, nat_rules=None): +import time +import xmltodict + +VALID_RULE_KEYS = ['rule_type', 'original_ip', 'original_port', + 'translated_ip', 'translated_port', 'protocol'] + + +def validate_nat_rules(nat_rules): for rule in nat_rules: if not isinstance(rule, dict): - module.fail_json(msg="nat rules must be a list of dictionaries, Please check", valid_keys=VALID_RULE_KEYS) + raise VcaError("nat rules must be a list of dictionaries, " + "Please check", valid_keys=VALID_RULE_KEYS) + for k in rule.keys(): if k not in VALID_RULE_KEYS: - module.fail_json(msg="%s is not a valid key in nat rules, Please check above.." %k, valid_keys=VALID_RULE_KEYS) - rule['original_port'] = rule.get('original_port', 'any') - rule['original_ip'] = rule.get('original_ip', 'any') - rule['translated_ip'] = rule.get('translated_ip', 'any') - rule['translated_port'] = rule.get('translated_port', 'any') - rule['protocol'] = rule.get('protocol', 'any') - rule['rule_type'] = rule.get('rule_type', 'DNAT') + raise VcaError("%s is not a valid key in nat rules, please " + "check above.." 
% k, valid_keys=VALID_RULE_KEYS)
+
+        rule['original_port'] = str(rule.get('original_port', 'any')).lower()
+        rule['original_ip'] = rule.get('original_ip', 'any').lower()
+        rule['translated_ip'] = rule.get('translated_ip', 'any').lower()
+        rule['translated_port'] = str(rule.get('translated_port', 'any')).lower()
+        rule['protocol'] = rule.get('protocol', 'any').lower()
+        rule['rule_type'] = rule.get('rule_type', 'DNAT').lower()
+
     return nat_rules
 
-def nat_rules_to_dict(natRules):
+def nat_rules_to_dict(nat_rules):
     result = []
-    for natRule in natRules:
-        ruleId = natRule.get_Id()
-        enable = natRule.get_IsEnabled()
-        ruleType = natRule.get_RuleType()
-        gatewayNatRule = natRule.get_GatewayNatRule()
-        originalIp = gatewayNatRule.get_OriginalIp()
-        originalPort = gatewayNatRule.get_OriginalPort()
-        translatedIp = gatewayNatRule.get_TranslatedIp()
-        translatedPort = gatewayNatRule.get_TranslatedPort()
-        protocol = gatewayNatRule.get_Protocol()
-        interface = gatewayNatRule.get_Interface().get_name()
-        result.append(dict(rule_type=ruleType, original_ip=originalIp, original_port="any" if not originalPort else originalPort, translated_ip=translatedIp, translated_port="any" if not translatedPort else translatedPort,
-                      protocol="any" if not protocol else protocol))
+    for rule in nat_rules:
+        gw_rule = rule.get_GatewayNatRule()
+        result.append(
+            dict(
+                rule_type=rule.get_RuleType().lower(),
+                original_ip=gw_rule.get_OriginalIp().lower(),
+                original_port=(gw_rule.get_OriginalPort() or 'any').lower(),
+                translated_ip=gw_rule.get_TranslatedIp().lower(),
+                translated_port=(gw_rule.get_TranslatedPort() or 'any').lower(),
+                protocol=(gw_rule.get_Protocol() or 'any').lower()
+            )
+        )
     return result
 
+def rule_to_string(rule):
+    strings = list()
+    for key, value in rule.items():
+        strings.append('%s=%s' % (key, value))
+    return ', '.join(strings)
 
 def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            username = dict(default=None),
-            password = dict(default=None),
-            org = dict(default=None),
-            service_id = dict(default=None),
-            instance_id = dict(default=None),
-            host = dict(default=None),
-            api_version = dict(default='5.7'),
-            service_type = dict(default='vca', choices=['vchs', 'vca', 'vcd']),
-            state = dict(default='present', choices = ['present', 'absent']),
-            vdc_name = dict(default=None),
-            gateway_name = dict(default='gateway'),
-            nat_rules = dict(required=True, default=None, type='list'),
-            purge_rules = dict(default=False),
+    argument_spec = vca_argument_spec()
+    argument_spec.update(
+        dict(
+            nat_rules = dict(type='list', default=[]),
+            gateway_name = dict(default='gateway'),
+            purge_rules = dict(default=False, type='bool'),
+            state = dict(default='present', choices=['present', 'absent'])
         )
     )
+    module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+    vdc_name = module.params.get('vdc_name')
+    state = module.params['state']
+    nat_rules = module.params['nat_rules']
+    gateway_name = module.params['gateway_name']
+    purge_rules = module.params['purge_rules']
+
+    if not purge_rules and not nat_rules:
+        module.fail_json(msg='Must define purge_rules or nat_rules')
 
-    vdc_name = module.params.get('vdc_name')
-    org = module.params.get('org')
-    service = module.params.get('service_id')
-    state = module.params.get('state')
-    service_type = module.params.get('service_type')
-    host = module.params.get('host')
-    instance_id = module.params.get('instance_id')
-    nat_rules = module.params.get('nat_rules')
-    gateway_name = module.params.get('gateway_name')
-    purge_rules = module.params.get('purge_rules')
verify_certs = dict(default=True, type='bool'), - - if not HAS_PYVCLOUD: - module.fail_json(msg="python module pyvcloud is needed for this module") - if service_type == 'vca': - if not instance_id: - module.fail_json(msg="When service type is vca the instance_id parameter is mandatory") - if not vdc_name: - module.fail_json(msg="When service type is vca the vdc_name parameter is mandatory") - - if service_type == 'vchs': - if not service: - module.fail_json(msg="When service type vchs the service_id parameter is mandatory") - if not org: - org = service - if not vdc_name: - vdc_name = service - if service_type == 'vcd': - if not host: - module.fail_json(msg="When service type is vcd host parameter is mandatory") - vca = vca_login(module) - vdc = vca.get_vdc(vdc_name) - if not vdc: - module.fail_json(msg = "Error getting the vdc, Please check the vdc name") - mod_rules = validate_nat_rules(module, nat_rules) gateway = vca.get_gateway(vdc_name, gateway_name) if not gateway: - module.fail_json(msg="Not able to find the gateway %s, please check the gateway_name param" %gateway_name) + module.fail_json(msg="Not able to find the gateway %s, please check " + "the gateway_name param" % gateway_name) + + try: + desired_rules = validate_nat_rules(nat_rules) + except VcaError as e: + module.fail_json(msg=e.message) + rules = gateway.get_nat_rules() - cur_rules = nat_rules_to_dict(rules) - delete_cur_rule = [] - delete_rules = [] - for rule in cur_rules: - match = False - for idx, val in enumerate(mod_rules): - match = False - if cmp(rule, val) == 0: - delete_cur_rule.append(val) - mod_rules.pop(idx) - match = True - if not match: - delete_rules.append(rule) - if state == 'absent': - if purge_rules: - if not gateway.del_all_nat_rules(): - module.fail_json(msg="Error deleting all rules") - module.exit_json(changed=True, msg="Removed all rules") - if len(delete_cur_rule) < 1: - module.exit_json(changed=False, msg="No rules to be removed", rules=cur_rules) - else: - for i in delete_cur_rule: - gateway.del_nat_rule(i['rule_type'], i['original_ip'],\ - i['original_port'], i['translated_ip'], i['translated_port'], i['protocol']) - task = gateway.save_services_configuration() - if not task: - module.fail_json(msg="Unable to delete Rule, please check above error", error=gateway.response.content) - if not vca.block_until_completed(task): - module.fail_json(msg="Failure in waiting for removing network rule", error=gateway.response.content) - module.exit_json(changed=True, msg="The rules have been deleted", rules=delete_cur_rule) - changed = False - if len(mod_rules) < 1: - if not purge_rules: - module.exit_json(changed=False, msg="all rules are available", rules=cur_rules) - for i in mod_rules: - gateway.add_nat_rule(i['rule_type'], i['original_ip'], i['original_port'],\ - i['translated_ip'], i['translated_port'], i['protocol']) - task = gateway.save_services_configuration() - if not task: - module.fail_json(msg="Unable to add rule, please check above error", rules=mod_rules, error=gateway.response.content) - if not vca.block_until_completed(task): - module.fail_json(msg="Failure in waiting for adding network rule", error=gateway.response.content) - if purge_rules: - if len(delete_rules) < 1 and len(mod_rules) < 1: - module.exit_json(changed=False, rules=cur_rules) - for i in delete_rules: - gateway.del_nat_rule(i['rule_type'], i['original_ip'],\ - i['original_port'], i['translated_ip'], i['translated_port'], i['protocol']) + + result = dict(changed=False, rules_purged=0) + + deletions = 0 + additions = 0 + + 
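+    # The reconcile below happens in three steps: (1) when purge_rules is set,
+    # wipe the gateway and re-read the now-empty rule list (this saves its own
+    # gateway task immediately), (2) add every desired rule that is not
+    # already current, (3) delete every current rule that is no longer
+    # desired. Steps 2 and 3 are batched into a single
+    # save_services_configuration() call at the end.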
if purge_rules is True and len(rules) > 0: + result['rules_purged'] = len(rules) + deletions = result['rules_purged'] + rules = list() + if not module.check_mode: + gateway.del_all_nat_rules() task = gateway.save_services_configuration() - if not task: - module.fail_json(msg="Unable to delete Rule, please check above error", error=gateway.response.content) - if not vca.block_until_completed(task): - module.fail_json(msg="Failure in waiting for removing network rule", error=gateway.response.content) + vca.block_until_completed(task) + rules = gateway.get_nat_rules() + result['changed'] = True + + current_rules = nat_rules_to_dict(rules) + + result['current_rules'] = current_rules + result['desired_rules'] = desired_rules + + for rule in desired_rules: + if rule not in current_rules: + additions += 1 + if not module.check_mode: + gateway.add_nat_rule(**rule) + result['changed'] = True + result['rules_added'] = additions + + result['delete_rule'] = list() + result['delete_rule_rc'] = list() + for rule in current_rules: + if rule not in desired_rules: + deletions += 1 + if not module.check_mode: + result['delete_rule'].append(rule) + rc = gateway.del_nat_rule(**rule) + result['delete_rule_rc'].append(rc) + result['changed'] = True + result['rules_deleted'] = deletions + + if not module.check_mode and (additions > 0 or deletions > 0): + task = gateway.save_services_configuration() + vca.block_until_completed(task) + + module.exit_json(**result) - module.exit_json(changed=True, rules_added=mod_rules) - # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.vca import * + if __name__ == '__main__': - main() + main() diff --git a/cloud/vmware/vca_vapp.py b/cloud/vmware/vca_vapp.py index ef8e52c421b..4ebdda24d6c 100644 --- a/cloud/vmware/vca_vapp.py +++ b/cloud/vmware/vca_vapp.py @@ -18,708 +18,269 @@ # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: vca_vapp -short_description: create, terminate, start or stop a vm in vca +short_description: Manages vCloud Air vApp instances. description: - - Creates or terminates vca vms. + - This module will actively managed vCloud Air vApp instances. Instances + can be created and deleted as well as both deployed and undeployed. version_added: "2.0" +author: Peter Sprygada (@privateip) options: - username: - description: - - The vca username or email address, if not set the environment variable VCA_USER is checked for the username. - required: false - default: None - password: - description: - - The vca password, if not set the environment variable VCA_PASS is checked for the password - required: false - default: None - org: - description: - - The org to login to for creating vapp, mostly set when the service_type is vdc. - required: false - default: None - service_id: - description: - - The service id in a vchs environment to be used for creating the vapp - required: false - default: None - host: - description: - - The authentication host to be used when service type is vcd. 
- required: false - default: None - api_version: - description: - - The api version to be used with the vca - required: false - default: "5.7" - service_type: - description: - - The type of service we are authenticating against - required: false - default: vca - choices: [ "vca", "vchs", "vcd" ] - state: - description: - - if the object should be added or removed - required: false - default: present - choices: [ "present", "absent" ] - catalog_name: - description: - - The catalog from which the vm template is used. - required: false - default: "Public Catalog" - script: - description: - - The path to script that gets injected to vm during creation. - required: false - default: "Public Catalog" - template_name: - description: - - The template name from which the vm should be created. - required: True - network_name: - description: - - The network name to which the vm should be attached. - required: false - default: 'None' - network_ip: - description: - - The ip address that should be assigned to vm when the ip assignment type is static - required: false - default: None - network_mode: - description: - - The network mode in which the ip should be allocated. - required: false - default: pool - choices: [ "pool", "dhcp", 'static' ] - instance_id:: - description: - - The instance id of the region in vca flavour where the vm should be created - required: false - default: None - wait: - description: - - If the module should wait if the operation is poweroff or poweron, is better to wait to report the right state. - required: false - default: True - wait_timeout: - description: - - The wait timeout when wait is set to true - required: false - default: 250 - vdc_name: - description: - - The name of the vdc where the vm should be created. - required: false - default: None - vm_name: - description: - - The name of the vm to be created, the vapp is named the same as the vapp name - required: false - default: 'default_ansible_vm1' - vm_cpus: - description: - - The number if cpus to be added to the vm - required: false - default: None - vm_memory: - description: - - The amount of memory to be added to vm in megabytes - required: false - default: None - verify_certs: - description: - - If the certificates of the authentication is to be verified - required: false - default: True - admin_password: - description: - - The password to be set for admin - required: false - default: None - operation: - description: - - The operation to be done on the vm - required: false - default: poweroff - choices: [ 'shutdown', 'poweroff', 'poweron', 'reboot', 'reset', 'suspend' ] - + vapp_name: + description: + - The name of the vCloud Air vApp instance + required: yes + template_name: + description: + - The name of the vApp template to use to create the vApp instance. If + the I(state) is not `absent` then the I(template_name) value must be + provided. The I(template_name) must be previously uploaded to the + catalog specified by I(catalog_name) + required: no + default: None + network_name: + description: + - The name of the network that should be attached to the virtual machine + in the vApp. The virtual network specified must already be created in + the vCloud Air VDC. If the I(state) is not 'absent' then the + I(network_name) argument must be provided. + required: no + default: None + network_mode: + description: + - Configures the mode of the network connection. 
+ required: no + default: pool + choices: ['pool', 'dhcp', 'static'] + vm_name: + description: + - The name of the virtual machine instance in the vApp to manage. + required: no + default: None + vm_cpus: + description: + - The number of vCPUs to configure for the VM in the vApp. If the + I(vm_name) argument is provided, then this becomes a per VM setting + otherwise it is applied to all VMs in the vApp. + required: no + default: None + vm_memory: + description: + - The amount of memory in MB to allocate to VMs in the vApp. If the + I(vm_name) argument is provided, then this becomes a per VM setting + otherise it is applied to all VMs in the vApp. + required: no + default: None + operation: + description: + - Specifies an operation to be performed on the vApp. + required: no + default: noop + choices: ['noop', 'poweron', 'poweroff', 'suspend', 'shutdown', 'reboot', 'reset'] + state: + description: + - Configures the state of the vApp. + required: no + default: present + choices: ['present', 'absent', 'deployed', 'undeployed'] + username: + description: + - The vCloud Air username to use during authentication + required: false + default: None + password: + description: + - The vCloud Air password to use during authentication + required: false + default: None + org: + description: + - The org to login to for creating vapp, mostly set when the service_type is vdc. + required: false + default: None + instance_id: + description: + - The instance id in a vchs environment to be used for creating the vapp + required: false + default: None + host: + description: + - The authentication host to be used when service type is vcd. + required: false + default: None + api_version: + description: + - The api version to be used with the vca + required: false + default: "5.7" + service_type: + description: + - The type of service we are authenticating against + required: false + default: vca + choices: [ "vca", "vchs", "vcd" ] + vdc_name: + description: + - The name of the virtual data center (VDC) where the vm should be created or contains the vAPP. + required: false + default: None ''' EXAMPLES = ''' -#Create a vm in an vca environment. The username password is not set as they are set in environment - -- hosts: localhost - connection: local - tasks: - - vca_vapp: - operation: poweroff - instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282' - vdc_name: 'benz_ansible' - vm_name: benz - vm_cpus: 2 - vm_memory: 1024 - network_mode: pool - template_name: "CentOS63-32BIT" - admin_password: "Password!123" - network_name: "default-routed-network" - -#Create a vm in a vchs environment. 
- -- hosts: localhost - connection: local - tasks: - - vca_app: - operation: poweron - service_id: '9-69' - vdc_name: 'Marketing' - service_type: 'vchs' - vm_name: benz - vm_cpus: 1 - script: "/tmp/configure_vm.sh" - catalog_name: "Marketing-Catalog" - template_name: "Marketing-Ubuntu-1204x64" - vm_memory: 512 - network_name: "M49-default-isolated" - -#create a vm in a vdc environment - -- hosts: localhost - connection: local - tasks: - - vca_vapp: - operation: poweron - org: IT20 - host: "mycloud.vmware.net" - api_version: "5.5" - service_type: vcd - vdc_name: 'IT20 Data Center (Beta)' - vm_name: benz - vm_cpus: 1 - catalog_name: "OS Templates" - template_name: "CentOS 6.5 64Bit CLI" - network_mode: pool - +- name: Creates a new vApp in a VCA instance + vca_vapp: + vapp_name: tower + state=present + template_name='Ubuntu Server 12.04 LTS (amd64 20150127)' + vdc_name=VDC1 + instance_id= + username= + password= ''' - -import time, json, xmltodict - -HAS_PYVCLOUD = False -try: - from pyvcloud.vcloudair import VCA - HAS_PYVCLOUD = True -except ImportError: - pass - -SERVICE_MAP = {'vca': 'ondemand', 'vchs': 'subscription', 'vcd': 'vcd'} -LOGIN_HOST = {} -LOGIN_HOST['vca'] = 'vca.vmware.com' -LOGIN_HOST['vchs'] = 'vchs.vmware.com' -VM_COMPARE_KEYS = ['admin_password', 'status', 'cpus', 'memory_mb'] - -def vm_state(val=None): - if val == 8: - return "Power_Off" - elif val == 4: - return "Power_On" - else: - return "Unknown Status" - -def serialize_instances(instance_list): - instances = [] - for i in instance_list: - instances.append(dict(apiUrl=i['apiUrl'], instance_id=i['id'])) - return instances - -def get_catalogs(vca): - catalogs = vca.get_catalogs() - results = [] - for catalog in catalogs: - if catalog.CatalogItems and catalog.CatalogItems.CatalogItem: - for item in catalog.CatalogItems.CatalogItem: - results.append([catalog.name, item.name]) - else: - results.append([catalog.name, '']) - return results - -def vca_login(module=None): - service_type = module.params.get('service_type') - username = module.params.get('username') - password = module.params.get('password') - instance = module.params.get('instance_id') - org = module.params.get('org') - service = module.params.get('service_id') - vdc_name = module.params.get('vdc_name') - version = module.params.get('api_version') - verify = module.params.get('verify_certs') - if not vdc_name: - if service_type == 'vchs': - vdc_name = module.params.get('service_id') - if not org: - if service_type == 'vchs': - if vdc_name: - org = vdc_name - else: - org = service - if service_type == 'vcd': - host = module.params.get('host') - else: - host = LOGIN_HOST[service_type] - - if not username: - if 'VCA_USER' in os.environ: - username = os.environ['VCA_USER'] - if not password: - if 'VCA_PASS' in os.environ: - password = os.environ['VCA_PASS'] - if not username or not password: - module.fail_json(msg = "Either the username or password is not set, please check") - - if service_type == 'vchs': - version = '5.6' - if service_type == 'vcd': - if not version: - version == '5.6' - - - vca = VCA(host=host, username=username, service_type=SERVICE_MAP[service_type], version=version, verify=verify) - - if service_type == 'vca': - if not vca.login(password=password): - module.fail_json(msg = "Login Failed: Please check username or password", error=vca.response.content) - if not vca.login_to_instance(password=password, instance=instance, token=None, org_url=None): - s_json = serialize_instances(vca.instances) - module.fail_json(msg = "Login to Instance failed: 
Seems like instance_id provided is wrong .. Please check",\ - valid_instances=s_json) - if not vca.login_to_instance(instance=instance, password=None, token=vca.vcloud_session.token, - org_url=vca.vcloud_session.org_url): - module.fail_json(msg = "Error logging into org for the instance", error=vca.response.content) - return vca - - if service_type == 'vchs': - if not vca.login(password=password): - module.fail_json(msg = "Login Failed: Please check username or password", error=vca.response.content) - if not vca.login(token=vca.token): - module.fail_json(msg = "Failed to get the token", error=vca.response.content) - if not vca.login_to_org(service, org): - module.fail_json(msg = "Failed to login to org, Please check the orgname", error=vca.response.content) - return vca - - if service_type == 'vcd': - if not vca.login(password=password, org=org): - module.fail_json(msg = "Login Failed: Please check username or password or host parameters") - if not vca.login(password=password, org=org): - module.fail_json(msg = "Failed to get the token", error=vca.response.content) - if not vca.login(token=vca.token, org=org, org_url=vca.vcloud_session.org_url): - module.fail_json(msg = "Failed to login to org", error=vca.response.content) - return vca - -def set_vm_state(module=None, vca=None, state=None): - wait = module.params.get('wait') - wait_tmout = module.params.get('wait_timeout') - vm_name = module.params.get('vm_name') - vdc_name = module.params.get('vdc_name') - vapp_name = module.params.get('vm_name') - service_type = module.params.get('service_type') - service_id = module.params.get('service_id') - if service_type == 'vchs' and not vdc_name: - vdc_name = service_id - vdc = vca.get_vdc(vdc_name) - if wait: - tmout = time.time() + wait_tmout - while tmout > time.time(): - vapp = vca.get_vapp(vdc, vapp_name) - vms = filter(lambda vm: vm['name'] == vm_name, vapp.get_vms_details()) - vm = vms[0] - if vm['status'] == state: - return True - time.sleep(5) - module.fail_json(msg="Timeut waiting for the vms state to change") - return True - -def vm_details(vdc=None, vapp=None, vca=None): - table = [] - networks = [] - vm_name = vapp - vdc1 = vca.get_vdc(vdc) - if not vdc1: - module.fail_json(msg = "Error getting the vdc, Please check the vdc name") - vap = vca.get_vapp(vdc1, vapp) - if vap: - vms = filter(lambda vm: vm['name'] == vm_name, vap.get_vms_details()) - networks = vap.get_vms_network_info() - if len(networks[0]) >= 1: - table.append(dict(vm_info=vms[0], network_info=networks[0][0])) - else: - table.append(dict(vm_info=vms[0], network_info=networks[0])) - return table - - -def vapp_attach_net(module=None, vca=None, vapp=None): - network_name = module.params.get('network_name') - service_type = module.params.get('service_type') - vdc_name = module.params.get('vdc_name') - mode = module.params.get('network_mode') - if mode.upper() == 'STATIC': - network_ip = module.params.get('network_ip') - else: - network_ip = None - if not vdc_name: - if service_type == 'vchs': - vdc_name = module.params.get('service_id') - nets = filter(lambda n: n.name == network_name, vca.get_networks(vdc_name)) - if len(nets) <= 1: - net_task = vapp.disconnect_vms() - if not net_task: - module.fail_json(msg="Failure in detattaching vms from vnetworks", error=vapp.response.content) - if not vca.block_until_completed(net_task): - module.fail_json(msg="Failure in waiting for detaching vms from vnetworks", error=vapp.response.content) - net_task = vapp.disconnect_from_networks() - if not net_task: - 
module.fail_json(msg="Failure in detattaching network from vapp", error=vapp.response.content) - if not vca.block_until_completed(net_task): - module.fail_json(msg="Failure in waiting for detaching network from vapp", error=vapp.response.content) - if not network_name: - return True - - net_task = vapp.connect_to_network(nets[0].name, nets[0].href) - if not net_task: - module.fail_json(msg="Failure in attaching network to vapp", error=vapp.response.content) - if not vca.block_until_completed(net_task): - module.fail_json(msg="Failure in waiting for attching network to vapp", error=vapp.response.content) - - net_task = vapp.connect_vms(nets[0].name, connection_index=0, ip_allocation_mode=mode.upper(), ip_address=network_ip ) - if not net_task: - module.fail_json(msg="Failure in attaching network to vm", error=vapp.response.content) - if not vca.block_until_completed(net_task): - module.fail_json(msg="Failure in waiting for attaching network to vm", error=vapp.response.content) - return True - nets = [] - for i in vca.get_networks(vdc_name): - nets.append(i.name) - module.fail_json(msg="Seems like network_name is not found in the vdc, please check Available networks as above", Available_networks=nets) - -def create_vm(vca=None, module=None): - vm_name = module.params.get('vm_name') - operation = module.params.get('operation') - vm_cpus = module.params.get('vm_cpus') - vm_memory = module.params.get('vm_memory') - catalog_name = module.params.get('catalog_name') - template_name = module.params.get('template_name') - vdc_name = module.params.get('vdc_name') - network_name = module.params.get('network_name') - service_type = module.params.get('service_type') - admin_pass = module.params.get('admin_password') - script = module.params.get('script') - vapp_name = vm_name - - if not vdc_name: - if service_type == 'vchs': - vdc_name = module.params.get('service_id') - task = vca.create_vapp(vdc_name, vapp_name, template_name, catalog_name, vm_name=None) - if not task: - catalogs = get_catalogs(vca) - module.fail_json(msg="Error in Creating VM, Please check catalog or template, Available catalogs and templates are as above or check the error field", catalogs=catalogs, errors=vca.response.content) - if not vca.block_until_completed(task): - module.fail_json(msg = "Error in waiting for VM Creation, Please check logs", errors=vca.response.content) - vdc = vca.get_vdc(vdc_name) - if not vdc: - module.fail_json(msg = "Error getting the vdc, Please check the vdc name", errors=vca.response.content) - - vapp = vca.get_vapp(vdc, vapp_name) - task = vapp.modify_vm_name(1, vm_name) - if not task: - module.fail_json(msg="Error in setting the vm_name to vapp_name", errors=vca.response.content) - if not vca.block_until_completed(task): - module.fail_json(msg = "Error in waiting for VM Renaming, Please check logs", errors=vca.response.content) - vapp = vca.get_vapp(vdc, vapp_name) - task = vapp.customize_guest_os(vm_name, computer_name=vm_name) - if not task: - module.fail_json(msg="Error in setting the computer_name to vm_name", errors=vca.response.content) - if not vca.block_until_completed(task): - module.fail_json(msg = "Error in waiting for Computer Renaming, Please check logs", errors=vca.response.content) - - - if network_name: - vapp = vca.get_vapp(vdc, vapp_name) - if not vapp_attach_net(module, vca, vapp): - module.fail_json(msg= "Attaching network to VM fails", errors=vca.response.content) - - if vm_cpus: - vapp = vca.get_vapp(vdc, vapp_name) - task = vapp.modify_vm_cpu(vm_name, vm_cpus) - if not task: - 
module.fail_json(msg="Error adding cpu", error=vapp.resonse.contents) - if not vca.block_until_completed(task): - module.fail_json(msg="Failure in waiting for modifying cpu", error=vapp.response.content) - - if vm_memory: - vapp = vca.get_vapp(vdc, vapp_name) - task = vapp.modify_vm_memory(vm_name, vm_memory) - if not task: - module.fail_json(msg="Error adding memory", error=vapp.resonse.contents) - if not vca.block_until_completed(task): - module.fail_json(msg="Failure in waiting for modifying memory", error=vapp.response.content) - - if admin_pass: - vapp = vca.get_vapp(vdc, vapp_name) - task = vapp.customize_guest_os(vm_name, customization_script=None, - computer_name=None, admin_password=admin_pass, - reset_password_required=False) - if not task: - module.fail_json(msg="Error adding admin password", error=vapp.resonse.contents) - if not vca.block_until_completed(task): - module.fail_json(msg = "Error in waiting for resettng admin pass, Please check logs", errors=vapp.response.content) - - if script: - vapp = vca.get_vapp(vdc, vapp_name) - if os.path.exists(os.path.expanduser(script)): - file_contents = open(script, 'r') - task = vapp.customize_guest_os(vm_name, customization_script=file_contents.read()) - if not task: - module.fail_json(msg="Error adding customization script", error=vapp.resonse.contents) - if not vca.block_until_completed(task): - module.fail_json(msg = "Error in waiting for customization script, please check logs", errors=vapp.response.content) - task = vapp.force_customization(vm_name, power_on=False ) - if not task: - module.fail_json(msg="Error adding customization script", error=vapp.resonse.contents) - if not vca.block_until_completed(task): - module.fail_json(msg = "Error in waiting for customization script, please check logs", errors=vapp.response.content) - else: - module.fail_json(msg = "The file specified in script paramter is not avaialable or accessible") - - vapp = vca.get_vapp(vdc, vapp_name) - if operation == 'poweron': - vapp.poweron() - set_vm_state(module, vca, state='Powered on') - elif operation == 'poweroff': - vapp.poweroff() - elif operation == 'reboot': - vapp.reboot() - elif operation == 'reset': - vapp.reset() - elif operation == 'suspend': - vapp.suspend() - elif operation == 'shutdown': - vapp.shutdown() - details = vm_details(vdc_name, vapp_name, vca) - module.exit_json(changed=True, msg="VM created", vm_details=details[0]) - -def vapp_reconfigure(module=None, diff=None, vm=None, vca=None, vapp=None, vdc_name=None): - flag = False - vapp_name = module.params.get('vm_name') - vm_name = module.params.get('vm_name') - cpus = module.params.get('vm_cpus') - memory = module.params.get('vm_memory') - admin_pass = module.params.get('admin_password') - - if 'status' in diff: - operation = module.params.get('operation') - if operation == 'poweroff': - vapp.poweroff() - set_vm_state(module, vca, state='Powered off') - flag = True - if 'network' in diff: - vapp_attach_net(module, vca, vapp) - flag = True - if 'cpus' in diff: - task = vapp.modify_vm_cpu(vm_name, cpus) - if not vca.block_until_completed(task): - module.fail_json(msg="Failure in waiting for modifying cpu, might be vm is powered on and doesnt support hotplugging", error=vapp.response.content) - flag = True - if 'memory_mb' in diff: - task = vapp.modify_vm_memory(vm_name, memory) - if not vca.block_until_completed(task): - module.fail_json(msg="Failure in waiting for modifying memory, might be vm is powered on and doesnt support hotplugging", error=vapp.response.content) - flag = True - 
if 'admin_password' in diff:
-        task = vapp.customize_guest_os(vm_name, customization_script=None,
-                 computer_name=None, admin_password=admin_pass,
-                 reset_password_required=False)
-        if not task:
-            module.fail_json(msg="Error adding admin password", error=vapp.resonse.contents)
-        if not vca.block_until_completed(task):
-            module.fail_json(msg = "Error in waiting for resettng admin pass, Please check logs", errors=vapp.response.content)
-        flag = True
-    if 'status' in diff:
-        operation = module.params.get('operation')
-        if operation == 'poweron':
-            vapp.poweron()
-            set_vm_state(module, vca, state='Powered on')
-        elif operation == 'reboot':
-            vapp.reboot()
-        elif operation == 'reset':
-            vapp.reset()
-        elif operation == 'suspend':
-            vapp.suspend()
-        elif operation == 'shutdown':
-            vapp.shutdown()
-        flag = True
-    details = vm_details(vdc_name, vapp_name, vca)
-    if flag:
-        module.exit_json(changed=True, msg="VM reconfigured", vm_details=details[0])
-    module.exit_json(changed=False, msg="VM exists as per configuration",\
-            vm_details=details[0])
-
-def vm_exists(module=None, vapp=None, vca=None, vdc_name=None):
-    vm_name = module.params.get('vm_name')
-    operation = module.params.get('operation')
-    vm_cpus = module.params.get('vm_cpus')
-    vm_memory = module.params.get('vm_memory')
-    network_name = module.params.get('network_name')
-    admin_pass = module.params.get('admin_password')
-
-    d_vm = {}
-    d_vm['name'] = vm_name
-    d_vm['cpus'] = vm_cpus
-    d_vm['memory_mb'] = vm_memory
-    d_vm['admin_password'] = admin_pass
+DEFAULT_VAPP_OPERATION = 'noop'
+
+VAPP_STATUS = {
+    'Powered off': 'poweroff',
+    'Powered on': 'poweron',
+    'Suspended': 'suspend'
+}
+
+VAPP_STATES = ['present', 'absent', 'deployed', 'undeployed']
+VAPP_OPERATIONS = ['poweron', 'poweroff', 'suspend', 'shutdown',
+                   'reboot', 'reset', 'noop']
+
+
+def get_instance(module):
+    vapp_name = module.params['vapp_name']
+    inst = dict(vapp_name=vapp_name, state='absent')
+    try:
+        vapp = module.get_vapp(vapp_name)
+        if vapp:
+            status = module.vca.get_status(vapp.me.get_status())
+            inst['status'] = VAPP_STATUS.get(status, 'unknown')
+            inst['state'] = 'deployed' if vapp.me.deployed else 'undeployed'
+        return inst
+    except VcaError:
+        return inst
+
+def create(module):
+    vdc_name = module.params['vdc_name']
+    vapp_name = module.params['vapp_name']
+    template_name = module.params['template_name']
+    catalog_name = module.params['catalog_name']
+    network_name = module.params['network_name']
+    network_mode = module.params['network_mode']
+    vm_name = module.params['vm_name']
+    vm_cpus = module.params['vm_cpus']
+    vm_memory = module.params['vm_memory']
+    # 'deploy' is not a valid value of state; 'deployed' requests
+    # deployment at creation time
+    deploy = module.params['state'] == 'deployed'
+    poweron = module.params['operation'] == 'poweron'
+
+    task = module.vca.create_vapp(vdc_name, vapp_name, template_name,
+                                  catalog_name, network_name, network_mode,
+                                  vm_name, vm_cpus, vm_memory, deploy, poweron)
+
+    module.vca.block_until_completed(task)
+
+def delete(module):
+    vdc_name = module.params['vdc_name']
+    vapp_name = module.params['vapp_name']
+    module.vca.delete_vapp(vdc_name, vapp_name)
+
+def do_operation(module):
+    vapp_name = module.params['vapp_name']
+    operation = module.params['operation']
+
+    vm_name = module.params.get('vm_name')
+    vm = None
+    if vm_name:
+        vm = module.get_vm(vapp_name, vm_name)

     if operation == 'poweron':
-        d_vm['status'] = 'Powered on'
+        operation = 'powerOn'
     elif operation == 'poweroff':
-        d_vm['status'] = 'Powered off'
-    else:
-        d_vm['status'] = 'operate'
-
-    vms = filter(lambda vm: vm['name'] == vm_name,
vapp.get_vms_details()) - if len(vms) > 1: - module.fail_json(msg = "The vapp seems to have more than one vm with same name,\ - currently we only support a single vm deployment") - elif len(vms) == 0: - return False - - else: - vm = vms[0] - diff = [] - for i in VM_COMPARE_KEYS: - if not d_vm[i]: - continue - if vm[i] != d_vm[i]: - diff.append(i) - if len(diff) == 1 and 'status' in diff: - vapp_reconfigure(module, diff, vm, vca, vapp, vdc_name) - networks = vapp.get_vms_network_info() - if not network_name and len(networks) >=1: - if len(networks[0]) >= 1: - if networks[0][0]['network_name'] != 'none': - diff.append('network') - if not network_name: - if len(diff) == 0: - return True - if not networks[0] and network_name: - diff.append('network') - if networks[0]: - if len(networks[0]) >= 1: - if networks[0][0]['network_name'] != network_name: - diff.append('network') - if vm['status'] != 'Powered off': - if operation != 'poweroff' and len(diff) > 0: - module.fail_json(msg="To change any properties of a vm, The vm should be in Powered Off state") - if len(diff) == 0: - return True - else: - vapp_reconfigure(module, diff, vm, vca, vapp, vdc_name) + operation = 'powerOff' + + cmd = 'power:%s' % operation + module.get_vapp(vapp_name).execute(cmd, 'post', targetVM=vm) + +def set_state(module): + state = module.params['state'] + vapp = module.get_vapp(module.params['vapp_name']) + if state == 'deployed': + action = module.params['operation'] == 'poweron' + if not vapp.deploy(action): + module.fail('unable to deploy vapp') + elif state == 'undeployed': + action = module.params['operation'] + if action == 'poweroff': + action = 'powerOff' + elif action != 'suspend': + action = None + if not vapp.undeploy(action): + module.fail('unable to undeploy vapp') + def main(): - module = AnsibleModule( - argument_spec=dict( - username = dict(default=None), - password = dict(default=None), - org = dict(default=None), - service_id = dict(default=None), - script = dict(default=None), - host = dict(default=None), - api_version = dict(default='5.7'), - service_type = dict(default='vca', choices=['vchs', 'vca', 'vcd']), - state = dict(default='present', choices = ['present', 'absent']), - catalog_name = dict(default="Public Catalog"), - template_name = dict(default=None, required=True), - network_name = dict(default=None), - network_ip = dict(default=None), - network_mode = dict(default='pool', choices=['dhcp', 'static', 'pool']), - instance_id = dict(default=None), - wait = dict(default=True, type='bool'), - wait_timeout = dict(default=250, type='int'), - vdc_name = dict(default=None), - vm_name = dict(default='default_ansible_vm1'), - vm_cpus = dict(default=None, type='int'), - verify_certs = dict(default=True, type='bool'), - vm_memory = dict(default=None, type='int'), - admin_password = dict(default=None), - operation = dict(default='poweroff', choices=['shutdown', 'poweroff', 'poweron', 'reboot', 'reset', 'suspend']) - ) + + argument_spec = dict( + vapp_name=dict(required=True), + vdc_name=dict(required=True), + template_name=dict(), + catalog_name=dict(default='Public Catalog'), + network_name=dict(), + network_mode=dict(default='pool', choices=['dhcp', 'static', 'pool']), + vm_name=dict(), + vm_cpus=dict(), + vm_memory=dict(), + operation=dict(default=DEFAULT_VAPP_OPERATION, choices=VAPP_OPERATIONS), + state=dict(default='present', choices=VAPP_STATES) ) + module = VcaAnsibleModule(argument_spec=argument_spec, + supports_check_mode=True) + + state = module.params['state'] + operation = 
module.params['operation'] + + instance = get_instance(module) + + result = dict(changed=False) + + if instance and state == 'absent': + if not module.check_mode: + delete(module) + result['changed'] = True + + elif state != 'absent': + if instance['state'] == 'absent': + if not module.check_mode: + create(module) + result['changed'] = True + + elif instance['state'] != state and state != 'present': + if not module.check_mode: + set_state(module) + result['changed'] = True + + if operation != instance.get('status') and operation != 'noop': + if not module.check_mode: + do_operation(module) + result['changed'] = True + + return module.exit(**result) - vdc_name = module.params.get('vdc_name') - vm_name = module.params.get('vm_name') - org = module.params.get('org') - service = module.params.get('service_id') - state = module.params.get('state') - service_type = module.params.get('service_type') - host = module.params.get('host') - instance_id = module.params.get('instance_id') - network_mode = module.params.get('network_mode') - network_ip = module.params.get('network_ip') - vapp_name = vm_name - - if not HAS_PYVCLOUD: - module.fail_json(msg="python module pyvcloud is needed for this module") - - if network_mode.upper() == 'STATIC': - if not network_ip: - module.fail_json(msg="if network_mode is STATIC, network_ip is mandatory") - - if service_type == 'vca': - if not instance_id: - module.fail_json(msg="When service type is vca the instance_id parameter is mandatory") - if not vdc_name: - module.fail_json(msg="When service type is vca the vdc_name parameter is mandatory") - - if service_type == 'vchs': - if not service: - module.fail_json(msg="When service type vchs the service_id parameter is mandatory") - if not org: - org = service - if not vdc_name: - vdc_name = service - if service_type == 'vcd': - if not host: - module.fail_json(msg="When service type is vcd host parameter is mandatory") - - vca = vca_login(module) - vdc = vca.get_vdc(vdc_name) - if not vdc: - module.fail_json(msg = "Error getting the vdc, Please check the vdc name") - vapp = vca.get_vapp(vdc, vapp_name) - if vapp: - if state == 'absent': - task = vca.delete_vapp(vdc_name, vapp_name) - if not vca.block_until_completed(task): - module.fail_json(msg="failure in deleting vapp") - module.exit_json(changed=True, msg="Vapp deleted") - if vm_exists(module, vapp, vca, vdc_name ): - details = vm_details(vdc_name, vapp_name, vca) - module.exit_json(changed=False, msg="vapp exists", vm_details=details[0]) - else: - create_vm(vca, module) - if state == 'absent': - module.exit_json(changed=False, msg="Vapp does not exist") - create_vm(vca, module) - - - # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.vca import * if __name__ == '__main__': - main() + main() diff --git a/cloud/vmware/vmware_cluster.py b/cloud/vmware/vmware_cluster.py index ee64b48f08c..5fd986d52b0 100644 --- a/cloud/vmware/vmware_cluster.py +++ b/cloud/vmware/vmware_cluster.py @@ -1,4 +1,4 @@ -#!/bin/python +#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Joseph Callen @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
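The vmware_cluster rework below folds the module's free functions into a VMwareCluster class whose process_state() picks a handler out of a desired-state/current-state table. A minimal, self-contained sketch of that dispatch pattern, with illustrative names rather than the module's real API:

class StateDispatchSketch(object):
    def __init__(self, desired_state):
        self.desired_state = desired_state

    def check_current_state(self):
        # A real module would query vCenter here; fixed value for the sketch.
        return 'absent'

    def state_create(self):
        print('would create the resource')

    def state_destroy(self):
        print('would destroy the resource')

    def state_update(self):
        print('would reconfigure the resource')

    def state_exit_unchanged(self):
        print('nothing to do')

    def process_state(self):
        # desired state (outer key) x current state (inner key) -> handler
        handlers = {
            'absent': {
                'present': self.state_destroy,
                'absent': self.state_exit_unchanged,
            },
            'present': {
                'update': self.state_update,
                'present': self.state_exit_unchanged,
                'absent': self.state_create,
            },
        }
        handlers[self.desired_state][self.check_current_state()]()


StateDispatchSketch('present').process_state()  # prints: would create the resource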
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: vmware_cluster @@ -31,20 +35,6 @@ - Tested on ESXi 5.5 - PyVmomi installed options: - hostname: - description: - - The hostname or IP address of the vSphere vCenter - required: True - username: - description: - - The username of the vSphere vCenter - required: True - aliases: ['user', 'admin'] - password: - description: - - The password of the vSphere vCenter - required: True - aliases: ['pass', 'pwd'] datacenter_name: description: - The name of the datacenter the cluster will be created in. @@ -68,6 +58,7 @@ - If set to True will enable vSAN when the cluster is created. required: False default: False +extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' @@ -90,152 +81,153 @@ HAS_PYVMOMI = False -def configure_ha(enable_ha): - das_config = vim.cluster.DasConfigInfo() - das_config.enabled = enable_ha - das_config.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy() - das_config.admissionControlPolicy.failoverLevel = 2 - return das_config - - -def configure_drs(enable_drs): - drs_config = vim.cluster.DrsConfigInfo() - drs_config.enabled = enable_drs - # Set to partially automated - drs_config.vmotionRate = 3 - return drs_config - - -def configure_vsan(enable_vsan): - vsan_config = vim.vsan.cluster.ConfigInfo() - vsan_config.enabled = enable_vsan - vsan_config.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo() - vsan_config.defaultConfig.autoClaimStorage = False - return vsan_config - - -def state_create_cluster(module): - - enable_ha = module.params['enable_ha'] - enable_drs = module.params['enable_drs'] - enable_vsan = module.params['enable_vsan'] - cluster_name = module.params['cluster_name'] - datacenter = module.params['datacenter'] - - try: +class VMwareCluster(object): + def __init__(self, module): + self.module = module + self.enable_ha = module.params['enable_ha'] + self.enable_drs = module.params['enable_drs'] + self.enable_vsan = module.params['enable_vsan'] + self.cluster_name = module.params['cluster_name'] + self.desired_state = module.params['state'] + self.datacenter = None + self.cluster = None + self.content = connect_to_api(module) + self.datacenter_name = module.params['datacenter_name'] + + def process_state(self): + cluster_states = { + 'absent': { + 'present': self.state_destroy_cluster, + 'absent': self.state_exit_unchanged, + }, + 'present': { + 'update': self.state_update_cluster, + 'present': self.state_exit_unchanged, + 'absent': self.state_create_cluster, + } + } + current_state = self.check_cluster_configuration() + # Based on the desired_state and the current_state call + # the appropriate method from the dictionary + cluster_states[self.desired_state][current_state]() + + def configure_ha(self): + das_config = vim.cluster.DasConfigInfo() + das_config.enabled = self.enable_ha + das_config.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy() + das_config.admissionControlPolicy.failoverLevel = 2 + return das_config + + def configure_drs(self): + drs_config = vim.cluster.DrsConfigInfo() + drs_config.enabled = self.enable_drs + # Set to partially automated + drs_config.vmotionRate = 3 + return drs_config + + def configure_vsan(self): + vsan_config = vim.vsan.cluster.ConfigInfo() + vsan_config.enabled = self.enable_vsan + vsan_config.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo() + vsan_config.defaultConfig.autoClaimStorage = False + return vsan_config 
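+    # Note: the three configure_* helpers above only build pyVmomi
+    # ConfigInfo sub-specs in memory; nothing reaches vCenter until a
+    # vim.cluster.ConfigSpecEx referencing them is passed to
+    # CreateClusterEx (create) or ReconfigureComputeResource_Task (update).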
+ + def state_create_cluster(self): + try: + cluster_config_spec = vim.cluster.ConfigSpecEx() + cluster_config_spec.dasConfig = self.configure_ha() + cluster_config_spec.drsConfig = self.configure_drs() + if self.enable_vsan: + cluster_config_spec.vsanConfig = self.configure_vsan() + if not self.module.check_mode: + self.datacenter.hostFolder.CreateClusterEx(self.cluster_name, cluster_config_spec) + self.module.exit_json(changed=True) + except vim.fault.DuplicateName: + self.module.fail_json(msg="A cluster with the name %s already exists" % self.cluster_name) + except vmodl.fault.InvalidArgument: + self.module.fail_json(msg="Cluster configuration specification parameter is invalid") + except vim.fault.InvalidName: + self.module.fail_json(msg="%s is an invalid name for a cluster" % self.cluster_name) + except vmodl.fault.NotSupported: + # This should never happen + self.module.fail_json(msg="Trying to create a cluster on an incorrect folder object") + except vmodl.RuntimeFault as runtime_fault: + self.module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + # This should never happen either + self.module.fail_json(msg=method_fault.msg) + + def state_destroy_cluster(self): + changed = True + result = None + + try: + if not self.module.check_mode: + task = self.cluster.Destroy_Task() + changed, result = wait_for_task(task) + self.module.exit_json(changed=changed, result=result) + except vim.fault.VimFault as vim_fault: + self.module.fail_json(msg=vim_fault.msg) + except vmodl.RuntimeFault as runtime_fault: + self.module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + self.module.fail_json(msg=method_fault.msg) + + def state_exit_unchanged(self): + self.module.exit_json(changed=False) + + def state_update_cluster(self): cluster_config_spec = vim.cluster.ConfigSpecEx() - cluster_config_spec.dasConfig = configure_ha(enable_ha) - cluster_config_spec.drsConfig = configure_drs(enable_drs) - if enable_vsan: - cluster_config_spec.vsanConfig = configure_vsan(enable_vsan) - if not module.check_mode: - datacenter.hostFolder.CreateClusterEx(cluster_name, cluster_config_spec) - module.exit_json(changed=True) - except vim.fault.DuplicateName: - module.fail_json(msg="A cluster with the name %s already exists" % cluster_name) - except vmodl.fault.InvalidArgument: - module.fail_json(msg="Cluster configuration specification parameter is invalid") - except vim.fault.InvalidName: - module.fail_json(msg="%s is an invalid name for a cluster" % cluster_name) - except vmodl.fault.NotSupported: - # This should never happen - module.fail_json(msg="Trying to create a cluster on an incorrect folder object") - except vmodl.RuntimeFault as runtime_fault: - module.fail_json(msg=runtime_fault.msg) - except vmodl.MethodFault as method_fault: - # This should never happen either - module.fail_json(msg=method_fault.msg) - - -def state_destroy_cluster(module): - cluster = module.params['cluster'] - changed = True - result = None - - try: - if not module.check_mode: - task = cluster.Destroy_Task() - changed, result = wait_for_task(task) - module.exit_json(changed=changed, result=result) - except vim.fault.VimFault as vim_fault: - module.fail_json(msg=vim_fault.msg) - except vmodl.RuntimeFault as runtime_fault: - module.fail_json(msg=runtime_fault.msg) - except vmodl.MethodFault as method_fault: - module.fail_json(msg=method_fault.msg) - - -def state_exit_unchanged(module): - module.exit_json(changed=False) - - -def state_update_cluster(module): - - cluster_config_spec = 
vim.cluster.ConfigSpecEx() - cluster = module.params['cluster'] - enable_ha = module.params['enable_ha'] - enable_drs = module.params['enable_drs'] - enable_vsan = module.params['enable_vsan'] - changed = True - result = None - - if cluster.configurationEx.dasConfig.enabled != enable_ha: - cluster_config_spec.dasConfig = configure_ha(enable_ha) - if cluster.configurationEx.drsConfig.enabled != enable_drs: - cluster_config_spec.drsConfig = configure_drs(enable_drs) - if cluster.configurationEx.vsanConfigInfo.enabled != enable_vsan: - cluster_config_spec.vsanConfig = configure_vsan(enable_vsan) - - try: - if not module.check_mode: - task = cluster.ReconfigureComputeResource_Task(cluster_config_spec, True) - changed, result = wait_for_task(task) - module.exit_json(changed=changed, result=result) - except vmodl.RuntimeFault as runtime_fault: - module.fail_json(msg=runtime_fault.msg) - except vmodl.MethodFault as method_fault: - module.fail_json(msg=method_fault.msg) - except TaskError as task_e: - module.fail_json(msg=str(task_e)) - - -def check_cluster_configuration(module): - datacenter_name = module.params['datacenter_name'] - cluster_name = module.params['cluster_name'] - - try: - content = connect_to_api(module) - datacenter = find_datacenter_by_name(content, datacenter_name) - if datacenter is None: - module.fail_json(msg="Datacenter %s does not exist, " - "please create first with Ansible Module vmware_datacenter or manually." - % datacenter_name) - cluster = find_cluster_by_name_datacenter(datacenter, cluster_name) - - module.params['content'] = content - module.params['datacenter'] = datacenter - - if cluster is None: - return 'absent' - else: - module.params['cluster'] = cluster - - desired_state = (module.params['enable_ha'], - module.params['enable_drs'], - module.params['enable_vsan']) + changed = True + result = None + + if self.cluster.configurationEx.dasConfig.enabled != self.enable_ha: + cluster_config_spec.dasConfig = self.configure_ha() + if self.cluster.configurationEx.drsConfig.enabled != self.enable_drs: + cluster_config_spec.drsConfig = self.configure_drs() + if self.cluster.configurationEx.vsanConfigInfo.enabled != self.enable_vsan: + cluster_config_spec.vsanConfig = self.configure_vsan() + + try: + if not self.module.check_mode: + task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True) + changed, result = wait_for_task(task) + self.module.exit_json(changed=changed, result=result) + except vmodl.RuntimeFault as runtime_fault: + self.module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + self.module.fail_json(msg=method_fault.msg) + except TaskError as task_e: + self.module.fail_json(msg=str(task_e)) + + def check_cluster_configuration(self): + try: + self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name) + if self.datacenter is None: + self.module.fail_json(msg="Datacenter %s does not exist, " + "please create first with Ansible Module vmware_datacenter or manually." 
+ % self.datacenter_name) + self.cluster = find_cluster_by_name_datacenter(self.datacenter, self.cluster_name) + + if self.cluster is None: + return 'absent' + else: + desired_state = (self.enable_ha, + self.enable_drs, + self.enable_vsan) - current_state = (cluster.configurationEx.dasConfig.enabled, - cluster.configurationEx.drsConfig.enabled, - cluster.configurationEx.vsanConfigInfo.enabled) + current_state = (self.cluster.configurationEx.dasConfig.enabled, + self.cluster.configurationEx.drsConfig.enabled, + self.cluster.configurationEx.vsanConfigInfo.enabled) - if cmp(desired_state, current_state) != 0: - return 'update' - else: - return 'present' - except vmodl.RuntimeFault as runtime_fault: - module.fail_json(msg=runtime_fault.msg) - except vmodl.MethodFault as method_fault: - module.fail_json(msg=method_fault.msg) + if cmp(desired_state, current_state) != 0: + return 'update' + else: + return 'present' + except vmodl.RuntimeFault as runtime_fault: + self.module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + self.module.fail_json(msg=method_fault.msg) def main(): @@ -253,23 +245,8 @@ def main(): if not HAS_PYVMOMI: module.fail_json(msg='pyvmomi is required for this module') - cluster_states = { - 'absent': { - 'present': state_destroy_cluster, - 'absent': state_exit_unchanged, - }, - 'present': { - 'update': state_update_cluster, - 'present': state_exit_unchanged, - 'absent': state_create_cluster, - } - } - desired_state = module.params['state'] - current_state = check_cluster_configuration(module) - - # Based on the desired_state and the current_state call - # the appropriate method from the dictionary - cluster_states[desired_state][current_state](module) + vmware_cluster = VMwareCluster(module) + vmware_cluster.process_state() from ansible.module_utils.vmware import * from ansible.module_utils.basic import * diff --git a/cloud/vmware/vmware_datacenter.py b/cloud/vmware/vmware_datacenter.py index b2083222ed5..fb60f2c9f5c 100644 --- a/cloud/vmware/vmware_datacenter.py +++ b/cloud/vmware/vmware_datacenter.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
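One caveat in the reworked check_cluster_configuration() above: it decides between 'update' and 'present' by comparing two 3-tuples with cmp(), a builtin that only exists on Python 2. A portable sketch of the same desired-vs-current comparison, using namedtuple stand-ins rather than real pyVmomi objects:

import collections

Toggle = collections.namedtuple('Toggle', 'enabled')
ClusterConfig = collections.namedtuple(
    'ClusterConfig', 'dasConfig drsConfig vsanConfigInfo')

def needs_update(enable_ha, enable_drs, enable_vsan, configuration):
    desired = (enable_ha, enable_drs, enable_vsan)
    current = (configuration.dasConfig.enabled,
               configuration.drsConfig.enabled,
               configuration.vsanConfigInfo.enabled)
    # Tuple inequality gives the same changed/unchanged answer as
    # cmp(...) != 0 and works on both Python 2 and Python 3.
    return desired != current

config = ClusterConfig(Toggle(True), Toggle(True), Toggle(False))
print(needs_update(True, True, True, config))  # True -> 'update' state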
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: vmware_datacenter @@ -25,9 +29,9 @@ description: - Manage VMware vSphere Datacenters version_added: 2.0 -author: "Joseph Callen (@jcpowermac)" +author: "Joseph Callen (@jcpowermac), Kamil Szczygiel (@kamsz)" notes: - - Tested on vSphere 5.5 + - Tested on vSphere 6.0 requirements: - "python >= 2.6" - PyVmomi @@ -54,7 +58,8 @@ description: - If the datacenter should be present or absent choices: ['present', 'absent'] - required: True + default: present +extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' @@ -63,7 +68,7 @@ local_action: > vmware_datacenter hostname="{{ ansible_ssh_host }}" username=root password=vmware - datacenter_name="datacenter" + datacenter_name="datacenter" state=present ''' try: @@ -73,18 +78,29 @@ HAS_PYVMOMI = False -def state_create_datacenter(module): - datacenter_name = module.params['datacenter_name'] - content = module.params['content'] - changed = True - datacenter = None +def get_datacenter(context, module): + try: + datacenter_name = module.params.get('datacenter_name') + datacenter = find_datacenter_by_name(context, datacenter_name) + return datacenter + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(msg=method_fault.msg) + - folder = content.rootFolder +def create_datacenter(context, module): + datacenter_name = module.params.get('datacenter_name') + folder = context.rootFolder try: - if not module.check_mode: - datacenter = folder.CreateDatacenter(name=datacenter_name) - module.exit_json(changed=changed, result=str(datacenter)) + datacenter = get_datacenter(context, module) + changed = False + if not datacenter: + changed = True + if not module.check_mode: + folder.CreateDatacenter(name=datacenter_name) + module.exit_json(changed=changed) except vim.fault.DuplicateName: module.fail_json(msg="A datacenter with the name %s already exists" % datacenter_name) except vim.fault.InvalidName: @@ -98,34 +114,17 @@ def state_create_datacenter(module): module.fail_json(msg=method_fault.msg) -def check_datacenter_state(module): - datacenter_name = module.params['datacenter_name'] - - try: - content = connect_to_api(module) - datacenter = find_datacenter_by_name(content, datacenter_name) - module.params['content'] = content - - if datacenter is None: - return 'absent' - else: - module.params['datacenter'] = datacenter - return 'present' - except vmodl.RuntimeFault as runtime_fault: - module.fail_json(msg=runtime_fault.msg) - except vmodl.MethodFault as method_fault: - module.fail_json(msg=method_fault.msg) - - -def state_destroy_datacenter(module): - datacenter = module.params['datacenter'] - changed = True +def destroy_datacenter(context, module): result = None try: - if not module.check_mode: - task = datacenter.Destroy_Task() - changed, result = wait_for_task(task) + datacenter = get_datacenter(context, module) + changed = False + if datacenter: + changed = True + if not module.check_mode: + task = datacenter.Destroy_Task() + changed, result = wait_for_task(task) module.exit_json(changed=changed, result=result) except vim.fault.VimFault as vim_fault: module.fail_json(msg=vim_fault.msg) @@ -135,39 +134,28 @@ def state_destroy_datacenter(module): module.fail_json(msg=method_fault.msg) -def state_exit_unchanged(module): - module.exit_json(changed=False) - - def main(): argument_spec = vmware_argument_spec() 
argument_spec.update( dict( - datacenter_name=dict(required=True, type='str'), - state=dict(required=True, choices=['present', 'absent'], type='str'), - ) + datacenter_name=dict(required=True, type='str'), + state=dict(default='present', choices=['present', 'absent'], type='str') ) + ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) if not HAS_PYVMOMI: module.fail_json(msg='pyvmomi is required for this module') - datacenter_states = { - 'absent': { - 'present': state_destroy_datacenter, - 'absent': state_exit_unchanged, - }, - 'present': { - 'present': state_exit_unchanged, - 'absent': state_create_datacenter, - } - } - desired_state = module.params['state'] - current_state = check_datacenter_state(module) - - datacenter_states[desired_state][current_state](module) + context = connect_to_api(module) + state = module.params.get('state') + + if state == 'present': + create_datacenter(context, module) + if state == 'absent': + destroy_datacenter(context, module) from ansible.module_utils.basic import * from ansible.module_utils.vmware import * diff --git a/cloud/vmware/vmware_dns_config.py b/cloud/vmware/vmware_dns_config.py index b233ed610c8..4faa8b6e295 100644 --- a/cloud/vmware/vmware_dns_config.py +++ b/cloud/vmware/vmware_dns_config.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: vmware_dns_config @@ -32,20 +36,6 @@ - "python >= 2.6" - PyVmomi options: - hostname: - description: - - The hostname or IP address of the vSphere vCenter API server - required: True - username: - description: - - The username of the vSphere vCenter - required: True - aliases: ['user', 'admin'] - password: - description: - - The password of the vSphere vCenter - required: True - aliases: ['pass', 'pwd'] change_hostname_to: description: - The hostname that an ESXi host should be changed to. @@ -58,6 +48,7 @@ description: - The DNS servers that the host should be configured to use. required: True +extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' diff --git a/cloud/vmware/vmware_dvs_host.py b/cloud/vmware/vmware_dvs_host.py index a9c66e4d1a7..031b90ec66f 100644 --- a/cloud/vmware/vmware_dvs_host.py +++ b/cloud/vmware/vmware_dvs_host.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
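Each hunk in this series swaps the repeated hostname/username/password option docs for extends_documentation_fragment, so every vmware module inherits the shared vSphere connection options from one place. As a sketch of the mechanism: a documentation fragment is simply a class exposing a DOCUMENTATION YAML string that gets merged into the module docs (illustrative shape only; the real vmware fragment ships with Ansible core):

class ModuleDocFragment(object):
    # Modules pull this in by listing the fragment name under
    # 'extends_documentation_fragment' in their own DOCUMENTATION.
    DOCUMENTATION = '''
options:
    hostname:
        description:
            - The hostname or IP address of the vSphere vCenter.
        required: true
    username:
        description:
            - The username of the vSphere vCenter.
        required: true
        aliases: ['user', 'admin']
    password:
        description:
            - The password of the vSphere vCenter.
        required: true
        aliases: ['pass', 'pwd']
'''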
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: vmware_dvs_host @@ -32,20 +36,6 @@ - "python >= 2.6" - PyVmomi options: - hostname: - description: - - The hostname or IP address of the vSphere vCenter API server - required: True - username: - description: - - The username of the vSphere vCenter - required: True - aliases: ['user', 'admin'] - password: - description: - - The password of the vSphere vCenter - required: True - aliases: ['pass', 'pwd'] esxi_hostname: description: - The ESXi hostname @@ -63,6 +53,7 @@ - If the host should be present or absent attached to the vSwitch choices: ['present', 'absent'] required: True +extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' @@ -89,154 +80,154 @@ HAS_PYVMOMI = False -def find_dvspg_by_name(dv_switch, portgroup_name): - portgroups = dv_switch.portgroup - - for pg in portgroups: - if pg.name == portgroup_name: - return pg - - return None +class VMwareDvsHost(object): + def __init__(self, module): + self.module = module + self.dv_switch = None + self.uplink_portgroup = None + self.host = None + self.dv_switch = None + self.nic = None + self.content = connect_to_api(self.module) + self.state = self.module.params['state'] + self.switch_name = self.module.params['switch_name'] + self.esxi_hostname = self.module.params['esxi_hostname'] + self.vmnics = self.module.params['vmnics'] + + def process_state(self): + try: + dvs_host_states = { + 'absent': { + 'present': self.state_destroy_dvs_host, + 'absent': self.state_exit_unchanged, + }, + 'present': { + 'update': self.state_update_dvs_host, + 'present': self.state_exit_unchanged, + 'absent': self.state_create_dvs_host, + } + } + dvs_host_states[self.state][self.check_dvs_host_state()]() + except vmodl.RuntimeFault as runtime_fault: + self.module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + self.module.fail_json(msg=method_fault.msg) + except Exception as e: + self.module.fail_json(msg=str(e)) -def find_dvs_uplink_pg(dv_switch): - # There should only always be a single uplink port group on - # a distributed virtual switch + def find_dvspg_by_name(self): + portgroups = self.dv_switch.portgroup - if len(dv_switch.config.uplinkPortgroup): - return dv_switch.config.uplinkPortgroup[0] - else: + for pg in portgroups: + if pg.name == self.portgroup_name: + return pg return None + def find_dvs_uplink_pg(self): + # There should only always be a single uplink port group on + # a distributed virtual switch -# operation should be edit, add and remove -def modify_dvs_host(dv_switch, host, operation, uplink_portgroup=None, vmnics=None): - - spec = vim.DistributedVirtualSwitch.ConfigSpec() - - spec.configVersion = dv_switch.config.configVersion - spec.host = [vim.dvs.HostMember.ConfigSpec()] - spec.host[0].operation = operation - spec.host[0].host = host - - if operation in ("edit", "add"): - spec.host[0].backing = vim.dvs.HostMember.PnicBacking() - count = 0 - - for nic in vmnics: - spec.host[0].backing.pnicSpec.append(vim.dvs.HostMember.PnicSpec()) - spec.host[0].backing.pnicSpec[count].pnicDevice = nic - spec.host[0].backing.pnicSpec[count].uplinkPortgroupKey = uplink_portgroup.key - count += 1 - - task = dv_switch.ReconfigureDvs_Task(spec) - changed, result = wait_for_task(task) - return changed, result - - -def state_destroy_dvs_host(module): - - operation = "remove" - host = module.params['host'] - dv_switch = module.params['dv_switch'] - - changed = True - result = None - - 
if not module.check_mode: - changed, result = modify_dvs_host(dv_switch, host, operation) - module.exit_json(changed=changed, result=str(result)) - - -def state_exit_unchanged(module): - module.exit_json(changed=False) - - -def state_update_dvs_host(module): - dv_switch = module.params['dv_switch'] - uplink_portgroup = module.params['uplink_portgroup'] - vmnics = module.params['vmnics'] - host = module.params['host'] - operation = "edit" - changed = True - result = None - - if not module.check_mode: - changed, result = modify_dvs_host(dv_switch, host, operation, uplink_portgroup, vmnics) - module.exit_json(changed=changed, result=str(result)) - - -def state_create_dvs_host(module): - dv_switch = module.params['dv_switch'] - uplink_portgroup = module.params['uplink_portgroup'] - vmnics = module.params['vmnics'] - host = module.params['host'] - operation = "add" - changed = True - result = None - - if not module.check_mode: - changed, result = modify_dvs_host(dv_switch, host, operation, uplink_portgroup, vmnics) - module.exit_json(changed=changed, result=str(result)) - - -def find_host_attached_dvs(esxi_hostname, dv_switch): - for dvs_host_member in dv_switch.config.host: - if dvs_host_member.config.host.name == esxi_hostname: - return dvs_host_member.config.host - - return None - - -def check_uplinks(dv_switch, host, vmnics): - pnic_device = [] - - for dvs_host_member in dv_switch.config.host: - if dvs_host_member.config.host == host: - for pnicSpec in dvs_host_member.config.backing.pnicSpec: - pnic_device.append(pnicSpec.pnicDevice) - - return collections.Counter(pnic_device) == collections.Counter(vmnics) - + if len(self.dv_switch.config.uplinkPortgroup): + return self.dv_switch.config.uplinkPortgroup[0] + else: + return None + + # operation should be edit, add and remove + def modify_dvs_host(self, operation): + spec = vim.DistributedVirtualSwitch.ConfigSpec() + spec.configVersion = self.dv_switch.config.configVersion + spec.host = [vim.dvs.HostMember.ConfigSpec()] + spec.host[0].operation = operation + spec.host[0].host = self.host + + if operation in ("edit", "add"): + spec.host[0].backing = vim.dvs.HostMember.PnicBacking() + count = 0 + + for nic in self.vmnics: + spec.host[0].backing.pnicSpec.append(vim.dvs.HostMember.PnicSpec()) + spec.host[0].backing.pnicSpec[count].pnicDevice = nic + spec.host[0].backing.pnicSpec[count].uplinkPortgroupKey = self.uplink_portgroup.key + count += 1 + + task = self.dv_switch.ReconfigureDvs_Task(spec) + changed, result = wait_for_task(task) + return changed, result + + def state_destroy_dvs_host(self): + operation = "remove" + changed = True + result = None + + if not self.module.check_mode: + changed, result = self.modify_dvs_host(operation) + self.module.exit_json(changed=changed, result=str(result)) + + def state_exit_unchanged(self): + self.module.exit_json(changed=False) + + def state_update_dvs_host(self): + operation = "edit" + changed = True + result = None + + if not self.module.check_mode: + changed, result = self.modify_dvs_host(operation) + self.module.exit_json(changed=changed, result=str(result)) + + def state_create_dvs_host(self): + operation = "add" + changed = True + result = None + + if not self.module.check_mode: + changed, result = self.modify_dvs_host(operation) + self.module.exit_json(changed=changed, result=str(result)) + + def find_host_attached_dvs(self): + for dvs_host_member in self.dv_switch.config.host: + if dvs_host_member.config.host.name == self.esxi_hostname: + return dvs_host_member.config.host -def 
check_dvs_host_state(module): + return None - switch_name = module.params['switch_name'] - esxi_hostname = module.params['esxi_hostname'] - vmnics = module.params['vmnics'] + def check_uplinks(self): + pnic_device = [] - content = connect_to_api(module) - module.params['content'] = content + for dvs_host_member in self.dv_switch.config.host: + if dvs_host_member.config.host == self.host: + for pnicSpec in dvs_host_member.config.backing.pnicSpec: + pnic_device.append(pnicSpec.pnicDevice) - dv_switch = find_dvs_by_name(content, switch_name) + return collections.Counter(pnic_device) == collections.Counter(self.vmnics) - if dv_switch is None: - raise Exception("A distributed virtual switch %s does not exist" % switch_name) + def check_dvs_host_state(self): + self.dv_switch = find_dvs_by_name(self.content, self.switch_name) - uplink_portgroup = find_dvs_uplink_pg(dv_switch) + if self.dv_switch is None: + raise Exception("A distributed virtual switch %s does not exist" % self.switch_name) - if uplink_portgroup is None: - raise Exception("An uplink portgroup does not exist on the distributed virtual switch %s" % switch_name) + self.uplink_portgroup = self.find_dvs_uplink_pg() - module.params['dv_switch'] = dv_switch - module.params['uplink_portgroup'] = uplink_portgroup + if self.uplink_portgroup is None: + raise Exception("An uplink portgroup does not exist on the distributed virtual switch %s" + % self.switch_name) - host = find_host_attached_dvs(esxi_hostname, dv_switch) + self.host = self.find_host_attached_dvs() - if host is None: - # We still need the HostSystem object to add the host - # to the distributed vswitch - host = find_hostsystem_by_name(content, esxi_hostname) - if host is None: - module.fail_json(msg="The esxi_hostname %s does not exist in vCenter" % esxi_hostname) - module.params['host'] = host - return 'absent' - else: - module.params['host'] = host - if check_uplinks(dv_switch, host, vmnics): - return 'present' + if self.host is None: + # We still need the HostSystem object to add the host + # to the distributed vswitch + self.host = find_hostsystem_by_name(self.content, self.esxi_hostname) + if self.host is None: + self.module.fail_json(msg="The esxi_hostname %s does not exist in vCenter" % self.esxi_hostname) + return 'absent' else: - return 'update' + if self.check_uplinks(): + return 'present' + else: + return 'update' def main(): @@ -252,27 +243,8 @@ def main(): if not HAS_PYVMOMI: module.fail_json(msg='pyvmomi is required for this module') - try: - - dvs_host_states = { - 'absent': { - 'present': state_destroy_dvs_host, - 'absent': state_exit_unchanged, - }, - 'present': { - 'update': state_update_dvs_host, - 'present': state_exit_unchanged, - 'absent': state_create_dvs_host, - } - } - - dvs_host_states[module.params['state']][check_dvs_host_state(module)](module) - except vmodl.RuntimeFault as runtime_fault: - module.fail_json(msg=runtime_fault.msg) - except vmodl.MethodFault as method_fault: - module.fail_json(msg=method_fault.msg) - except Exception as e: - module.fail_json(msg=str(e)) + vmware_dvs_host = VMwareDvsHost(module) + vmware_dvs_host.process_state() from ansible.module_utils.vmware import * from ansible.module_utils.basic import * diff --git a/cloud/vmware/vmware_dvs_portgroup.py b/cloud/vmware/vmware_dvs_portgroup.py index 265f9fd71ef..58b4cff67c7 100644 --- a/cloud/vmware/vmware_dvs_portgroup.py +++ b/cloud/vmware/vmware_dvs_portgroup.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. 
If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: vmware_dvs_portgroup @@ -32,20 +36,6 @@ - "python >= 2.6" - PyVmomi options: - hostname: - description: - - The hostname or IP address of the vSphere vCenter API server - required: True - username: - description: - - The username of the vSphere vCenter - required: True - aliases: ['user', 'admin'] - password: - description: - - The password of the vSphere vCenter - required: True - aliases: ['pass', 'pwd'] portgroup_name: description: - The name of the portgroup that is to be created or deleted @@ -70,6 +60,7 @@ - 'earlyBinding' - 'lateBinding' - 'ephemeral' +extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' @@ -81,7 +72,7 @@ password: vcenter_password portgroup_name: Management switch_name: dvSwitch - vlan_id: 123 + vlan_id: 123 num_ports: 120 portgroup_type: earlyBinding state: present @@ -94,91 +85,100 @@ HAS_PYVMOMI = False -def create_port_group(dv_switch, portgroup_name, vlan_id, num_ports, portgroup_type): - config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec() - - config.name = portgroup_name - config.numPorts = num_ports - - # vim.VMwareDVSPortSetting() does not exist in the pyvmomi documentation - # but this is the correct managed object type. - - config.defaultPortConfig = vim.VMwareDVSPortSetting() - - # vim.VmwareDistributedVirtualSwitchVlanIdSpec() does not exist in the - # pyvmomi documentation but this is the correct managed object type - config.defaultPortConfig.vlan = vim.VmwareDistributedVirtualSwitchVlanIdSpec() - config.defaultPortConfig.vlan.inherited = False - config.defaultPortConfig.vlan.vlanId = vlan_id - config.type = portgroup_type - - spec = [config] - task = dv_switch.AddDVPortgroup_Task(spec) - changed, result = wait_for_task(task) - return changed, result - - -def state_destroy_dvspg(module): - dvs_portgroup = module.params['dvs_portgroup'] - changed = True - result = None - - if not module.check_mode: - task = dvs_portgroup.Destroy_Task() +class VMwareDvsPortgroup(object): + def __init__(self, module): + self.module = module + self.dvs_portgroup = None + self.switch_name = self.module.params['switch_name'] + self.portgroup_name = self.module.params['portgroup_name'] + self.vlan_id = self.module.params['vlan_id'] + self.num_ports = self.module.params['num_ports'] + self.portgroup_type = self.module.params['portgroup_type'] + self.dv_switch = None + self.state = self.module.params['state'] + self.content = connect_to_api(module) + + def process_state(self): + try: + dvspg_states = { + 'absent': { + 'present': self.state_destroy_dvspg, + 'absent': self.state_exit_unchanged, + }, + 'present': { + 'update': self.state_update_dvspg, + 'present': self.state_exit_unchanged, + 'absent': self.state_create_dvspg, + } + } + dvspg_states[self.state][self.check_dvspg_state()]() + except vmodl.RuntimeFault as runtime_fault: + self.module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + self.module.fail_json(msg=method_fault.msg) + except Exception as e: + self.module.fail_json(msg=str(e)) + + def create_port_group(self): + config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec() + + config.name = self.portgroup_name + config.numPorts = self.num_ports + + # vim.VMwareDVSPortSetting() does not exist in the pyvmomi documentation + # but this is the correct managed object type. 
+ + config.defaultPortConfig = vim.VMwareDVSPortSetting() + + # vim.VmwareDistributedVirtualSwitchVlanIdSpec() does not exist in the + # pyvmomi documentation but this is the correct managed object type + config.defaultPortConfig.vlan = vim.VmwareDistributedVirtualSwitchVlanIdSpec() + config.defaultPortConfig.vlan.inherited = False + config.defaultPortConfig.vlan.vlanId = self.vlan_id + config.type = self.portgroup_type + + spec = [config] + task = self.dv_switch.AddDVPortgroup_Task(spec) changed, result = wait_for_task(task) - module.exit_json(changed=changed, result=str(result)) - - -def state_exit_unchanged(module): - module.exit_json(changed=False) - + return changed, result -def state_update_dvspg(module): - module.exit_json(changed=False, msg="Currently not implemented.") - return + def state_destroy_dvspg(self): + changed = True + result = None + if not self.module.check_mode: + task = self.dvs_portgroup.Destroy_Task() + changed, result = wait_for_task(task) + self.module.exit_json(changed=changed, result=str(result)) -def state_create_dvspg(module): + def state_exit_unchanged(self): + self.module.exit_json(changed=False) - switch_name = module.params['switch_name'] - portgroup_name = module.params['portgroup_name'] - dv_switch = module.params['dv_switch'] - vlan_id = module.params['vlan_id'] - num_ports = module.params['num_ports'] - portgroup_type = module.params['portgroup_type'] - changed = True - result = None + def state_update_dvspg(self): + self.module.exit_json(changed=False, msg="Currently not implemented.") - if not module.check_mode: - changed, result = create_port_group(dv_switch, portgroup_name, vlan_id, num_ports, portgroup_type) - module.exit_json(changed=changed, result=str(result)) + def state_create_dvspg(self): + changed = True + result = None + if not self.module.check_mode: + changed, result = self.create_port_group() + self.module.exit_json(changed=changed, result=str(result)) -def check_dvspg_state(module): + def check_dvspg_state(self): + self.dv_switch = find_dvs_by_name(self.content, self.switch_name) - switch_name = module.params['switch_name'] - portgroup_name = module.params['portgroup_name'] + if self.dv_switch is None: + raise Exception("A distributed virtual switch with name %s does not exist" % self.switch_name) + self.dvs_portgroup = find_dvspg_by_name(self.dv_switch, self.portgroup_name) - content = connect_to_api(module) - module.params['content'] = content - - dv_switch = find_dvs_by_name(content, switch_name) - - if dv_switch is None: - raise Exception("A distributed virtual switch with name %s does not exist" % switch_name) - - module.params['dv_switch'] = dv_switch - dvs_portgroup = find_dvspg_by_name(dv_switch, portgroup_name) - - if dvs_portgroup is None: - return 'absent' - else: - module.params['dvs_portgroup'] = dvs_portgroup - return 'present' + if self.dvs_portgroup is None: + return 'absent' + else: + return 'present' def main(): - argument_spec = vmware_argument_spec() argument_spec.update(dict(portgroup_name=dict(required=True, type='str'), switch_name=dict(required=True, type='str'), @@ -192,25 +192,8 @@ def main(): if not HAS_PYVMOMI: module.fail_json(msg='pyvmomi is required for this module') - try: - dvspg_states = { - 'absent': { - 'present': state_destroy_dvspg, - 'absent': state_exit_unchanged, - }, - 'present': { - 'update': state_update_dvspg, - 'present': state_exit_unchanged, - 'absent': state_create_dvspg, - } - } - dvspg_states[module.params['state']][check_dvspg_state(module)](module) - except vmodl.RuntimeFault as 
runtime_fault: - module.fail_json(msg=runtime_fault.msg) - except vmodl.MethodFault as method_fault: - module.fail_json(msg=method_fault.msg) - except Exception as e: - module.fail_json(msg=str(e)) + vmware_dvs_portgroup = VMwareDvsPortgroup(module) + vmware_dvs_portgroup.process_state() from ansible.module_utils.vmware import * from ansible.module_utils.basic import * diff --git a/cloud/vmware/vmware_dvswitch.py b/cloud/vmware/vmware_dvswitch.py index 26212a06c5f..b3108f6a9d3 100644 --- a/cloud/vmware/vmware_dvswitch.py +++ b/cloud/vmware/vmware_dvswitch.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: vmware_dvswitch @@ -32,20 +36,6 @@ - "python >= 2.6" - PyVmomi options: - hostname: - description: - - The hostname or IP address of the vSphere vCenter API server - required: True - username: - description: - - The username of the vSphere vCenter - required: True - aliases: ['user', 'admin'] - password: - description: - - The password of the vSphere vCenter - required: True - aliases: ['pass', 'pwd'] datacenter_name: description: - The name of the datacenter that will contain the dvSwitch @@ -85,6 +75,7 @@ - 'present' - 'absent' required: False +extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' - name: Create dvswitch @@ -108,78 +99,93 @@ except ImportError: HAS_PYVMOMI = False +class VMwareDVSwitch(object): + def __init__(self, module): + self.module = module + self.dvs = None + self.switch_name = self.module.params['switch_name'] + self.datacenter_name = self.module.params['datacenter_name'] + self.mtu = self.module.params['mtu'] + self.uplink_quantity = self.module.params['uplink_quantity'] + self.discovery_proto = self.module.params['discovery_proto'] + self.discovery_operation = self.module.params['discovery_operation'] + self.switch_name = self.module.params['switch_name'] + self.state = self.module.params['state'] + self.content = connect_to_api(module) + + def process_state(self): + try: + dvs_states = { + 'absent': { + 'present': self.state_destroy_dvs, + 'absent': self.state_exit_unchanged, + }, + 'present': { + 'update': self.state_update_dvs, + 'present': self.state_exit_unchanged, + 'absent': self.state_create_dvs, + } + } + dvs_states[self.state][self.check_dvs_configuration()]() + except vmodl.RuntimeFault as runtime_fault: + self.module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + self.module.fail_json(msg=method_fault.msg) + except Exception as e: + self.module.fail_json(msg=str(e)) -def create_dvswitch(network_folder, switch_name, mtu, uplink_quantity, discovery_proto, discovery_operation): - - result = None - changed = False - - spec = vim.DistributedVirtualSwitch.CreateSpec() - spec.configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec() - spec.configSpec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy() - spec.configSpec.linkDiscoveryProtocolConfig = vim.host.LinkDiscoveryProtocolConfig() - - spec.configSpec.name = switch_name - spec.configSpec.maxMtu = mtu - spec.configSpec.linkDiscoveryProtocolConfig.protocol = discovery_proto - spec.configSpec.linkDiscoveryProtocolConfig.operation = discovery_operation - spec.productInfo = vim.dvs.ProductSpec() - spec.productInfo.name = "DVS" - spec.productInfo.vendor = "VMware" - - for count in range(1, uplink_quantity+1): - 
spec.configSpec.uplinkPortPolicy.uplinkPortName.append("uplink%d" % count) - - task = network_folder.CreateDVS_Task(spec) - changed, result = wait_for_task(task) - return changed, result + def create_dvswitch(self, network_folder): + result = None + changed = False -def state_exit_unchanged(module): - module.exit_json(changed=False) + spec = vim.DistributedVirtualSwitch.CreateSpec() + spec.configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec() + spec.configSpec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy() + spec.configSpec.linkDiscoveryProtocolConfig = vim.host.LinkDiscoveryProtocolConfig() + spec.configSpec.name = self.switch_name + spec.configSpec.maxMtu = self.mtu + spec.configSpec.linkDiscoveryProtocolConfig.protocol = self.discovery_proto + spec.configSpec.linkDiscoveryProtocolConfig.operation = self.discovery_operation + spec.productInfo = vim.dvs.ProductSpec() + spec.productInfo.name = "DVS" + spec.productInfo.vendor = "VMware" -def state_destroy_dvs(module): - dvs = module.params['dvs'] - task = dvs.Destroy_Task() - changed, result = wait_for_task(task) - module.exit_json(changed=changed, result=str(result)) + for count in range(1, self.uplink_quantity+1): + spec.configSpec.uplinkPortPolicy.uplinkPortName.append("uplink%d" % count) + task = network_folder.CreateDVS_Task(spec) + changed, result = wait_for_task(task) + return changed, result -def state_update_dvs(module): - module.exit_json(changed=False, msg="Currently not implemented.") + def state_exit_unchanged(self): + self.module.exit_json(changed=False) + def state_destroy_dvs(self): + task = self.dvs.Destroy_Task() + changed, result = wait_for_task(task) + self.module.exit_json(changed=changed, result=str(result)) -def state_create_dvs(module): - switch_name = module.params['switch_name'] - datacenter_name = module.params['datacenter_name'] - content = module.params['content'] - mtu = module.params['mtu'] - uplink_quantity = module.params['uplink_quantity'] - discovery_proto = module.params['discovery_proto'] - discovery_operation = module.params['discovery_operation'] + def state_update_dvs(self): + self.module.exit_json(changed=False, msg="Currently not implemented.") - changed = True - result = None + def state_create_dvs(self): + changed = True + result = None - if not module.check_mode: - dc = find_datacenter_by_name(content, datacenter_name) - changed, result = create_dvswitch(dc.networkFolder, switch_name, - mtu, uplink_quantity, discovery_proto, - discovery_operation) - module.exit_json(changed=changed, result=str(result)) + if not self.module.check_mode: + dc = find_datacenter_by_name(self.content, self.datacenter_name) + changed, result = self.create_dvswitch(dc.networkFolder) + self.module.exit_json(changed=changed, result=str(result)) -def check_dvs_configuration(module): - switch_name = module.params['switch_name'] - content = connect_to_api(module) - module.params['content'] = content - dvs = find_dvs_by_name(content, switch_name) - if dvs is None: - return 'absent' - else: - module.params['dvs'] = dvs - return 'present' + def check_dvs_configuration(self): + self.dvs = find_dvs_by_name(self.content, self.switch_name) + if self.dvs is None: + return 'absent' + else: + return 'present' def main(): @@ -197,26 +203,8 @@ def main(): if not HAS_PYVMOMI: module.fail_json(msg='pyvmomi is required for this module') - try: - # Currently state_update_dvs is not implemented. 
- dvs_states = { - 'absent': { - 'present': state_destroy_dvs, - 'absent': state_exit_unchanged, - }, - 'present': { - 'update': state_update_dvs, - 'present': state_exit_unchanged, - 'absent': state_create_dvs, - } - } - dvs_states[module.params['state']][check_dvs_configuration(module)](module) - except vmodl.RuntimeFault as runtime_fault: - module.fail_json(msg=runtime_fault.msg) - except vmodl.MethodFault as method_fault: - module.fail_json(msg=method_fault.msg) - except Exception as e: - module.fail_json(msg=str(e)) + vmware_dvswitch = VMwareDVSwitch(module) + vmware_dvswitch.process_state() from ansible.module_utils.vmware import * from ansible.module_utils.basic import * diff --git a/cloud/vmware/vmware_guest.py b/cloud/vmware/vmware_guest.py new file mode 100644 index 00000000000..cf3e83b3833 --- /dev/null +++ b/cloud/vmware/vmware_guest.py @@ -0,0 +1,1349 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: vmware_guest +short_description: Manages virtualmachines in vcenter +description: + - Uses pyvmomi to ... + - copy a template to a new virtualmachine + - poweron/poweroff/restart a virtualmachine + - remove a virtualmachine +version_added: 2.2 +author: James Tanner (@jctanner) +notes: + - Tested on vSphere 6.0 +requirements: + - "python >= 2.6" + - PyVmomi +options: + state: + description: + - What state should the virtualmachine be in? + required: True + choices: ['present', 'absent', 'poweredon', 'poweredoff', 'restarted', 'suspended'] + name: + description: + - Name of the newly deployed guest + required: True + name_match: + description: + - If multiple vms matching the name, use the first or last found + required: False + default: 'first' + choices: ['first', 'last'] + uuid: + description: + - UUID of the instance to manage if known, this is vmware's unique identifier. + - This is required if name is not supplied. + required: False + template: + description: + - Name of the template to deploy, if needed to create the guest (state=present). + - If the guest exists already this setting will be ignored. + required: False + folder: + description: + - Destination folder path for the new guest + required: False + hardware: + description: + - Attributes such as cpus, memory, osid, and disk controller + required: False + disk: + description: + - A list of disks to add + required: False + nic: + description: + - A list of nics to add + required: False + wait_for_ip_address: + description: + - Wait until vcenter detects an IP address for the guest + required: False + force: + description: + - Ignore warnings and complete the actions + required: False + datacenter: + description: + - Destination datacenter for the deploy operation + required: True + esxi_hostname: + description: + - The esxi hostname where the VM will run. 
+ required: False + annotation: + description: + - A note or annotation to include in the VM + required: False + version_added: "2.3" + customize: + description: + - Should customization spec be run + required: False + version_added: "2.3" + ips: + description: + - IP Addresses to set + required: False + version_added: "2.3" + networks: + description: + - Network to use should include VM network name and gateway + required: False + version_added: "2.3" + dns_servers: + description: + - DNS servers to use + required: False + version_added: "2.3" + domain: + description: + - Domain to use while customizing + required: False + version_added: "2.3" + snapshot_op: + description: + - A key, value pair of snapshot operation types and their additional required parameters. + required: False + version_added: "2.3" +extends_documentation_fragment: vmware.documentation +''' + +EXAMPLES = ''' +Example from Ansible playbook +# +# Create a VM from a template +# + - name: create the VM + vmware_guest: + validate_certs: False + hostname: 192.0.2.44 + username: administrator@vsphere.local + password: vmware + name: testvm_2 + state: poweredon + folder: testvms + disk: + - size_gb: 10 + type: thin + datastore: g73_datastore + nic: + - type: vmxnet3 + network: VM Network + network_type: standard + hardware: + memory_mb: 512 + num_cpus: 1 + osid: centos64guest + scsi: paravirtual + datacenter: datacenter1 + esxi_hostname: 192.0.2.117 + template: template_el7 + wait_for_ip_address: yes + register: deploy + +# +# Clone Template and customize +# + - name: Clone template and customize + vmware_guest: + hostname: "192.168.1.209" + username: "administrator@vsphere.local" + password: "vmware" + validate_certs: False + name: testvm-2 + datacenter: datacenter1 + cluster: cluster + validate_certs: False + template: template_el7 + customize: True + domain: "example.com" + dns_servers: ['192.168.1.1','192.168.1.2'] + ips: "192.168.1.100" + networks: + '192.168.1.0/24': + network: 'VM Network' + gateway: '192.168.1.1' +# +# Gather facts only +# + - name: gather the VM facts + vmware_guest: + validate_certs: False + hostname: 192.168.1.209 + username: administrator@vsphere.local + password: vmware + name: testvm_2 + esxi_hostname: 192.168.1.117 + register: facts + +### Snapshot Operations +# Create snapshot + - vmware_guest: + hostname: 192.168.1.209 + username: administrator@vsphere.local + password: vmware + validate_certs: False + name: dummy_vm + snapshot_op: + op_type: create + name: snap1 + description: snap1_description + +# Remove a snapshot + - vmware_guest: + hostname: 192.168.1.209 + username: administrator@vsphere.local + password: vmware + validate_certs: False + name: dummy_vm + snapshot_op: + op_type: remove + name: snap1 + +# Revert to a snapshot + - vmware_guest: + hostname: 192.168.1.209 + username: administrator@vsphere.local + password: vmware + validate_certs: False + name: dummy_vm + snapshot_op: + op_type: revert + name: snap1 + +# List all snapshots of a VM + - vmware_guest: + hostname: 192.168.1.209 + username: administrator@vsphere.local + password: vmware + validate_certs: False + name: dummy_vm + snapshot_op: + op_type: list_all + +# List current snapshot of a VM + - vmware_guest: + hostname: 192.168.1.209 + username: administrator@vsphere.local + password: vmware + validate_certs: False + name: dummy_vm + snapshot_op: + op_type: list_current + +# Remove all snapshots of a VM + - vmware_guest: + hostname: 192.168.1.209 + username: administrator@vsphere.local + password: vmware + validate_certs: 
False + name: dummy_vm + snapshot_op: + op_type: remove_all +''' + +RETURN = """ +instance: + descripton: metadata about the new virtualmachine + returned: always + type: dict + sample: None +""" + +try: + import json +except ImportError: + import simplejson as json + +HAS_PYVMOMI = False +try: + import pyVmomi + from pyVmomi import vim + HAS_PYVMOMI = True +except ImportError: + pass + +import os +import time +from netaddr import IPNetwork, IPAddress + +from ansible.module_utils.urls import fetch_url + +class PyVmomiHelper(object): + + def __init__(self, module): + + if not HAS_PYVMOMI: + module.fail_json(msg='pyvmomi module required') + + self.module = module + self.params = module.params + self.si = None + self.smartconnect() + self.datacenter = None + self.folders = None + self.foldermap = None + + def smartconnect(self): + self.content = connect_to_api(self.module) + + def _build_folder_tree(self, folder, tree={}, treepath=None): + + tree = {'virtualmachines': [], + 'subfolders': {}, + 'vimobj': folder, + 'name': folder.name} + + children = None + if hasattr(folder, 'childEntity'): + children = folder.childEntity + + if children: + for child in children: + if child == folder or child in tree: + continue + if isinstance(child, vim.Folder): + ctree = self._build_folder_tree(child) + tree['subfolders'][child] = dict.copy(ctree) + elif isinstance(child, vim.VirtualMachine): + tree['virtualmachines'].append(child) + else: + if isinstance(folder, vim.VirtualMachine): + return folder + return tree + + + def _build_folder_map(self, folder, vmap={}, inpath='/'): + + ''' Build a searchable index for vms+uuids+folders ''' + + if isinstance(folder, tuple): + folder = folder[1] + + if not 'names' in vmap: + vmap['names'] = {} + if not 'uuids' in vmap: + vmap['uuids'] = {} + if not 'paths' in vmap: + vmap['paths'] = {} + + if inpath == '/': + thispath = '/vm' + else: + thispath = os.path.join(inpath, folder['name']) + + if thispath not in vmap['paths']: + vmap['paths'][thispath] = [] + + # helpful for isolating folder objects later on + if not 'path_by_fvim' in vmap: + vmap['path_by_fvim'] = {} + if not 'fvim_by_path' in vmap: + vmap['fvim_by_path'] = {} + # store object by path and store path by object + vmap['fvim_by_path'][thispath] = folder['vimobj'] + vmap['path_by_fvim'][folder['vimobj']] = thispath + + # helpful for isolating vm objects later on + if not 'path_by_vvim' in vmap: + vmap['path_by_vvim'] = {} + if not 'vvim_by_path' in vmap: + vmap['vvim_by_path'] = {} + if thispath not in vmap['vvim_by_path']: + vmap['vvim_by_path'][thispath] = [] + + + for item in folder.items(): + k = item[0] + v = item[1] + + if k == 'name': + pass + elif k == 'subfolders': + for x in v.items(): + vmap = self._build_folder_map(x, vmap=vmap, inpath=thispath) + elif k == 'virtualmachines': + for x in v: + if not x.config.name in vmap['names']: + vmap['names'][x.config.name] = [] + vmap['names'][x.config.name].append(x.config.uuid) + vmap['uuids'][x.config.uuid] = x.config.name + vmap['paths'][thispath].append(x.config.uuid) + + if x not in vmap['vvim_by_path'][thispath]: + vmap['vvim_by_path'][thispath].append(x) + if x not in vmap['path_by_vvim']: + vmap['path_by_vvim'][x] = thispath + return vmap + + def getfolders(self): + + if not self.datacenter: + self.get_datacenter() + self.folders = self._build_folder_tree(self.datacenter.vmFolder) + self.folder_map = self._build_folder_map(self.folders) + return (self.folders, self.folder_map) + + def compile_folder_path_for_object(self, vobj): + ''' make a 
/vm/foo/bar/baz like folder path for an object ''' + paths = [] + if isinstance(vobj, vim.Folder): + paths.append(vobj.name) + + thisobj = vobj + while hasattr(thisobj, 'parent'): + thisobj = thisobj.parent + if isinstance(thisobj, vim.Folder): + paths.append(thisobj.name) + paths.reverse() + if paths[0] == 'Datacenters': + paths.remove('Datacenters') + return '/' + '/'.join(paths) + + def get_datacenter(self): + self.datacenter = get_obj(self.content, [vim.Datacenter], + self.params['datacenter']) + + def getvm(self, name=None, uuid=None, folder=None, name_match=None): + + # https://www.vmware.com/support/developer/vc-sdk/visdk2xpubs/ReferenceGuide/vim.SearchIndex.html + # self.si.content.searchIndex.FindByInventoryPath('DC1/vm/test_folder') + + vm = None + folder_path = None + searchpath = None + + if uuid: + vm = self.content.searchIndex.FindByUuid(uuid=uuid, vmSearch=True) + + elif folder: + + if self.params['folder'].endswith('/'): + self.params['folder'] = self.params['folder'][0:-1] + + # Build the absolute folder path to pass into the search method + if self.params['folder'].startswith('/vm'): + searchpath = '%s' % self.params['datacenter'] + searchpath += self.params['folder'] + elif self.params['folder'].startswith('/'): + searchpath = '%s' % self.params['datacenter'] + searchpath += '/vm' + self.params['folder'] + else: + # need to look for matching absolute path + if not self.folders: + self.getfolders() + paths = self.folder_map['paths'].keys() + paths = [x for x in paths if x.endswith(self.params['folder'])] + if len(paths) > 1: + self.module.fail_json(msg='%s matches more than one folder. Please use the absolute path starting with /vm/' % self.params['folder']) + elif paths: + searchpath = paths[0] + + if searchpath: + # get all objects for this path ... + fObj = self.content.searchIndex.FindByInventoryPath(searchpath) + if fObj: + if isinstance(fObj, vim.Datacenter): + fObj = fObj.vmFolder + for cObj in fObj.childEntity: + if not isinstance(cObj, vim.VirtualMachine): + continue + if cObj.name == name: + vm = cObj + break + + if not vm: + + # FIXME - this is unused if folder has a default value + vmList = get_all_objs(self.content, [vim.VirtualMachine]) + + # narrow down by folder + if folder: + if not self.folders: + self.getfolders() + + # compare the folder path of each VM against the search path + for item in vmList.items(): + vobj = item[0] + if not isinstance(vobj.parent, vim.Folder): + continue + if self.compile_folder_path_for_object(vobj) == searchpath: + return vobj + + if name_match: + if name_match == 'first': + vm = get_obj(self.content, [vim.VirtualMachine], name) + elif name_match == 'last': + matches = [] + vmList = get_all_objs(self.content, [vim.VirtualMachine]) + for thisvm in vmList: + if thisvm.config.name == name: + matches.append(thisvm) + if matches: + vm = matches[-1] + else: + matches = [] + vmList = get_all_objs(self.content, [vim.VirtualMachine]) + for thisvm in vmList: + if thisvm.config.name == name: + matches.append(thisvm) + if len(matches) > 1: + module.fail_json(msg='more than 1 vm exists by the name %s. Please specify a uuid, or a folder, or a datacenter or name_match' % name) + if matches: + vm = matches[0] + + return vm + + + def set_powerstate(self, vm, state, force): + """ + Set the power status for a VM determined by the current and + requested states. 
force is forceful + """ + facts = self.gather_facts(vm) + expected_state = state.replace('_', '').lower() + current_state = facts['hw_power_status'].lower() + result = {} + + # Need Force + if not force and current_state not in ['poweredon', 'poweredoff']: + return "VM is in %s power state. Force is required!" % current_state + + # State is already true + if current_state == expected_state: + result['changed'] = False + result['failed'] = False + else: + task = None + try: + if expected_state == 'poweredoff': + task = vm.PowerOff() + + elif expected_state == 'poweredon': + task = vm.PowerOn() + + elif expected_state == 'restarted': + if current_state in ('poweredon', 'poweringon', 'resetting'): + task = vm.Reset() + else: + result = {'changed': False, 'failed': True, + 'msg': "Cannot restart VM in the current state %s" % current_state} + + except Exception: + result = {'changed': False, 'failed': True, + 'msg': get_exception()} + + if task: + self.wait_for_task(task) + if task.info.state == 'error': + result = {'changed': False, 'failed': True, 'msg': task.info.error.msg} + else: + result = {'changed': True, 'failed': False} + + # need to get new metadata if changed + if result['changed']: + newvm = self.getvm(uuid=vm.config.uuid) + facts = self.gather_facts(newvm) + result['instance'] = facts + return result + + + def gather_facts(self, vm): + + ''' Gather facts from vim.VirtualMachine object. ''' + + facts = { + 'module_hw': True, + 'hw_name': vm.config.name, + 'hw_power_status': vm.summary.runtime.powerState, + 'hw_guest_full_name': vm.summary.guest.guestFullName, + 'hw_guest_id': vm.summary.guest.guestId, + 'hw_product_uuid': vm.config.uuid, + 'hw_processor_count': vm.config.hardware.numCPU, + 'hw_memtotal_mb': vm.config.hardware.memoryMB, + 'hw_interfaces':[], + 'ipv4': None, + 'ipv6': None, + } + + netDict = {} + for device in vm.guest.net: + mac = device.macAddress + ips = list(device.ipAddress) + netDict[mac] = ips + for k,v in netDict.iteritems(): + for ipaddress in v: + if ipaddress: + if '::' in ipaddress: + facts['ipv6'] = ipaddress + else: + facts['ipv4'] = ipaddress + + for idx,entry in enumerate(vm.config.hardware.device): + if not hasattr(entry, 'macAddress'): + continue + + factname = 'hw_eth' + str(idx) + facts[factname] = { + 'addresstype': entry.addressType, + 'label': entry.deviceInfo.label, + 'macaddress': entry.macAddress, + 'ipaddresses': netDict.get(entry.macAddress, None), + 'macaddress_dash': entry.macAddress.replace(':', '-'), + 'summary': entry.deviceInfo.summary, + } + facts['hw_interfaces'].append('eth'+str(idx)) + + return facts + + + def remove_vm(self, vm): + # https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.ManagedEntity.html#destroy + task = vm.Destroy() + self.wait_for_task(task) + + if task.info.state == 'error': + return ({'changed': False, 'failed': True, 'msg': task.info.error.msg}) + else: + return ({'changed': True, 'failed': False}) + + + def deploy_template(self, poweron=False, wait_for_ip=False): + + # https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/clone_vm.py + # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.CloneSpec.html + # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.ConfigSpec.html + # https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html + + # FIXME: + # - clusters + # - multiple datacenters + # - resource pools + # - multiple templates by the same name 
+ # - multiple disks + # - changing the esx host is ignored? + # - static IPs + + # FIXME: need to search for this in the same way as guests to ensure accuracy + template = get_obj(self.content, [vim.VirtualMachine], self.params['template']) + if not template: + self.module.fail_json(msg="Could not find a template named %s" % self.params['template']) + + datacenters = get_all_objs(self.content, [vim.Datacenter]) + datacenter = get_obj(self.content, [vim.Datacenter], + self.params['datacenter']) + if not datacenter: + self.module.fail_json(msg='No datacenter named %s was found' % self.params['datacenter']) + + if not self.foldermap: + self.folders, self.foldermap = self.getfolders() + + # find matching folders + if self.params['folder'].startswith('/'): + folders = [x for x in self.foldermap['fvim_by_path'].items() if x[0] == self.params['folder']] + else: + folders = [x for x in self.foldermap['fvim_by_path'].items() if x[0].endswith(self.params['folder'])] + + # throw error if more than one match or no matches + if len(folders) == 0: + self.module.fail_json(msg='no folder matched the path: %s' % self.params['folder']) + elif len(folders) > 1: + self.module.fail_json(msg='too many folders matched "%s", please give the full path starting with /vm/' % self.params['folder']) + + # grab the folder vim object + destfolder = folders[0][1] + + # if the user wants a cluster, get the list of hosts for the cluster and use the first one + if self.params['cluster']: + cluster = get_obj(self.content, [vim.ClusterComputeResource], self.params['cluster']) + if not cluster: + self.module.fail_json(msg="Failed to find a cluster named %s" % self.params['cluster']) + #resource_pool = cluster.resourcePool + hostsystems = [x for x in cluster.host] + hostsystem = hostsystems[0] + else: + hostsystem = get_obj(self.content, [vim.HostSystem], self.params['esxi_hostname']) + if not hostsystem: + self.module.fail_json(msg="Failed to find a host named %s" % self.params['esxi_hostname']) + + # set the destination datastore in the relocation spec + datastore_name = None + datastore = None + if self.params['disk']: + if 'datastore' in self.params['disk'][0]: + datastore_name = self.params['disk'][0]['datastore'] + datastore = get_obj(self.content, [vim.Datastore], datastore_name) + if not datastore: + # use the template's existing DS + disks = [x for x in template.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)] + datastore = disks[0].backing.datastore + datastore_name = datastore.name + if not datastore: + self.module.fail_json(msg="Failed to find a matching datastore") + + # create the relocation spec + relospec = vim.vm.RelocateSpec() + relospec.host = hostsystem + relospec.datastore = datastore + + # Find the associated resourcepool for the host system + # * FIXME: find resourcepool for clusters too + resource_pool = None + resource_pools = get_all_objs(self.content, [vim.ResourcePool]) + for rp in resource_pools.items(): + if not rp[0]: + continue + if not hasattr(rp[0], 'parent'): + continue + if rp[0].parent == hostsystem.parent: + resource_pool = rp[0] + break + if resource_pool: + relospec.pool = resource_pool + else: + self.module.fail_json(msg="Failed to find a resource group for %s" \ + % hostsystem.name) + + clonespec_kwargs = {} + clonespec_kwargs['location'] = relospec + + # create disk spec if not default + if self.params['disk']: + # grab the template's first disk and modify it for this customization + disks = [x for x in template.config.hardware.device if isinstance(x, 
vim.vm.device.VirtualDisk)]
+            diskspec = vim.vm.device.VirtualDeviceSpec()
+            # set the operation to edit so that it knows to keep other settings
+            diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+            diskspec.device = disks[0]
+
+            # get the first disk attributes
+            pspec = self.params.get('disk')[0]
+
+            # is it thin?
+            if pspec.get('type', '').lower() == 'thin':
+                diskspec.device.backing.thinProvisioned = True
+
+            # which datastore?
+            if pspec.get('datastore'):
+                # This is already handled by the relocation spec,
+                # but it needs to eventually be handled for all the
+                # other disks defined
+                pass
+
+            # what size is it?
+            if [x for x in pspec.keys() if x.startswith('size_') or x == 'size']:
+                # size_tb, size_gb, size_mb, size_kb, size_b ...?
+                if 'size' in pspec:
+                    expected = ''.join(c for c in pspec['size'] if c.isdigit())
+                    unit = pspec['size'].replace(expected, '').lower()
+                    expected = int(expected)
+                else:
+                    param = [x for x in pspec.keys() if x.startswith('size_')][0]
+                    unit = param.split('_')[-1].lower()
+                    expected = [x[1] for x in pspec.items() if x[0].startswith('size_')][0]
+                    expected = int(expected)
+
+                kb = None
+                if unit == 'tb':
+                    kb = expected * 1024 * 1024 * 1024
+                elif unit == 'gb':
+                    kb = expected * 1024 * 1024
+                elif unit == 'mb':
+                    kb = expected * 1024
+                elif unit == 'kb':
+                    kb = expected
+                else:
+                    self.module.fail_json(msg='%s is not a supported unit for disk size' % unit)
+                diskspec.device.capacityInKB = kb
+
+            # tell the configspec that the disk device needs to change
+            configspec = vim.vm.ConfigSpec(deviceChange=[diskspec])
+            clonespec_kwargs['config'] = configspec
+
+        # set cpu/memory/etc
+        if 'hardware' in self.params:
+            if 'config' not in clonespec_kwargs:
+                clonespec_kwargs['config'] = vim.vm.ConfigSpec()
+            if 'num_cpus' in self.params['hardware']:
+                clonespec_kwargs['config'].numCPUs = \
+                    int(self.params['hardware']['num_cpus'])
+            if 'memory_mb' in self.params['hardware']:
+                clonespec_kwargs['config'].memoryMB = \
+                    int(self.params['hardware']['memory_mb'])
+
+        # let's try to assign a static IP address
+        if self.params['customize'] is True:
+            ip_settings = list()
+            if self.params['ips']:
+                for ip_string in self.params['ips']:
+                    ip = IPAddress(ip_string)
+                    for network in self.params['networks']:
+                        if network:
+                            if ip in IPNetwork(network):
+                                self.params['networks'][network]['ip'] = str(ip)
+                                ipnet = IPNetwork(network)
+                                self.params['networks'][network]['subnet_mask'] = str(
+                                    ipnet.netmask
+                                )
+                                ip_settings.append(self.params['networks'][network])
+
+            key = 0
+            network = get_obj(self.content, [vim.Network], ip_settings[key]['network'])
+            datacenter = get_obj(self.content, [vim.Datacenter], self.params['datacenter'])
+            # get the folder where VMs are kept for this datacenter
+            destfolder = datacenter.vmFolder
+
+            cluster = get_obj(self.content, [vim.ClusterComputeResource], self.params['cluster'])
+
+            devices = []
+            adaptermaps = []
+
+            try:
+                for device in template.config.hardware.device:
+                    if hasattr(device, 'addressType'):
+                        nic = vim.vm.device.VirtualDeviceSpec()
+                        nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
+                        nic.device = device
+                        devices.append(nic)
+            except Exception:
+                pass
+
+            # single device support
+            nic = vim.vm.device.VirtualDeviceSpec()
+            nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+            nic.device = vim.vm.device.VirtualVmxnet3()
+            nic.device.wakeOnLanEnabled = True
+            nic.device.addressType = 'assigned'
+            nic.device.deviceInfo = vim.Description()
+            nic.device.deviceInfo.label = 'Network Adapter %s' %
(key + 1) + nic.device.deviceInfo.summary = ip_settings[key]['network'] + + if hasattr(get_obj(self.content, [vim.Network], ip_settings[key]['network']), 'portKeys'): + # VDS switch + pg_obj = get_obj(self.content, [vim.dvs.DistributedVirtualPortgroup], ip_settings[key]['network']) + dvs_port_connection = vim.dvs.PortConnection() + dvs_port_connection.portgroupKey= pg_obj.key + dvs_port_connection.switchUuid= pg_obj.config.distributedVirtualSwitch.uuid + nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo() + nic.device.backing.port = dvs_port_connection + + else: + # vSwitch + nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo() + nic.device.backing.network = get_obj(self.content, [vim.Network], ip_settings[key]['network']) + nic.device.backing.deviceName = ip_settings[key]['network'] + + nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo() + nic.device.connectable.startConnected = True + nic.device.connectable.allowGuestControl = True + nic.device.connectable.connected = True + nic.device.connectable.allowGuestControl = True + devices.append(nic) + + # Update the spec with the added NIC + clonespec_kwargs['config'].deviceChange = devices + + guest_map = vim.vm.customization.AdapterMapping() + guest_map.adapter = vim.vm.customization.IPSettings() + guest_map.adapter.ip = vim.vm.customization.FixedIp() + guest_map.adapter.ip.ipAddress = str(ip_settings[key]['ip']) + guest_map.adapter.subnetMask = str(ip_settings[key]['subnet_mask']) + + try: + guest_map.adapter.gateway = ip_settings[key]['gateway'] + except: + pass + + try: + guest_map.adapter.dnsDomain = self.params['domain'] + except: + pass + + adaptermaps.append(guest_map) + + # DNS settings + globalip = vim.vm.customization.GlobalIPSettings() + globalip.dnsServerList = self.params['dns_servers'] + globalip.dnsSuffixList = str(self.params['domain']) + + # Hostname settings + ident = vim.vm.customization.LinuxPrep() + ident.domain = str(self.params['domain']) + ident.hostName = vim.vm.customization.FixedName() + ident.hostName.name = self.params['name'] + + customspec = vim.vm.customization.Specification() + clonespec_kwargs['customization'] = customspec + + clonespec_kwargs['customization'].nicSettingMap = adaptermaps + clonespec_kwargs['customization'].globalIPSettings = globalip + clonespec_kwargs['customization'].identity = ident + + clonespec = vim.vm.CloneSpec(**clonespec_kwargs) + task = template.Clone(folder=destfolder, name=self.params['name'], spec=clonespec) + self.wait_for_task(task) + + if task.info.state == 'error': + # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361 + # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173 + return ({'changed': False, 'failed': True, 'msg': task.info.error.msg}) + else: + + # set annotation + vm = task.info.result + if self.params['annotation']: + annotation_spec = vim.vm.ConfigSpec() + annotation_spec.annotation = str(self.params['annotation']) + task = vm.ReconfigVM_Task(annotation_spec) + self.wait_for_task(task) + if wait_for_ip: + self.set_powerstate(vm, 'poweredon', force=False) + self.wait_for_vm_ip(vm) + vm_facts = self.gather_facts(vm) + return ({'changed': True, 'failed': False, 'instance': vm_facts}) + + def wait_for_task(self, task): + # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html + # 
https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html + # https://github.com/virtdevninja/pyvmomi-community-samples/blob/master/samples/tools/tasks.py + while task.info.state not in ['success', 'error']: + time.sleep(1) + + def wait_for_vm_ip(self, vm, poll=100, sleep=5): + ips = None + facts = {} + thispoll = 0 + while not ips and thispoll <= poll: + newvm = self.getvm(uuid=vm.config.uuid) + facts = self.gather_facts(newvm) + if facts['ipv4'] or facts['ipv6']: + ips = True + else: + time.sleep(sleep) + thispoll += 1 + + return facts + + + def fetch_file_from_guest(self, vm, username, password, src, dest): + + ''' Use VMWare's filemanager api to fetch a file over http ''' + + result = {'failed': False} + + tools_status = vm.guest.toolsStatus + if (tools_status == 'toolsNotInstalled' or + tools_status == 'toolsNotRunning'): + result['failed'] = True + result['msg'] = "VMwareTools is not installed or is not running in the guest" + return result + + # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst + creds = vim.vm.guest.NamePasswordAuthentication( + username=username, password=password + ) + + # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/FileManager/FileTransferInformation.rst + fti = self.content.guestOperationsManager.fileManager. \ + InitiateFileTransferFromGuest(vm, creds, src) + + result['size'] = fti.size + result['url'] = fti.url + + # Use module_utils to fetch the remote url returned from the api + rsp, info = fetch_url(self.module, fti.url, use_proxy=False, + force=True, last_mod_time=None, + timeout=10, headers=None) + + # save all of the transfer data + for k,v in info.iteritems(): + result[k] = v + + # exit early if xfer failed + if info['status'] != 200: + result['failed'] = True + return result + + # attempt to read the content and write it + try: + with open(dest, 'wb') as f: + f.write(rsp.read()) + except Exception as e: + result['failed'] = True + result['msg'] = str(e) + + return result + + + def push_file_to_guest(self, vm, username, password, src, dest, overwrite=True): + + ''' Use VMWare's filemanager api to push a file over http ''' + + result = {'failed': False} + + tools_status = vm.guest.toolsStatus + if (tools_status == 'toolsNotInstalled' or + tools_status == 'toolsNotRunning'): + result['failed'] = True + result['msg'] = "VMwareTools is not installed or is not running in the guest" + return result + + # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst + creds = vim.vm.guest.NamePasswordAuthentication( + username=username, password=password + ) + + # the api requires a filesize in bytes + filesize = None + fdata = None + try: + #filesize = os.path.getsize(src) + filesize = os.stat(src).st_size + fdata = None + with open(src, 'rb') as f: + fdata = f.read() + result['local_filesize'] = filesize + except Exception as e: + result['failed'] = True + result['msg'] = "Unable to read src file: %s" % str(e) + return result + + # https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.vm.guest.FileManager.html#initiateFileTransferToGuest + file_attribute = vim.vm.guest.FileManager.FileAttributes() + url = self.content.guestOperationsManager.fileManager. \ + InitiateFileTransferToGuest(vm, creds, dest, file_attribute, + filesize, overwrite) + + # PUT the filedata to the url ... 
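+        # fetch_url returns a (response, info) pair; info is a dict that
+        # always carries a 'status' key (the HTTP status code, or -1 if the
+        # connection failed) plus the response headers, and the request
+        # honors the module's validate_certs setting.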
+ rsp, info = fetch_url(self.module, url, method="put", data=fdata, + use_proxy=False, force=True, last_mod_time=None, + timeout=10, headers=None) + + result['msg'] = str(rsp.read()) + + # save all of the transfer data + for k,v in info.iteritems(): + result[k] = v + + return result + + + def run_command_in_guest(self, vm, username, password, program_path, program_args, program_cwd, program_env): + + result = {'failed': False} + + tools_status = vm.guest.toolsStatus + if (tools_status == 'toolsNotInstalled' or + tools_status == 'toolsNotRunning'): + result['failed'] = True + result['msg'] = "VMwareTools is not installed or is not running in the guest" + return result + + # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst + creds = vim.vm.guest.NamePasswordAuthentication( + username=username, password=password + ) + + res = None + pdata = None + try: + # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/ProcessManager.rst + pm = self.content.guestOperationsManager.processManager + # https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/vim.vm.guest.ProcessManager.ProgramSpec.html + ps = vim.vm.guest.ProcessManager.ProgramSpec( + #programPath=program, + #arguments=args + programPath=program_path, + arguments=program_args, + workingDirectory=program_cwd, + ) + res = pm.StartProgramInGuest(vm, creds, ps) + result['pid'] = res + pdata = pm.ListProcessesInGuest(vm, creds, [res]) + + # wait for pid to finish + while not pdata[0].endTime: + time.sleep(1) + pdata = pm.ListProcessesInGuest(vm, creds, [res]) + result['owner'] = pdata[0].owner + result['startTime'] = pdata[0].startTime.isoformat() + result['endTime'] = pdata[0].endTime.isoformat() + result['exitCode'] = pdata[0].exitCode + if result['exitCode'] != 0: + result['failed'] = True + result['msg'] = "program exited non-zero" + else: + result['msg'] = "program completed successfully" + + except Exception as e: + result['msg'] = str(e) + result['failed'] = True + + return result + + def list_snapshots_recursively(self, snapshots): + snapshot_data = [] + snap_text = '' + for snapshot in snapshots: + snap_text = 'Id: %s; Name: %s; Description: %s; CreateTime: %s; State: %s'%(snapshot.id, snapshot.name, + snapshot.description, snapshot.createTime, snapshot.state) + snapshot_data.append(snap_text) + snapshot_data = snapshot_data + self.list_snapshots_recursively(snapshot.childSnapshotList) + return snapshot_data + + + def get_snapshots_by_name_recursively(self, snapshots, snapname): + snap_obj = [] + for snapshot in snapshots: + if snapshot.name == snapname: + snap_obj.append(snapshot) + else: + snap_obj = snap_obj + self.get_snapshots_by_name_recursively(snapshot.childSnapshotList, snapname) + return snap_obj + + def get_current_snap_obj(self, snapshots, snapob): + snap_obj = [] + for snapshot in snapshots: + if snapshot.snapshot == snapob: + snap_obj.append(snapshot) + snap_obj = snap_obj + self.get_current_snap_obj(snapshot.childSnapshotList, snapob) + return snap_obj + + def snapshot_vm(self, vm, guest, snapshot_op): + ''' To perform snapshot operations create/remove/revert/list_all/list_current/remove_all ''' + + try: + snapshot_op_name = snapshot_op['op_type'] + except KeyError: + self.module.fail_json(msg="Specify op_type - create/remove/revert/list_all/list_current/remove_all") + + task = None + result = {} + + if snapshot_op_name not in ['create', 'remove', 'revert', 'list_all', 'list_current', 'remove_all']: + 
self.module.fail_json(msg="Specify op_type - create/remove/revert/list_all/list_current/remove_all") + + if snapshot_op_name != 'create' and vm.snapshot is None: + self.module.exit_json(msg="VM - %s doesn't have any snapshots"%guest) + + if snapshot_op_name == 'create': + try: + snapname = snapshot_op['name'] + except KeyError: + self.module.fail_json(msg="specify name & description(optional) to create a snapshot") + + if 'description' in snapshot_op: + snapdesc = snapshot_op['description'] + else: + snapdesc = '' + + dumpMemory = False + quiesce = False + task = vm.CreateSnapshot(snapname, snapdesc, dumpMemory, quiesce) + + elif snapshot_op_name in ['remove', 'revert']: + try: + snapname = snapshot_op['name'] + except KeyError: + self.module.fail_json(msg="specify snapshot name") + + snap_obj = self.get_snapshots_by_name_recursively(vm.snapshot.rootSnapshotList, snapname) + + #if len(snap_obj) is 0; then no snapshots with specified name + if len(snap_obj) == 1: + snap_obj = snap_obj[0].snapshot + if snapshot_op_name == 'remove': + task = snap_obj.RemoveSnapshot_Task(True) + else: + task = snap_obj.RevertToSnapshot_Task() + else: + self.module.exit_json(msg="Couldn't find any snapshots with specified name: %s on VM: %s"%(snapname, guest)) + + elif snapshot_op_name == 'list_all': + snapshot_data = self.list_snapshots_recursively(vm.snapshot.rootSnapshotList) + result['snapshot_data'] = snapshot_data + + elif snapshot_op_name == 'list_current': + current_snapref = vm.snapshot.currentSnapshot + current_snap_obj = self.get_current_snap_obj(vm.snapshot.rootSnapshotList, current_snapref) + result['current_snapshot'] = 'Id: %s; Name: %s; Description: %s; CreateTime: %s; State: %s'%(current_snap_obj[0].id, + current_snap_obj[0].name, current_snap_obj[0].description, current_snap_obj[0].createTime, + current_snap_obj[0].state) + + elif snapshot_op_name == 'remove_all': + task = vm.RemoveAllSnapshots() + + if task: + self.wait_for_task(task) + if task.info.state == 'error': + result = {'changed': False, 'failed': True, 'msg': task.info.error.msg} + else: + result = {'changed': True, 'failed': False} + + return result + +def get_obj(content, vimtype, name): + """ + Return an object by name, if name is None the + first found object is returned + """ + obj = None + container = content.viewManager.CreateContainerView( + content.rootFolder, vimtype, True) + for c in container.view: + if name: + if c.name == name: + obj = c + break + else: + obj = c + break + + container.Destroy() + return obj + + +def main(): + + vm = None + + module = AnsibleModule( + argument_spec=dict( + hostname=dict( + type='str', + default=os.environ.get('VMWARE_HOST') + ), + username=dict( + type='str', + default=os.environ.get('VMWARE_USER') + ), + password=dict( + type='str', no_log=True, + default=os.environ.get('VMWARE_PASSWORD') + ), + state=dict( + required=False, + choices=[ + 'poweredon', + 'poweredoff', + 'present', + 'absent', + 'restarted', + 'reconfigured' + ], + default='present'), + validate_certs=dict(required=False, type='bool', default=True), + template_src=dict(required=False, type='str', aliases=['template']), + annotation=dict(required=False, type='str', aliases=['notes']), + name=dict(required=True, type='str'), + name_match=dict(required=False, type='str', default='first'), + snapshot_op=dict(required=False, type='dict', default={}), + uuid=dict(required=False, type='str'), + folder=dict(required=False, type='str', default='/vm', aliases=['folder']), + disk=dict(required=False, type='list'), + 
nic=dict(required=False, type='list'), + hardware=dict(required=False, type='dict', default={}), + force=dict(required=False, type='bool', default=False), + datacenter=dict(required=False, type='str', default=None), + esxi_hostname=dict(required=False, type='str', default=None), + cluster=dict(required=False, type='str', default=None), + wait_for_ip_address=dict(required=False, type='bool', default=True), + customize=dict(required=False, type='bool', default=False), + ips=dict(required=False, type='str', default=None), + dns_servers=dict(required=False, type='list', default=None), + domain=dict(required=False, type='str', default=None), + networks=dict(required=False, type='dict', default={}) + ), + supports_check_mode=True, + mutually_exclusive=[], + required_together=[ + ['state', 'force'], + ['template'], + ], + ) + + pyv = PyVmomiHelper(module) + + # Check if the VM exists before continuing + vm = pyv.getvm(name=module.params['name'], + folder=module.params['folder'], + uuid=module.params['uuid'], + name_match=module.params['name_match']) + + # VM already exists + if vm: + + if module.params['state'] == 'absent': + # destroy it + if module.params['force']: + # has to be poweredoff first + result = pyv.set_powerstate(vm, 'poweredoff', module.params['force']) + result = pyv.remove_vm(vm) + elif module.params['state'] in ['poweredon', 'poweredoff', 'restarted']: + # set powerstate + result = pyv.set_powerstate(vm, module.params['state'], module.params['force']) + elif module.params['snapshot_op']: + result = pyv.snapshot_vm(vm, module.params['name'], module.params['snapshot_op']) + else: + # Run for facts only + try: + module.exit_json(instance=pyv.gather_facts(vm)) + except Exception: + e = get_exception() + module.fail_json( + msg="Fact gather failed with exception %s" % e) + + # VM doesn't exist + else: + create_states = ['poweredon', 'poweredoff', 'present', 'restarted'] + if module.params['state'] in create_states: + poweron = (module.params['state'] != 'poweredoff') + # Create it ... + result = pyv.deploy_template( + poweron=poweron, + wait_for_ip=module.params['wait_for_ip_address'] + ) + result['changed'] = True + elif module.params['state'] == 'absent': + result = {'changed': False, 'failed': False} + else: + result = {'changed': False, 'failed': False} + + # FIXME + if not 'failed' in result: + result['failed'] = False + + if result['failed']: + module.fail_json(**result) + else: + module.exit_json(**result) + + +from ansible.module_utils.vmware import * +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/cloud/vmware/vmware_host.py b/cloud/vmware/vmware_host.py index 162397a2190..22cb82d55db 100644 --- a/cloud/vmware/vmware_host.py +++ b/cloud/vmware/vmware_host.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
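+# In ANSIBLE_METADATA, 'version' is the version of the metadata format
+# itself, not of the module; 'status' and 'supported_by' record the
+# module's support tier.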
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: vmware_host @@ -32,20 +36,6 @@ - "python >= 2.6" - PyVmomi options: - hostname: - description: - - The hostname or IP address of the vSphere vCenter API server - required: True - username: - description: - - The username of the vSphere vCenter - required: True - aliases: ['user', 'admin'] - password: - description: - - The password of the vSphere vCenter - required: True - aliases: ['pass', 'pwd'] datacenter_name: description: - Name of the datacenter to add the host @@ -74,6 +64,7 @@ - 'present' - 'absent' required: False +extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' @@ -100,102 +91,118 @@ HAS_PYVMOMI = False -def find_host_by_cluster_datacenter(module): - datacenter_name = module.params['datacenter_name'] - cluster_name = module.params['cluster_name'] - content = module.params['content'] - esxi_hostname = module.params['esxi_hostname'] - - dc = find_datacenter_by_name(content, datacenter_name) - cluster = find_cluster_by_name_datacenter(dc, cluster_name) - - for host in cluster.host: - if host.name == esxi_hostname: - return host, cluster - - return None, cluster - - -def add_host_to_vcenter(module): - cluster = module.params['cluster'] - - host_connect_spec = vim.host.ConnectSpec() - host_connect_spec.hostName = module.params['esxi_hostname'] - host_connect_spec.userName = module.params['esxi_username'] - host_connect_spec.password = module.params['esxi_password'] - host_connect_spec.force = True - host_connect_spec.sslThumbprint = "" - as_connected = True - esxi_license = None - resource_pool = None +class VMwareHost(object): + def __init__(self, module): + self.module = module + self.datacenter_name = module.params['datacenter_name'] + self.cluster_name = module.params['cluster_name'] + self.esxi_hostname = module.params['esxi_hostname'] + self.esxi_username = module.params['esxi_username'] + self.esxi_password = module.params['esxi_password'] + self.state = module.params['state'] + self.dc = None + self.cluster = None + self.host = None + self.content = connect_to_api(module) + + def process_state(self): + try: + # Currently state_update_dvs is not implemented. 
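+            # host_states maps desired state -> current state -> handler;
+            # check_host_state() supplies the second key, so a host that is
+            # already present when state=present simply exits unchanged.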
+ host_states = { + 'absent': { + 'present': self.state_remove_host, + 'absent': self.state_exit_unchanged, + }, + 'present': { + 'present': self.state_exit_unchanged, + 'absent': self.state_add_host, + } + } - try: - task = cluster.AddHost_Task(host_connect_spec, as_connected, resource_pool, esxi_license) + host_states[self.state][self.check_host_state()]() + + except vmodl.RuntimeFault as runtime_fault: + self.module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + self.module.fail_json(msg=method_fault.msg) + except Exception as e: + self.module.fail_json(msg=str(e)) + + def find_host_by_cluster_datacenter(self): + self.dc = find_datacenter_by_name(self.content, self.datacenter_name) + self.cluster = find_cluster_by_name_datacenter(self.dc, self.cluster_name) + + for host in self.cluster.host: + if host.name == self.esxi_hostname: + return host, self.cluster + + return None, self.cluster + + def add_host_to_vcenter(self): + host_connect_spec = vim.host.ConnectSpec() + host_connect_spec.hostName = self.esxi_hostname + host_connect_spec.userName = self.esxi_username + host_connect_spec.password = self.esxi_password + host_connect_spec.force = True + host_connect_spec.sslThumbprint = "" + as_connected = True + esxi_license = None + resource_pool = None + + try: + task = self.cluster.AddHost_Task(host_connect_spec, as_connected, resource_pool, esxi_license) + success, result = wait_for_task(task) + return success, result + except TaskError as add_task_error: + # This is almost certain to fail the first time. + # In order to get the sslThumbprint we first connect + # get the vim.fault.SSLVerifyFault then grab the sslThumbprint + # from that object. + # + # args is a tuple, selecting the first tuple + ssl_verify_fault = add_task_error.args[0] + host_connect_spec.sslThumbprint = ssl_verify_fault.thumbprint + + task = self.cluster.AddHost_Task(host_connect_spec, as_connected, resource_pool, esxi_license) success, result = wait_for_task(task) return success, result - except TaskError as add_task_error: - # This is almost certain to fail the first time. - # In order to get the sslThumbprint we first connect - # get the vim.fault.SSLVerifyFault then grab the sslThumbprint - # from that object. 
- # - # args is a tuple, selecting the first tuple - ssl_verify_fault = add_task_error.args[0] - host_connect_spec.sslThumbprint = ssl_verify_fault.thumbprint - - task = cluster.AddHost_Task(host_connect_spec, as_connected, resource_pool, esxi_license) - success, result = wait_for_task(task) - return success, result - - -def state_exit_unchanged(module): - module.exit_json(changed=False) - - -def state_remove_host(module): - host = module.params['host'] - changed = True - result = None - if not module.check_mode: - if not host.runtime.inMaintenanceMode: - maintenance_mode_task = host.EnterMaintenanceMode_Task(300, True, None) - changed, result = wait_for_task(maintenance_mode_task) - - if changed: - task = host.Destroy_Task() - changed, result = wait_for_task(task) - else: - raise Exception(result) - module.exit_json(changed=changed, result=str(result)) - - -def state_update_host(module): - module.exit_json(changed=False, msg="Currently not implemented.") + def state_exit_unchanged(self): + self.module.exit_json(changed=False) -def state_add_host(module): + def state_remove_host(self): + changed = True + result = None + if not self.module.check_mode: + if not self.host.runtime.inMaintenanceMode: + maintenance_mode_task = self.host.EnterMaintenanceMode_Task(300, True, None) + changed, result = wait_for_task(maintenance_mode_task) - changed = True - result = None + if changed: + task = self.host.Destroy_Task() + changed, result = wait_for_task(task) + else: + raise Exception(result) + self.module.exit_json(changed=changed, result=str(result)) - if not module.check_mode: - changed, result = add_host_to_vcenter(module) - module.exit_json(changed=changed, result=str(result)) + def state_update_host(self): + self.module.exit_json(changed=False, msg="Currently not implemented.") + def state_add_host(self): + changed = True + result = None -def check_host_state(module): + if not self.module.check_mode: + changed, result = self.add_host_to_vcenter() + self.module.exit_json(changed=changed, result=str(result)) - content = connect_to_api(module) - module.params['content'] = content + def check_host_state(self): + self.host, self.cluster = self.find_host_by_cluster_datacenter() - host, cluster = find_host_by_cluster_datacenter(module) - - module.params['cluster'] = cluster - if host is None: - return 'absent' - else: - module.params['host'] = host - return 'present' + if self.host is None: + return 'absent' + else: + return 'present' def main(): @@ -212,27 +219,8 @@ def main(): if not HAS_PYVMOMI: module.fail_json(msg='pyvmomi is required for this module') - try: - # Currently state_update_dvs is not implemented. 
- host_states = { - 'absent': { - 'present': state_remove_host, - 'absent': state_exit_unchanged, - }, - 'present': { - 'present': state_exit_unchanged, - 'absent': state_add_host, - } - } - - host_states[module.params['state']][check_host_state(module)](module) - - except vmodl.RuntimeFault as runtime_fault: - module.fail_json(msg=runtime_fault.msg) - except vmodl.MethodFault as method_fault: - module.fail_json(msg=method_fault.msg) - except Exception as e: - module.fail_json(msg=str(e)) + vmware_host = VMwareHost(module) + vmware_host.process_state() from ansible.module_utils.vmware import * from ansible.module_utils.basic import * diff --git a/cloud/vmware/vmware_local_user_manager.py b/cloud/vmware/vmware_local_user_manager.py new file mode 100644 index 00000000000..ac52b57465a --- /dev/null +++ b/cloud/vmware/vmware_local_user_manager.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright IBM Corp. 2016 +# Author(s): Andreas Nafpliotis + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see = 2.6" + - PyVmomi installed +options: + local_user_name: + description: + - The local user name to be changed + required: True + local_user_password: + description: + - The password to be set + required: False + local_user_description: + description: + - Description for the user + required: False + state: + description: + - Indicate desired state of the user. 
If the user already exists when C(state=present), the user info is updated + choices: ['present', 'absent'] + default: present +extends_documentation_fragment: vmware.documentation +''' + +EXAMPLES = ''' +# Example vmware_local_user_manager command from Ansible Playbooks +- name: Add local user to ESXi + local_action: + module: vmware_local_user_manager + hostname: esxi_hostname + username: root + password: vmware + local_user_name: foo +''' + +RETURN = '''# ''' + +try: + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +class VMwareLocalUserManager(object): + def __init__(self, module): + self.module = module + self.content = connect_to_api(self.module) + self.local_user_name = self.module.params['local_user_name'] + self.local_user_password = self.module.params['local_user_password'] + self.local_user_description = self.module.params['local_user_description'] + self.state = self.module.params['state'] + + def process_state(self): + try: + local_account_manager_states = { + 'absent': { + 'present': self.state_remove_user, + 'absent': self.state_exit_unchanged, + }, + 'present': { + 'present': self.state_update_user, + 'absent': self.state_create_user, + } + } + + local_account_manager_states[self.state][self.check_local_user_manager_state()]() + except vmodl.RuntimeFault as runtime_fault: + self.module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + self.module.fail_json(msg=method_fault.msg) + except Exception as e: + self.module.fail_json(msg=str(e)) + + + def check_local_user_manager_state(self): + user_account = self.find_user_account() + if not user_account: + return 'absent' + else: + return 'present' + + + def find_user_account(self): + searchStr = self.local_user_name + exactMatch = True + findUsers = True + findGroups = False + user_account = self.content.userDirectory.RetrieveUserGroups(None, searchStr, None, None, exactMatch, findUsers, findGroups) + return user_account + + + def create_account_spec(self): + account_spec = vim.host.LocalAccountManager.AccountSpecification() + account_spec.id = self.local_user_name + account_spec.password = self.local_user_password + account_spec.description = self.local_user_description + return account_spec + + + def state_create_user(self): + account_spec = self.create_account_spec() + + try: + task = self.content.accountManager.CreateUser(account_spec) + self.module.exit_json(changed=True) + except vmodl.RuntimeFault as runtime_fault: + self.module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + self.module.fail_json(msg=method_fault.msg) + + def state_update_user(self): + account_spec = self.create_account_spec() + + try: + task = self.content.accountManager.UpdateUser(account_spec) + self.module.exit_json(changed=True) + except vmodl.RuntimeFault as runtime_fault: + self.module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + self.module.fail_json(msg=method_fault.msg) + + + def state_remove_user(self): + try: + task = self.content.accountManager.RemoveUser(self.local_user_name) + self.module.exit_json(changed=True) + except vmodl.RuntimeFault as runtime_fault: + self.module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + self.module.fail_json(msg=method_fault.msg) + + + def state_exit_unchanged(self): + self.module.exit_json(changed=False) + + + +def main(): + + argument_spec = vmware_argument_spec() + argument_spec.update(dict(local_user_name=dict(required=True, 
type='str'), + local_user_password=dict(required=False, type='str', no_log=True), + local_user_description=dict(required=False, type='str'), + state=dict(default='present', choices=['present', 'absent'], type='str'))) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + + if not HAS_PYVMOMI: + module.fail_json(msg='pyvmomi is required for this module') + + vmware_local_user_manager = VMwareLocalUserManager(module) + vmware_local_user_manager.process_state() + +from ansible.module_utils.vmware import * +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/cloud/vmware/vmware_maintenancemode.py b/cloud/vmware/vmware_maintenancemode.py new file mode 100644 index 00000000000..54e8958900a --- /dev/null +++ b/cloud/vmware/vmware_maintenancemode.py @@ -0,0 +1,216 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, VMware, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: vmware_maintenancemode +short_description: Place a host into maintenance mode +description: + - Place an ESXi host into maintenance mode + - Support for VSAN compliant maintenance mode when selected +author: "Jay Jahns" +version_added: "2.1" +notes: + - Tested on vSphere 5.5 and 6.0 +requirements: + - "python >= 2.6" + - PyVmomi +options: + esxi_hostname: + description: + - Name of the host as defined in vCenter + required: True + vsan: + description: + - Specify which VSAN compliant mode to enter + choices: + - 'ensureObjectAccessibility' + - 'evacuateAllData' + - 'noAction' + required: False + evacuate: + description: + - If True, evacuate all powered off VMs + choices: + - True + - False + default: False + required: False + timeout: + description: + - Specify a timeout for the operation + required: False + default: 0 + state: + description: + - Enter or exit maintenance mode + choices: + - present + - absent + default: present + required: False +extends_documentation_fragment: vmware.documentation +''' + +EXAMPLES = ''' +- name: Enter VSAN-Compliant Maintenance Mode + local_action: + module: vmware_maintenancemode + hostname: vc_host + username: vc_user + password: vc_pass + esxi_hostname: esxi.host.example + vsan: ensureObjectAccessibility + evacuate: yes + timeout: 3600 + state: present +''' +RETURN = ''' +hostsystem: + description: Name of vim reference + returned: always + type: string + sample: "'vim.HostSystem:host-236'" +hostname: + description: Name of host in vCenter + returned: always + type: string + sample: "esxi.local.domain" +status: + description: Action taken + returned: always + type: string + sample: "ENTER" +''' + +try: + from pyVmomi import vim + HAS_PYVMOMI = True + +except ImportError: + HAS_PYVMOMI = False + + +def EnterMaintenanceMode(module, host): + + if host.runtime.inMaintenanceMode: +
module.exit_json( + changed=False, + hostsystem=str(host), + hostname=module.params['esxi_hostname'], + status='NO_ACTION', + msg='Host already in maintenance mode') + + spec = vim.host.MaintenanceSpec() + + if module.params['vsan']: + spec.vsanMode = vim.vsan.host.DecommissionMode() + spec.vsanMode.objectAction = module.params['vsan'] + + try: + task = host.EnterMaintenanceMode_Task( + module.params['timeout'], + module.params['evacuate'], + spec) + + success, result = wait_for_task(task) + + return dict(changed=success, + hostsystem=str(host), + hostname=module.params['esxi_hostname'], + status='ENTER', + msg='Host entered maintenance mode') + + except TaskError: + module.fail_json( + msg='Host failed to enter maintenance mode') + + +def ExitMaintenanceMode(module, host): + if not host.runtime.inMaintenanceMode: + module.exit_json( + changed=False, + hostsystem=str(host), + hostname=module.params['esxi_hostname'], + status='NO_ACTION', + msg='Host not in maintenance mode') + + try: + task = host.ExitMaintenanceMode_Task( + module.params['timeout']) + + success, result = wait_for_task(task) + + return dict(changed=success, + hostsystem=str(host), + hostname=module.params['esxi_hostname'], + status='EXIT', + msg='Host exited maintenance mode') + + except TaskError: + module.fail_json( + msg='Host failed to exit maintenance mode') + + +def main(): + spec = vmware_argument_spec() + spec.update(dict( + esxi_hostname=dict(required=True), + vsan=dict(required=False, choices=['ensureObjectAccessibility', + 'evacuateAllData', + 'noAction']), + evacuate=dict(required=False, type='bool', default=False), + timeout=dict(required=False, default=0, type='int'), + state=dict(required=False, + default='present', + choices=['present', 'absent']))) + + module = AnsibleModule(argument_spec=spec) + + if not HAS_PYVMOMI: + module.fail_json(msg='pyvmomi is required for this module') + + content = connect_to_api(module) + host = find_hostsystem_by_name(content, module.params['esxi_hostname']) + + if not host: + module.fail_json( + msg='Host not found in vCenter') + + if module.params['state'] == 'present': + result = EnterMaintenanceMode(module, host) + + elif module.params['state'] == 'absent': + result = ExitMaintenanceMode(module, host) + + module.exit_json(**result) + + +from ansible.module_utils.basic import * +from ansible.module_utils.vmware import * + + +if __name__ == '__main__': + main() diff --git a/cloud/vmware/vmware_migrate_vmk.py b/cloud/vmware/vmware_migrate_vmk.py index c658c71b682..730102c2049 100644 --- a/cloud/vmware/vmware_migrate_vmk.py +++ b/cloud/vmware/vmware_migrate_vmk.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
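The EnterMaintenanceMode/ExitMaintenanceMode pair in the maintenance-mode module above is deliberately idempotent: each checks host.runtime.inMaintenanceMode first and exits with changed=False and status NO_ACTION before any task is issued. A minimal plain-Python sketch of that contract (FakeHost and ensure_maintenance are stand-ins, not the pyVmomi API):

    # Stand-ins for the pyVmomi host object and the Enter/Exit *_Task calls.
    class FakeHost(object):
        def __init__(self, in_maintenance=False):
            self.in_maintenance = in_maintenance

    def ensure_maintenance(host, desired):
        if host.in_maintenance == desired:
            return {'changed': False, 'status': 'NO_ACTION'}
        host.in_maintenance = desired  # the real module issues a task and waits for it
        return {'changed': True, 'status': 'ENTER' if desired else 'EXIT'}

    host = FakeHost()
    print(ensure_maintenance(host, True))   # {'changed': True, 'status': 'ENTER'}
    print(ensure_maintenance(host, True))   # {'changed': False, 'status': 'NO_ACTION'}

Running the same play twice therefore reports a change only on the first run, which is what makes the module safe in convergence loops.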
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: vmware_migrate_vmk @@ -32,20 +36,6 @@ - "python >= 2.6" - PyVmomi options: - hostname: - description: - - The hostname or IP address of the vSphere vCenter API server - required: True - username: - description: - - The username of the vSphere vCenter - required: True - aliases: ['user', 'admin'] - password: - description: - - The password of the vSphere vCenter - required: True - aliases: ['pass', 'pwd'] esxi_hostname: description: - ESXi hostname to be managed @@ -70,6 +60,7 @@ description: - Portgroup name to migrate VMK interface to required: True +extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' @@ -88,8 +79,6 @@ migrate_switch_name: dvSwitch migrate_portgroup_name: Management ''' - - try: from pyVmomi import vim, vmodl HAS_PYVMOMI = True @@ -97,88 +86,93 @@ HAS_PYVMOMI = False -def state_exit_unchanged(module): - module.exit_json(changed=False) - - -def state_migrate_vds_vss(module): - module.exit_json(changed=False, msg="Currently Not Implemented") - - -def create_host_vnic_config(dv_switch_uuid, portgroup_key, device): - - host_vnic_config = vim.host.VirtualNic.Config() - host_vnic_config.spec = vim.host.VirtualNic.Specification() - host_vnic_config.changeOperation = "edit" - host_vnic_config.device = device - host_vnic_config.portgroup = "" - host_vnic_config.spec.distributedVirtualPort = vim.dvs.PortConnection() - host_vnic_config.spec.distributedVirtualPort.switchUuid = dv_switch_uuid - host_vnic_config.spec.distributedVirtualPort.portgroupKey = portgroup_key - - return host_vnic_config - - -def create_port_group_config(switch_name, portgroup_name): - port_group_config = vim.host.PortGroup.Config() - port_group_config.spec = vim.host.PortGroup.Specification() - - port_group_config.changeOperation = "remove" - port_group_config.spec.name = portgroup_name - port_group_config.spec.vlanId = -1 - port_group_config.spec.vswitchName = switch_name - port_group_config.spec.policy = vim.host.NetworkPolicy() - - return port_group_config - - -def state_migrate_vss_vds(module): - content = module.params['content'] - host_system = module.params['host_system'] - migrate_switch_name = module.params['migrate_switch_name'] - migrate_portgroup_name = module.params['migrate_portgroup_name'] - current_portgroup_name = module.params['current_portgroup_name'] - current_switch_name = module.params['current_switch_name'] - device = module.params['device'] - - host_network_system = host_system.configManager.networkSystem - - dv_switch = find_dvs_by_name(content, migrate_switch_name) - pg = find_dvspg_by_name(dv_switch, migrate_portgroup_name) - - config = vim.host.NetworkConfig() - config.portgroup = [create_port_group_config(current_switch_name, current_portgroup_name)] - config.vnic = [create_host_vnic_config(dv_switch.uuid, pg.key, device)] - host_network_system.UpdateNetworkConfig(config, "modify") - module.exit_json(changed=True) - - -def check_vmk_current_state(module): - - device = module.params['device'] - esxi_hostname = module.params['esxi_hostname'] - current_portgroup_name = module.params['current_portgroup_name'] - current_switch_name = module.params['current_switch_name'] - - content = connect_to_api(module) - - host_system = find_hostsystem_by_name(content, esxi_hostname) - - module.params['content'] = content - module.params['host_system'] = host_system - - for vnic in host_system.configManager.networkSystem.networkInfo.vnic: - if 
vnic.device == device: - module.params['vnic'] = vnic - if vnic.spec.distributedVirtualPort is None: - if vnic.portgroup == current_portgroup_name: - return "migrate_vss_vds" - else: - dvs = find_dvs_by_name(content, current_switch_name) - if dvs is None: - return "migrated" - if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid: - return "migrate_vds_vss" +class VMwareMigrateVmk(object): + def __init__(self, module): + self.module = module + self.host_system = None + self.migrate_switch_name = self.module.params['migrate_switch_name'] + self.migrate_portgroup_name = self.module.params['migrate_portgroup_name'] + self.device = self.module.params['device'] + self.esxi_hostname = self.module.params['esxi_hostname'] + self.current_portgroup_name = self.module.params['current_portgroup_name'] + self.current_switch_name = self.module.params['current_switch_name'] + self.content = connect_to_api(module) + + def process_state(self): + try: + vmk_migration_states = { + 'migrate_vss_vds': self.state_migrate_vss_vds, + 'migrate_vds_vss': self.state_migrate_vds_vss, + 'migrated': self.state_exit_unchanged + } + + vmk_migration_states[self.check_vmk_current_state()]() + + except vmodl.RuntimeFault as runtime_fault: + self.module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + self.module.fail_json(msg=method_fault.msg) + except Exception as e: + self.module.fail_json(msg=str(e)) + + def state_exit_unchanged(self): + self.module.exit_json(changed=False) + + def state_migrate_vds_vss(self): + self.module.exit_json(changed=False, msg="Currently Not Implemented") + + def create_host_vnic_config(self, dv_switch_uuid, portgroup_key): + host_vnic_config = vim.host.VirtualNic.Config() + host_vnic_config.spec = vim.host.VirtualNic.Specification() + + host_vnic_config.changeOperation = "edit" + host_vnic_config.device = self.device + host_vnic_config.portgroup = "" + host_vnic_config.spec.distributedVirtualPort = vim.dvs.PortConnection() + host_vnic_config.spec.distributedVirtualPort.switchUuid = dv_switch_uuid + host_vnic_config.spec.distributedVirtualPort.portgroupKey = portgroup_key + + return host_vnic_config + + def create_port_group_config(self): + port_group_config = vim.host.PortGroup.Config() + port_group_config.spec = vim.host.PortGroup.Specification() + + port_group_config.changeOperation = "remove" + port_group_config.spec.name = self.current_portgroup_name + port_group_config.spec.vlanId = -1 + port_group_config.spec.vswitchName = self.current_switch_name + port_group_config.spec.policy = vim.host.NetworkPolicy() + + return port_group_config + + def state_migrate_vss_vds(self): + host_network_system = self.host_system.configManager.networkSystem + + dv_switch = find_dvs_by_name(self.content, self.migrate_switch_name) + pg = find_dvspg_by_name(dv_switch, self.migrate_portgroup_name) + + config = vim.host.NetworkConfig() + config.portgroup = [self.create_port_group_config()] + config.vnic = [self.create_host_vnic_config(dv_switch.uuid, pg.key)] + host_network_system.UpdateNetworkConfig(config, "modify") + self.module.exit_json(changed=True) + + def check_vmk_current_state(self): + self.host_system = find_hostsystem_by_name(self.content, self.esxi_hostname) + + for vnic in self.host_system.configManager.networkSystem.networkInfo.vnic: + if vnic.device == self.device: + #self.vnic = vnic + if vnic.spec.distributedVirtualPort is None: + if vnic.portgroup == self.current_portgroup_name: + return "migrate_vss_vds" + else: + dvs = find_dvs_by_name(self.content, 
self.current_switch_name) + if dvs is None: + return "migrated" + if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid: + return "migrate_vds_vss" def main(): @@ -194,23 +188,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) if not HAS_PYVMOMI: - module.fail_json(msg='pyvmomi required for this module') - - try: - vmk_migration_states = { - 'migrate_vss_vds': state_migrate_vss_vds, - 'migrate_vds_vss': state_migrate_vds_vss, - 'migrated': state_exit_unchanged - } - - vmk_migration_states[check_vmk_current_state(module)](module) - - except vmodl.RuntimeFault as runtime_fault: - module.fail_json(msg=runtime_fault.msg) - except vmodl.MethodFault as method_fault: - module.fail_json(msg=method_fault.msg) - except Exception as e: - module.fail_json(msg=str(e)) + module.fail_json(msg='pyvmomi is required for this module') + + vmware_migrate_vmk = VMwareMigrateVmk(module) + vmware_migrate_vmk.process_state() from ansible.module_utils.vmware import * from ansible.module_utils.basic import * diff --git a/cloud/vmware/vmware_portgroup.py b/cloud/vmware/vmware_portgroup.py index e354ded510f..089d584d039 100644 --- a/cloud/vmware/vmware_portgroup.py +++ b/cloud/vmware/vmware_portgroup.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: vmware_portgroup @@ -32,20 +36,6 @@ - "python >= 2.6" - PyVmomi options: - hostname: - description: - - The hostname or IP address of the ESXi server - required: True - username: - description: - - The username of the ESXi server - required: True - aliases: ['user', 'admin'] - password: - description: - - The password of the ESXi server - required: True - aliases: ['pass', 'pwd'] switch_name: description: - vSwitch to modify @@ -58,6 +48,15 @@ description: - VLAN ID to assign to portgroup required: True + network_policy: + description: + - Network policy specifies layer 2 security settings for a + portgroup such as promiscuous mode, where guest adapter listens + to all the packets, MAC address changes and forged transmits.
Settings are promiscuous_mode, forged_transmits, mac_changes + required: False + version_added: "2.2" +extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' @@ -72,6 +71,17 @@ switch_name: vswitch_name portgroup_name: portgroup_name vlan_id: vlan_id + + - name: Add Portgroup with Promiscuous Mode Enabled + local_action: + module: vmware_portgroup + hostname: esxi_hostname + username: esxi_username + password: esxi_password + switch_name: vswitch_name + portgroup_name: portgroup_name + network_policy: + promiscuous_mode: True ''' try: @@ -81,7 +91,20 @@ HAS_PYVMOMI = False -def create_port_group(host_system, portgroup_name, vlan_id, vswitch_name): +def create_network_policy(promiscuous_mode, forged_transmits, mac_changes): + + security_policy = vim.host.NetworkPolicy.SecurityPolicy() + if promiscuous_mode is not None: + security_policy.allowPromiscuous = promiscuous_mode + if forged_transmits is not None: + security_policy.forgedTransmits = forged_transmits + if mac_changes is not None: + security_policy.macChanges = mac_changes + network_policy = vim.host.NetworkPolicy(security=security_policy) + return network_policy + + +def create_port_group(host_system, portgroup_name, vlan_id, vswitch_name, network_policy): config = vim.host.NetworkConfig() config.portgroup = [vim.host.PortGroup.Config()] @@ -90,7 +113,7 @@ def create_port_group(host_system, portgroup_name, vlan_id, vswitch_name): config.portgroup[0].spec.name = portgroup_name config.portgroup[0].spec.vlanId = vlan_id config.portgroup[0].spec.vswitchName = vswitch_name - config.portgroup[0].spec.policy = vim.host.NetworkPolicy() + config.portgroup[0].spec.policy = network_policy host_network_config_result = host_system.configManager.networkSystem.UpdateNetworkConfig(config, "modify") return True @@ -101,7 +124,8 @@ def main(): argument_spec = vmware_argument_spec() argument_spec.update(dict(portgroup_name=dict(required=True, type='str'), switch_name=dict(required=True, type='str'), - vlan_id=dict(required=True, type='int'))) + vlan_id=dict(required=True, type='int'), + network_policy=dict(required=False, type='dict', default={}))) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) @@ -111,6 +135,9 @@ def main(): portgroup_name = module.params['portgroup_name'] switch_name = module.params['switch_name'] vlan_id = module.params['vlan_id'] + promiscuous_mode = module.params['network_policy'].get('promiscuous_mode', None) + forged_transmits = module.params['network_policy'].get('forged_transmits', None) + mac_changes = module.params['network_policy'].get('mac_changes', None) try: content = connect_to_api(module) @@ -119,7 +146,11 @@ def main(): raise SystemExit("Unable to locate Physical Host.") host_system = host.keys()[0] - changed = create_port_group(host_system, portgroup_name, vlan_id, switch_name) + if find_host_portgroup_by_name(host_system, portgroup_name): + module.exit_json(changed=False) + + network_policy = create_network_policy(promiscuous_mode, forged_transmits, mac_changes) + changed = create_port_group(host_system, portgroup_name, vlan_id, switch_name, network_policy) module.exit_json(changed=changed) except vmodl.RuntimeFault as runtime_fault: diff --git a/cloud/vmware/vmware_target_canonical_facts.py b/cloud/vmware/vmware_target_canonical_facts.py index 987b4a98753..817d736d3ae 100644 --- a/cloud/vmware/vmware_target_canonical_facts.py +++ b/cloud/vmware/vmware_target_canonical_facts.py @@ -1,4 +1,4 @@ -#!/bin/python +#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Joseph Callen @@ -18,37 +18,28 @@ #
You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: vmware_target_canonical_facts short_description: Return canonical (NAA) from an ESXi host description: - Return canonical (NAA) from an ESXi host based on SCSI target ID -version_added: 2.0 +version_added: "2.0" author: Joseph Callen notes: requirements: - Tested on vSphere 5.5 - PyVmomi installed options: - hostname: - description: - - The hostname or IP address of the vSphere vCenter - required: True - username: - description: - - The username of the vSphere vCenter - required: True - aliases: ['user', 'admin'] - password: - description: - - The password of the vSphere vCenter - required: True - aliases: ['pass', 'pwd'] target_id: description: - The target id based on order of scsi device required: True +extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' diff --git a/cloud/vmware/vmware_vm_facts.py b/cloud/vmware/vmware_vm_facts.py index 3551477f243..46de7a39157 100644 --- a/cloud/vmware/vmware_vm_facts.py +++ b/cloud/vmware/vmware_vm_facts.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: vmware_vm_facts @@ -31,21 +35,7 @@ requirements: - "python >= 2.6" - PyVmomi -options: - hostname: - description: - - The hostname or IP address of the vSphere vCenter API server - required: True - username: - description: - - The username of the vSphere vCenter - required: True - aliases: ['user', 'admin'] - password: - description: - - The password of the vSphere vCenter - required: True - aliases: ['pass', 'pwd'] +extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' diff --git a/cloud/vmware/vmware_vm_shell.py b/cloud/vmware/vmware_vm_shell.py new file mode 100644 index 00000000000..34eb6b0f446 --- /dev/null +++ b/cloud/vmware/vmware_vm_shell.py @@ -0,0 +1,190 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, 2016 Ritesh Khadgaray +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
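The deletions above repeat a single refactor: every VMware module used to document its own hostname/username/password options, and each hunk replaces that copy with extends_documentation_fragment: vmware.documentation. The argument-spec side of the same dedup is the vmware_argument_spec() call visible in the main() functions. A sketch of the idea (the helper body here is illustrative; the real implementation lives in ansible.module_utils.vmware):

    # Illustrative stand-in for vmware_argument_spec(); the field names mirror
    # the per-module options being deleted in the hunks above.
    def vmware_argument_spec():
        return dict(
            hostname=dict(required=True, type='str'),
            username=dict(required=True, type='str', aliases=['user', 'admin']),
            password=dict(required=True, type='str', aliases=['pass', 'pwd'], no_log=True),
        )

    # Each module then layers only its own options on top of the shared ones.
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(target_id=dict(required=True, type='int')))
    print(sorted(argument_spec))  # ['hostname', 'password', 'target_id', 'username']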
+ +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: vmware_vm_shell +short_description: Execute a process in VM +description: + - Start a program in a VM without the need for network connection +version_added: 2.1 +author: "Ritesh Khadgaray (@ritzk)" +notes: + - Tested on vSphere 5.5 + - Only the first match against vm_id is used, even if there are multiple matches +requirements: + - "python >= 2.6" + - PyVmomi +options: + datacenter: + description: + - The datacenter hosting the VM + - Will help speed up search + required: False + default: None + cluster: + description: + - The cluster hosting the VM + - Will help speed up search + required: False + default: None + vm_id: + description: + - The identification for the VM + required: True + vm_id_type: + description: + - The identification tag for the VM + default: vm_name + choices: + - 'uuid' + - 'dns_name' + - 'inventory_path' + - 'vm_name' + required: False + vm_username: + description: + - The user to connect to the VM. + required: False + default: None + vm_password: + description: + - The password used to login to the VM. + required: False + default: None + vm_shell: + description: + - The absolute path to the program to start. On Linux this is executed via bash. + required: True + vm_shell_args: + description: + - The arguments to the program. + required: False + default: None + vm_shell_env: + description: + - Comma separated list of environment variables, specified in the guest OS notation + required: False + default: None + vm_shell_cwd: + description: + - The current working directory of the application from which it will be run + required: False + default: None +extends_documentation_fragment: vmware.documentation +''' + +EXAMPLES = ''' + - name: shell execution + local_action: + module: vmware_vm_shell + hostname: myVSphere + username: myUsername + password: mySecret + datacenter: myDatacenter + vm_id: NameOfVM + vm_username: root + vm_password: superSecret + vm_shell: /bin/echo + vm_shell_args: " $var >> myFile " + vm_shell_env: + - "PATH=/bin" + - "VAR=test" + vm_shell_cwd: "/tmp" + +''' + +try: + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + +# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/execute_program_in_vm.py +def execute_command(content, vm, vm_username, vm_password, program_path, args="", env=None, cwd=None): + + creds = vim.vm.guest.NamePasswordAuthentication(username=vm_username, password=vm_password) + cmdspec = vim.vm.guest.ProcessManager.ProgramSpec(arguments=args, envVariables=env, programPath=program_path, workingDirectory=cwd) + cmdpid = content.guestOperationsManager.processManager.StartProgramInGuest(vm=vm, auth=creds, spec=cmdspec) + + return cmdpid + +def main(): + + argument_spec = vmware_argument_spec() + argument_spec.update(dict(datacenter=dict(default=None, type='str'), + cluster=dict(default=None, type='str'), + vm_id=dict(required=True, type='str'), + vm_id_type=dict(default='vm_name', type='str', choices=['inventory_path', 'uuid', 'dns_name', 'vm_name']), + vm_username=dict(required=False, type='str'), + vm_password=dict(required=False, type='str', no_log=True), + vm_shell=dict(required=True, type='str'), + vm_shell_args=dict(default=" ", type='str'), + vm_shell_env=dict(default=None, type='list'), + vm_shell_cwd=dict(default=None, type='str'))) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) +
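Note that execute_command above only starts the process and hands back the guest PID; nothing waits for the command to finish or collects its output, which is why the module returns the PID as msg. A stub sketch of that call chain (start_program_in_guest stands in for content.guestOperationsManager.processManager.StartProgramInGuest; the credentials and PID are made up):

    # Stand-in for the guest-operations call used by execute_command.
    def start_program_in_guest(auth, spec):
        assert spec['programPath'], 'a program path is mandatory'
        return 4242  # the real call returns only the PID inside the guest

    spec = dict(programPath='/bin/echo',
                arguments=' $var >> myFile ',
                envVariables=['PATH=/bin', 'VAR=test'],
                workingDirectory='/tmp')
    print(start_program_in_guest(('root', 'superSecret'), spec))  # -> 4242

A playbook that needs the command's result therefore has to redirect output to a file in the guest, as the EXAMPLES block does, and fetch it separately.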
+ if not HAS_PYVMOMI: + module.fail_json(changed=False, msg='pyvmomi is required for this module') + + + try: + p = module.params + datacenter_name = p['datacenter'] + cluster_name = p['cluster'] + content = connect_to_api(module) + + datacenter = None + if datacenter_name: + datacenter = find_datacenter_by_name(content, datacenter_name) + if not datacenter: + module.fail_json(changed=False, msg="datacenter not found") + + cluster = None + if cluster_name: + cluster = find_cluster_by_name(content, cluster_name, datacenter) + if not cluster: + module.fail_json(changed=False, msg="cluster not found") + + vm = find_vm_by_id(content, p['vm_id'], p['vm_id_type'], datacenter, cluster) + if not vm: + module.fail_json(msg='VM not found') + + msg = execute_command(content, vm, p['vm_username'], p['vm_password'], + p['vm_shell'], p['vm_shell_args'], p['vm_shell_env'], p['vm_shell_cwd']) + + module.exit_json(changed=True, uuid=vm.summary.config.uuid, msg=msg) + except vmodl.RuntimeFault as runtime_fault: + module.fail_json(changed=False, msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + module.fail_json(changed=False, msg=method_fault.msg) + except Exception as e: + module.fail_json(changed=False, msg=str(e)) + +from ansible.module_utils.vmware import * +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/cloud/vmware/vmware_vm_vss_dvs_migrate.py b/cloud/vmware/vmware_vm_vss_dvs_migrate.py index ff51f86ed09..594a9e17830 100644 --- a/cloud/vmware/vmware_vm_vss_dvs_migrate.py +++ b/cloud/vmware/vmware_vm_vss_dvs_migrate.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: vmware_vm_vss_dvs_migrate @@ -32,20 +36,6 @@ - "python >= 2.6" - PyVmomi options: - hostname: - description: - - The hostname or IP address of the vSphere vCenter API server - required: True - username: - description: - - The username of the vSphere vCenter - required: True - aliases: ['user', 'admin'] - password: - description: - - The password of the vSphere vCenter - required: True - aliases: ['pass', 'pwd'] vm_name: description: - Name of the virtual machine to migrate to a dvSwitch @@ -54,6 +44,7 @@ description: - Name of the portgroup to migrate to the virtual machine to required: True +extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' @@ -74,82 +65,81 @@ HAS_PYVMOMI = False -def _find_dvspg_by_name(content, pg_name): - - vmware_distributed_port_group = get_all_objs(content, [vim.dvs.DistributedVirtualPortgroup]) - for dvspg in vmware_distributed_port_group: - if dvspg.name == pg_name: - return dvspg - return None - - -def find_vm_by_name(content, vm_name): - - virtual_machines = get_all_objs(content, [vim.VirtualMachine]) - for vm in virtual_machines: - if vm.name == vm_name: - return vm - return None - - -def migrate_network_adapter_vds(module): - vm_name = module.params['vm_name'] - dvportgroup_name = module.params['dvportgroup_name'] - content = module.params['content'] - - vm_configspec = vim.vm.ConfigSpec() - nic = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo() - port = vim.dvs.PortConnection() - devicespec = vim.vm.device.VirtualDeviceSpec() - - pg = _find_dvspg_by_name(content, dvportgroup_name) - - if pg is None: - module.fail_json(msg="The standard portgroup was not found") - - vm = find_vm_by_name(content, vm_name) - 
if vm is None: - module.fail_json(msg="The virtual machine was not found") - - dvswitch = pg.config.distributedVirtualSwitch - port.switchUuid = dvswitch.uuid - port.portgroupKey = pg.key - nic.port = port - - for device in vm.config.hardware.device: - if isinstance(device, vim.vm.device.VirtualEthernetCard): - devicespec.device = device - devicespec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit - devicespec.device.backing = nic - vm_configspec.deviceChange.append(devicespec) - - task = vm.ReconfigVM_Task(vm_configspec) - changed, result = wait_for_task(task) - module.exit_json(changed=changed, result=result) - - -def state_exit_unchanged(module): - module.exit_json(changed=False) - - -def check_vm_network_state(module): - vm_name = module.params['vm_name'] - try: - content = connect_to_api(module) - module.params['content'] = content - vm = find_vm_by_name(content, vm_name) - module.params['vm'] = vm - if vm is None: - module.fail_json(msg="A virtual machine with name %s does not exist" % vm_name) - for device in vm.config.hardware.device: +class VMwareVmVssDvsMigrate(object): + def __init__(self, module): + self.module = module + self.content = connect_to_api(module) + self.vm = None + self.vm_name = module.params['vm_name'] + self.dvportgroup_name = module.params['dvportgroup_name'] + + def process_state(self): + vm_nic_states = { + 'absent': self.migrate_network_adapter_vds, + 'present': self.state_exit_unchanged, + } + + vm_nic_states[self.check_vm_network_state()]() + + def find_dvspg_by_name(self): + vmware_distributed_port_group = get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup]) + for dvspg in vmware_distributed_port_group: + if dvspg.name == self.dvportgroup_name: + return dvspg + return None + + def find_vm_by_name(self): + virtual_machines = get_all_objs(self.content, [vim.VirtualMachine]) + for vm in virtual_machines: + if vm.name == self.vm_name: + return vm + return None + + def migrate_network_adapter_vds(self): + vm_configspec = vim.vm.ConfigSpec() + nic = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo() + port = vim.dvs.PortConnection() + devicespec = vim.vm.device.VirtualDeviceSpec() + + pg = self.find_dvspg_by_name() + + if pg is None: + self.module.fail_json(msg="The standard portgroup was not found") + + dvswitch = pg.config.distributedVirtualSwitch + port.switchUuid = dvswitch.uuid + port.portgroupKey = pg.key + nic.port = port + + for device in self.vm.config.hardware.device: if isinstance(device, vim.vm.device.VirtualEthernetCard): - if isinstance(device.backing, vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo): - return 'present' - return 'absent' - except vmodl.RuntimeFault as runtime_fault: - module.fail_json(msg=runtime_fault.msg) - except vmodl.MethodFault as method_fault: - module.fail_json(msg=method_fault.msg) + devicespec.device = device + devicespec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit + devicespec.device.backing = nic + vm_configspec.deviceChange.append(devicespec) + + task = self.vm.ReconfigVM_Task(vm_configspec) + changed, result = wait_for_task(task) + self.module.exit_json(changed=changed, result=result) + + def state_exit_unchanged(self): + self.module.exit_json(changed=False) + + def check_vm_network_state(self): + try: + self.vm = self.find_vm_by_name() + + if self.vm is None: + self.module.fail_json(msg="A virtual machine with name %s does not exist" % self.vm_name) + for device in self.vm.config.hardware.device: + if isinstance(device, 
vim.vm.device.VirtualEthernetCard): + if isinstance(device.backing, vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo): + return 'present' + return 'absent' + except vmodl.RuntimeFault as runtime_fault: + self.module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + self.module.fail_json(msg=method_fault.msg) def main(): @@ -162,12 +152,8 @@ def main(): if not HAS_PYVMOMI: module.fail_json(msg='pyvmomi is required for this module') - vm_nic_states = { - 'absent': migrate_network_adapter_vds, - 'present': state_exit_unchanged, - } - - vm_nic_states[check_vm_network_state(module)](module) + vmware_vmnic_migrate = VMwareVmVssDvsMigrate(module) + vmware_vmnic_migrate.process_state() from ansible.module_utils.vmware import * from ansible.module_utils.basic import * diff --git a/cloud/vmware/vmware_vmkernel.py b/cloud/vmware/vmware_vmkernel.py index 0221f68ad2e..238b85ea345 100644 --- a/cloud/vmware/vmware_vmkernel.py +++ b/cloud/vmware/vmware_vmkernel.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: vmware_vmkernel @@ -32,20 +36,6 @@ - "python >= 2.6" - PyVmomi options: - hostname: - description: - - The hostname or IP address of the ESXi Server - required: True - username: - description: - - The username of the ESXi Server - required: True - aliases: ['user', 'admin'] - password: - description: - - The password of ESXi Server - required: True - aliases: ['pass', 'pwd'] vswitch_name: description: - The name of the vswitch where to add the VMK interface @@ -86,6 +76,7 @@ description: - Enable the VMK interface for Fault Tolerance traffic required: False +extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' diff --git a/cloud/vmware/vmware_vmkernel_ip_config.py b/cloud/vmware/vmware_vmkernel_ip_config.py index c07526f0aeb..fe545e356d8 100644 --- a/cloud/vmware/vmware_vmkernel_ip_config.py +++ b/cloud/vmware/vmware_vmkernel_ip_config.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: vmware_vmkernel_ip_config @@ -32,20 +36,6 @@ - "python >= 2.6" - PyVmomi options: - hostname: - description: - - The hostname or IP address of the ESXi server - required: True - username: - description: - - The username of the ESXi server - required: True - aliases: ['user', 'admin'] - password: - description: - - The password of the ESXi server - required: True - aliases: ['pass', 'pwd'] vmk_name: description: - VMkernel interface name @@ -58,6 +48,7 @@ description: - Subnet Mask to assign to VMkernel interface required: True +extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' diff --git a/cloud/vmware/vmware_vmotion.py b/cloud/vmware/vmware_vmotion.py new file mode 100644 index 00000000000..0ceaf597879 --- /dev/null +++ b/cloud/vmware/vmware_vmotion.py @@ -0,0 +1,154 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Bede Carroll +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: vmware_vmotion +short_description: Move a virtual machine using vMotion +description: + - Using VMware vCenter, move a virtual machine using vMotion to a different + host. +version_added: 2.2 +author: "Bede Carroll (@bedecarroll)" +notes: + - Tested on vSphere 6.0 +requirements: + - "python >= 2.6" + - pyVmomi +options: + vm_name: + description: + - Name of the VM to perform a vMotion on + required: True + aliases: ['vm'] + destination_host: + description: + - Name of the end host the VM should be running on + required: True + aliases: ['destination'] +extends_documentation_fragment: vmware.documentation +''' + +EXAMPLES = ''' +Example from Ansible playbook + + - name: Perform vMotion of VM + local_action: + module: vmware_vmotion + hostname: 'vcenter_hostname' + username: 'vcenter_username' + password: 'vcenter_password' + validate_certs: False + vm_name: 'vm_name_as_per_vcenter' + destination_host: 'destination_host_as_per_vcenter' +''' + +RETURN = ''' +running_host: + description: List the host the virtual machine is registered to + returned: + - changed + - success + type: string + sample: 'host1.example.com' +''' + +try: + from pyVmomi import vim + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +def migrate_vm(vm_object, host_object): + """ + Migrate virtual machine and return the task. 
+ """ + relocate_spec = vim.vm.RelocateSpec(host=host_object) + task_object = vm_object.Relocate(relocate_spec) + return task_object + +def main(): + + argument_spec = vmware_argument_spec() + argument_spec.update( + dict( + vm_name=dict(required=True, aliases=['vm'], type='str'), + destination_host=dict(required=True, aliases=['destination'], type='str'), + ) + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + if not HAS_PYVMOMI: + module.fail_json(msg='pyVmomi is required for this module') + + content = connect_to_api(module=module) + + vm_object = find_vm_by_name(content=content, vm_name=module.params['vm_name']) + host_object = find_hostsystem_by_name(content=content, hostname=module.params['destination_host']) + + # Setup result + result = { + 'changed': False + } + + # Check if we could find the VM or Host + if not vm_object: + module.fail_json(msg='Cannot find virtual machine') + if not host_object: + module.fail_json(msg='Cannot find host') + + # Make sure VM isn't already at the destination + if vm_object.runtime.host.name == module.params['destination_host']: + module.exit_json(**result) + + if not module.check_mode: + # Migrate VM and get Task object back + task_object = migrate_vm(vm_object=vm_object, host_object=host_object) + + # Wait for task to complete + wait_for_task(task_object) + + # If task was a success the VM has moved, update running_host and complete module + if task_object.info.state == vim.TaskInfo.State.success: + vm_object = find_vm_by_name(content=content, vm_name=module.params['vm_name']) + result['running_host'] = vm_object.runtime.host.name + result['changed'] = True + module.exit_json(**result) + else: + if task_object.info.error is None: + module.fail_json(msg='Unable to migrate VM due to an error, please check vCenter') + else: + module.fail_json(msg='Unable to migrate VM due to an error: %s' % task_object.info.error) + else: + # If we are in check mode return a result as if move was performed + result['running_host'] = module.params['destination_host'] + result['changed'] = True + module.exit_json(**result) + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.vmware import * + +if __name__ == '__main__': + main() diff --git a/cloud/vmware/vmware_vsan_cluster.py b/cloud/vmware/vmware_vsan_cluster.py index b7b84d94c43..714f6f22ff8 100644 --- a/cloud/vmware/vmware_vsan_cluster.py +++ b/cloud/vmware/vmware_vsan_cluster.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: vmware_vsan_cluster @@ -32,24 +36,11 @@ - "python >= 2.6" - PyVmomi options: - hostname: - description: - - The hostname or IP address of the ESXi Server - required: True - username: - description: - - The username of the ESXi Server - required: True - aliases: ['user', 'admin'] - password: - description: - - The password of ESXi Server - required: True - aliases: ['pass', 'pwd'] cluster_uuid: description: - Desired cluster UUID required: False +extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' @@ -74,7 +65,7 @@ username: "{{ esxi_username }}" password: "{{ site_password }}" cluster_uuid: "{{ vsan_cluster.cluster_uuid }}" - with_items: groups['esxi'][1:] + with_items: "{{ groups['esxi'][1:] }}" ''' diff --git a/cloud/vmware/vmware_vswitch.py b/cloud/vmware/vmware_vswitch.py index d9ac55d2364..ef14f2d6bfc 100644 --- a/cloud/vmware/vmware_vswitch.py +++ b/cloud/vmware/vmware_vswitch.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: vmware_vswitch @@ -32,20 +36,6 @@ - "python >= 2.6" - PyVmomi options: - hostname: - description: - - The hostname or IP address of the ESXi server - required: True - username: - description: - - The username of the ESXi server - required: True - aliases: ['user', 'admin'] - password: - description: - - The password of the ESXi server - required: True - aliases: ['pass', 'pwd'] switch_name: description: - vSwitch name to add @@ -71,6 +61,7 @@ - 'present' - 'absent' required: False +extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' @@ -95,82 +86,101 @@ def find_vswitch_by_name(host, vswitch_name): - for vss in host.config.network.vswitch: - if vss.name == vswitch_name: - return vss - return None - - -# Source from -# https://github.com/rreubenur/pyvmomi-community-samples/blob/patch-1/samples/create_vswitch.py - -def state_create_vswitch(module): - - switch_name = module.params['switch_name'] - number_of_ports = module.params['number_of_ports'] - nic_name = module.params['nic_name'] - mtu = module.params['mtu'] - host = module.params['host'] - - vss_spec = vim.host.VirtualSwitch.Specification() - vss_spec.numPorts = number_of_ports - vss_spec.mtu = mtu - vss_spec.bridge = vim.host.VirtualSwitch.BondBridge(nicDevice=[nic_name]) - host.configManager.networkSystem.AddVirtualSwitch(vswitchName=switch_name, spec=vss_spec) - module.exit_json(changed=True) - - -def state_exit_unchanged(module): - module.exit_json(changed=False) - - -def state_destroy_vswitch(module): - vss = module.params['vss'] - host = module.params['host'] - config = vim.host.NetworkConfig() - - for portgroup in host.configManager.networkSystem.networkInfo.portgroup: - if portgroup.spec.vswitchName == vss.name: - portgroup_config = vim.host.PortGroup.Config() - portgroup_config.changeOperation = "remove" - portgroup_config.spec = vim.host.PortGroup.Specification() - portgroup_config.spec.name = portgroup.spec.name - portgroup_config.spec.vlanId = portgroup.spec.vlanId - portgroup_config.spec.vswitchName = portgroup.spec.vswitchName - portgroup_config.spec.policy = vim.host.NetworkPolicy() - config.portgroup.append(portgroup_config) - - host.configManager.networkSystem.UpdateNetworkConfig(config, "modify") - 
host.configManager.networkSystem.RemoveVirtualSwitch(vss.name) - module.exit_json(changed=True) - - -def state_update_vswitch(module): - module.exit_json(changed=False, msg="Currently not implemented.") - - -def check_vswitch_configuration(module): - switch_name = module.params['switch_name'] - content = connect_to_api(module) - module.params['content'] = content - - host = get_all_objs(content, [vim.HostSystem]) - if not host: - module.fail_json(msg="Unble to find host") - - host_system = host.keys()[0] - module.params['host'] = host_system - vss = find_vswitch_by_name(host_system, switch_name) - - if vss is None: - return 'absent' - else: - module.params['vss'] = vss - return 'present' + for vss in host.config.network.vswitch: + if vss.name == vswitch_name: + return vss + return None + + +class VMwareHostVirtualSwitch(object): + + def __init__(self, module): + self.host_system = None + self.content = None + self.vss = None + self.module = module + self.switch_name = module.params['switch_name'] + self.number_of_ports = module.params['number_of_ports'] + self.nic_name = module.params['nic_name'] + self.mtu = module.params['mtu'] + self.state = module.params['state'] + self.content = connect_to_api(self.module) + + def process_state(self): + try: + vswitch_states = { + 'absent': { + 'present': self.state_destroy_vswitch, + 'absent': self.state_exit_unchanged, + }, + 'present': { + 'update': self.state_update_vswitch, + 'present': self.state_exit_unchanged, + 'absent': self.state_create_vswitch, + } + } + vswitch_states[self.state][self.check_vswitch_configuration()]() + + except vmodl.RuntimeFault as runtime_fault: + self.module.fail_json(msg=runtime_fault.msg) + except vmodl.MethodFault as method_fault: + self.module.fail_json(msg=method_fault.msg) + except Exception as e: + self.module.fail_json(msg=str(e)) + + + # Source from + # https://github.com/rreubenur/pyvmomi-community-samples/blob/patch-1/samples/create_vswitch.py + + def state_create_vswitch(self): + vss_spec = vim.host.VirtualSwitch.Specification() + vss_spec.numPorts = self.number_of_ports + vss_spec.mtu = self.mtu + vss_spec.bridge = vim.host.VirtualSwitch.BondBridge(nicDevice=[self.nic_name]) + self.host_system.configManager.networkSystem.AddVirtualSwitch(vswitchName=self.switch_name, spec=vss_spec) + self.module.exit_json(changed=True) + + def state_exit_unchanged(self): + self.module.exit_json(changed=False) + + def state_destroy_vswitch(self): + config = vim.host.NetworkConfig() + + for portgroup in self.host_system.configManager.networkSystem.networkInfo.portgroup: + if portgroup.spec.vswitchName == self.vss.name: + portgroup_config = vim.host.PortGroup.Config() + portgroup_config.changeOperation = "remove" + portgroup_config.spec = vim.host.PortGroup.Specification() + portgroup_config.spec.name = portgroup.spec.name + portgroup_config.spec.vlanId = portgroup.spec.vlanId + portgroup_config.spec.vswitchName = portgroup.spec.vswitchName + portgroup_config.spec.policy = vim.host.NetworkPolicy() + config.portgroup.append(portgroup_config) + + self.host_system.configManager.networkSystem.UpdateNetworkConfig(config, "modify") + self.host_system.configManager.networkSystem.RemoveVirtualSwitch(self.vss.name) + self.module.exit_json(changed=True) + + def state_update_vswitch(self): + self.module.exit_json(changed=False, msg="Currently not implemented.") + + def check_vswitch_configuration(self): + host = get_all_objs(self.content, [vim.HostSystem]) + if not host: +
self.module.fail_json(msg="Unable to find host") + + self.host_system = host.keys()[0] + self.vss = find_vswitch_by_name(self.host_system, self.switch_name) + + if self.vss is None: + return 'absent' + else: + return 'present' + def main(): - argument_spec = vmware_argument_spec() argument_spec.update(dict(switch_name=dict(required=True, type='str'), nic_name=dict(required=True, type='str'), @@ -183,27 +193,8 @@ def main(): if not HAS_PYVMOMI: module.fail_json(msg='pyvmomi is required for this module') - try: - vswitch_states = { - 'absent': { - 'present': state_destroy_vswitch, - 'absent': state_exit_unchanged, - }, - 'present': { - 'update': state_update_vswitch, - 'present': state_exit_unchanged, - 'absent': state_create_vswitch, - } - } - - vswitch_states[module.params['state']][check_vswitch_configuration(module)](module) - - except vmodl.RuntimeFault as runtime_fault: - module.fail_json(msg=runtime_fault.msg) - except vmodl.MethodFault as method_fault: - module.fail_json(msg=method_fault.msg) - except Exception as e: - module.fail_json(msg=str(e)) + host_virtual_switch = VMwareHostVirtualSwitch(module) + host_virtual_switch.process_state() from ansible.module_utils.vmware import * from ansible.module_utils.basic import * diff --git a/cloud/vmware/vsphere_copy.py b/cloud/vmware/vsphere_copy.py index 18799211522..7e2ef125c86 100644 --- a/cloud/vmware/vsphere_copy.py +++ b/cloud/vmware/vsphere_copy.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: vsphere_copy @@ -55,6 +59,14 @@ description: - The file to push to the datastore on the vCenter server. required: true + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be + set to C(no) when no other option exists. + required: false + default: 'yes' + choices: ['yes', 'no'] + notes: - "This module ought to be run from a system that can access vCenter directly and has the file to transfer. It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to)." 
@@ -62,20 +74,36 @@ ''' EXAMPLES = ''' -- vsphere_copy: host=vhost login=vuser password=vpass src=/some/local/file datacenter='DC1 Someplace' datastore=datastore1 path=some/remote/file +- vsphere_copy: + host: vhost + login: vuser + password: vpass + src: /some/local/file + datacenter: DC1 Someplace + datastore: datastore1 + path: some/remote/file transport: local -- vsphere_copy: host=vhost login=vuser password=vpass src=/other/local/file datacenter='DC2 Someplace' datastore=datastore2 path=other/remote/file +- vsphere_copy: + host: vhost + login: vuser + password: vpass + src: /other/local/file + datacenter: DC2 Someplace + datastore: datastore2 + path: other/remote/file delegate_to: other_system ''' import atexit -import base64 -import httplib import urllib import mmap import errno import socket +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import open_url + def vmware_path(datastore, datacenter, path): ''' Constructs a URL path that VSphere accepts reliably ''' path = "/folder/%s" % path.lstrip("/") @@ -96,11 +124,12 @@ def main(): argument_spec = dict( host = dict(required=True, aliases=[ 'hostname' ]), login = dict(required=True, aliases=[ 'username' ]), - password = dict(required=True), + password = dict(required=True, no_log=True), src = dict(required=True, aliases=[ 'name' ]), datacenter = dict(required=True), datastore = dict(required=True), dest = dict(required=True, aliases=[ 'path' ]), + validate_certs = dict(required=False, default=True, type='bool'), ), # Implementing check-mode using HEAD is impossible, since size/date is not 100% reliable supports_check_mode = False, @@ -113,6 +142,7 @@ def main(): datacenter = module.params.get('datacenter') datastore = module.params.get('datastore') dest = module.params.get('dest') + validate_certs = module.params.get('validate_certs') fd = open(src, "rb") atexit.register(fd.close) @@ -120,37 +150,46 @@ def main(): data = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ) atexit.register(data.close) - conn = httplib.HTTPSConnection(host) - atexit.register(conn.close) - remote_path = vmware_path(datastore, datacenter, dest) - auth = base64.encodestring('%s:%s' % (login, password)).rstrip() + url = 'https://%s%s' % (host, remote_path) + headers = { "Content-Type": "application/octet-stream", "Content-Length": str(len(data)), - "Authorization": "Basic %s" % auth, } - # URL is only used in JSON output (helps troubleshooting) - url = 'https://%s%s' % (host, remote_path) - try: - conn.request("PUT", remote_path, body=data, headers=headers) - except socket.error, e: + r = open_url(url, data=data, headers=headers, method='PUT', + url_username=login, url_password=password, validate_certs=validate_certs, + force_basic_auth=True) + except socket.error: + e = get_exception() if isinstance(e.args, tuple) and e[0] == errno.ECONNRESET: # VSphere resets connection if the file is in use and cannot be replaced - module.fail_json(msg='Failed to upload, image probably in use', status=e[0], reason=str(e), url=url) + module.fail_json(msg='Failed to upload, image probably in use', status=None, errno=e[0], reason=str(e), url=url) else: - module.fail_json(msg=str(e), status=e[0], reason=str(e), url=url) - - resp = conn.getresponse() - - if resp.status in range(200, 300): - module.exit_json(changed=True, status=resp.status, reason=resp.reason, url=url) + module.fail_json(msg=str(e), status=None, errno=e[0], reason=str(e), url=url) + except Exception: + e = 
get_exception() + error_code = -1 + try: + if isinstance(e[0], int): + error_code = e[0] + except KeyError: + pass + module.fail_json(msg=str(e), status=None, errno=error_code, reason=str(e), url=url) + + status = r.getcode() + if 200 <= status < 300: + module.exit_json(changed=True, status=status, reason=r.msg, url=url) else: - module.fail_json(msg='Failed to upload', status=resp.status, reason=resp.reason, length=resp.length, version=resp.version, headers=resp.getheaders(), chunked=resp.chunked, url=url) + length = r.headers.get('content-length', None) + if r.headers.get('transfer-encoding', '').lower() == 'chunked': + chunked = 1 + else: + chunked = 0 -# Import module snippets -from ansible.module_utils.basic import * + module.fail_json(msg='Failed to upload', errno=None, status=status, reason=r.msg, length=length, headers=dict(r.headers), chunked=chunked, url=url) -main() +if __name__ == '__main__': + main() diff --git a/cloud/webfaction/webfaction_app.py b/cloud/webfaction/webfaction_app.py index 1c015a401d1..63c00bd1778 100644 --- a/cloud/webfaction/webfaction_app.py +++ b/cloud/webfaction/webfaction_app.py @@ -27,6 +27,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: webfaction_app @@ -69,7 +73,9 @@ required: false default: null - open_port: + port_open: + description: + - If the port should be opened + required: false + default: false @@ -112,9 +118,9 @@ def main(): name = dict(required=True), state = dict(required=False, choices=['present', 'absent'], default='present'), type = dict(required=True), - autostart = dict(required=False, choices=BOOLEANS, default=False), + autostart = dict(required=False, type='bool', default=False), extra_info = dict(required=False, default=""), - port_open = dict(required=False, choices=BOOLEANS, default=False), + port_open = dict(required=False, type='bool', default=False), login_name = dict(required=True), login_password = dict(required=True), machine = dict(required=False, default=False), @@ -193,5 +199,6 @@ def main(): ) from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/webfaction/webfaction_db.py b/cloud/webfaction/webfaction_db.py index 6c45e700e9b..6fe785f76a9 100644 --- a/cloud/webfaction/webfaction_db.py +++ b/cloud/webfaction/webfaction_db.py @@ -24,6 +24,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: webfaction_db @@ -196,5 +200,6 @@ def main(): ) from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/webfaction/webfaction_domain.py b/cloud/webfaction/webfaction_domain.py index c809dd6beb3..859209c9ce7 100644 --- a/cloud/webfaction/webfaction_domain.py +++ b/cloud/webfaction/webfaction_domain.py @@ -22,6 +22,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: webfaction_domain @@ -167,5 +171,6 @@ def main(): ) from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/webfaction/webfaction_mailbox.py b/cloud/webfaction/webfaction_mailbox.py index c08bd477601..2132eeaffbb 100644 --- a/cloud/webfaction/webfaction_mailbox.py +++ b/cloud/webfaction/webfaction_mailbox.py @@ -21,6 +21,10 @@ # along with Ansible. If not, see .
# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: webfaction_mailbox @@ -99,7 +103,7 @@ def main(): module.params['login_password'] ) - mailbox_list = webfaction.list_mailboxes(session_id) + mailbox_list = [x['mailbox'] for x in webfaction.list_mailboxes(session_id)] existing_mailbox = mailbox_name in mailbox_list result = {} @@ -135,5 +139,6 @@ def main(): from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/webfaction/webfaction_site.py b/cloud/webfaction/webfaction_site.py index bb1bfb94457..08a9b4d76d4 100644 --- a/cloud/webfaction/webfaction_site.py +++ b/cloud/webfaction/webfaction_site.py @@ -22,6 +22,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: webfaction_site @@ -59,7 +63,9 @@ description: - Whether or not to use HTTPS required: false - choices: BOOLEANS + choices: + - true + - false default: 'false' site_apps: @@ -112,9 +118,9 @@ def main(): state = dict(required=False, choices=['present', 'absent'], default='present'), # You can specify an IP address or hostname. host = dict(required=True), - https = dict(required=False, choices=BOOLEANS, default=False), - subdomains = dict(required=False, default=[]), - site_apps = dict(required=False, default=[]), + https = dict(required=False, type='bool', default=False), + subdomains = dict(required=False, type='list', default=[]), + site_apps = dict(required=False, type='list', default=[]), login_name = dict(required=True), login_password = dict(required=True), ), @@ -204,5 +210,6 @@ def main(): from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/xenserver_facts.py b/cloud/xenserver_facts.py index 54ca3389752..d908e5a3fdd 100644 --- a/cloud/xenserver_facts.py +++ b/cloud/xenserver_facts.py @@ -15,6 +15,10 @@ # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: xenserver_facts @@ -28,8 +32,13 @@ ''' import platform -import sys -import XenAPI + +HAVE_XENAPI = False +try: + import XenAPI + HAVE_XENAPI = True +except ImportError: + pass EXAMPLES = ''' - name: Gather facts from xenserver @@ -37,7 +46,7 @@ - name: Print running VMs debug: msg="{{ item }}" - with_items: xs_vms.keys() + with_items: "{{ xs_vms.keys() }}" when: xs_vms[item]['power_state'] == "Running" TASK: [Print running VMs] *********************************************************** @@ -75,12 +84,9 @@ def codename(self): def get_xenapi_session(): - try: - session = XenAPI.xapi_local() - session.xenapi.login_with_password('', '') - return session - except XenAPI.Failure: - sys.exit(1) + session = XenAPI.xapi_local() + session.xenapi.login_with_password('', '') + return session def get_networks(session): @@ -162,9 +168,14 @@ def get_srs(session): def main(): module = AnsibleModule({}) - obj = XenServerFacts() - session = get_xenapi_session() + if not HAVE_XENAPI: + module.fail_json(changed=False, msg="python xen api required for this module") + obj = XenServerFacts() + try: + session = get_xenapi_session() + except XenAPI.Failure as e: + module.fail_json(msg='%s' % e) data = { 'xenserver_version': obj.version, @@ -192,7 +203,7 @@ def main(): module.exit_json(ansible=data) -# this is magic, see lib/ansible/module_common.py -#<> +from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/clustering/consul.py b/clustering/consul.py index f72fc6ddcac..fd69726eef9 100644 --- a/clustering/consul.py +++ b/clustering/consul.py @@ -17,6 +17,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ module: consul short_description: "Add, modify & delete services within a consul cluster." @@ -71,6 +75,18 @@ - the port on which the consul agent is running required: false default: 8500 + scheme: + description: + - the protocol scheme on which the consul agent is running + required: false + default: http + version_added: "2.1" + validate_certs: + description: + - whether to verify the tls certificate of the consul agent + required: false + default: True + version_added: "2.1" notes: description: - Notes to attach to check when registering it. @@ -81,6 +97,15 @@ - the port on which the service is listening required for registration of a service, i.e. if service_name or service_id is set required: false + service_address: + description: + - the address to advertise that the service will be listening on. + This value will be passed as the I(Address) parameter to Consul's + U(/v1/agent/service/register) API method, so refer to the Consul API + documentation for further details. + required: false + default: None + version_added: "2.1" tags: description: - a list of tags that will be attached to the service registration. @@ -123,6 +148,22 @@ is supplied, m will be used by default e.g. 1 will be 1m required: false default: None + http: + description: + - checks can be registered with an http endpoint. This means that consul + will check that the http endpoint returns a successful http status. + Interval must also be provided with this option. + required: false + default: None + version_added: "2.0" + timeout: + description: + - A custom HTTP check timeout. 
The consul default is 10 seconds. + Similar to the interval this is a number with a s or m suffix to + signify the units of seconds or minutes, e.g. 15s or 1m. + required: false + default: None + version_added: "2.0" token: description: - the token key identifying an ACL rule set. May be required to register services. @@ -133,19 +174,32 @@ EXAMPLES = ''' - name: register nginx service with the local consul agent consul: - name: nginx + service_name: nginx service_port: 80 - name: register nginx service with curl check consul: - name: nginx + service_name: nginx service_port: 80 script: "curl http://localhost" interval: 60s + - name: register nginx with an http check + consul: + service_name: nginx + service_port: 80 + interval: 60s + http: "http://localhost:80/status" + + - name: register external service nginx available at 10.1.5.23 + consul: + service_name: nginx + service_port: 80 + service_address: 10.1.5.23 + - name: register nginx with some service tags consul: - name: nginx + service_name: nginx service_port: 80 tags: - prod @@ -153,7 +207,7 @@ - name: remove nginx service consul: - name: nginx + service_name: nginx state: absent - name: create a node level check to test disk usage @@ -163,20 +217,21 @@ script: "/opt/disk_usage.py" interval: 5m -''' - -import sys + - name: register an http check against a service that's already registered + consul: + check_name: nginx-check2 + check_id: nginx-check2 + service_id: nginx + interval: 60s + http: "http://localhost:80/morestatus" -try: - import json -except ImportError: - import simplejson as json +''' try: import consul from requests.exceptions import ConnectionError python_consul_installed = True -except ImportError, e: +except ImportError: python_consul_installed = False def register_with_consul(module): @@ -210,8 +265,7 @@ def remove(module): service_id = module.params.get('service_id') or module.params.get('service_name') check_id = module.params.get('check_id') or module.params.get('check_name') if not (service_id or check_id): - module.fail_json(msg='services and checks are removed by id or name.'\ - ' please supply a service id/name or a check id/name') + module.fail_json(msg='services and checks are removed by id or name. please supply a service id/name or a check id/name') if service_id: remove_service(module, service_id) else: @@ -223,9 +277,8 @@ def add_check(module, check): retrieve the full metadata of an existing check through the consul api. Without this we can't compare to the supplied check and so we must assume a change.
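For example (a sketch; the endpoint and durations are illustrative), an http check with the new timeout option is supplied from a playbook as:

    - consul:
        service_name: nginx
        service_port: 80
        interval: 60s
        timeout: 5s
        http: "http://localhost:80/status"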
''' - if not check.name: - module.fail_json(msg='a check name is required for a node level check,'\ - ' one not attached to a service') + if not check.name and not service_id: + module.fail_json(msg='a check name is required for a node level check, one not attached to a service') consul_api = get_consul_api(module) check.register(consul_api) @@ -235,7 +288,10 @@ def add_check(module, check): check_name=check.name, script=check.script, interval=check.interval, - ttl=check.ttl) + ttl=check.ttl, + http=check.http, + timeout=check.timeout, + service_id=check.service_id) def remove_check(module, check_id): @@ -255,15 +311,15 @@ def add_service(module, service): changed = False consul_api = get_consul_api(module) - existing = get_service_by_id(consul_api, service.id) + existing = get_service_by_id_or_name(consul_api, service.id) - # there is no way to retrieve the details of checks so if a check is present - # in the service it must be reregistered + # there is no way to retrieve the details of checks so if a check is present + # in the service it must be re-registered if service.has_checks() or not existing or not existing == service: service.register(consul_api) # check that it registered correctly - registered = get_service_by_id(consul_api, service.id) + registered = get_service_by_id_or_name(consul_api, service.id) if registered: result = registered changed = True @@ -272,14 +328,14 @@ service_id=result.id, service_name=result.name, service_port=result.port, - checks=map(lambda x: x.to_dict(), service.checks), + checks=[check.to_dict() for check in service.checks], tags=result.tags) def remove_service(module, service_id): ''' deregister a service from the given agent using its service id ''' consul_api = get_consul_api(module) - service = get_service_by_id(consul_api, service_id) + service = get_service_by_id_or_name(consul_api, service_id) if service: consul_api.agent.service.deregister(service_id) module.exit_json(changed=True, id=service_id) @@ -290,24 +346,25 @@ def get_consul_api(module, token=None): return consul.Consul(host=module.params.get('host'), port=module.params.get('port'), + scheme=module.params.get('scheme'), + verify=module.params.get('validate_certs'), token=module.params.get('token')) -def get_service_by_id(consul_api, service_id): +def get_service_by_id_or_name(consul_api, service_id_or_name): ''' iterate the registered services and find one with the given id ''' for name, service in consul_api.agent.services().iteritems(): - if service['ID'] == service_id: + if service['ID'] == service_id_or_name or service['Service'] == service_id_or_name: return ConsulService(loaded=service) def parse_check(module): - if module.params.get('script') and module.params.get('ttl'): + if len(filter(None, [module.params.get('script'), module.params.get('ttl'), module.params.get('http')])) > 1: module.fail_json( - msg='check are either script or ttl driven, supplying both does'\ - ' not make sense') + msg='checks are either script, http or ttl driven, supplying more than one does not make sense') - if module.params.get('check_id') or module.params.get('script') or module.params.get('ttl'): + if module.params.get('check_id') or module.params.get('script') or module.params.get('ttl') or module.params.get('http'): return ConsulCheck( module.params.get('check_id'), @@ -317,7 +374,10 @@ def parse_check(module): module.params.get('script'), module.params.get('interval'), module.params.get('ttl'), - module.params.get('notes')
+ module.params.get('notes'), + module.params.get('http'), + module.params.get('timeout'), + module.params.get('service_id'), ) @@ -327,24 +387,23 @@ def parse_service(module): return ConsulService( module.params.get('service_id'), module.params.get('service_name'), + module.params.get('service_address'), module.params.get('service_port'), module.params.get('tags'), ) elif module.params.get('service_name') and not module.params.get('service_port'): - module.fail_json( - msg="service_name supplied but no service_port, a port is required"\ - " to configure a service. Did you configure the 'port' "\ - "argument meaning 'service_port'?") + module.fail_json( msg="service_name supplied but no service_port, a port is required to configure a service. Did you configure the 'port' argument meaning 'service_port'?") -class ConsulService(): +class ConsulService(): - def __init__(self, service_id=None, name=None, port=-1, + def __init__(self, service_id=None, name=None, address=None, port=-1, tags=None, loaded=None): self.id = self.name = name if service_id: self.id = service_id + self.address = address self.port = port self.tags = tags self.checks = [] @@ -357,18 +416,19 @@ def __init__(self, service_id=None, name=None, port=-1, def register(self, consul_api): if len(self.checks) > 0: check = self.checks[0] + consul_api.agent.service.register( self.name, service_id=self.id, + address=self.address, port=self.port, tags=self.tags, - script=check.script, - interval=check.interval, - ttl=check.ttl) + check=check.check) else: consul_api.agent.service.register( self.name, service_id=self.id, + address=self.address, port=self.port, tags=self.tags) @@ -405,36 +465,52 @@ def to_dict(self): class ConsulCheck(): def __init__(self, check_id, name, node=None, host='localhost', - script=None, interval=None, ttl=None, notes=None): + script=None, interval=None, ttl=None, notes=None, http=None, timeout=None, service_id=None): self.check_id = self.name = name if check_id: self.check_id = check_id - self.script = script - self.interval = self.validate_duration('interval', interval) - self.ttl = self.validate_duration('ttl', ttl) + self.service_id = service_id self.notes = notes self.node = node self.host = host - + self.interval = self.validate_duration('interval', interval) + self.ttl = self.validate_duration('ttl', ttl) + self.script = script + self.http = http + self.timeout = self.validate_duration('timeout', timeout) + + self.check = None + + if script: + self.check = consul.Check.script(script, self.interval) + + if ttl: + self.check = consul.Check.ttl(self.ttl) + + if http: + if interval is None: + raise Exception('http check must specify interval') + + self.check = consul.Check.http(http, self.interval, self.timeout) + def validate_duration(self, name, duration): if duration: duration_units = ['ns', 'us', 'ms', 's', 'm', 'h'] if not any((duration.endswith(suffix) for suffix in duration_units)): - raise Exception('Invalid %s %s you must specify units (%s)' % - (name, duration, ', '.join(duration_units))) + duration = "{}s".format(duration) return duration def register(self, consul_api): - consul_api.agent.check.register(self.name, check_id=self.check_id, - script=self.script, - interval=self.interval, - ttl=self.ttl, notes=self.notes) + consul_api.agent.check.register(self.name, check_id=self.check_id, service_id=self.service_id, + notes=self.notes, + check=self.check) def __eq__(self, other): return (isinstance(other, self.__class__) and self.check_id == other.check_id + and self.service_id == other.service_id 
and self.name == other.name and self.script == other.script and self.interval == other.interval) @@ -452,11 +528,14 @@ def to_dict(self): self._add(data, 'host') self._add(data, 'interval') self._add(data, 'ttl') + self._add(data, 'http') + self._add(data, 'timeout') + self._add(data, 'service_id') return data def _add(self, data, key, attr=None): try: - if attr == None: + if attr is None: attr = key data[key] = getattr(self, attr) except: @@ -464,14 +543,15 @@ def _add(self, data, key, attr=None): def test_dependencies(module): if not python_consul_installed: - module.fail_json(msg="python-consul required for this module. "\ - "see http://python-consul.readthedocs.org/en/latest/#installation") + module.fail_json(msg="python-consul required for this module. see http://python-consul.readthedocs.org/en/latest/#installation") def main(): module = AnsibleModule( argument_spec=dict( host=dict(default='localhost'), port=dict(default=8500, type='int'), + scheme=dict(required=False, default='http'), + validate_certs=dict(required=False, default=True, type='bool'), check_id=dict(required=False), check_name=dict(required=False), check_node=dict(required=False), @@ -480,24 +560,27 @@ def main(): script=dict(required=False), service_id=dict(required=False), service_name=dict(required=False), + service_address=dict(required=False, type='str', default=None), service_port=dict(required=False, type='int'), state=dict(default='present', choices=['present', 'absent']), interval=dict(required=False, type='str'), ttl=dict(required=False, type='str'), + http=dict(required=False, type='str'), + timeout=dict(required=False, type='str'), tags=dict(required=False, type='list'), - token=dict(required=False) + token=dict(required=False, no_log=True) ), supports_check_mode=False, ) - + test_dependencies(module) - + try: register_with_consul(module) - except ConnectionError, e: + except ConnectionError as e: module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( module.params.get('host'), module.params.get('port'), str(e))) - except Exception, e: + except Exception as e: module.fail_json(msg=str(e)) # import module snippets diff --git a/clustering/consul_acl.py b/clustering/consul_acl.py index c133704b64d..845c26f98fe 100644 --- a/clustering/consul_acl.py +++ b/clustering/consul_acl.py @@ -17,12 +17,16 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ module: consul_acl short_description: "manipulate consul acl keys and rules" description: - allows the addition, modification and deletion of ACL keys and associated - rules in a consul cluster via the agent. For more details on using and + rules in a consul cluster via the agent. For more details on using and configuring ACLs, see https://www.consul.io/docs/internals/acl.html.
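As a minimal sketch of combining rules with the scheme and validate_certs options added below (the token and rule values are placeholders):

    - consul_acl:
        mgmt_token: 'some_management_acl'
        scheme: https
        validate_certs: yes
        name: 'Foo access'
        rules:
          - key: 'foo'
            policy: read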
requirements: - "python >= 2.6" @@ -37,14 +41,16 @@ - a management token is required to manipulate the acl lists state: description: - - whether the ACL pair should be present or absent, defaults to present + - whether the ACL pair should be present or absent required: false choices: ['present', 'absent'] - type: + default: present + token_type: description: - the type of token that should be created, either management or - client, defaults to client + client choices: ['client', 'management'] + default: client name: description: - the name that should be associated with the acl key, this is opaque @@ -57,7 +63,7 @@ required: false rules: description: - - an list of the rules that should be associated with a given key/token. + - a list of the rules that should be associated with a given token. required: false host: description: @@ -69,6 +75,18 @@ - the port on which the consul agent is running required: false default: 8500 + scheme: + description: + - the protocol scheme on which the consul agent is running + required: false + default: http + version_added: "2.1" + validate_certs: + description: + - whether to verify the tls certificate of the consul agent + required: false + default: True + version_added: "2.1" """ EXAMPLES = ''' @@ -83,6 +101,19 @@ - key: 'private/foo' policy: deny + - name: create an acl with specific token with both key and service rules + consul_acl: + mgmt_token: 'some_management_acl' + name: 'Foo access' + token: 'some_client_token' + rules: + - key: 'foo' + policy: read + - service: '' + policy: write + - service: 'secret-' + policy: deny + - name: remove a token consul_acl: mgmt_token: 'some_management_acl' @@ -97,7 +128,7 @@ import consul from requests.exceptions import ConnectionError python_consul_installed = True -except ImportError, e: +except ImportError: python_consul_installed = False try: @@ -134,8 +165,6 @@ def update_acl(module): if token: existing_rules = load_rules_for_token(module, consul, token) supplied_rules = yml_to_rules(module, rules) - print existing_rules - print supplied_rules changed = not existing_rules == supplied_rules if changed: y = supplied_rules.to_hcl() @@ -148,18 +177,18 @@ try: rules = yml_to_rules(module, rules) if rules.are_rules(): - rules = rules.to_json() + rules = rules.to_hcl() else: rules = None token = consul.acl.create( name=name, type=token_type, rules=rules) changed = True - except Exception, e: + except Exception as e: module.fail_json( msg="No token returned, check your management key and that \ the host is in the acl datacenter %s" % e) - except Exception, e: + except Exception as e: module.fail_json(msg="Could not create/update acl %s" % e) module.exit_json(changed=changed, @@ -181,18 +210,17 @@ def remove_acl(module): module.exit_json(changed=changed, token=token) - def load_rules_for_token(module, consul_api, token): try: rules = Rules() info = consul_api.acl.info(token) if info and info['Rules']: - rule_set = to_ascii(info['Rules']) - for rule in hcl.loads(rule_set).values(): - for key, policy in rule.iteritems(): - rules.add_rule(Rule(key, policy['policy'])) + rule_set = hcl.loads(to_ascii(info['Rules'])) + for rule_type in rule_set: + for pattern, policy in rule_set[rule_type].iteritems(): + rules.add_rule(rule_type, Rule(pattern, policy['policy'])) return rules - except Exception, e: + except Exception as e: module.fail_json( msg="Could not load rule list from retrieved rule data %s, %s" % ( token, e)) @@ -208,52 +236,65 @@ def yml_to_rules(module, yml_rules): rules = Rules() if
yml_rules: for rule in yml_rules: - if not('key' in rule or 'policy' in rule): - module.fail_json(msg="a rule requires a key and a policy.") - rules.add_rule(Rule(rule['key'], rule['policy'])) + if ('key' in rule and 'policy' in rule): + rules.add_rule('key', Rule(rule['key'], rule['policy'])) + elif ('service' in rule and 'policy' in rule): + rules.add_rule('service', Rule(rule['service'], rule['policy'])) + elif ('event' in rule and 'policy' in rule): + rules.add_rule('event', Rule(rule['event'], rule['policy'])) + elif ('query' in rule and 'policy' in rule): + rules.add_rule('query', Rule(rule['query'], rule['policy'])) + else: + module.fail_json(msg="a rule requires a key/service/event or query and a policy.") return rules -template = '''key "%s" { +template = '''%s "%s" { policy = "%s" -}''' +} +''' + +RULE_TYPES = ['key', 'service', 'event', 'query'] class Rules: def __init__(self): self.rules = {} + for rule_type in RULE_TYPES: + self.rules[rule_type] = {} - def add_rule(self, rule): - self.rules[rule.key] = rule + def add_rule(self, rule_type, rule): + self.rules[rule_type][rule.pattern] = rule def are_rules(self): - return len(self.rules) > 0 - - def to_json(self): - rules = {} - for key, rule in self.rules.iteritems(): - rules[key] = {'policy': rule.policy} - return json.dumps({'keys': rules}) + return len(self) > 0 def to_hcl(self): rules = "" - for key, rule in self.rules.iteritems(): - rules += template % (key, rule.policy) - + for rule_type in RULE_TYPES: + for pattern, rule in self.rules[rule_type].iteritems(): + rules += template % (rule_type, pattern, rule.policy) return to_ascii(rules) + def __len__(self): + count = 0 + for rule_type in RULE_TYPES: + count += len(self.rules[rule_type]) + return count + def __eq__(self, other): if not (other or isinstance(other, self.__class__) - or len(other.rules) == len(self.rules)): + or len(other) == len(self)): return False - for name, other_rule in other.rules.iteritems(): - if not name in self.rules: - return False - rule = self.rules[name] + for rule_type in RULE_TYPES: + for name, other_rule in other.rules[rule_type].iteritems(): + if not name in self.rules[rule_type]: + return False + rule = self.rules[rule_type][name] - if not (rule and rule == other_rule): - return False + if not (rule and rule == other_rule): + return False return True def __str__(self): @@ -261,58 +302,63 @@ def __str__(self): class Rule: - def __init__(self, key, policy): - self.key = key + def __init__(self, pattern, policy): + self.pattern = pattern self.policy = policy def __eq__(self, other): return (isinstance(other, self.__class__) - and self.key == other.key + and self.pattern == other.pattern and self.policy == other.policy) + def __hash__(self): - return hash(self.key) ^ hash(self.policy) + return hash(self.pattern) ^ hash(self.policy) def __str__(self): - return '%s %s' % (self.key, self.policy) + return '%s %s' % (self.pattern, self.policy) def get_consul_api(module, token=None): if not token: - token = token = module.params.get('token') + token = module.params.get('token') return consul.Consul(host=module.params.get('host'), port=module.params.get('port'), + scheme=module.params.get('scheme'), + verify=module.params.get('validate_certs'), token=token) def test_dependencies(module): if not python_consul_installed: module.fail_json(msg="python-consul required for this module. 
"\ "see http://python-consul.readthedocs.org/en/latest/#installation") - + if not pyhcl_installed: module.fail_json( msg="pyhcl required for this module."\ " see https://pypi.python.org/pypi/pyhcl") def main(): argument_spec = dict( - mgmt_token=dict(required=True), + mgmt_token=dict(required=True, no_log=True), host=dict(default='localhost'), + scheme=dict(required=False, default='http'), + validate_certs=dict(required=False, type='bool', default=True), name=dict(required=False), port=dict(default=8500, type='int'), rules=dict(default=None, required=False, type='list'), state=dict(default='present', choices=['present', 'absent']), - token=dict(required=False), + token=dict(required=False, no_log=True), token_type=dict( required=False, choices=['client', 'management'], default='client') ) module = AnsibleModule(argument_spec, supports_check_mode=False) test_dependencies(module) - + try: execute(module) - except ConnectionError, e: + except ConnectionError as e: module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( module.params.get('host'), module.params.get('port'), str(e))) - except Exception, e: + except Exception as e: module.fail_json(msg=str(e)) # import module snippets diff --git a/clustering/consul_kv.py b/clustering/consul_kv.py index 06dd55b71fc..1f3db18359c 100644 --- a/clustering/consul_kv.py +++ b/clustering/consul_kv.py @@ -17,6 +17,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ module: consul_kv short_description: Manipulate entries in the key/value store of a consul cluster. @@ -99,6 +103,18 @@ - the port on which the consul agent is running required: false default: 8500 + scheme: + description: + - the protocol scheme on which the consul agent is running + required: false + default: http + version_added: "2.1" + validate_certs: + description: + - whether to verify the tls certificate of the consul agent + required: false + default: True + version_added: "2.1" """ @@ -118,20 +134,22 @@ consul_kv: key: ansible/groups/dc1/somenode value: 'top_secret' + + - name: Register a key/value pair with an associated session + consul_kv: + key: stg/node/server_birthday + value: 20160509 + session: "{{ sessionid }}" + state: acquire ''' import sys -try: - import json -except ImportError: - import simplejson as json - try: import consul from requests.exceptions import ConnectionError python_consul_installed = True -except ImportError, e: +except ImportError: python_consul_installed = False from requests.exceptions import ConnectionError @@ -150,6 +168,8 @@ def execute(module): def lock(module, state): + consul_api = get_consul_api(module) + session = module.params.get('session') key = module.params.get('key') value = module.params.get('value') @@ -159,18 +179,22 @@ def lock(module, state): msg='%s of lock for %s requested but no session supplied' % (state, key)) - if state == 'acquire': - successful = consul_api.kv.put(key, value, - cas=module.params.get('cas'), - acquire=session, - flags=module.params.get('flags')) - else: - successful = consul_api.kv.put(key, value, - cas=module.params.get('cas'), - release=session, - flags=module.params.get('flags')) + index, existing = consul_api.kv.get(key) + + changed = not existing or (existing and existing['Value'] != value) + if changed and not module.check_mode: + if state == 'acquire': + changed = consul_api.kv.put(key, value, + 
cas=module.params.get('cas'), + acquire=session, + flags=module.params.get('flags')) + else: + changed = consul_api.kv.put(key, value, + cas=module.params.get('cas'), + release=session, + flags=module.params.get('flags')) - module.exit_json(changed=successful, + module.exit_json(changed=changed, index=index, key=key) @@ -223,13 +247,15 @@ def remove_value(module): def get_consul_api(module, token=None): return consul.Consul(host=module.params.get('host'), port=module.params.get('port'), + scheme=module.params.get('scheme'), + verify=module.params.get('validate_certs'), token=module.params.get('token')) def test_dependencies(module): if not python_consul_installed: module.fail_json(msg="python-consul required for this module. "\ "see http://python-consul.readthedocs.org/en/latest/#installation") - + def main(): argument_spec = dict( @@ -237,24 +263,27 @@ def main(): flags=dict(required=False), key=dict(required=True), host=dict(default='localhost'), + scheme=dict(required=False, default='http'), + validate_certs=dict(required=False, type='bool', default=True), port=dict(default=8500, type='int'), recurse=dict(required=False, type='bool'), - retrieve=dict(required=False, default=True), - state=dict(default='present', choices=['present', 'absent']), - token=dict(required=False, default='anonymous'), - value=dict(required=False) + retrieve=dict(required=False, type='bool', default=True), + state=dict(default='present', choices=['present', 'absent', 'acquire', 'release']), + token=dict(required=False, no_log=True), + value=dict(required=False), + session=dict(required=False) ) module = AnsibleModule(argument_spec, supports_check_mode=False) test_dependencies(module) - + try: execute(module) - except ConnectionError, e: + except ConnectionError as e: module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( module.params.get('host'), module.params.get('port'), str(e))) - except Exception, e: + except Exception as e: module.fail_json(msg=str(e)) diff --git a/clustering/consul_session.py b/clustering/consul_session.py index c298ea7fa57..e2c23c45dc5 100644 --- a/clustering/consul_session.py +++ b/clustering/consul_session.py @@ -1,4 +1,5 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # # (c) 2015, Steve Gargan # @@ -17,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ module: consul_session short_description: "manipulate consul sessions" @@ -30,7 +35,7 @@ - python-consul - requests version_added: "2.0" -author: "Steve Gargan (@sgargan)" +author: "Steve Gargan @sgargan" options: state: description: @@ -54,9 +59,8 @@ description: - the optional lock delay that can be attached to the session when it is created. Locks for invalidated sessions are blocked from being - acquired until this delay has expired. Valid units for delays - include 'ns', 'us', 'ms', 's', 'm', 'h' - default: 15s + acquired until this delay has expired.
Durations are in seconds + default: 15 required: false node: description: @@ -88,13 +92,33 @@ - the port on which the consul agent is running required: false default: 8500 + scheme: + description: + - the protocol scheme on which the consul agent is running + required: false + default: http + version_added: "2.1" + validate_certs: + description: + - whether to verify the tls certificate of the consul agent + required: false + default: True + version_added: "2.1" + behavior: + description: + - the optional behavior that can be attached to the session when it + is created. This can be set to either ‘release’ or ‘delete’. This + controls the behavior when a session is invalidated. + default: release + required: false + version_added: "2.2" """ EXAMPLES = ''' - name: register basic session with consul consul_session: name: session1 - + - name: register a session with an existing check consul_session: name: session_with_check @@ -113,13 +137,11 @@ consul_session: state=list ''' -import sys - try: import consul from requests.exceptions import ConnectionError python_consul_installed = True -except ImportError, e: +except ImportError: python_consul_installed = False def execute(module): @@ -138,10 +160,10 @@ def lookup_sessions(module): datacenter = module.params.get('datacenter') state = module.params.get('state') - consul = get_consul_api(module) + consul_client = get_consul_api(module) try: if state == 'list': - sessions_list = consul.session.list(dc=datacenter) + sessions_list = consul_client.session.list(dc=datacenter) #ditch the index, this can be grabbed from the results if sessions_list and sessions_list[1]: sessions_list = sessions_list[1] @@ -152,7 +174,7 @@ if not node: module.fail_json( msg="node name is required to retrieve sessions for node") - sessions = consul.session.node(node, dc=datacenter) + sessions = consul_client.session.node(node, dc=datacenter) module.exit_json(changed=True, node=node, sessions=sessions) @@ -162,77 +184,67 @@ module.fail_json( msg="session_id is required to retrieve individual session info") - session_by_id = consul.session.info(session_id, dc=datacenter) + session_by_id = consul_client.session.info(session_id, dc=datacenter) module.exit_json(changed=True, session_id=session_id, sessions=session_by_id) - except Exception, e: + except Exception as e: module.fail_json(msg="Could not retrieve session info %s" % e) def update_session(module): name = module.params.get('name') - session_id = module.params.get('id') delay = module.params.get('delay') checks = module.params.get('checks') datacenter = module.params.get('datacenter') node = module.params.get('node') + behavior = module.params.get('behavior') - consul = get_consul_api(module) - changed = True + consul_client = get_consul_api(module) try: - - session = consul.session.create( + session = consul_client.session.create( name=name, + behavior=behavior, node=node, - lock_delay=validate_duration('delay', delay), + lock_delay=delay, dc=datacenter, checks=checks ) module.exit_json(changed=True, session_id=session, name=name, + behavior=behavior, delay=delay, checks=checks, node=node) - except Exception, e: + except Exception as e: module.fail_json(msg="Could not create/update session %s" % e) def remove_session(module): session_id = module.params.get('id') - if not session_id: module.fail_json(msg="""A session id must be supplied in order to remove a session.""") - consul = get_consul_api(module) - changed = False + consul_client = get_consul_api(module)
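# For example (playbook values are illustrative), a session created with
# behavior=delete has any locks it holds deleted, rather than released,
# when the session is invalidated:
#
#   - consul_session:
#       name: session1
#       behavior: delete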
try: - session = consul.session.destroy(session_id) + consul_client.session.destroy(session_id) module.exit_json(changed=True, session_id=session_id) - except Exception, e: + except Exception as e: module.fail_json(msg="Could not remove session with id '%s' %s" % ( session_id, e)) -def validate_duration(name, duration): - if duration: - duration_units = ['ns', 'us', 'ms', 's', 'm', 'h'] - if not any((duration.endswith(suffix) for suffix in duration_units)): - raise Exception('Invalid %s %s you must specify units (%s)' % - (name, duration, ', '.join(duration_units))) - return duration - def get_consul_api(module): return consul.Consul(host=module.params.get('host'), port=module.params.get('port')) - + def test_dependencies(module): if not python_consul_installed: module.fail_json(msg="python-consul required for this module. "\ @@ -241,26 +253,31 @@ def test_dependencies(module): def main(): argument_spec = dict( checks=dict(default=None, required=False, type='list'), - delay=dict(required=False,type='str', default='15s'), + delay=dict(required=False,type='int', default='15'), + behavior=dict(required=False,type='str', default='release', + choices=['release', 'delete']), host=dict(default='localhost'), port=dict(default=8500, type='int'), + scheme=dict(required=False, default='http'), + validate_certs=dict(required=False, default=True), id=dict(required=False), name=dict(required=False), node=dict(required=False), state=dict(default='present', - choices=['present', 'absent', 'info', 'node', 'list']) + choices=['present', 'absent', 'info', 'node', 'list']), + datacenter=dict(required=False) ) module = AnsibleModule(argument_spec, supports_check_mode=False) - + test_dependencies(module) - + try: execute(module) - except ConnectionError, e: + except ConnectionError as e: module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( module.params.get('host'), module.params.get('port'), str(e))) - except Exception, e: + except Exception as e: module.fail_json(msg=str(e)) # import module snippets diff --git a/clustering/kubernetes.py b/clustering/kubernetes.py new file mode 100644 index 00000000000..20514b0fe0a --- /dev/null +++ b/clustering/kubernetes.py @@ -0,0 +1,411 @@ +#!/usr/bin/python +# Copyright 2015 Google Inc. All Rights Reserved. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: kubernetes +version_added: "2.1" +short_description: Manage Kubernetes resources. +description: + - This module can manage Kubernetes resources on an existing cluster using + the Kubernetes server API. Users can specify in-line API data, or + specify an existing Kubernetes YAML file. 
Currently, this module + only supports HTTP Basic Auth, + only supports 'strategic merge' for update (http://goo.gl/fCPYxT), and + SSL certs are not working (use 'validate_certs=off' to disable) +options: + api_endpoint: + description: + - The IPv4 API endpoint of the Kubernetes cluster. + required: true + default: null + aliases: ["endpoint"] + inline_data: + description: + - The Kubernetes YAML data to send to the API I(endpoint). This option is + mutually exclusive with C('file_reference'). + required: true + default: null + file_reference: + description: + - Specify full path to a Kubernetes YAML file to send to API I(endpoint). + This option is mutually exclusive with C('inline_data'). + required: false + default: null + certificate_authority_data: + description: + - Certificate Authority data for Kubernetes server. Should be in either + standard PEM format or base64 encoded PEM data. Note that certificate + verification is broken until ansible supports a version of + 'match_hostname' that can match the IP address against the CA data. + required: false + default: null + state: + description: + - The desired action to take on the Kubernetes data. + required: true + default: "present" + choices: ["present", "absent", "update", "replace"] + url_password: + description: + - The HTTP Basic Auth password for the API I(endpoint). This should be set + unless using the C('insecure') option. + default: null + aliases: ["password"] + url_username: + description: + - The HTTP Basic Auth username for the API I(endpoint). This should be set + unless using the C('insecure') option. + default: "admin" + aliases: ["username"] + insecure: + description: + - "Reverts the connection to using HTTP instead of HTTPS. This option should + only be used when executing the M('kubernetes') module local to the Kubernetes + cluster using the insecure local port (localhost:8080 by default)." + validate_certs: + description: + - Enable/disable certificate validation. Note that this is set to + C(false) until Ansible can support IP address based certificate + hostname matching (exists in >= python3.5.0). + required: false + default: false + +author: "Eric Johnson (@erjohnso) " +''' + +EXAMPLES = ''' +# Create a new namespace with in-line YAML. +- name: Create a kubernetes namespace + kubernetes: + api_endpoint: 123.45.67.89 + url_username: admin + url_password: redacted + inline_data: + kind: Namespace + apiVersion: v1 + metadata: + name: ansible-test + labels: + label_env: production + label_ver: latest + annotations: + a1: value1 + a2: value2 + state: present + +# Create a new namespace from a YAML file. +- name: Create a kubernetes namespace + kubernetes: + api_endpoint: 123.45.67.89 + url_username: admin + url_password: redacted + file_reference: /path/to/create_namespace.yaml + state: present + +# Do the same thing, but using the insecure localhost port +- name: Create a kubernetes namespace + kubernetes: + api_endpoint: 123.45.67.89 + insecure: true + file_reference: /path/to/create_namespace.yaml + state: present + +''' + +RETURN = ''' +# Example response from creating a Kubernetes Namespace. +api_response: + description: Raw response from Kubernetes API, content varies with API.
+ returned: success + type: dictionary + contains: + apiVersion: "v1" + kind: "Namespace" + metadata: + creationTimestamp: "2016-01-04T21:16:32Z" + name: "test-namespace" + resourceVersion: "509635" + selfLink: "/api/v1/namespaces/test-namespace" + uid: "6dbd394e-b328-11e5-9a02-42010af0013a" + spec: + finalizers: + - kubernetes + status: + phase: "Active" +''' + +import base64 + +try: + import yaml + has_lib_yaml = True +except ImportError: + has_lib_yaml = False + +############################################################################ +############################################################################ +# For API coverage, this Ansible module provides capability to operate on +# all Kubernetes objects that support a "create" call (except for 'Events'). +# In order to obtain a valid list of Kubernetes objects, the v1 spec file +# was referenced and the below python script was used to parse the JSON +# spec file, extract only the objects with a description starting with +# 'create a'. The script then iterates over all of these base objects +# to get the endpoint URL and was used to generate the KIND_URL map. +# +# import json +# from urllib2 import urlopen +# +# r = urlopen("https://raw.githubusercontent.com/kubernetes" +# "/kubernetes/master/api/swagger-spec/v1.json") +# v1 = json.load(r) +# +# apis = {} +# for a in v1['apis']: +# p = a['path'] +# for o in a['operations']: +# if o["summary"].startswith("create a") and o["type"] != "v1.Event": +# apis[o["type"]] = p +# +# def print_kind_url_map(): +# results = [] +# for a in apis.keys(): +# results.append('"%s": "%s"' % (a[3:].lower(), apis[a])) +# results.sort() +# print "KIND_URL = {" +# print ",\n".join(results) +# print "}" +# +# if __name__ == '__main__': +# print_kind_url_map() +############################################################################ +############################################################################ + +KIND_URL = { + "binding": "/api/v1/namespaces/{namespace}/bindings", + "endpoints": "/api/v1/namespaces/{namespace}/endpoints", + "limitrange": "/api/v1/namespaces/{namespace}/limitranges", + "namespace": "/api/v1/namespaces", + "node": "/api/v1/nodes", + "persistentvolume": "/api/v1/persistentvolumes", + "persistentvolumeclaim": "/api/v1/namespaces/{namespace}/persistentvolumeclaims", # NOQA + "pod": "/api/v1/namespaces/{namespace}/pods", + "podtemplate": "/api/v1/namespaces/{namespace}/podtemplates", + "replicationcontroller": "/api/v1/namespaces/{namespace}/replicationcontrollers", # NOQA + "resourcequota": "/api/v1/namespaces/{namespace}/resourcequotas", + "secret": "/api/v1/namespaces/{namespace}/secrets", + "service": "/api/v1/namespaces/{namespace}/services", + "serviceaccount": "/api/v1/namespaces/{namespace}/serviceaccounts" +} +USER_AGENT = "ansible-k8s-module/0.0.1" + + +# TODO(erjohnso): SSL Certificate validation is currently unsupported. +# It can be made to work when the following are true: +# - Ansible consistently uses a "match_hostname" that supports IP Address +# matching. This is now true in >= python3.5.0. Currently, this feature +# is not yet available in backports.ssl_match_hostname (still 3.4). +# - Ansible allows passing in the self-signed CA cert that is created with +# a kubernetes master. The lib/ansible/module_utils/urls.py method, +# SSLValidationHandler.get_ca_certs() needs a way for the Kubernetes +# CA cert to be passed in and included in the generated bundle file.
+# When this is fixed, the following changes can be made to this module, +# - Remove the 'return' statement in line 254 below +# - Set 'required=true' for certificate_authority_data and ensure that +# ansible's SSLValidationHandler.get_ca_certs() can pick up this CA cert +# - Set 'required=true' for the validate_certs param. + +def decode_cert_data(module): + return + d = module.params.get("certificate_authority_data") + if d and not d.startswith("-----BEGIN"): + module.params["certificate_authority_data"] = base64.b64decode(d) + + +def api_request(module, url, method="GET", headers=None, data=None): + body = None + if data: + data = json.dumps(data) + response, info = fetch_url(module, url, method=method, headers=headers, data=data) + if int(info['status']) == -1: + module.fail_json(msg="Failed to execute the API request: %s" % info['msg'], url=url, method=method, headers=headers) + if response is not None: + body = json.loads(response.read()) + return info, body + + +def k8s_create_resource(module, url, data): + info, body = api_request(module, url, method="POST", data=data, headers={"Content-Type": "application/json"}) + if info['status'] == 409: + name = data["metadata"].get("name", None) + info, body = api_request(module, url + "/" + name) + return False, body + elif info['status'] >= 400: + module.fail_json(msg="failed to create the resource: %s" % info['msg'], url=url) + return True, body + + +def k8s_delete_resource(module, url, data): + name = data.get('metadata', {}).get('name') + if name is None: + module.fail_json(msg="Missing a named resource in object metadata when trying to remove a resource") + + url = url + '/' + name + info, body = api_request(module, url, method="DELETE") + if info['status'] == 404: + return False, "Resource name '%s' already absent" % name + elif info['status'] >= 400: + module.fail_json(msg="failed to delete the resource '%s': %s" % (name, info['msg']), url=url) + return True, "Successfully deleted resource name '%s'" % name + + +def k8s_replace_resource(module, url, data): + name = data.get('metadata', {}).get('name') + if name is None: + module.fail_json(msg="Missing a named resource in object metadata when trying to replace a resource") + + headers = {"Content-Type": "application/json"} + url = url + '/' + name + info, body = api_request(module, url, method="PUT", data=data, headers=headers) + if info['status'] == 409: + name = data["metadata"].get("name", None) + info, body = api_request(module, url + "/" + name) + return False, body + elif info['status'] >= 400: + module.fail_json(msg="failed to replace the resource '%s': %s" % (name, info['msg']), url=url) + return True, body + + +def k8s_update_resource(module, url, data): + name = data.get('metadata', {}).get('name') + if name is None: + module.fail_json(msg="Missing a named resource in object metadata when trying to update a resource") + + headers = {"Content-Type": "application/strategic-merge-patch+json"} + url = url + '/' + name + info, body = api_request(module, url, method="PATCH", data=data, headers=headers) + if info['status'] == 409: + name = data["metadata"].get("name", None) + info, body = api_request(module, url + "/" + name) + return False, body + elif info['status'] >= 400: + module.fail_json(msg="failed to update the resource '%s': %s" % (name, info['msg']), url=url) + return True, body + + +def main(): + module = AnsibleModule( + argument_spec=dict( + http_agent=dict(default=USER_AGENT), + + url_username=dict(default="admin", aliases=["username"]), + 
url_password=dict(default="", no_log=True, aliases=["password"]), + force_basic_auth=dict(default="yes"), + validate_certs=dict(default=False, type='bool'), + certificate_authority_data=dict(required=False), + insecure=dict(default=False, type='bool'), + api_endpoint=dict(required=True), + file_reference=dict(required=False), + inline_data=dict(required=False), + state=dict(default="present", choices=["present", "absent", "update", "replace"]) + ), + mutually_exclusive = (('file_reference', 'inline_data'), + ('url_username', 'insecure'), + ('url_password', 'insecure')), + required_one_of = (('file_reference', 'inline_data'),), + ) + + if not has_lib_yaml: + module.fail_json(msg="missing python library: yaml") + + decode_cert_data(module) + + api_endpoint = module.params.get('api_endpoint') + state = module.params.get('state') + insecure = module.params.get('insecure') + inline_data = module.params.get('inline_data') + file_reference = module.params.get('file_reference') + + if inline_data: + if not isinstance(inline_data, dict) and not isinstance(inline_data, list): + data = yaml.load(inline_data) + else: + data = inline_data + else: + try: + f = open(file_reference, "r") + data = [x for x in yaml.load_all(f)] + f.close() + if not data: + module.fail_json(msg="No valid data could be found.") + except: + module.fail_json(msg="The file '%s' was not found or contained invalid YAML/JSON data" % file_reference) + + # set the transport type and build the target endpoint url + transport = 'https' + if insecure: + transport = 'http' + + target_endpoint = "%s://%s" % (transport, api_endpoint) + + body = [] + changed = False + + # make sure the data is a list + if not isinstance(data, list): + data = [ data ] + + for item in data: + namespace = "default" + if item and 'metadata' in item: + namespace = item.get('metadata', {}).get('namespace', "default") + kind = item.get('kind', '').lower() + try: + url = target_endpoint + KIND_URL[kind] + except KeyError: + module.fail_json(msg="invalid resource kind specified in the data: '%s'" % kind) + url = url.replace("{namespace}", namespace) + else: + url = target_endpoint + + if state == 'present': + item_changed, item_body = k8s_create_resource(module, url, item) + elif state == 'absent': + item_changed, item_body = k8s_delete_resource(module, url, item) + elif state == 'replace': + item_changed, item_body = k8s_replace_resource(module, url, item) + elif state == 'update': + item_changed, item_body = k8s_update_resource(module, url, item) + + changed |= item_changed + body.append(item_body) + + module.exit_json(changed=changed, api_response=body) + + +# import module snippets +from ansible.module_utils.basic import * # NOQA +from ansible.module_utils.urls import * # NOQA + + +if __name__ == '__main__': + main() diff --git a/clustering/znode b/clustering/znode.py similarity index 87% rename from clustering/znode rename to clustering/znode.py index 142836281ea..44cdc2bc83b 100644 --- a/clustering/znode +++ b/clustering/znode.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ --- module: znode @@ -26,7 +30,7 @@ description: - A list of ZooKeeper servers (format '[server]:[port]'). required: true - path: + name: description: - The path of the znode. required: true @@ -50,26 +54,50 @@ - The amount of time to wait for a node to appear. 
default: 300 required: false + recursive: + description: + - Recursively delete node and all its children. + default: False + required: false + version_added: "2.1" requirements: - kazoo >= 2.1 + - python >= 2.6 author: "Trey Perry (@treyperry)" """ EXAMPLES = """ # Creating or updating a znode with a given value -- action: znode hosts=localhost:2181 name=/mypath value=myvalue state=present +- znode: + hosts: 'localhost:2181' + name: /mypath + value: myvalue + state: present # Getting the value and stat structure for a znode -- action: znode hosts=localhost:2181 name=/mypath op=get +- znode: + hosts: 'localhost:2181' + name: /mypath + op: get # Listing a particular znode's children -- action: znode hosts=localhost:2181 name=/zookeeper op=list +- znode: + hosts: 'localhost:2181' + name: /zookeeper + op: list # Waiting 20 seconds for a znode to appear at path /mypath -- action: znode hosts=localhost:2181 name=/mypath op=wait timeout=20 +- znode: + hosts: 'localhost:2181' + name: /mypath + op: wait + timeout: 20 # Deleting a znode at path /mypath -- action: znode hosts=localhost:2181 name=/mypath state=absent +- znode: + hosts: 'localhost:2181' + name: /mypath + state: absent """ try: @@ -89,7 +117,8 @@ def main(): value=dict(required=False, default=None, type='str'), op=dict(required=False, default=None, choices=['get', 'wait', 'list']), state=dict(choices=['present', 'absent']), - timeout=dict(required=False, default=300, type='int') + timeout=dict(required=False, default=300, type='int'), + recursive=dict(required=False, default=False, type='bool') ), supports_check_mode=False ) @@ -174,7 +203,7 @@ def wait(self): def _absent(self, znode): if self.exists(znode): - self.zk.delete(znode) + self.zk.delete(znode, recursive=self.module.params['recursive']) return True, {'changed': True, 'msg': 'The znode was deleted.'} else: return True, {'changed': False, 'msg': 'The znode does not exist.'} @@ -186,7 +215,7 @@ def _get(self, path): for i in dir(zstat): if not i.startswith('_'): attr = getattr(zstat, i) - if type(attr) in (int, str): + if isinstance(attr, (int, str)): stat_dict[i] = attr result = True, {'msg': 'The node was retrieved.', 'znode': path, 'value': value, 'stat': stat_dict} @@ -223,5 +252,5 @@ def _wait(self, path, timeout, interval=5): from ansible.module_utils.basic import * -main() - +if __name__ == '__main__': + main() diff --git a/commands/expect.py b/commands/expect.py index e8f7a049836..77dcdfdfa0a 100644 --- a/commands/expect.py +++ b/commands/expect.py @@ -27,6 +27,10 @@ HAS_PEXPECT = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: expect @@ -56,7 +60,9 @@ required: false responses: description: - - Mapping of expected string and string to respond with + - Mapping of expected string/regex and string to respond with. If the + response is a list, successive matches return successive + responses. List functionality is new in 2.1. required: true timeout: description: @@ -73,17 +79,48 @@ - If you want to run a command through the shell (say you are using C(<), C(>), C(|), etc), you must specify a shell in the command such as C(/bin/bash -c "/path/to/something | grep else") + - The question, or key, under I(responses) is a python regex match. Case + insensitive searches are indicated with a prefix of C(?i) + - By default, if a question is encountered multiple times, its string + response will be repeated.
diff --git a/commands/expect.py b/commands/expect.py index e8f7a049836..77dcdfdfa0a 100644 --- a/commands/expect.py +++ b/commands/expect.py @@ -27,6 +27,10 @@ HAS_PEXPECT = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: expect @@ -56,7 +60,9 @@ required: false responses: description: - - Mapping of expected string and string to respond with + - Mapping of expected string/regex and string to respond with. If the + response is a list, successive matches return successive + responses. List functionality is new in 2.1. required: true timeout: description: @@ -73,17 +79,48 @@ - If you want to run a command through the shell (say you are using C(<), C(>), C(|), etc), you must specify a shell in the command such as C(/bin/bash -c "/path/to/something | grep else") + - The question, or key, under I(responses) is a python regex match. Case + insensitive searches are indicated with a prefix of C(?i) + - By default, if a question is encountered multiple times, its string + response will be repeated. If you need different responses for successive + question matches, instead of a string response, use a list of strings as + the response. The list functionality is new in 2.1 author: "Matt Martz (@sivel)" ''' EXAMPLES = ''' +# Case insensitive password string match - expect: command: passwd username responses: (?i)password: "MySekretPa$$word" + +# Generic question with multiple different responses +- expect: + command: /path/to/custom/command + responses: + Question: + - response1 + - response2 + - response3 ''' +def response_closure(module, question, responses): + resp_gen = (u'%s\n' % r.rstrip('\n').decode() for r in responses) + + def wrapped(info): + try: + return resp_gen.next() + except StopIteration: + module.fail_json(msg="No remaining responses for '%s', " + "output was '%s'" % + (question, + info['child_result_list'][-1])) + + return wrapped + + def main(): module = AnsibleModule( argument_spec=dict( @@ -110,7 +147,12 @@ def main(): events = dict() for key, value in responses.iteritems(): - events[key.decode()] = u'%s\n' % value.rstrip('\n').decode() + if isinstance(value, list): + response = response_closure(module, key, value) + else: + response = u'%s\n' % value.rstrip('\n').decode() + + events[key.decode()] = response if args.strip() == '': module.fail_json(rc=256, msg="no command given") @@ -129,7 +171,6 @@ def main(): cmd=args, stdout="skipped, since %s exists" % v, changed=False, - stderr=False, rc=0 ) @@ -143,16 +184,32 @@ def main(): cmd=args, stdout="skipped, since %s does not exist" % v, changed=False, - stderr=False, rc=0 ) startd = datetime.datetime.now() try: - out, rc = pexpect.runu(args, timeout=timeout, withexitstatus=True, - events=events, cwd=chdir, echo=echo) - except pexpect.ExceptionPexpect, e: + try: + # Prefer pexpect.run from pexpect>=4 + out, rc = pexpect.run(args, timeout=timeout, withexitstatus=True, + events=events, cwd=chdir, echo=echo, + encoding='utf-8') + except TypeError: + # Use pexpect.runu in pexpect>=3.3,<4 + out, rc = pexpect.runu(args, timeout=timeout, withexitstatus=True, + events=events, cwd=chdir, echo=echo) + except (TypeError, AttributeError): + e = get_exception() + # This should catch all insufficient versions of pexpect + # We deem them insufficient for their lack of ability to specify + # to not echo responses via the run/runu functions, which would + # potentially leak sensitive information + module.fail_json(msg='Insufficient version of pexpect installed ' + '(%s), this module requires pexpect>=3.3. ' + 'Error was %s' % (pexpect.__version__, e)) + except pexpect.ExceptionPexpect: + e = get_exception() + module.fail_json(msg='%s' % e) endd = datetime.datetime.now() @@ -161,7 +218,7 @@ def main(): if out is None: out = '' - module.exit_json( + ret = dict( cmd=args, stdout=out.rstrip('\r\n'), rc=rc, @@ -171,7 +228,15 @@ def main(): changed=True, ) + if rc is not None: + module.exit_json(**ret) + else: + ret['msg'] = 'command exceeded timeout' + module.fail_json(**ret) + # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.pycompat24 import get_exception -main() +if __name__ == '__main__': + main()
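response_closure() above is just a generator captured in a closure: each time pexpect matches the same question, the callback hands back the next canned answer, and the module fails once the list runs dry. The mechanism in isolation, stripped of Ansible (Python 3 spelling; names are illustrative):

    def make_responder(responses):
        gen = (r + '\n' for r in responses)

        def respond(info):
            # pexpect invokes the callable with a dict describing the child;
            # returning a string sends it to the spawned process.
            try:
                return next(gen)
            except StopIteration:
                raise RuntimeError('no responses left for this question')

        return respond

    responder = make_responder(['response1', 'response2'])
    print(responder({}))  # 'response1\n'
    print(responder({}))  # 'response2\n'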
diff --git a/crypto/__init__.py b/crypto/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/crypto/openssl_privatekey.py b/crypto/openssl_privatekey.py new file mode 100644 index 00000000000..d643142c653 --- /dev/null +++ b/crypto/openssl_privatekey.py @@ -0,0 +1,258 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Yanis Guenane + +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from ansible.module_utils.basic import * + +try: + from OpenSSL import crypto +except ImportError: + pyopenssl_found = False +else: + pyopenssl_found = True + + +import os + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: openssl_privatekey +author: "Yanis Guenane (@Spredzy)" +version_added: "2.3" +short_description: Generate OpenSSL private keys. +description: + - "This module allows one to (re)generate OpenSSL private keys. It uses + the pyOpenSSL python library to interact with openssl. One can generate + either RSA or DSA private keys. Keys are generated in PEM format." +requirements: + - "python-pyOpenSSL" +options: + state: + required: false + default: "present" + choices: [ present, absent ] + description: + - Whether the private key should exist or not, taking action if the state is different from what is stated. + size: + required: false + default: 4096 + description: + - Size (in bits) of the TLS/SSL key to generate + type: + required: false + default: "RSA" + choices: [ RSA, DSA ] + description: + - The algorithm used to generate the TLS/SSL private key + force: + required: false + default: False + choices: [ True, False ] + description: + - Should the key be regenerated even if it already exists + path: + required: true + description: + - Name of the file in which the generated TLS/SSL private key will be written. It will have 0600 mode. +''' + +EXAMPLES = ''' +# Generate an OpenSSL private key with the default values (4096 bits, RSA) +# and no public key +- openssl_privatekey: + path: /etc/ssl/private/ansible.com.pem + +# Generate an OpenSSL private key with a different size (2048 bits) +- openssl_privatekey: + path: /etc/ssl/private/ansible.com.pem + size: 2048 + +# Force regenerate an OpenSSL private key if it already exists +- openssl_privatekey: + path: /etc/ssl/private/ansible.com.pem + force: True + +# Generate an OpenSSL private key with a different algorithm (DSA) +- openssl_privatekey: + path: /etc/ssl/private/ansible.com.pem + type: DSA +'''
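Under those examples the module is a few pyOpenSSL calls. A minimal sketch of the underlying key generation, assuming pyOpenSSL is installed (the output path is illustrative):

    from OpenSSL import crypto

    key = crypto.PKey()
    key.generate_key(crypto.TYPE_RSA, 2048)  # crypto.TYPE_DSA for 'type: DSA'

    # PEM serialization, the same format the module writes to 'path'.
    pem = crypto.dump_privatekey(crypto.FILETYPE_PEM, key)
    with open('/tmp/ansible.com.pem', 'wb') as f:
        f.write(pem)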
+ +RETURN = ''' +size: + description: Size (in bits) of the TLS/SSL private key + returned: + - changed + - success + type: integer + sample: 4096 +type: + description: Algorithm used to generate the TLS/SSL private key + returned: + - changed + - success + type: string + sample: RSA +filename: + description: Path to the generated TLS/SSL private key file + returned: + - changed + - success + type: string + sample: /etc/ssl/private/ansible.com.pem +''' + +class PrivateKeyError(Exception): + pass + +class PrivateKey(object): + + def __init__(self, module): + self.size = module.params['size'] + self.state = module.params['state'] + self.name = os.path.basename(module.params['path']) + self.type = module.params['type'] + self.force = module.params['force'] + self.path = module.params['path'] + self.mode = module.params['mode'] + self.changed = True + self.check_mode = module.check_mode + + + def generate(self, module): + """Generate a keypair.""" + + if not os.path.exists(self.path) or self.force: + self.privatekey = crypto.PKey() + + if self.type == 'RSA': + crypto_type = crypto.TYPE_RSA + else: + crypto_type = crypto.TYPE_DSA + + try: + self.privatekey.generate_key(crypto_type, self.size) + except (TypeError, ValueError): + raise PrivateKeyError(get_exception()) + + try: + privatekey_file = os.open(self.path, + os.O_WRONLY | os.O_CREAT | os.O_TRUNC, + self.mode) + + os.write(privatekey_file, crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey)) + os.close(privatekey_file) + except IOError: + self.remove() + raise PrivateKeyError(get_exception()) + else: + self.changed = False + + file_args = module.load_file_common_arguments(module.params) + if module.set_fs_attributes_if_different(file_args, False): + self.changed = True
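generate() opens the key file with os.open() rather than open() so the 0600 mode is applied at creation time, instead of writing the key and then tightening permissions afterwards. The same pattern in isolation (path and payload are illustrative):

    import os

    KEY_PATH = '/tmp/example.pem'

    # O_CREAT honours the mode argument (subject to the process umask) only
    # when the file is created, so the key never sits on disk world-readable.
    fd = os.open(KEY_PATH, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
    try:
        os.write(fd, b'-----BEGIN PRIVATE KEY-----\n...')
    finally:
        os.close(fd)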
+ + + def remove(self): + """Remove the private key from the filesystem.""" + + try: + os.remove(self.path) + except OSError: + e = get_exception() + if e.errno != errno.ENOENT: + raise PrivateKeyError(e) + else: + self.changed = False + + + def dump(self): + """Serialize the object into a dictionary.""" + + result = { + 'size': self.size, + 'type': self.type, + 'filename': self.path, + 'changed': self.changed, + } + + return result + + +def main(): + + module = AnsibleModule( + argument_spec = dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + size=dict(default=4096, type='int'), + type=dict(default='RSA', choices=['RSA', 'DSA'], type='str'), + force=dict(default=False, type='bool'), + path=dict(required=True, type='path'), + ), + supports_check_mode = True, + add_file_common_args = True, + ) + + if not pyopenssl_found: + module.fail_json(msg='the python pyOpenSSL module is required') + + path = module.params['path'] + base_dir = os.path.dirname(module.params['path']) + + if not os.path.isdir(base_dir): + module.fail_json(name=base_dir, msg='The directory %s does not exist or the path is not a directory' % base_dir) + + if not module.params['mode']: + module.params['mode'] = int('0600', 8) + + private_key = PrivateKey(module) + if private_key.state == 'present': + + if module.check_mode: + result = private_key.dump() + result['changed'] = module.params['force'] or not os.path.exists(path) + module.exit_json(**result) + + try: + private_key.generate(module) + except PrivateKeyError: + e = get_exception() + module.fail_json(msg=str(e)) + else: + + if module.check_mode: + result = private_key.dump() + result['changed'] = os.path.exists(path) + module.exit_json(**result) + + try: + private_key.remove() + except PrivateKeyError: + e = get_exception() + module.fail_json(msg=str(e)) + + result = private_key.dump() + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/crypto/openssl_publickey.py b/crypto/openssl_publickey.py new file mode 100644 index 00000000000..6ac73dc975e --- /dev/null +++ b/crypto/openssl_publickey.py @@ -0,0 +1,231 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Yanis Guenane + +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from ansible.module_utils.basic import * + +try: + from OpenSSL import crypto +except ImportError: + pyopenssl_found = False +else: + pyopenssl_found = True + + +import os + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: openssl_publickey +author: "Yanis Guenane (@Spredzy)" +version_added: "2.3" +short_description: Generate an OpenSSL public key from its private key. +description: + - "This module allows one to (re)generate OpenSSL public keys from their private keys. + It uses the pyOpenSSL python library to interact with openssl. Keys are generated + in PEM format. This module works only if the version of PyOpenSSL is recent enough (> 16.0.0)" +requirements: + - "python-pyOpenSSL" +options: + state: + required: false + default: "present" + choices: [ present, absent ] + description: + - Whether the public key should exist or not, taking action if the state is different from what is stated. + force: + required: false + default: False + choices: [ True, False ] + description: + - Should the key be regenerated even if it already exists + path: + required: true + description: + - Name of the file in which the generated TLS/SSL public key will be written. + privatekey_path: + required: true + description: + - Path to the TLS/SSL private key from which to generate the public key. +''' + +EXAMPLES = ''' +# Generate an OpenSSL public key. +- openssl_publickey: + path: /etc/ssl/public/ansible.com.pem + privatekey_path: /etc/ssl/private/ansible.com.pem + +# Force regenerate an OpenSSL public key if it already exists +- openssl_publickey: + path: /etc/ssl/public/ansible.com.pem + privatekey_path: /etc/ssl/private/ansible.com.pem + force: True + +# Remove an OpenSSL public key +- openssl_publickey: + path: /etc/ssl/public/ansible.com.pem + privatekey_path: /etc/ssl/private/ansible.com.pem + state: absent +''' + +RETURN = ''' +privatekey: + description: Path to the TLS/SSL private key the public key was generated from + returned: + - changed + - success + type: string + sample: /etc/ssl/private/ansible.com.pem +filename: + description: Path to the generated TLS/SSL public key file + returned: + - changed + - success + type: string + sample: /etc/ssl/public/ansible.com.pem +''' + +class PublicKeyError(Exception): + pass + +class PublicKey(object): + + def __init__(self, module): + self.state = module.params['state'] + self.force = module.params['force'] + self.name = os.path.basename(module.params['path']) + self.path = module.params['path'] + self.privatekey_path = module.params['privatekey_path'] + self.changed = True + self.check_mode = module.check_mode + + + def generate(self, module): + """Generate the public key.""" + + if not os.path.exists(self.path) or self.force: + try: + privatekey_content = open(self.privatekey_path, 'r').read() + privatekey = crypto.load_privatekey(crypto.FILETYPE_PEM, privatekey_content) + publickey_file = open(self.path, 'w') + publickey_file.write(crypto.dump_publickey(crypto.FILETYPE_PEM, privatekey)) + publickey_file.close() + except (IOError, OSError): + e = get_exception() + raise PublicKeyError(e) + except AttributeError: + self.remove() + raise PublicKeyError('You need to have PyOpenSSL>=16.0.0 to generate public keys') + else: + self.changed = False + + file_args = module.load_file_common_arguments(module.params) + if module.set_fs_attributes_if_different(file_args, False): + self.changed = True
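The AttributeError branch in generate() exists because crypto.dump_publickey() only appeared in pyOpenSSL 16.0.0. A condensed sketch of the happy path (paths are illustrative):

    from OpenSSL import crypto

    with open('/etc/ssl/private/ansible.com.pem') as f:
        priv = crypto.load_privatekey(crypto.FILETYPE_PEM, f.read())

    # Raises AttributeError on pyOpenSSL < 16.0.0, which the module turns
    # into its 'You need to have PyOpenSSL>=16.0.0' failure.
    pub_pem = crypto.dump_publickey(crypto.FILETYPE_PEM, priv)
    with open('/etc/ssl/public/ansible.com.pem', 'wb') as f:
        f.write(pub_pem)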
+ + def remove(self): + """Remove the public key from the filesystem.""" + + try: + os.remove(self.path) + except OSError: + e = get_exception() + if e.errno != errno.ENOENT: + raise PublicKeyError(e) + else: + self.changed = False + + def dump(self): + """Serialize the object into a dictionary.""" + + result = { + 'privatekey': self.privatekey_path, + 'filename': self.path, + 'changed': self.changed, + } + + return result + + +def main(): + + module = AnsibleModule( + argument_spec = dict( + state=dict(default='present', choices=['present', 'absent'], type='str'), + force=dict(default=False, type='bool'), + path=dict(required=True, type='path'), + privatekey_path=dict(type='path'), + ), + supports_check_mode = True, + add_file_common_args = True, + ) + + if not pyopenssl_found: + module.fail_json(msg='the python pyOpenSSL module is required') + + path = module.params['path'] + privatekey_path = module.params['privatekey_path'] + base_dir = os.path.dirname(module.params['path']) + + if not os.path.isdir(base_dir): + module.fail_json(name=base_dir, msg='The directory %s does not exist or the path is not a directory' % base_dir) + + public_key = PublicKey(module) + if public_key.state == 'present': + + # This is only applicable when generating a new public key. + # When removing one the privatekey_path should not be required.
+ if not privatekey_path: + module.fail_json(msg='When generating a new public key you must specify a private key') + + if not os.path.exists(privatekey_path): + module.fail_json(name=privatekey_path, msg='The private key %s does not exist' % privatekey_path) + + if module.check_mode: + result = public_key.dump() + result['changed'] = module.params['force'] or not os.path.exists(path) + module.exit_json(**result) + + try: + public_key.generate(module) + except PublicKeyError: + e = get_exception() + module.fail_json(msg=str(e)) + else: + + if module.check_mode: + result = public_key.dump() + result['changed'] = os.path.exists(path) + module.exit_json(**result) + + try: + public_key.remove() + except PublicKeyError: + e = get_exception() + module.fail_json(msg=str(e)) + + result = public_key.dump() + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/database/influxdb/__init__.py b/database/influxdb/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/database/influxdb/influxdb_database.py b/database/influxdb/influxdb_database.py new file mode 100644 index 00000000000..2e1245850da --- /dev/null +++ b/database/influxdb/influxdb_database.py @@ -0,0 +1,198 @@ +#!/usr/bin/python + +# (c) 2016, Kamil Szczygiel +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
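The influxdb_database module that follows reduces to three influxdb-python client calls: list, create, drop. A minimal sketch of that client API, assuming a server on localhost:8086 (credentials and database name are illustrative):

    from influxdb import InfluxDBClient

    client = InfluxDBClient(host='localhost', port=8086,
                            username='root', password='root')

    # get_list_database() returns dicts such as {'name': 'mydb'},
    # which is how the module decides whether anything changed.
    existing = [db['name'] for db in client.get_list_database()]

    if 'mydb' not in existing:
        client.create_database('mydb')    # state=present
    # client.drop_database('mydb')        # state=absent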
+ +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: influxdb_database +short_description: Manage InfluxDB databases +description: + - Manage InfluxDB databases +version_added: 2.1 +author: "Kamil Szczygiel (@kamsz)" +requirements: + - "python >= 2.6" + - "influxdb >= 0.9" +options: + hostname: + description: + - The hostname or IP address on which InfluxDB server is listening + required: true + username: + description: + - Username that will be used to authenticate against InfluxDB server + default: root + required: false + password: + description: + - Password that will be used to authenticate against InfluxDB server + default: root + required: false + port: + description: + - The port on which InfluxDB server is listening + default: 8086 + required: false + database_name: + description: + - Name of the database that will be created/destroyed + required: true + state: + description: + - Determines if the database should be created or destroyed + choices: ['present', 'absent'] + default: present + required: false +''' + +EXAMPLES = ''' +# Example influxdb_database command from Ansible Playbooks +- name: Create database + influxdb_database: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + state: present + +- name: Destroy database + influxdb_database: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + state: absent + +- name: Create database using custom credentials + influxdb_database: + hostname: "{{influxdb_ip_address}}" + username: "{{influxdb_username}}" + password: "{{influxdb_password}}" + database_name: "{{influxdb_database_name}}" + state: present +''' + +RETURN = ''' +#only defaults +''' + +try: + import requests.exceptions + from influxdb import InfluxDBClient + from influxdb import exceptions + HAS_INFLUXDB = True +except ImportError: + HAS_INFLUXDB = False + + +def influxdb_argument_spec(): + return dict( + hostname=dict(required=True, type='str'), + port=dict(default=8086, type='int'), + username=dict(default='root', type='str'), + password=dict(default='root', type='str', no_log=True), + database_name=dict(required=True, type='str') + ) + + +def connect_to_influxdb(module): + hostname = module.params['hostname'] + port = module.params['port'] + username = module.params['username'] + password = module.params['password'] + database_name = module.params['database_name'] + + client = InfluxDBClient( + host=hostname, + port=port, + username=username, + password=password, + database=database_name + ) + return client + + +def find_database(module, client, database_name): + database = None + + try: + databases = client.get_list_database() + for db in databases: + if db['name'] == database_name: + database = db + break + except requests.exceptions.ConnectionError as e: + module.fail_json(msg=str(e)) + return database + + +def create_database(module, client, database_name): + if not module.check_mode: + try: + client.create_database(database_name) + except requests.exceptions.ConnectionError as e: + module.fail_json(msg=str(e)) + + module.exit_json(changed=True) + + +def drop_database(module, client, database_name): + if not module.check_mode: + try: + client.drop_database(database_name) + except exceptions.InfluxDBClientError as e: + module.fail_json(msg=e.content) + + module.exit_json(changed=True) + + +def main(): + argument_spec = influxdb_argument_spec() + argument_spec.update( + state=dict(default='present', type='str', 
choices=['present', 'absent']) + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + if not HAS_INFLUXDB: + module.fail_json(msg='influxdb python package is required for this module') + + state = module.params['state'] + database_name = module.params['database_name'] + + client = connect_to_influxdb(module) + database = find_database(module, client, database_name) + + if state == 'present': + if database: + module.exit_json(changed=False) + else: + create_database(module, client, database_name) + + if state == 'absent': + if database: + drop_database(module, client, database_name) + else: + module.exit_json(changed=False) + +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/database/influxdb/influxdb_retention_policy.py b/database/influxdb/influxdb_retention_policy.py new file mode 100644 index 00000000000..7541b3dfd0d --- /dev/null +++ b/database/influxdb/influxdb_retention_policy.py @@ -0,0 +1,241 @@ +#!/usr/bin/python + +# (c) 2016, Kamil Szczygiel +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: influxdb_retention_policy +short_description: Manage InfluxDB retention policies +description: + - Manage InfluxDB retention policies +version_added: 2.1 +author: "Kamil Szczygiel (@kamsz)" +requirements: + - "python >= 2.6" + - "influxdb >= 0.9" +options: + hostname: + description: + - The hostname or IP address on which InfluxDB server is listening + required: true + username: + description: + - Username that will be used to authenticate against InfluxDB server + default: root + required: false + password: + description: + - Password that will be used to authenticate against InfluxDB server + default: root + required: false + port: + description: + - The port on which InfluxDB server is listening + default: 8086 + required: false + database_name: + description: + - Name of the database where retention policy will be created + required: true + policy_name: + description: + - Name of the retention policy + required: true + duration: + description: + - Determines how long InfluxDB should keep the data + required: true + replication: + description: + - Determines how many independent copies of each point are stored in the cluster + required: true + default: + description: + - Sets the retention policy as default retention policy + required: true +''' + +EXAMPLES = ''' +# Example influxdb_retention_policy command from Ansible Playbooks +- name: create 1 hour retention policy + influxdb_retention_policy: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + policy_name: test + duration: 1h + replication: 1 + +- name: create 1 day retention policy + influxdb_retention_policy: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + policy_name: 
test + duration: 1d + replication: 1 + +- name: create 1 week retention policy + influxdb_retention_policy: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + policy_name: test + duration: 1w + replication: 1 + +- name: create infinite retention policy + influxdb_retention_policy: + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + policy_name: test + duration: INF + replication: 1 +''' + +RETURN = ''' +#only defaults +''' + +import re +try: + import requests.exceptions + from influxdb import InfluxDBClient + from influxdb import exceptions + HAS_INFLUXDB = True +except ImportError: + HAS_INFLUXDB = False + + +def influxdb_argument_spec(): + return dict( + hostname=dict(required=True, type='str'), + port=dict(default=8086, type='int'), + username=dict(default='root', type='str'), + password=dict(default='root', type='str', no_log=True), + database_name=dict(required=True, type='str') + ) + + +def connect_to_influxdb(module): + hostname = module.params['hostname'] + port = module.params['port'] + username = module.params['username'] + password = module.params['password'] + database_name = module.params['database_name'] + + client = InfluxDBClient( + host=hostname, + port=port, + username=username, + password=password, + database=database_name + ) + return client + + +def find_retention_policy(module, client): + database_name = module.params['database_name'] + policy_name = module.params['policy_name'] + retention_policy = None + + try: + retention_policies = client.get_list_retention_policies(database=database_name) + for policy in retention_policies: + if policy['name'] == policy_name: + retention_policy = policy + break + except requests.exceptions.ConnectionError as e: + module.fail_json(msg=str(e)) + return retention_policy + + +def create_retention_policy(module, client): + database_name = module.params['database_name'] + policy_name = module.params['policy_name'] + duration = module.params['duration'] + replication = module.params['replication'] + default = module.params['default'] + + if not module.check_mode: + try: + client.create_retention_policy(policy_name, duration, replication, database_name, default) + except exceptions.InfluxDBClientError as e: + module.fail_json(msg=e.content) + module.exit_json(changed=True) + + +def alter_retention_policy(module, client, retention_policy): + database_name = module.params['database_name'] + policy_name = module.params['policy_name'] + duration = module.params['duration'] + replication = module.params['replication'] + default = module.params['default'] + duration_regexp = re.compile('(\d+)([hdw]{1})|(^INF$){1}') + changed = False + + duration_lookup = duration_regexp.search(duration) + + if duration_lookup.group(2) == 'h': + influxdb_duration_format = '%s0m0s' % duration + elif duration_lookup.group(2) == 'd': + influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24) + elif duration_lookup.group(2) == 'w': + influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24 * 7) + elif duration == 'INF': + influxdb_duration_format = '0' + + if not retention_policy['duration'] == influxdb_duration_format or not retention_policy['replicaN'] == int(replication) or not retention_policy['default'] == default: + if not module.check_mode: + try: + client.alter_retention_policy(policy_name, database_name, duration, replication, default) + except exceptions.InfluxDBClientError as e: + module.fail_json(msg=e.content) + changed = True + 
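The comparison in alter_retention_policy() works because InfluxDB reports durations in an expanded hours/minutes/seconds shape, so the module first normalises the playbook's '1h'/'1d'/'1w'/'INF' spelling before diffing. A sketch of the same arithmetic on its own:

    import re

    def to_influx_duration(duration):
        # Mirrors the module's regex: digits plus one of h/d/w, or literal INF.
        if duration == 'INF':
            return '0'                      # InfluxDB reports infinite as 0
        value, unit = re.search(r'(\d+)([hdw])', duration).groups()
        if unit == 'h':
            return '%s0m0s' % duration      # '1h' -> '1h0m0s'
        hours = int(value) * 24 * (7 if unit == 'w' else 1)
        return '%sh0m0s' % hours            # '1d' -> '24h0m0s', '1w' -> '168h0m0s'

    print(to_influx_duration('1d'))  # 24h0m0s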
module.exit_json(changed=changed) + + +def main(): + argument_spec = influxdb_argument_spec() + argument_spec.update( + policy_name=dict(required=True, type='str'), + duration=dict(required=True, type='str'), + replication=dict(required=True, type='int'), + default=dict(default=False, type='bool') + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + if not HAS_INFLUXDB: + module.fail_json(msg='influxdb python package is required for this module') + + client = connect_to_influxdb(module) + retention_policy = find_retention_policy(module, client) + + if retention_policy: + alter_retention_policy(module, client, retention_policy) + else: + create_retention_policy(module, client) + +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/database/misc/mongodb_parameter.py b/database/misc/mongodb_parameter.py new file mode 100644 index 00000000000..d284d2cc3f8 --- /dev/null +++ b/database/misc/mongodb_parameter.py @@ -0,0 +1,239 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +""" +(c) 2016, Loic Blot +Sponsored by Infopro Digital. http://www.infopro-digital.com/ +Sponsored by E.T.A.I. http://www.etai.fr/ + +This file is part of Ansible + +Ansible is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +Ansible is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Ansible. If not, see . +""" + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: mongodb_parameter +short_description: Change an administrative parameter on a MongoDB server. +description: + - Change an administrative parameter on a MongoDB server. +version_added: "2.1" +options: + login_user: + description: + - The username used to authenticate with + required: false + default: null + login_password: + description: + - The password used to authenticate with + required: false + default: null + login_host: + description: + - The host running the database + required: false + default: localhost + login_port: + description: + - The port to connect to + required: false + default: 27017 + login_database: + description: + - The database where login credentials are stored + required: false + default: null + replica_set: + description: + - Replica set to connect to (automatically connects to primary for writes) + required: false + default: null + database: + description: + - The name of the database to add/remove the user from + required: true + ssl: + description: + - Whether to use an SSL connection when connecting to the database + required: false + default: false + param: + description: + - MongoDB administrative parameter to modify + required: true + value: + description: + - MongoDB administrative parameter value to set + required: true + param_type: + description: + - Define the parameter value (str, int) + required: false + default: str + +notes: + - Requires the pymongo Python package on the remote host, version 2.4.2+. This + can be installed using pip or the OS package manager. 
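The module documented here ultimately issues a single command against the admin database; the raw pymongo equivalent, assuming a local mongod and an illustrative parameter:

    from pymongo import MongoClient

    client = MongoClient('localhost', 27017)

    # setParameter returns the previous value under the 'was' key, which is
    # what the module compares against to report changed/unchanged.
    result = client.admin.command('setParameter', syncdelay=60)
    print(result.get('was'))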
@see http://api.mongodb.org/python/current/installation.html +requirements: [ "pymongo" ] +author: "Loic Blot (@nerzhul)" +''' + +EXAMPLES = ''' +# Set MongoDB syncdelay to 60 (this is an int) +- mongodb_parameter: + param: syncdelay + value: 60 + param_type: int +''' + +RETURN = ''' +before: + description: value before modification + returned: success + type: string +after: + description: value after modification + returned: success + type: string +''' + +import ConfigParser + +try: + from pymongo.errors import ConnectionFailure + from pymongo.errors import OperationFailure + from pymongo import version as PyMongoVersion + from pymongo import MongoClient +except ImportError: + try: # for older PyMongo 2.2 + from pymongo import Connection as MongoClient + except ImportError: + pymongo_found = False + else: + pymongo_found = True +else: + pymongo_found = True + + +# ========================================= +# MongoDB module specific support methods. +# + +def load_mongocnf(): + config = ConfigParser.RawConfigParser() + mongocnf = os.path.expanduser('~/.mongodb.cnf') + + try: + config.readfp(open(mongocnf)) + creds = dict( + user=config.get('client', 'user'), + password=config.get('client', 'pass') + ) + except (ConfigParser.NoOptionError, IOError): + return False + + return creds + + +# ========================================= +# Module execution. +# + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None), + login_password=dict(default=None, no_log=True), + login_host=dict(default='localhost'), + login_port=dict(default=27017, type='int'), + login_database=dict(default=None), + replica_set=dict(default=None), + param=dict(default=None, required=True), + value=dict(default=None, required=True), + param_type=dict(default="str", choices=['str', 'int']), + ssl=dict(default=False, type='bool'), + ) + ) + + if not pymongo_found: + module.fail_json(msg='the python pymongo module is required') + + login_user = module.params['login_user'] + login_password = module.params['login_password'] + login_host = module.params['login_host'] + login_port = module.params['login_port'] + login_database = module.params['login_database'] + + replica_set = module.params['replica_set'] + ssl = module.params['ssl'] + + param = module.params['param'] + param_type = module.params['param_type'] + value = module.params['value'] + + # Verify parameter is coherent with specified type + try: + if param_type == 'int': + value = int(value) + except ValueError: + e = get_exception() + module.fail_json(msg="value '%s' is not %s" % (value, param_type)) + + try: + if replica_set: + client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl) + else: + client = MongoClient(login_host, int(login_port), ssl=ssl) + + if login_user is None and login_password is None: + mongocnf_creds = load_mongocnf() + if mongocnf_creds is not False: + login_user = mongocnf_creds['user'] + login_password = mongocnf_creds['password'] + elif login_password is None or login_user is None: + module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') + + if login_user is not None and login_password is not None: + client.admin.authenticate(login_user, login_password, source=login_database) + + except ConnectionFailure: + e = get_exception() + module.fail_json(msg='unable to connect to database: %s' % str(e)) + + db = client.admin + + try: + after_value = db.command("setParameter", **{param: int(value)}) + except OperationFailure: + e = 
get_exception() + module.fail_json(msg="unable to change parameter: %s" % str(e)) + + if "was" not in after_value: + module.exit_json(changed=True, msg="Unable to determine old value, assume it changed.") + else: + module.exit_json(changed=(value != after_value["was"]), before=after_value["was"], + after=value) + + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/database/misc/mongodb_user.py b/database/misc/mongodb_user.py index 0529abdea09..7fbcf332268 100644 --- a/database/misc/mongodb_user.py +++ b/database/misc/mongodb_user.py @@ -19,6 +19,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: mongodb_user @@ -47,6 +51,12 @@ - The port to connect to required: false default: 27017 + login_database: + version_added: "2.0" + description: + - The database where login credentials are stored + required: false + default: null replica_set: version_added: "1.6" description: @@ -73,11 +83,19 @@ description: - Whether to use an SSL connection when connecting to the database default: False + ssl_cert_reqs: + version_added: "2.2" + description: + - Specifies whether a certificate is required from the other side of the connection, and whether it will be validated if provided. + required: false + default: "CERT_REQUIRED" + choices: ["CERT_REQUIRED", "CERT_OPTIONAL", "CERT_NONE"] roles: version_added: "1.3" description: - - "The database user roles valid values are one or more of the following: read, 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase', 'dbAdminAnyDatabase'" - - This param requires mongodb 2.4+ and pymongo 2.5+ + - "The database user roles valid values could either be one or more of the following strings: 'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase', 'dbAdminAnyDatabase'" + - "Or the following dictionary '{ db: DATABASE_NAME, role: ROLE_NAME }'." + - "This param requires pymongo 2.5+. If it is a string, mongodb 2.4+ is also required. If it is a dictionary, mongo 2.6+ is required." required: false default: "readWrite" state: @@ -104,23 +122,74 @@ EXAMPLES = ''' # Create 'burgers' database user with name 'bob' and password '12345'. -- mongodb_user: database=burgers name=bob password=12345 state=present +- mongodb_user: + database: burgers + name: bob + password: 12345 + state: present # Create a database user via SSL (MongoDB must be compiled with the SSL option and configured properly) -- mongodb_user: database=burgers name=bob password=12345 state=present ssl=True +- mongodb_user: + database: burgers + name: bob + password: 12345 + state: present + ssl: True # Delete 'burgers' database user with name 'bob'. 
-- mongodb_user: database=burgers name=bob state=absent +- mongodb_user: + database: burgers + name: bob + state: absent # Define more users with various specific roles (if not defined, no roles are assigned, and the user will be added via pre mongo 2.2 style) -- mongodb_user: database=burgers name=ben password=12345 roles='read' state=present -- mongodb_user: database=burgers name=jim password=12345 roles='readWrite,dbAdmin,userAdmin' state=present -- mongodb_user: database=burgers name=joe password=12345 roles='readWriteAnyDatabase' state=present +- mongodb_user: + database: burgers + name: ben + password: 12345 + roles: read + state: present +- mongodb_user: + database: burgers + name: jim + password: 12345 + roles: readWrite,dbAdmin,userAdmin + state: present +- mongodb_user: + database: burgers + name: joe + password: 12345 + roles: readWriteAnyDatabase + state: present # add a user to database in a replica set, the primary server is automatically discovered and written to -- mongodb_user: database=burgers name=bob replica_set=blecher password=12345 roles='readWriteAnyDatabase' state=present +- mongodb_user: + database: burgers + name: bob + replica_set: belcher + password: 12345 + roles: readWriteAnyDatabase + state: present + +# add a user 'oplog_reader' with read only access to the 'local' database on the replica_set 'belcher'. This is useful for oplog access (MONGO_OPLOG_URL). +# please notice the credentials must be added to the 'admin' database because the 'local' database is not synchronized and can't receive user credentials +# To log in with such a user, the connection string should be MONGO_OPLOG_URL="mongodb://oplog_reader:oplog_reader_password@server1,server2/local?authSource=admin" +# This syntax requires mongodb 2.6+ and pymongo 2.5+ +- mongodb_user: + login_user: root + login_password: root_password + database: admin + user: oplog_reader + password: oplog_reader_password + state: present + replica_set: belcher + roles: + - db: local + role: read + ''' +import ssl as ssl_lib import ConfigParser from distutils.version import LooseVersion try: @@ -142,30 +211,68 @@ # MongoDB module specific support methods. # -def user_find(client, user): +def check_compatibility(module, client): + """Check the compatibility between the driver and the database. + + See: https://docs.mongodb.com/ecosystem/drivers/driver-compatibility-reference/#python-driver-compatibility + + Args: + module: Ansible module. + client (cursor): Mongodb cursor on admin database. + """ + loose_srv_version = LooseVersion(client.server_info()['version']) + loose_driver_version = LooseVersion(PyMongoVersion) + + if loose_srv_version >= LooseVersion('3.2') and loose_driver_version < LooseVersion('3.2'): + module.fail_json(msg=' (Note: you must use pymongo 3.2+ with MongoDB >= 3.2)') + + elif loose_srv_version >= LooseVersion('3.0') and loose_driver_version <= LooseVersion('2.8'): + module.fail_json(msg=' (Note: you must use pymongo 2.8+ with MongoDB 3.0)') + + elif loose_srv_version >= LooseVersion('2.6') and loose_driver_version <= LooseVersion('2.7'): + module.fail_json(msg=' (Note: you must use pymongo 2.7+ with MongoDB 2.6)') + + elif LooseVersion(PyMongoVersion) <= LooseVersion('2.5'): + module.fail_json(msg=' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)') + + +def user_find(client, user, db_name): + """Check if the user exists. + + Args: + client (cursor): Mongodb cursor on admin database. + user (str): User to check. + db_name (str): User's database.
+ + Returns: + dict: when user exists, False otherwise. + """ for mongo_user in client["admin"].system.users.find(): if mongo_user['user'] == user: - return mongo_user + # NOTE: there is no 'db' field in mongo 2.4. + if 'db' not in mongo_user: + return mongo_user + + if mongo_user["db"] == db_name: + return mongo_user return False + def user_add(module, client, db_name, user, password, roles): - #pymono's user_add is a _create_or_update_user so we won't know if it was changed or updated + #pymongo's user_add is a _create_or_update_user so we won't know if it was changed or updated #without reproducing a lot of the logic in database.py of pymongo db = client[db_name] + if roles is None: db.add_user(user, password, False) else: - try: - db.add_user(user, password, None, roles=roles) - except OperationFailure, e: - err_msg = str(e) - if LooseVersion(PyMongoVersion) <= LooseVersion('2.5'): - err_msg = err_msg + ' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)' - module.fail_json(msg=err_msg) + db.add_user(user, password, None, roles=roles) def user_remove(module, client, db_name, user): - exists = user_find(client, user) + exists = user_find(client, user, db_name) if exists: + if module.check_mode: + module.exit_json(changed=True, user=user) db = client[db_name] db.remove_user(user) else: @@ -186,6 +293,44 @@ def load_mongocnf(): return creds + + +def check_if_roles_changed(uinfo, roles, db_name): +# We must be aware of users which can read the oplog on a replicaset +# Such users must have access to the local DB, but since this DB does not store users credentials +# and is not synchronized among replica sets, the user must be stored on the admin db +# Therefore their structure is the following : +# { +# "_id" : "admin.oplog_reader", +# "user" : "oplog_reader", +# "db" : "admin", # <-- admin DB +# "roles" : [ +# { +# "role" : "read", +# "db" : "local" # <-- local DB +# } +# ] +# } + + def make_sure_roles_are_a_list_of_dict(roles, db_name): + output = list() + for role in roles: + if isinstance(role, basestring): + new_role = { "role": role, "db": db_name } + output.append(new_role) + else: + output.append(role) + return output + + roles_as_list_of_dict = make_sure_roles_are_a_list_of_dict(roles, db_name) + uinfo_roles = uinfo.get('roles', []) + + if sorted(roles_as_list_of_dict) == sorted(uinfo_roles): + return False + return True + + + # ========================================= # Module execution. 
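check_if_roles_changed() can compare like with like because every shorthand role string is first promoted to the {'role': ..., 'db': ...} dict form that MongoDB stores. That normalisation step on its own (a sketch; the sample data is illustrative):

    def normalize_roles(roles, db_name):
        # 'read' -> {'role': 'read', 'db': db_name}; dicts pass through as-is.
        return [{'role': r, 'db': db_name} if isinstance(r, str) else r
                for r in roles]

    current = [{'role': 'read', 'db': 'local'}]
    wanted = normalize_roles([{'role': 'read', 'db': 'local'}], 'admin')
    print(wanted == current)  # True -> no update needed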
# @@ -197,15 +342,18 @@ def main(): login_password=dict(default=None), login_host=dict(default='localhost'), login_port=dict(default='27017'), + login_database=dict(default=None), replica_set=dict(default=None), database=dict(required=True, aliases=['db']), name=dict(required=True, aliases=['user']), password=dict(aliases=['pass']), - ssl=dict(default=False), + ssl=dict(default=False, type='bool'), roles=dict(default=None, type='list'), state=dict(default='present', choices=['absent', 'present']), update_password=dict(default="always", choices=["always", "on_create"]), - ) + ssl_cert_reqs=dict(default='CERT_REQUIRED', choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED']), + ), + supports_check_mode=True ) if not pymongo_found: @@ -215,20 +363,36 @@ def main(): login_password = module.params['login_password'] login_host = module.params['login_host'] login_port = module.params['login_port'] + login_database = module.params['login_database'] + replica_set = module.params['replica_set'] db_name = module.params['database'] user = module.params['name'] password = module.params['password'] ssl = module.params['ssl'] - roles = module.params['roles'] + ssl_cert_reqs = None + roles = module.params['roles'] or [] state = module.params['state'] update_password = module.params['update_password'] try: + connection_params = { + "host": login_host, + "port": int(login_port), + } + if replica_set: - client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl) - else: - client = MongoClient(login_host, int(login_port), ssl=ssl) + connection_params["replicaset"] = replica_set + + if ssl: + connection_params["ssl"] = ssl + connection_params["ssl_cert_reqs"] = getattr(ssl_lib, module.params['ssl_cert_reqs']) + + client = MongoClient(**connection_params) + + # NOTE: this check must be done ASAP. + # We don't need to be authenticated.
+ check_compatibility(module, client) if login_user is None and login_password is None: mongocnf_creds = load_mongocnf() @@ -239,35 +403,52 @@ def main(): module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') if login_user is not None and login_password is not None: - client.admin.authenticate(login_user, login_password) + client.admin.authenticate(login_user, login_password, source=login_database) elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'): if db_name != "admin": module.fail_json(msg='The localhost login exception only allows the first admin account to be created') #else: this has to be the first admin user added - except ConnectionFailure, e: + except Exception: + e = get_exception() module.fail_json(msg='unable to connect to database: %s' % str(e)) if state == 'present': if password is None and update_password == 'always': module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create') - if update_password != 'always' and user_find(client, user): - password = None - try: + uinfo = user_find(client, user, db_name) + if update_password != 'always' and uinfo: + password = None + if not check_if_roles_changed(uinfo, roles, db_name): + module.exit_json(changed=False, user=user) + + if module.check_mode: + module.exit_json(changed=True, user=user) + user_add(module, client, db_name, user, password, roles) - except OperationFailure, e: + except Exception: + e = get_exception() module.fail_json(msg='Unable to add or update user: %s' % str(e)) + # Here we can check password change if mongo provide a query for that : https://jira.mongodb.org/browse/SERVER-22848 + #newuinfo = user_find(client, user, db_name) + #if uinfo['role'] == newuinfo['role'] and CheckPasswordHere: + # module.exit_json(changed=False, user=user) + elif state == 'absent': try: user_remove(module, client, db_name, user) - except OperationFailure, e: + except Exception: + e = get_exception() module.fail_json(msg='Unable to remove user: %s' % str(e)) module.exit_json(changed=True, user=user) # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/database/misc/redis.py b/database/misc/redis.py index 42e364a8e61..f99d025742b 100644 --- a/database/misc/redis.py +++ b/database/misc/redis.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
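The redis module below is a thin wrapper over redis-py: its three commands map onto slaveof(), flushall()/flushdb() and config_get()/config_set(). The client calls in isolation, assuming a local server (hosts and values are illustrative):

    import redis

    r = redis.StrictRedis(host='localhost', port=6379)
    r.ping()                            # the module's connectivity check

    r.slaveof('melee.island', 6377)     # command=slave
    r.slaveof()                         # slave_mode=master: no args promotes

    old = r.config_get('maxclients')['maxclients']
    r.config_set('maxclients', 10000)   # command=config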
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: redis @@ -103,22 +107,38 @@ EXAMPLES = ''' # Set local redis instance to be slave of melee.island on port 6377 -- redis: command=slave master_host=melee.island master_port=6377 +- redis: + command: slave + master_host: melee.island + master_port: 6377 # Deactivate slave mode -- redis: command=slave slave_mode=master +- redis: + command: slave + slave_mode: master # Flush all the redis db -- redis: command=flush flush_mode=all +- redis: + command: flush + flush_mode: all # Flush only one db in a redis instance -- redis: command=flush db=1 flush_mode=db +- redis: + command: flush + db: 1 + flush_mode: db # Configure local redis to have 10000 max clients -- redis: command=config name=maxclients value=10000 +- redis: + command: config + name: maxclients + value: 10000 # Configure local redis to have lua time limit of 100 ms -- redis: command=config name=lua-time-limit value=100 +- redis: + command: config + name: lua-time-limit + value: 100 ''' try: @@ -149,7 +169,7 @@ def set_master_mode(client): def flush(client, db=None): try: - if type(db) != int: + if not isinstance(db, int): return client.flushall() else: # The passed client has been connected to the database already @@ -166,13 +186,13 @@ def main(): module = AnsibleModule( argument_spec = dict( command=dict(default=None, choices=['slave', 'flush', 'config']), - login_password=dict(default=None), + login_password=dict(default=None, no_log=True), login_host=dict(default='localhost'), - login_port=dict(default='6379'), + login_port=dict(default=6379, type='int'), master_host=dict(default=None), - master_port=dict(default=None), + master_port=dict(default=None, type='int'), slave_mode=dict(default='slave', choices=['master', 'slave']), - db=dict(default=None), + db=dict(default=None, type='int'), flush_mode=dict(default='all', choices=['all', 'db']), name=dict(default=None), value=dict(default=None) @@ -185,17 +205,13 @@ def main(): login_password = module.params['login_password'] login_host = module.params['login_host'] - login_port = int(module.params['login_port']) + login_port = module.params['login_port'] command = module.params['command'] # Slave Command section ----------- if command == "slave": master_host = module.params['master_host'] master_port = module.params['master_port'] - try: - master_port = int(module.params['master_port']) - except Exception: - pass mode = module.params['slave_mode'] #Check if we have all the data @@ -214,7 +230,8 @@ def main(): password=login_password) try: r.ping() - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="unable to connect to database: %s" % e) #Check if we are already in the mode that we want @@ -257,15 +274,12 @@ def main(): # flush Command section ----------- elif command == "flush": - try: - db = int(module.params['db']) - except Exception: - db = 0 + db = module.params['db'] mode = module.params['flush_mode'] #Check if we have all the data if mode == "db": - if type(db) != int: + if db is None: module.fail_json( msg="In db mode the db number must be provided") @@ -276,7 +290,8 @@ def main(): db=db) try: r.ping() - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="unable to connect to database: %s" % e) # Do the stuff @@ -303,13 +318,15 @@ def main(): try: r.ping() - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="unable to connect to database: %s" % e) 
try: old_value = r.config_get(name)[name] - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="unable to read config: %s" % e) changed = old_value != value @@ -318,7 +335,8 @@ def main(): else: try: r.config_set(name, value) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="unable to write config: %s" % e) module.exit_json(changed=changed, name=name, value=value) else: @@ -326,4 +344,7 @@ # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/database/misc/riak.py b/database/misc/riak.py index 453e6c15f3e..af4ec9489f3 100644 --- a/database/misc/riak.py +++ b/database/misc/riak.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: riak @@ -88,22 +92,31 @@ EXAMPLES = ''' # Joins a Riak node to another node -- riak: command=join target_node=riak@10.1.1.1 +- riak: + command: join + target_node: riak@10.1.1.1 # Wait for handoffs to finish. Use with async and poll. -- riak: wait_for_handoffs=yes +- riak: + wait_for_handoffs: yes # Wait for riak_kv service to start up -- riak: wait_for_service=kv +- riak: + wait_for_service: kv ''' import time import socket import sys + try: import json except ImportError: - import simplejson as json + try: + import simplejson as json + except ImportError: + # Let snippet from module_utils/basic.py return a proper error in this case + pass def ring_check(module, riak_admin_bin): @@ -120,7 +133,7 @@ def main(): argument_spec=dict( command=dict(required=False, default=None, choices=[ 'ping', 'kv_test', 'join', 'plan', 'commit']), - config_dir=dict(default='/etc/riak'), + config_dir=dict(default='/etc/riak', type='path'), http_conn=dict(required=False, default='127.0.0.1:8098'), target_node=dict(default='riak@127.0.0.1', required=False), wait_for_handoffs=dict(default=False, type='int'),
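The mssql_db module introduced next needs only a handful of pymssql calls; a minimal sketch mirroring its connect/exists/create flow, with illustrative credentials and database name:

    import pymssql

    # Like the module, connect to 'master'; the module appends ':port' to
    # the host string when a non-default port is configured.
    conn = pymssql.connect(user='sa', password='secret',
                           host='db.example.com', database='master')
    conn.autocommit(True)   # DDL such as CREATE DATABASE runs outside a txn
    cursor = conn.cursor()

    cursor.execute("SELECT name FROM master.sys.databases WHERE name = %s",
                   'jackdata')
    if not cursor.rowcount:             # same existence test as db_exists()
        cursor.execute("CREATE DATABASE [jackdata]")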
diff --git a/database/mssql/__init__.py b/database/mssql/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/database/mssql/mssql_db.py b/database/mssql/mssql_db.py new file mode 100644 index 00000000000..2daf74d011e --- /dev/null +++ b/database/mssql/mssql_db.py @@ -0,0 +1,245 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Ansible module to manage mssql databases +# (c) 2014, Vedit Firat Arig +# Outline and parts are reused from Mark Theunissen's mysql_db module +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: mssql_db +short_description: Add or remove MSSQL databases from a remote host. +description: + - Add or remove MSSQL databases from a remote host. +version_added: "2.2" +options: + name: + description: + - name of the database to add or remove + required: true + default: null + aliases: [ db ] + login_user: + description: + - The username used to authenticate with + required: false + default: null + login_password: + description: + - The password used to authenticate with + required: false + default: null + login_host: + description: + - Host running the database + required: false + login_port: + description: + - Port of the MSSQL server. Requires login_host be defined as other than localhost if login_port is used + required: false + default: 1433 + state: + description: + - The database state + required: false + default: present + choices: [ "present", "absent", "import" ] + target: + description: + - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL + files (C(.sql)) are supported. + required: false + autocommit: + description: + - Automatically commit the change only if the import succeeds. Sometimes it is necessary to use autocommit=true, since some content can't be changed within a transaction. + required: false + default: false + choices: [ "false", "true" ] +notes: + - Requires the pymssql Python package on the remote host. For Ubuntu, this + is as easy as pip install pymssql (See M(pip).) +requirements: + - python >= 2.7 + - pymssql +author: Vedit Firat Arig +''' + +EXAMPLES = ''' +# Create a new database with name 'jackdata' +- mssql_db: + name: jackdata + state: present + +# Copy database dump file to remote host and restore it to database 'my_db' +- copy: + src: dump.sql + dest: /tmp + +- mssql_db: + name: my_db + state: import + target: /tmp/dump.sql +''' + +RETURN = ''' +# +''' + +import os +try: + import pymssql +except ImportError: + mssql_found = False +else: + mssql_found = True + + +def db_exists(conn, cursor, db): + cursor.execute("SELECT name FROM master.sys.databases WHERE name = %s", db) + conn.commit() + return bool(cursor.rowcount) + + +def db_create(conn, cursor, db): + cursor.execute("CREATE DATABASE [%s]" % db) + return db_exists(conn, cursor, db) + + +def db_delete(conn, cursor, db): + try: + cursor.execute("ALTER DATABASE [%s] SET single_user WITH ROLLBACK IMMEDIATE" % db) + except: + pass + cursor.execute("DROP DATABASE [%s]" % db) + return not db_exists(conn, cursor, db) + +def db_import(conn, cursor, module, db, target): + if os.path.isfile(target): + backup = open(target, 'r') + try: + sqlQuery = "USE [%s]\n" % db + for line in backup: + if line is None: + break + elif line.startswith('GO'): + cursor.execute(sqlQuery) + sqlQuery = "USE [%s]\n" % db + else: + sqlQuery += line + cursor.execute(sqlQuery) + conn.commit() + finally: + backup.close() + return 0, "import successful", "" + else: + return 1, "cannot find target file", "cannot find target file" + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True, aliases=['db']), + login_user=dict(default=''), + login_password=dict(default=''), + login_host=dict(required=True), + login_port=dict(default='1433'), + target=dict(default=None), + autocommit=dict(type='bool', default=False), + state=dict( + default='present', choices=['present', 'absent', 'import']) + ) + ) + + if not mssql_found: + module.fail_json(msg="pymssql python module is required") + + db = module.params['name'] + state = module.params['state'] + autocommit = module.params['autocommit'] + target = module.params["target"] + + login_user =
module.params['login_user'] + login_password = module.params['login_password'] + login_host = module.params['login_host'] + login_port = module.params['login_port'] + + login_querystring = login_host + if login_port != "1433": + login_querystring = "%s:%s" % (login_host, login_port) + + if login_user != "" and login_password == "": + module.fail_json(msg="when supplying login_user arguments login_password must be provided") + + try: + conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database='master') + cursor = conn.cursor() + except Exception as e: + if "Unknown database" in str(e): + errno, errstr = e.args + module.fail_json(msg="ERROR: %s %s" % (errno, errstr)) + else: + module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your @sysconfdir@/freetds.conf / ${HOME}/.freetds.conf") + + conn.autocommit(True) + changed = False + + if db_exists(conn, cursor, db): + if state == "absent": + try: + changed = db_delete(conn, cursor, db) + except Exception as e: + module.fail_json(msg="error deleting database: " + str(e)) + elif state == "import": + conn.autocommit(autocommit) + rc, stdout, stderr = db_import(conn, cursor, module, db, target) + + if rc != 0: + module.fail_json(msg="%s" % stderr) + else: + module.exit_json(changed=True, db=db, msg=stdout) + else: + if state == "present": + try: + changed = db_create(conn, cursor, db) + except Exception as e: + module.fail_json(msg="error creating database: " + str(e)) + elif state == "import": + try: + changed = db_create(conn, cursor, db) + except Exception as e: + module.fail_json(msg="error creating database: " + str(e)) + + conn.autocommit(autocommit) + rc, stdout, stderr = db_import(conn, cursor, module, db, target) + + if rc != 0: + module.fail_json(msg="%s" % stderr) + else: + module.exit_json(changed=True, db=db, msg=stdout) + + module.exit_json(changed=changed, db=db) + +# import module snippets +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() + diff --git a/database/mysql/mysql_replication.py b/database/mysql/mysql_replication.py index f5d2d5cf630..76bcdc16c47 100644 --- a/database/mysql/mysql_replication.py +++ b/database/mysql/mysql_replication.py @@ -22,6 +22,10 @@ along with Ansible. If not, see . """ +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: mysql_replication @@ -34,7 +38,7 @@ options: mode: description: - - module operating mode. Could be getslave (SHOW SLAVE STATUS), getmaster (SHOW MASTER STATUS), changemaster (CHANGE MASTER TO), startslave (START SLAVE), stopslave (STOP SLAVE) + - module operating mode. Could be getslave (SHOW SLAVE STATUS), getmaster (SHOW MASTER STATUS), changemaster (CHANGE MASTER TO), startslave (START SLAVE), stopslave (STOP SLAVE), resetslave (RESET SLAVE), resetslaveall (RESET SLAVE ALL) required: False choices: - getslave @@ -42,28 +46,9 @@ - changemaster - stopslave - startslave + - resetslave + - resetslaveall default: getslave - login_user: - description: - - username to connect mysql host, if defined login_password also needed. - required: False - login_password: - description: - - password to connect mysql host, if defined login_user also needed. - required: False - login_host: - description: - - mysql host to connect - required: False - login_port: - description: - - Port of the MySQL server. 
Requires login_host be defined as other then localhost if login_port is used - required: False - default: 3306 - version_added: "1.9" - login_unix_socket: - description: - - unix socket to connect mysql server master_host: description: - same as mysql variable @@ -116,23 +101,33 @@ required: false default: null version_added: "2.0" + +extends_documentation_fragment: mysql ''' EXAMPLES = ''' # Stop mysql slave thread -- mysql_replication: mode=stopslave +- mysql_replication: + mode: stopslave # Get master binlog file name and binlog position -- mysql_replication: mode=getmaster +- mysql_replication: + mode: getmaster -# Change master to master server 192.168.1.1 and use binary log 'mysql-bin.000009' with position 4578 -- mysql_replication: mode=changemaster master_host=192.168.1.1 master_log_file=mysql-bin.000009 master_log_pos=4578 +# Change master to master server 192.0.2.1 and use binary log 'mysql-bin.000009' with position 4578 +- mysql_replication: + mode: changemaster + master_host: 192.0.2.1 + master_log_file: mysql-bin.000009 + master_log_pos: 4578 # Check slave status using port 3308 -- mysql_replication: mode=getslave login_host=ansible.example.com login_port=3308 +- mysql_replication: + mode: getslave + login_host: ansible.example.com + login_port: 3308 ''' -import ConfigParser import os import warnings @@ -143,6 +138,10 @@ else: mysqldb_found = True +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.mysql import mysql_connect +from ansible.module_utils.pycompat24 import get_exception + def get_master_status(cursor): cursor.execute("SHOW MASTER STATUS") @@ -165,6 +164,24 @@ def stop_slave(cursor): return stopped +def reset_slave(cursor): + try: + cursor.execute("RESET SLAVE") + reset = True + except: + reset = False + return reset + + +def reset_slave_all(cursor): + try: + cursor.execute("RESET SLAVE ALL") + reset = True + except: + reset = False + return reset + + def start_slave(cursor): try: cursor.execute("START SLAVE") @@ -180,78 +197,19 @@ def changemaster(cursor, chm, chm_params): cursor.execute(query, chm_params) -def strip_quotes(s): - """ Remove surrounding single or double quotes - - >>> print strip_quotes('hello') - hello - >>> print strip_quotes('"hello"') - hello - >>> print strip_quotes("'hello'") - hello - >>> print strip_quotes("'hello") - 'hello - - """ - single_quote = "'" - double_quote = '"' - - if s.startswith(single_quote) and s.endswith(single_quote): - s = s.strip(single_quote) - elif s.startswith(double_quote) and s.endswith(double_quote): - s = s.strip(double_quote) - return s - - -def config_get(config, section, option): - """ Calls ConfigParser.get and strips quotes - - See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html - """ - return strip_quotes(config.get(section, option)) - - -def load_mycnf(): - config = ConfigParser.RawConfigParser() - mycnf = os.path.expanduser('~/.my.cnf') - if not os.path.exists(mycnf): - return False - try: - config.readfp(open(mycnf)) - except (IOError): - return False - # We support two forms of passwords in .my.cnf, both pass= and password=, - # as these are both supported by MySQL. 
- try: - passwd = config_get(config, 'client', 'password') - except (ConfigParser.NoOptionError): - try: - passwd = config_get(config, 'client', 'pass') - except (ConfigParser.NoOptionError): - return False - - # If .my.cnf doesn't specify a user, default to user login name - try: - user = config_get(config, 'client', 'user') - except (ConfigParser.NoOptionError): - user = getpass.getuser() - creds = dict(user=user, passwd=passwd) - return creds - - def main(): module = AnsibleModule( argument_spec = dict( login_user=dict(default=None), - login_password=dict(default=None), + login_password=dict(default=None, no_log=True), login_host=dict(default="localhost"), login_port=dict(default=3306, type='int'), login_unix_socket=dict(default=None), - mode=dict(default="getslave", choices=["getmaster", "getslave", "changemaster", "stopslave", "startslave"]), + mode=dict(default="getslave", choices=["getmaster", "getslave", "changemaster", "stopslave", "startslave", "resetslave", "resetslaveall"]), master_auto_position=dict(default=False, type='bool'), master_host=dict(default=None), master_user=dict(default=None), - master_password=dict(default=None), + master_password=dict(default=None, no_log=True), master_port=dict(default=None, type='int'), master_connect_retry=dict(default=None, type='int'), master_log_file=dict(default=None), @@ -264,12 +222,13 @@ def main(): master_ssl_cert=dict(default=None), master_ssl_key=dict(default=None), master_ssl_cipher=dict(default=None), + connect_timeout=dict(default=30, type='int'), + config_file=dict(default="~/.my.cnf", type='path'), + ssl_cert=dict(default=None), + ssl_key=dict(default=None), + ssl_ca=dict(default=None), ) ) - user = module.params["login_user"] - password = module.params["login_password"] - host = module.params["login_host"] - port = module.params["login_port"] mode = module.params["mode"] master_host = module.params["master_host"] master_user = module.params["master_user"] @@ -287,59 +246,50 @@ def main(): master_ssl_key = module.params["master_ssl_key"] master_ssl_cipher = module.params["master_ssl_cipher"] master_auto_position = module.params["master_auto_position"] + ssl_cert = module.params["ssl_cert"] + ssl_key = module.params["ssl_key"] + ssl_ca = module.params["ssl_ca"] + connect_timeout = module.params['connect_timeout'] + config_file = module.params['config_file'] if not mysqldb_found: module.fail_json(msg="the python mysqldb module is required") else: warnings.filterwarnings('error', category=MySQLdb.Warning) - # Either the caller passes both a username and password with which to connect to - # mysql, or they pass neither and allow this module to read the credentials from - # ~/.my.cnf. 
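The comment deleted above described the old behavior: mysql_replication parsed ~/.my.cnf itself when no login arguments were given, a job now delegated to the shared ansible.module_utils.mysql helpers via the new config_file option. For reference, the removed load_mycnf() reduces to roughly the following (a condensed sketch assuming a standard [client] section; the quote stripping is simplified):

    import os
    import getpass
    import ConfigParser  # Python 2, matching the era of the removed code

    def load_mycnf_creds(path='~/.my.cnf'):
        """Return dict(user=..., passwd=...) from a .my.cnf, or None."""
        config = ConfigParser.RawConfigParser()
        if not config.read(os.path.expanduser(path)):
            return None  # file missing or unreadable
        for key in ('password', 'pass'):  # MySQL accepts both spellings
            try:
                passwd = config.get('client', key).strip('\'"')
            except ConfigParser.NoOptionError:
                continue
            try:
                user = config.get('client', 'user')
            except ConfigParser.NoOptionError:
                user = getpass.getuser()  # same default the old code used
            return dict(user=user, passwd=passwd)
        return None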
login_password = module.params["login_password"] login_user = module.params["login_user"] - if login_user is None and login_password is None: - mycnf_creds = load_mycnf() - if mycnf_creds is False: - login_user = "root" - login_password = "" - else: - login_user = mycnf_creds["user"] - login_password = mycnf_creds["passwd"] - elif login_password is None or login_user is None: - module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") try: - if module.params["login_unix_socket"]: - db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password) - elif module.params["login_port"] != 3306 and module.params["login_host"] == "localhost": - module.fail_json(msg="login_host is required when login_port is defined, login_host cannot be localhost when login_port is defined") + cursor = mysql_connect(module, login_user, login_password, config_file, ssl_cert, ssl_key, ssl_ca, None, 'MySQLdb.cursors.DictCursor', + connect_timeout=connect_timeout) + except Exception: + e = get_exception() + if os.path.exists(config_file): + module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e)) else: - db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password) - except Exception, e: - module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") - try: - cursor = db_connection.cursor(cursorclass=MySQLdb.cursors.DictCursor) - except Exception, e: - module.fail_json(msg="Trouble getting DictCursor from db_connection: %s" % e) + module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, e)) if mode in "getmaster": - masterstatus = get_master_status(cursor) - try: - module.exit_json( **masterstatus ) - except TypeError: - module.fail_json(msg="Server is not configured as mysql master") + status = get_master_status(cursor) + if not isinstance(status, dict): + status = dict(Is_Master=False, msg="Server is not configured as mysql master") + else: + status['Is_Master'] = True + module.exit_json(**status) elif mode in "getslave": - slavestatus = get_slave_status(cursor) - try: - module.exit_json( **slavestatus ) - except TypeError: - module.fail_json(msg="Server is not configured as mysql slave") + status = get_slave_status(cursor) + if not isinstance(status, dict): + status = dict(Is_Slave=False, msg="Server is not configured as mysql slave") + else: + status['Is_Slave'] = True + module.exit_json(**status) elif mode in "changemaster": chm=[] chm_params = {} + result = {} if master_host: chm.append("MASTER_HOST=%(master_host)s") chm_params['master_host'] = master_host @@ -386,8 +336,16 @@ def main(): chm_params['master_ssl_cipher'] = master_ssl_cipher if master_auto_position: chm.append("MASTER_AUTO_POSITION = 1") - changemaster(cursor, chm, chm_params) - module.exit_json(changed=True) + try: + changemaster(cursor, chm, chm_params) + except MySQLdb.Warning: + e = get_exception() + result['warning'] = str(e) + except Exception: + e = get_exception() + module.fail_json(msg='%s. 
Query == CHANGE MASTER TO %s' % (e, chm)) + result['changed']=True + module.exit_json(**result) elif mode in "startslave": started = start_slave(cursor) if started is True: @@ -400,8 +358,20 @@ def main(): module.exit_json(msg="Slave stopped", changed=True) else: module.exit_json(msg="Slave already stopped", changed=False) + elif mode in "resetslave": + reset = reset_slave(cursor) + if reset is True: + module.exit_json(msg="Slave reset", changed=True) + else: + module.exit_json(msg="Slave already reset", changed=False) + elif mode in "resetslaveall": + reset = reset_slave_all(cursor) + if reset is True: + module.exit_json(msg="Slave reset", changed=True) + else: + module.exit_json(msg="Slave already reset", changed=False) + -# import module snippets -from ansible.module_utils.basic import * -main() -warnings.simplefilter("ignore") +if __name__ == '__main__': + main() + warnings.simplefilter("ignore") diff --git a/database/postgresql/postgresql_ext.py b/database/postgresql/postgresql_ext.py index 07ed48e9d03..09b2903dab1 100644 --- a/database/postgresql/postgresql_ext.py +++ b/database/postgresql/postgresql_ext.py @@ -16,13 +16,17 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: postgresql_ext short_description: Add or remove PostgreSQL extensions from a database. description: - Add or remove PostgreSQL extensions from a database. -version_added: "0.1" +version_added: "1.9" options: name: description: @@ -70,7 +74,9 @@ EXAMPLES = ''' # Adds postgis to the database "acme" -- postgresql_ext: name=postgis db=acme +- postgresql_ext: + name: postgis + db: acme ''' try: @@ -118,7 +124,7 @@ def main(): module = AnsibleModule( argument_spec=dict( login_user=dict(default="postgres"), - login_password=dict(default=""), + login_password=dict(default="", no_log=True), login_host=dict(default=""), port=dict(default="5432"), db=dict(required=True), @@ -159,30 +165,34 @@ def main(): .ISOLATION_LEVEL_AUTOCOMMIT) cursor = db_connection.cursor( cursor_factory=psycopg2.extras.DictCursor) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="unable to connect to database: %s" % e) try: if module.check_mode: + if state == "present": + changed = not ext_exists(cursor, ext) + elif state == "absent": + changed = ext_exists(cursor, ext) + else: if state == "absent": - changed = not db_exists(cursor, ext) + changed = ext_delete(cursor, ext) + elif state == "present": - changed = db_exists(cursor, ext) - module.exit_json(changed=changed,ext=ext) - - if state == "absent": - changed = ext_delete(cursor, ext) - - elif state == "present": - changed = ext_create(cursor, ext) - except NotSupportedError, e: + changed = ext_create(cursor, ext) + except NotSupportedError: + e = get_exception() module.fail_json(msg=str(e)) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="Database query failed: %s" % e) - module.exit_json(changed=changed, db=db) + module.exit_json(changed=changed, db=db, ext=ext) # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.pycompat24 import get_exception +if __name__ == '__main__': + main() diff --git a/database/postgresql/postgresql_lang.py b/database/postgresql/postgresql_lang.py index ccee93194ea..1a868bf67a9 100644 --- a/database/postgresql/postgresql_lang.py +++ 
b/database/postgresql/postgresql_lang.py @@ -17,6 +17,10 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: postgresql_lang @@ -119,16 +123,32 @@ # Add language pltclu to database testdb if it doesn't exist and mark it as trusted: # Marks the language as trusted if it exists but isn't trusted yet # force_trust makes sure that the language will be marked as trusted -- postgresql_lang db=testdb lang=pltclu state=present trust=yes force_trust=yes +- postgresql_lang: + db: testdb + lang: pltclu + state: present + trust: yes + force_trust: yes # Remove language pltclu from database testdb: -- postgresql_lang: db=testdb lang=pltclu state=absent +- postgresql_lang: + db: testdb + lang: pltclu + state: absent # Remove language pltclu from database testdb and remove all dependencies: -- postgresql_lang: db=testdb lang=pltclu state=absent cascade=yes +- postgresql_lang: + db: testdb + lang: pltclu + state: absent + cascade: yes # Remove language c from database testdb but ignore errors if something prevents the removal: -- postgresql_lang: db=testdb lang=pltclu state=absent fail_on_drop=no +- postgresql_lang: + db: testdb + lang: pltclu + state: absent + fail_on_drop: no ''' try: @@ -184,7 +204,7 @@ def main(): module = AnsibleModule( argument_spec=dict( login_user=dict(default="postgres"), - login_password=dict(default=""), + login_password=dict(default="", no_log=True), login_host=dict(default=""), db=dict(required=True), port=dict(default='5432'), @@ -222,7 +242,8 @@ def main(): try: db_connection = psycopg2.connect(**kw) cursor = db_connection.cursor() - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="unable to connect to database: %s" % e) changed = False lang_dropped = False @@ -267,4 +288,7 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/database/postgresql/postgresql_schema.py b/database/postgresql/postgresql_schema.py new file mode 100644 index 00000000000..52c1e5843ee --- /dev/null +++ b/database/postgresql/postgresql_schema.py @@ -0,0 +1,274 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': 'preview', + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: postgresql_schema +short_description: Add or remove PostgreSQL schema from a remote host +description: + - Add or remove PostgreSQL schema from a remote host. +version_added: "2.3" +options: + name: + description: + - Name of the schema to add or remove. + required: true + default: null + database: + description: + - Name of the database to connect to. 
+ required: false + default: postgres + login_user: + description: + - The username used to authenticate with. + required: false + default: null + login_password: + description: + - The password used to authenticate with. + required: false + default: null + login_host: + description: + - Host running the database. + required: false + default: localhost + login_unix_socket: + description: + - Path to a Unix domain socket for local connections. + required: false + default: null + owner: + description: + - Name of the role to set as owner of the schema. + required: false + default: null + port: + description: + - Database port to connect to. + required: false + default: 5432 + state: + description: + - The schema state. + required: false + default: present + choices: [ "present", "absent" ] +notes: + - This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on + the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module. +requirements: [ psycopg2 ] +author: "Flavien Chantelot " +''' + +EXAMPLES = ''' +# Create a new schema with name "acme" +- postgresql_schema: + name: acme + +# Create a new schema "acme" with a user "bob" who will own it +- postgresql_schema: + name: acme + owner: bob + +''' + +RETURN = ''' +schema: + description: Name of the schema + returned: success, changed + type: string + sample: "acme" +''' + + +try: + import psycopg2 + import psycopg2.extras +except ImportError: + postgresqldb_found = False +else: + postgresqldb_found = True + +class NotSupportedError(Exception): + pass + + +# =========================================== +# PostgreSQL module specific support methods. 
+# + +def set_owner(cursor, schema, owner): + query = "ALTER SCHEMA %s OWNER TO %s" % ( + pg_quote_identifier(schema, 'schema'), + pg_quote_identifier(owner, 'role')) + cursor.execute(query) + return True + +def get_schema_info(cursor, schema): + query = """ + SELECT schema_owner AS owner + FROM information_schema.schemata + WHERE schema_name = %(schema)s + """ + cursor.execute(query, {'schema': schema}) + return cursor.fetchone() + +def schema_exists(cursor, schema): + query = "SELECT schema_name FROM information_schema.schemata WHERE schema_name = %(schema)s" + cursor.execute(query, {'schema': schema}) + return cursor.rowcount == 1 + +def schema_delete(cursor, schema): + if schema_exists(cursor, schema): + query = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema') + cursor.execute(query) + return True + else: + return False + +def schema_create(cursor, schema, owner): + if not schema_exists(cursor, schema): + query_fragments = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')] + if owner: + query_fragments.append('AUTHORIZATION %s' % pg_quote_identifier(owner, 'role')) + query = ' '.join(query_fragments) + cursor.execute(query) + return True + else: + schema_info = get_schema_info(cursor, schema) + if owner and owner != schema_info['owner']: + return set_owner(cursor, schema, owner) + else: + return False + +def schema_matches(cursor, schema, owner): + if not schema_exists(cursor, schema): + return False + else: + schema_info = get_schema_info(cursor, schema) + if owner and owner != schema_info['owner']: + return False + else: + return True + +# =========================================== +# Module execution. +# + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default="postgres"), + login_password=dict(default=""), + login_host=dict(default=""), + login_unix_socket=dict(default=""), + port=dict(default="5432"), + schema=dict(required=True, aliases=['name']), + owner=dict(default=""), + database=dict(default="postgres"), + state=dict(default="present", choices=["absent", "present"]), + ), + supports_check_mode = True + ) + + if not postgresqldb_found: + module.fail_json(msg="the python psycopg2 module is required") + + schema = module.params["schema"] + owner = module.params["owner"] + state = module.params["state"] + database = module.params["database"] + changed = False + + # To use defaults values, keyword arguments must be absent, so + # check which values are empty and don't include in the **kw + # dictionary + params_map = { + "login_host":"host", + "login_user":"user", + "login_password":"password", + "port":"port" + } + kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems() + if k in params_map and v != '' ) + + # If a login_unix_socket is specified, incorporate it here. 
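The support methods above are the usual check-then-act idempotence pattern: consult information_schema.schemata, then create the schema, change its owner, or report no change. Condensed into one function, the core logic is roughly this (a sketch; the module's pg_quote_identifier() quoting is omitted, so schema and owner are assumed to be trusted input):

    def ensure_schema(cursor, schema, owner=None):
        """Create schema or fix its owner; return True if anything changed."""
        cursor.execute("SELECT schema_owner FROM information_schema.schemata"
                       " WHERE schema_name = %(schema)s", {'schema': schema})
        row = cursor.fetchone()
        if row is None:
            cursor.execute('CREATE SCHEMA "%s"' % schema)  # quote properly in real code
            return True
        if owner and row[0] != owner:
            cursor.execute('ALTER SCHEMA "%s" OWNER TO "%s"' % (schema, owner))
            return True
        return False  # already in the desired state

Returning whether anything changed is what lets the module report changed accurately and support check mode.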
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost" + if is_localhost and module.params["login_unix_socket"] != "": + kw["host"] = module.params["login_unix_socket"] + + try: + db_connection = psycopg2.connect(database=database, **kw) + # Enable autocommit so we can create databases + if psycopg2.__version__ >= '2.4.2': + db_connection.autocommit = True + else: + db_connection.set_isolation_level(psycopg2 + .extensions + .ISOLATION_LEVEL_AUTOCOMMIT) + cursor = db_connection.cursor( + cursor_factory=psycopg2.extras.DictCursor) + except Exception: + e = get_exception() + module.fail_json(msg="unable to connect to database: %s" % str(e)) + + try: + if module.check_mode: + if state == "absent": + changed = not schema_exists(cursor, schema) + elif state == "present": + changed = not schema_matches(cursor, schema, owner) + module.exit_json(changed=changed, schema=schema) + + if state == "absent": + try: + changed = schema_delete(cursor, schema) + except SQLParseError: + e = get_exception() + module.fail_json(msg=str(e)) + + elif state == "present": + try: + changed = schema_create(cursor, schema, owner) + except SQLParseError: + e = get_exception() + module.fail_json(msg=str(e)) + except NotSupportedError: + e = get_exception() + module.fail_json(msg=str(e)) + except SystemExit: + # Avoid catching this on Python 2.4 + raise + except Exception: + e = get_exception() + module.fail_json(msg="Database query failed: %s" % str(e)) + + module.exit_json(changed=changed, schema=schema) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.database import * +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/database/vertica/vertica_configuration.py b/database/vertica/vertica_configuration.py index ed75667b139..c99627a021d 100644 --- a/database/vertica/vertica_configuration.py +++ b/database/vertica/vertica_configuration.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
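The five vertica_* modules patched below all open their connection the same way: an ODBC connection string handed to pyodbc with autocommit enabled. In isolation the idiom is (a sketch; the parameter defaults here are illustrative, not the modules' own):

    import pyodbc

    def vertica_connect(cluster='localhost', port=5433, db='',
                        user='dbadmin', password=''):
        dsn = ("Driver=Vertica;Server=%s;Port=%s;Database=%s;"
               "User=%s;Password=%s;ConnectionLoadBalance=true"
               ) % (cluster, port, db, user, password)
        return pyodbc.connect(dsn, autocommit=True)

    # cursor = vertica_connect(cluster='db.example.com').cursor()

Autocommit matters here because DDL statements such as CREATE SCHEMA or CREATE ROLE should not be left dangling in an open transaction.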
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ --- module: vertica_configuration @@ -82,6 +86,10 @@ else: pyodbc_found = True +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception + + class NotSupportedError(Exception): pass @@ -164,7 +172,8 @@ def main(): module.params['login_user'], module.params['login_password'], 'true') db_conn = pyodbc.connect(dsn, autocommit=True) cursor = db_conn.cursor() - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="Unable to connect to database: {0}.".format(e)) try: @@ -174,21 +183,24 @@ def main(): else: try: changed = present(configuration_facts, cursor, parameter_name, current_value) - except pyodbc.Error, e: + except pyodbc.Error: + e = get_exception() module.fail_json(msg=str(e)) - except NotSupportedError, e: + except NotSupportedError: + e = get_exception() module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts}) - except CannotDropError, e: + except CannotDropError: + e = get_exception() module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts}) except SystemExit: # avoid catching this on python 2.4 raise - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg=e) module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts}) -# import ansible utilities -from ansible.module_utils.basic import * + if __name__ == '__main__': main() diff --git a/database/vertica/vertica_facts.py b/database/vertica/vertica_facts.py index 705b74a04f5..4796a53612c 100644 --- a/database/vertica/vertica_facts.py +++ b/database/vertica/vertica_facts.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ --- module: vertica_facts @@ -74,6 +78,10 @@ else: pyodbc_found = True +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception + + class NotSupportedError(Exception): pass @@ -232,24 +240,23 @@ def main(): if module.params['db']: db = module.params['db'] - changed = False - try: dsn = ( "Driver=Vertica;" - "Server={0};" - "Port={1};" - "Database={2};" - "User={3};" - "Password={4};" - "ConnectionLoadBalance={5}" - ).format(module.params['cluster'], module.params['port'], db, + "Server=%s;" + "Port=%s;" + "Database=%s;" + "User=%s;" + "Password=%s;" + "ConnectionLoadBalance=%s" + ) % (module.params['cluster'], module.params['port'], db, module.params['login_user'], module.params['login_password'], 'true') db_conn = pyodbc.connect(dsn, autocommit=True) cursor = db_conn.cursor() - except Exception, e: - module.fail_json(msg="Unable to connect to database: {0}.".format(e)) - + except Exception: + e = get_exception() + module.fail_json(msg="Unable to connect to database: %s." 
% str(e)) + try: schema_facts = get_schema_facts(cursor) user_facts = get_user_facts(cursor) @@ -262,15 +269,16 @@ def main(): 'vertica_roles': role_facts, 'vertica_configuration': configuration_facts, 'vertica_nodes': node_facts}) - except NotSupportedError, e: + except NotSupportedError: + e = get_exception() module.fail_json(msg=str(e)) except SystemExit: # avoid catching this on python 2.4 raise - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg=e) -# import ansible utilities -from ansible.module_utils.basic import * + if __name__ == '__main__': main() diff --git a/database/vertica/vertica_role.py b/database/vertica/vertica_role.py index b7a0a5d66ef..aff14581a38 100644 --- a/database/vertica/vertica_role.py +++ b/database/vertica/vertica_role.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ --- module: vertica_role @@ -93,6 +97,10 @@ else: pyodbc_found = True +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception + + class NotSupportedError(Exception): pass @@ -208,7 +216,8 @@ def main(): module.params['login_user'], module.params['login_password'], 'true') db_conn = pyodbc.connect(dsn, autocommit=True) cursor = db_conn.cursor() - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="Unable to connect to database: {0}.".format(e)) try: @@ -218,26 +227,30 @@ def main(): elif state == 'absent': try: changed = absent(role_facts, cursor, role, assigned_roles) - except pyodbc.Error, e: + except pyodbc.Error: + e = get_exception() module.fail_json(msg=str(e)) elif state == 'present': try: changed = present(role_facts, cursor, role, assigned_roles) - except pyodbc.Error, e: + except pyodbc.Error: + e = get_exception() module.fail_json(msg=str(e)) - except NotSupportedError, e: + except NotSupportedError: + e = get_exception() module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts}) - except CannotDropError, e: + except CannotDropError: + e = get_exception() module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts}) except SystemExit: # avoid catching this on python 2.4 raise - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg=e) module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts}) -# import ansible utilities -from ansible.module_utils.basic import * + if __name__ == '__main__': main() diff --git a/database/vertica/vertica_schema.py b/database/vertica/vertica_schema.py index 39ccb0b60e8..0bc1918d318 100644 --- a/database/vertica/vertica_schema.py +++ b/database/vertica/vertica_schema.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ --- module: vertica_schema @@ -117,6 +121,10 @@ else: pyodbc_found = True +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception + + class NotSupportedError(Exception): pass @@ -282,7 +290,8 @@ def main(): module.params['login_user'], module.params['login_password'], 'true') db_conn = pyodbc.connect(dsn, autocommit=True) cursor = db_conn.cursor() - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="Unable to connect to database: {0}.".format(e)) try: @@ -292,26 +301,30 @@ def main(): elif state == 'absent': try: changed = absent(schema_facts, cursor, schema, usage_roles, create_roles) - except pyodbc.Error, e: + except pyodbc.Error: + e = get_exception() module.fail_json(msg=str(e)) elif state == 'present': try: changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner) - except pyodbc.Error, e: + except pyodbc.Error: + e = get_exception() module.fail_json(msg=str(e)) - except NotSupportedError, e: + except NotSupportedError: + e = get_exception() module.fail_json(msg=str(e), ansible_facts={'vertica_schemas': schema_facts}) - except CannotDropError, e: + except CannotDropError: + e = get_exception() module.fail_json(msg=str(e), ansible_facts={'vertica_schemas': schema_facts}) except SystemExit: # avoid catching this on python 2.4 raise - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg=e) module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts}) -# import ansible utilities -from ansible.module_utils.basic import * + if __name__ == '__main__': main() diff --git a/database/vertica/vertica_user.py b/database/vertica/vertica_user.py index 7c52df3163a..48d20c0f6d2 100644 --- a/database/vertica/vertica_user.py +++ b/database/vertica/vertica_user.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
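The except-clause rewrite repeated throughout this patch deserves one general note: 'except Exception, e' is a syntax error on Python 3, while 'except Exception as e' does not parse on the Python 2.4 interpreters these modules still support, so the exception object is fetched after entering the handler instead. The helper is essentially a wrapper around sys.exc_info(); a self-contained sketch of the pattern:

    import sys

    def get_exception():
        """Mirrors ansible.module_utils.pycompat24.get_exception."""
        return sys.exc_info()[1]

    try:
        1 / 0  # stands in for any call that may raise
    except Exception:
        e = get_exception()  # works on Python 2.4 through 3.x
        print("operation failed: %s" % str(e))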
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ --- module: vertica_user @@ -130,6 +134,10 @@ else: pyodbc_found = True +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception + + class NotSupportedError(Exception): pass @@ -351,7 +359,8 @@ def main(): module.params['login_user'], module.params['login_password'], 'true') db_conn = pyodbc.connect(dsn, autocommit=True) cursor = db_conn.cursor() - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="Unable to connect to database: {0}.".format(e)) try: @@ -362,27 +371,31 @@ def main(): elif state == 'absent': try: changed = absent(user_facts, cursor, user, roles) - except pyodbc.Error, e: + except pyodbc.Error: + e = get_exception() module.fail_json(msg=str(e)) elif state in ['present', 'locked']: try: changed = present(user_facts, cursor, user, profile, resource_pool, locked, password, expired, ldap, roles) - except pyodbc.Error, e: + except pyodbc.Error: + e = get_exception() module.fail_json(msg=str(e)) - except NotSupportedError, e: + except NotSupportedError: + e = get_exception() module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts}) - except CannotDropError, e: + except CannotDropError: + e = get_exception() module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts}) except SystemExit: # avoid catching this on python 2.4 raise - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg=e) module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts}) -# import ansible utilities -from ansible.module_utils.basic import * + if __name__ == '__main__': main() diff --git a/files/archive.py b/files/archive.py new file mode 100644 index 00000000000..93ddbe76cde --- /dev/null +++ b/files/archive.py @@ -0,0 +1,412 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception + +""" +(c) 2016, Ben Doherty +Sponsored by Oomph, Inc. http://www.oomphinc.com + +This file is part of Ansible + +Ansible is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +Ansible is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Ansible. If not, see . +""" + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: archive +version_added: 2.3 +short_description: Creates a compressed archive of one or more files or trees. +extends_documentation_fragment: files +description: + - The M(archive) module packs an archive. It is the opposite of the unarchive module. By default, it assumes the compression source exists on the target. It will not copy the source file from the local system to the target before archiving. Source files can be deleted after archival by specifying C(remove)=I(True). 
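Under the hood the module drives the standard-library tarfile and zipfile interfaces. The multi-file case reduces to roughly the following (a sketch under simplifying assumptions: paths are already expanded, the common-root computation is naive, and there is no idempotence or attribute handling):

    import os
    import tarfile
    import zipfile

    def make_archive(dest, paths, fmt='gz'):
        """Pack paths into dest; fmt is 'gz', 'bz2', 'tar' or 'zip'."""
        root = os.path.commonprefix([os.path.dirname(p) + os.sep for p in paths])
        if fmt == 'zip':
            arc = zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED)
            for p in paths:
                arc.write(p, p[len(root):])  # ZipFile.write does not recurse
        else:
            mode = 'w' if fmt == 'tar' else 'w|' + fmt
            arc = tarfile.open(dest, mode)
            for p in paths:
                arc.add(p, p[len(root):])  # tarfile.add recurses into directories
        arc.close()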
+options: + path: + description: + - Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive. + required: true + format: + description: + - The type of compression to use. Can be 'gz', 'bz2', or 'zip'. + choices: [ 'gz', 'bz2', 'zip' ] + default: 'gz' + dest: + description: + - The file name of the destination archive. This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list. + required: false + default: null + remove: + description: + - Remove any added source files and trees after adding to archive. + type: bool + required: false + default: false + +author: "Ben Doherty (@bendoh)" +notes: + - requires tarfile, zipfile, gzip, and bzip2 packages on target host + - can produce I(gzip), I(bzip2) and I(zip) compressed files or archives +''' + +EXAMPLES = ''' +# Compress directory /path/to/foo/ into /path/to/foo.tgz +- archive: + path: /path/to/foo + dest: /path/to/foo.tgz + +# Compress regular file /path/to/foo into /path/to/foo.gz and remove it +- archive: + path: /path/to/foo + remove: True + +# Create a zip archive of /path/to/foo +- archive: + path: /path/to/foo + format: zip + +# Create a bz2 archive of multiple files, rooted at /path +- archive: + path: + - /path/to/foo + - /path/wong/foo + dest: /path/file.tar.bz2 + format: bz2 +''' + +RETURN = ''' +state: + description: + The current state of the archived file. + If 'absent', then no source files were found and the archive does not exist. + If 'compress', then the file source file is in the compressed state. + If 'archive', then the source file or paths are currently archived. + If 'incomplete', then an archive was created, but not all source paths were found. + type: string + returned: always +missing: + description: Any files that were missing from the source. + type: list + returned: success +archived: + description: Any files that were compressed or added to the archive. + type: list + returned: success +arcroot: + description: The archive root. + type: string +expanded_paths: + description: The list of matching paths from paths argument. + type: list +''' + +import os +import re +import glob +import shutil +import gzip +import bz2 +import filecmp +import zipfile +import tarfile + +def main(): + module = AnsibleModule( + argument_spec = dict( + path = dict(type='list', required=True), + format = dict(choices=['gz', 'bz2', 'zip', 'tar'], default='gz', required=False), + dest = dict(required=False, type='path'), + remove = dict(required=False, default=False, type='bool'), + ), + add_file_common_args=True, + supports_check_mode=True, + ) + + params = module.params + check_mode = module.check_mode + paths = params['path'] + dest = params['dest'] + remove = params['remove'] + + expanded_paths = [] + format = params['format'] + globby = False + changed = False + state = 'absent' + + # Simple or archive file compression (inapplicable with 'zip' since it's always an archive) + archive = False + successes = [] + + for i, path in enumerate(paths): + path = os.path.expanduser(os.path.expandvars(path)) + + # Expand any glob characters. If found, add the expanded glob to the + # list of expanded_paths, which might be empty. + if ('*' in path or '?' 
in path): + expanded_paths = expanded_paths + glob.glob(path) + globby = True + + # If there are no glob characters the path is added to the expanded paths + # whether the path exists or not + else: + expanded_paths.append(path) + + if len(expanded_paths) == 0: + return module.fail_json(path=', '.join(paths), expanded_paths=', '.join(expanded_paths), msg='Error, no source paths were found') + + # If we actually matched multiple files or TRIED to, then + # treat this as a multi-file archive + archive = globby or os.path.isdir(expanded_paths[0]) or len(expanded_paths) > 1 + + # Default created file name (for single-file archives) to + # . + if not dest and not archive: + dest = '%s.%s' % (expanded_paths[0], format) + + # Force archives to specify 'dest' + if archive and not dest: + module.fail_json(dest=dest, path=', '.join(paths), msg='Error, must specify "dest" when archiving multiple files or trees') + + archive_paths = [] + missing = [] + arcroot = '' + + for path in expanded_paths: + # Use the longest common directory name among all the files + # as the archive root path + if arcroot == '': + arcroot = os.path.dirname(path) + os.sep + else: + for i in range(len(arcroot)): + if path[i] != arcroot[i]: + break + + if i < len(arcroot): + arcroot = os.path.dirname(arcroot[0:i+1]) + + arcroot += os.sep + + # Don't allow archives to be created anywhere within paths to be removed + if remove and os.path.isdir(path) and dest.startswith(path): + module.fail_json(path=', '.join(paths), msg='Error, created archive can not be contained in source paths when remove=True') + + if os.path.lexists(path): + archive_paths.append(path) + else: + missing.append(path) + + # No source files were found but the named archive exists: are we 'compress' or 'archive' now? + if len(missing) == len(expanded_paths) and dest and os.path.exists(dest): + # Just check the filename to know if it's an archive or simple compressed file + if re.search(r'(\.tar|\.tar\.gz|\.tgz|.tbz2|\.tar\.bz2|\.zip)$', os.path.basename(dest), re.IGNORECASE): + state = 'archive' + else: + state = 'compress' + + # Multiple files, or globbiness + elif archive: + if len(archive_paths) == 0: + # No source files were found, but the archive is there. + if os.path.lexists(dest): + state = 'archive' + elif len(missing) > 0: + # SOME source files were found, but not all of them + state = 'incomplete' + + archive = None + size = 0 + errors = [] + + if os.path.lexists(dest): + size = os.path.getsize(dest) + + if state != 'archive': + if check_mode: + changed = True + + else: + try: + # Slightly more difficult (and less efficient!) 
compression using zipfile module + if format == 'zip': + arcfile = zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED) + + # Easier compression using tarfile module + elif format == 'gz' or format == 'bz2': + arcfile = tarfile.open(dest, 'w|' + format) + + # Or plain tar archiving + elif format == 'tar': + arcfile = tarfile.open(dest, 'w') + + match_root = re.compile('^%s' % re.escape(arcroot)) + for path in archive_paths: + if os.path.isdir(path): + # Recurse into directories + for dirpath, dirnames, filenames in os.walk(path, topdown=True): + if not dirpath.endswith(os.sep): + dirpath += os.sep + + for dirname in dirnames: + fullpath = dirpath + dirname + arcname = match_root.sub('', fullpath) + + try: + if format == 'zip': + arcfile.write(fullpath, arcname) + else: + arcfile.add(fullpath, arcname, recursive=False) + + except Exception: + e = get_exception() + errors.append('%s: %s' % (fullpath, str(e))) + + for filename in filenames: + fullpath = dirpath + filename + arcname = match_root.sub('', fullpath) + + if not filecmp.cmp(fullpath, dest): + try: + if format == 'zip': + arcfile.write(fullpath, arcname) + else: + arcfile.add(fullpath, arcname, recursive=False) + + successes.append(fullpath) + except Exception: + e = get_exception() + errors.append('Adding %s: %s' % (path, str(e))) + else: + if format == 'zip': + arcfile.write(path, match_root.sub('', path)) + else: + arcfile.add(path, match_root.sub('', path), recursive=False) + + successes.append(path) + + except Exception: + e = get_exception() + return module.fail_json(msg='Error when writing %s archive at %s: %s' % (format == 'zip' and 'zip' or ('tar.' + format), dest, str(e))) + + if arcfile: + arcfile.close() + state = 'archive' + + if len(errors) > 0: + module.fail_json(msg='Errors when writing archive at %s: %s' % (dest, '; '.join(errors))) + + if state in ['archive', 'incomplete'] and remove: + for path in successes: + try: + if os.path.isdir(path): + shutil.rmtree(path) + elif not check_mode: + os.remove(path) + except OSError: + e = get_exception() + errors.append(path) + + if len(errors) > 0: + module.fail_json(dest=dest, msg='Error deleting some source files: ' + str(e), files=errors) + + # Rudimentary check: If size changed then file changed. Not perfect, but easy. 
+ if os.path.getsize(dest) != size: + changed = True + + if len(successes) and state != 'incomplete': + state = 'archive' + + # Simple, single-file compression + else: + path = expanded_paths[0] + + # No source or compressed file + if not (os.path.exists(path) or os.path.lexists(dest)): + state = 'absent' + + # if it already exists and the source file isn't there, consider this done + elif not os.path.lexists(path) and os.path.lexists(dest): + state = 'compress' + + else: + if module.check_mode: + if not os.path.exists(dest): + changed = True + else: + size = 0 + f_in = f_out = arcfile = None + + if os.path.lexists(dest): + size = os.path.getsize(dest) + + try: + if format == 'zip': + arcfile = zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED) + arcfile.write(path, path[len(arcroot):]) + arcfile.close() + state = 'archive' # because all zip files are archives + + else: + f_in = open(path, 'rb') + + if format == 'gz': + f_out = gzip.open(dest, 'wb') + elif format == 'bz2': + f_out = bz2.BZ2File(dest, 'wb') + else: + raise OSError("Invalid format") + + shutil.copyfileobj(f_in, f_out) + + successes.append(path) + + except OSError: + e = get_exception() + module.fail_json(path=path, dest=dest, msg='Unable to write to compressed file: %s' % str(e)) + + if arcfile: + arcfile.close() + if f_in: + f_in.close() + if f_out: + f_out.close() + + # Rudimentary check: If size changed then file changed. Not perfect, but easy. + if os.path.getsize(dest) != size: + changed = True + + state = 'compress' + + if remove and not check_mode: + try: + os.remove(path) + + except OSError: + e = get_exception() + module.fail_json(path=path, msg='Unable to remove source file: %s' % str(e)) + + params['path'] = dest + file_args = module.load_file_common_arguments(params) + + changed = module.set_fs_attributes_if_different(file_args, changed) + + module.exit_json(archived=successes, dest=dest, changed=changed, state=state, arcroot=arcroot, missing=missing, expanded_paths=expanded_paths) + +if __name__ == '__main__': + main() diff --git a/files/blockinfile.py b/files/blockinfile.py new file mode 100755 index 00000000000..ec85c078822 --- /dev/null +++ b/files/blockinfile.py @@ -0,0 +1,324 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, 2015 YAEGASHI Takeshi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: blockinfile +author: + - 'YAEGASHI Takeshi (@yaegashi)' +extends_documentation_fragment: + - files + - validate +short_description: Insert/update/remove a text block + surrounded by marker lines. +version_added: '2.0' +description: + - This module will insert/update/remove a block of multi-line text + surrounded by customizable marker lines. +notes: + - This module supports check mode. 
+ - When using 'with_*' loops be aware that if you do not set a unique marker the block will be overwritten on each iteration. +options: + dest: + aliases: [ name, destfile ] + required: true + description: + - The file to modify. + state: + required: false + choices: [ present, absent ] + default: present + description: + - Whether the block should be there or not. + marker: + required: false + default: '# {mark} ANSIBLE MANAGED BLOCK' + description: + - The marker line template. + "{mark}" will be replaced with "BEGIN" or "END". + block: + aliases: [ content ] + required: false + default: '' + description: + - The text to insert inside the marker lines. + If it's missing or an empty string, + the block will be removed as if C(state) were set to C(absent). + insertafter: + required: false + default: EOF + description: + - If specified, the block will be inserted after the last match of the + specified regular expression. A special value is available; C(EOF) for + inserting the block at the end of the file. If the specified regular + expression has no matches, C(EOF) will be used instead. + choices: [ 'EOF', '*regex*' ] + insertbefore: + required: false + default: None + description: + - If specified, the block will be inserted before the last match of the + specified regular expression. A special value is available; C(BOF) for + inserting the block at the beginning of the file. If the specified regular + expression has no matches, the block will be inserted at the end of the + file. + choices: [ 'BOF', '*regex*' ] + create: + required: false + default: 'no' + choices: [ 'yes', 'no' ] + description: + - Create a new file if it doesn't exist. + backup: + required: false + default: 'no' + choices: [ 'yes', 'no' ] + description: + - Create a backup file including the timestamp information so you can + get the original file back if you somehow clobbered it incorrectly. + follow: + required: false + default: "no" + choices: [ "yes", "no" ] + description: + - 'This flag indicates that filesystem links, if they exist, should be followed.' + version_added: "2.1" +""" + +EXAMPLES = r""" +- name: insert/update "Match User" configuration block in /etc/ssh/sshd_config + blockinfile: + dest: /etc/ssh/sshd_config + block: | + Match User ansible-agent + PasswordAuthentication no + +- name: insert/update eth0 configuration stanza in /etc/network/interfaces + (it might be better to copy files into /etc/network/interfaces.d/) + blockinfile: + dest: /etc/network/interfaces + block: | + iface eth0 inet static + address 192.0.2.23 + netmask 255.255.255.0 + +- name: insert/update HTML surrounded by custom markers after the <body> line + blockinfile: + dest: /var/www/html/index.html + marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->" + insertafter: "<body>" + content: |
+ <h1>Welcome to {{ansible_hostname}}</h1> + <p>Last updated on {{ansible_date_time.iso8601}}</p>
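The idempotence behind these examples comes from the marker bookkeeping in main() further below: the destination file is scanned for the BEGIN and END marker lines and the block is spliced between them, rather than appended blindly on every run. Reduced to a sketch (the real code also honors insertafter/insertbefore):

    def splice_block(lines, blocklines, marker0, marker1):
        """Replace whatever sits between marker0/marker1 with blocklines."""
        n0 = n1 = None
        for i, line in enumerate(lines):
            if line == marker0:
                n0 = i
            if line == marker1:
                n1 = i
        if n0 is None or n1 is None:
            n0 = len(lines)        # no existing block: append at EOF
        elif n0 < n1:
            lines[n0:n1 + 1] = []  # drop the old block, markers included
        else:
            lines[n1:n0 + 1] = []  # markers found in reverse order
            n0 = n1
        lines[n0:n0] = blocklines  # blocklines already includes the markers
        return lines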
+ +- name: remove HTML as well as surrounding markers + blockinfile: + dest: /var/www/html/index.html + marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->" + content: "" + +- name: Add mappings to /etc/hosts + blockinfile: + dest: /etc/hosts + block: | + {{item.ip}} {{item.name}} + marker: "# {mark} ANSIBLE MANAGED BLOCK {{item.name}}" + with_items: + - { name: host1, ip: 10.10.1.10 } + - { name: host2, ip: 10.10.1.11 } + - { name: host3, ip: 10.10.1.12 } +""" + +import re +import os +import tempfile +from ansible.module_utils.six import b +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_bytes + +def write_changes(module, contents, dest): + + tmpfd, tmpfile = tempfile.mkstemp() + f = os.fdopen(tmpfd, 'wb') + f.write(contents) + f.close() + + validate = module.params.get('validate', None) + valid = not validate + if validate: + if "%s" not in validate: + module.fail_json(msg="validate must contain %%s: %s" % (validate)) + (rc, out, err) = module.run_command(validate % tmpfile) + valid = rc == 0 + if rc != 0: + module.fail_json(msg='failed to validate: ' + 'rc:%s error:%s' % (rc, err)) + if valid: + module.atomic_move(tmpfile, dest, unsafe_writes=module.params['unsafe_writes']) + + +def check_file_attrs(module, changed, message): + + file_args = module.load_file_common_arguments(module.params) + if module.set_file_attributes_if_different(file_args, False): + + if changed: + message += " and " + changed = True + message += "ownership, perms or SE linux context changed" + + return message, changed + + +def main(): + module = AnsibleModule( + argument_spec=dict( + dest=dict(required=True, aliases=['name', 'destfile'], type='path'), + state=dict(default='present', choices=['absent', 'present']), + marker=dict(default='# {mark} ANSIBLE MANAGED BLOCK', type='str'), + block=dict(default='', type='str', aliases=['content']), + insertafter=dict(default=None), + insertbefore=dict(default=None), + create=dict(default=False, type='bool'), + backup=dict(default=False, type='bool'), + validate=dict(default=None, type='str'), + ), + mutually_exclusive=[['insertbefore', 'insertafter']], + add_file_common_args=True, + supports_check_mode=True + ) + + params = module.params + dest = params['dest'] + if module.boolean(params.get('follow', None)): + dest = os.path.realpath(dest) + + if os.path.isdir(dest): + module.fail_json(rc=256, + msg='Destination %s is a directory !' % dest) + + path_exists = os.path.exists(dest) + if not path_exists: + if not module.boolean(params['create']): + module.fail_json(rc=257, + msg='Destination %s does not exist !'
% dest) + original = None + lines = [] + else: + f = open(dest, 'rb') + original = f.read() + f.close() + lines = original.splitlines() + + insertbefore = params['insertbefore'] + insertafter = params['insertafter'] + block = to_bytes(params['block']) + marker = to_bytes(params['marker']) + present = params['state'] == 'present' + + if not present and not path_exists: + module.exit_json(changed=False, msg="File not present") + + if insertbefore is None and insertafter is None: + insertafter = 'EOF' + + if insertafter not in (None, 'EOF'): + insertre = re.compile(insertafter) + elif insertbefore not in (None, 'BOF'): + insertre = re.compile(insertbefore) + else: + insertre = None + + marker0 = re.sub(b(r'{mark}'), b('BEGIN'), marker) + marker1 = re.sub(b(r'{mark}'), b('END'), marker) + if present and block: + # Escape seqeuences like '\n' need to be handled in Ansible 1.x + if module.ansible_version.startswith('1.'): + block = re.sub('', block, '') + blocklines = [marker0] + block.splitlines() + [marker1] + else: + blocklines = [] + + n0 = n1 = None + for i, line in enumerate(lines): + if line == marker0: + n0 = i + if line == marker1: + n1 = i + + if None in (n0, n1): + n0 = None + if insertre is not None: + for i, line in enumerate(lines): + if insertre.search(line): + n0 = i + if n0 is None: + n0 = len(lines) + elif insertafter is not None: + n0 += 1 + elif insertbefore is not None: + n0 = 0 # insertbefore=BOF + else: + n0 = len(lines) # insertafter=EOF + elif n0 < n1: + lines[n0:n1+1] = [] + else: + lines[n1:n0+1] = [] + n0 = n1 + + lines[n0:n0] = blocklines + + if lines: + result = b('\n').join(lines) + if original is None or original.endswith(b('\n')): + result += b('\n') + else: + result = '' + if original == result: + msg = '' + changed = False + elif original is None: + msg = 'File created' + changed = True + elif not blocklines: + msg = 'Block removed' + changed = True + else: + msg = 'Block inserted' + changed = True + + if changed and not module.check_mode: + if module.boolean(params['backup']) and path_exists: + module.backup_local(dest) + write_changes(module, result, dest) + + if module.check_mode and not path_exists: + module.exit_json(changed=changed, msg=msg) + + msg, changed = check_file_attrs(module, changed, msg) + module.exit_json(changed=changed, msg=msg) + + +if __name__ == '__main__': + main() diff --git a/files/patch.py b/files/patch.py index 576333c38f8..c5aecf4e0d4 100644 --- a/files/patch.py +++ b/files/patch.py @@ -19,6 +19,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: patch @@ -46,17 +50,17 @@ src: description: - Path of the patch file as accepted by the GNU patch tool. If - C(remote_src) is False, the patch source file is looked up from the + C(remote_src) is 'no', the patch source file is looked up from the module's "files" directory. required: true aliases: [ "patchfile" ] remote_src: description: - - If False, it will search for src at originating/master machine, if True it will - go to the remote/target machine for the src. Default is False. - choices: [ "True", "False" ] + - If C(no), it will search for src at originating/master machine, if C(yes) it will + go to the remote/target machine for the src. Default is C(no). 
+ choices: [ "yes", "no" ] required: false - default: "False" + default: "no" strip: description: - Number that indicates the smallest prefix containing leading slashes @@ -70,15 +74,17 @@ description: - passes --backup --version-control=numbered to patch, producing numbered backup copies + choices: [ 'yes', 'no' ] + default: 'no' binary: version_added: "2.0" description: - - Setting to true will disable patch's heuristic for transforming CRLF + - Setting to C(yes) will disable patch's heuristic for transforming CRLF line endings into LF. Line endings of src and dest must match. If set to - False, patch will replace CRLF in src files on POSIX. + C(no), patch will replace CRLF in src files on POSIX. required: false type: "bool" - default: "False" + default: "no" note: - This module requires GNU I(patch) utility to be installed on the remote host. ''' @@ -183,11 +189,14 @@ def main(): apply_patch( patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip, dry_run=module.check_mode, backup=p.backup ) changed = True - except PatchError, e: + except PatchError: + e = get_exception() module.fail_json(msg=str(e)) module.exit_json(changed=changed) # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/files/tempfile.py b/files/tempfile.py new file mode 100644 index 00000000000..021c88dbbb1 --- /dev/null +++ b/files/tempfile.py @@ -0,0 +1,114 @@ +#!/usr/bin/python +#coding: utf-8 -*- + +# (c) 2016 Krzysztof Magosa +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: tempfile +version_added: "2.3" +author: + - Krzysztof Magosa +short_description: Creates temporary files and directories. +description: + - The M(tempfile) module creates temporary files and directories. C(mktemp) command takes different parameters on various systems, this module helps to avoid troubles related to that. Files/directories created by module are accessible only by creator. In case you need to make them world-accessible you need to use M(file) module. +options: + state: + description: + - Whether to create file or directory. + required: false + choices: [ "file", "directory" ] + default: file + path: + description: + - Location where temporary file or directory should be created. If path is not specified default system temporary directory will be used. + required: false + default: null + prefix: + description: + - Prefix of file/directory name created by module. + required: false + default: ansible. + suffix: + description: + - Suffix of file/directory name created by module. 
+ required: false + default: "" +''' + +EXAMPLES = """ +- name: create temporary build directory + tempfile: + state: directory + suffix: build + +- name: create temporary file + tempfile: + state: file + suffix: temp +""" + +RETURN = ''' +path: + description: Path to created file or directory + returned: success + type: string + sample: "/tmp/ansible.bMlvdk" +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from tempfile import mkstemp, mkdtemp +from os import close + +def main(): + module = AnsibleModule( + argument_spec = dict( + state = dict(default='file', choices=['file', 'directory']), + path = dict(default=None), + prefix = dict(default='ansible.'), + suffix = dict(default='') + ) + ) + + try: + if module.params['state'] == 'file': + handle, path = mkstemp( + prefix=module.params['prefix'], + suffix=module.params['suffix'], + dir=module.params['path'] + ) + close(handle) + elif module.params['state'] == 'directory': + path = mkdtemp( + prefix=module.params['prefix'], + suffix=module.params['suffix'], + dir=module.params['path'] + ) + + module.exit_json(changed=True, path=path) + except Exception: + e = get_exception() + module.fail_json(msg=str(e)) + +if __name__ == '__main__': + main() diff --git a/identity/__init__.py b/identity/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/identity/ipa/__init__.py b/identity/ipa/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/identity/ipa/ipa_group.py b/identity/ipa/ipa_group.py new file mode 100644 index 00000000000..e34efc48daf --- /dev/null +++ b/identity/ipa/ipa_group.py @@ -0,0 +1,316 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ipa_group +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA group +description: +- Add, modify and delete group within IPA server +options: + cn: + description: + - Canonical name. + - Can not be changed as it is the unique identifier. + required: true + aliases: ['name'] + external: + description: + - Allow adding external non-IPA members from trusted domains. + required: false + gidnumber: + description: + - GID (use this option to set it manually). + required: false + group: + description: + - List of group names assigned to this group. + - If an empty list is passed all groups will be removed from this group. + - If option is omitted assigned groups will not be checked or changed. + - Groups that are already assigned but not passed will be removed. + nonposix: + description: + - Create as a non-POSIX group. + required: false + user: + description: + - List of user names assigned to this group. + - If an empty list is passed all users will be removed from this group. 
+    - If option is omitted assigned users will not be checked or changed.
+    - Users that are already assigned but not passed will be removed.
+  state:
+    description:
+    - State to ensure
+    required: false
+    default: "present"
+    choices: ["present", "absent"]
+  ipa_port:
+    description: Port of IPA server
+    required: false
+    default: 443
+  ipa_host:
+    description: IP or hostname of IPA server
+    required: false
+    default: "ipa.example.com"
+  ipa_user:
+    description: Administrative account used on IPA server
+    required: false
+    default: "admin"
+  ipa_pass:
+    description: Password of administrative user
+    required: true
+  ipa_prot:
+    description: Protocol used by IPA server
+    required: false
+    default: "https"
+    choices: ["http", "https"]
+  validate_certs:
+    description:
+    - This only applies if C(ipa_prot) is I(https).
+    - If set to C(no), the SSL certificates will not be validated.
+    - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+    required: false
+    default: true
+version_added: "2.3"
+'''
+
+EXAMPLES = '''
+# Ensure group is present
+- ipa_group:
+    name: oinstall
+    gidnumber: 54321
+    state: present
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+# Ensure that groups sysops and appops are assigned to ops but no other group
+- ipa_group:
+    name: ops
+    group:
+    - sysops
+    - appops
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+# Ensure that users linus and larry are assigned to the group, but no other user
+- ipa_group:
+    name: sysops
+    user:
+    - linus
+    - larry
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+# Ensure group is absent
+- ipa_group:
+    name: sysops
+    state: absent
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+'''
+
+RETURN = '''
+group:
+  description: Group as returned by IPA API
+  returned: always
+  type: dict
+'''
+
+from ansible.module_utils.ipa import IPAClient
+
+class GroupIPAClient(IPAClient):
+
+    def __init__(self, module, host, port, protocol):
+        super(GroupIPAClient, self).__init__(module, host, port, protocol)
+
+    def group_find(self, name):
+        return self._post_json(method='group_find', name=None, item={'all': True, 'cn': name})
+
+    def group_add(self, name, item):
+        return self._post_json(method='group_add', name=name, item=item)
+
+    def group_mod(self, name, item):
+        return self._post_json(method='group_mod', name=name, item=item)
+
+    def group_del(self, name):
+        return self._post_json(method='group_del', name=name)
+
+    def group_add_member(self, name, item):
+        return self._post_json(method='group_add_member', name=name, item=item)
+
+    def group_add_member_group(self, name, item):
+        return self.group_add_member(name=name, item={'group': item})
+
+    def group_add_member_user(self, name, item):
+        return self.group_add_member(name=name, item={'user': item})
+
+    def group_remove_member(self, name, item):
+        return self._post_json(method='group_remove_member', name=name, item=item)
+
+    def group_remove_member_group(self, name, item):
+        return self.group_remove_member(name=name, item={'group': item})
+
+    def group_remove_member_user(self, name, item):
+        return self.group_remove_member(name=name, item={'user': item})
+
+
+def get_group_dict(description=None, external=None, gid=None, nonposix=None):
+    group = {}
+    if description is not None:
+        group['description'] = description
+    if external is not None:
+        group['external'] = external
+    if gid is not None:
+        group['gidnumber'] = gid
+    if nonposix is not None:
+        group['nonposix'] = nonposix
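+    # For example (illustrative values), get_group_dict(description='Oracle install group', gid='54321')
+    # returns {'description': 'Oracle install group', 'gidnumber': '54321'}. A key is only
+    # included when the corresponding module parameter was actually set, so the diff logic
+    # below never sends attributes to group_mod that the play did not specify.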
+ return group + + +def get_group_diff(ipa_group, module_group): + data = [] + # With group_add attribute nonposix is passed, whereas with group_mod only posix can be passed. + if 'nonposix' in module_group: + # Only non-posix groups can be changed to posix + if not module_group['nonposix'] and ipa_group.get('nonposix'): + module_group['posix'] = True + del module_group['nonposix'] + + for key in module_group.keys(): + module_value = module_group.get(key, None) + ipa_value = ipa_group.get(key, None) + if isinstance(ipa_value, list) and not isinstance(module_value, list): + module_value = [module_value] + if isinstance(ipa_value, list) and isinstance(module_value, list): + ipa_value = sorted(ipa_value) + module_value = sorted(module_value) + if ipa_value != module_value: + data.append(key) + return data + + +def modify_if_diff(module, name, ipa_list, module_list, add_method, remove_method): + changed = False + diff = list(set(ipa_list) - set(module_list)) + if len(diff) > 0: + changed = True + if not module.check_mode: + remove_method(name=name, item=diff) + + diff = list(set(module_list) - set(ipa_list)) + if len(diff) > 0: + changed = True + if not module.check_mode: + add_method(name=name, item=diff) + + return changed + + +def ensure(module, client): + state = module.params['state'] + name = module.params['name'] + group = module.params['group'] + user = module.params['user'] + + module_group = get_group_dict(description=module.params['description'], external=module.params['external'], + gid=module.params['gidnumber'], nonposix=module.params['nonposix']) + ipa_group = client.group_find(name=name) + + changed = False + if state == 'present': + if not ipa_group: + changed = True + if not module.check_mode: + ipa_group = client.group_add(name, item=module_group) + else: + diff = get_group_diff(ipa_group, module_group) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_group.get(key) + client.group_mod(name=name, item=data) + + if group is not None: + changed = modify_if_diff(module, name, ipa_group.get('member_group', []), group, + client.group_add_member_group, + client.group_remove_member_group) or changed + + if user is not None: + changed = modify_if_diff(module, name, ipa_group.get('member_user', []), user, + client.group_add_member_user, + client.group_remove_member_user) or changed + + else: + if ipa_group: + changed = True + if not module.check_mode: + client.group_del(name) + + return changed, client.group_find(name=name) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str', required=False), + external=dict(type='bool', required=False), + gidnumber=dict(type='str', required=False, aliases=['gid']), + group=dict(type='list', required=False), + nonposix=dict(type='bool', required=False), + state=dict(type='str', required=False, default='present', choices=['present', 'absent']), + user=dict(type='list', required=False), + ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']), + ipa_host=dict(type='str', required=False, default='ipa.example.com'), + ipa_port=dict(type='int', required=False, default=443), + ipa_user=dict(type='str', required=False, default='admin'), + ipa_pass=dict(type='str', required=True, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + ), + supports_check_mode=True, + ) + + client = GroupIPAClient(module=module, + 
host=module.params['ipa_host'],
+                            port=module.params['ipa_port'],
+                            protocol=module.params['ipa_prot'])
+    try:
+        client.login(username=module.params['ipa_user'],
+                     password=module.params['ipa_pass'])
+        changed, group = ensure(module, client)
+        module.exit_json(changed=changed, group=group)
+    except Exception:
+        e = get_exception()
+        module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+    main()
diff --git a/identity/ipa/ipa_hbacrule.py b/identity/ipa/ipa_hbacrule.py
new file mode 100644
index 00000000000..d93bc32fd45
--- /dev/null
+++ b/identity/ipa/ipa_hbacrule.py
@@ -0,0 +1,411 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipa_hbacrule
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA HBAC rule
+description:
+- Add, modify or delete an IPA HBAC rule using IPA API.
+options:
+  cn:
+    description:
+    - Canonical name.
+    - Can not be changed as it is the unique identifier.
+    required: true
+    aliases: ["name"]
+  description:
+    description: Description
+    required: false
+  host:
+    description:
+    - List of host names to assign.
+    - If an empty list is passed all hosts will be removed from the rule.
+    - If option is omitted hosts will not be checked or changed.
+    required: false
+  hostcategory:
+    description: Host category
+    required: false
+    choices: ['all']
+  hostgroup:
+    description:
+    - List of hostgroup names to assign.
+    - If an empty list is passed all hostgroups will be removed from the rule.
+    - If option is omitted hostgroups will not be checked or changed.
+  service:
+    description:
+    - List of service names to assign.
+    - If an empty list is passed all services will be removed from the rule.
+    - If option is omitted services will not be checked or changed.
+  servicecategory:
+    description: Service category
+    required: false
+    choices: ['all']
+  servicegroup:
+    description:
+    - List of service group names to assign.
+    - If an empty list is passed all assigned service groups will be removed from the rule.
+    - If option is omitted service groups will not be checked or changed.
+  sourcehost:
+    description:
+    - List of source host names to assign.
+    - If an empty list is passed all assigned source hosts will be removed from the rule.
+    - If option is omitted source hosts will not be checked or changed.
+  sourcehostcategory:
+    description: Source host category
+    required: false
+    choices: ['all']
+  sourcehostgroup:
+    description:
+    - List of source host group names to assign.
+    - If an empty list is passed all assigned source host groups will be removed from the rule.
+    - If option is omitted source host groups will not be checked or changed.
+  state:
+    description: State to ensure
+    required: false
+    default: "present"
+    choices: ["present", "absent", "enabled", "disabled"]
+  user:
+    description:
+    - List of user names to assign.
+    - If an empty list is passed all assigned users will be removed from the rule.
+    - If option is omitted users will not be checked or changed.
+  usercategory:
+    description: User category
+    required: false
+    choices: ['all']
+  usergroup:
+    description:
+    - List of user group names to assign.
+    - If an empty list is passed all assigned user groups will be removed from the rule.
+    - If option is omitted user groups will not be checked or changed.
+  ipa_port:
+    description: Port of IPA server
+    required: false
+    default: 443
+  ipa_host:
+    description: IP or hostname of IPA server
+    required: false
+    default: "ipa.example.com"
+  ipa_user:
+    description: Administrative account used on IPA server
+    required: false
+    default: "admin"
+  ipa_pass:
+    description: Password of administrative user
+    required: true
+  ipa_prot:
+    description: Protocol used by IPA server
+    required: false
+    default: "https"
+    choices: ["http", "https"]
+  validate_certs:
+    description:
+    - This only applies if C(ipa_prot) is I(https).
+    - If set to C(no), the SSL certificates will not be validated.
+    - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+    required: false
+    default: true
+version_added: "2.3"
+'''
+
+EXAMPLES = '''
+# Ensure rule to allow all users to access any host from any host
+- ipa_hbacrule:
+    name: allow_all
+    description: Allow all users to access any host from any host
+    hostcategory: all
+    servicecategory: all
+    usercategory: all
+    state: present
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+# Ensure rule with certain limitations
+- ipa_hbacrule:
+    name: allow_all_developers_access_to_db
+    description: Allow all developers to access any database from any host
+    hostgroup:
+    - db-server
+    usergroup:
+    - developers
+    state: present
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+# Ensure rule is absent
+- ipa_hbacrule:
+    name: rule_to_be_deleted
+    state: absent
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+'''
+
+RETURN = '''
+hbacrule:
+  description: HBAC rule as returned by IPA API.
+ returned: always + type: dict +''' + +from ansible.module_utils.ipa import IPAClient + +class HBACRuleIPAClient(IPAClient): + + def __init__(self, module, host, port, protocol): + super(HBACRuleIPAClient, self).__init__(module, host, port, protocol) + + def hbacrule_find(self, name): + return self._post_json(method='hbacrule_find', name=None, item={'all': True, 'cn': name}) + + def hbacrule_add(self, name, item): + return self._post_json(method='hbacrule_add', name=name, item=item) + + def hbacrule_mod(self, name, item): + return self._post_json(method='hbacrule_mod', name=name, item=item) + + def hbacrule_del(self, name): + return self._post_json(method='hbacrule_del', name=name) + + def hbacrule_add_host(self, name, item): + return self._post_json(method='hbacrule_add_host', name=name, item=item) + + def hbacrule_remove_host(self, name, item): + return self._post_json(method='hbacrule_remove_host', name=name, item=item) + + def hbacrule_add_service(self, name, item): + return self._post_json(method='hbacrule_add_service', name=name, item=item) + + def hbacrule_remove_service(self, name, item): + return self._post_json(method='hbacrule_remove_service', name=name, item=item) + + def hbacrule_add_user(self, name, item): + return self._post_json(method='hbacrule_add_user', name=name, item=item) + + def hbacrule_remove_user(self, name, item): + return self._post_json(method='hbacrule_remove_user', name=name, item=item) + + def hbacrule_add_sourcehost(self, name, item): + return self._post_json(method='hbacrule_add_sourcehost', name=name, item=item) + + def hbacrule_remove_sourcehost(self, name, item): + return self._post_json(method='hbacrule_remove_sourcehost', name=name, item=item) + + +def get_hbacrule_dict(description=None, hostcategory=None, ipaenabledflag=None, servicecategory=None, + sourcehostcategory=None, + usercategory=None): + data = {} + if description is not None: + data['description'] = description + if hostcategory is not None: + data['hostcategory'] = hostcategory + if ipaenabledflag is not None: + data['ipaenabledflag'] = ipaenabledflag + if servicecategory is not None: + data['servicecategory'] = servicecategory + if sourcehostcategory is not None: + data['sourcehostcategory'] = sourcehostcategory + if usercategory is not None: + data['usercategory'] = usercategory + return data + + +def get_hbcarule_diff(ipa_hbcarule, module_hbcarule): + data = [] + for key in module_hbcarule.keys(): + module_value = module_hbcarule.get(key, None) + ipa_value = ipa_hbcarule.get(key, None) + if isinstance(ipa_value, list) and not isinstance(module_value, list): + module_value = [module_value] + if isinstance(ipa_value, list) and isinstance(module_value, list): + ipa_value = sorted(ipa_value) + module_value = sorted(module_value) + if ipa_value != module_value: + data.append(key) + return data + + +def modify_if_diff(module, name, ipa_list, module_list, add_method, remove_method, item): + changed = False + diff = list(set(ipa_list) - set(module_list)) + if len(diff) > 0: + changed = True + if not module.check_mode: + remove_method(name=name, item={item: diff}) + + diff = list(set(module_list) - set(ipa_list)) + if len(diff) > 0: + changed = True + if not module.check_mode: + add_method(name=name, item={item: diff}) + + return changed + + +def ensure(module, client): + name = module.params['name'] + state = module.params['state'] + + if state in ['present', 'enabled']: + ipaenabledflag = 'TRUE' + else: + ipaenabledflag = 'FALSE' + + host = module.params['host'] + hostcategory = 
module.params['hostcategory'] + hostgroup = module.params['hostgroup'] + service = module.params['service'] + servicecategory = module.params['servicecategory'] + servicegroup = module.params['servicegroup'] + sourcehost = module.params['sourcehost'] + sourcehostcategory = module.params['sourcehostcategory'] + sourcehostgroup = module.params['sourcehostgroup'] + user = module.params['user'] + usercategory = module.params['usercategory'] + usergroup = module.params['usergroup'] + + module_hbacrule = get_hbacrule_dict(description=module.params['description'], + hostcategory=hostcategory, + ipaenabledflag=ipaenabledflag, + servicecategory=servicecategory, + sourcehostcategory=sourcehostcategory, + usercategory=usercategory) + ipa_hbacrule = client.hbacrule_find(name=name) + + changed = False + if state in ['present', 'enabled', 'disabled']: + if not ipa_hbacrule: + changed = True + if not module.check_mode: + ipa_hbacrule = client.hbacrule_add(name=name, item=module_hbacrule) + else: + diff = get_hbcarule_diff(ipa_hbacrule, module_hbacrule) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_hbacrule.get(key) + client.hbacrule_mod(name=name, item=data) + + if host is not None: + changed = modify_if_diff(module, name, ipa_hbacrule.get('memberhost_host', []), host, + client.hbacrule_add_host, + client.hbacrule_remove_host, 'host') or changed + + if hostgroup is not None: + changed = modify_if_diff(module, name, ipa_hbacrule.get('memberhost_hostgroup', []), hostgroup, + client.hbacrule_add_host, + client.hbacrule_remove_host, 'hostgroup') or changed + + if service is not None: + changed = modify_if_diff(module, name, ipa_hbacrule.get('memberservice_hbacsvc', []), service, + client.hbacrule_add_service, + client.hbacrule_remove_service, 'hbacsvc') or changed + + if servicegroup is not None: + changed = modify_if_diff(module, name, ipa_hbacrule.get('memberservice_hbacsvcgroup', []), + servicegroup, + client.hbacrule_add_service, + client.hbacrule_remove_service, 'hbacsvcgroup') or changed + + if sourcehost is not None: + changed = modify_if_diff(module, name, ipa_hbacrule.get('sourcehost_host', []), sourcehost, + client.hbacrule_add_sourcehost, + client.hbacrule_remove_sourcehost, 'host') or changed + + if sourcehostgroup is not None: + changed = modify_if_diff(module, name, ipa_hbacrule.get('sourcehost_group', []), sourcehostgroup, + client.hbacrule_add_sourcehost, + client.hbacrule_remove_sourcehost, 'hostgroup') or changed + + if user is not None: + changed = modify_if_diff(module, name, ipa_hbacrule.get('memberuser_user', []), user, + client.hbacrule_add_user, + client.hbacrule_remove_user, 'user') or changed + + if usergroup is not None: + changed = modify_if_diff(module, name, ipa_hbacrule.get('memberuser_group', []), usergroup, + client.hbacrule_add_user, + client.hbacrule_remove_user, 'group') or changed + else: + if ipa_hbacrule: + changed = True + if not module.check_mode: + client.hbacrule_del(name=name) + + return changed, client.hbacrule_find(name=name) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str', required=False), + host=dict(type='list', required=False), + hostcategory=dict(type='str', required=False, choices=['all']), + hostgroup=dict(type='list', required=False), + service=dict(type='list', required=False), + servicecategory=dict(type='str', required=False, choices=['all']), + servicegroup=dict(type='list', 
required=False),
+            sourcehost=dict(type='list', required=False),
+            sourcehostcategory=dict(type='str', required=False, choices=['all']),
+            sourcehostgroup=dict(type='list', required=False),
+            state=dict(type='str', required=False, default='present',
+                       choices=['present', 'absent', 'enabled', 'disabled']),
+            user=dict(type='list', required=False),
+            usercategory=dict(type='str', required=False, choices=['all']),
+            usergroup=dict(type='list', required=False),
+            ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
+            ipa_host=dict(type='str', required=False, default='ipa.example.com'),
+            ipa_port=dict(type='int', required=False, default=443),
+            ipa_user=dict(type='str', required=False, default='admin'),
+            ipa_pass=dict(type='str', required=True, no_log=True),
+            validate_certs=dict(type='bool', required=False, default=True),
+        ),
+        supports_check_mode=True,
+    )
+
+    client = HBACRuleIPAClient(module=module,
+                               host=module.params['ipa_host'],
+                               port=module.params['ipa_port'],
+                               protocol=module.params['ipa_prot'])
+
+    try:
+        client.login(username=module.params['ipa_user'],
+                     password=module.params['ipa_pass'])
+        changed, hbacrule = ensure(module, client)
+        module.exit_json(changed=changed, hbacrule=hbacrule)
+    except Exception:
+        e = get_exception()
+        module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+    main()
diff --git a/identity/ipa/ipa_host.py b/identity/ipa/ipa_host.py
new file mode 100644
index 00000000000..17b78500bc5
--- /dev/null
+++ b/identity/ipa/ipa_host.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipa_host
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA host
+description:
+- Add, modify and delete an IPA host using IPA API
+options:
+  fqdn:
+    description:
+    - Fully qualified domain name.
+    - Can not be changed as it is the unique identifier.
+    required: true
+    aliases: ["name"]
+  description:
+    description:
+    - A description of this host.
+    required: false
+  force:
+    description:
+    - Force host name even if not in DNS.
+    required: false
+  ip_address:
+    description:
+    - Add the host to DNS with this IP address.
+    required: false
+  mac_address:
+    description:
+    - List of hardware MAC address(es) of this host.
+    - If option is omitted MAC addresses will not be checked or changed.
+    - If an empty list is passed all assigned MAC addresses will be removed.
+    - MAC addresses that are already assigned but not passed will be removed.
+    required: false
+    aliases: ["macaddress"]
+  ns_host_location:
+    description:
+    - Host location (e.g. "Lab 2")
+    required: false
+    aliases: ["nshostlocation"]
+  ns_hardware_platform:
+    description:
+    - Host hardware platform (e.g. "Lenovo T61")
+    required: false
+    aliases: ["nshardwareplatform"]
+  ns_os_version:
+    description:
+    - Host operating system and version (e.g. "Fedora 9")
+    required: false
+    aliases: ["nsosversion"]
+  user_certificate:
+    description:
+    - List of Base-64 encoded server certificates.
+    - If option is omitted certificates will not be checked or changed.
+    - If an empty list is passed all assigned certificates will be removed.
+    - Certificates already assigned but not passed will be removed.
+    required: false
+    aliases: ["usercertificate"]
+  state:
+    description: State to ensure
+    required: false
+    default: present
+    choices: ["present", "absent", "disabled"]
+  ipa_port:
+    description: Port of IPA server
+    required: false
+    default: 443
+  ipa_host:
+    description: IP or hostname of IPA server
+    required: false
+    default: ipa.example.com
+  ipa_user:
+    description: Administrative account used on IPA server
+    required: false
+    default: admin
+  ipa_pass:
+    description: Password of administrative user
+    required: true
+  ipa_prot:
+    description: Protocol used by IPA server
+    required: false
+    default: https
+    choices: ["http", "https"]
+  validate_certs:
+    description:
+    - This only applies if C(ipa_prot) is I(https).
+    - If set to C(no), the SSL certificates will not be validated.
+    - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+    required: false
+    default: true
+version_added: "2.3"
+'''
+
+EXAMPLES = '''
+# Ensure host is present
+- ipa_host:
+    name: host01.example.com
+    description: Example host
+    ip_address: 192.168.0.123
+    ns_host_location: Lab
+    ns_os_version: CentOS 7
+    ns_hardware_platform: Lenovo T61
+    mac_address:
+    - "08:00:27:E3:B1:2D"
+    - "52:54:00:BD:97:1E"
+    state: present
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+# Ensure host is disabled
+- ipa_host:
+    name: host01.example.com
+    state: disabled
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+# Ensure that all user certificates are removed
+- ipa_host:
+    name: host01.example.com
+    user_certificate: []
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+
+# Ensure host is absent
+- ipa_host:
+    name: host01.example.com
+    state: absent
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+'''
+
+RETURN = '''
+host:
+  description: Host as returned by IPA API.
+ returned: always + type: dict +host_diff: + description: List of options that differ and would be changed + returned: if check mode and a difference is found + type: list +''' + +from ansible.module_utils.ipa import IPAClient + +class HostIPAClient(IPAClient): + + def __init__(self, module, host, port, protocol): + super(HostIPAClient, self).__init__(module, host, port, protocol) + + def host_find(self, name): + return self._post_json(method='host_find', name=None, item={'all': True, 'fqdn': name}) + + def host_add(self, name, host): + return self._post_json(method='host_add', name=name, item=host) + + def host_mod(self, name, host): + return self._post_json(method='host_mod', name=name, item=host) + + def host_del(self, name): + return self._post_json(method='host_del', name=name) + + def host_disable(self, name): + return self._post_json(method='host_disable', name=name) + + +def get_host_dict(description=None, force=None, ip_address=None, ns_host_location=None, ns_hardware_platform=None, + ns_os_version=None, user_certificate=None, mac_address=None): + data = {} + if description is not None: + data['description'] = description + if force is not None: + data['force'] = force + if ip_address is not None: + data['ip_address'] = ip_address + if ns_host_location is not None: + data['nshostlocation'] = ns_host_location + if ns_hardware_platform is not None: + data['nshardwareplatform'] = ns_hardware_platform + if ns_os_version is not None: + data['nsosversion'] = ns_os_version + if user_certificate is not None: + data['usercertificate'] = [{"__base64__": item} for item in user_certificate] + if mac_address is not None: + data['macaddress'] = mac_address + return data + + +def get_host_diff(ipa_host, module_host): + non_updateable_keys = ['force', 'ip_address'] + data = [] + for key in non_updateable_keys: + if key in module_host: + del module_host[key] + for key in module_host.keys(): + ipa_value = ipa_host.get(key, None) + module_value = module_host.get(key, None) + if isinstance(ipa_value, list) and not isinstance(module_value, list): + module_value = [module_value] + if isinstance(ipa_value, list) and isinstance(module_value, list): + ipa_value = sorted(ipa_value) + module_value = sorted(module_value) + if ipa_value != module_value: + data.append(key) + return data + + +def ensure(module, client): + name = module.params['name'] + state = module.params['state'] + + ipa_host = client.host_find(name=name) + module_host = get_host_dict(description=module.params['description'], + force=module.params['force'], ip_address=module.params['ip_address'], + ns_host_location=module.params['ns_host_location'], + ns_hardware_platform=module.params['ns_hardware_platform'], + ns_os_version=module.params['ns_os_version'], + user_certificate=module.params['user_certificate'], + mac_address=module.params['mac_address']) + changed = False + if state in ['present', 'enabled', 'disabled']: + if not ipa_host: + changed = True + if not module.check_mode: + client.host_add(name=name, host=module_host) + else: + diff = get_host_diff(ipa_host, module_host) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_host.get(key) + client.host_mod(name=name, host=data) + + else: + if ipa_host: + changed = True + if not module.check_mode: + client.host_del(name=name) + + return changed, client.host_find(name=name) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + description=dict(type='str', required=False), + fqdn=dict(type='str', 
required=True, aliases=['name']),
+            force=dict(type='bool', required=False),
+            ip_address=dict(type='str', required=False),
+            ns_host_location=dict(type='str', required=False, aliases=['nshostlocation']),
+            ns_hardware_platform=dict(type='str', required=False, aliases=['nshardwareplatform']),
+            ns_os_version=dict(type='str', required=False, aliases=['nsosversion']),
+            user_certificate=dict(type='list', required=False, aliases=['usercertificate']),
+            mac_address=dict(type='list', required=False, aliases=['macaddress']),
+            state=dict(type='str', required=False, default='present',
+                       choices=['present', 'absent', 'enabled', 'disabled']),
+            ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
+            ipa_host=dict(type='str', required=False, default='ipa.example.com'),
+            ipa_port=dict(type='int', required=False, default=443),
+            ipa_user=dict(type='str', required=False, default='admin'),
+            ipa_pass=dict(type='str', required=True, no_log=True),
+            validate_certs=dict(type='bool', required=False, default=True),
+        ),
+        supports_check_mode=True,
+    )
+
+    client = HostIPAClient(module=module,
+                           host=module.params['ipa_host'],
+                           port=module.params['ipa_port'],
+                           protocol=module.params['ipa_prot'])
+
+    try:
+        client.login(username=module.params['ipa_user'],
+                     password=module.params['ipa_pass'])
+        changed, host = ensure(module, client)
+        module.exit_json(changed=changed, host=host)
+    except Exception:
+        e = get_exception()
+        module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+    main()
diff --git a/identity/ipa/ipa_hostgroup.py b/identity/ipa/ipa_hostgroup.py
new file mode 100644
index 00000000000..57fbc5b4531
--- /dev/null
+++ b/identity/ipa/ipa_hostgroup.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipa_hostgroup
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA host-group
+description:
+- Add, modify and delete an IPA host-group using IPA API
+options:
+  cn:
+    description:
+    - Name of host-group.
+    - Can not be changed as it is the unique identifier.
+    required: true
+    aliases: ["name"]
+  description:
+    description:
+    - Description
+    required: false
+  host:
+    description:
+    - List of hosts that belong to the host-group.
+    - If an empty list is passed all hosts will be removed from the group.
+    - If option is omitted hosts will not be checked or changed.
+    - If option is passed all assigned hosts that are not passed will be unassigned from the group.
+    required: false
+  hostgroup:
+    description:
+    - List of host-groups that belong to that host-group.
+    - If an empty list is passed all host-groups will be removed from the group.
+ - If option is omitted host-groups will not be checked or changed. + - If option is passed all assigned hostgroups that are not passed will be unassigned from the group. + required: false + state: + description: + - State to ensure. + required: false + default: "present" + choices: ["present", "absent"] + ipa_port: + description: Port of IPA server + required: false + default: 443 + ipa_host: + description: IP or hostname of IPA server + required: false + default: "ipa.example.com" + ipa_user: + description: Administrative account used on IPA server + required: false + default: "admin" + ipa_pass: + description: Password of administrative user + required: true + ipa_prot: + description: Protocol used by IPA server + required: false + default: "https" + choices: ["http", "https"] + validate_certs: + description: + - This only applies if C(ipa_prot) is I(https). + - If set to C(no), the SSL certificates will not be validated. + - This should only set to C(no) used on personally controlled sites using self-signed certificates. + required: false + default: true +version_added: "2.3" +''' + +EXAMPLES = ''' +# Ensure host-group databases is present +- ipa_hostgroup: + name: databases + state: present + host: + - db.example.com + hostgroup: + - mysql-server + - oracle-server + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +# Ensure host-group databases is absent +- ipa_hostgroup: + name: databases + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = ''' +hostgroup: + description: Hostgroup as returned by IPA API. + returned: always + type: dict +''' + +from ansible.module_utils.ipa import IPAClient + +class HostGroupIPAClient(IPAClient): + + def __init__(self, module, host, port, protocol): + super(HostGroupIPAClient, self).__init__(module, host, port, protocol) + + def hostgroup_find(self, name): + return self._post_json(method='hostgroup_find', name=None, item={'all': True, 'cn': name}) + + def hostgroup_add(self, name, item): + return self._post_json(method='hostgroup_add', name=name, item=item) + + def hostgroup_mod(self, name, item): + return self._post_json(method='hostgroup_mod', name=name, item=item) + + def hostgroup_del(self, name): + return self._post_json(method='hostgroup_del', name=name) + + def hostgroup_add_member(self, name, item): + return self._post_json(method='hostgroup_add_member', name=name, item=item) + + def hostgroup_add_host(self, name, item): + return self.hostgroup_add_member(name=name, item={'host': item}) + + def hostgroup_add_hostgroup(self, name, item): + return self.hostgroup_add_member(name=name, item={'hostgroup': item}) + + def hostgroup_remove_member(self, name, item): + return self._post_json(method='hostgroup_remove_member', name=name, item=item) + + def hostgroup_remove_host(self, name, item): + return self.hostgroup_remove_member(name=name, item={'host': item}) + + def hostgroup_remove_hostgroup(self, name, item): + return self.hostgroup_remove_member(name=name, item={'hostgroup': item}) + + +def get_hostgroup_dict(description=None): + data = {} + if description is not None: + data['description'] = description + return data + + +def get_hostgroup_diff(ipa_hostgroup, module_hostgroup): + data = [] + for key in module_hostgroup.keys(): + ipa_value = ipa_hostgroup.get(key, None) + module_value = module_hostgroup.get(key, None) + if isinstance(ipa_value, list) and not isinstance(module_value, list): + module_value = [module_value] + if isinstance(ipa_value, list) and 
isinstance(module_value, list): + ipa_value = sorted(ipa_value) + module_value = sorted(module_value) + if ipa_value != module_value: + data.append(key) + return data + + +def modify_if_diff(module, name, ipa_list, module_list, add_method, remove_method): + changed = False + diff = list(set(ipa_list) - set(module_list)) + if len(diff) > 0: + changed = True + if not module.check_mode: + remove_method(name=name, item=diff) + + diff = list(set(module_list) - set(ipa_list)) + if len(diff) > 0: + changed = True + if not module.check_mode: + add_method(name=name, item=diff) + return changed + + +def ensure(module, client): + name = module.params['name'] + state = module.params['state'] + host = module.params['host'] + hostgroup = module.params['hostgroup'] + + ipa_hostgroup = client.hostgroup_find(name=name) + module_hostgroup = get_hostgroup_dict(description=module.params['description']) + + changed = False + if state == 'present': + if not ipa_hostgroup: + changed = True + if not module.check_mode: + ipa_hostgroup = client.hostgroup_add(name=name, item=module_hostgroup) + else: + diff = get_hostgroup_diff(ipa_hostgroup, module_hostgroup) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_hostgroup.get(key) + client.hostgroup_mod(name=name, item=data) + + if host is not None: + changed = modify_if_diff(module, name, ipa_hostgroup.get('member_host', []), + [item.lower() for item in host], + client.hostgroup_add_host, client.hostgroup_remove_host) or changed + + if hostgroup is not None: + changed = modify_if_diff(module, name, ipa_hostgroup.get('member_hostgroup', []), + [item.lower() for item in hostgroup], + client.hostgroup_add_hostgroup, client.hostgroup_remove_hostgroup) or changed + + else: + if ipa_hostgroup: + changed = True + if not module.check_mode: + client.hostgroup_del(name=name) + + return changed, client.hostgroup_find(name=name) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str', required=False), + host=dict(type='list', required=False), + hostgroup=dict(type='list', required=False), + state=dict(type='str', required=False, default='present', + choices=['present', 'absent', 'enabled', 'disabled']), + ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']), + ipa_host=dict(type='str', required=False, default='ipa.example.com'), + ipa_port=dict(type='int', required=False, default=443), + ipa_user=dict(type='str', required=False, default='admin'), + ipa_pass=dict(type='str', required=True, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + ), + supports_check_mode=True, + ) + + client = HostGroupIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, hostgroup = ensure(module, client) + module.exit_json(changed=changed, hostgroup=hostgroup) + except Exception: + e = get_exception() + module.fail_json(msg=str(e)) + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/identity/ipa/ipa_role.py b/identity/ipa/ipa_role.py new file mode 100644 index 00000000000..95cd2bc45ed --- /dev/null +++ b/identity/ipa/ipa_role.py @@ -0,0 +1,344 @@ +#!/usr/bin/python +# -*- coding: 
utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ipa_role
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA role
+description:
+- Add, modify and delete a role within FreeIPA server using FreeIPA API
+options:
+  cn:
+    description:
+    - Role name.
+    - Can not be changed as it is the unique identifier.
+    required: true
+    aliases: ['name']
+  description:
+    description:
+    - A description of this role-group.
+    required: false
+  group:
+    description:
+    - List of group names assigned to this role.
+    - If an empty list is passed all assigned groups will be unassigned from the role.
+    - If option is omitted groups will not be checked or changed.
+    - If option is passed all assigned groups that are not passed will be unassigned from the role.
+  host:
+    description:
+    - List of host names to assign.
+    - If an empty list is passed all assigned hosts will be unassigned from the role.
+    - If option is omitted hosts will not be checked or changed.
+    - If option is passed all assigned hosts that are not passed will be unassigned from the role.
+    required: false
+  hostgroup:
+    description:
+    - List of host group names to assign.
+    - If an empty list is passed all assigned host groups will be removed from the role.
+    - If option is omitted host groups will not be checked or changed.
+    - If option is passed all assigned hostgroups that are not passed will be unassigned from the role.
+    required: false
+  service:
+    description:
+    - List of service names to assign.
+    - If an empty list is passed all assigned services will be removed from the role.
+    - If option is omitted services will not be checked or changed.
+    - If option is passed all assigned services that are not passed will be removed from the role.
+    required: false
+  state:
+    description: State to ensure
+    required: false
+    default: "present"
+    choices: ["present", "absent"]
+  user:
+    description:
+    - List of user names to assign.
+    - If an empty list is passed all assigned users will be removed from the role.
+    - If option is omitted users will not be checked or changed.
+    required: false
+  ipa_port:
+    description: Port of IPA server
+    required: false
+    default: 443
+  ipa_host:
+    description: IP or hostname of IPA server
+    required: false
+    default: "ipa.example.com"
+  ipa_user:
+    description: Administrative account used on IPA server
+    required: false
+    default: "admin"
+  ipa_pass:
+    description: Password of administrative user
+    required: true
+  ipa_prot:
+    description: Protocol used by IPA server
+    required: false
+    default: "https"
+    choices: ["http", "https"]
+  validate_certs:
+    description:
+    - This only applies if C(ipa_prot) is I(https).
+    - If set to C(no), the SSL certificates will not be validated.
+    - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ required: false + default: true +version_added: "2.3" +''' + +EXAMPLES = ''' +# Ensure role is present +- ipa_role: + name: dba + description: Database Administrators + state: present + user: + - pinky + - brain + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +# Ensure role with certain details +- ipa_role: + name: another-role + description: Just another role + group: + - editors + host: + - host01.example.com + hostgroup: + - hostgroup01 + service: + - service01 + +# Ensure role is absent +- ipa_role: + name: dba + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = ''' +role: + description: Role as returned by IPA API. + returned: always + type: dict +''' + +from ansible.module_utils.ipa import IPAClient + +class RoleIPAClient(IPAClient): + + def __init__(self, module, host, port, protocol): + super(RoleIPAClient, self).__init__(module, host, port, protocol) + + def role_find(self, name): + return self._post_json(method='role_find', name=None, item={'all': True, 'cn': name}) + + def role_add(self, name, item): + return self._post_json(method='role_add', name=name, item=item) + + def role_mod(self, name, item): + return self._post_json(method='role_mod', name=name, item=item) + + def role_del(self, name): + return self._post_json(method='role_del', name=name) + + def role_add_member(self, name, item): + return self._post_json(method='role_add_member', name=name, item=item) + + def role_add_group(self, name, item): + return self.role_add_member(name=name, item={'group': item}) + + def role_add_host(self, name, item): + return self.role_add_member(name=name, item={'host': item}) + + def role_add_hostgroup(self, name, item): + return self.role_add_member(name=name, item={'hostgroup': item}) + + def role_add_service(self, name, item): + return self.role_add_member(name=name, item={'service': item}) + + def role_add_user(self, name, item): + return self.role_add_member(name=name, item={'user': item}) + + def role_remove_member(self, name, item): + return self._post_json(method='role_remove_member', name=name, item=item) + + def role_remove_group(self, name, item): + return self.role_remove_member(name=name, item={'group': item}) + + def role_remove_host(self, name, item): + return self.role_remove_member(name=name, item={'host': item}) + + def role_remove_hostgroup(self, name, item): + return self.role_remove_member(name=name, item={'hostgroup': item}) + + def role_remove_service(self, name, item): + return self.role_remove_member(name=name, item={'service': item}) + + def role_remove_user(self, name, item): + return self.role_remove_member(name=name, item={'user': item}) + + +def get_role_dict(description=None): + data = {} + if description is not None: + data['description'] = description + return data + + +def get_role_diff(ipa_role, module_role): + data = [] + for key in module_role.keys(): + module_value = module_role.get(key, None) + ipa_value = ipa_role.get(key, None) + if isinstance(ipa_value, list) and not isinstance(module_value, list): + module_value = [module_value] + if isinstance(ipa_value, list) and isinstance(module_value, list): + ipa_value = sorted(ipa_value) + module_value = sorted(module_value) + if ipa_value != module_value: + data.append(key) + return data + + +def modify_if_diff(module, name, ipa_list, module_list, add_method, remove_method): + changed = False + diff = list(set(ipa_list) - set(module_list)) + if len(diff) > 0: + changed = True + if not module.check_mode: + remove_method(name=name, 
item=diff) + + diff = list(set(module_list) - set(ipa_list)) + if len(diff) > 0: + changed = True + if not module.check_mode: + add_method(name=name, item=diff) + return changed + + +def ensure(module, client): + state = module.params['state'] + name = module.params['name'] + group = module.params['group'] + host = module.params['host'] + hostgroup = module.params['hostgroup'] + service = module.params['service'] + user = module.params['user'] + + module_role = get_role_dict(description=module.params['description']) + ipa_role = client.role_find(name=name) + + changed = False + if state == 'present': + if not ipa_role: + changed = True + if not module.check_mode: + ipa_role = client.role_add(name=name, item=module_role) + else: + diff = get_role_diff(ipa_role=ipa_role, module_role=module_role) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_role.get(key) + client.role_mod(name=name, item=data) + + if group is not None: + changed = modify_if_diff(module, name, ipa_role.get('member_group', []), group, + client.role_add_group, + client.role_remove_group) or changed + + if host is not None: + changed = modify_if_diff(module, name, ipa_role.get('member_host', []), host, + client.role_add_host, + client.role_remove_host) or changed + + if hostgroup is not None: + changed = modify_if_diff(module, name, ipa_role.get('member_hostgroup', []), hostgroup, + client.role_add_hostgroup, + client.role_remove_hostgroup) or changed + + if service is not None: + changed = modify_if_diff(module, name, ipa_role.get('member_service', []), service, + client.role_add_service, + client.role_remove_service) or changed + if user is not None: + changed = modify_if_diff(module, name, ipa_role.get('member_user', []), user, + client.role_add_user, + client.role_remove_user) or changed + else: + if ipa_role: + changed = True + if not module.check_mode: + client.role_del(name) + + return changed, client.role_find(name=name) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str', required=False), + group=dict(type='list', required=False), + host=dict(type='list', required=False), + hostgroup=dict(type='list', required=False), + service=dict(type='list', required=False), + state=dict(type='str', required=False, default='present', choices=['present', 'absent']), + user=dict(type='list', required=False), + ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']), + ipa_host=dict(type='str', required=False, default='ipa.example.com'), + ipa_port=dict(type='int', required=False, default=443), + ipa_user=dict(type='str', required=False, default='admin'), + ipa_pass=dict(type='str', required=True, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + ), + supports_check_mode=True, + ) + + client = RoleIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, role = ensure(module, client) + module.exit_json(changed=changed, role=role) + except Exception: + e = get_exception() + module.fail_json(msg=str(e)) + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/identity/ipa/ipa_sudocmd.py b/identity/ipa/ipa_sudocmd.py new 
file mode 100644 index 00000000000..6ec3c84bb1d --- /dev/null +++ b/identity/ipa/ipa_sudocmd.py @@ -0,0 +1,207 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ipa_sudocmd +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA sudo command +description: +- Add, modify or delete sudo command within FreeIPA server using FreeIPA API. +options: + sudocmd: + description: + - Sudo Command. + aliases: ['name'] + required: true + description: + description: + - A description of this command. + required: false + state: + description: State to ensure + required: false + default: present + choices: ['present', 'absent'] + ipa_port: + description: Port of IPA server + required: false + default: 443 + ipa_host: + description: IP or hostname of IPA server + required: false + default: "ipa.example.com" + ipa_user: + description: Administrative account used on IPA server + required: false + default: "admin" + ipa_pass: + description: Password of administrative user + required: true + ipa_prot: + description: Protocol used by IPA server + required: false + default: "https" + choices: ["http", "https"] + validate_certs: + description: + - This only applies if C(ipa_prot) is I(https). + - If set to C(no), the SSL certificates will not be validated. + - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ required: false + default: true +version_added: "2.3" +''' + +EXAMPLES = ''' +# Ensure sudo command exists +- ipa_sudocmd: + name: su + description: Allow to run su via sudo + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +# Ensure sudo command does not exist +- ipa_sudocmd: + name: su + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = ''' +sudocmd: + description: Sudo command as returned by IPA API + returned: always + type: dict +''' + +from ansible.module_utils.ipa import IPAClient + +class SudoCmdIPAClient(IPAClient): + + def __init__(self, module, host, port, protocol): + super(SudoCmdIPAClient, self).__init__(module, host, port, protocol) + + def sudocmd_find(self, name): + return self._post_json(method='sudocmd_find', name=None, item={'all': True, 'sudocmd': name}) + + def sudocmd_add(self, name, item): + return self._post_json(method='sudocmd_add', name=name, item=item) + + def sudocmd_mod(self, name, item): + return self._post_json(method='sudocmd_mod', name=name, item=item) + + def sudocmd_del(self, name): + return self._post_json(method='sudocmd_del', name=name) + + +def get_sudocmd_dict(description=None): + data = {} + if description is not None: + data['description'] = description + return data + + +def get_sudocmd_diff(ipa_sudocmd, module_sudocmd): + data = [] + for key in module_sudocmd.keys(): + module_value = module_sudocmd.get(key, None) + ipa_value = ipa_sudocmd.get(key, None) + if isinstance(ipa_value, list) and not isinstance(module_value, list): + module_value = [module_value] + if isinstance(ipa_value, list) and isinstance(module_value, list): + ipa_value = sorted(ipa_value) + module_value = sorted(module_value) + if ipa_value != module_value: + data.append(key) + return data + + +def ensure(module, client): + name = module.params['sudocmd'] + state = module.params['state'] + + module_sudocmd = get_sudocmd_dict(description=module.params['description']) + ipa_sudocmd = client.sudocmd_find(name=name) + + changed = False + if state == 'present': + if not ipa_sudocmd: + changed = True + if not module.check_mode: + client.sudocmd_add(name=name, item=module_sudocmd) + else: + diff = get_sudocmd_diff(ipa_sudocmd, module_sudocmd) + if len(diff) > 0: + changed = True + if not module.check_mode: + data = {} + for key in diff: + data[key] = module_sudocmd.get(key) + client.sudocmd_mod(name=name, item=data) + else: + if ipa_sudocmd: + changed = True + if not module.check_mode: + client.sudocmd_del(name=name) + + return changed, client.sudocmd_find(name=name) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + description=dict(type='str', required=False), + state=dict(type='str', required=False, default='present', + choices=['present', 'absent']), + sudocmd=dict(type='str', required=True, aliases=['name']), + ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']), + ipa_host=dict(type='str', required=False, default='ipa.example.com'), + ipa_port=dict(type='int', required=False, default=443), + ipa_user=dict(type='str', required=False, default='admin'), + ipa_pass=dict(type='str', required=True, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + ), + supports_check_mode=True, + ) + + client = SudoCmdIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + try: + client.login(username=module.params['ipa_user'], +
password=module.params['ipa_pass']) + changed, sudocmd = ensure(module, client) + module.exit_json(changed=changed, sudocmd=sudocmd) + except Exception: + e = get_exception() + module.fail_json(msg=str(e)) + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/identity/ipa/ipa_sudocmdgroup.py b/identity/ipa/ipa_sudocmdgroup.py new file mode 100644 index 00000000000..e1d0e9b6021 --- /dev/null +++ b/identity/ipa/ipa_sudocmdgroup.py @@ -0,0 +1,249 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ipa_sudocmdgroup +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA sudo command group +description: +- Add, modify or delete sudo command group within IPA server using IPA API. +options: + cn: + description: + - Sudo Command Group. + aliases: ['name'] + required: true + description: + description: + - Group description. + state: + description: State to ensure + required: false + default: present + choices: ['present', 'absent'] + sudocmd: + description: + - List of sudo commands to assign to the group. + - If an empty list is passed all assigned commands will be removed from the group. + - If option is omitted sudo commands will not be checked or changed. + required: false + ipa_port: + description: Port of IPA server + required: false + default: 443 + ipa_host: + description: IP or hostname of IPA server + required: false + default: "ipa.example.com" + ipa_user: + description: Administrative account used on IPA server + required: false + default: "admin" + ipa_pass: + description: Password of administrative user + required: true + ipa_prot: + description: Protocol used by IPA server + required: false + default: "https" + choices: ["http", "https"] + validate_certs: + description: + - This only applies if C(ipa_prot) is I(https). + - If set to C(no), the SSL certificates will not be validated. + - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ required: false + default: true +version_added: "2.3" +''' + +EXAMPLES = ''' +- name: Ensure sudo command group exists + ipa_sudocmdgroup: + name: group01 + description: Group of important commands + sudocmd: + - su + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +- name: Ensure sudo command group does not exist + ipa_sudocmdgroup: + name: group01 + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = ''' +sudocmdgroup: + description: Sudo command group as returned by IPA API + returned: always + type: dict +''' + +from ansible.module_utils.ipa import IPAClient + +class SudoCmdGroupIPAClient(IPAClient): + + def __init__(self, module, host, port, protocol): + super(SudoCmdGroupIPAClient, self).__init__(module, host, port, protocol) + + def sudocmdgroup_find(self, name): + return self._post_json(method='sudocmdgroup_find', name=None, item={'all': True, 'cn': name}) + + def sudocmdgroup_add(self, name, item): + return self._post_json(method='sudocmdgroup_add', name=name, item=item) + + def sudocmdgroup_mod(self, name, item): + return self._post_json(method='sudocmdgroup_mod', name=name, item=item) + + def sudocmdgroup_del(self, name): + return self._post_json(method='sudocmdgroup_del', name=name) + + def sudocmdgroup_add_member(self, name, item): + return self._post_json(method='sudocmdgroup_add_member', name=name, item=item) + + def sudocmdgroup_add_member_sudocmd(self, name, item): + return self.sudocmdgroup_add_member(name=name, item={'sudocmd': item}) + + def sudocmdgroup_remove_member(self, name, item): + return self._post_json(method='sudocmdgroup_remove_member', name=name, item=item) + + def sudocmdgroup_remove_member_sudocmd(self, name, item): + return self.sudocmdgroup_remove_member(name=name, item={'sudocmd': item}) + + +def get_sudocmdgroup_dict(description=None): + data = {} + if description is not None: + data['description'] = description + return data + + +def modify_if_diff(module, name, ipa_list, module_list, add_method, remove_method): + changed = False + diff = list(set(ipa_list) - set(module_list)) + if len(diff) > 0: + changed = True + if not module.check_mode: + remove_method(name=name, item=diff) + + diff = list(set(module_list) - set(ipa_list)) + if len(diff) > 0: + changed = True + if not module.check_mode: + add_method(name=name, item=diff) + return changed + + +def get_sudocmdgroup_diff(ipa_sudocmdgroup, module_sudocmdgroup): + data = [] + for key in module_sudocmdgroup.keys(): + module_value = module_sudocmdgroup.get(key, None) + ipa_value = ipa_sudocmdgroup.get(key, None) + if isinstance(ipa_value, list) and not isinstance(module_value, list): + module_value = [module_value] + if isinstance(ipa_value, list) and isinstance(module_value, list): + ipa_value = sorted(ipa_value) + module_value = sorted(module_value) + if ipa_value != module_value: + data.append(key) + return data + + +def ensure(module, client): + name = module.params['name'] + state = module.params['state'] + sudocmd = module.params['sudocmd'] + + module_sudocmdgroup = get_sudocmdgroup_dict(description=module.params['description']) + ipa_sudocmdgroup = client.sudocmdgroup_find(name=name) + + changed = False + if state == 'present': + if not ipa_sudocmdgroup: + changed = True + if not module.check_mode: + ipa_sudocmdgroup = client.sudocmdgroup_add(name=name, item=module_sudocmdgroup) + else: + diff = get_sudocmdgroup_diff(ipa_sudocmdgroup, module_sudocmdgroup) + if len(diff) > 0: + changed = True + if not module.check_mode: + data =
{} + for key in diff: + data[key] = module_sudocmdgroup.get(key) + client.sudocmdgroup_mod(name=name, item=data) + + if sudocmd is not None: + changed = modify_if_diff(module, name, ipa_sudocmdgroup.get('member_sudocmd', []), sudocmd, + client.sudocmdgroup_add_member_sudocmd, + client.sudocmdgroup_remove_member_sudocmd) or changed + else: + if ipa_sudocmdgroup: + changed = True + if not module.check_mode: + client.sudocmdgroup_del(name=name) + + return changed, client.sudocmdgroup_find(name=name) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str', required=False), + state=dict(type='str', required=False, default='present', + choices=['present', 'absent']), + sudocmd=dict(type='list', required=False), + ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']), + ipa_host=dict(type='str', required=False, default='ipa.example.com'), + ipa_port=dict(type='int', required=False, default=443), + ipa_user=dict(type='str', required=False, default='admin'), + ipa_pass=dict(type='str', required=True, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + ), + supports_check_mode=True, + ) + + client = SudoCmdGroupIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, sudocmdgroup = ensure(module, client) + module.exit_json(changed=changed, sudocmdgroup=sudocmdgroup) + except Exception: + e = get_exception() + module.fail_json(msg=str(e)) + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/identity/ipa/ipa_sudorule.py b/identity/ipa/ipa_sudorule.py new file mode 100644 index 00000000000..f5da15a7046 --- /dev/null +++ b/identity/ipa/ipa_sudorule.py @@ -0,0 +1,424 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ipa_sudorule +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA sudo rule +description: +- Add, modify or delete sudo rule within IPA server using IPA API. +options: + cn: + description: + - Canonical name. + - Cannot be changed as it is the unique identifier. + required: true + aliases: ['name'] + cmdcategory: + description: + - Command category the rule applies to. + choices: ['all'] + required: false + cmd: + description: + - List of commands assigned to the rule. + - If an empty list is passed all commands will be removed from the rule. + - If option is omitted commands will not be checked or changed.
+ required: false + host: + description: + - List of hosts assigned to the rule. + - If an empty list is passed all hosts will be removed from the rule. + - If option is omitted hosts will not be checked or changed. + - Option C(hostcategory) must be omitted to assign hosts. + required: false + hostcategory: + description: + - Host category the rule applies to. + - If 'all' is passed one must omit C(host) and C(hostgroup). + - Option C(host) and C(hostgroup) must be omitted to assign 'all'. + choices: ['all'] + required: false + hostgroup: + description: + - List of host groups assigned to the rule. + - If an empty list is passed all host groups will be removed from the rule. + - If option is omitted host groups will not be checked or changed. + - Option C(hostcategory) must be omitted to assign host groups. + required: false + user: + description: + - List of users assigned to the rule. + - If an empty list is passed all users will be removed from the rule. + - If option is omitted users will not be checked or changed. + required: false + usercategory: + description: + - User category the rule applies to. + choices: ['all'] + required: false + usergroup: + description: + - List of user groups assigned to the rule. + - If an empty list is passed all user groups will be removed from the rule. + - If option is omitted user groups will not be checked or changed. + required: false + state: + description: State to ensure + required: false + default: present + choices: ['present', 'absent', 'enabled', 'disabled'] + ipa_port: + description: Port of IPA server + required: false + default: 443 + ipa_host: + description: IP or hostname of IPA server + required: false + default: "ipa.example.com" + ipa_user: + description: Administrative account used on IPA server + required: false + default: "admin" + ipa_pass: + description: Password of administrative user + required: true + ipa_prot: + description: Protocol used by IPA server + required: false + default: "https" + choices: ["http", "https"] + validate_certs: + description: + - This only applies if C(ipa_prot) is I(https). + - If set to C(no), the SSL certificates will not be validated. + - This should only be set to C(no) when used on personally controlled sites using self-signed certificates. + required: false + default: true +version_added: "2.3" +''' + +EXAMPLES = ''' +# Ensure sudo rule is present that allows everybody to execute any command on any host without being asked for a password. +- ipa_sudorule: + name: sudo_all_nopasswd + cmdcategory: all + description: Allow to run every command with sudo without password + hostcategory: all + sudoopt: + - '!authenticate' + usercategory: all + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +# Ensure user group developers can run every command on host group db-server as well as on host db01.example.com.
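+# The sudoopt entry '!authenticate' in these examples is what allows running the commands without being asked for a password.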
+- ipa_sudorule: + name: sudo_dev_dbserver + description: Allow developers to run every command with sudo on all database servers + cmdcategory: all + host: + - db01.example.com + hostgroup: + - db-server + sudoopt: + - '!authenticate' + usergroup: + - developers + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = ''' +sudorule: + description: Sudorule as returned by IPA + returned: always + type: dict +''' + +from ansible.module_utils.ipa import IPAClient + +class SudoRuleIPAClient(IPAClient): + + def __init__(self, module, host, port, protocol): + super(SudoRuleIPAClient, self).__init__(module, host, port, protocol) + + def sudorule_find(self, name): + return self._post_json(method='sudorule_find', name=None, item={'all': True, 'cn': name}) + + def sudorule_add(self, name, item): + return self._post_json(method='sudorule_add', name=name, item=item) + + def sudorule_mod(self, name, item): + return self._post_json(method='sudorule_mod', name=name, item=item) + + def sudorule_del(self, name): + return self._post_json(method='sudorule_del', name=name) + + def sudorule_add_option(self, name, item): + return self._post_json(method='sudorule_add_option', name=name, item=item) + + def sudorule_add_option_ipasudoopt(self, name, item): + return self.sudorule_add_option(name=name, item={'ipasudoopt': item}) + + def sudorule_remove_option(self, name, item): + return self._post_json(method='sudorule_remove_option', name=name, item=item) + + def sudorule_remove_option_ipasudoopt(self, name, item): + return self.sudorule_remove_option(name=name, item={'ipasudoopt': item}) + + def sudorule_add_host(self, name, item): + return self._post_json(method='sudorule_add_host', name=name, item=item) + + def sudorule_add_host_host(self, name, item): + return self.sudorule_add_host(name=name, item={'host': item}) + + def sudorule_add_host_hostgroup(self, name, item): + return self.sudorule_add_host(name=name, item={'hostgroup': item}) + + def sudorule_remove_host(self, name, item): + return self._post_json(method='sudorule_remove_host', name=name, item=item) + + def sudorule_remove_host_host(self, name, item): + return self.sudorule_remove_host(name=name, item={'host': item}) + + def sudorule_remove_host_hostgroup(self, name, item): + return self.sudorule_remove_host(name=name, item={'hostgroup': item}) + + def sudorule_add_allow_command(self, name, item): + return self._post_json(method='sudorule_add_allow_command', name=name, item=item) + + def sudorule_remove_allow_command(self, name, item): + return self._post_json(method='sudorule_remove_allow_command', name=name, item=item) + + def sudorule_add_user(self, name, item): + return self._post_json(method='sudorule_add_user', name=name, item=item) + + def sudorule_add_user_user(self, name, item): + return self.sudorule_add_user(name=name, item={'user': item}) + + def sudorule_add_user_group(self, name, item): + return self.sudorule_add_user(name=name, item={'group': item}) + + def sudorule_remove_user(self, name, item): + return self._post_json(method='sudorule_remove_user', name=name, item=item) + + def sudorule_remove_user_user(self, name, item): + return self.sudorule_remove_user(name=name, item={'user': item}) + + def sudorule_remove_user_group(self, name, item): + return self.sudorule_remove_user(name=name, item={'group': item}) + + +def get_sudorule_dict(cmdcategory=None, description=None, hostcategory=None, ipaenabledflag=None, usercategory=None): + data = {} + if cmdcategory is not None: + data['cmdcategory'] = cmdcategory +
if description is not None: + data['description'] = description + if hostcategory is not None: + data['hostcategory'] = hostcategory + if ipaenabledflag is not None: + data['ipaenabledflag'] = ipaenabledflag + if usercategory is not None: + data['usercategory'] = usercategory + return data + + +def get_sudorule_diff(ipa_sudorule, module_sudorule): + data = [] + for key in module_sudorule.keys(): + module_value = module_sudorule.get(key, None) + ipa_value = ipa_sudorule.get(key, None) + if isinstance(ipa_value, list) and not isinstance(module_value, list): + module_value = [module_value] + if isinstance(ipa_value, list) and isinstance(module_value, list): + ipa_value = sorted(ipa_value) + module_value = sorted(module_value) + if ipa_value != module_value: + data.append(key) + return data + + +def modify_if_diff(module, name, ipa_list, module_list, add_method, remove_method): + changed = False + diff = list(set(ipa_list) - set(module_list)) + if len(diff) > 0: + changed = True + if not module.check_mode: + for item in diff: + remove_method(name=name, item=item) + + diff = list(set(module_list) - set(ipa_list)) + if len(diff) > 0: + changed = True + if not module.check_mode: + for item in diff: + add_method(name=name, item=item) + + return changed + + +def category_changed(module, client, category_name, ipa_sudorule): + if ipa_sudorule.get(category_name, None) == ['all']: + if not module.check_mode: + # cn is returned as list even with only a single value. + client.sudorule_mod(name=ipa_sudorule.get('cn')[0], item={category_name: None}) + return True + return False + + +def ensure(module, client): + state = module.params['state'] + name = module.params['name'] + cmd = module.params['cmd'] + cmdcategory = module.params['cmdcategory'] + host = module.params['host'] + hostcategory = module.params['hostcategory'] + hostgroup = module.params['hostgroup'] + + if state in ['present', 'enabled']: + ipaenabledflag = 'TRUE' + else: + ipaenabledflag = 'FALSE' + + sudoopt = module.params['sudoopt'] + user = module.params['user'] + usercategory = module.params['usercategory'] + usergroup = module.params['usergroup'] + + module_sudorule = get_sudorule_dict(cmdcategory=cmdcategory, + description=module.params['description'], + hostcategory=hostcategory, + ipaenabledflag=ipaenabledflag, + usercategory=usercategory) + ipa_sudorule = client.sudorule_find(name=name) + + changed = False + if state in ['present', 'disabled', 'enabled']: + if not ipa_sudorule: + changed = True + if not module.check_mode: + ipa_sudorule = client.sudorule_add(name=name, item=module_sudorule) + else: + diff = get_sudorule_diff(ipa_sudorule, module_sudorule) + if len(diff) > 0: + changed = True + if not module.check_mode: + if 'hostcategory' in diff: + if ipa_sudorule.get('memberhost_host', None) is not None: + client.sudorule_remove_host_host(name=name, item=ipa_sudorule.get('memberhost_host')) + if ipa_sudorule.get('memberhost_hostgroup', None) is not None: + client.sudorule_remove_host_hostgroup(name=name, + item=ipa_sudorule.get('memberhost_hostgroup')) + + client.sudorule_mod(name=name, item=module_sudorule) + + if cmd is not None: + changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed + if not module.check_mode: + client.sudorule_add_allow_command(name=name, item=cmd) + + if host is not None: + changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed + changed = modify_if_diff(module, name, ipa_sudorule.get('memberhost_host', []), host, + client.sudorule_add_host_host, + 
client.sudorule_remove_host_host) or changed + + if hostgroup is not None: + changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed + changed = modify_if_diff(module, name, ipa_sudorule.get('memberhost_hostgroup', []), hostgroup, + client.sudorule_add_host_hostgroup, + client.sudorule_remove_host_hostgroup) or changed + if sudoopt is not None: + changed = modify_if_diff(module, name, ipa_sudorule.get('ipasudoopt', []), sudoopt, + client.sudorule_add_option_ipasudoopt, + client.sudorule_remove_option_ipasudoopt) or changed + if user is not None: + changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed + changed = modify_if_diff(module, name, ipa_sudorule.get('memberuser_user', []), user, + client.sudorule_add_user_user, + client.sudorule_remove_user_user) or changed + if usergroup is not None: + changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed + changed = modify_if_diff(module, name, ipa_sudorule.get('memberuser_group', []), usergroup, + client.sudorule_add_user_group, + client.sudorule_remove_user_group) or changed + else: + if ipa_sudorule: + changed = True + if not module.check_mode: + client.sudorule_del(name) + + return changed, client.sudorule_find(name) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + cmd=dict(type='list', required=False), + cmdcategory=dict(type='str', required=False, choices=['all']), + cn=dict(type='str', required=True, aliases=['name']), + description=dict(type='str', required=False), + host=dict(type='list', required=False), + hostcategory=dict(type='str', required=False, choices=['all']), + hostgroup=dict(type='list', required=False), + sudoopt=dict(type='list', required=False), + state=dict(type='str', required=False, default='present', + choices=['present', 'absent', 'enabled', 'disabled']), + user=dict(type='list', required=False), + usercategory=dict(type='str', required=False, choices=['all']), + usergroup=dict(type='list', required=False), + ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']), + ipa_host=dict(type='str', required=False, default='ipa.example.com'), + ipa_port=dict(type='int', required=False, default=443), + ipa_user=dict(type='str', required=False, default='admin'), + ipa_pass=dict(type='str', required=True, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + ), + mutually_exclusive=[['cmdcategory', 'cmd'], + ['hostcategory', 'host'], + ['hostcategory', 'hostgroup'], + ['usercategory', 'user'], + ['usercategory', 'usergroup']], + supports_check_mode=True, + ) + + client = SudoRuleIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, sudorule = ensure(module, client) + module.exit_json(changed=changed, sudorule=sudorule) + except Exception: + e = get_exception() + module.fail_json(msg=str(e)) + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/identity/ipa/ipa_user.py b/identity/ipa/ipa_user.py new file mode 100644 index 00000000000..5e020d73440 --- /dev/null +++ b/identity/ipa/ipa_user.py @@ -0,0 +1,346 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms 
of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ipa_user +author: Thomas Krahn (@Nosmoht) +short_description: Manage FreeIPA users +description: +- Add, modify and delete user within IPA server +options: + displayname: + description: Display name + required: false + givenname: + description: First name + required: false + loginshell: + description: Login shell + required: false + mail: + description: + - List of mail addresses assigned to the user. + - If an empty list is passed all assigned email addresses will be deleted. + - If None is passed email addresses will not be checked or changed. + required: false + password: + description: + - Password + required: false + sn: + description: Surname + required: false + sshpubkey: + description: + - List of public SSH keys. + - If an empty list is passed all assigned public keys will be deleted. + - If None is passed SSH public keys will not be checked or changed. + required: false + state: + description: State to ensure + required: false + default: "present" + choices: ["present", "absent", "enabled", "disabled"] + telephonenumber: + description: + - List of telephone numbers assigned to the user. + - If an empty list is passed all assigned telephone numbers will be deleted. + - If None is passed telephone numbers will not be checked or changed. + required: false + title: + description: Title + required: false + uid: + description: uid of the user + required: true + aliases: ["name"] + ipa_port: + description: Port of IPA server + required: false + default: 443 + ipa_host: + description: IP or hostname of IPA server + required: false + default: "ipa.example.com" + ipa_user: + description: Administrative account used on IPA server + required: false + default: "admin" + ipa_pass: + description: Password of administrative user + required: true + ipa_prot: + description: Protocol used by IPA server + required: false + default: "https" + choices: ["http", "https"] + validate_certs: + description: + - This only applies if C(ipa_prot) is I(https). + - If set to C(no), the SSL certificates will not be validated. + - This should only be set to C(no) when used on personally controlled sites using self-signed certificates. + required: false + default: true +version_added: "2.3" +requirements: +- base64 +- hashlib +''' + +EXAMPLES = ''' +# Ensure pinky is present +- ipa_user: + name: pinky + state: present + givenname: Pinky + sn: Acme + mail: + - pinky@acme.com + telephonenumber: + - '+555123456' + sshpubkey: + - ssh-rsa .... + - ssh-dsa ....
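+ # Note: IPA stores and returns only fingerprints for these keys; the module compares MD5 fingerprints (see get_ssh_key_fingerprint below) to decide whether an update is needed.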
+ ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret + +# Ensure brain is absent +- ipa_user: + name: brain + state: absent + ipa_host: ipa.example.com + ipa_user: admin + ipa_pass: topsecret +''' + +RETURN = ''' +user: + description: User as returned by IPA API + returned: always + type: dict +''' + +import base64 +import hashlib + +from ansible.module_utils.ipa import IPAClient + +class UserIPAClient(IPAClient): + + def __init__(self, module, host, port, protocol): + super(UserIPAClient, self).__init__(module, host, port, protocol) + + def user_find(self, name): + return self._post_json(method='user_find', name=None, item={'all': True, 'uid': name}) + + def user_add(self, name, item): + return self._post_json(method='user_add', name=name, item=item) + + def user_mod(self, name, item): + return self._post_json(method='user_mod', name=name, item=item) + + def user_del(self, name): + return self._post_json(method='user_del', name=name) + + def user_disable(self, name): + return self._post_json(method='user_disable', name=name) + + def user_enable(self, name): + return self._post_json(method='user_enable', name=name) + + +def get_user_dict(displayname=None, givenname=None, loginshell=None, mail=None, nsaccountlock=False, sn=None, + sshpubkey=None, telephonenumber=None, title=None, userpassword=None): + user = {} + if displayname is not None: + user['displayname'] = displayname + if givenname is not None: + user['givenname'] = givenname + if loginshell is not None: + user['loginshell'] = loginshell + if mail is not None: + user['mail'] = mail + user['nsaccountlock'] = nsaccountlock + if sn is not None: + user['sn'] = sn + if sshpubkey is not None: + user['ipasshpubkey'] = sshpubkey + if telephonenumber is not None: + user['telephonenumber'] = telephonenumber + if title is not None: + user['title'] = title + if userpassword is not None: + user['userpassword'] = userpassword + + return user + + +def get_user_diff(ipa_user, module_user): + """ + Return the keys whose values differ between both dicts. Unfortunately the IPA + API returns everything as a list even if only a single value is possible. + Therefore some more complexity is needed. + If the value of an attribute in module_user is not a list but the same attribute in ipa_user is a list, + the value is wrapped in a list before comparison. This way the method should not need to be changed + if the returned API dict changes. + :param ipa_user: + :param module_user: + :return: + """ + # return [item for item in module_user.keys() if module_user.get(item, None) != ipa_user.get(item, None)] + result = [] + # sshpubkeyfp is the list of ssh key fingerprints. IPA doesn't return the keys itself but instead the fingerprints. + # These are used for comparison.
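+ # For example, a key passed as 'ssh-rsa AAAA... user@host' is reduced by get_ssh_key_fingerprint() below to something like '12:34:56:...:EF user@host (ssh-rsa)' before being compared.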
+ sshpubkey = None + if 'ipasshpubkey' in module_user: + module_user['sshpubkeyfp'] = [get_ssh_key_fingerprint(pubkey) for pubkey in module_user['ipasshpubkey']] + # Remove the ipasshpubkey element as it is not returned from IPA but save its value to be used later on + sshpubkey = module_user['ipasshpubkey'] + del module_user['ipasshpubkey'] + for key in module_user.keys(): + mod_value = module_user.get(key, None) + ipa_value = ipa_user.get(key, None) + if isinstance(ipa_value, list) and not isinstance(mod_value, list): + mod_value = [mod_value] + if isinstance(ipa_value, list) and isinstance(mod_value, list): + mod_value = sorted(mod_value) + ipa_value = sorted(ipa_value) + if mod_value != ipa_value: + result.append(key) + # If there are public keys, remove the fingerprints and add them back to the dict + if sshpubkey is not None: + del module_user['sshpubkeyfp'] + module_user['ipasshpubkey'] = sshpubkey + return result + + +def get_ssh_key_fingerprint(ssh_key): + """ + Return the public key fingerprint of a given public SSH key + in format "FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7 user@host (ssh-rsa)" + :param ssh_key: + :return: + """ + parts = ssh_key.strip().split() + if len(parts) == 0: + return None + key_type = parts[0] + key = base64.b64decode(parts[1].encode('ascii')) + + fp_plain = hashlib.md5(key).hexdigest() + key_fp = ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])).upper() + if len(parts) < 3: + return "%s (%s)" % (key_fp, key_type) + else: + user_host = parts[2] + return "%s %s (%s)" % (key_fp, user_host, key_type) + + +def ensure(module, client): + state = module.params['state'] + name = module.params['name'] + nsaccountlock = state == 'disabled' + + module_user = get_user_dict(displayname=module.params.get('displayname'), + givenname=module.params.get('givenname'), + loginshell=module.params['loginshell'], + mail=module.params['mail'], sn=module.params['sn'], + sshpubkey=module.params['sshpubkey'], nsaccountlock=nsaccountlock, + telephonenumber=module.params['telephonenumber'], title=module.params['title'], + userpassword=module.params['password']) + + ipa_user = client.user_find(name=name) + + changed = False + if state in ['present', 'enabled', 'disabled']: + if not ipa_user: + changed = True + if not module.check_mode: + ipa_user = client.user_add(name=name, item=module_user) + else: + diff = get_user_diff(ipa_user, module_user) + if len(diff) > 0: + changed = True + if not module.check_mode: + ipa_user = client.user_mod(name=name, item=module_user) + else: + if ipa_user: + changed = True + if not module.check_mode: + client.user_del(name) + + return changed, ipa_user + + +def main(): + module = AnsibleModule( + argument_spec=dict( + displayname=dict(type='str', required=False), + givenname=dict(type='str', required=False), + loginshell=dict(type='str', required=False), + mail=dict(type='list', required=False), + sn=dict(type='str', required=False), + uid=dict(type='str', required=True, aliases=['name']), + password=dict(type='str', required=False, no_log=True), + sshpubkey=dict(type='list', required=False), + state=dict(type='str', required=False, default='present', + choices=['present', 'absent', 'enabled', 'disabled']), + telephonenumber=dict(type='list', required=False), + title=dict(type='str', required=False), + ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']), + ipa_host=dict(type='str', required=False, default='ipa.example.com'), + ipa_port=dict(type='int', required=False, default=443), +
ipa_user=dict(type='str', required=False, default='admin'), + ipa_pass=dict(type='str', required=True, no_log=True), + validate_certs=dict(type='bool', required=False, default=True), + ), + supports_check_mode=True, + ) + + client = UserIPAClient(module=module, + host=module.params['ipa_host'], + port=module.params['ipa_port'], + protocol=module.params['ipa_prot']) + + # If sshpubkey is passed as an empty value then module.params['sshpubkey'] is ['']. IPA itself returns None (not a list). + # Therefore a small check here to replace [''] by None. Otherwise get_user_diff() would report sshpubkey + # as different, which should be avoided. + if module.params['sshpubkey'] is not None: + if len(module.params['sshpubkey']) == 1 and module.params['sshpubkey'][0] == "": + module.params['sshpubkey'] = None + + try: + client.login(username=module.params['ipa_user'], + password=module.params['ipa_pass']) + changed, user = ensure(module, client) + module.exit_json(changed=changed, user=user) + except Exception: + e = get_exception() + module.fail_json(msg=str(e)) + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/identity/opendj/__init__.py b/identity/opendj/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/identity/opendj/opendj_backendprop.py b/identity/opendj/opendj_backendprop.py new file mode 100644 index 00000000000..893bbfdd47d --- /dev/null +++ b/identity/opendj/opendj_backendprop.py @@ -0,0 +1,221 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Werner Dijkerman (ikben@werner-dijkerman.nl) +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: opendj_backendprop +short_description: Will update the backend configuration of OpenDJ via the dsconfig set-backend-prop command. +description: + - This module will update settings for OpenDJ with the command set-backend-prop. + - It will first check via the get-backend-prop command if the configuration needs to be applied. +version_added: "2.2" +author: + - Werner Dijkerman +options: + opendj_bindir: + description: + - The path to the bin directory of OpenDJ. + required: false + default: /opt/opendj/bin + hostname: + description: + - The hostname of the OpenDJ server. + required: true + port: + description: + - The Admin port on which the OpenDJ instance is available. + required: true + username: + description: + - The username to connect to. + required: false + default: cn=Directory Manager + password: + description: + - The password for the cn=Directory Manager user. + - Either password or passwordfile is needed. + required: false + passwordfile: + description: + - Location to the password file which holds the password for the cn=Directory Manager user. + - Either password or passwordfile is needed.
+ required: false + backend: + description: + - The name of the backend on which the property needs to be updated. + required: true + name: + description: + - The configuration setting to update. + required: true + value: + description: + - The value for the configuration item. + required: true + state: + description: + - If the configuration needs to be added/updated. + required: false + default: "present" +''' + +EXAMPLES = ''' + - name: "Add or update OpenDJ backend properties" + action: opendj_backendprop + hostname=localhost + port=4444 + username="cn=Directory Manager" + password=password + backend=userRoot + name=index-entry-limit + value=5000 +''' + +RETURN = ''' +''' + +import subprocess + + +class BackendProp(object): + def __init__(self, module): + self._module = module + + def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name): + my_command = [ + opendj_bindir + '/dsconfig', + 'get-backend-prop', + '-h', hostname, + '--port', str(port), + '--bindDN', username, + '--backend-name', backend_name, + '-n', '-X', '-s' + ] + password_method + process = subprocess.Popen(my_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + if process.returncode == 0: + return stdout + else: + self._module.fail_json(msg="Error message: " + str(stderr)) + + def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value): + my_command = [ + opendj_bindir + '/dsconfig', + 'set-backend-prop', + '-h', hostname, + '--port', str(port), + '--bindDN', username, + '--backend-name', backend_name, + '--set', name + ":" + value, + '-n', '-X' + ] + password_method + process = subprocess.Popen(my_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + if process.returncode == 0: + return True + else: + self._module.fail_json(msg="Error message: " + str(stderr)) + + def validate_data(self, data=None, name=None, value=None): + for config_line in data.split('\n'): + if config_line: + split_line = config_line.split() + if split_line[0] == name: + if split_line[1] == value: + return True + return False + + +def main(): + module = AnsibleModule( + argument_spec=dict( + opendj_bindir=dict(default="/opt/opendj/bin", type="path"), + hostname=dict(required=True), + port=dict(required=True), + username=dict(default="cn=Directory Manager", required=False), + password=dict(required=False, no_log=True), + passwordfile=dict(required=False, type="path"), + backend=dict(required=True), + name=dict(required=True), + value=dict(required=True), + state=dict(default="present"), + ), + supports_check_mode=True + ) + + opendj_bindir = module.params['opendj_bindir'] + hostname = module.params['hostname'] + port = module.params['port'] + username = module.params['username'] + password = module.params['password'] + passwordfile = module.params['passwordfile'] + backend_name = module.params['backend'] + name = module.params['name'] + value = module.params['value'] + state = module.params['state'] + + if module.params["password"] is not None: + password_method = ['-w', password] + elif module.params["passwordfile"] is not None: + password_method = ['-j', passwordfile] + else: + module.fail_json(msg="No credentials are given.
Use either 'password' or 'passwordfile'") + + if module.params["passwordfile"] and module.params["password"]: + module.fail_json(msg="only one of 'password' or 'passwordfile' can be set") + + opendj = BackendProp(module) + validate = opendj.get_property(opendj_bindir=opendj_bindir, + hostname=hostname, + port=port, + username=username, + password_method=password_method, + backend_name=backend_name) + + if validate: + if not opendj.validate_data(data=validate, name=name, value=value): + if module.check_mode: + module.exit_json(changed=True) + if opendj.set_property(opendj_bindir=opendj_bindir, + hostname=hostname, + port=port, + username=username, + password_method=password_method, + backend_name=backend_name, + name=name, + value=value): + module.exit_json(changed=True) + else: + module.exit_json(changed=False) + else: + module.exit_json(changed=False) + else: + module.exit_json(changed=False) + + +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/infrastructure/__init__.py b/infrastructure/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/infrastructure/foreman/__init__.py b/infrastructure/foreman/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/infrastructure/foreman/foreman.py b/infrastructure/foreman/foreman.py new file mode 100644 index 00000000000..d7dcb5f2959 --- /dev/null +++ b/infrastructure/foreman/foreman.py @@ -0,0 +1,158 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2016, Eric D Helms +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: foreman +short_description: Manage Foreman Resources +description: + - Allows the management of Foreman resources inside your Foreman server +version_added: "2.3" +author: "Eric D Helms (@ehelms)" +requirements: + - "nailgun >= 0.28.0" + - "python >= 2.6" + - datetime +options: + server_url: + description: + - URL of Foreman server + required: true + username: + description: + - Username on Foreman server + required: true + password: + description: + - Password for user accessing Foreman server + required: true + entity: + description: + - The Foreman resource that the action will be performed on (e.g. organization, host) + required: true + params: + description: + - Parameters associated to the entity resource to set or edit in dictionary format (e.g. 
name, description) + required: true +''' + +EXAMPLES = ''' +- name: "Create CI Organization" + local_action: + module: foreman + username: "admin" + password: "admin" + server_url: "https://fakeserver.com" + entity: "organization" + params: + name: "My Cool New Organization" +''' + +RETURN = '''# ''' + +import datetime + +try: + from nailgun import entities, entity_fields + from nailgun.config import ServerConfig + HAS_NAILGUN_PACKAGE = True +except ImportError: + HAS_NAILGUN_PACKAGE = False + +class NailGun(object): + def __init__(self, server, entities, module): + self._server = server + self._entities = entities + self._module = module + + def find_organization(self, name, **params): + org = self._entities.Organization(self._server, name=name, **params) + response = org.search(set(), {'search': 'name={}'.format(name)}) + + if len(response) == 1: + return response[0] + else: + self._module.fail_json(msg="No organization found for %s" % name) + + def organization(self, params): + name = params['name'] + del params['name'] + org = self.find_organization(name, **params) + + if org: + org = self._entities.Organization(self._server, name=name, id=org.id, **params) + org.update() + else: + org = self._entities.Organization(self._server, name=name, **params) + org.create() + + return True + +def main(): + module = AnsibleModule( + argument_spec=dict( + server_url=dict(required=True), + username=dict(required=True, no_log=True), + password=dict(required=True, no_log=True), + entity=dict(required=True, no_log=False), + verify_ssl=dict(required=False, type='bool', default=False), + params=dict(required=True, no_log=True, type='dict'), + ), + supports_check_mode=True + ) + + if not HAS_NAILGUN_PACKAGE: + module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun)") + + server_url = module.params['server_url'] + username = module.params['username'] + password = module.params['password'] + entity = module.params['entity'] + params = module.params['params'] + verify_ssl = module.params['verify_ssl'] + + server = ServerConfig( + url=server_url, + auth=(username, password), + verify=verify_ssl + ) + ng = NailGun(server, entities, module) + + # Let's make a connection to the server with username and password + try: + org = entities.Organization(server) + org.search() + except Exception as e: + module.fail_json(msg="Failed to connect to Foreman server: %s " % e) + + if entity == 'organization': + ng.organization(params) + module.exit_json(changed=True, result="%s updated" % entity) + else: + module.fail_json(changed=False, result="Unsupported entity supplied") + +# import module snippets +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/infrastructure/foreman/katello.py b/infrastructure/foreman/katello.py new file mode 100644 index 00000000000..86b7be0622c --- /dev/null +++ b/infrastructure/foreman/katello.py @@ -0,0 +1,533 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# (c) 2016, Eric D Helms +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details.
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: katello +short_description: Manage Katello Resources +description: + - Allows the management of Katello resources inside your Foreman server +version_added: "2.3" +author: "Eric D Helms (@ehelms)" +requirements: + - "nailgun >= 0.28.0" + - "python >= 2.6" + - datetime +options: + server_url: + description: + - URL of Foreman server + required: true + username: + description: + - Username on Foreman server + required: true + password: + description: + - Password for user accessing Foreman server + required: true + entity: + description: + - The Foreman resource that the action will be performed on (e.g. organization, host) + required: true + params: + description: + - Parameters associated to the entity resource to set or edit in dictionary format (e.g. name, description) + required: true +''' + +EXAMPLES = ''' +Simple Example: + +- name: "Create Product" + local_action: + module: katello + username: "admin" + password: "admin" + server_url: "https://fakeserver.com" + entity: "product" + params: + name: "Centos 7" + +Abstraction Example: + +katello.yml +--- +- name: "{{ name }}" + local_action: + module: katello + username: "admin" + password: "admin" + server_url: "https://fakeserver.com" + entity: "{{ entity }}" + params: "{{ params }}" + +tasks.yml +--- +- include: katello.yml + vars: + name: "Create Dev Environment" + entity: "lifecycle_environment" + params: + name: "Dev" + prior: "Library" + organization: "Default Organization" + +- include: katello.yml + vars: + name: "Create Centos Product" + entity: "product" + params: + name: "Centos 7" + organization: "Default Organization" + +- include: katello.yml + vars: + name: "Create 7.2 Repository" + entity: "repository" + params: + name: "Centos 7.2" + product: "Centos 7" + organization: "Default Organization" + content_type: "yum" + url: "http://mirror.centos.org/centos/7/os/x86_64/" + +- include: katello.yml + vars: + name: "Create Centos 7 View" + entity: "content_view" + params: + name: "Centos 7 View" + organization: "Default Organization" + repositories: + - name: "Centos 7.2" + product: "Centos 7" + +- include: katello.yml + vars: + name: "Enable RHEL Product" + entity: "repository_set" + params: + name: "Red Hat Enterprise Linux 7 Server (RPMs)" + product: "Red Hat Enterprise Linux Server" + organization: "Default Organization" + basearch: "x86_64" + releasever: "7" +''' + +RETURN = '''# ''' + +import datetime +import os + +try: + from nailgun import entities, entity_fields, entity_mixins + from nailgun.config import ServerConfig + HAS_NAILGUN_PACKAGE = True +except ImportError: + HAS_NAILGUN_PACKAGE = False + + +class NailGun(object): + def __init__(self, server, entities, module): + self._server = server + self._entities = entities + self._module = module + entity_mixins.TASK_TIMEOUT = 1000 + + def find_organization(self, name, **params): + org = self._entities.Organization(self._server, name=name, **params) + response = org.search(set(), {'search': 'name={}'.format(name)}) + + if len(response) == 1: + return response[0] + else: + self._module.fail_json(msg="No organization found for %s" % name) + + def find_lifecycle_environment(self, name, organization): + org = self.find_organization(organization) + + lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=name, organization=org) + response =
lifecycle_env.search() + + if len(response) == 1: + return response[0] + else: + self._module.fail_json(msg="No Lifecycle Environment found for %s" % name) + + def find_product(self, name, organization): + org = self.find_organization(organization) + + product = self._entities.Product(self._server, name=name, organization=org) + response = product.search() + + if len(response) == 1: + return response[0] + else: + self._module.fail_json(msg="No Product found for %s" % name) + + def find_repository(self, name, product, organization): + product = self.find_product(product, organization) + + repository = self._entities.Repository(self._server, name=name, product=product) + repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization) + repository.organization = product.organization + response = repository.search() + + if len(response) == 1: + return response[0] + else: + self._module.fail_json(msg="No Repository found for %s" % name) + + def find_content_view(self, name, organization): + org = self.find_organization(organization) + + content_view = self._entities.ContentView(self._server, name=name, organization=org) + response = content_view.search() + + if len(response) == 1: + return response[0] + else: + self._module.fail_json(msg="No Content View found for %s" % name) + + def organization(self, params): + name = params['name'] + del params['name'] + org = self.find_organization(name, **params) + + if org: + org = self._entities.Organization(self._server, name=name, id=org.id, **params) + org.update() + else: + org = self._entities.Organization(self._server, name=name, **params) + org.create() + + return True + + def manifest(self, params): + org = self.find_organization(params['organization']) + params['organization'] = org.id + + with open(os.getcwd() + params['content'], 'r') as content_file: + content = content_file.read() + + manifest = self._entities.Subscription(self._server) + + try: + manifest.upload( + data={'organization_id': org.id}, + files={'content': content} + ) + return True + except Exception: + e = get_exception() + + if "Import is the same as existing data" in e.message: + return True + else: + self._module.fail_json(msg="Manifest import failed with %s" % e) + + def product(self, params): + org = self.find_organization(params['organization']) + params['organization'] = org.id + + product = self._entities.Product(self._server, **params) + response = product.search() + + if len(response) == 1: + product.id = response[0].id + product.update() + else: + product.create() + + return True + + def sync_product(self, params): + org = self.find_organization(params['organization']) + product = self.find_product(params['name'], org.name) + + return product.sync() + + def repository(self, params): + product = self.find_product(params['product'], params['organization']) + params['product'] = product.id + del params['organization'] + + repository = self._entities.Repository(self._server, **params) + repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization) + repository.organization = product.organization + response = repository.search() + + if len(response) == 1: + repository.id = response[0].id + repository.update() + else: + repository.create() + + return True + + def sync_repository(self, params): + org = self.find_organization(params['organization']) + repository = self.find_repository(params['name'], params['product'], org.name) + + return repository.sync() + + def repository_set(self, params): + product =
self.find_product(params['product'], params['organization']) + del params['product'] + del params['organization'] + + if not product: + return False + else: + reposet = self._entities.RepositorySet(self._server, product=product, name=params['name']) + reposet = reposet.search()[0] + + formatted_name = [params['name'].replace('(', '').replace(')', '')] + formatted_name.append(params['basearch']) + + if params['releasever']: + formatted_name.append(params['releasever']) + + formatted_name = ' '.join(formatted_name) + + repository = self._entities.Repository(self._server, product=product, name=formatted_name) + repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization) + repository.organization = product.organization + repository = repository.search() + + if len(repository) == 0: + reposet.enable(data={'basearch': params['basearch'], 'releasever': params['releasever']}) + + return True + + def sync_plan(self, params): + org = self.find_organization(params['organization']) + params['organization'] = org.id + params['sync_date'] = datetime.datetime.strptime(params['sync_date'], "%H:%M") + + products = params['products'] + del params['products'] + + sync_plan = self._entities.SyncPlan( + self._server, + name=params['name'], + organization=org + ) + response = sync_plan.search() + + sync_plan.sync_date = params['sync_date'] + sync_plan.interval = params['interval'] + + if len(response) == 1: + sync_plan.id = response[0].id + sync_plan.update() + else: + response = sync_plan.create() + sync_plan.id = response[0].id + + if products: + ids = [] + + for name in products: + product = self.find_product(name, org.name) + ids.append(product.id) + + sync_plan.add_products(data={'product_ids': ids}) + + return True + + def content_view(self, params): + org = self.find_organization(params['organization']) + + content_view = self._entities.ContentView(self._server, name=params['name'], organization=org) + response = content_view.search() + + if len(response) == 1: + content_view.id = response[0].id + content_view.update() + else: + content_view = content_view.create() + + if params['repositories']: + repos = [] + + for repository in params['repositories']: + repository = self.find_repository(repository['name'], repository['product'], org.name) + repos.append(repository) + + content_view.repository = repos + content_view.update(['repository']) + + def find_content_view(self, name, organization): + org = self.find_organization(organization) + + content_view = self._entities.ContentView(self._server, name=name, organization=org) + response = content_view.search() + + if len(response) == 1: + return response[0] + else: + self._module.fail_json(msg="No Content View found for %s" % name) + + def find_content_view_version(self, name, organization, environment): + env = self.find_lifecycle_environment(environment, organization) + content_view = self.find_content_view(name, organization) + + content_view_version = self._entities.ContentViewVersion(self._server, content_view=content_view) + response = content_view_version.search(['content_view'], {'environment_id': env.id}) + + if len(response) == 1: + return response[0] + else: + self._module.fail_json(msg="No Content View version found for %s" % response) + + def publish(self, params): + content_view = self.find_content_view(params['name'], params['organization']) + + return content_view.publish() + + def promote(self, params): + to_environment = self.find_lifecycle_environment(params['to_environment'], params['organization']) + version 
= self.find_content_view_version(params['name'], params['organization'], params['from_environment'])
+
+        data = {'environment_id': to_environment.id}
+        return version.promote(data=data)
+
+    def lifecycle_environment(self, params):
+        org = self.find_organization(params['organization'])
+        prior_env = self.find_lifecycle_environment(params['prior'], params['organization'])
+
+        lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=params['name'], organization=org, prior=prior_env)
+        response = lifecycle_env.search()
+
+        if len(response) == 1:
+            lifecycle_env.id = response[0].id
+            lifecycle_env.update()
+        else:
+            lifecycle_env.create()
+
+        return True
+
+    def activation_key(self, params):
+        org = self.find_organization(params['organization'])
+
+        activation_key = self._entities.ActivationKey(self._server, name=params['name'], organization=org)
+        response = activation_key.search()
+
+        if len(response) == 1:
+            activation_key.id = response[0].id
+            activation_key.update()
+        else:
+            activation_key.create()
+
+        if params['content_view']:
+            content_view = self.find_content_view(params['content_view'], params['organization'])
+            lifecycle_environment = self.find_lifecycle_environment(params['lifecycle_environment'], params['organization'])
+
+            activation_key.content_view = content_view
+            activation_key.environment = lifecycle_environment
+            activation_key.update()
+
+        return True
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            server_url=dict(required=True),
+            username=dict(required=True, no_log=True),
+            password=dict(required=True, no_log=True),
+            entity=dict(required=True, no_log=False),
+            action=dict(required=False, no_log=False),
+            verify_ssl=dict(required=False, type='bool', default=False),
+            params=dict(required=True, no_log=True, type='dict'),
+        ),
+        supports_check_mode=True
+    )
+
+    if not HAS_NAILGUN_PACKAGE:
+        module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun)")
+
+    server_url = module.params['server_url']
+    username = module.params['username']
+    password = module.params['password']
+    entity = module.params['entity']
+    action = module.params['action']
+    params = module.params['params']
+    verify_ssl = module.params['verify_ssl']
+
+    server = ServerConfig(
+        url=server_url,
+        auth=(username, password),
+        verify=verify_ssl
+    )
+    ng = NailGun(server, entities, module)
+
+    # Let's make a connection to the server with the given username and password
+    try:
+        org = entities.Organization(server)
+        org.search()
+    except Exception as e:
+        module.fail_json(msg="Failed to connect to Foreman server: %s " % e)
+
+    result = False
+
+    if entity == 'product':
+        if action == 'sync':
+            result = ng.sync_product(params)
+        else:
+            result = ng.product(params)
+    elif entity == 'repository':
+        if action == 'sync':
+            result = ng.sync_repository(params)
+        else:
+            result = ng.repository(params)
+    elif entity == 'manifest':
+        result = ng.manifest(params)
+    elif entity == 'repository_set':
+        result = ng.repository_set(params)
+    elif entity == 'sync_plan':
+        result = ng.sync_plan(params)
+    elif entity == 'content_view':
+        if action == 'publish':
+            result = ng.publish(params)
+        elif action == 'promote':
+            result = ng.promote(params)
+        else:
+            result = ng.content_view(params)
+    elif entity == 'lifecycle_environment':
+        result = ng.lifecycle_environment(params)
+    elif entity == 'activation_key':
+        result = ng.activation_key(params)
+    else:
+        module.fail_json(changed=False, result="Unsupported entity supplied")
+
+    module.exit_json(changed=result, 
result="%s updated" % entity) + +# import module snippets +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/messaging/rabbitmq_binding.py b/messaging/rabbitmq_binding.py index fc69f490fad..428bec096f3 100644 --- a/messaging/rabbitmq_binding.py +++ b/messaging/rabbitmq_binding.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rabbitmq_binding @@ -28,7 +32,7 @@ short_description: This module manages rabbitMQ bindings description: - This module uses rabbitMQ Rest API to create/delete bindings -requirements: [ python requests ] +requirements: [ "requests >= 1.0.0" ] options: state: description: @@ -94,10 +98,18 @@ EXAMPLES = ''' # Bind myQueue to directExchange with routing key info -- rabbitmq_binding: name=directExchange destination=myQueue type=queue routing_key=info +- rabbitmq_binding: + name: directExchange + destination: myQueue + type: queue + routing_key: info # Bind directExchange to topicExchange with routing key *.info -- rabbitmq_binding: name=topicExchange destination=topicExchange type=exchange routing_key="*.info" +- rabbitmq_binding: + name: topicExchange + destination: topicExchange + type: exchange + routing_key: *.info ''' import requests @@ -127,14 +139,19 @@ def main(): else: dest_type="e" + if module.params['routing_key'] == "": + props = "~" + else: + props = urllib.quote(module.params['routing_key'],'') + url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s/%s" % ( module.params['login_host'], module.params['login_port'], urllib.quote(module.params['vhost'],''), - module.params['name'], + urllib.quote(module.params['name'],''), dest_type, - module.params['destination'], - urllib.quote(module.params['routing_key'],'') + urllib.quote(module.params['destination'],''), + props ) # Check if exchange already exists @@ -173,9 +190,9 @@ def main(): module.params['login_host'], module.params['login_port'], urllib.quote(module.params['vhost'],''), - module.params['name'], + urllib.quote(module.params['name'],''), dest_type, - module.params['destination'] + urllib.quote(module.params['destination'],'') ) r = requests.post( @@ -211,4 +228,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/messaging/rabbitmq_exchange.py b/messaging/rabbitmq_exchange.py index fb74298879b..a5e1e353dac 100644 --- a/messaging/rabbitmq_exchange.py +++ b/messaging/rabbitmq_exchange.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . 
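The rabbitmq_binding hunks above percent-encode every path segment of the management-API URL and substitute "~" when the routing key is empty, because the default vhost "/" and keys such as "*.info" would otherwise be parsed as URL structure. A minimal sketch of the resulting URL construction, assuming Python 2's urllib as these modules use (the host, port, and names below are placeholders, not values from the patch):

    import urllib

    vhost = '/'
    routing_key = '*.info'

    # an empty routing key maps to the special "~" props segment
    props = '~' if routing_key == '' else urllib.quote(routing_key, '')

    url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s/%s" % (
        'localhost', '15672',
        urllib.quote(vhost, ''),             # '/' becomes '%2F'
        urllib.quote('directExchange', ''),  # source exchange name
        'q',                                 # destination type: queue
        urllib.quote('myQueue', ''),         # destination name
        props,
    )
    print(url)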
# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rabbitmq_exchange @@ -28,7 +32,7 @@ short_description: This module manages rabbitMQ exchanges description: - This module uses rabbitMQ Rest API to create/delete exchanges -requirements: [ python requests ] +requirements: [ "requests >= 1.0.0" ] options: name: description: @@ -100,10 +104,14 @@ EXAMPLES = ''' # Create direct exchange -- rabbitmq_exchange: name=directExchange +- rabbitmq_exchange: + name: directExchange # Create topic exchange on vhost -- rabbitmq_exchange: name=topicExchange type=topic vhost=myVhost +- rabbitmq_exchange: + name: topicExchange + type: topic + vhost: myVhost ''' import requests @@ -120,9 +128,9 @@ def main(): login_host = dict(default='localhost', type='str'), login_port = dict(default='15672', type='str'), vhost = dict(default='/', type='str'), - durable = dict(default=True, choices=BOOLEANS, type='bool'), - auto_delete = dict(default=False, choices=BOOLEANS, type='bool'), - internal = dict(default=False, choices=BOOLEANS, type='bool'), + durable = dict(default=True, type='bool'), + auto_delete = dict(default=False, type='bool'), + internal = dict(default=False, type='bool'), exchange_type = dict(default='direct', aliases=['type'], type='str'), arguments = dict(default=dict(), type='dict') ), @@ -133,9 +141,9 @@ def main(): module.params['login_host'], module.params['login_port'], urllib.quote(module.params['vhost'],''), - module.params['name'] + urllib.quote(module.params['name'],'') ) - + # Check if exchange already exists r = requests.get( url, auth=(module.params['login_user'],module.params['login_password'])) @@ -215,4 +223,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/messaging/rabbitmq_parameter.py b/messaging/rabbitmq_parameter.py index 6be18bdce3d..32959f2e562 100644 --- a/messaging/rabbitmq_parameter.py +++ b/messaging/rabbitmq_parameter.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
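The rabbitmq_parameter hunks below normalize parameter values through JSON on both sides: string input from a playbook is decoded with json.loads(), and values are re-encoded with json.dumps() before being handed to rabbitmqctl set_parameter, so comparisons against list_parameters output are made on equal terms. A small sketch of that round trip, using the '"guest"' literal from the module's own example:

    import json

    raw = '"guest"'              # as written in a playbook
    value = json.loads(raw)      # -> 'guest', the canonical in-memory form
    cli_arg = json.dumps(value)  # -> '"guest"', the form set_parameter expects

    assert json.loads(cli_arg) == value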
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rabbitmq_parameter @@ -63,10 +67,11 @@ EXAMPLES = """ # Set the federation parameter 'local_username' to a value of 'guest' (in quotes) -- rabbitmq_parameter: component=federation - name=local-username - value='"guest"' - state=present +- rabbitmq_parameter: + component: federation + name: local-username + value: '"guest"' + state: present """ class RabbitMqParameter(object): @@ -96,12 +101,17 @@ def get(self): component, name, value = param_item.split('\t') if component == self.component and name == self.name: - self._value = value + self._value = json.loads(value) return True return False def set(self): - self._exec(['set_parameter', '-p', self.vhost, self.component, self.name, self.value]) + self._exec(['set_parameter', + '-p', + self.vhost, + self.component, + self.name, + json.dumps(self.value)]) def delete(self): self._exec(['clear_parameter', '-p', self.vhost, self.component, self.name]) @@ -126,6 +136,8 @@ def main(): component = module.params['component'] name = module.params['name'] value = module.params['value'] + if isinstance(value, str): + value = json.loads(value) vhost = module.params['vhost'] state = module.params['state'] node = module.params['node'] @@ -149,4 +161,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/messaging/rabbitmq_plugin.py b/messaging/rabbitmq_plugin.py index 8d3a9428016..cc16966dcf4 100644 --- a/messaging/rabbitmq_plugin.py +++ b/messaging/rabbitmq_plugin.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rabbitmq_plugin @@ -56,7 +60,9 @@ EXAMPLES = ''' # Enables the rabbitmq_management plugin -- rabbitmq_plugin: names=rabbitmq_management state=enabled +- rabbitmq_plugin: + names: rabbitmq_management + state: enabled ''' import os @@ -88,7 +94,14 @@ def _exec(self, args, run_in_check_mode=False): return list() def get_all(self): - return self._exec(['list', '-E', '-m'], True) + list_output = self._exec(['list', '-E', '-m'], True) + plugins = [] + for plugin in list_output: + if not plugin: + break + plugins.append(plugin) + + return plugins def enable(self, name): self._exec(['enable', name]) @@ -96,6 +109,7 @@ def enable(self, name): def disable(self, name): self._exec(['disable', name]) + def main(): arg_spec = dict( names=dict(required=True, aliases=['name']), @@ -139,4 +153,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/messaging/rabbitmq_policy.py b/messaging/rabbitmq_policy.py index 81d7068ec46..6d5a053f3d6 100644 --- a/messaging/rabbitmq_policy.py +++ b/messaging/rabbitmq_policy.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rabbitmq_policy @@ -38,6 +42,13 @@ - The name of the vhost to apply to. required: false default: / + apply_to: + description: + - What the policy applies to. Requires RabbitMQ 3.2.0 or later. + required: false + default: all + choices: [all, exchanges, queues] + version_added: "2.1" pattern: description: - A regex of queues to apply the policy to. 
@@ -67,13 +78,19 @@ EXAMPLES = ''' - name: ensure the default vhost contains the HA policy via a dict - rabbitmq_policy: name=HA pattern='.*' + rabbitmq_policy: + name: HA + pattern: .* args: tags: - "ha-mode": all + ha-mode: all - name: ensure the default vhost contains the HA policy - rabbitmq_policy: name=HA pattern='.*' tags="ha-mode=all" + rabbitmq_policy: + name: HA + pattern: .* + tags: + - ha-mode: all ''' class RabbitMqPolicy(object): def __init__(self, module, name): @@ -81,6 +98,7 @@ def __init__(self, module, name): self._name = name self._vhost = module.params['vhost'] self._pattern = module.params['pattern'] + self._apply_to = module.params['apply_to'] self._tags = module.params['tags'] self._priority = module.params['priority'] self._node = module.params['node'] @@ -112,6 +130,9 @@ def set(self): args.append(json.dumps(self._tags)) args.append('--priority') args.append(self._priority) + if (self._apply_to != 'all'): + args.append('--apply-to') + args.append(self._apply_to) return self._exec(args) def clear(self): @@ -123,6 +144,7 @@ def main(): name=dict(required=True), vhost=dict(default='/'), pattern=dict(required=True), + apply_to=dict(default='all', choices=['all', 'exchanges', 'queues']), tags=dict(type='dict', required=True), priority=dict(default='0'), node=dict(default='rabbit'), @@ -153,4 +175,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/messaging/rabbitmq_queue.py b/messaging/rabbitmq_queue.py index 5a403a6b602..6b49fea9f06 100644 --- a/messaging/rabbitmq_queue.py +++ b/messaging/rabbitmq_queue.py @@ -19,16 +19,20 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rabbitmq_queue -author: "Manuel Sousa (@manuel-sousa)" +author: "Manuel Sousa (@manuel-sousa)" version_added: "2.0" short_description: This module manages rabbitMQ queues description: - This module uses rabbitMQ Rest API to create/delete queues -requirements: [ python requests ] +requirements: [ "requests >= 1.0.0" ] options: name: description: @@ -114,10 +118,15 @@ EXAMPLES = ''' # Create a queue -- rabbitmq_queue: name=myQueue +- rabbitmq_queue: + name: myQueue # Create a queue on remote host -- rabbitmq_queue: name=myRemoteQueue login_user=user login_password=secret login_host=remote.example.org +- rabbitmq_queue: + name: myRemoteQueue + login_user: user + login_password: secret + login_host: remote.example.org ''' import requests @@ -134,8 +143,8 @@ def main(): login_host = dict(default='localhost', type='str'), login_port = dict(default='15672', type='str'), vhost = dict(default='/', type='str'), - durable = dict(default=True, choices=BOOLEANS, type='bool'), - auto_delete = dict(default=False, choices=BOOLEANS, type='bool'), + durable = dict(default=True, type='bool'), + auto_delete = dict(default=False, type='bool'), message_ttl = dict(default=None, type='int'), auto_expires = dict(default=None, type='int'), max_length = dict(default=None, type='int'), @@ -152,7 +161,7 @@ def main(): urllib.quote(module.params['vhost'],''), module.params['name'] ) - + # Check if queue already exists r = requests.get( url, auth=(module.params['login_user'],module.params['login_password'])) @@ -260,4 +269,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/messaging/rabbitmq_user.py b/messaging/rabbitmq_user.py 
index b12178e08ea..02afe298cb2 100644 --- a/messaging/rabbitmq_user.py +++ b/messaging/rabbitmq_user.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rabbitmq_user @@ -45,9 +49,19 @@ - User tags specified as comma delimited required: false default: null + permissions: + description: + - a list of dicts, each dict contains vhost, configure_priv, write_priv, and read_priv, + and represents a permission rule for that vhost. + - This option should be preferable when you care about all permissions of the user. + - You should use vhost, configure_priv, write_priv, and read_priv options instead + if you care about permissions for just some vhosts. + required: false + default: [] vhost: description: - vhost to apply access privileges. + - This option will be ignored when permissions option is used. required: false default: / node: @@ -61,6 +75,7 @@ - Regular expression to restrict configure actions on a resource for the specified vhost. - By default all actions are restricted. + - This option will be ignored when permissions option is used. required: false default: ^$ write_priv: @@ -68,6 +83,7 @@ - Regular expression to restrict configure actions on a resource for the specified vhost. - By default all actions are restricted. + - This option will be ignored when permissions option is used. required: false default: ^$ read_priv: @@ -75,6 +91,7 @@ - Regular expression to restrict configure actions on a resource for the specified vhost. - By default all actions are restricted. + - This option will be ignored when permissions option is used. required: false default: ^$ force: @@ -92,18 +109,33 @@ ''' EXAMPLES = ''' -# Add user to server and assign full access control -- rabbitmq_user: user=joe - password=changeme - vhost=/ - configure_priv=.* - read_priv=.* - write_priv=.* - state=present +# Add user to server and assign full access control on / vhost. +# The user might have permission rules for other vhost but you don't care. +- rabbitmq_user: + user: joe + password: changeme + vhost: / + configure_priv: .* + read_priv: .* + write_priv: .* + state: present + +# Add user to server and assign full access control on / vhost. 
+# The user doesn't have permission rules for other vhosts +- rabbitmq_user: + user: joe + password: changeme + permissions: + - vhost: / + configure_priv: .* + read_priv: .* + write_priv: .* + state: present ''' class RabbitMqUser(object): - def __init__(self, module, username, password, tags, vhost, configure_priv, write_priv, read_priv, node): + def __init__(self, module, username, password, tags, permissions, + node, bulk_permissions=False): self.module = module self.username = username self.password = password @@ -113,21 +145,18 @@ def __init__(self, module, username, password, tags, vhost, configure_priv, writ else: self.tags = tags.split(',') - permissions = dict( - vhost=vhost, - configure_priv=configure_priv, - write_priv=write_priv, - read_priv=read_priv - ) self.permissions = permissions + self.bulk_permissions = bulk_permissions self._tags = None - self._permissions = None + self._permissions = [] self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) def _exec(self, args, run_in_check_mode=False): if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - cmd = [self._rabbitmqctl, '-q', '-n', self.node] + cmd = [self._rabbitmqctl, '-q'] + if self.node is not None: + cmd.extend(['-n', self.node]) rc, out, err = self.module.run_command(cmd + args, check_rc=True) return out.splitlines() return list() @@ -136,6 +165,9 @@ def get(self): users = self._exec(['list_users'], True) for user_tag in users: + if '\t' not in user_tag: + continue + user, tags = user_tag.split('\t') if user == self.username: @@ -154,12 +186,18 @@ def get(self): def _get_permissions(self): perms_out = self._exec(['list_user_permissions', self.username], True) + perms_list = list() for perm in perms_out: vhost, configure_priv, write_priv, read_priv = perm.split('\t') - if vhost == self.permissions['vhost']: - return dict(vhost=vhost, configure_priv=configure_priv, write_priv=write_priv, read_priv=read_priv) - - return dict() + if not self.bulk_permissions: + if vhost == self.permissions[0]['vhost']: + perms_list.append(dict(vhost=vhost, configure_priv=configure_priv, + write_priv=write_priv, read_priv=read_priv)) + break + else: + perms_list.append(dict(vhost=vhost, configure_priv=configure_priv, + write_priv=write_priv, read_priv=read_priv)) + return perms_list def add(self): if self.password is not None: @@ -175,14 +213,21 @@ def set_tags(self): self._exec(['set_user_tags', self.username] + self.tags) def set_permissions(self): - cmd = ['set_permissions'] - cmd.append('-p') - cmd.append(self.permissions['vhost']) - cmd.append(self.username) - cmd.append(self.permissions['configure_priv']) - cmd.append(self.permissions['write_priv']) - cmd.append(self.permissions['read_priv']) - self._exec(cmd) + for permission in self._permissions: + if permission not in self.permissions: + cmd = ['clear_permissions', '-p'] + cmd.append(permission['vhost']) + cmd.append(self.username) + self._exec(cmd) + for permission in self.permissions: + if permission not in self._permissions: + cmd = ['set_permissions', '-p'] + cmd.append(permission['vhost']) + cmd.append(self.username) + cmd.append(permission['configure_priv']) + cmd.append(permission['write_priv']) + cmd.append(permission['read_priv']) + self._exec(cmd) def has_tags_modifications(self): return set(self.tags) != set(self._tags) @@ -195,13 +240,14 @@ def main(): user=dict(required=True, aliases=['username', 'name']), password=dict(default=None), tags=dict(default=None), + permissions=dict(default=list(), type='list'), 
vhost=dict(default='/'), configure_priv=dict(default='^$'), write_priv=dict(default='^$'), read_priv=dict(default='^$'), force=dict(default='no', type='bool'), state=dict(default='present', choices=['present', 'absent']), - node=dict(default='rabbit') + node=dict(default=None) ) module = AnsibleModule( argument_spec=arg_spec, @@ -211,6 +257,7 @@ def main(): username = module.params['user'] password = module.params['password'] tags = module.params['tags'] + permissions = module.params['permissions'] vhost = module.params['vhost'] configure_priv = module.params['configure_priv'] write_priv = module.params['write_priv'] @@ -219,7 +266,19 @@ def main(): state = module.params['state'] node = module.params['node'] - rabbitmq_user = RabbitMqUser(module, username, password, tags, vhost, configure_priv, write_priv, read_priv, node) + bulk_permissions = True + if permissions == []: + perm = { + 'vhost': vhost, + 'configure_priv': configure_priv, + 'write_priv': write_priv, + 'read_priv': read_priv + } + permissions.append(perm) + bulk_permissions = False + + rabbitmq_user = RabbitMqUser(module, username, password, tags, permissions, + node, bulk_permissions=bulk_permissions) changed = False if rabbitmq_user.get(): @@ -250,4 +309,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/messaging/rabbitmq_vhost.py b/messaging/rabbitmq_vhost.py index dbde32393cb..635d8b77bbe 100644 --- a/messaging/rabbitmq_vhost.py +++ b/messaging/rabbitmq_vhost.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rabbitmq_vhost @@ -55,7 +59,9 @@ EXAMPLES = ''' # Ensure that the vhost /test exists. -- rabbitmq_vhost: name=/test state=present +- rabbitmq_vhost: + name: /test + state: present ''' class RabbitMqVhost(object): @@ -144,4 +150,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/monitoring/airbrake_deployment.py b/monitoring/airbrake_deployment.py index a58df024182..124a801ea94 100644 --- a/monitoring/airbrake_deployment.py +++ b/monitoring/airbrake_deployment.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: airbrake_deployment @@ -51,7 +55,7 @@ description: - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit. 
required: false - default: "https://airbrake.io/deploys" + default: "https://airbrake.io/deploys.txt" version_added: "1.5" validate_certs: description: @@ -65,10 +69,11 @@ ''' EXAMPLES = ''' -- airbrake_deployment: token=AAAAAA - environment='staging' - user='ansible' - revision=4.2 +- airbrake_deployment: + token: AAAAAA + environment: staging + user: ansible + revision: '4.2' ''' import urllib @@ -81,7 +86,7 @@ def main(): module = AnsibleModule( argument_spec=dict( - token=dict(required=True), + token=dict(required=True, no_log=True), environment=dict(required=True), user=dict(required=False), repo=dict(required=False), @@ -127,5 +132,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() - +if __name__ == '__main__': + main() diff --git a/monitoring/bigpanda.py b/monitoring/bigpanda.py index 0139f3a598e..90b37841526 100644 --- a/monitoring/bigpanda.py +++ b/monitoring/bigpanda.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: bigpanda @@ -79,23 +83,34 @@ ''' EXAMPLES = ''' -- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=started +- bigpanda: + component: myapp + version: '1.3' + token: '{{ bigpanda_token }}' + state: started ... -- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=finished - -or using a deployment object: -- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=started - register: deployment - -- bigpanda: state=finished - args: deployment - -If outside servers aren't reachable from your machine, use local_action and pass the hostname: -- local_action: bigpanda component=myapp version=1.3 hosts={{ansible_hostname}} token={{ bigpanda_token }} state=started +- bigpanda: + component: myapp + version: '1.3' + token: '{{ bigpanda_token }}' + state: finished + +# If outside servers aren't reachable from your machine, use delegate_to and override hosts: +- bigpanda: + component: myapp + version: '1.3' + token: '{{ bigpanda_token }}' + hosts: '{{ ansible_hostname }}' + state: started + delegate_to: localhost register: deployment ... 
-- local_action: bigpanda state=finished - args: deployment +- bigpanda: + component: '{{ deployment.component }}' + version: '{{ deployment.version }}' + token: '{{ deployment.token }}' + state: finished + delegate_to: localhost ''' # =========================================== @@ -109,7 +124,7 @@ def main(): argument_spec=dict( component=dict(required=True, aliases=['name']), version=dict(required=True), - token=dict(required=True), + token=dict(required=True, no_log=True), state=dict(required=True, choices=['started', 'finished', 'failed']), hosts=dict(required=False, default=[socket.gethostname()], aliases=['host']), env=dict(required=False), @@ -178,11 +193,13 @@ def main(): module.exit_json(changed=True, **deployment) else: module.fail_json(msg=json.dumps(info)) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg=str(e)) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * +from ansible.module_utils.pycompat24 import get_exception if __name__ == '__main__': main() diff --git a/monitoring/boundary_meter.py b/monitoring/boundary_meter.py index 3e03a55c8aa..ccbf014026f 100644 --- a/monitoring/boundary_meter.py +++ b/monitoring/boundary_meter.py @@ -22,10 +22,9 @@ along with Ansible. If not, see . """ -import json -import datetime -import base64 -import os +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} DOCUMENTATION = ''' @@ -80,15 +79,33 @@ ''' +import base64 +import os + +try: + import json +except ImportError: + try: + import simplejson as json + except ImportError: + # Let snippet from module_utils/basic.py return a proper error in this case + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + api_host = "api.boundary.com" config_directory = "/etc/bprobe" + # "resource" like thing or apikey? def auth_encode(apikey): auth = base64.standard_b64encode(apikey) auth.replace("\n", "") return auth - + + def build_url(name, apiid, action, meter_id=None, cert_type=None): if action == "create": return 'https://%s/%s/meters' % (api_host, apiid) @@ -190,7 +207,7 @@ def delete_meter(module, name, apiid, apikey): try: cert_file = '%s/%s.pem' % (config_directory,cert_type) os.remove(cert_file) - except OSError, e: + except OSError: module.fail_json("Failed to remove " + cert_type + ".pem file") return 0, "Meter " + name + " deleted" @@ -211,9 +228,9 @@ def download_request(module, name, apiid, apikey, cert_type): body = response.read() cert_file = open(cert_file_path, 'w') cert_file.write(body) - cert_file.close - os.chmod(cert_file_path, 0600) - except: + cert_file.close() + os.chmod(cert_file_path, int('0600', 8)) + except: module.fail_json("Could not write to certificate file") return True @@ -248,9 +265,7 @@ def main(): module.exit_json(status=result,changed=True) -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * + if __name__ == '__main__': main() diff --git a/monitoring/circonus_annotation.py b/monitoring/circonus_annotation.py index ae5c98c87a1..5e9029e9fb0 100644 --- a/monitoring/circonus_annotation.py +++ b/monitoring/circonus_annotation.py @@ -17,9 +17,10 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
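bigpanda, boundary_meter, and the circonus_annotation module below all drop the Python-2-only "except Exception, e:" syntax in favor of ansible.module_utils.pycompat24.get_exception(), which retrieves the in-flight exception in a form that parses everywhere from Python 2.4 to Python 3. A minimal sketch of the pattern (the risky() helper is purely illustrative):

    from ansible.module_utils.pycompat24 import get_exception

    def risky():
        raise ValueError("boom")

    try:
        risky()
    except Exception:
        e = get_exception()  # grab the active exception without py2-only syntax
        print("failed: %s" % e)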
-import requests -import time -import json + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -67,25 +68,34 @@ # Create a simple annotation event with a source, defaults to start and end time of now - circonus_annotation: api_key: XXXXXXXXXXXXXXXXX - title: 'App Config Change' - description: 'This is a detailed description of the config change' - category: 'This category groups like annotations' + title: App Config Change + description: This is a detailed description of the config change + category: This category groups like annotations # Create an annotation with a duration of 5 minutes and a default start time of now - circonus_annotation: api_key: XXXXXXXXXXXXXXXXX - title: 'App Config Change' - description: 'This is a detailed description of the config change' - category: 'This category groups like annotations' + title: App Config Change + description: This is a detailed description of the config change + category: This category groups like annotations duration: 300 # Create an annotation with a start_time and end_time - circonus_annotation: api_key: XXXXXXXXXXXXXXXXX - title: 'App Config Change' - description: 'This is a detailed description of the config change' - category: 'This category groups like annotations' + title: App Config Change + description: This is a detailed description of the config change + category: This category groups like annotations start_time: 1395940006 end_time: 1395954407 ''' +import json +import time + +import requests + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception + + def post_annotation(annotation, api_key): ''' Takes annotation dict and api_key string''' base_url = 'https://api.circonus.com/v2' @@ -95,6 +105,7 @@ def post_annotation(annotation, api_key): resp.raise_for_status() return resp + def create_annotation(module): ''' Takes ansible module object ''' annotation = {} @@ -116,6 +127,8 @@ def create_annotation(module): annotation['description'] = module.params['description'] annotation['title'] = module.params['title'] return annotation + + def build_headers(api_token): '''Takes api token, returns headers with it included.''' headers = {'X-Circonus-App-Name': 'ansible', @@ -123,6 +136,7 @@ def build_headers(api_token): 'Accept': 'application/json'} return headers + def main(): '''Main function, dispatches logic''' module = AnsibleModule( @@ -133,15 +147,17 @@ def main(): title=dict(required=True), description=dict(required=True), duration=dict(required=False, type='int'), - api_key=dict(required=True) + api_key=dict(required=True, no_log=True) ) ) annotation = create_annotation(module) try: resp = post_annotation(annotation, module.params['api_key']) - except requests.exceptions.RequestException, err_str: + except requests.exceptions.RequestException: + err_str = get_exception() module.fail_json(msg='Request Failed', reason=err_str) module.exit_json(changed=True, annotation=resp.json()) -from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/monitoring/datadog_event.py b/monitoring/datadog_event.py index 25e8ce052b6..4e3bf03b159 100644 --- a/monitoring/datadog_event.py +++ b/monitoring/datadog_event.py @@ -2,6 +2,7 @@ # -*- coding: utf-8 -*- # # Author: Artūras 'arturaz' Šlajus +# Author: Naoya Nakazawa # # This module is proudly sponsored by iGeolise (www.igeolise.com) and # Tiny Lab Productions (www.tinylabproductions.com). 
@@ -21,6 +22,16 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Import Datadog +try: + from datadog import initialize, api + HAS_DATADOG = True +except: + HAS_DATADOG = False + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -30,7 +41,9 @@ - "Allows to post events to DataDog (www.datadoghq.com) service." - "Uses http://docs.datadoghq.com/api/#events API." version_added: "1.3" -author: "Artūras `arturaz` Šlajus (@arturaz)" +author: +- "Artūras `arturaz` Šlajus (@arturaz)" +- "Naoya Nakazawa (@n0ts)" notes: [] requirements: [] options: @@ -38,6 +51,10 @@ description: ["Your DataDog API key."] required: true default: null + app_key: + description: ["Your DataDog app key."] + required: true + version_added: "2.2" title: description: ["The event title."] required: true @@ -82,20 +99,27 @@ EXAMPLES = ''' # Post an event with low priority -datadog_event: title="Testing from ansible" text="Test!" priority="low" - api_key="6873258723457823548234234234" +- datadog_event: + title: Testing from ansible + text: Test + priority: low + api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 + app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN # Post an event with several tags -datadog_event: title="Testing from ansible" text="Test!" - api_key="6873258723457823548234234234" - tags=aa,bb,#host:{{ inventory_hostname }} +- datadog_event: + title: Testing from ansible + text: Test + api_key: 9775a026f1ca7d1c6c5af9d94d9595a4 + app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN + tags: 'aa,bb,#host:{{ inventory_hostname }}' ''' -import socket - +# Import Datadog def main(): module = AnsibleModule( argument_spec=dict( - api_key=dict(required=True), + api_key=dict(required=True, no_log=True), + app_key=dict(required=True, no_log=True), title=dict(required=True), text=dict(required=True), date_happened=dict(required=False, default=None, type='int'), @@ -108,51 +132,42 @@ def main(): choices=['error', 'warning', 'info', 'success'] ), aggregation_key=dict(required=False, default=None), - source_type_name=dict( - required=False, default='my apps', - choices=['nagios', 'hudson', 'jenkins', 'user', 'my apps', - 'feed', 'chef', 'puppet', 'git', 'bitbucket', 'fabric', - 'capistrano'] - ), validate_certs = dict(default='yes', type='bool'), ) ) - post_event(module) + # Prepare Datadog + if not HAS_DATADOG: + module.fail_json(msg='datadogpy required for this module') -def post_event(module): - uri = "https://app.datadoghq.com/api/v1/events?api_key=%s" % module.params['api_key'] + options = { + 'api_key': module.params['api_key'], + 'app_key': module.params['app_key'] + } - body = dict( - title=module.params['title'], - text=module.params['text'], - priority=module.params['priority'], - alert_type=module.params['alert_type'] - ) - if module.params['date_happened'] != None: - body['date_happened'] = module.params['date_happened'] - if module.params['tags'] != None: - body['tags'] = module.params['tags'] - if module.params['aggregation_key'] != None: - body['aggregation_key'] = module.params['aggregation_key'] - if module.params['source_type_name'] != None: - body['source_type_name'] = module.params['source_type_name'] + initialize(**options) + + _post_event(module) + + +def _post_event(module): + try: + msg = api.Event.create(title=module.params['title'], + text=module.params['text'], + tags=module.params['tags'], + priority=module.params['priority'], + alert_type=module.params['alert_type'], + 
aggregation_key=module.params['aggregation_key'], + source_type_name='ansible') + if msg['status'] != 'ok': + module.fail_json(msg=msg) - json_body = module.jsonify(body) - headers = {"Content-Type": "application/json"} + module.exit_json(changed=True, msg=msg) + except Exception: + e = get_exception() + module.fail_json(msg=str(e)) - (response, info) = fetch_url(module, uri, data=json_body, headers=headers) - if info['status'] == 200: - response_body = response.read() - response_json = module.from_json(response_body) - if response_json['status'] == 'ok': - module.exit_json(changed=True) - else: - module.fail_json(msg=response) - else: - module.fail_json(**info) -# import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * if __name__ == '__main__': diff --git a/monitoring/datadog_monitor.py b/monitoring/datadog_monitor.py index 9853d748c2c..50a067d8a2a 100644 --- a/monitoring/datadog_monitor.py +++ b/monitoring/datadog_monitor.py @@ -19,12 +19,9 @@ # along with Ansible. If not, see . # import module snippets -# Import Datadog -try: - from datadog import initialize, api - HAS_DATADOG = True -except: - HAS_DATADOG = False +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -34,8 +31,7 @@ - "Manages monitors within Datadog" - "Options like described on http://docs.datadoghq.com/api/" version_added: "2.0" -author: "Sebastian Kornehl (@skornehl)" -notes: [] +author: "Sebastian Kornehl (@skornehl)" requirements: [datadog] options: api_key: @@ -48,20 +44,27 @@ description: ["The designated state of the monitor."] required: true choices: ['present', 'absent', 'muted', 'unmuted'] + tags: + description: ["A list of tags to associate with your monitor when creating or updating. This can help you categorize and filter monitors."] + required: false + default: None + version_added: "2.2" type: - description: ["The type of the monitor."] + description: + - "The type of the monitor." + - The 'event alert'is available starting at Ansible 2.1 required: false default: null - choices: ['metric alert', 'service check'] + choices: ['metric alert', 'service check', 'event alert'] query: - description: ["he monitor query to notify on with syntax varying depending on what type of monitor you are creating."] + description: ["The monitor query to notify on with syntax varying depending on what type of monitor you are creating."] required: false default: null name: description: ["The name of the alert."] required: true message: - description: ["A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same '@username' notation as events."] + description: ["A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same '@username' notation as events. Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'."] required: false default: null silenced: @@ -93,9 +96,24 @@ required: false default: False thresholds: - description: ["A dictionary of thresholds by status. Because service checks can have multiple thresholds, we don't define them directly in the query."] + description: ["A dictionary of thresholds by status. This option is only available for service checks and metric alerts. 
Because each of them can have multiple thresholds, we don't define them directly in the query."] required: false default: {'ok': 1, 'critical': 1, 'warning': 1} + locked: + description: ["A boolean indicating whether changes to this monitor should be restricted to the creator or admins."] + required: false + default: False + version_added: "2.2" + require_full_window: + description: ["A boolean indicating whether this monitor needs a full window of data before it's evaluated. We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped."] + required: false + default: null + version_added: "2.3" + id: + description: ["The id of the alert. If set, will be used instead of the name to locate the alert."] + required: false + default: null + version_added: "2.3" ''' EXAMPLES = ''' @@ -105,7 +123,7 @@ name: "Test monitor" state: "present" query: "datadog.agent.up".over("host:host1").last(2).count_by_status()" - message: "Some message." + message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog." api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" @@ -132,25 +150,39 @@ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" ''' +# Import Datadog +try: + from datadog import initialize, api + HAS_DATADOG = True +except: + HAS_DATADOG = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception + def main(): module = AnsibleModule( argument_spec=dict( - api_key=dict(required=True), - app_key=dict(required=True), + api_key=dict(required=True, no_log=True), + app_key=dict(required=True, no_log=True), state=dict(required=True, choises=['present', 'absent', 'mute', 'unmute']), - type=dict(required=False, choises=['metric alert', 'service check']), + type=dict(required=False, choises=['metric alert', 'service check', 'event alert']), name=dict(required=True), query=dict(required=False), message=dict(required=False, default=None), silenced=dict(required=False, default=None, type='dict'), - notify_no_data=dict(required=False, default=False, choices=BOOLEANS), + notify_no_data=dict(required=False, default=False, type='bool'), no_data_timeframe=dict(required=False, default=None), timeout_h=dict(required=False, default=None), renotify_interval=dict(required=False, default=None), escalation_message=dict(required=False, default=None), - notify_audit=dict(required=False, default=False, choices=BOOLEANS), - thresholds=dict(required=False, type='dict', default={'ok': 1, 'critical': 1, 'warning': 1}), + notify_audit=dict(required=False, default=False, type='bool'), + thresholds=dict(required=False, type='dict', default=None), + tags=dict(required=False, type='list', default=None), + locked=dict(required=False, default=False, type='bool'), + require_full_window=dict(required=False, default=None, type='bool'), + id=dict(required=False) ) ) @@ -174,24 +206,40 @@ def main(): elif module.params['state'] == 'unmute': unmute_monitor(module) +def _fix_template_vars(message): + if message: + return message.replace('[[', '{{').replace(']]', '}}') + return message + def _get_monitor(module): - for monitor in api.Monitor.get_all(): - if monitor['name'] == module.params['name']: - return monitor + if module.params['id'] is not None: + monitor = api.Monitor.get(module.params['id']) + if 'errors' in monitor: + module.fail_json(msg="Failed to retrieve monitor with id %s, errors are %s" % (module.params['id'], str(monitor['errors']))) + return monitor + else: + 
monitors = api.Monitor.get_all() + for monitor in monitors: + if monitor['name'] == module.params['name']: + return monitor return {} def _post_monitor(module, options): try: - msg = api.Monitor.create(type=module.params['type'], query=module.params['query'], - name=module.params['name'], message=module.params['message'], - options=options) + kwargs = dict(type=module.params['type'], query=module.params['query'], + name=module.params['name'], message=_fix_template_vars(module.params['message']), + options=options) + if module.params['tags'] is not None: + kwargs['tags'] = module.params['tags'] + msg = api.Monitor.create(**kwargs) if 'errors' in msg: module.fail_json(msg=str(msg['errors'])) else: module.exit_json(changed=True, msg=msg) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg=str(e)) def _equal_dicts(a, b, ignore_keys): @@ -201,16 +249,21 @@ def _equal_dicts(a, b, ignore_keys): def _update_monitor(module, monitor, options): try: - msg = api.Monitor.update(id=monitor['id'], query=module.params['query'], - name=module.params['name'], message=module.params['message'], - options=options) + kwargs = dict(id=monitor['id'], query=module.params['query'], + name=module.params['name'], message=_fix_template_vars(module.params['message']), + options=options) + if module.params['tags'] is not None: + kwargs['tags'] = module.params['tags'] + msg = api.Monitor.update(**kwargs) + if 'errors' in msg: module.fail_json(msg=str(msg['errors'])) - elif _equal_dicts(msg, monitor, ['creator', 'overall_state']): + elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified']): module.exit_json(changed=False, msg=msg) else: module.exit_json(changed=True, msg=msg) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg=str(e)) @@ -223,9 +276,13 @@ def install_monitor(module): "renotify_interval": module.params['renotify_interval'], "escalation_message": module.params['escalation_message'], "notify_audit": module.boolean(module.params['notify_audit']), + "locked": module.boolean(module.params['locked']), + "require_full_window" : module.params['require_full_window'] } if module.params['type'] == "service check": + options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1} + if module.params['type'] == "metric alert" and module.params['thresholds'] is not None: options["thresholds"] = module.params['thresholds'] monitor = _get_monitor(module) @@ -242,7 +299,8 @@ def delete_monitor(module): try: msg = api.Monitor.delete(monitor['id']) module.exit_json(changed=True, msg=msg) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg=str(e)) @@ -261,7 +319,8 @@ def mute_monitor(module): else: msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced']) module.exit_json(changed=True, msg=msg) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg=str(e)) @@ -274,10 +333,10 @@ def unmute_monitor(module): try: msg = api.Monitor.unmute(monitor['id']) module.exit_json(changed=True, msg=msg) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg=str(e)) -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() +if __name__ == '__main__': + main() diff --git a/monitoring/honeybadger_deployment.py b/monitoring/honeybadger_deployment.py new file mode 100644 index 00000000000..362af67963a --- /dev/null +++ b/monitoring/honeybadger_deployment.py @@ -0,0 +1,146 @@ 
+#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2014 Benjamin Curtis +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: honeybadger_deployment +author: "Benjamin Curtis (@stympy)" +version_added: "2.2" +short_description: Notify Honeybadger.io about app deployments +description: + - Notify Honeybadger.io about app deployments (see http://docs.honeybadger.io/article/188-deployment-tracking) +options: + token: + description: + - API token. + required: true + environment: + description: + - The environment name, typically 'production', 'staging', etc. + required: true + user: + description: + - The username of the person doing the deployment + required: false + default: None + repo: + description: + - URL of the project repository + required: false + default: None + revision: + description: + - A hash, number, tag, or other identifier showing what revision was deployed + required: false + default: None + url: + description: + - Optional URL to submit the notification to. + required: false + default: "https://api.honeybadger.io/v1/deploys" + validate_certs: + description: + - If C(no), SSL certificates for the target url will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + +requirements: [] +''' + +EXAMPLES = ''' +- honeybadger_deployment: + token: AAAAAA + environment: staging + user: ansible + revision: b6826b8 + repo: 'git@github.com:user/repo.git' +''' + +RETURN = '''# ''' + +import urllib + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import * + +# =========================================== +# Module execution. 
+# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True, no_log=True), + environment=dict(required=True), + user=dict(required=False), + repo=dict(required=False), + revision=dict(required=False), + url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'), + validate_certs=dict(default='yes', type='bool'), + ), + supports_check_mode=True + ) + + params = {} + + if module.params["environment"]: + params["deploy[environment]"] = module.params["environment"] + + if module.params["user"]: + params["deploy[local_username]"] = module.params["user"] + + if module.params["repo"]: + params["deploy[repository]"] = module.params["repo"] + + if module.params["revision"]: + params["deploy[revision]"] = module.params["revision"] + + params["api_key"] = module.params["token"] + + url = module.params.get('url') + + # If we're in check mode, just exit pretending like we succeeded + if module.check_mode: + module.exit_json(changed=True) + + try: + data = urllib.urlencode(params) + response, info = fetch_url(module, url, data=data) + except Exception: + e = get_exception() + module.fail_json(msg='Unable to notify Honeybadger: %s' % e) + else: + if info['status'] == 200: + module.exit_json(changed=True) + else: + module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url)) + +if __name__ == '__main__': + main() + diff --git a/monitoring/librato_annotation.py b/monitoring/librato_annotation.py index f174bda0ea4..838abf14e60 100644 --- a/monitoring/librato_annotation.py +++ b/monitoring/librato_annotation.py @@ -20,6 +20,10 @@ # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: librato_annotation @@ -77,27 +81,28 @@ - librato_annotation: user: user@example.com api_key: XXXXXXXXXXXXXXXXX - title: 'App Config Change' - source: 'foo.bar' - description: 'This is a detailed description of the config change' + title: App Config Change + source: foo.bar + description: This is a detailed description of the config change # Create an annotation that includes a link - librato_annotation: user: user@example.com api_key: XXXXXXXXXXXXXXXXXX - name: 'code.deploy' - title: 'app code deploy' - description: 'this is a detailed description of a deployment' + name: code.deploy + title: app code deploy + description: this is a detailed description of a deployment links: - - { rel: 'example', href: 'http://www.example.com/deploy' } + - rel: example + href: http://www.example.com/deploy # Create an annotation with a start_time and end_time - librato_annotation: user: user@example.com api_key: XXXXXXXXXXXXXXXXXX - name: 'maintenance' - title: 'Maintenance window' - description: 'This is a detailed description of maintenance' + name: maintenance + title: Maintenance window + description: This is a detailed description of maintenance start_time: 1395940006 end_time: 1395954406 ''' diff --git a/monitoring/logentries.py b/monitoring/logentries.py index a347afd84c2..a85679ef2eb 100644 --- a/monitoring/logentries.py +++ b/monitoring/logentries.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with this software. If not, see . 
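The honeybadger_deployment module added above assembles its notification as deploy[...] form fields, url-encodes them, and POSTs the body through fetch_url(). A hedged sketch of just the payload-building step, runnable on Python 2 like the module itself (every value below is a placeholder taken from the module's example):

    import urllib

    params = {
        'deploy[environment]': 'staging',
        'deploy[local_username]': 'ansible',
        'deploy[revision]': 'b6826b8',
        'api_key': 'AAAAAA',  # placeholder token
    }

    data = urllib.urlencode(params)  # the form body fetch_url() will POST
    print(data)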
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: logentries @@ -48,8 +52,16 @@ - Requires the LogEntries agent which can be installed following the instructions at logentries.com ''' EXAMPLES = ''' -- logentries: path=/var/log/nginx/access.log state=present name=nginx-access-log -- logentries: path=/var/log/nginx/error.log state=absent +# Track nginx logs +- logentries: + path: /var/log/nginx/access.log + state: present + name: nginx-access-log + +# Stop tracking nginx logs +- logentries: + path: /var/log/nginx/error.log + state: absent ''' def query_log_status(module, le_path, path, state="present"): @@ -144,4 +156,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/monitoring/logicmonitor.py b/monitoring/logicmonitor.py new file mode 100644 index 00000000000..f2267207a71 --- /dev/null +++ b/monitoring/logicmonitor.py @@ -0,0 +1,2178 @@ +#!/usr/bin/python + +"""LogicMonitor Ansible module for managing Collectors, Hosts and Hostgroups + Copyright (C) 2015 LogicMonitor + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software Foundation, + Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA""" + +import datetime +import os +import platform +import socket +import sys +import types +import urllib + +HAS_LIB_JSON = True +try: + import json + # Detect the python-json library which is incompatible + # Look for simplejson if that's the case + try: + if ( + not isinstance(json.loads, types.FunctionType) or + not isinstance(json.dumps, types.FunctionType) + ): + raise ImportError + except AttributeError: + raise ImportError +except ImportError: + try: + import simplejson as json + except ImportError: + print( + '\n{"msg": "Error: ansible requires the stdlib json or ' + + 'simplejson module, neither was found!", "failed": true}' + ) + HAS_LIB_JSON = False + except SyntaxError: + print( + '\n{"msg": "SyntaxError: probably due to installed simplejson ' + + 'being for a different python version", "failed": true}' + ) + HAS_LIB_JSON = False + +RETURN = ''' +--- +success: + description: flag indicating that execution was successful + returned: success + type: boolean + sample: True +... +''' + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: logicmonitor +short_description: Manage your LogicMonitor account through Ansible Playbooks +description: + - LogicMonitor is a hosted, full-stack, infrastructure monitoring platform. + - This module manages hosts, host groups, and collectors within your LogicMonitor account. +version_added: "2.2" +author: [Ethan Culler-Mayeno (@ethanculler), Jeff Wozniak (@woz5999)] +notes: + - You must have an existing LogicMonitor account for this module to function. 
+requirements: ["An existing LogicMonitor account", "Linux"]
+options:
+  target:
+    description:
+      - The type of LogicMonitor object you wish to manage.
+      - "Collector: Perform actions on a LogicMonitor collector."
+      - "NOTE: You should use Ansible service modules such as M(service) or M(supervisorctl) for managing the Collector 'logicmonitor-agent' and 'logicmonitor-watchdog' services. Specifically, you'll probably want to start these services after a Collector add and stop these services before a Collector remove."
+      - "Host: Perform actions on a host device."
+      - "Hostgroup: Perform actions on a LogicMonitor host group."
+      - "NOTE: Host and Hostgroup tasks should always be performed via local_action. There are no benefits to running these tasks on the remote host and doing so will typically cause problems."
+    required: true
+    default: null
+    choices: ['collector', 'host', 'datasource', 'hostgroup']
+  action:
+    description:
+      - The action you wish to perform on target.
+      - "Add: Add an object to your LogicMonitor account."
+      - "Remove: Remove an object from your LogicMonitor account."
+      - "Update: Update properties, description, or groups (target=host) for an object in your LogicMonitor account."
+      - "SDT: Schedule downtime for an object in your LogicMonitor account."
+    required: true
+    default: null
+    choices: ['add', 'remove', 'update', 'sdt']
+  company:
+    description:
+      - The LogicMonitor account company name. If you would log in to your account at "superheroes.logicmonitor.com" you would use "superheroes".
+    required: true
+    default: null
+  user:
+    description:
+      - A LogicMonitor user name. The module will authenticate and perform actions on behalf of this user.
+    required: true
+    default: null
+  password:
+    description:
+      - The password of the specified LogicMonitor user
+    required: true
+    default: null
+  collector:
+    description:
+      - The fully qualified domain name of a collector in your LogicMonitor account.
+      - This is required for the creation of a LogicMonitor host (target=host action=add).
+      - This is required for updating, removing or scheduling downtime for hosts if 'displayname' isn't specified (target=host action=update action=remove action=sdt).
+    required: false
+    default: null
+  hostname:
+    description:
+      - The hostname of a host in your LogicMonitor account, or the desired hostname of a device to manage.
+      - Optional for managing hosts (target=host).
+    required: false
+    default: 'hostname -f'
+  displayname:
+    description:
+      - The display name of a host in your LogicMonitor account or the desired display name of a device to manage.
+      - Optional for managing hosts (target=host).
+    required: false
+    default: 'hostname -f'
+  description:
+    description:
+      - The long text description of the object in your LogicMonitor account.
+      - Optional for managing hosts and host groups (target=host or target=hostgroup; action=add or action=update).
+    required: false
+    default: ""
+  properties:
+    description:
+      - A dictionary of properties to set on the LogicMonitor host or host group.
+      - Optional for managing hosts and host groups (target=host or target=hostgroup; action=add or action=update).
+      - This parameter will add or update existing properties in your LogicMonitor account.
+    required: false
+    default: {}
+  groups:
+    description:
+      - A list of groups that the host should be a member of.
+      - Optional for managing hosts (target=host; action=add or action=update).
+    required: false
+    default: []
+  id:
+    description:
+      - ID of the datasource to target.
+      - Required for management of LogicMonitor datasources (target=datasource).
+    required: false
+    default: null
+  fullpath:
+    description:
+      - The fullpath of the host group object you would like to manage.
+      - Recommended to run on a single Ansible host.
+      - Required for management of LogicMonitor host groups (target=hostgroup).
+    required: false
+    default: null
+  alertenable:
+    description:
+      - A boolean flag to turn alerting on or off for an object.
+      - Optional for managing all hosts (action=add or action=update).
+    required: false
+    default: true
+    choices: [true, false]
+  starttime:
+    description:
+      - The time that the Scheduled Down Time (SDT) should begin.
+      - Optional for managing SDT (action=sdt).
+      - Must be of the format 'Y-m-d H:M', for example '2017-03-04 05:06'.
+    required: false
+    default: Now
+  duration:
+    description:
+      - The duration (minutes) of the Scheduled Down Time (SDT).
+      - Optional for putting an object into SDT (action=sdt).
+    required: false
+    default: 30
+...
+'''
+EXAMPLES = '''
+    # example of adding a new LogicMonitor collector to these devices
+    ---
+    - hosts: collectors
+      remote_user: '{{ username }}'
+      vars:
+        company: 'mycompany'
+        user: 'myusername'
+        password: 'mypassword'
+      tasks:
+      - name: Deploy/verify LogicMonitor collectors
+        become: yes
+        logicmonitor:
+          target=collector
+          action=add
+          company={{ company }}
+          user={{ user }}
+          password={{ password }}
+
+    # example of adding a list of hosts into monitoring
+    ---
+    - hosts: hosts
+      remote_user: '{{ username }}'
+      vars:
+        company: 'mycompany'
+        user: 'myusername'
+        password: 'mypassword'
+      tasks:
+      - name: Deploy LogicMonitor Host
+        # All tasks except for target=collector should use local_action
+        local_action: >
+          logicmonitor
+          target=host
+          action=add
+          collector='mycompany-Collector'
+          company='{{ company }}'
+          user='{{ user }}'
+          password='{{ password }}'
+          groups="/servers/production,/datacenter1"
+          properties="{'snmp.community':'secret','dc':'1', 'type':'prod'}"
+
+    # example of putting a datasource in SDT
+    ---
+    - hosts: localhost
+      remote_user: '{{ username }}'
+      vars:
+        company: 'mycompany'
+        user: 'myusername'
+        password: 'mypassword'
+      tasks:
+      - name: SDT a datasource
+        # All tasks except for target=collector should use local_action
+        local_action: >
+          logicmonitor
+          target=datasource
+          action=sdt
+          id='123'
+          duration=3000
+          starttime='2017-03-04 05:06'
+          company='{{ company }}'
+          user='{{ user }}'
+          password='{{ password }}'
+
+    # example of creating a hostgroup
+    ---
+    - hosts: localhost
+      remote_user: '{{ username }}'
+      vars:
+        company: 'mycompany'
+        user: 'myusername'
+        password: 'mypassword'
+      tasks:
+      - name: Create a host group
+        # All tasks except for target=collector should use local_action
+        local_action: >
+          logicmonitor
+          target=hostgroup
+          action=add
+          fullpath='/servers/development'
+          company='{{ company }}'
+          user='{{ user }}'
+          password='{{ password }}'
+          properties="{'snmp.community':'commstring', 'type':'dev'}"
+
+    # example of putting a list of hosts into SDT
+    ---
+    - hosts: hosts
+      remote_user: '{{ username }}'
+      vars:
+        company: 'mycompany'
+        user: 'myusername'
+        password: 'mypassword'
+      tasks:
+      - name: SDT hosts
+        # All tasks except for target=collector should use local_action
+        local_action: >
+          logicmonitor
+          target=host
+          action=sdt
+          duration=3000
+          starttime='2016-11-10 09:08'
+          company='{{ company }}'
+          user='{{ user }}'
+          password='{{ password }}'
+          collector='mycompany-Collector'
+
+    # example of putting a host group in SDT
+    ---
+    - hosts: localhost
+      remote_user: '{{ username }}'
+      vars:
+        company: 'mycompany'
+        user: 'myusername'
+        password: 'mypassword'
+      tasks:
+      - name: SDT a host group
+        # All tasks except for target=collector should use local_action
+        local_action: >
+          logicmonitor
+          target=hostgroup
+          action=sdt
+          fullpath='/servers/development'
+          duration=3000
+          starttime='2017-03-04 05:06'
+          company='{{ company }}'
+          user='{{ user }}'
+          password='{{ password }}'
+
+    # example of updating a list of hosts
+    ---
+    - hosts: hosts
+      remote_user: '{{ username }}'
+      vars:
+        company: 'mycompany'
+        user: 'myusername'
+        password: 'mypassword'
+      tasks:
+      - name: Update a list of hosts
+        # All tasks except for target=collector should use local_action
+        local_action: >
+          logicmonitor
+          target=host
+          action=update
+          company='{{ company }}'
+          user='{{ user }}'
+          password='{{ password }}'
+          collector='mycompany-Collector'
+          groups="/servers/production,/datacenter5"
+          properties="{'snmp.community':'commstring','dc':'5'}"
+
+    # example of updating a hostgroup
+    ---
+    - hosts: hosts
+      remote_user: '{{ username }}'
+      vars:
+        company: 'mycompany'
+        user: 'myusername'
+        password: 'mypassword'
+      tasks:
+      - name: Update a host group
+        # All tasks except for target=collector should use local_action
+        local_action: >
+          logicmonitor
+          target=hostgroup
+          action=update
+          fullpath='/servers/development'
+          company='{{ company }}'
+          user='{{ user }}'
+          password='{{ password }}'
+          properties="{'snmp.community':'hg', 'type':'dev', 'status':'test'}"
+
+    # example of removing a list of hosts from monitoring
+    ---
+    - hosts: hosts
+      remote_user: '{{ username }}'
+      vars:
+        company: 'mycompany'
+        user: 'myusername'
+        password: 'mypassword'
+      tasks:
+      - name: Remove LogicMonitor hosts
+        # All tasks except for target=collector should use local_action
+        local_action: >
+          logicmonitor
+          target=host
+          action=remove
+          company='{{ company }}'
+          user='{{ user }}'
+          password='{{ password }}'
+          collector='mycompany-Collector'
+
+    # example of removing a host group
+    ---
+    - hosts: hosts
+      remote_user: '{{ username }}'
+      vars:
+        company: 'mycompany'
+        user: 'myusername'
+        password: 'mypassword'
+      tasks:
+      - name: Remove LogicMonitor development servers hostgroup
+        # All tasks except for target=collector should use local_action
+        local_action: >
+          logicmonitor
+          target=hostgroup
+          action=remove
+          company='{{ company }}'
+          user='{{ user }}'
+          password='{{ password }}'
+          fullpath='/servers/development'
+      - name: Remove LogicMonitor servers hostgroup
+        # All tasks except for target=collector should use local_action
+        local_action: >
+          logicmonitor
+          target=hostgroup
+          action=remove
+          company='{{ company }}'
+          user='{{ user }}'
+          password='{{ password }}'
+          fullpath='/servers'
+      - name: Remove LogicMonitor datacenter1 hostgroup
+        # All tasks except for target=collector should use local_action
+        local_action: >
+          logicmonitor
+          target=hostgroup
+          action=remove
+          company='{{ company }}'
+          user='{{ user }}'
+          password='{{ password }}'
+          fullpath='/datacenter1'
+      - name: Remove LogicMonitor datacenter5 hostgroup
+        # All tasks except for target=collector should use local_action
+        local_action: >
+          logicmonitor
+          target=hostgroup
+          action=remove
+          company='{{ company }}'
+          user='{{ user }}'
+          password='{{ password }}'
+          fullpath='/datacenter5'
+
+    # example of removing LogicMonitor collectors from these devices
+    ---
+    - hosts: collectors
+      remote_user: '{{ username }}'
+      vars:
+        company: 'mycompany'
+        user: 'myusername'
+        password: 'mypassword'
+      tasks:
+      - name: Remove LogicMonitor collectors
+        become: yes
+        logicmonitor:
+          target=collector
+          action=remove
+          company={{ company }}
+          user={{ user }}
+          password={{ password }}
+
+    # complete example
+    ---
+    - hosts: localhost
+      remote_user: '{{ username }}'
+      vars:
+        company: 'mycompany'
+        user: 'myusername'
+        password: 'mypassword'
+      tasks:
+      - name: Create a host group
+        local_action: >
+          logicmonitor
+          target=hostgroup
+          action=add
+          fullpath='/servers/production/database'
+          company='{{ company }}'
+          user='{{ user }}'
+          password='{{ password }}'
+          properties="{'snmp.community':'commstring'}"
+      - name: SDT a host group
+        local_action: >
+          logicmonitor
+          target=hostgroup
+          action=sdt
+          fullpath='/servers/production/web'
+          duration=3000
+          starttime='2012-03-04 05:06'
+          company='{{ company }}'
+          user='{{ user }}'
+          password='{{ password }}'
+
+    - hosts: collectors
+      remote_user: '{{ username }}'
+      vars:
+        company: 'mycompany'
+        user: 'myusername'
+        password: 'mypassword'
+      tasks:
+      - name: Deploy/verify LogicMonitor collectors
+        logicmonitor:
+          target: collector
+          action: add
+          company: '{{ company }}'
+          user: '{{ user }}'
+          password: '{{ password }}'
+      - name: Place LogicMonitor collectors into 30 minute Scheduled downtime
+        logicmonitor: target=collector action=sdt company={{ company }}
+          user={{ user }} password={{ password }}
+      - name: Deploy LogicMonitor Host
+        local_action: >
+          logicmonitor
+          target=host
+          action=add
+          collector=agent1.ethandev.com
+          company='{{ company }}'
+          user='{{ user }}'
+          password='{{ password }}'
+          properties="{'snmp.community':'commstring', 'dc':'1'}"
+          groups="/servers/production/collectors, /datacenter1"
+
+    - hosts: database-servers
+      remote_user: '{{ username }}'
+      vars:
+        company: 'mycompany'
+        user: 'myusername'
+        password: 'mypassword'
+      tasks:
+      - name: Deploy LogicMonitor hosts
+        local_action: >
+          logicmonitor
+          target=host
+          action=add
+          collector=monitoring.dev.com
+          company='{{ company }}'
+          user='{{ user }}'
+          password='{{ password }}'
+          properties="{'snmp.community':'commstring', 'type':'db', 'dc':'1'}"
+          groups="/servers/production/database, /datacenter1"
+      - name: Schedule a 5 hour downtime starting 2012-11-10 09:08
+        local_action: >
+          logicmonitor
+          target=host
+          action=sdt
+          duration=300
+          starttime='2012-11-10 09:08'
+          company='{{ company }}'
+          user='{{ user }}'
+          password='{{ password }}'
+'''
+
+
+class LogicMonitor(object):
+
+    def __init__(self, module, **params):
+        self.__version__ = "1.0-python"
+        self.module = module
+        self.module.debug("Instantiating LogicMonitor object")
+
+        self.check_mode = False
+        self.company = params["company"]
+        self.user = params["user"]
+        self.password = params["password"]
+        self.fqdn = socket.getfqdn()
+        self.lm_url = "logicmonitor.com/santaba"
+        self.__version__ = self.__version__ + "-ansible-module"
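+
+    # rpc() and do() below wrap LogicMonitor's RPC API: both issue a GET
+    # request to https://{company}.logicmonitor.com/santaba/{rpc,do}/{action}
+    # with the account credentials (c, u, p) appended as query parameters.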
+    def rpc(self, action, params):
+        """Make a call to the LogicMonitor RPC library
+        and return the response"""
+        self.module.debug("Running LogicMonitor.rpc")
+
+        param_str = urllib.urlencode(params)
+        creds = urllib.urlencode(
+            {"c": self.company,
+             "u": self.user,
+             "p": self.password})
+
+        if param_str:
+            param_str = param_str + "&"
+
+        param_str = param_str + creds
+
+        try:
+            url = ("https://" + self.company + "." + self.lm_url +
+                   "/rpc/" + action + "?" + param_str)
+
+            # Set custom LogicMonitor header with version
+            headers = {"X-LM-User-Agent": self.__version__}
+
+            # Make the request
+            f = open_url(url, headers=headers)
+
+            raw = f.read()
+            resp = json.loads(raw)
+            if resp["status"] == 403:
+                self.module.debug("Authentication failed.")
+                self.fail(msg="Error: " + resp["errmsg"])
+            else:
+                return raw
+        except IOError:
+            ioe = get_exception()
+            self.fail(msg="Error: Exception making RPC call to " +
+                          "https://" + self.company + "." + self.lm_url +
+                          "/rpc/" + action + "\nException: " + str(ioe))
+
+    def do(self, action, params):
+        """Make a call to the LogicMonitor
+        server \"do\" function"""
+        self.module.debug("Running LogicMonitor.do...")
+
+        param_str = urllib.urlencode(params)
+        creds = (urllib.urlencode(
+            {"c": self.company,
+             "u": self.user,
+             "p": self.password}))
+
+        if param_str:
+            param_str = param_str + "&"
+        param_str = param_str + creds
+
+        try:
+            self.module.debug("Attempting to open URL: " +
+                              "https://" + self.company + "." + self.lm_url +
+                              "/do/" + action + "?" + param_str)
+            f = open_url(
+                "https://" + self.company + "." + self.lm_url +
+                "/do/" + action + "?" + param_str)
+            return f.read()
+        except IOError:
+            ioe = get_exception()
+            self.fail(msg="Error: Exception making RPC call to " +
+                          "https://" + self.company + "." + self.lm_url +
+                          "/do/" + action + "\nException: " + str(ioe))
+
+    def get_collectors(self):
+        """Returns a JSON object containing a list of
+        LogicMonitor collectors"""
+        self.module.debug("Running LogicMonitor.get_collectors...")
+
+        self.module.debug("Making RPC call to 'getAgents'")
+        resp = self.rpc("getAgents", {})
+        resp_json = json.loads(resp)
+
+        if resp_json["status"] == 200:
+            self.module.debug("RPC call succeeded")
+            return resp_json["data"]
+        else:
+            self.fail(msg=resp)
+
+    def get_host_by_hostname(self, hostname, collector):
+        """Returns a host object for the host matching the
+        specified hostname"""
+        self.module.debug("Running LogicMonitor.get_host_by_hostname...")
+
+        self.module.debug("Looking for hostname " + hostname)
+        self.module.debug("Making RPC call to 'getHosts'")
+        hostlist_json = json.loads(self.rpc("getHosts", {"hostGroupId": 1}))
+
+        if collector:
+            if hostlist_json["status"] == 200:
+                self.module.debug("RPC call succeeded")
+
+                hosts = hostlist_json["data"]["hosts"]
+
+                self.module.debug(
+                    "Looking for host matching: hostname " + hostname +
+                    " and collector " + str(collector["id"]))
+
+                for host in hosts:
+                    if (host["hostName"] == hostname and
+                       host["agentId"] == collector["id"]):
+
+                        self.module.debug("Host match found")
+                        return host
+                self.module.debug("No host match found")
+                return None
+            else:
+                self.module.debug("RPC call failed")
+                self.module.debug(hostlist_json)
+        else:
+            self.module.debug("No collector specified")
+            return None
+
+    def get_host_by_displayname(self, displayname):
+        """Returns a host object for the host matching the
+        specified display name"""
+        self.module.debug("Running LogicMonitor.get_host_by_displayname...")
+
+        self.module.debug("Looking for displayname " + displayname)
+        self.module.debug("Making RPC call to 'getHost'")
+        host_json = (json.loads(self.rpc("getHost",
+                                {"displayName": displayname})))
+
+        if host_json["status"] == 200:
+            self.module.debug("RPC call succeeded")
+            return host_json["data"]
+        else:
+            self.module.debug("RPC call failed")
+            self.module.debug(host_json)
+            return None
+
+    def get_collector_by_description(self, description):
+        """Returns a JSON collector object for the collector
+        matching the specified FQDN (description)"""
(description)""" + self.module.debug( + "Running LogicMonitor.get_collector_by_description..." + ) + + collector_list = self.get_collectors() + if collector_list is not None: + self.module.debug("Looking for collector with description {0}" + + description) + for collector in collector_list: + if collector["description"] == description: + self.module.debug("Collector match found") + return collector + self.module.debug("No collector match found") + return None + + def get_group(self, fullpath): + """Returns a JSON group object for the group matching the + specified path""" + self.module.debug("Running LogicMonitor.get_group...") + + self.module.debug("Making RPC call to getHostGroups") + resp = json.loads(self.rpc("getHostGroups", {})) + + if resp["status"] == 200: + self.module.debug("RPC called succeeded") + groups = resp["data"] + + self.module.debug("Looking for group matching " + fullpath) + for group in groups: + if group["fullPath"] == fullpath.lstrip('/'): + self.module.debug("Group match found") + return group + + self.module.debug("No group match found") + return None + else: + self.module.debug("RPC call failed") + self.module.debug(resp) + + return None + + def create_group(self, fullpath): + """Recursively create a path of host groups. + Returns the id of the newly created hostgroup""" + self.module.debug("Running LogicMonitor.create_group...") + + res = self.get_group(fullpath) + if res: + self.module.debug("Group {0} exists." + fullpath) + return res["id"] + + if fullpath == "/": + self.module.debug("Specified group is root. Doing nothing.") + return 1 + else: + self.module.debug("Creating group named " + fullpath) + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + parentpath, name = fullpath.rsplit('/', 1) + parentgroup = self.get_group(parentpath) + + parentid = 1 + + if parentpath == "": + parentid = 1 + elif parentgroup: + parentid = parentgroup["id"] + else: + parentid = self.create_group(parentpath) + + h = None + + # Determine if we're creating a group from host or hostgroup class + if hasattr(self, '_build_host_group_hash'): + h = self._build_host_group_hash( + fullpath, + self.description, + self.properties, + self.alertenable) + h["name"] = name + h["parentId"] = parentid + else: + h = {"name": name, + "parentId": parentid, + "alertEnable": True, + "description": ""} + + self.module.debug("Making RPC call to 'addHostGroup'") + resp = json.loads( + self.rpc("addHostGroup", h)) + + if resp["status"] == 200: + self.module.debug("RPC call succeeded") + return resp["data"]["id"] + elif resp["errmsg"] == "The record already exists": + self.module.debug("The hostgroup already exists") + group = self.get_group(fullpath) + return group["id"] + else: + self.module.debug("RPC call failed") + self.fail( + msg="Error: unable to create new hostgroup \"" + + name + "\".\n" + resp["errmsg"]) + + def fail(self, msg): + self.module.fail_json(msg=msg, changed=self.change, failed=True) + + def exit(self, changed): + self.module.debug("Changed: " + changed) + self.module.exit_json(changed=changed, success=True) + + def output_info(self, info): + self.module.debug("Registering properties as Ansible facts") + self.module.exit_json(changed=False, ansible_facts=info) + + +class Collector(LogicMonitor): + + def __init__(self, params, module=None): + """Initializor for the LogicMonitor Collector object""" + self.change = False + self.params = params + + LogicMonitor.__init__(self, module, **params) + 
self.module.debug("Instantiating Collector object") + + if self.params['description']: + self.description = self.params['description'] + else: + self.description = self.fqdn + + self.info = self._get() + self.installdir = "/usr/local/logicmonitor" + self.platform = platform.system() + self.is_64bits = sys.maxsize > 2**32 + self.duration = self.params['duration'] + self.starttime = self.params['starttime'] + + if self.info is None: + self.id = None + else: + self.id = self.info["id"] + + def create(self): + """Idempotent function to make sure that there is + a running collector installed and registered""" + self.module.debug("Running Collector.create...") + + self._create() + self.get_installer_binary() + self.install() + + def remove(self): + """Idempotent function to make sure that there is + not a running collector installed and registered""" + self.module.debug("Running Collector.destroy...") + + self._unreigster() + self.uninstall() + + def get_installer_binary(self): + """Download the LogicMonitor collector installer binary""" + self.module.debug("Running Collector.get_installer_binary...") + + arch = 32 + + if self.is_64bits: + self.module.debug("64 bit system") + arch = 64 + else: + self.module.debug("32 bit system") + + if self.platform == "Linux" and self.id is not None: + self.module.debug("Platform is Linux") + self.module.debug("Agent ID is " + str(self.id)) + + installfilepath = (self.installdir + + "/logicmonitorsetup" + + str(self.id) + "_" + str(arch) + + ".bin") + + self.module.debug("Looking for existing installer at " + + installfilepath) + if not os.path.isfile(installfilepath): + self.module.debug("No previous installer found") + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + self.module.debug("Downloading installer file") + # attempt to create the install dir before download + self.module.run_command("mkdir " + self.installdir) + + try: + f = open(installfilepath, "w") + installer = (self.do("logicmonitorsetup", + {"id": self.id, + "arch": arch})) + f.write(installer) + f.closed + except: + self.fail(msg="Unable to open installer file for writing") + f.closed + else: + self.module.debug("Collector installer already exists") + return installfilepath + + elif self.id is None: + self.fail( + msg="Error: There is currently no collector " + + "associated with this device. 
To download " + + " the installer, first create a collector " + + "for this device.") + elif self.platform != "Linux": + self.fail( + msg="Error: LogicMonitor Collector must be " + + "installed on a Linux device.") + else: + self.fail( + msg="Error: Unable to retrieve the installer from the server") + + def install(self): + """Execute the LogicMonitor installer if not + already installed""" + self.module.debug("Running Collector.install...") + + if self.platform == "Linux": + self.module.debug("Platform is Linux") + + installer = self.get_installer_binary() + + if self.info is None: + self.module.debug("Retriving collector information") + self.info = self._get() + + if not os.path.exists(self.installdir + "/agent"): + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + self.module.debug("Setting installer file permissions") + os.chmod(installer, 484) # decimal for 0o744 + + self.module.debug("Executing installer") + ret_code, out, err = self.module.run_command(installer + " -y") + + if ret_code != 0: + self.fail(msg="Error: Unable to install collector: " + err) + else: + self.module.debug("Collector installed successfully") + else: + self.module.debug("Collector already installed") + else: + self.fail( + msg="Error: LogicMonitor Collector must be " + + "installed on a Linux device") + + def uninstall(self): + """Uninstall LogicMontitor collector from the system""" + self.module.debug("Running Collector.uninstall...") + + uninstallfile = self.installdir + "/agent/bin/uninstall.pl" + + if os.path.isfile(uninstallfile): + self.module.debug("Collector uninstall file exists") + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + self.module.debug("Running collector uninstaller") + ret_code, out, err = self.module.run_command(uninstallfile) + + if ret_code != 0: + self.fail( + msg="Error: Unable to uninstall collector: " + err) + else: + self.module.debug("Collector successfully uninstalled") + else: + if os.path.exists(self.installdir + "/agent"): + (self.fail( + msg="Unable to uninstall LogicMonitor " + + "Collector. Can not find LogicMonitor " + + "uninstaller.")) + + def sdt(self): + """Create a scheduled down time + (maintenance window) for this host""" + self.module.debug("Running Collector.sdt...") + + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + duration = self.duration + starttime = self.starttime + offsetstart = starttime + + if starttime: + self.module.debug("Start time specified") + start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M') + offsetstart = start + else: + self.module.debug("No start time specified. 
Using default.") + start = datetime.datetime.utcnow() + + # Use user UTC offset + self.module.debug("Making RPC call to 'getTimeZoneSetting'") + accountresp = json.loads(self.rpc("getTimeZoneSetting", {})) + + if accountresp["status"] == 200: + self.module.debug("RPC call succeeded") + + offset = accountresp["data"]["offset"] + offsetstart = start + datetime.timedelta(0, offset) + else: + self.fail(msg="Error: Unable to retrieve timezone offset") + + offsetend = offsetstart + datetime.timedelta(0, int(duration)*60) + + h = {"agentId": self.id, + "type": 1, + "notifyCC": True, + "year": offsetstart.year, + "month": offsetstart.month-1, + "day": offsetstart.day, + "hour": offsetstart.hour, + "minute": offsetstart.minute, + "endYear": offsetend.year, + "endMonth": offsetend.month-1, + "endDay": offsetend.day, + "endHour": offsetend.hour, + "endMinute": offsetend.minute} + + self.module.debug("Making RPC call to 'setAgentSDT'") + resp = json.loads(self.rpc("setAgentSDT", h)) + + if resp["status"] == 200: + self.module.debug("RPC call succeeded") + return resp["data"] + else: + self.module.debug("RPC call failed") + self.fail(msg=resp["errmsg"]) + + def site_facts(self): + """Output current properties information for the Collector""" + self.module.debug("Running Collector.site_facts...") + + if self.info: + self.module.debug("Collector exists") + props = self.get_properties(True) + + self.output_info(props) + else: + self.fail(msg="Error: Collector doesn't exit.") + + def _get(self): + """Returns a JSON object representing this collector""" + self.module.debug("Running Collector._get...") + collector_list = self.get_collectors() + + if collector_list is not None: + self.module.debug("Collectors returned") + for collector in collector_list: + if collector["description"] == self.description: + return collector + else: + self.module.debug("No collectors returned") + return None + + def _create(self): + """Create a new collector in the associated + LogicMonitor account""" + self.module.debug("Running Collector._create...") + + if self.platform == "Linux": + self.module.debug("Platform is Linux") + ret = self.info or self._get() + + if ret is None: + self.change = True + self.module.debug("System changed") + + if self.check_mode: + self.exit(changed=True) + + h = {"autogen": True, + "description": self.description} + + self.module.debug("Making RPC call to 'addAgent'") + create = (json.loads(self.rpc("addAgent", h))) + + if create["status"] is 200: + self.module.debug("RPC call succeeded") + self.info = create["data"] + self.id = create["data"]["id"] + return create["data"] + else: + self.fail(msg=create["errmsg"]) + else: + self.info = ret + self.id = ret["id"] + return ret + else: + self.fail( + msg="Error: LogicMonitor Collector must be " + + "installed on a Linux device.") + + def _unreigster(self): + """Delete this collector from the associated + LogicMonitor account""" + self.module.debug("Running Collector._unreigster...") + + if self.info is None: + self.module.debug("Retrieving collector information") + self.info = self._get() + + if self.info is not None: + self.module.debug("Collector found") + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + self.module.debug("Making RPC call to 'deleteAgent'") + delete = json.loads(self.rpc("deleteAgent", + {"id": self.id})) + + if delete["status"] is 200: + self.module.debug("RPC call succeeded") + return delete + else: + # The collector couldn't unregister. 
+                self.module.debug("Error unregistering collector. " +
+                                  delete["errmsg"])
+                self.fail(msg=delete["errmsg"])
+        else:
+            self.module.debug("Collector not found")
+            return None
+
+
+class Host(LogicMonitor):
+
+    def __init__(self, params, module=None):
+        """Initializer for the LogicMonitor host object"""
+        self.change = False
+        self.params = params
+        self.collector = None
+
+        LogicMonitor.__init__(self, module, **self.params)
+        self.module.debug("Instantiating Host object")
+
+        if self.params["hostname"]:
+            self.module.debug("Hostname is " + self.params["hostname"])
+            self.hostname = self.params['hostname']
+        else:
+            self.module.debug("No hostname specified. Using " + self.fqdn)
+            self.hostname = self.fqdn
+
+        if self.params["displayname"]:
+            self.module.debug("Display name is " + self.params["displayname"])
+            self.displayname = self.params['displayname']
+        else:
+            self.module.debug("No display name specified. Using " + self.fqdn)
+            self.displayname = self.fqdn
+
+        # Attempt to find host information via the display name or host name
+        self.module.debug("Attempting to find host by displayname " +
+                          self.displayname)
+        info = self.get_host_by_displayname(self.displayname)
+
+        if info is not None:
+            self.module.debug("Host found by displayname")
+            # Use the host information to grab the collector description
+            # if not provided
+            if (self.params.get("collector") is None and
+               "agentDescription" in info):
+                self.module.debug("Setting collector from host response. " +
+                                  "Collector " + info["agentDescription"])
+                self.params["collector"] = info["agentDescription"]
+        else:
+            self.module.debug("Host not found by displayname")
+
+        # At this point, a valid collector description is required for success
+        # Check that the description exists or fail
+        if self.params["collector"]:
+            self.module.debug(
+                "Collector specified is " +
+                self.params["collector"]
+            )
+            self.collector = (self.get_collector_by_description(
+                self.params["collector"]))
+        else:
+            self.fail(msg="No collector specified.")
+
+        # If the host wasn't found via displayname, attempt by hostname
+        if info is None:
+            self.module.debug("Attempting to find host by hostname " +
+                              self.hostname)
+            info = self.get_host_by_hostname(self.hostname, self.collector)
+
+        self.info = info
+        self.properties = self.params["properties"]
+        self.description = self.params["description"]
+        self.starttime = self.params["starttime"]
+        self.duration = self.params["duration"]
+        self.alertenable = self.params["alertenable"]
+        if self.params["groups"] is not None:
+            self.groups = self._strip_groups(self.params["groups"])
+        else:
+            self.groups = None
+
+    def create(self):
+        """Idempotent function to create if missing,
+        update if changed, or skip"""
+        self.module.debug("Running Host.create...")
+
+        self.update()
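+
+    # get_properties() returns custom (non-system) properties only:
+    # filterSystemProperties=True strips LogicMonitor's system.* values
+    # from the getHostProperties response.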
matches " + + self.displayname + " (" + self.hostname + ")" + ) + return None + + def set_properties(self, propertyhash): + """update the host to have the properties + contained in the property hash""" + self.module.debug("Running Host.set_properties...") + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + self.module.debug("Assigning property hash to host object") + self.properties = propertyhash + + def add(self): + """Add this device to monitoring + in your LogicMonitor account""" + self.module.debug("Running Host.add...") + + if self.collector and not self.info: + self.module.debug("Host not registered. Registering.") + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + h = self._build_host_hash( + self.hostname, + self.displayname, + self.collector, + self.description, + self.groups, + self.properties, + self.alertenable) + + self.module.debug("Making RPC call to 'addHost'") + resp = json.loads(self.rpc("addHost", h)) + + if resp["status"] == 200: + self.module.debug("RPC call succeeded") + return resp["data"] + else: + self.module.debug("RPC call failed") + self.module.debug(resp) + return resp["errmsg"] + elif self.collector is None: + self.fail(msg="Specified collector doesn't exist") + else: + self.module.debug("Host already registered") + + def update(self): + """This method takes changes made to this host + and applies them to the corresponding host + in your LogicMonitor account.""" + self.module.debug("Running Host.update...") + + if self.info: + self.module.debug("Host already registed") + if self.is_changed(): + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + h = (self._build_host_hash( + self.hostname, + self.displayname, + self.collector, + self.description, + self.groups, + self.properties, + self.alertenable)) + h["id"] = self.info["id"] + h["opType"] = "replace" + + self.module.debug("Making RPC call to 'updateHost'") + resp = json.loads(self.rpc("updateHost", h)) + + if resp["status"] == 200: + self.module.debug("RPC call succeeded") + else: + self.module.debug("RPC call failed") + self.fail(msg="Error: unable to update the host.") + else: + self.module.debug( + "Host properties match supplied properties. " + + "No changes to make." + ) + return self.info + else: + self.module.debug("Host not registed. 
Registering") + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + return self.add() + + def remove(self): + """Remove this host from your LogicMonitor account""" + self.module.debug("Running Host.remove...") + + if self.info: + self.module.debug("Host registered") + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + self.module.debug("Making RPC call to 'deleteHost'") + resp = json.loads(self.rpc("deleteHost", + {"hostId": self.info["id"], + "deleteFromSystem": True, + "hostGroupId": 1})) + + if resp["status"] == 200: + self.module.debug(resp) + self.module.debug("RPC call succeeded") + return resp + else: + self.module.debug("RPC call failed") + self.module.debug(resp) + self.fail(msg=resp["errmsg"]) + + else: + self.module.debug("Host not registered") + + def is_changed(self): + """Return true if the host doesn't + match the LogicMonitor account""" + self.module.debug("Running Host.is_changed") + + ignore = ['system.categories', 'snmp.version'] + + hostresp = self.get_host_by_displayname(self.displayname) + + if hostresp is None: + hostresp = self.get_host_by_hostname(self.hostname, self.collector) + + if hostresp: + self.module.debug("Comparing simple host properties") + if hostresp["alertEnable"] != self.alertenable: + return True + + if hostresp["description"] != self.description: + return True + + if hostresp["displayedAs"] != self.displayname: + return True + + if (self.collector and + hasattr(self.collector, "id") and + hostresp["agentId"] != self.collector["id"]): + return True + + self.module.debug("Comparing groups.") + if self._compare_groups(hostresp) is True: + return True + + propresp = self.get_properties() + + if propresp: + self.module.debug("Comparing properties.") + if self._compare_props(propresp, ignore) is True: + return True + else: + self.fail( + msg="Error: Unknown error retrieving host properties") + + return False + else: + self.fail(msg="Error: Unknown error retrieving host information") + + def sdt(self): + """Create a scheduled down time + (maintenance window) for this host""" + self.module.debug("Running Host.sdt...") + if self.info: + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + duration = self.duration + starttime = self.starttime + offset = starttime + + if starttime: + self.module.debug("Start time specified") + start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M') + offsetstart = start + else: + self.module.debug("No start time specified. 
Using default.") + start = datetime.datetime.utcnow() + + # Use user UTC offset + self.module.debug("Making RPC call to 'getTimeZoneSetting'") + accountresp = (json.loads(self.rpc("getTimeZoneSetting", {}))) + + if accountresp["status"] == 200: + self.module.debug("RPC call succeeded") + + offset = accountresp["data"]["offset"] + offsetstart = start + datetime.timedelta(0, offset) + else: + self.fail( + msg="Error: Unable to retrieve timezone offset") + + offsetend = offsetstart + datetime.timedelta(0, int(duration)*60) + + h = {"hostId": self.info["id"], + "type": 1, + "year": offsetstart.year, + "month": offsetstart.month - 1, + "day": offsetstart.day, + "hour": offsetstart.hour, + "minute": offsetstart.minute, + "endYear": offsetend.year, + "endMonth": offsetend.month - 1, + "endDay": offsetend.day, + "endHour": offsetend.hour, + "endMinute": offsetend.minute} + + self.module.debug("Making RPC call to 'setHostSDT'") + resp = (json.loads(self.rpc("setHostSDT", h))) + + if resp["status"] == 200: + self.module.debug("RPC call succeeded") + return resp["data"] + else: + self.module.debug("RPC call failed") + self.fail(msg=resp["errmsg"]) + else: + self.fail(msg="Error: Host doesn't exit.") + + def site_facts(self): + """Output current properties information for the Host""" + self.module.debug("Running Host.site_facts...") + + if self.info: + self.module.debug("Host exists") + props = self.get_properties() + + self.output_info(props) + else: + self.fail(msg="Error: Host doesn't exit.") + + def _build_host_hash(self, + hostname, + displayname, + collector, + description, + groups, + properties, + alertenable): + """Return a property formated hash for the + creation of a host using the rpc function""" + self.module.debug("Running Host._build_host_hash...") + + h = {} + h["hostName"] = hostname + h["displayedAs"] = displayname + h["alertEnable"] = alertenable + + if collector: + self.module.debug("Collector property exists") + h["agentId"] = collector["id"] + else: + self.fail( + msg="Error: No collector found. Unable to build host hash.") + + if description: + h["description"] = description + + if groups is not None and groups is not []: + self.module.debug("Group property exists") + groupids = "" + + for group in groups: + groupids = groupids + str(self.create_group(group)) + "," + + h["hostGroupIds"] = groupids.rstrip(',') + + if properties is not None and properties is not {}: + self.module.debug("Properties hash exists") + propnum = 0 + for key, value in properties.iteritems(): + h["propName" + str(propnum)] = key + h["propValue" + str(propnum)] = value + propnum = propnum + 1 + + return h + + def _verify_property(self, propname): + """Check with LogicMonitor server to + verify property is unchanged""" + self.module.debug("Running Host._verify_property...") + + if self.info: + self.module.debug("Host is registered") + if propname not in self.properties: + self.module.debug("Property " + propname + " does not exist") + return False + else: + self.module.debug("Property " + propname + " exists") + h = {"hostId": self.info["id"], + "propName0": propname, + "propValue0": self.properties[propname]} + + self.module.debug("Making RCP call to 'verifyProperties'") + resp = json.loads(self.rpc('verifyProperties', h)) + + if resp["status"] == 200: + self.module.debug("RPC call succeeded") + return resp["data"]["match"] + else: + self.fail( + msg="Error: unable to get verification " + + "from server.\n%s" % resp["errmsg"]) + else: + self.fail( + msg="Error: Host doesn't exist. 
Unable to verify properties") + + def _compare_groups(self, hostresp): + """Function to compare the host's current + groups against provided groups""" + self.module.debug("Running Host._compare_groups") + + g = [] + fullpathinids = hostresp["fullPathInIds"] + self.module.debug("Building list of groups") + for path in fullpathinids: + if path != []: + h = {'hostGroupId': path[-1]} + + hgresp = json.loads(self.rpc("getHostGroup", h)) + + if (hgresp["status"] == 200 and + hgresp["data"]["appliesTo"] == ""): + + g.append(path[-1]) + + if self.groups is not None: + self.module.debug("Comparing group lists") + for group in self.groups: + groupjson = self.get_group(group) + + if groupjson is None: + self.module.debug("Group mismatch. No result.") + return True + elif groupjson['id'] not in g: + self.module.debug("Group mismatch. ID doesn't exist.") + return True + else: + g.remove(groupjson['id']) + + if g != []: + self.module.debug("Group mismatch. New ID exists.") + return True + self.module.debug("Groups match") + + def _compare_props(self, propresp, ignore): + """Function to compare the host's current + properties against provided properties""" + self.module.debug("Running Host._compare_props...") + p = {} + + self.module.debug("Creating list of properties") + for prop in propresp: + if prop["name"] not in ignore: + if ("*******" in prop["value"] and + self._verify_property(prop["name"])): + p[prop["name"]] = self.properties[prop["name"]] + else: + p[prop["name"]] = prop["value"] + + self.module.debug("Comparing properties") + # Iterate provided properties and compare to received properties + for prop in self.properties: + if (prop not in p or + p[prop] != self.properties[prop]): + self.module.debug("Properties mismatch") + return True + self.module.debug("Properties match") + + def _strip_groups(self, groups): + """Function to strip whitespace from group list. + This function provides the user some flexibility when + formatting group arguments """ + self.module.debug("Running Host._strip_groups...") + return map(lambda x: x.strip(), groups) + + +class Datasource(LogicMonitor): + + def __init__(self, params, module=None): + """Initializor for the LogicMonitor Datasource object""" + self.change = False + self.params = params + + LogicMonitor.__init__(self, module, **params) + self.module.debug("Instantiating Datasource object") + + self.id = self.params["id"] + self.starttime = self.params["starttime"] + self.duration = self.params["duration"] + + def sdt(self): + """Create a scheduled down time + (maintenance window) for this host""" + self.module.debug("Running Datasource.sdt...") + + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + duration = self.duration + starttime = self.starttime + offsetstart = starttime + + if starttime: + self.module.debug("Start time specified") + start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M') + offsetstart = start + else: + self.module.debug("No start time specified. 
Using default.") + start = datetime.datetime.utcnow() + + # Use user UTC offset + self.module.debug("Making RPC call to 'getTimeZoneSetting'") + accountresp = json.loads(self.rpc("getTimeZoneSetting", {})) + + if accountresp["status"] == 200: + self.module.debug("RPC call succeeded") + + offset = accountresp["data"]["offset"] + offsetstart = start + datetime.timedelta(0, offset) + else: + self.fail(msg="Error: Unable to retrieve timezone offset") + + offsetend = offsetstart + datetime.timedelta(0, int(duration)*60) + + h = {"hostDataSourceId": self.id, + "type": 1, + "notifyCC": True, + "year": offsetstart.year, + "month": offsetstart.month-1, + "day": offsetstart.day, + "hour": offsetstart.hour, + "minute": offsetstart.minute, + "endYear": offsetend.year, + "endMonth": offsetend.month-1, + "endDay": offsetend.day, + "endHour": offsetend.hour, + "endMinute": offsetend.minute} + + self.module.debug("Making RPC call to 'setHostDataSourceSDT'") + resp = json.loads(self.rpc("setHostDataSourceSDT", h)) + + if resp["status"] == 200: + self.module.debug("RPC call succeeded") + return resp["data"] + else: + self.module.debug("RPC call failed") + self.fail(msg=resp["errmsg"]) + + +class Hostgroup(LogicMonitor): + + def __init__(self, params, module=None): + """Initializor for the LogicMonitor host object""" + self.change = False + self.params = params + + LogicMonitor.__init__(self, module, **self.params) + self.module.debug("Instantiating Hostgroup object") + + self.fullpath = self.params["fullpath"] + self.info = self.get_group(self.fullpath) + self.properties = self.params["properties"] + self.description = self.params["description"] + self.starttime = self.params["starttime"] + self.duration = self.params["duration"] + self.alertenable = self.params["alertenable"] + + def create(self): + """Wrapper for self.update()""" + self.module.debug("Running Hostgroup.create...") + self.update() + + def get_properties(self, final=False): + """Returns a hash of the properties + associated with this LogicMonitor host""" + self.module.debug("Running Hostgroup.get_properties...") + + if self.info: + self.module.debug("Group found") + + self.module.debug("Making RPC call to 'getHostGroupProperties'") + properties_json = json.loads(self.rpc( + "getHostGroupProperties", + {'hostGroupId': self.info["id"], + "finalResult": final})) + + if properties_json["status"] == 200: + self.module.debug("RPC call succeeded") + return properties_json["data"] + else: + self.module.debug("RPC call failed") + self.fail(msg=properties_json["status"]) + else: + self.module.debug("Group not found") + return None + + def set_properties(self, propertyhash): + """Update the host to have the properties + contained in the property hash""" + self.module.debug("Running Hostgroup.set_properties") + + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + self.module.debug("Assigning property has to host object") + self.properties = propertyhash + + def add(self): + """Idempotent function to ensure that the host + group exists in your LogicMonitor account""" + self.module.debug("Running Hostgroup.add") + + if self.info is None: + self.module.debug("Group doesn't exist. 
Creating.") + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + self.create_group(self.fullpath) + self.info = self.get_group(self.fullpath) + + self.module.debug("Group created") + return self.info + else: + self.module.debug("Group already exists") + + def update(self): + """Idempotent function to ensure the host group settings + (alertenable, properties, etc) in the + LogicMonitor account match the current object.""" + self.module.debug("Running Hostgroup.update") + + if self.info: + if self.is_changed(): + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + h = self._build_host_group_hash( + self.fullpath, + self.description, + self.properties, + self.alertenable) + h["opType"] = "replace" + + if self.fullpath != "/": + h["id"] = self.info["id"] + + self.module.debug("Making RPC call to 'updateHostGroup'") + resp = json.loads(self.rpc("updateHostGroup", h)) + + if resp["status"] == 200: + self.module.debug("RPC call succeeded") + return resp["data"] + else: + self.module.debug("RPC call failed") + self.fail(msg="Error: Unable to update the " + + "host.\n" + resp["errmsg"]) + else: + self.module.debug( + "Group properties match supplied properties. " + + "No changes to make" + ) + return self.info + else: + self.module.debug("Group doesn't exist. Creating.") + + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + return self.add() + + def remove(self): + """Idempotent function to ensure the host group + does not exist in your LogicMonitor account""" + self.module.debug("Running Hostgroup.remove...") + + if self.info: + self.module.debug("Group exists") + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + self.module.debug("Making RPC call to 'deleteHostGroup'") + resp = json.loads(self.rpc("deleteHostGroup", + {"hgId": self.info["id"]})) + + if resp["status"] == 200: + self.module.debug(resp) + self.module.debug("RPC call succeeded") + return resp + elif resp["errmsg"] == "No such group": + self.module.debug("Group doesn't exist") + else: + self.module.debug("RPC call failed") + self.module.debug(resp) + self.fail(msg=resp["errmsg"]) + else: + self.module.debug("Group doesn't exist") + + def is_changed(self): + """Return true if the host doesn't match + the LogicMonitor account""" + self.module.debug("Running Hostgroup.is_changed...") + + ignore = [] + group = self.get_group(self.fullpath) + properties = self.get_properties() + + if properties is not None and group is not None: + self.module.debug("Comparing simple group properties") + if (group["alertEnable"] != self.alertenable or + group["description"] != self.description): + + return True + + p = {} + + self.module.debug("Creating list of properties") + for prop in properties: + if prop["name"] not in ignore: + if ("*******" in prop["value"] and + self._verify_property(prop["name"])): + + p[prop["name"]] = ( + self.properties[prop["name"]]) + else: + p[prop["name"]] = prop["value"] + + self.module.debug("Comparing properties") + if set(p) != set(self.properties): + return True + else: + self.module.debug("No property information received") + return False + + def sdt(self, duration=30, starttime=None): + """Create a scheduled down time + (maintenance window) for this host""" + self.module.debug("Running Hostgroup.sdt") + + self.module.debug("System changed") + self.change = True + + if 
+            self.exit(changed=True)
+
+        duration = self.duration
+        starttime = self.starttime
+        offset = starttime
+
+        if starttime:
+            self.module.debug("Start time specified")
+            start = datetime.datetime.strptime(starttime, '%Y-%m-%d %H:%M')
+            offsetstart = start
+        else:
+            self.module.debug("No start time specified. Using default.")
+            start = datetime.datetime.utcnow()
+
+            # Use user UTC offset
+            self.module.debug("Making RPC call to 'getTimeZoneSetting'")
+            accountresp = json.loads(self.rpc("getTimeZoneSetting", {}))
+
+            if accountresp["status"] == 200:
+                self.module.debug("RPC call succeeded")
+
+                offset = accountresp["data"]["offset"]
+                offsetstart = start + datetime.timedelta(0, offset)
+            else:
+                self.fail(
+                    msg="Error: Unable to retrieve timezone offset")
+
+        offsetend = offsetstart + datetime.timedelta(0, int(duration)*60)
+
+        h = {"hostGroupId": self.info["id"],
+             "type": 1,
+             "year": offsetstart.year,
+             "month": offsetstart.month-1,
+             "day": offsetstart.day,
+             "hour": offsetstart.hour,
+             "minute": offsetstart.minute,
+             "endYear": offsetend.year,
+             "endMonth": offsetend.month-1,
+             "endDay": offsetend.day,
+             "endHour": offsetend.hour,
+             "endMinute": offsetend.minute}
+
+        self.module.debug("Making RPC call to setHostGroupSDT")
+        resp = json.loads(self.rpc("setHostGroupSDT", h))
+
+        if resp["status"] == 200:
+            self.module.debug("RPC call succeeded")
+            return resp["data"]
+        else:
+            self.module.debug("RPC call failed")
+            self.fail(msg=resp["errmsg"])
+
+    def site_facts(self):
+        """Output current properties information for the Hostgroup"""
+        self.module.debug("Running Hostgroup.site_facts...")
+
+        if self.info:
+            self.module.debug("Group exists")
+            props = self.get_properties(True)
+
+            self.output_info(props)
+        else:
+            self.fail(msg="Error: Group doesn't exist.")
+
+    def _build_host_group_hash(self,
+                               fullpath,
+                               description,
+                               properties,
+                               alertenable):
+        """Return a properly formatted hash for the
+        creation of a hostgroup using the rpc function"""
+        self.module.debug("Running Hostgroup._build_host_group_hash")
+
+        h = {}
+        h["alertEnable"] = alertenable
+
+        if fullpath == "/":
+            self.module.debug("Group is root")
+            h["id"] = 1
+        else:
+            self.module.debug("Determining group path")
+            parentpath, name = fullpath.rsplit('/', 1)
+            parent = self.get_group(parentpath)
+
+            h["name"] = name
+
+            if parent:
+                self.module.debug("Parent group " +
+                                  str(parent["id"]) + " found.")
+                h["parentID"] = parent["id"]
+            else:
+                self.module.debug("No parent group found. Using root.")
+                h["parentID"] = 1
+
+        if description:
+            self.module.debug("Description property exists")
+            h["description"] = description
+
+        if properties != {}:
+            self.module.debug("Properties hash exists")
+            propnum = 0
+            for key, value in properties.iteritems():
+                h["propName" + str(propnum)] = key
+                h["propValue" + str(propnum)] = value
+                propnum = propnum + 1
+
+        return h
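+
+    # _verify_property mirrors Host._verify_property: masked values
+    # ("*******") can't be compared locally, so the verifyProperties RPC
+    # asks the server whether the stored value still matches.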
Using root.") + h["parentID"] = 1 + + if description: + self.module.debug("Description property exists") + h["description"] = description + + if properties != {}: + self.module.debug("Properties hash exists") + propnum = 0 + for key, value in properties.iteritems(): + h["propName" + str(propnum)] = key + h["propValue" + str(propnum)] = value + propnum = propnum + 1 + + return h + + def _verify_property(self, propname): + """Check with LogicMonitor server + to verify property is unchanged""" + self.module.debug("Running Hostgroup._verify_property") + + if self.info: + self.module.debug("Group exists") + if propname not in self.properties: + self.module.debug("Property " + propname + " does not exist") + return False + else: + self.module.debug("Property " + propname + " exists") + h = {"hostGroupId": self.info["id"], + "propName0": propname, + "propValue0": self.properties[propname]} + + self.module.debug("Making RCP call to 'verifyProperties'") + resp = json.loads(self.rpc('verifyProperties', h)) + + if resp["status"] == 200: + self.module.debug("RPC call succeeded") + return resp["data"]["match"] + else: + self.fail( + msg="Error: unable to get verification " + + "from server.\n%s" % resp["errmsg"]) + else: + self.fail( + msg="Error: Group doesn't exist. Unable to verify properties") + + +def selector(module): + """Figure out which object and which actions + to take given the right parameters""" + + if module.params["target"] == "collector": + target = Collector(module.params, module) + elif module.params["target"] == "host": + # Make sure required parameter collector is specified + if ((module.params["action"] == "add" or + module.params["displayname"] is None) and + module.params["collector"] is None): + module.fail_json( + msg="Parameter 'collector' required.") + + target = Host(module.params, module) + elif module.params["target"] == "datasource": + # Validate target specific required parameters + if module.params["id"] is not None: + # make sure a supported action was specified + if module.params["action"] == "sdt": + target = Datasource(module.params, module) + else: + errmsg = ("Error: Unexpected action \"" + + module.params["action"] + "\" was specified.") + module.fail_json(msg=errmsg) + + elif module.params["target"] == "hostgroup": + # Validate target specific required parameters + if module.params["fullpath"] is not None: + target = Hostgroup(module.params, module) + else: + module.fail_json( + msg="Parameter 'fullpath' required for target 'hostgroup'") + else: + module.fail_json( + msg="Error: Unexpected target \"" + module.params["target"] + + "\" was specified.") + + if module.params["action"].lower() == "add": + action = target.create + elif module.params["action"].lower() == "remove": + action = target.remove + elif module.params["action"].lower() == "sdt": + action = target.sdt + elif module.params["action"].lower() == "update": + action = target.update + else: + errmsg = ("Error: Unexpected action \"" + module.params["action"] + + "\" was specified.") + module.fail_json(msg=errmsg) + + action() + module.exit_json(changed=target.change) + + +def main(): + TARGETS = [ + "collector", + "host", + "datasource", + "hostgroup"] + + ACTIONS = [ + "add", + "remove", + "sdt", + "update"] + + module = AnsibleModule( + argument_spec=dict( + target=dict(required=True, default=None, choices=TARGETS), + action=dict(required=True, default=None, choices=ACTIONS), + company=dict(required=True, default=None), + user=dict(required=True, default=None), + password=dict(required=True, 
default=None, no_log=True), + + collector=dict(required=False, default=None), + hostname=dict(required=False, default=None), + displayname=dict(required=False, default=None), + id=dict(required=False, default=None), + description=dict(required=False, default=""), + fullpath=dict(required=False, default=None), + starttime=dict(required=False, default=None), + duration=dict(required=False, default=30), + properties=dict(required=False, default={}, type="dict"), + groups=dict(required=False, default=[], type="list"), + alertenable=dict(required=False, default="true", choices=BOOLEANS) + ), + supports_check_mode=True + ) + + if HAS_LIB_JSON is not True: + module.fail_json(msg="Unable to load JSON library") + + selector(module) + + +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +from ansible.module_utils.urls import open_url + + +if __name__ == "__main__": + main() diff --git a/monitoring/logicmonitor_facts.py b/monitoring/logicmonitor_facts.py new file mode 100644 index 00000000000..5ade901a76a --- /dev/null +++ b/monitoring/logicmonitor_facts.py @@ -0,0 +1,638 @@ +#!/usr/bin/python + +"""LogicMonitor Ansible module for managing Collectors, Hosts and Hostgroups + Copyright (C) 2015 LogicMonitor + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software Foundation, + Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA""" + + +import socket +import types +import urllib + +HAS_LIB_JSON = True +try: + import json + # Detect the python-json library which is incompatible + # Look for simplejson if that's the case + try: + if ( + not isinstance(json.loads, types.FunctionType) or + not isinstance(json.dumps, types.FunctionType) + ): + raise ImportError + except AttributeError: + raise ImportError +except ImportError: + try: + import simplejson as json + except ImportError: + print( + '\n{"msg": "Error: ansible requires the stdlib json or ' + + 'simplejson module, neither was found!", "failed": true}' + ) + HAS_LIB_JSON = False + except SyntaxError: + print( + '\n{"msg": "SyntaxError: probably due to installed simplejson ' + + 'being for a different python version", "failed": true}' + ) + HAS_LIB_JSON = False + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: logicmonitor_facts +short_description: Collect facts about LogicMonitor objects +description: + - LogicMonitor is a hosted, full-stack, infrastructure monitoring platform. + - This module collects facts about hosts and host groups within your LogicMonitor account. +version_added: "2.2" +author: [Ethan Culler-Mayeno (@ethanculler), Jeff Wozniak (@woz5999)] +notes: + - You must have an existing LogicMonitor account for this module to function. +requirements: ["An existing LogicMonitor account", "Linux"] +options: + target: + description: + - The LogicMonitor object you wish to query. 
+ required: true + default: null + choices: ['host', 'hostgroup'] + company: + description: + - The LogicMonitor account company name. If you would log in to your account at "superheroes.logicmonitor.com" you would use "superheroes". + required: true + default: null + user: + description: + - A LogicMonitor user name. The module will authenticate and perform actions on behalf of this user. + required: true + default: null + password: + description: + - The password for the chosen LogicMonitor User. + - If an md5 hash is used, the digest flag must be set to true. + required: true + default: null + collector: + description: + - The fully qualified domain name of a collector in your LogicMonitor account. + - This is optional for querying a LogicMonitor host when a displayname is specified. + - This is required for querying a LogicMonitor host when a displayname is not specified. + required: false + default: null + hostname: + description: + - The hostname of a host in your LogicMonitor account. + - Used for querying hosts (target=host). + required: false + default: 'hostname -f' + displayname: + description: + - The display name of a host in your LogicMonitor account. + required: false + default: 'hostname -f' + fullpath: + description: + - The fullpath of the hostgroup object you would like to query. + - Recommended to run on a single Ansible host. + - Required for querying LogicMonitor host groups (target=hostgroup). + required: false + default: null +... +''' + +EXAMPLES = ''' +# example of querying a list of hosts +``` +--- +- hosts: hosts + user: root + vars: + company: 'yourcompany' + user: 'Luigi' + password: 'ImaLuigi,number1!' + tasks: + - name: query a list of hosts + # All tasks should use local_action + local_action: + logicmonitor_facts: + target: host + company: '{{ company }}' + user: '{{ user }}' + password: '{{ password }}' +``` + +# example of querying a hostgroup +``` +--- +- hosts: somemachine.superheroes.com + user: root + vars: + company: 'yourcompany' + user: 'mario' + password: 'itsame.Mario!' + tasks: + - name: query a host group + # All tasks should use local_action + local_action: + logicmonitor_facts: + target: hostgroup + fullpath: '/servers/production' + company: '{{ company }}' + user: '{{ user }}' + password: '{{ password }}' +``` +''' + + +RETURN = ''' +--- + ansible_facts: + description: LogicMonitor properties set for the specified object + returned: success + type: list of dicts containing name/value pairs + example: > + { + "name": "dc", + "value": "1" + }, + { + "name": "type", + "value": "prod" + }, + { + "name": "system.categories", + "value": "" + }, + { + "name": "snmp.community", + "value": "********" + } +... 
+''' + + +class LogicMonitor(object): + + def __init__(self, module, **params): + self.__version__ = "1.0-python" + self.module = module + self.module.debug("Instantiating LogicMonitor object") + + self.check_mode = False + self.company = params["company"] + self.user = params["user"] + self.password = params["password"] + self.fqdn = socket.getfqdn() + self.lm_url = "logicmonitor.com/santaba" + self.__version__ = self.__version__ + "-ansible-module" + + def rpc(self, action, params): + """Make a call to the LogicMonitor RPC library + and return the response""" + self.module.debug("Running LogicMonitor.rpc") + + param_str = urllib.urlencode(params) + creds = urllib.urlencode( + {"c": self.company, + "u": self.user, + "p": self.password}) + + if param_str: + param_str = param_str + "&" + + param_str = param_str + creds + + try: + url = ("https://" + self.company + "." + self.lm_url + + "/rpc/" + action + "?" + param_str) + + # Set custom LogicMonitor header with version + headers = {"X-LM-User-Agent": self.__version__} + + # Set headers + f = open_url(url, headers=headers) + + raw = f.read() + resp = json.loads(raw) + if resp["status"] == 403: + self.module.debug("Authentication failed.") + self.fail(msg="Error: " + resp["errmsg"]) + else: + return raw + except IOError: + ioe = get_exception() + self.fail(msg="Error: Exception making RPC call to " + + "https://" + self.company + "." + self.lm_url + + "/rpc/" + action + "\nException: " + str(ioe)) + + def get_collectors(self): + """Returns a JSON object containing a list of + LogicMonitor collectors""" + self.module.debug("Running LogicMonitor.get_collectors...") + + self.module.debug("Making RPC call to 'getAgents'") + resp = self.rpc("getAgents", {}) + resp_json = json.loads(resp) + + if resp_json["status"] == 200: + self.module.debug("RPC call succeeded") + return resp_json["data"] + else: + self.fail(msg=resp) + + def get_host_by_hostname(self, hostname, collector): + """Returns a host object for the host matching the + specified hostname""" + self.module.debug("Running LogicMonitor.get_host_by_hostname...") + + self.module.debug("Looking for hostname " + hostname) + self.module.debug("Making RPC call to 'getHosts'") + hostlist_json = json.loads(self.rpc("getHosts", {"hostGroupId": 1})) + + if collector: + if hostlist_json["status"] == 200: + self.module.debug("RPC call succeeded") + + hosts = hostlist_json["data"]["hosts"] + + self.module.debug( + "Looking for host matching: hostname " + hostname + + " and collector " + str(collector["id"])) + + for host in hosts: + if (host["hostName"] == hostname and + host["agentId"] == collector["id"]): + + self.module.debug("Host match found") + return host + self.module.debug("No host match found") + return None + else: + self.module.debug("RPC call failed") + self.module.debug(hostlist_json) + else: + self.module.debug("No collector specified") + return None + + def get_host_by_displayname(self, displayname): + """Returns a host object for the host matching the + specified display name""" + self.module.debug("Running LogicMonitor.get_host_by_displayname...") + + self.module.debug("Looking for displayname " + displayname) + self.module.debug("Making RPC call to 'getHost'") + host_json = (json.loads(self.rpc("getHost", + {"displayName": displayname}))) + + if host_json["status"] == 200: + self.module.debug("RPC call succeeded") + return host_json["data"] + else: + self.module.debug("RPC call failed") + self.module.debug(host_json) + return None + + def get_collector_by_description(self, description): 
+ """Returns a JSON collector object for the collector + matching the specified FQDN (description)""" + self.module.debug( + "Running LogicMonitor.get_collector_by_description..." + ) + + collector_list = self.get_collectors() + if collector_list is not None: + self.module.debug("Looking for collector with description " + + description) + for collector in collector_list: + if collector["description"] == description: + self.module.debug("Collector match found") + return collector + self.module.debug("No collector match found") + return None + + def get_group(self, fullpath): + """Returns a JSON group object for the group matching the + specified path""" + self.module.debug("Running LogicMonitor.get_group...") + + self.module.debug("Making RPC call to getHostGroups") + resp = json.loads(self.rpc("getHostGroups", {})) + + if resp["status"] == 200: + self.module.debug("RPC called succeeded") + groups = resp["data"] + + self.module.debug("Looking for group matching " + fullpath) + for group in groups: + if group["fullPath"] == fullpath.lstrip('/'): + self.module.debug("Group match found") + return group + + self.module.debug("No group match found") + return None + else: + self.module.debug("RPC call failed") + self.module.debug(resp) + + return None + + def create_group(self, fullpath): + """Recursively create a path of host groups. + Returns the id of the newly created hostgroup""" + self.module.debug("Running LogicMonitor.create_group...") + + res = self.get_group(fullpath) + if res: + self.module.debug("Group " + fullpath + " exists.") + return res["id"] + + if fullpath == "/": + self.module.debug("Specified group is root. Doing nothing.") + return 1 + else: + self.module.debug("Creating group named " + fullpath) + self.module.debug("System changed") + self.change = True + + if self.check_mode: + self.exit(changed=True) + + parentpath, name = fullpath.rsplit('/', 1) + parentgroup = self.get_group(parentpath) + + parentid = 1 + + if parentpath == "": + parentid = 1 + elif parentgroup: + parentid = parentgroup["id"] + else: + parentid = self.create_group(parentpath) + + h = None + + # Determine if we're creating a group from host or hostgroup class + if hasattr(self, '_build_host_group_hash'): + h = self._build_host_group_hash( + fullpath, + self.description, + self.properties, + self.alertenable) + h["name"] = name + h["parentId"] = parentid + else: + h = {"name": name, + "parentId": parentid, + "alertEnable": True, + "description": ""} + + self.module.debug("Making RPC call to 'addHostGroup'") + resp = json.loads( + self.rpc("addHostGroup", h)) + + if resp["status"] == 200: + self.module.debug("RPC call succeeded") + return resp["data"]["id"] + elif resp["errmsg"] == "The record already exists": + self.module.debug("The hostgroup already exists") + group = self.get_group(fullpath) + return group["id"] + else: + self.module.debug("RPC call failed") + self.fail( + msg="Error: unable to create new hostgroup \"" + name + + "\".\n" + resp["errmsg"]) + + def fail(self, msg): + self.module.fail_json(msg=msg, changed=self.change) + + def exit(self, changed): + self.module.debug("Changed: " + changed) + self.module.exit_json(changed=changed) + + def output_info(self, info): + self.module.debug("Registering properties as Ansible facts") + self.module.exit_json(changed=False, ansible_facts=info) + + +class Host(LogicMonitor): + + def __init__(self, params, module=None): + """Initializor for the LogicMonitor host object""" + self.change = False + self.params = params + self.collector = None + + 
LogicMonitor.__init__(self, module, **self.params) + self.module.debug("Instantiating Host object") + + if self.params["hostname"]: + self.module.debug("Hostname is " + self.params["hostname"]) + self.hostname = self.params['hostname'] + else: + self.module.debug("No hostname specified. Using " + self.fqdn) + self.hostname = self.fqdn + + if self.params["displayname"]: + self.module.debug("Display name is " + self.params["displayname"]) + self.displayname = self.params['displayname'] + else: + self.module.debug("No display name specified. Using " + self.fqdn) + self.displayname = self.fqdn + + # Attempt to find host information via display name or host name + self.module.debug("Attempting to find host by displayname " + + self.displayname) + info = self.get_host_by_displayname(self.displayname) + + if info is not None: + self.module.debug("Host found by displayname") + # Use the host information to grab the collector description + # if not provided + if (self.params["collector"] is None and + "agentDescription" in info): + self.module.debug("Setting collector from host response. " + + "Collector " + info["agentDescription"]) + self.params["collector"] = info["agentDescription"] + else: + self.module.debug("Host not found by displayname") + + # At this point, a valid collector description is required for success + # Check that the description exists or fail + if self.params["collector"]: + self.module.debug("Collector specified is " + + self.params["collector"]) + self.collector = (self.get_collector_by_description( + self.params["collector"])) + else: + self.fail(msg="No collector specified.") + + # If the host wasn't found via displayname, attempt by hostname + if info is None: + self.module.debug("Attempting to find host by hostname " + + self.hostname) + info = self.get_host_by_hostname(self.hostname, self.collector) + + self.info = info + + def get_properties(self): + """Returns a hash of the properties + associated with this LogicMonitor host""" + self.module.debug("Running Host.get_properties...") + + if self.info: + self.module.debug("Making RPC call to 'getHostProperties'") + properties_json = (json.loads(self.rpc("getHostProperties", + {'hostId': self.info["id"], + "filterSystemProperties": True}))) + + if properties_json["status"] == 200: + self.module.debug("RPC call succeeded") + return properties_json["data"] + else: + self.module.debug("Error: there was an issue retrieving the " + + "host properties") + self.module.debug(properties_json["errmsg"]) + + self.fail(msg=properties_json["status"]) + else: + self.module.debug( + "Unable to find LogicMonitor host which matches " + + self.displayname + " (" + self.hostname + ")" + ) + return None + + def site_facts(self): + """Output current properties information for the Host""" + self.module.debug("Running Host.site_facts...") + + if self.info: + self.module.debug("Host exists") + props = self.get_properties() + + self.output_info(props) + else: + self.fail(msg="Error: Host doesn't exist.") + + +class Hostgroup(LogicMonitor): + + def __init__(self, params, module=None): + """Initializer for the LogicMonitor hostgroup object""" + self.change = False + self.params = params + + LogicMonitor.__init__(self, module, **self.params) + self.module.debug("Instantiating Hostgroup object") + + self.fullpath = self.params["fullpath"] + self.info = self.get_group(self.fullpath) + + def get_properties(self, final=False): + """Returns a hash of the properties + associated with this LogicMonitor hostgroup""" + self.module.debug("Running 
Hostgroup.get_properties...") + + if self.info: + self.module.debug("Group found") + + self.module.debug("Making RPC call to 'getHostGroupProperties'") + properties_json = json.loads(self.rpc( + "getHostGroupProperties", + {'hostGroupId': self.info["id"], + "finalResult": final})) + + if properties_json["status"] == 200: + self.module.debug("RPC call succeeded") + return properties_json["data"] + else: + self.module.debug("RPC call failed") + self.fail(msg=properties_json["status"]) + else: + self.module.debug("Group not found") + return None + + def site_facts(self): + """Output current properties information for the Hostgroup""" + self.module.debug("Running Hostgroup.site_facts...") + + if self.info: + self.module.debug("Group exists") + props = self.get_properties(True) + + self.output_info(props) + else: + self.fail(msg="Error: Group doesn't exit.") + + +def selector(module): + """Figure out which object and which actions + to take given the right parameters""" + + if module.params["target"] == "host": + target = Host(module.params, module) + target.site_facts() + elif module.params["target"] == "hostgroup": + # Validate target specific required parameters + if module.params["fullpath"] is not None: + target = Hostgroup(module.params, module) + target.site_facts() + else: + module.fail_json( + msg="Parameter 'fullpath' required for target 'hostgroup'") + else: + module.fail_json( + msg="Error: Unexpected target \"" + module.params["target"] + + "\" was specified.") + + +def main(): + TARGETS = [ + "host", + "hostgroup"] + + module = AnsibleModule( + argument_spec=dict( + target=dict(required=True, default=None, choices=TARGETS), + company=dict(required=True, default=None), + user=dict(required=True, default=None), + password=dict(required=True, default=None, no_log=True), + + collector=dict(require=False, default=None), + hostname=dict(required=False, default=None), + displayname=dict(required=False, default=None), + fullpath=dict(required=False, default=None) + ), + supports_check_mode=True + ) + + if HAS_LIB_JSON is not True: + module.fail_json(msg="Unable to load JSON library") + + selector(module) + +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +from ansible.module_utils.urls import open_url + +if __name__ == "__main__": + main() diff --git a/monitoring/monit.py b/monitoring/monit.py index 3d3c7c8c3ca..5e88c7b54d8 100644 --- a/monitoring/monit.py +++ b/monitoring/monit.py @@ -18,6 +18,11 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # +import time + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -38,18 +43,29 @@ required: true default: null choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ] + timeout: + description: + - If there are pending actions for the service monitored by monit, then Ansible will check + for up to this many seconds to verify the the requested action has been performed. + Ansible will sleep for five seconds between each check. + required: false + default: 300 + version_added: "2.1" requirements: [ ] author: "Darryl Stoflet (@dstoflet)" ''' EXAMPLES = ''' # Manage the state of program "httpd" to be in "started" state. 
-- monit: name=httpd state=started +- monit: + name: httpd + state: started ''' def main(): arg_spec = dict( name=dict(required=True), + timeout=dict(default=300, type='int'), state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded']) ) @@ -57,17 +73,10 @@ def main(): name = module.params['name'] state = module.params['state'] + timeout = module.params['timeout'] MONIT = module.get_bin_path('monit', True) - if state == 'reloaded': - if module.check_mode: - module.exit_json(changed=True) - rc, out, err = module.run_command('%s reload' % MONIT) - if rc != 0: - module.fail_json(msg='monit reload failed', stdout=out, stderr=err) - module.exit_json(changed=True, name=name, state=state) - def status(): """Return the status of the process in monit, or the empty string if not present.""" rc, out, err = module.run_command('%s summary' % MONIT, check_rc=True) @@ -86,8 +95,34 @@ def run_command(command): module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True) return status() - process_status = status() - present = process_status != '' + def wait_for_monit_to_stop_pending(): + """Fails this run if there is no status or it's pending/initializing until timeout""" + timeout_time = time.time() + timeout + sleep_time = 5 + + running_status = status() + while running_status == '' or 'pending' in running_status or 'initializing' in running_status: + if time.time() >= timeout_time: + module.fail_json( + msg='waited too long for "pending" or "initializing" status to go away ({0})'.format( + running_status + ), + state=state + ) + + time.sleep(sleep_time) + running_status = status() + + if state == 'reloaded': + if module.check_mode: + module.exit_json(changed=True) + rc, out, err = module.run_command('%s reload' % MONIT) + if rc != 0: + module.fail_json(msg='monit reload failed', stdout=out, stderr=err) + wait_for_monit_to_stop_pending() + module.exit_json(changed=True, name=name, state=state) + + present = status() != '' if not present and not state == 'present': module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state) @@ -98,12 +133,12 @@ def run_command(command): module.exit_json(changed=True) status = run_command('reload') if status == '': - module.fail_json(msg='%s process not configured with monit' % name, name=name, state=state) - else: - module.exit_json(changed=True, name=name, state=state) + wait_for_monit_to_stop_pending() + module.exit_json(changed=True, name=name, state=state) module.exit_json(changed=False, name=name, state=state) - running = 'running' in process_status + wait_for_monit_to_stop_pending() + running = 'running' in status() if running and state in ['started', 'monitored']: module.exit_json(changed=False, name=name, state=state) @@ -153,4 +188,5 @@ def run_command(command): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/monitoring/nagios.py b/monitoring/nagios.py index ed1da7a1e2e..78bd897ed1d 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -15,6 +15,10 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: nagios @@ -31,9 +35,9 @@ description: - Action to take. - servicegroup options were added in 2.0. + - delete_downtime options were added in 2.2. 
required: true - default: null - choices: [ "downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", + choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", "silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime", "servicegroup_host_downtime" ] host: @@ -72,7 +76,6 @@ B(Required) option when using the C(downtime), C(enable_alerts), and C(disable_alerts) actions. aliases: [ "service" ] required: true - default: null servicegroup: version_added: "2.0" description: @@ -84,58 +87,113 @@ should not include the submitted time header or the line-feed B(Required) option when using the C(command) action. required: true - default: null author: "Tim Bielawa (@tbielawa)" -requirements: [ "Nagios" ] ''' EXAMPLES = ''' # set 30 minutes of apache downtime -- nagios: action=downtime minutes=30 service=httpd host={{ inventory_hostname }} +- nagios: + action: downtime + minutes: 30 + service: httpd + host: '{{ inventory_hostname }}' # schedule an hour of HOST downtime -- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }} +- nagios: + action: downtime + minutes: 60 + service: host + host: '{{ inventory_hostname }}' # schedule an hour of HOST downtime, with a comment describing the reason -- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }} - comment='This host needs disciplined' +- nagios: + action: downtime + minutes: 60 + service: host + host: '{{ inventory_hostname }}' + comment: Rebuilding machine # schedule downtime for ALL services on HOST -- nagios: action=downtime minutes=45 service=all host={{ inventory_hostname }} +- nagios: + action: downtime + minutes: 45 + service: all + host: '{{ inventory_hostname }}' # schedule downtime for a few services -- nagios: action=downtime services=frob,foobar,qeuz host={{ inventory_hostname }} +- nagios: + action: downtime + services: frob,foobar,qeuz + host: '{{ inventory_hostname }}' # set 30 minutes downtime for all services in servicegroup foo -- nagios: action=servicegroup_service_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }} +- nagios: + action: servicegroup_service_downtime + minutes: 30 + servicegroup: foo + host: '{{ inventory_hostname }}' # set 30 minutes downtime for all hosts in servicegroup foo -- nagios: action=servicegroup_host_downtime minutes=30 servicegroup=foo host={{ inventory_hostname }} +- nagios: + action: servicegroup_host_downtime + minutes: 30 + servicegroup: foo + host: '{{ inventory_hostname }}' + +# delete all downtime for a given host +- nagios: + action: delete_downtime + host: '{{ inventory_hostname }}' + service: all + +# delete all downtime for HOST with a particular comment +- nagios: + action: delete_downtime + host: '{{ inventory_hostname }}' + service: host + comment: Planned maintenance # enable SMART disk alerts -- nagios: action=enable_alerts service=smart host={{ inventory_hostname }} +- nagios: + action: enable_alerts + service: smart + host: '{{ inventory_hostname }}' # "two services at once: disable httpd and nfs alerts" -- nagios: action=disable_alerts service=httpd,nfs host={{ inventory_hostname }} +- nagios: + action: disable_alerts + service: httpd,nfs + host: '{{ inventory_hostname }}' # disable HOST alerts -- nagios: action=disable_alerts service=host host={{ inventory_hostname }} +- nagios: + action: disable_alerts + service: host + host: '{{ inventory_hostname }}' # silence ALL alerts -- nagios: action=silence host={{ inventory_hostname }} +- nagios: + 
action: silence + host: '{{ inventory_hostname }}' # unsilence all alerts -- nagios: action=unsilence host={{ inventory_hostname }} +- nagios: + action: unsilence + host: '{{ inventory_hostname }}' # SHUT UP NAGIOS -- nagios: action=silence_nagios +- nagios: + action: silence_nagios # ANNOY ME NAGIOS -- nagios: action=unsilence_nagios +- nagios: + action: unsilence_nagios # command something -- nagios: action=command command='DISABLE_FAILURE_PREDICTION' +- nagios: + action: command + command: DISABLE_FAILURE_PREDICTION ''' import ConfigParser @@ -185,6 +243,7 @@ def which_cmdfile(): def main(): ACTION_CHOICES = [ 'downtime', + 'delete_downtime', 'silence', 'unsilence', 'enable_alerts', @@ -246,6 +305,12 @@ def main(): except Exception: module.fail_json(msg='invalid entry for minutes') + ###################################################################### + if action == 'delete_downtime': + # Make sure there's an actual service selected + if not services: + module.fail_json(msg='no service selected to delete downtime for') + ###################################################################### if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']: @@ -270,7 +335,7 @@ module.fail_json(msg='no command passed for command action') ################################################################## if not cmdfile: - module.fail_json('unable to locate nagios.cfg') + module.fail_json(msg='unable to locate nagios.cfg') ################################################################## ansible_nagios = Nagios(module, **module.params) @@ -387,6 +452,47 @@ def _fmt_dt_str(self, cmd, host, duration, author=None, return dt_str + def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None): + """ + Format an external-command downtime deletion string. + + cmd - Nagios command ID + host - Host to remove scheduled downtime from + comment - Reason downtime was added (upgrade, reboot, etc) + start - Start of downtime in seconds since 12:00AM Jan 1 1970 + svc - Service to remove downtime for, omit to remove all downtime for the host + + Syntax: [submitted] COMMAND;<host_name>; + [<svc_description>];[<start_time>];[<comment>] + """ + + entry_time = self._now() + hdr = "[%s] %s;%s;" % (entry_time, cmd, host) + + if comment is None: + comment = self.comment + + dt_del_args = [] + if svc is not None: + dt_del_args.append(svc) + else: + dt_del_args.append('') + + if start is not None: + dt_del_args.append(str(start)) + else: + dt_del_args.append('') + + if comment is not None: + dt_del_args.append(comment) + else: + dt_del_args.append('') + + dt_del_arg_str = ";".join(dt_del_args) + dt_del_str = hdr + dt_del_arg_str + "\n" + + return dt_del_str + def _fmt_notif_str(self, cmd, host=None, svc=None): """ Format an external-command notification string. @@ -466,6 +572,26 @@ def schedule_host_svc_downtime(self, host, minutes=30): dt_cmd_str = self._fmt_dt_str(cmd, host, minutes) self._write_command(dt_cmd_str) + def delete_host_downtime(self, host, services=None, comment=None): + """ + This command is used to remove scheduled downtime for a particular + host. 
+ + Syntax: DEL_DOWNTIME_BY_HOST_NAME;<host_name>; + [<svc_description>];[<start_time>];[<comment>] + """ + + cmd = "DEL_DOWNTIME_BY_HOST_NAME" + + if services is None: + dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, comment=comment) + self._write_command(dt_del_cmd_str) + else: + for service in services: + dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, svc=service, comment=comment) + self._write_command(dt_del_cmd_str) + + def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30): """ This command is used to schedule downtime for all hosts in a @@ -877,7 +1003,7 @@ def nagios_cmd(self, cmd): pre = '[%s]' % int(time.time()) post = '\n' - cmdstr = '%s %s %s' % (pre, cmd, post) + cmdstr = '%s %s%s' % (pre, cmd, post) self._write_command(cmdstr) def act(self): @@ -895,6 +1021,15 @@ def act(self): self.schedule_svc_downtime(self.host, services=self.services, minutes=self.minutes) + + elif self.action == 'delete_downtime': + if self.services == 'host': + self.delete_host_downtime(self.host) + elif self.services == 'all': + self.delete_host_downtime(self.host, comment='') + else: + self.delete_host_downtime(self.host, services=self.services) + elif self.action == "servicegroup_host_downtime": if self.servicegroup: self.schedule_servicegroup_host_downtime(servicegroup = self.servicegroup, minutes = self.minutes) @@ -913,6 +1048,8 @@ elif self.action == 'enable_alerts': if self.services == 'host': self.enable_host_notifications(self.host) + elif self.services == 'all': + self.enable_host_svc_notifications(self.host) else: self.enable_svc_notifications(self.host, services=self.services) @@ -920,6 +1057,8 @@ elif self.action == 'disable_alerts': if self.services == 'host': self.disable_host_notifications(self.host) + elif self.services == 'all': + self.disable_host_svc_notifications(self.host) else: self.disable_svc_notifications(self.host, services=self.services) @@ -943,4 +1082,6 @@ def act(self): ###################################################################### # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/monitoring/newrelic_deployment.py b/monitoring/newrelic_deployment.py index 3d9bc6c0ec3..c8f8703230d 100644 --- a/monitoring/newrelic_deployment.py +++ b/monitoring/newrelic_deployment.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: newrelic_deployment @@ -76,10 +80,11 @@ ''' EXAMPLES = ''' -- newrelic_deployment: token=AAAAAA - app_name=myapp - user='ansible deployment' - revision=1.0 +- newrelic_deployment: + token: AAAAAA + app_name: myapp + user: ansible deployment + revision: '1.0' ''' import urllib @@ -92,7 +97,7 @@ def main(): module = AnsibleModule( argument_spec=dict( - token=dict(required=True), + token=dict(required=True, no_log=True), app_name=dict(required=False), application_id=dict(required=False), changelog=dict(required=False), @@ -143,5 +148,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() - +if __name__ == '__main__': + main() diff --git a/monitoring/pagerduty.py b/monitoring/pagerduty.py index 99a9be8a044..43d93501c16 100644 --- a/monitoring/pagerduty.py +++ b/monitoring/pagerduty.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
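For orientation on the nagios hunk above: a minimal sketch (not part of the patch; the hostname, epoch timestamp, and comment are invented) of the external-command line that `delete_host_downtime` ends up writing to the Nagios command file when no service or start-time filter is given:

```python
# Sketch only: mirrors what _fmt_dt_del_str() produces for a host-wide
# deletion. The entry time normally comes from time.time() via _now().
entry_time = 1457819400                      # hypothetical epoch seconds
cmd = "DEL_DOWNTIME_BY_HOST_NAME"
host = "web01.example.com"                   # hypothetical host
args = ["", "", "Planned maintenance"]       # svc and start left empty
line = "[%s] %s;%s;" % (entry_time, cmd, host) + ";".join(args) + "\n"
assert line == ("[1457819400] DEL_DOWNTIME_BY_HOST_NAME;"
                "web01.example.com;;;Planned maintenance\n")
```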
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' module: pagerduty @@ -117,43 +121,54 @@ EXAMPLES=''' # List ongoing maintenance windows using a user/passwd -- pagerduty: name=companyabc user=example@example.com passwd=password123 state=ongoing +- pagerduty: + name: companyabc + user: example@example.com + passwd: password123 + state: ongoing # List ongoing maintenance windows using a token -- pagerduty: name=companyabc token=xxxxxxxxxxxxxx state=ongoing +- pagerduty: + name: companyabc + token: xxxxxxxxxxxxxx + state: ongoing # Create a 1 hour maintenance window for service FOO123, using a user/passwd -- pagerduty: name=companyabc - user=example@example.com - passwd=password123 - state=running - service=FOO123 +- pagerduty: + name: companyabc + user: example@example.com + passwd: password123 + state: running + service: FOO123 # Create a 5 minute maintenance window for service FOO123, using a token -- pagerduty: name=companyabc - token=xxxxxxxxxxxxxx - hours=0 - minutes=5 - state=running - service=FOO123 +- pagerduty: + name: companyabc + token: xxxxxxxxxxxxxx + hours: 0 + minutes: 5 + state: running + service: FOO123 # Create a 4 hour maintenance window for service FOO123 with the description "deployment". -- pagerduty: name=companyabc - user=example@example.com - passwd=password123 - state=running - service=FOO123 - hours=4 - desc=deployment +- pagerduty: + name: companyabc + user: example@example.com + passwd: password123 + state: running + service: FOO123 + hours: 4 + desc: deployment register: pd_window # Delete the previous maintenance window -- pagerduty: name=companyabc - user=example@example.com - passwd=password123 - state=absent - service={{ pd_window.result.maintenance_window.id }} +- pagerduty: + name: companyabc + user: example@example.com + passwd: password123 + state: absent + service: '{{ pd_window.result.maintenance_window.id }}' ''' import datetime @@ -203,7 +218,7 @@ def create(module, name, user, passwd, token, requester_id, service, hours, minu data = json.dumps(request_data) response, info = fetch_url(module, url, data=data, headers=headers, method='POST') - if info['status'] != 200: + if info['status'] != 201: module.fail_json(msg="failed to create the window: %s" % info['msg']) try: @@ -229,7 +244,7 @@ def absent(module, name, user, passwd, token, requester_id, service): data = json.dumps(request_data) response, info = fetch_url(module, url, data=data, headers=headers, method='DELETE') - if info['status'] != 200: + if info['status'] != 204: module.fail_json(msg="failed to delete the window: %s" % info['msg']) try: @@ -296,4 +311,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() +if __name__ == '__main__': + main() diff --git a/monitoring/pagerduty_alert.py b/monitoring/pagerduty_alert.py index e2d127f0155..f011b902703 100644 --- a/monitoring/pagerduty_alert.py +++ b/monitoring/pagerduty_alert.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' module: pagerduty_alert diff --git a/monitoring/pingdom.py b/monitoring/pingdom.py index 4346e8ca6fe..d37ae44ab19 100644 --- a/monitoring/pingdom.py +++ b/monitoring/pingdom.py @@ -15,6 +15,10 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
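The two status-code corrections in the pagerduty hunk above (200 becomes 201 for window creation, 200 becomes 204 for deletion) follow standard HTTP semantics: a successful POST answers 201 Created and a successful DELETE answers 204 No Content, so comparing both against a blanket 200 made the module report failure on success. An illustrative sketch, assuming the API follows the conventions the patch encodes:

```python
# Sketch, not module code: map each HTTP method the pagerduty module uses
# to the status code that actually signals success.
EXPECTED_STATUS = {
    'GET': 200,     # listing maintenance windows
    'POST': 201,    # maintenance window created
    'DELETE': 204,  # maintenance window deleted, empty response body
}

def succeeded(method, status):
    """True when `status` indicates success for the given method."""
    return status == EXPECTED_STATUS.get(method, 200)

assert succeeded('POST', 201) and not succeeded('POST', 200)
assert succeeded('DELETE', 204)
```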
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' module: pingdom @@ -69,18 +73,20 @@ EXAMPLES = ''' # Pause the check with the ID of 12345. -- pingdom: uid=example@example.com - passwd=password123 - key=apipassword123 - checkid=12345 - state=paused +- pingdom: + uid: example@example.com + passwd: password123 + key: apipassword123 + checkid: 12345 + state: paused # Unpause the check with the ID of 12345. -- pingdom: uid=example@example.com - passwd=password123 - key=apipassword123 - checkid=12345 - state=running +- pingdom: + uid: example@example.com + passwd: password123 + key: apipassword123 + checkid: 12345 + state: running ''' try: @@ -149,4 +155,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/monitoring/rollbar_deployment.py b/monitoring/rollbar_deployment.py index 060193b78a5..5ee332fcf2c 100644 --- a/monitoring/rollbar_deployment.py +++ b/monitoring/rollbar_deployment.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: rollbar_deployment @@ -68,16 +72,22 @@ ''' EXAMPLES = ''' -- rollbar_deployment: token=AAAAAA - environment='staging' - user='ansible' - revision=4.2, - rollbar_user='admin', - comment='Test Deploy' +- rollbar_deployment: + token: AAAAAA + environment: staging + user: ansible + revision: '4.2' + rollbar_user: admin + comment: Test Deploy ''' import urllib +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import fetch_url + + def main(): module = AnsibleModule( @@ -120,7 +130,8 @@ def main(): try: data = urllib.urlencode(params) response, info = fetch_url(module, url, data=data) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg='Unable to notify Rollbar: %s' % e) else: if info['status'] == 200: @@ -128,7 +139,6 @@ def main(): else: module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url)) -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() +if __name__ == '__main__': + main() diff --git a/monitoring/sensu_check.py b/monitoring/sensu_check.py index a1bd36ca665..77a39647cf6 100644 --- a/monitoring/sensu_check.py +++ b/monitoring/sensu_check.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . 
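Several hunks in this series (rollbar_deployment above, sensu_check and stackdriver below) replace the Python 2-only `except IOError, e:` syntax with `get_exception()` from `module_utils.pycompat24`. A minimal standalone equivalent, assuming the helper is essentially a thin wrapper over `sys.exc_info()`, shows why the rewritten form parses on both interpreter families:

```python
# Sketch of the idiom; the real helper lives in
# ansible.module_utils.pycompat24 and may differ in detail.
import sys

def get_exception():
    """Return the exception currently being handled."""
    return sys.exc_info()[1]

try:
    open('/nonexistent/path')
except IOError:            # no ", e" suffix -- valid on Python 2 and 3
    e = get_exception()
    print('Failed: %s' % e)
```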
# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: sensu_check @@ -36,7 +40,8 @@ - This is the key that is used to determine whether a check exists required: true state: - description: Whether the check should be present or not + description: + - Whether the check should be present or not choices: [ 'present', 'absent' ] required: false default: present @@ -102,7 +107,8 @@ required: false default: [] metric: - description: Whether the check is a metric + description: + - Whether the check is a metric choices: [ 'yes', 'no' ] required: false default: no @@ -144,49 +150,77 @@ default: null high_flap_threshold: description: - - The low threshhold for flap detection + - The high threshold for flap detection + required: false + default: null + custom: + version_added: "2.1" + description: + - A hash/dictionary of custom parameters to merge into the configuration. + - You can't override other module parameters using this + required: false + default: {} + source: + version_added: "2.1" + description: + - The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch). + required: false + default: null requirements: [ ] -author: Anders Ingemann +author: "Anders Ingemann (@andsens)" ''' EXAMPLES = ''' # Fetch metrics about the CPU load every 60 seconds, # the sensu server has a handler called 'relay' which forwards stats to graphite - name: get cpu metrics - sensu_check: name=cpu_load - command=/etc/sensu/plugins/system/cpu-mpstat-metrics.rb - metric=yes handlers=relay subscribers=common interval=60 + sensu_check: + name: cpu_load + command: /etc/sensu/plugins/system/cpu-mpstat-metrics.rb + metric: yes + handlers: relay + subscribers: common + interval: 60 # Check whether nginx is running - name: check nginx process - sensu_check: name=nginx_running - command='/etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid' - handlers=default subscribers=nginx interval=60 + sensu_check: + name: nginx_running + command: /etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid + handlers: default + subscribers: nginx + interval: 60 # Stop monitoring the disk capacity. # Note that the check will still show up in the sensu dashboard, # to remove it completely you need to issue a DELETE request to the sensu api. 
- name: check disk - sensu_check: name=check_disk_capacity + sensu_check: + name: check_disk_capacity + state: absent ''' +try: + import json +except ImportError: + try: + import simplejson as json + except ImportError: + # Let snippet from module_utils/basic.py return a proper error in this case + pass + def sensu_check(module, path, name, state='present', backup=False): changed = False reasons = [] - try: - import json - except ImportError: - import simplejson as json - + stream = None try: try: stream = open(path, 'r') - config = json.load(stream.read()) - except IOError, e: + config = json.load(stream) + except IOError: + e = get_exception() if e.errno is 2: # File not found, non-fatal if state == 'absent': reasons.append('file did not exist and state is `absent\'') @@ -237,6 +271,7 @@ def sensu_check(module, path, name, state='present', backup=False): 'aggregate', 'low_flap_threshold', 'high_flap_threshold', + 'source', ] for opt in simple_opts: if module.params[opt] is not None: @@ -250,6 +285,31 @@ changed = True reasons.append('`{opt}\' was removed'.format(opt=opt)) + if module.params['custom']: + # Merge in the user-supplied custom parameters + custom_params = module.params['custom'] + overwritten_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']) + if overwritten_fields: + msg = 'You can\'t overwrite standard module parameters via "custom". You are trying to overwrite: {opt}'.format(opt=list(overwritten_fields)) + module.fail_json(msg=msg) + + for k, v in custom_params.items(): + if k in config['checks'][name]: + if not config['checks'][name][k] == v: + changed = True + reasons.append('`custom param {opt}\' was changed'.format(opt=k)) + else: + changed = True + reasons.append('`custom param {opt}\' was added'.format(opt=k)) + check[k] = v + simple_opts += custom_params.keys() + + # Remove obsolete custom params + for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']): + changed = True + reasons.append('`custom param {opt}\' was deleted'.format(opt=opt)) + del check[opt] + if module.params['metric']: if 'type' not in check or check['type'] != 'metric': check['type'] = 'metric' @@ -281,7 +341,8 @@ def sensu_check(module, path, name, state='present', backup=False): try: stream = open(path, 'w') stream.write(json.dumps(config, indent=2) + '\n') - except IOError, e: + except IOError: + e = get_exception() module.fail_json(msg=str(e)) finally: if stream: @@ -313,6 +374,8 @@ def main(): 'aggregate': {'type': 'bool'}, 'low_flap_threshold': {'type': 'int'}, 'high_flap_threshold': {'type': 'int'}, + 'custom': {'type': 'dict'}, + 'source': {'type': 'str'}, } required_together = [['subdue_begin', 'subdue_end']] @@ -333,4 +396,7 @@ def main(): module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons) from ansible.module_utils.basic import * +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/monitoring/sensu_subscription.py b/monitoring/sensu_subscription.py new file mode 100644 index 00000000000..90535ad2d0b --- /dev/null +++ b/monitoring/sensu_subscription.py @@ -0,0 +1,165 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Anders Ingemann +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, 
either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: sensu_subscription +short_description: Manage Sensu subscriptions +version_added: 2.2 +description: + - Manage which I(sensu channels) a machine should subscribe to +options: + name: + description: + - The name of the channel + required: true + state: + description: + - Whether the machine should subscribe or unsubscribe from the channel + choices: [ 'present', 'absent' ] + required: false + default: present + path: + description: + - Path to the subscriptions json file + required: false + default: /etc/sensu/conf.d/subscriptions.json + backup: + description: + - Create a backup file (if yes), including the timestamp information so you + can get the original file back if you somehow clobbered it incorrectly. + choices: [ 'yes', 'no' ] + required: false + default: no +requirements: [ ] +author: Anders Ingemann +''' + +RETURN = ''' +reasons: + description: the reasons why the module changed or did not change something + returned: success + type: list + sample: ["channel subscription was absent and state is `present'"] +''' + +EXAMPLES = ''' +# Subscribe to the nginx channel +- name: subscribe to nginx checks + sensu_subscription: name=nginx + +# Unsubscribe from the common checks channel +- name: unsubscribe from common checks + sensu_subscription: name=common state=absent +''' + + +def sensu_subscription(module, path, name, state='present', backup=False): + changed = False + reasons = [] + + try: + import json + except ImportError: + import simplejson as json + + try: + config = json.load(open(path)) + except IOError: + e = get_exception() + if e.errno is 2: # File not found, non-fatal + if state == 'absent': + reasons.append('file did not exist and state is `absent\'') + return changed, reasons + config = {} + else: + module.fail_json(msg=str(e)) + except ValueError: + msg = '{path} contains invalid JSON'.format(path=path) + module.fail_json(msg=msg) + + if 'client' not in config: + if state == 'absent': + reasons.append('`client\' did not exist and state is `absent\'') + return changed, reasons + config['client'] = {} + changed = True + reasons.append('`client\' did not exist') + + if 'subscriptions' not in config['client']: + if state == 'absent': + reasons.append('`client.subscriptions\' did not exist and state is `absent\'') + return changed, reasons + config['client']['subscriptions'] = [] + changed = True + reasons.append('`client.subscriptions\' did not exist') + + if name not in config['client']['subscriptions']: + if state == 'absent': + reasons.append('channel subscription was absent') + return changed, reasons + config['client']['subscriptions'].append(name) + changed = True + reasons.append('channel subscription was absent and state is `present\'') + else: + if state == 'absent': + config['client']['subscriptions'].remove(name) + changed = True + reasons.append('channel subscription was present and state is `absent\'') + + if changed and not module.check_mode: + if backup: + module.backup_local(path) + try: + open(path, 
'w').write(json.dumps(config, indent=2) + '\n') + except IOError: + e = get_exception() + module.fail_json(msg='Failed to write to file %s: %s' % (path, str(e))) + + return changed, reasons + + +def main(): + arg_spec = {'name': {'type': 'str', 'required': True}, + 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'}, + 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']}, + 'backup': {'type': 'bool', 'default': 'no'}, + } + + module = AnsibleModule(argument_spec=arg_spec, + supports_check_mode=True) + + path = module.params['path'] + name = module.params['name'] + state = module.params['state'] + backup = module.params['backup'] + + changed, reasons = sensu_subscription(module, path, name, state, backup) + + module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons) + +from ansible.module_utils.basic import * +from ansible.module_utils.pycompat24 import get_exception +if __name__ == '__main__': + main() diff --git a/monitoring/stackdriver.py b/monitoring/stackdriver.py index 7b3688cbefc..b20b1911588 100644 --- a/monitoring/stackdriver.py +++ b/monitoring/stackdriver.py @@ -15,6 +15,10 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' module: stackdriver @@ -84,18 +88,40 @@ ''' EXAMPLES = ''' -- stackdriver: key=AAAAAA event=deploy deployed_to=production deployed_by=leeroyjenkins repository=MyWebApp revision_id=abcd123 - -- stackdriver: key=AAAAAA event=annotation msg="Greetings from Ansible" annotated_by=leeroyjenkins level=WARN instance_id=i-abcd1234 +- stackdriver: + key: AAAAAA + event: deploy + deployed_to: production + deployed_by: leeroyjenkins + repository: MyWebApp + revision_id: abcd123 + +- stackdriver: + key: AAAAAA + event: annotation + msg: Greetings from Ansible + annotated_by: leeroyjenkins + level: WARN + instance_id: i-abcd1234 ''' # =========================================== # Stackdriver module specific support methods. 
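For reference on the sensu_subscription module above: it round-trips a small JSON client config at its documented default path, `/etc/sensu/conf.d/subscriptions.json`. A minimal sketch of that round-trip (channel names are hypothetical):

```python
# Sketch of the file shape sensu_subscription() reads and rewrites; the
# module appends the channel for state=present and removes it for
# state=absent, then serializes with json.dumps(config, indent=2).
import json

config = {'client': {'subscriptions': ['common']}}
config['client']['subscriptions'].append('nginx')   # state=present
print(json.dumps(config, indent=2) + '\n')
# {
#   "client": {
#     "subscriptions": [
#       "common",
#       "nginx"
#     ]
#   }
# }
```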
# + try: - import json + import json except ImportError: - import simplejson as json + try: + import simplejson as json + except ImportError: + # Let snippet from module_utils/basic.py return a proper error in this case + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import fetch_url + def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None): """Send a deploy event to Stackdriver""" @@ -189,7 +215,8 @@ def main(): module.fail_json(msg="revision_id required for deploy events") try: send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository) - except Exception, e: + except Exception: + e = get_exception() + module.fail_json(msg="unable to send deploy event: %s" % e) if event == 'annotation': @@ -197,14 +224,13 @@ module.fail_json(msg="msg required for annotation events") try: send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch) - except Exception, e: + except Exception: + e = get_exception() + module.fail_json(msg="unable to send annotation event: %s" % e) changed = True module.exit_json(changed=changed, deployed_by=deployed_by) -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() +if __name__ == '__main__': + main() diff --git a/monitoring/statusio_maintenance.py b/monitoring/statusio_maintenance.py new file mode 100644 index 00000000000..5533e454713 --- /dev/null +++ b/monitoring/statusio_maintenance.py @@ -0,0 +1,484 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Benjamin Copeland (@bhcopeland) +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' + +module: statusio_maintenance +short_description: Create maintenance windows for your status.io dashboard +description: + - Creates a maintenance window for status.io + - Deletes a maintenance window for status.io +notes: + - You can use the apiary API url (http://docs.statusio.apiary.io/) to + capture API traffic + - Use start_date and start_time with minutes to set a future maintenance window +version_added: "2.2" +author: Benjamin Copeland (@bhcopeland) +options: + title: + description: + - A descriptive title for the maintenance window + required: false + default: "A new maintenance window" + desc: + description: + - Message describing the maintenance window + required: false + default: "Created by Ansible" + state: + description: + - Desired state of the maintenance window. 
+ required: false + default: "present" + choices: ["present", "absent"] + api_id: + description: + - Your unique API ID from status.io + required: true + api_key: + description: + - Your unique API Key from status.io + required: true + statuspage: + description: + - Your unique StatusPage ID from status.io + required: true + url: + description: + - Status.io API URL. A private apiary can be used instead. + required: false + default: "https://api.status.io" + components: + description: + - The given name of your component (server name) + required: false + aliases: ['component'] + default: None + containers: + description: + - The given name of your container (data center) + required: false + aliases: ['container'] + default: None + all_infrastructure_affected: + description: + - If it affects all components and containers + required: false + default: false + automation: + description: + - Automatically start and end the maintenance window + required: false + default: false + maintenance_notify_now: + description: + - Notify subscribers now + required: false + default: false + maintenance_notify_72_hr: + description: + - Notify subscribers 72 hours before maintenance start time + required: false + default: false + maintenance_notify_24_hr: + description: + - Notify subscribers 24 hours before maintenance start time + required: false + default: false + maintenance_notify_1_hr: + description: + - Notify subscribers 1 hour before maintenance start time + required: false + default: false + maintenance_id: + description: + - The maintenance id number when deleting a maintenance window + required: false + default: None + minutes: + description: + - The length of time in UTC that the maintenance will run + (starting from playbook runtime) + required: false + default: 10 + start_date: + description: + - Date maintenance is expected to start (Month/Day/Year) (UTC) + - End Date is worked out from start_date + minutes + required: false + default: None + start_time: + description: + - Time maintenance is expected to start (Hour:Minutes) (UTC) + - End Time is worked out from start_time + minutes + required: false + default: None +''' + +EXAMPLES = ''' +# Create a maintenance window for 10 minutes on server1.example.com, with +automation to stop the maintenance. 
+- statusio_maintenance:
+    title: "Router Upgrade from ansible"
+    desc: "Performing a Router Upgrade"
+    components: "server1.example.com"
+    api_id: "api_id"
+    api_key: "api_key"
+    statuspage: "statuspage_id"
+    maintenance_notify_1_hr: true
+    automation: true
+
+# Create a maintenance window for 60 minutes on multiple hosts
+- name: "Create maintenance window for server1 and server2"
+  local_action:
+    module: statusio_maintenance
+    title: "Routine maintenance"
+    desc: "Some security updates"
+    components:
+      - "server1.example.com"
+      - "server2.example.com"
+    minutes: "60"
+    api_id: "api_id"
+    api_key: "api_key"
+    statuspage: "statuspage_id"
+    maintenance_notify_1_hr: true
+    automation: true
+
+# Create a future maintenance window for 24 hours for all hosts inside the
+# Primary Data Center
+- statusio_maintenance:
+    title: Data center downtime
+    desc: Performing an upgrade to our data center
+    components: "Primary Data Center"
+    api_id: "api_id"
+    api_key: "api_key"
+    statuspage: "statuspage_id"
+    start_date: "01/01/2016"
+    start_time: "12:00"
+    minutes: 1440

+# Delete a maintenance window
+- statusio_maintenance:
+    title: "Remove a maintenance window"
+    maintenance_id: "561f90faf74bc94a4700087b"
+    statuspage: "statuspage_id"
+    api_id: "api_id"
+    api_key: "api_key"
+    state: absent
+
+'''
+# TODO: Add RETURN documentation.
+RETURN = ''' # '''
+
+import datetime
+
+
+def get_api_auth_headers(api_id, api_key, url, statuspage):
+
+    headers = {
+        "x-api-id": api_id,
+        "x-api-key": api_key,
+        "Content-Type": "application/json"
+    }
+
+    try:
+        response = open_url(
+            url + "/v2/component/list/" + statuspage, headers=headers)
+        data = json.loads(response.read())
+        if data['status']['message'] == 'Authentication failed':
+            return 1, None, None, "Authentication failed: " \
+                "Check api_id/api_key and statuspage id."
+        else:
+            auth_headers = headers
+            auth_content = data
+    except Exception:
+        e = get_exception()
+        return 1, None, None, str(e)
+    return 0, auth_headers, auth_content, None
+
+
+def get_component_ids(auth_content, components):
+    host_ids = []
+    lower_components = [x.lower() for x in components]
+    for result in auth_content["result"]:
+        if result['name'].lower() in lower_components:
+            data = {
+                "component_id": result["_id"],
+                "container_id": result["containers"][0]["_id"]
+            }
+            host_ids.append(data)
+            lower_components.remove(result['name'].lower())
+    if len(lower_components):
+        # items not found in the api
+        return 1, None, lower_components
+    return 0, host_ids, None
+
+
+def get_container_ids(auth_content, containers):
+    host_ids = []
+    lower_containers = [x.lower() for x in containers]
+    for result in auth_content["result"]:
+        if result["containers"][0]["name"].lower() in lower_containers:
+            data = {
+                "component_id": result["_id"],
+                "container_id": result["containers"][0]["_id"]
+            }
+            host_ids.append(data)
+            lower_containers.remove(result["containers"][0]["name"].lower())
+
+    if len(lower_containers):
+        # items not found in the api
+        return 1, None, lower_containers
+    return 0, host_ids, None
+
+
+def get_date_time(start_date, start_time, minutes):
+    returned_date = []
+    if start_date and start_time:
+        try:
+            datetime.datetime.strptime(start_date, '%m/%d/%Y')
+            returned_date.append(start_date)
+        except (NameError, ValueError):
+            return 1, None, "Not a valid start_date format."
+        try:
+            datetime.datetime.strptime(start_time, '%H:%M')
+            returned_date.append(start_time)
+        except (NameError, ValueError):
+            return 1, None, "Not a valid start_time format."
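+        # The end stamp is derived next by parsing both fields with one
+        # concatenated format string and adding a timedelta. A minimal sketch
+        # with hypothetical values (not part of the module logic):
+        #   start = datetime.datetime.strptime('12:0001/01/2016', '%H:%M%m/%d/%Y')
+        #   end = start + datetime.timedelta(minutes=90)
+        #   end.strftime('%m/%d/%Y'), end.strftime('%H:%M')  # ('01/01/2016', '13:30')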
+ try: + # Work out end date/time based on minutes + date_time_start = datetime.datetime.strptime( + start_time + start_date, '%H:%M%m/%d/%Y') + delta = date_time_start + datetime.timedelta(minutes=minutes) + returned_date.append(delta.strftime("%m/%d/%Y")) + returned_date.append(delta.strftime("%H:%M")) + except (NameError, ValueError): + return 1, None, "Couldn't work out a valid date" + else: + now = datetime.datetime.utcnow() + delta = now + datetime.timedelta(minutes=minutes) + # start_date + returned_date.append(now.strftime("%m/%d/%Y")) + returned_date.append(now.strftime("%H:%M")) + # end_date + returned_date.append(delta.strftime("%m/%d/%Y")) + returned_date.append(delta.strftime("%H:%M")) + return 0, returned_date, None + + +def create_maintenance(auth_headers, url, statuspage, host_ids, + all_infrastructure_affected, automation, title, desc, + returned_date, maintenance_notify_now, + maintenance_notify_72_hr, maintenance_notify_24_hr, + maintenance_notify_1_hr): + returned_dates = [[x] for x in returned_date] + component_id = [] + container_id = [] + for val in host_ids: + component_id.append(val['component_id']) + container_id.append(val['container_id']) + try: + values = json.dumps({ + "statuspage_id": statuspage, + "components": component_id, + "containers": container_id, + "all_infrastructure_affected": + str(int(all_infrastructure_affected)), + "automation": str(int(automation)), + "maintenance_name": title, + "maintenance_details": desc, + "date_planned_start": returned_dates[0], + "time_planned_start": returned_dates[1], + "date_planned_end": returned_dates[2], + "time_planned_end": returned_dates[3], + "maintenance_notify_now": str(int(maintenance_notify_now)), + "maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)), + "maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)), + "maintenance_notify_1_hr": str(int(maintenance_notify_1_hr)) + }) + response = open_url( + url + "/v2/maintenance/schedule", data=values, + headers=auth_headers) + data = json.loads(response.read()) + + if data["status"]["error"] == "yes": + return 1, None, data["status"]["message"] + except Exception: + e = get_exception() + return 1, None, str(e) + return 0, None, None + + +def delete_maintenance(auth_headers, url, statuspage, maintenance_id): + try: + values = json.dumps({ + "statuspage_id": statuspage, + "maintenance_id": maintenance_id, + }) + response = open_url( + url=url + "/v2/maintenance/delete", + data=values, + headers=auth_headers) + data = json.loads(response.read()) + if data["status"]["error"] == "yes": + return 1, None, "Invalid maintenance_id" + except Exception: + e = get_exception() + return 1, None, str(e) + return 0, None, None + + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_id=dict(required=True), + api_key=dict(required=True, no_log=True), + statuspage=dict(required=True), + state=dict(required=False, default='present', + choices=['present', 'absent']), + url=dict(default='https://api.status.io', required=False), + components=dict(type='list', required=False, default=None, + aliases=['component']), + containers=dict(type='list', required=False, default=None, + aliases=['container']), + all_infrastructure_affected=dict(type='bool', default=False, + required=False), + automation=dict(type='bool', default=False, required=False), + title=dict(required=False, default='A new maintenance window'), + desc=dict(required=False, default='Created by Ansible'), + minutes=dict(type='int', required=False, default=10), + 
maintenance_notify_now=dict(type='bool', default=False, + required=False), + maintenance_notify_72_hr=dict(type='bool', default=False, + required=False), + maintenance_notify_24_hr=dict(type='bool', default=False, + required=False), + maintenance_notify_1_hr=dict(type='bool', default=False, + required=False), + maintenance_id=dict(required=False, default=None), + start_date=dict(default=None, required=False), + start_time=dict(default=None, required=False) + ), + supports_check_mode=True, + ) + + api_id = module.params['api_id'] + api_key = module.params['api_key'] + statuspage = module.params['statuspage'] + state = module.params['state'] + url = module.params['url'] + components = module.params['components'] + containers = module.params['containers'] + all_infrastructure_affected = module.params['all_infrastructure_affected'] + automation = module.params['automation'] + title = module.params['title'] + desc = module.params['desc'] + minutes = module.params['minutes'] + maintenance_notify_now = module.params['maintenance_notify_now'] + maintenance_notify_72_hr = module.params['maintenance_notify_72_hr'] + maintenance_notify_24_hr = module.params['maintenance_notify_24_hr'] + maintenance_notify_1_hr = module.params['maintenance_notify_1_hr'] + maintenance_id = module.params['maintenance_id'] + start_date = module.params['start_date'] + start_time = module.params['start_time'] + + if state == "present": + + if api_id and api_key: + (rc, auth_headers, auth_content, error) = \ + get_api_auth_headers(api_id, api_key, url, statuspage) + if rc != 0: + module.fail_json(msg="Failed to get auth keys: %s" % error) + else: + auth_headers = {} + auth_content = {} + + if minutes or start_time and start_date: + (rc, returned_date, error) = get_date_time( + start_date, start_time, minutes) + if rc != 0: + module.fail_json(msg="Failed to set date/time: %s" % error) + + if not components and not containers: + return module.fail_json(msg="A Component or Container must be " + "defined") + elif components and containers: + return module.fail_json(msg="Components and containers cannot " + "be used together") + else: + if components: + (rc, host_ids, error) = get_component_ids(auth_content, + components) + if rc != 0: + module.fail_json(msg="Failed to find component %s" % error) + + if containers: + (rc, host_ids, error) = get_container_ids(auth_content, + containers) + if rc != 0: + module.fail_json(msg="Failed to find container %s" % error) + + if module.check_mode: + module.exit_json(changed=True) + else: + (rc, _, error) = create_maintenance( + auth_headers, url, statuspage, host_ids, + all_infrastructure_affected, automation, + title, desc, returned_date, maintenance_notify_now, + maintenance_notify_72_hr, maintenance_notify_24_hr, + maintenance_notify_1_hr) + if rc == 0: + module.exit_json(changed=True, result="Successfully created " + "maintenance") + else: + module.fail_json(msg="Failed to create maintenance: %s" + % error) + + if state == "absent": + + if api_id and api_key: + (rc, auth_headers, auth_content, error) = \ + get_api_auth_headers(api_id, api_key, url, statuspage) + if rc != 0: + module.fail_json(msg="Failed to get auth keys: %s" % error) + else: + auth_headers = {} + + if module.check_mode: + module.exit_json(changed=True) + else: + (rc, _, error) = delete_maintenance( + auth_headers, url, statuspage, maintenance_id) + if rc == 0: + module.exit_json( + changed=True, + result="Successfully deleted maintenance" + ) + else: + module.fail_json( + msg="Failed to delete maintenance: %s" % error) 
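+
+# Every helper above returns an (rc, data, error) tuple instead of raising, so
+# main() can funnel failures through module.fail_json(). A hypothetical caller
+# outside Ansible would unpack it the same way:
+#   rc, host_ids, error = get_component_ids(auth_content, ['server1.example.com'])
+#   if rc != 0:
+#       raise RuntimeError(error)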
+ +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +if __name__ == '__main__': + main() diff --git a/monitoring/uptimerobot.py b/monitoring/uptimerobot.py index bdff8f1f134..3a87c3838a6 100644 --- a/monitoring/uptimerobot.py +++ b/monitoring/uptimerobot.py @@ -15,6 +15,10 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' module: uptimerobot @@ -53,18 +57,27 @@ EXAMPLES = ''' # Pause the monitor with an ID of 12345. -- uptimerobot: monitorid=12345 - apikey=12345-1234512345 - state=paused +- uptimerobot: + monitorid: 12345 + apikey: 12345-1234512345 + state: paused # Start the monitor with an ID of 12345. -- uptimerobot: monitorid=12345 - apikey=12345-1234512345 - state=started - +- uptimerobot: + monitorid: 12345 + apikey: 12345-1234512345 + state: started ''' -import json +try: + import json +except ImportError: + try: + import simplejson as json + except ImportError: + # Let snippet from module_utils/basic.py return a proper error in this case + pass + import urllib import time diff --git a/monitoring/zabbix_group.py b/monitoring/zabbix_group.py index 4aad1218789..ff90db01bea 100644 --- a/monitoring/zabbix_group.py +++ b/monitoring/zabbix_group.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: zabbix_group @@ -49,6 +53,18 @@ description: - Zabbix user password. required: true + http_login_user: + description: + - Basic Auth login + required: false + default: None + version_added: "2.1" + http_login_password: + description: + - Basic Auth password + required: false + default: None + version_added: "2.1" state: description: - Create or delete host group. 
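The hunks below swap the hostgroup.exists call for a filtered hostgroup.get,
apparently because newer Zabbix API versions drop the *.exists helpers; get
returns the matching objects, or an empty list when there is no match. A
minimal sketch of the pattern, assuming a logged-in zabbix-api client named
zapi:

    # truthy when the group exists, empty list otherwise
    result = zapi.hostgroup.get({'filter': {'name': 'Linux servers'}})
    if not result:
        zapi.hostgroup.create({'name': 'Linux servers'})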
@@ -114,7 +130,7 @@ def create_host_group(self, group_names): try: group_add_list = [] for group_name in group_names: - result = self._zapi.hostgroup.exists({'name': group_name}) + result = self._zapi.hostgroup.get({'filter': {'name': group_name}}) if not result: try: if self._module.check_mode: @@ -124,7 +140,7 @@ def create_host_group(self, group_names): except Already_Exists: return group_add_list return group_add_list - except Exception, e: + except Exception as e: self._module.fail_json(msg="Failed to create host group(s): %s" % e) # delete host group(s) @@ -133,7 +149,7 @@ def delete_host_group(self, group_ids): if self._module.check_mode: self._module.exit_json(changed=True) self._zapi.hostgroup.delete(group_ids) - except Exception, e: + except Exception as e: self._module.fail_json(msg="Failed to delete host group(s), Exception: %s" % e) # get group ids by name @@ -150,10 +166,12 @@ def get_group_ids(self, host_groups): def main(): module = AnsibleModule( argument_spec=dict( - server_url=dict(required=True, aliases=['url']), - login_user=dict(required=True), - login_password=dict(required=True, no_log=True), - host_groups=dict(required=True, aliases=['host_group']), + server_url=dict(type='str', required=True, aliases=['url']), + login_user=dict(type='str', required=True), + login_password=dict(type='str', required=True, no_log=True), + http_login_user=dict(type='str',required=False, default=None), + http_login_password=dict(type='str',required=False, default=None, no_log=True), + host_groups=dict(type='list', required=True, aliases=['host_group']), state=dict(default="present", choices=['present','absent']), timeout=dict(type='int', default=10) ), @@ -166,6 +184,8 @@ def main(): server_url = module.params['server_url'] login_user = module.params['login_user'] login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = module.params['http_login_password'] host_groups = module.params['host_groups'] state = module.params['state'] timeout = module.params['timeout'] @@ -174,9 +194,9 @@ def main(): # login to zabbix try: - zbx = ZabbixAPI(server_url, timeout=timeout) + zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password) zbx.login(login_user, login_password) - except Exception, e: + except Exception as e: module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) hostGroup = HostGroup(module, zbx) @@ -206,4 +226,6 @@ def main(): module.exit_json(changed=False) from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py index 6fac82c7177..aa113efe508 100644 --- a/monitoring/zabbix_host.py +++ b/monitoring/zabbix_host.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: zabbix_host @@ -26,7 +30,7 @@ description: - This module allows you to create, modify and delete Zabbix host entries and associated group and template data. version_added: "2.0" -author: +author: - "(@cove)" - "Tony Minfei Ding" - "Harrison Gu (@harrisongu)" @@ -47,11 +51,28 @@ description: - Zabbix user password. 
required: true + http_login_user: + description: + - Basic Auth login + required: false + default: None + version_added: "2.1" + http_login_password: + description: + - Basic Auth password + required: false + default: None + version_added: "2.1" host_name: description: - Name of the host in Zabbix. - host_name is the unique identifier used and cannot be updated using this module. required: true + visible_name: + description: + - Visible name of the host in Zabbix. + required: false + version_added: '2.3' host_groups: description: - List of host groups the host is part of. @@ -61,6 +82,13 @@ - List of templates linked to the host. required: false default: None + inventory_mode: + description: + - Configure the inventory mode. + choices: ['automatic', 'manual', 'disabled'] + required: false + default: None + version_added: '2.1' status: description: - Monitoring status of the host. @@ -91,6 +119,13 @@ - 'https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface' required: false default: [] + force: + description: + - Overwrite the host configuration, even if already present + required: false + default: "yes" + choices: [ "yes", "no" ] + version_added: "2.0" ''' EXAMPLES = ''' @@ -101,6 +136,7 @@ login_user: username login_password: password host_name: ExampleHost + visible_name: ExampleName host_groups: - Example group1 - Example group2 @@ -109,6 +145,7 @@ - Example template2 status: enabled state: present + inventory_mode: automatic interfaces: - type: 1 main: 1 @@ -143,8 +180,8 @@ class ZabbixAPIExtends(ZabbixAPI): hostinterface = None - def __init__(self, server, timeout, **kwargs): - ZabbixAPI.__init__(self, server, timeout=timeout) + def __init__(self, server, timeout, user, passwd, **kwargs): + ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd) self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs)) @@ -155,13 +192,13 @@ def __init__(self, module, zbx): # exist host def is_host_exist(self, host_name): - result = self._zapi.host.exists({'host': host_name}) + result = self._zapi.host.get({'filter': {'host': host_name}}) return result # check if host group exists def check_host_group_exist(self, group_names): for group_name in group_names: - result = self._zapi.hostgroup.exists({'name': group_name}) + result = self._zapi.hostgroup.get({'filter': {'name': group_name}}) if not result: self._module.fail_json(msg="Hostgroup not found: %s" % group_name) return True @@ -179,24 +216,30 @@ def get_template_ids(self, template_list): template_ids.append(template_id) return template_ids - def add_host(self, host_name, group_ids, status, interfaces, proxy_id): + def add_host(self, host_name, group_ids, status, interfaces, proxy_id, visible_name): try: if self._module.check_mode: self._module.exit_json(changed=True) parameters = {'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status} if proxy_id: parameters['proxy_hostid'] = proxy_id + if visible_name: + parameters['name'] = visible_name host_list = self._zapi.host.create(parameters) if len(host_list) >= 1: return host_list['hostids'][0] - except Exception, e: + except Exception as e: self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e)) - def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id): + def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id, visible_name): try: if self._module.check_mode: 
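                # in check mode, report the pending change and exit before any
                # Zabbix API call is made; the same guard protects every
                # mutating method in this class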
self._module.exit_json(changed=True) - parameters = {'hostid': host_id, 'groups': group_ids, 'status': status, 'proxy_hostid': proxy_id} + parameters = {'hostid': host_id, 'groups': group_ids, 'status': status} + if proxy_id: + parameters['proxy_hostid'] = proxy_id + if visible_name: + parameters['name'] = visible_name self._zapi.host.update(parameters) interface_list_copy = exist_interface_list if interfaces: @@ -224,15 +267,15 @@ def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_i remove_interface_ids.append(interface_id) if len(remove_interface_ids) > 0: self._zapi.hostinterface.delete(remove_interface_ids) - except Exception, e: + except Exception as e: self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e)) def delete_host(self, host_id, host_name): try: if self._module.check_mode: self._module.exit_json(changed=True) - self._zapi.host.delete({'hostid': host_id}) - except Exception, e: + self._zapi.host.delete([host_id]) + except Exception as e: self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e)) # get host by host name @@ -282,9 +325,11 @@ def get_host_groups_by_host_id(self, host_id): # check the exist_interfaces whether it equals the interfaces or not def check_interface_properties(self, exist_interface_list, interfaces): interfaces_port_list = [] - if len(interfaces) >= 1: - for interface in interfaces: - interfaces_port_list.append(int(interface['port'])) + + if interfaces is not None: + if len(interfaces) >= 1: + for interface in interfaces: + interfaces_port_list.append(int(interface['port'])) exist_interface_ports = [] if len(exist_interface_list) >= 1: @@ -311,7 +356,7 @@ def get_host_status_by_host(self, host): # check all the properties before link or clear template def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids, - exist_interfaces, host, proxy_id): + exist_interfaces, host, proxy_id, visible_name): # get the existing host's groups exist_host_groups = self.get_host_groups_by_host_id(host_id) if set(host_groups) != set(exist_host_groups): @@ -333,7 +378,10 @@ def check_all_properties(self, host_id, host_groups, status, interfaces, templat if host['proxy_hostid'] != proxy_id: return True - + + if host['name'] != visible_name: + return True + return False # link or clear template of the host @@ -353,24 +401,52 @@ def link_or_clear_template(self, host_id, template_id_list): if self._module.check_mode: self._module.exit_json(changed=True) self._zapi.host.update(request_str) - except Exception, e: + except Exception as e: self._module.fail_json(msg="Failed to link template to host: %s" % e) + # Update the host inventory_mode + def update_inventory_mode(self, host_id, inventory_mode): + + # nothing was set, do nothing + if not inventory_mode: + return + + if inventory_mode == "automatic": + inventory_mode = int(1) + elif inventory_mode == "manual": + inventory_mode = int(0) + elif inventory_mode == "disabled": + inventory_mode = int(-1) + + # watch for - https://support.zabbix.com/browse/ZBX-6033 + request_str = {'hostid': host_id, 'inventory_mode': inventory_mode} + try: + if self._module.check_mode: + self._module.exit_json(changed=True) + self._zapi.host.update(request_str) + except Exception as e: + self._module.fail_json(msg="Failed to set inventory_mode to host: %s" % e) def main(): module = AnsibleModule( argument_spec=dict( - server_url=dict(required=True, aliases=['url']), - login_user=dict(required=True), - login_password=dict(required=True, no_log=True), - 
host_name=dict(required=True), - host_groups=dict(required=False), - link_templates=dict(required=False), + server_url=dict(type='str', required=True, aliases=['url']), + login_user=dict(type='str', required=True), + login_password=dict(type='str', required=True, no_log=True), + host_name=dict(type='str', required=True), + http_login_user=dict(type='str', required=False, default=None), + http_login_password=dict(type='str', required=False, default=None, no_log=True), + host_groups=dict(type='list', required=False), + link_templates=dict(type='list', required=False), status=dict(default="enabled", choices=['enabled', 'disabled']), state=dict(default="present", choices=['present', 'absent']), + inventory_mode=dict(required=False, choices=['automatic', 'manual', 'disabled']), timeout=dict(type='int', default=10), - interfaces=dict(required=False), - proxy=dict(required=False) + interfaces=dict(type='list', required=False), + force=dict(type='bool', default=True), + proxy=dict(type='str', required=False), + visible_name=dict(type='str', required=False) + ), supports_check_mode=True ) @@ -381,13 +457,18 @@ def main(): server_url = module.params['server_url'] login_user = module.params['login_user'] login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = module.params['http_login_password'] host_name = module.params['host_name'] + visible_name = module.params['visible_name'] host_groups = module.params['host_groups'] link_templates = module.params['link_templates'] + inventory_mode = module.params['inventory_mode'] status = module.params['status'] state = module.params['state'] timeout = module.params['timeout'] interfaces = module.params['interfaces'] + force = module.params['force'] proxy = module.params['proxy'] # convert enabled to 0; disabled to 1 @@ -396,9 +477,9 @@ def main(): zbx = None # login to zabbix try: - zbx = ZabbixAPIExtends(server_url, timeout=timeout) + zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password) zbx.login(login_user, login_password) - except Exception, e: + except Exception as e: module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) host = Host(module, zbx) @@ -418,15 +499,16 @@ def main(): if interface['type'] == 1: ip = interface['ip'] - proxy_id = "0" - - if proxy: - proxy_id = host.get_proxyid_by_proxy_name(proxy) - # check if host exist is_host_exist = host.is_host_exist(host_name) if is_host_exist: + # Use proxy specified, or set to None when updating host + if proxy: + proxy_id = host.get_proxyid_by_proxy_name(proxy) + else: + proxy_id = None + # get host id by host name zabbix_host_obj = host.get_host_by_host_name(host_name) host_id = zabbix_host_obj['hostid'] @@ -439,6 +521,9 @@ def main(): if not group_ids: module.fail_json(msg="Specify at least one group for updating host '%s'." 
                                  % host_name)

+        if not force:
+            module.fail_json(changed=False, result="Host present; cannot update configuration without force")
+
         # get exist host's interfaces
         exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})
         exist_interfaces_copy = copy.deepcopy(exist_interfaces)
@@ -448,10 +533,10 @@ def main():
         if len(exist_interfaces) > interfaces_len:
             if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
-                                         exist_interfaces, zabbix_host_obj, proxy_id):
+                                         exist_interfaces, zabbix_host_obj, proxy_id, visible_name):
                 host.link_or_clear_template(host_id, template_ids)
                 host.update_host(host_name, group_ids, status, host_id,
-                                 interfaces, exist_interfaces, proxy_id)
+                                 interfaces, exist_interfaces, proxy_id, visible_name)
                 module.exit_json(changed=True,
                                  result="Successfully updated host %s (%s) and linked with template '%s'"
                                         % (host_name, ip, link_templates))
@@ -459,15 +544,26 @@ def main():
                 module.exit_json(changed=False)
         else:
             if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
-                                         exist_interfaces_copy, zabbix_host_obj, proxy_id):
-                host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces, proxy_id)
+                                         exist_interfaces_copy, zabbix_host_obj, proxy_id, visible_name):
+                host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces, proxy_id, visible_name)
                 host.link_or_clear_template(host_id, template_ids)
+                host.update_inventory_mode(host_id, inventory_mode)
                 module.exit_json(changed=True,
                                  result="Successfully updated host %s (%s) and linked with template '%s'"
                                         % (host_name, ip, link_templates))
             else:
                 module.exit_json(changed=False)
     else:
+        if state == "absent":
+            # the host is already deleted.
+            module.exit_json(changed=False)
+
+        # Use proxy specified, or set to 0 when adding new host
+        if proxy:
+            proxy_id = host.get_proxyid_by_proxy_name(proxy)
+        else:
+            proxy_id = 0
+
         if not group_ids:
             module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name)

@@ -475,11 +571,13 @@ def main():
             module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)

         # create host
-        host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id)
+        host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id, visible_name)
         host.link_or_clear_template(host_id, template_ids)
+        host.update_inventory_mode(host_id, inventory_mode)
         module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % (
             host_name, ip, link_templates))

 from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+    main()
diff --git a/monitoring/zabbix_hostmacro.py b/monitoring/zabbix_hostmacro.py
index e8d65370760..75c552cf229 100644
--- a/monitoring/zabbix_hostmacro.py
+++ b/monitoring/zabbix_hostmacro.py
@@ -19,6 +19,10 @@
 # along with Ansible. If not, see .
 #
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: zabbix_hostmacro
@@ -46,6 +50,18 @@
     description:
       - Zabbix user password.
     required: true
+  http_login_user:
+    description:
+      - Basic Auth login
+    required: false
+    default: None
+    version_added: "2.1"
+  http_login_password:
+    description:
+      - Basic Auth password
+    required: false
+    default: None
+    version_added: "2.1"
   host_name:
     description:
       - Name of the host.
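Zabbix stores user macros in the {$NAME} form; as create_host_macro below
shows, the module upper-cases macro_name and adds the wrapper itself, so plays
can pass a bare name. A minimal sketch of that normalization (illustrative
helper, not part of the module):

    def to_zabbix_macro(name):
        # 'site_url' -> '{$SITE_URL}'
        return '{$' + name.upper() + '}'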
@@ -80,8 +96,8 @@
     login_user: username
     login_password: password
     host_name: ExampleHost
-    macro_name:Example macro
-    macro_value:Example value
+    macro_name: Example macro
+    macro_value: Example value
     state: present
 '''

@@ -99,8 +115,8 @@
 # Extend the ZabbixAPI
 # Since the zabbix-api python module is too old (version 1.0, no higher version so far).
 class ZabbixAPIExtends(ZabbixAPI):
-    def __init__(self, server, timeout, **kwargs):
-        ZabbixAPI.__init__(self, server, timeout=timeout)
+    def __init__(self, server, timeout, user, passwd, **kwargs):
+        ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)


 class HostMacro(object):
@@ -108,11 +124,6 @@
     def __init__(self, module, zbx):
         self._module = module
         self._zapi = zbx

-    # exist host
-    def is_host_exist(self, host_name):
-        result = self._zapi.host.exists({'host': host_name})
-        return result
-
     # get host id by host name
     def get_host_id(self, host_name):
         try:
@@ -122,7 +133,7 @@
             else:
                 host_id = host_list[0]['hostid']
                 return host_id
-        except Exception, e:
+        except Exception as e:
             self._module.fail_json(msg="Failed to get the host %s id: %s." % (host_name, e))

     # get host macro
@@ -133,7 +144,7 @@
             if len(host_macro_list) > 0:
                 return host_macro_list[0]
             return None
-        except Exception, e:
+        except Exception as e:
             self._module.fail_json(msg="Failed to get host macro %s: %s" % (macro_name, e))

     # create host macro
@@ -143,18 +154,20 @@
                 self._module.exit_json(changed=True)
             self._zapi.usermacro.create({'hostid': host_id, 'macro': '{$' + macro_name + '}', 'value': macro_value})
             self._module.exit_json(changed=True, result="Successfully added host macro %s " % macro_name)
-        except Exception, e:
+        except Exception as e:
             self._module.fail_json(msg="Failed to create host macro %s: %s" % (macro_name, e))

     # update host macro
     def update_host_macro(self, host_macro_obj, macro_name, macro_value):
         host_macro_id = host_macro_obj['hostmacroid']
+        if host_macro_obj['macro'] == '{$'+macro_name+'}' and host_macro_obj['value'] == macro_value:
+            self._module.exit_json(changed=False, result="Host macro %s already up to date" % macro_name)
         try:
             if self._module.check_mode:
                 self._module.exit_json(changed=True)
             self._zapi.usermacro.update({'hostmacroid': host_macro_id, 'value': macro_value})
             self._module.exit_json(changed=True, result="Successfully updated host macro %s " % macro_name)
-        except Exception, e:
+        except Exception as e:
             self._module.fail_json(msg="Failed to update host macro %s: %s" % (macro_name, e))

     # delete host macro
@@ -165,18 +178,20 @@
                 self._module.exit_json(changed=True)
             self._zapi.usermacro.delete([host_macro_id])
             self._module.exit_json(changed=True, result="Successfully deleted host macro %s " % macro_name)
-        except Exception, e:
+        except Exception as e:
             self._module.fail_json(msg="Failed to delete host macro %s: %s" % (macro_name, e))


 def main():
     module = AnsibleModule(
         argument_spec=dict(
-            server_url=dict(required=True, aliases=['url']),
-            login_user=dict(required=True),
-            login_password=dict(required=True, no_log=True),
-            host_name=dict(required=True),
-            macro_name=dict(required=True),
-            macro_value=dict(required=True),
+            server_url=dict(type='str', required=True, aliases=['url']),
+            login_user=dict(type='str', required=True),
+            login_password=dict(type='str', required=True, no_log=True),
+            http_login_user=dict(type='str', required=False,
default=None), + http_login_password=dict(type='str', required=False, default=None, no_log=True), + host_name=dict(type='str', required=True), + macro_name=dict(type='str', required=True), + macro_value=dict(type='str', required=True), state=dict(default="present", choices=['present', 'absent']), timeout=dict(type='int', default=10) ), @@ -189,6 +204,8 @@ def main(): server_url = module.params['server_url'] login_user = module.params['login_user'] login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = module.params['http_login_password'] host_name = module.params['host_name'] macro_name = (module.params['macro_name']).upper() macro_value = module.params['macro_value'] @@ -198,9 +215,9 @@ def main(): zbx = None # login to zabbix try: - zbx = ZabbixAPIExtends(server_url, timeout=timeout) + zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password) zbx.login(login_user, login_password) - except Exception, e: + except Exception as e: module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) host_macro_class_obj = HostMacro(module, zbx) @@ -226,5 +243,6 @@ def main(): host_macro_class_obj.update_host_macro(host_macro_obj, macro_name, macro_value) from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/monitoring/zabbix_maintenance.py b/monitoring/zabbix_maintenance.py index 2d611382919..4d4c1d972a2 100644 --- a/monitoring/zabbix_maintenance.py +++ b/monitoring/zabbix_maintenance.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' module: zabbix_maintenance @@ -52,6 +56,18 @@ description: - Zabbix user password. required: true + http_login_user: + description: + - Basic Auth login + required: false + default: None + version_added: "2.1" + http_login_password: + description: + - Basic Auth password + required: false + default: None + version_added: "2.1" host_names: description: - Hosts to manage maintenance window for. @@ -91,6 +107,12 @@ - Type of maintenance. With data collection, or without. required: false default: "true" + timeout: + description: + - The timeout of API request (seconds). + default: 10 + version_added: "2.1" + required: false notes: - Useful for setting hosts in maintenance mode before big update, and removing maintenance window after update. 
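The new http_login_user/http_login_password options are for Zabbix front ends
that sit behind HTTP basic authentication; they are handed straight to the
zabbix-api client constructor and are separate from the API-level login, as
the main() hunk further down shows. A minimal sketch of the resulting call
sequence (hypothetical credentials):

    from zabbix_api import ZabbixAPI

    zbx = ZabbixAPI('https://monitoring.example.com', timeout=10,
                    user='basic_auth_user', passwd='basic_auth_pass')
    zbx.login('ansible', 'pAsSwOrD')  # Zabbix user, validated server-side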
@@ -104,40 +126,48 @@ EXAMPLES = ''' # Create maintenance window named "Update of www1" # for host www1.example.com for 90 minutes -- zabbix_maintenance: name="Update of www1" - host_name=www1.example.com - state=present - minutes=90 - server_url=https://monitoring.example.com - login_user=ansible - login_password=pAsSwOrD +- zabbix_maintenance: + name: Update of www1 + host_name: www1.example.com + state: present + minutes: 90 + server_url: 'https://monitoring.example.com' + login_user: ansible + login_password: pAsSwOrD # Create maintenance window named "Mass update" # for host www1.example.com and host groups Office and Dev -- zabbix_maintenance: name="Update of www1" - host_name=www1.example.com - host_groups=Office,Dev - state=present - server_url=https://monitoring.example.com - login_user=ansible - login_password=pAsSwOrD +- zabbix_maintenance: + name: Update of www1 + host_name: www1.example.com + host_groups: + - Office + - Dev + state: present + server_url: 'https://monitoring.example.com' + login_user: ansible + login_password: pAsSwOrD # Create maintenance window named "update" # for hosts www1.example.com and db1.example.com and without data collection. -- zabbix_maintenance: name=update - host_names=www1.example.com,db1.example.com - state=present - collect_data=false - server_url=https://monitoring.example.com - login_user=ansible - login_password=pAsSwOrD +- zabbix_maintenance: + name: update + host_names: + - www1.example.com + - db1.example.com + state: present + collect_data: false + server_url: 'https://monitoring.example.com' + login_user: ansible + login_password: pAsSwOrD # Remove maintenance window named "Test1" -- zabbix_maintenance: name=Test1 - state=absent - server_url=https://monitoring.example.com - login_user=ansible - login_password=pAsSwOrD +- zabbix_maintenance: + name: Test1 + state: absent + server_url: 'https://monitoring.example.com' + login_user: ansible + login_password: pAsSwOrD ''' import datetime @@ -202,18 +232,6 @@ def delete_maintenance(zbx, maintenance_id): return 0, None, None -def check_maintenance(zbx, name): - try: - result = zbx.maintenance.exists( - { - "name": name - } - ) - except BaseException as e: - return 1, None, str(e) - return 0, result, None - - def get_group_ids(zbx, host_groups): group_ids = [] for group in host_groups: @@ -266,15 +284,18 @@ def main(): module = AnsibleModule( argument_spec=dict( state=dict(required=False, default='present', choices=['present', 'absent']), - server_url=dict(required=True, default=None, aliases=['url']), + server_url=dict(type='str', required=True, default=None, aliases=['url']), host_names=dict(type='list', required=False, default=None, aliases=['host_name']), minutes=dict(type='int', required=False, default=10), host_groups=dict(type='list', required=False, default=None, aliases=['host_group']), - login_user=dict(required=True), - login_password=dict(required=True, no_log=True), - name=dict(required=True), - desc=dict(required=False, default="Created by Ansible"), + login_user=dict(type='str', required=True), + login_password=dict(type='str', required=True, no_log=True), + http_login_user=dict(type='str', required=False, default=None), + http_login_password=dict(type='str', required=False, default=None, no_log=True), + name=dict(type='str', required=True), + desc=dict(type='str', required=False, default="Created by Ansible"), collect_data=dict(type='bool', required=False, default=True), + timeout=dict(type='int', default=10), ), supports_check_mode=True, ) @@ -287,18 +308,22 @@ def main(): 
     state = module.params['state']
     login_user = module.params['login_user']
     login_password = module.params['login_password']
+    http_login_user = module.params['http_login_user']
+    http_login_password = module.params['http_login_password']
     minutes = module.params['minutes']
     name = module.params['name']
     desc = module.params['desc']
     server_url = module.params['server_url']
     collect_data = module.params['collect_data']
+    timeout = module.params['timeout']
+
     if collect_data:
         maintenance_type = 0
     else:
         maintenance_type = 1

     try:
-        zbx = ZabbixAPI(server_url)
+        zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
         zbx.login(login_user, login_password)
     except BaseException as e:
         module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
@@ -325,11 +350,11 @@ def main():
         else:
             host_ids = []

-        (rc, exists, error) = check_maintenance(zbx, name)
+        (rc, maintenance, error) = get_maintenance_id(zbx, name)
         if rc != 0:
             module.fail_json(msg="Failed to check maintenance %s existence: %s" % (name, error))

-        if not exists:
+        if not maintenance:
             if not host_names and not host_groups:
                 module.fail_json(msg="At least one host_name or host_group must be defined for each created maintenance.")

@@ -344,26 +369,23 @@ def main():

     if state == "absent":

-        (rc, exists, error) = check_maintenance(zbx, name)
+        (rc, maintenance, error) = get_maintenance_id(zbx, name)
         if rc != 0:
             module.fail_json(msg="Failed to check maintenance %s existence: %s" % (name, error))

-        if exists:
-            (rc, maintenance, error) = get_maintenance_id(zbx, name)
-            if rc != 0:
-                module.fail_json(msg="Failed to get maintenance id: %s" % error)
-
-            if maintenance:
-                if module.check_mode:
+        if maintenance:
+            if module.check_mode:
+                changed = True
+            else:
+                (rc, _, error) = delete_maintenance(zbx, maintenance)
+                if rc == 0:
                     changed = True
                 else:
-                    (rc, _, error) = delete_maintenance(zbx, maintenance)
-                    if rc == 0:
-                        changed = True
-                    else:
-                        module.fail_json(msg="Failed to remove maintenance: %s" % error)
+                    module.fail_json(msg="Failed to remove maintenance: %s" % error)

     module.exit_json(changed=changed)

 from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+    main()
diff --git a/monitoring/zabbix_screen.py b/monitoring/zabbix_screen.py
index 12ef6c69b6f..7e0ade2abe7 100644
--- a/monitoring/zabbix_screen.py
+++ b/monitoring/zabbix_screen.py
@@ -20,6 +24,10 @@
 #

+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: zabbix_screen
@@ -27,7 +31,7 @@
 description:
     - This module allows you to create, modify and delete Zabbix screens and associated graph data.
 version_added: "2.0"
-author: 
+author:
     - "(@cove)"
     - "Tony Minfei Ding"
     - "Harrison Gu (@harrisongu)"
@@ -48,11 +52,23 @@
     description:
       - Zabbix user password.
     required: true
+  http_login_user:
+    description:
+      - Basic Auth login
+    required: false
+    default: None
+    version_added: "2.1"
+  http_login_password:
+    description:
+      - Basic Auth password
+    required: false
+    default: None
+    version_added: "2.1"
   timeout:
    description:
       - The timeout of API request (seconds).
     default: 10
-  zabbix_screens:
+  screens:
     description:
       - List of screens to be created/updated/deleted (see example).
       - If the screen(s) have already been added, the screen(s) name won't be updated.
@@ -142,8 +158,8 @@ class ZabbixAPIExtends(ZabbixAPI): screenitem = None - def __init__(self, server, timeout, **kwargs): - ZabbixAPI.__init__(self, server, timeout=timeout) + def __init__(self, server, timeout, user, passwd, **kwargs): + ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd) self.screenitem = ZabbixAPISubClass(self, dict({"prefix": "screenitem"}, **kwargs)) @@ -315,11 +331,13 @@ def create_screen_items(self, screen_id, hosts, graph_name_list, width, height, def main(): module = AnsibleModule( argument_spec=dict( - server_url=dict(required=True, aliases=['url']), - login_user=dict(required=True), - login_password=dict(required=True, no_log=True), + server_url=dict(type='str', required=True, aliases=['url']), + login_user=dict(type='str', required=True), + login_password=dict(type='str', required=True, no_log=True), + http_login_user=dict(type='str', required=False, default=None), + http_login_password=dict(type='str', required=False, default=None, no_log=True), timeout=dict(type='int', default=10), - screens=dict(type='dict', required=True) + screens=dict(type='list', required=True) ), supports_check_mode=True ) @@ -330,15 +348,17 @@ def main(): server_url = module.params['server_url'] login_user = module.params['login_user'] login_password = module.params['login_password'] + http_login_user = module.params['http_login_user'] + http_login_password = module.params['http_login_password'] timeout = module.params['timeout'] screens = module.params['screens'] zbx = None # login to zabbix try: - zbx = ZabbixAPIExtends(server_url, timeout=timeout) + zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password) zbx.login(login_user, login_password) - except Exception, e: + except Exception as e: module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) screen = Screen(module, zbx) diff --git a/network/a10/a10_server.py b/network/a10/a10_server.py index 2ad66c23588..3a298cb25f4 100644 --- a/network/a10/a10_server.py +++ b/network/a10/a10_server.py @@ -3,7 +3,8 @@ """ Ansible module to manage A10 Networks slb server objects -(c) 2014, Mischa Peters +(c) 2014, Mischa Peters , +2016, Eric Chou This file is part of Ansible @@ -21,57 +22,44 @@ along with Ansible. If not, see . """ +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: a10_server version_added: 1.8 -short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices +short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' server object. description: - - Manage slb server objects on A10 Networks devices via aXAPI -author: "Mischa Peters (@mischapeters)" + - Manage SLB (Server Load Balancer) server objects on A10 Networks devices via aXAPIv2. +author: "Eric Chou (@ericchou) 2016, Mischa Peters (@mischapeters) 2014" notes: - - Requires A10 Networks aXAPI 2.1 + - Requires A10 Networks aXAPI 2.1. 
+extends_documentation_fragment: a10
 options:
-  host:
-    description:
-      - hostname or ip of your A10 Networks device
-    required: true
-    default: null
-    aliases: []
-    choices: []
-  username:
-    description:
-      - admin account of your A10 Networks device
-    required: true
-    default: null
-    aliases: ['user', 'admin']
-    choices: []
-  password:
+  partition:
+    version_added: "2.3"
     description:
-      - admin password of your A10 Networks device
-    required: true
+      - set active-partition
+    required: false
     default: null
-    aliases: ['pass', 'pwd']
-    choices: []
   server_name:
     description:
-      - slb server name
+      - The SLB (Server Load Balancer) server name.
     required: true
-    default: null
     aliases: ['server']
-    choices: []
   server_ip:
     description:
-      - slb server IP address
+      - The SLB server IPv4 address.
     required: false
     default: null
     aliases: ['ip', 'address']
-    choices: []
   server_status:
     description:
-      - slb virtual server status
+      - The SLB virtual server status.
     required: false
-    default: enable
+    default: enabled
     aliases: ['status']
     choices: ['enabled', 'disabled']
   server_ports:
@@ -82,15 +70,25 @@
         required when C(state) is C(present).
     required: false
     default: null
-    aliases: []
-    choices: []
   state:
     description:
-      - create, update or remove slb server
+      - This is to specify the operation to create, update or remove SLB server.
     required: false
     default: present
-    aliases: []
     choices: ['present', 'absent']
+  validate_certs:
+    description:
+      - If C(no), SSL certificates will not be validated. This should only be used
+        on personally controlled devices using self-signed certificates.
+    required: false
+    version_added: 2.3
+    default: 'yes'
+    choices: ['yes', 'no']
+
+'''

 EXAMPLES = '''
 # Create a new server
@@ -99,6 +97,7 @@
     host: a10.mydomain.com
     username: myadmin
     password: mypassword
+    partition: mypartition
     server: test
     server_ip: 1.1.1.100
     server_ports:
@@ -109,6 +108,15 @@

 '''

+RETURN = '''
+content:
+  description: the full info regarding the slb_server
+  returned: success
+  type: string
+  sample: "mynewserver"
+'''
+
+
 VALID_PORT_FIELDS = ['port_num', 'protocol', 'status']

 def validate_ports(module, ports):
@@ -154,6 +162,7 @@ def main():
         server_ip=dict(type='str', aliases=['ip', 'address']),
         server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']),
         server_ports=dict(type='list', aliases=['port'], default=[]),
+        partition=dict(type='str', default=None),
     )
 )

@@ -163,6 +172,7 @@
     )

     host = module.params['host']
+    partition = module.params['partition']
     username = module.params['username']
     password = module.params['password']
     state = module.params['state']
@@ -197,6 +207,8 @@
     if slb_server_status:
         json_post['server']['status'] = axapi_enabled_disabled(slb_server_status)

+    slb_server_partition = axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition}))
+
     slb_server_data = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': slb_server}))
     slb_server_exists = not axapi_failure(slb_server_data)

@@ -270,8 +282,8 @@ def status_needs_update(current_status, new_status):
     else:
         result = dict(msg="the server was not present")

-    # if the config has changed, or we want to force a save, save the config unless otherwise requested
-    if changed or write_config:
+    # if the config has changed, save the config unless otherwise requested
+    if changed and write_config:
         write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
         if axapi_failure(write_result):
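            # a failed write_memory means the change was applied to the running
            # config but not persisted; surface the device's own error message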
            module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
@@ -280,9 +292,12 @@ def status_needs_update(current_status, new_status):
     axapi_call(module, session_url + '&method=session.close')
     module.exit_json(changed=changed, content=result)

-# standard ansible module imports
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.a10 import *
+# ansible module imports
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import url_argument_spec
+from ansible.module_utils.a10 import axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure, axapi_get_port_protocol, axapi_enabled_disabled
+

-main()
+if __name__ == '__main__':
+    main()
diff --git a/network/a10/a10_server_axapi3.py b/network/a10/a10_server_axapi3.py
new file mode 100644
index 00000000000..46f7bf05746
--- /dev/null
+++ b/network/a10/a10_server_axapi3.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+Ansible module to manage A10 Networks slb server objects
+(c) 2014, Mischa Peters , 2016, Eric Chou
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see .
+"""
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: a10_server_axapi3
+version_added: 2.3
+short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices
+description:
+    - Manage SLB (Server Load Balancer) server objects on A10 Networks devices via aXAPIv3.
+author: "Eric Chou (@ericchou) based on previous work by Mischa Peters (@mischapeters)"
+extends_documentation_fragment: a10
+options:
+  server_name:
+    description:
+      - The SLB (Server Load Balancer) server name.
+    required: true
+    aliases: ['server']
+  server_ip:
+    description:
+      - The SLB (Server Load Balancer) server IPv4 address.
+    required: true
+    aliases: ['ip', 'address']
+  server_status:
+    description:
+      - The SLB (Server Load Balancer) virtual server status.
+    required: false
+    default: enable
+    aliases: ['action']
+    choices: ['enable', 'disable']
+  server_ports:
+    description:
+      - A list of ports to create for the server. Each list item should be a dictionary which specifies the C(port:)
+        and C(protocol:).
+    required: false
+    default: null
+  operation:
+    description:
+      - Create, Update or Remove SLB server. For create and update operation, we use the IP address and server
+        name specified in the POST message. For delete operation, we use the server name in the request URI.
+    required: false
+    default: create
+    choices: ['create', 'update', 'delete']
+  validate_certs:
+    description:
+      - If C(no), SSL certificates will not be validated. This should only be used
+        on personally controlled devices using self-signed certificates.
+    required: false
+    default: 'yes'
+    choices: ['yes', 'no']
+
+'''
+
+RETURN = '''
+#
+'''
+
+EXAMPLES = '''
+# Create a new server
+- a10_server_axapi3:
+    host: a10.mydomain.com
+    username: myadmin
+    password: mypassword
+    server: test
+    server_ip: 1.1.1.100
+    validate_certs: false
+    server_status: enable
+    write_config: yes
+    operation: create
+    server_ports:
+      - port-number: 8080
+        protocol: tcp
+        action: enable
+      - port-number: 8443
+        protocol: TCP
+
+'''
+
+VALID_PORT_FIELDS = ['port-number', 'protocol', 'action']
+
+def validate_ports(module, ports):
+    for item in ports:
+        for key in item:
+            if key not in VALID_PORT_FIELDS:
+                module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS)))
+
+        # validate the port number is present and an integer
+        if 'port-number' in item:
+            try:
+                item['port-number'] = int(item['port-number'])
+            except (ValueError, TypeError):
+                module.fail_json(msg="port-number entries in the port definitions must be integers")
+        else:
+            module.fail_json(msg="port definitions must define the port-number field")
+
+        # validate the port protocol is present, no need to convert to the internal API integer value in v3
+        if 'protocol' in item:
+            protocol = item['protocol']
+            if not protocol:
+                module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_PORT_PROTOCOLS))
+            else:
+                item['protocol'] = protocol
+        else:
+            module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_PORT_PROTOCOLS))
+
+        # 'status' is 'action' in AXAPIv3
+        # no need to convert the status, a.k.a action, to the internal API integer value in v3
+        # action is either enable or disable
+        if 'action' in item:
+            action = item['action']
+            if action not in ['enable', 'disable']:
+                module.fail_json(msg="server action must be enable or disable")
+        else:
+            item['action'] = 'enable'
+
+
+def main():
+    argument_spec = a10_argument_spec()
+    argument_spec.update(url_argument_spec())
+    argument_spec.update(
+        dict(
+            operation=dict(type='str', default='create', choices=['create', 'update', 'delete']),
+            server_name=dict(type='str', aliases=['server'], required=True),
+            server_ip=dict(type='str', aliases=['ip', 'address'], required=True),
+            server_status=dict(type='str', default='enable', aliases=['action'], choices=['enable', 'disable']),
+            server_ports=dict(type='list', aliases=['port'], default=[]),
+        )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=False
+    )
+
+    host = module.params['host']
+    username = module.params['username']
+    password = module.params['password']
+    operation = module.params['operation']
+    write_config = module.params['write_config']
+    slb_server = module.params['server_name']
+    slb_server_ip = module.params['server_ip']
+    slb_server_status = module.params['server_status']
+    slb_server_ports = module.params['server_ports']
+
+    axapi_base_url = 'https://{}/axapi/v3/'.format(host)
+    axapi_auth_url = axapi_base_url + 'auth/'
+    signature = axapi_authenticate_v3(module, axapi_auth_url, username, password)
+
+    # validate the ports data structure
+    validate_ports(module, slb_server_ports)
+
+
+    json_post = {
+        "server-list": [
+            {
+                "name": slb_server,
+                "host": slb_server_ip
+            }
+        ]
+    }
+
+    # add optional module parameters
+    if slb_server_ports:
+        json_post['server-list'][0]['port-list'] = slb_server_ports
+
+    if slb_server_status:
+        json_post['server-list'][0]['action'] = slb_server_status
+
+    slb_server_data = axapi_call_v3(module, axapi_base_url+'slb/server/', method='GET', body='',
                                    signature=signature)
+
+    # for empty slb server list
+    if axapi_failure(slb_server_data):
+        slb_server_exists = False
+    else:
+        slb_server_list = [server['name'] for server in slb_server_data['server-list']]
+        if slb_server in slb_server_list:
+            slb_server_exists = True
+        else:
+            slb_server_exists = False
+
+    changed = False
+    if operation == 'create':
+        if not slb_server_exists:
+            result = axapi_call_v3(module, axapi_base_url+'slb/server/', method='POST', body=json.dumps(json_post), signature=signature)
+            if axapi_failure(result):
+                module.fail_json(msg="failed to create the server: %s" % result['response']['err']['msg'])
+            changed = True
+        else:
+            module.fail_json(msg="server already exists, use operation='update' instead")
+            changed = False
+        # if we changed things, get the full info regarding result
+        if changed:
+            result = axapi_call_v3(module, axapi_base_url + 'slb/server/' + slb_server, method='GET', body='', signature=signature)
+        else:
+            result = slb_server_data
+    elif operation == 'delete':
+        if slb_server_exists:
+            result = axapi_call_v3(module, axapi_base_url + 'slb/server/' + slb_server, method='DELETE', body='', signature=signature)
+            if axapi_failure(result):
+                module.fail_json(msg="failed to delete server: %s" % result['response']['err']['msg'])
+            changed = True
+        else:
+            result = dict(msg="the server was not present")
+    elif operation == 'update':
+        if slb_server_exists:
+            result = axapi_call_v3(module, axapi_base_url + 'slb/server/', method='PUT', body=json.dumps(json_post), signature=signature)
+            if axapi_failure(result):
+                module.fail_json(msg="failed to update server: %s" % result['response']['err']['msg'])
+            changed = True
+        else:
+            result = dict(msg="the server was not present")
+
+    # if the config has changed, save the config unless otherwise requested
+    if changed and write_config:
+        write_result = axapi_call_v3(module, axapi_base_url+'write/memory/', method='POST', body='', signature=signature)
+        if axapi_failure(write_result):
+            module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
+
+    # log out gracefully and exit
+    axapi_call_v3(module, axapi_base_url + 'logoff/', method='POST', body='', signature=signature)
+    module.exit_json(changed=changed, content=result)
+
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import url_argument_spec
+from ansible.module_utils.a10 import axapi_call_v3, a10_argument_spec, axapi_authenticate_v3, axapi_failure, AXAPI_PORT_PROTOCOLS
+
+
+if __name__ == '__main__':
+    main()
diff --git a/network/a10/a10_service_group.py b/network/a10/a10_service_group.py
index af664084b6a..486fcb0b3e1 100644
--- a/network/a10/a10_service_group.py
+++ b/network/a10/a10_service_group.py
@@ -3,7 +3,8 @@

 """
 Ansible module to manage A10 Networks slb service-group objects
-(c) 2014, Mischa Peters
+(c) 2014, Mischa Peters ,
+Eric Chou

 This file is part of Ansible
@@ -21,56 +22,45 @@

 along with Ansible. If not, see .
 """

+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: a10_service_group
 version_added: 1.8
-short_description: Manage A10 Networks devices' service groups
+short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' service groups.
 description:
-    - Manage slb service-group objects on A10 Networks devices via aXAPI
-author: "Mischa Peters (@mischapeters)"
+    - Manage SLB (Server Load Balancing) service-group objects on A10 Networks devices via aXAPIv2.
+author: "Eric Chou (@ericchou) 2016, Mischa Peters (@mischapeters) 2014" notes: - - Requires A10 Networks aXAPI 2.1 - - When a server doesn't exist and is added to the service-group the server will be created + - Requires A10 Networks aXAPI 2.1. + - When a server doesn't exist and is added to the service-group the server will be created. +extends_documentation_fragment: a10 options: - host: + partition: + version_added: "2.3" description: - - hostname or ip of your A10 Networks device - required: true - default: null - aliases: [] - choices: [] - username: - description: - - admin account of your A10 Networks device - required: true - default: null - aliases: ['user', 'admin'] - choices: [] - password: - description: - - admin password of your A10 Networks device - required: true + - set active-partition + required: false default: null - aliases: ['pass', 'pwd'] - choices: [] service_group: description: - - slb service-group name + - The SLB (Server Load Balancing) service-group name required: true default: null aliases: ['service', 'pool', 'group'] - choices: [] service_group_protocol: description: - - slb service-group protocol + - The SLB service-group protocol of TCP or UDP. required: false default: tcp aliases: ['proto', 'protocol'] choices: ['tcp', 'udp'] service_group_method: description: - - slb service-group loadbalancing method + - The SLB service-group load balancing method, such as round-robin or weighted-rr. required: false default: round-robin aliases: ['method'] @@ -82,17 +72,6 @@ specify the C(status:). See the examples below for details. required: false default: null - aliases: [] - choices: [] - write_config: - description: - - If C(yes), any changes will cause a write of the running configuration - to non-volatile memory. This will save I(all) configuration changes, - including those that may have been made manually or through other modules, - so care should be taken when specifying C(yes). - required: false - default: "no" - choices: ["yes", "no"] validate_certs: description: - If C(no), SSL certificates will not be validated. 
This should only be used
@@ -103,12 +82,17 @@
 '''
 
 EXAMPLES = '''
 # Create a new service-group
 - a10_service_group:
     host: a10.mydomain.com
     username: myadmin
     password: mypassword
+    partition: mypartition
     service_group: sg-80-tcp
     servers:
       - server: foo1.mydomain.com
@@ -123,6 +107,14 @@
 '''
 
+RETURN = '''
+content:
+  description: the full info regarding the slb_service_group
+  returned: success
+  type: string
+  sample: "mynewservicegroup"
+'''
+
 VALID_SERVICE_GROUP_FIELDS = ['name', 'protocol', 'lb_method']
 VALID_SERVER_FIELDS = ['server', 'port', 'status']
 
@@ -174,6 +166,7 @@ def main():
                                   'src-ip-only-hash', 'src-ip-hash']),
             servers=dict(type='list', aliases=['server', 'member'], default=[]),
+            partition=dict(type='str', default=None),
         )
     )
 
@@ -185,6 +178,7 @@ def main():
     host = module.params['host']
     username = module.params['username']
     password = module.params['password']
+    partition = module.params['partition']
     state = module.params['state']
     write_config = module.params['write_config']
     slb_service_group = module.params['service_group']
@@ -226,7 +220,8 @@ def main():
 
     # first we authenticate to get a session id
     session_url = axapi_authenticate(module, axapi_base_url, username, password)
-
+    # then we select the active-partition, if one was given
+    if partition:
+        slb_server_partition = axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition}))
     # then we check to see if the specified group exists
     slb_result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group}))
     slb_service_group_exist = not axapi_failure(slb_result)
@@ -334,8 +329,11 @@ def main():
     module.exit_json(changed=changed, content=result)
 
 # standard ansible module imports
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.a10 import *
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import url_argument_spec
+from ansible.module_utils.a10 import axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure, axapi_enabled_disabled
+
 
-main()
+if __name__ == '__main__':
+    main()
diff --git a/network/a10/a10_virtual_server.py b/network/a10/a10_virtual_server.py
index 1a04f1a1754..212e65203ac 100644
--- a/network/a10/a10_virtual_server.py
+++ b/network/a10/a10_virtual_server.py
@@ -3,7 +3,8 @@
 """
 Ansible module to manage A10 Networks slb virtual server objects
-(c) 2014, Mischa Peters
+(c) 2014, Mischa Peters ,
+Eric Chou
 
 This file is part of Ansible
 
@@ -21,56 +22,43 @@
 along with Ansible. If not, see .
 """
 
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: a10_virtual_server
 version_added: 1.8
-short_description: Manage A10 Networks devices' virtual servers
+short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' virtual servers.
 description:
-    - Manage slb virtual server objects on A10 Networks devices via aXAPI
-author: "Mischa Peters (@mischapeters)"
+    - Manage SLB (Server Load Balancing) virtual server objects on A10 Networks devices via aXAPIv2.
+author: "Eric Chou (@ericchou) 2016, Mischa Peters (@mischapeters) 2014"
 notes:
-    - Requires A10 Networks aXAPI 2.1
-requirements: []
+    - Requires A10 Networks aXAPI 2.1.
+extends_documentation_fragment: a10
 options:
-  host:
+  partition:
+    version_added: "2.3"
     description:
-      - hostname or ip of your A10 Networks device
-    required: true
-    default: null
-    aliases: []
-    choices: []
-  username:
-    description:
-      - admin account of your A10 Networks device
-    required: true
-    default: null
-    aliases: ['user', 'admin']
-    choices: []
-  password:
-    description:
-      - admin password of your A10 Networks device
-    required: true
+      - Set the active partition.
+    required: false
     default: null
-    aliases: ['pass', 'pwd']
-    choices: []
   virtual_server:
     description:
-      - slb virtual server name
+      - The SLB (Server Load Balancing) virtual server name.
     required: true
     default: null
    aliases: ['vip', 'virtual']
-    choices: []
   virtual_server_ip:
     description:
-      - slb virtual server ip address
+      - The SLB virtual server IPv4 address.
     required: false
     default: null
     aliases: ['ip', 'address']
-    choices: []
   virtual_server_status:
     description:
-      - slb virtual server status
+      - The SLB virtual server status, such as enabled or disabled.
     required: false
     default: enable
     aliases: ['status']
@@ -82,15 +70,6 @@
       specify the C(service_group:) as well as the C(status:). See the examples
       below for details. This parameter is required when C(state) is C(present).
     required: false
-  write_config:
-    description:
-      - If C(yes), any changes will cause a write of the running configuration
-        to non-volatile memory. This will save I(all) configuration changes,
-        including those that may have been made manually or through other modules,
-        so care should be taken when specifying C(yes).
-    required: false
-    default: "no"
-    choices: ["yes", "no"]
   validate_certs:
     description:
      - If C(no), SSL certificates will not be validated. This should only be used
@@ -101,12 +80,17 @@
 
 '''
 
 EXAMPLES = '''
 # Create a new virtual server
 - a10_virtual_server:
     host: a10.mydomain.com
     username: myadmin
     password: mypassword
+    partition: mypartition
     virtual_server: vserver1
     virtual_server_ip: 1.1.1.1
     virtual_server_ports:
@@ -122,6 +106,14 @@
 '''
 
+RETURN = '''
+content:
+  description: the full info regarding the slb_virtual
+  returned: success
+  type: string
+  sample: "mynewvirtualserver"
+'''
+
 VALID_PORT_FIELDS = ['port', 'protocol', 'service_group', 'status']
 
 def validate_ports(module, ports):
@@ -170,6 +162,7 @@ def main():
             virtual_server_ip=dict(type='str', aliases=['ip', 'address'], required=True),
             virtual_server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']),
             virtual_server_ports=dict(type='list', required=True),
+            partition=dict(type='str', default=None),
         )
     )
 
@@ -181,6 +174,7 @@ def main():
     host = module.params['host']
     username = module.params['username']
     password = module.params['password']
+    partition = module.params['partition']
     state = module.params['state']
     write_config = module.params['write_config']
     slb_virtual = module.params['virtual_server']
@@ -196,6 +190,7 @@ def main():
     axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host
     session_url = axapi_authenticate(module, axapi_base_url, username, password)
 
+    # select the active partition first, if one was given
+    if partition:
+        slb_server_partition = axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition}))
     slb_virtual_data = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
     slb_virtual_exists = not axapi_failure(slb_virtual_data)
 
@@ -289,9 +284,11 @@ def needs_update(src_ports, dst_ports):
     module.exit_json(changed=changed, content=result)
 
 # standard ansible module imports
-from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -from ansible.module_utils.a10 import * +import json +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import url_argument_spec +from ansible.module_utils.a10 import axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure, axapi_enabled_disabled, axapi_get_vport_protocol + + if __name__ == '__main__': main() - diff --git a/network/asa/__init__.py b/network/asa/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/network/asa/asa_acl.py b/network/asa/asa_acl.py new file mode 100644 index 00000000000..366284155f2 --- /dev/null +++ b/network/asa/asa_acl.py @@ -0,0 +1,234 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: asa_acl +version_added: "2.2" +author: "Patrick Ogenstad (@ogenstad)" +short_description: Manage access-lists on a Cisco ASA +description: + - This module allows you to work with access-lists on a Cisco ASA device. +extends_documentation_fragment: asa +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. + required: true + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system. + required: false + default: null + after: + description: + - The ordered set of commands to append to the end of the command + stack if a changed needs to be made. Just like with I(before) this + allows the playbook designer to append a set of commands to be + executed after the command set. + required: false + default: null + match: + description: + - Instructs the module on the way to perform the matching of + the set of commands against the current device config. If + match is set to I(line), commands are matched line by line. If + match is set to I(strict), command lines are matched with respect + to position. Finally if match is set to I(exact), command lines + must be an equal match. + required: false + default: line + choices: ['line', 'strict', 'exact'] + replace: + description: + - Instructs the module on the way to perform the configuration + on the device. If the replace argument is set to I(line) then + the modified lines are pushed to the device in configuration + mode. 
If the replace argument is set to I(block) then the entire
+        command block is pushed to the device in configuration mode if any
+        line is not correct.
+    required: false
+    default: line
+    choices: ['line', 'block']
+  force:
+    description:
+      - The force argument instructs the module to not consider the
+        current device's running-config. When set to true, this will
+        cause the module to push the contents of I(src) into the device
+        without first checking if already configured.
+    required: false
+    default: false
+    choices: ['yes', 'no']
+  config:
+    description:
+      - The module, by default, will connect to the remote device and
+        retrieve the current running-config to use as a base for comparing
+        against the contents of source. There are times when it is not
+        desirable to have the task get the current running-config for
+        every task in a playbook. The I(config) argument allows the
+        implementer to pass in the configuration to use as the base
+        config for comparison.
+    required: false
+    default: null
+"""
+
+EXAMPLES = """
+# Note: examples below use the following provider dict to handle
+# transport and authentication to the node.
+vars:
+  cli:
+    host: "{{ inventory_hostname }}"
+    username: cisco
+    password: cisco
+    transport: cli
+    authorize: yes
+    auth_pass: cisco
+
+- asa_acl:
+    lines:
+      - access-list ACL-ANSIBLE extended permit tcp any any eq 82
+      - access-list ACL-ANSIBLE extended permit tcp any any eq www
+      - access-list ACL-ANSIBLE extended permit tcp any any eq 97
+      - access-list ACL-ANSIBLE extended permit tcp any any eq 98
+      - access-list ACL-ANSIBLE extended permit tcp any any eq 99
+    before: clear configure access-list ACL-ANSIBLE
+    match: strict
+    replace: block
+    provider: "{{ cli }}"
+
+- asa_acl:
+    lines:
+      - access-list ACL-OUTSIDE extended permit tcp any any eq www
+      - access-list ACL-OUTSIDE extended permit tcp any any eq https
+    context: customer_a
+    provider: "{{ cli }}"
+"""
+
+RETURN = """
+updates:
+  description: The set of commands that will be pushed to the remote device
+  returned: always
+  type: list
+  sample: ['...', '...']
+
+responses:
+  description: The set of responses from issuing the commands on the device
+  returned: when not check_mode
+  type: list
+  sample: ['...', '...']
+"""
+import ansible.module_utils.asa
+
+from ansible.module_utils.network import NetworkModule
+from ansible.module_utils.netcfg import NetworkConfig, dumps
+
+
+def get_config(module, acl_name):
+    contents = module.params['config']
+    if not contents:
+        contents = module.config.get_config()
+
+    filtered_config = list()
+    for item in contents.split('\n'):
+        if item.startswith('access-list %s ' % acl_name):
+            filtered_config.append(item)
+
+    return NetworkConfig(indent=1, contents='\n'.join(filtered_config))
+
+def parse_acl_name(module):
+    first_line = True
+    for line in module.params['lines']:
+        ace = line.split()
+        if ace[0] != 'access-list':
+            module.fail_json(msg='All lines/commands must begin with "access-list" %s is not permitted' % ace[0])
+        if len(ace) <= 1:
+            module.fail_json(msg='All lines/commands must contain the name of the access-list')
+        if first_line:
+            acl_name = ace[1]
+        else:
+            if acl_name != ace[1]:
+                module.fail_json(msg='All lines/commands must use the same access-list %s is not %s' % (ace[1], acl_name))
+        first_line = False
+
+    return acl_name
+
+def main():
+
+    argument_spec = dict(
+        lines=dict(aliases=['commands'], required=True, type='list'),
+        before=dict(type='list'),
+        after=dict(type='list'),
+        match=dict(default='line', choices=['line', 'strict', 'exact']),
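+        # The match/replace pair mirrors the documented semantics above:
+        # 'line' compares commands one by one, 'strict' also enforces their
+        # position, and 'exact' requires a line-for-line match; combined with
+        # replace='block', the whole ACL is re-pushed if any line differs.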
+        replace=dict(default='line', choices=['line', 'block']),
+        force=dict(default=False, type='bool'),
+        config=dict()
+    )
+
+    module = NetworkModule(argument_spec=argument_spec,
+                           supports_check_mode=True)
+
+    lines = module.params['lines']
+
+    before = module.params['before']
+    after = module.params['after']
+
+    match = module.params['match']
+    replace = module.params['replace']
+
+    result = dict(changed=False)
+
+    candidate = NetworkConfig(indent=1)
+    candidate.add(lines)
+
+    acl_name = parse_acl_name(module)
+
+    if not module.params['force']:
+        # get_config() already returns a NetworkConfig, so use it directly
+        config = get_config(module, acl_name)
+
+        commands = candidate.difference(config)
+        commands = dumps(commands, 'commands').split('\n')
+        commands = [str(c) for c in commands if c]
+    else:
+        commands = str(candidate).split('\n')
+
+    if commands:
+        if not module.check_mode:
+            response = module.config(commands)
+            result['responses'] = response
+        result['changed'] = True
+
+    result['updates'] = commands
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/network/asa/asa_command.py b/network/asa/asa_command.py
new file mode 100644
index 00000000000..3bffcca0425
--- /dev/null
+++ b/network/asa/asa_command.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: asa_command
+version_added: "2.2"
+author: "Peter Sprygada (@privateip), Patrick Ogenstad (@ogenstad)"
+short_description: Run arbitrary commands on Cisco ASA devices.
+description:
+  - Sends arbitrary commands to an ASA node and returns the results
+    read from the device. The M(asa_command) module includes an
+    argument that will cause the module to wait for a specific condition
+    before returning or timing out if the condition is not met.
+extends_documentation_fragment: asa
+options:
+  commands:
+    description:
+      - List of commands to send to the remote device over the
+        configured provider. The resulting output from the command
+        is returned. If the I(wait_for) argument is provided, the
+        module does not return until the condition is satisfied or
+        the number of retries has expired.
+    required: true
+  wait_for:
+    description:
+      - List of conditions to evaluate against the output of the
+        command. The task will wait for each condition to be true
+        before moving forward. If the conditional is not true
+        within the configured number of retries, the task fails.
+        See examples.
+    required: false
+    default: null
+    aliases: ['waitfor']
+  match:
+    description:
+      - The I(match) argument is used in conjunction with the
+        I(wait_for) argument to specify the match policy. Valid
+        values are C(all) or C(any). If the value is set to C(all)
+        then all conditionals in the wait_for must be satisfied. If
+        the value is set to C(any) then only one of the values must be
+        satisfied.
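+      # Illustrative combination of the two arguments (values are examples
+      # only, not taken from this module's EXAMPLES section):
+      #   wait_for:
+      #     - "result[0] contains 'Cisco Adaptive Security Appliance'"
+      #   match: any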
+ required: false + default: all + choices: ['any', 'all'] + retries: + description: + - Specifies the number of retries a command should by tried + before it is considered failed. The command is run on the + target device every retry and evaluated against the + I(wait_for) conditions. + required: false + default: 10 + interval: + description: + - Configures the interval in seconds to wait between retries + of the command. If the command does not pass the specified + conditions, the interval indicates how long to wait before + trying the command again. + required: false + default: 1 +""" + +EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + cli: + host: "{{ inventory_hostname }}" + username: cisco + password: cisco + authorize: yes + auth_pass: cisco + transport: cli + + +- asa_command: + commands: + - show version + provider: "{{ cli }}" + +- asa_command: + commands: + - show asp drop + - show memory + provider: "{{ cli }}" + +- asa_command: + commands: + - show version + provider: "{{ cli }}" + context: system +""" + +RETURN = """ +stdout: + description: the set of responses from the commands + returned: always + type: list + sample: ['...', '...'] + +stdout_lines: + description: The value of stdout split into a list + returned: always + type: list + sample: [['...', '...'], ['...'], ['...']] + +failed_conditions: + description: the conditionals that failed + retured: failed + type: list + sample: ['...', '...'] +""" +from ansible.module_utils.basic import get_exception +from ansible.module_utils.netcli import CommandRunner +from ansible.module_utils.netcli import AddCommandError, FailedConditionsError +from ansible.module_utils.asa import NetworkModule, NetworkError + +VALID_KEYS = ['command', 'prompt', 'response'] + +def to_lines(stdout): + for item in stdout: + if isinstance(item, basestring): + item = str(item).split('\n') + yield item + +def parse_commands(module): + for cmd in module.params['commands']: + if isinstance(cmd, basestring): + cmd = dict(command=cmd, output=None) + elif 'command' not in cmd: + module.fail_json(msg='command keyword argument is required') + elif not set(cmd.keys()).issubset(VALID_KEYS): + module.fail_json(msg='unknown keyword specified') + yield cmd + +def main(): + spec = dict( + # { command: , prompt: , response: } + commands=dict(type='list', required=True), + + wait_for=dict(type='list', aliases=['waitfor']), + match=dict(default='all', choices=['all', 'any']), + + retries=dict(default=10, type='int'), + interval=dict(default=1, type='int') + ) + + module = NetworkModule(argument_spec=spec, + connect_on_load=False, + supports_check_mode=True) + + commands = list(parse_commands(module)) + conditionals = module.params['wait_for'] or list() + + warnings = list() + + runner = CommandRunner(module) + + for cmd in commands: + if module.check_mode and not cmd['command'].startswith('show'): + warnings.append('only show commands are supported when using ' + 'check mode, not executing `%s`' % cmd['command']) + else: + if cmd['command'].startswith('conf'): + module.fail_json(msg='asa_command does not support running ' + 'config mode commands. 
Please use ' + 'asa_config instead') + try: + runner.add_command(**cmd) + except AddCommandError: + exc = get_exception() + warnings.append('duplicate command detected: %s' % cmd) + + for item in conditionals: + runner.add_conditional(item) + + runner.retries = module.params['retries'] + runner.interval = module.params['interval'] + runner.match = module.params['match'] + + try: + runner.run() + except FailedConditionsError: + exc = get_exception() + module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc)) + + result = dict(changed=False, stdout=list()) + + for cmd in commands: + try: + output = runner.get_command(cmd['command']) + except ValueError: + output = 'command not executed due to check_mode, see warnings' + result['stdout'].append(output) + + result['warnings'] = warnings + result['stdout_lines'] = list(to_lines(result['stdout'])) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() + diff --git a/network/asa/asa_config.py b/network/asa/asa_config.py new file mode 100644 index 00000000000..ffd082684ec --- /dev/null +++ b/network/asa/asa_config.py @@ -0,0 +1,346 @@ +#!/usr/bin/python +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: asa_config +version_added: "2.2" +author: "Peter Sprygada (@privateip), Patrick Ogenstad (@ogenstad)" +short_description: Manage Cisco ASA configuration sections +description: + - Cisco ASA configurations use a simple block indent file syntax + for segmenting configuration into sections. This module provides + an implementation for working with ASA configuration sections in + a deterministic way. +extends_documentation_fragment: asa +options: + lines: + description: + - The ordered set of commands that should be configured in the + section. The commands must be the exact same commands as found + in the device running-config. Be sure to note the configuration + command syntax as some commands are automatically modified by the + device config parser. + required: false + default: null + aliases: ['commands'] + parents: + description: + - The ordered set of parents that uniquely identify the section + the commands should be checked against. If the parents argument + is omitted, the commands are checked against the set of top + level or global commands. + required: false + default: null + src: + description: + - Specifies the source path to the file that contains the configuration + or configuration template to load. The path to the source file can + either be the full path on the Ansible control host or a relative + path from the playbook or role root directory. This argument is mutually + exclusive with I(lines). 
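+      # e.g. src: configs/asa_objects.cfg (hypothetical file name; the path
+      # is resolved relative to the playbook or role root as described above)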
+ required: false + default: null + before: + description: + - The ordered set of commands to push on to the command stack if + a change needs to be made. This allows the playbook designer + the opportunity to perform configuration commands prior to pushing + any changes without affecting how the set of commands are matched + against the system + required: false + default: null + after: + description: + - The ordered set of commands to append to the end of the command + stack if a change needs to be made. Just like with I(before) this + allows the playbook designer to append a set of commands to be + executed after the command set. + required: false + default: null + match: + description: + - Instructs the module on the way to perform the matching of + the set of commands against the current device config. If + match is set to I(line), commands are matched line by line. If + match is set to I(strict), command lines are matched with respect + to position. If match is set to I(exact), command lines + must be an equal match. Finally, if match is set to I(none), the + module will not attempt to compare the source configuration with + the running configuration on the remote device. + required: false + default: line + choices: ['line', 'strict', 'exact', 'none'] + replace: + description: + - Instructs the module on the way to perform the configuration + on the device. If the replace argument is set to I(line) then + the modified lines are pushed to the device in configuration + mode. If the replace argument is set to I(block) then the entire + command block is pushed to the device in configuration mode if any + line is not correct + required: false + default: line + choices: ['line', 'block'] + update: + description: + - The I(update) argument controls how the configuration statements + are processed on the remote device. Valid choices for the I(update) + argument are I(merge) and I(check). When the argument is set to + I(merge), the configuration changes are merged with the current + device running configuration. When the argument is set to I(check) + the configuration updates are determined but not actually configured + on the remote device. + required: false + default: merge + choices: ['merge', 'check'] + commit: + description: + - This argument specifies the update method to use when applying the + configuration changes to the remote node. If the value is set to + I(merge) the configuration updates are merged with the running- + config. If the value is set to I(check), no changes are made to + the remote host. + required: false + default: merge + choices: ['merge', 'check'] + backup: + description: + - This argument will cause the module to create a full backup of + the current C(running-config) from the remote device before any + changes are made. The backup file is written to the C(backup) + folder in the playbook root directory. If the directory does not + exist, it is created. + required: false + default: no + choices: ['yes', 'no'] + config: + description: + - The C(config) argument allows the playbook designer to supply + the base configuration to be used to validate configuration + changes necessary. If this argument is provided, the module + will not download the running-config from the remote node. + required: false + default: null + defaults: + description: + - This argument specifies whether or not to collect all defaults + when getting the remote device running config. When enabled, + the module will get the current config by issuing the command + C(show running-config all). 
+ required: false + default: no + choices: ['yes', 'no'] + passwords: + description: + - This argument specifies to include passwords in the config + when retrieving the running-config from the remote device. This + includes passwords related to VPN endpoints. This argument is + mutually exclusive with I(defaults). + required: false + default: no + choices: ['yes', 'no'] + save: + description: + - The C(save) argument instructs the module to save the running- + config to the startup-config at the conclusion of the module + running. If check mode is specified, this argument is ignored. + required: false + default: no + choices: ['yes', 'no'] +""" + +EXAMPLES = """ +# Note: examples below use the following provider dict to handle +# transport and authentication to the node. +vars: + cli: + host: "{{ inventory_hostname }}" + username: cisco + password: cisco + authorize: yes + auth_pass: cisco + transport: cli + +- asa_config: + lines: + - network-object host 10.80.30.18 + - network-object host 10.80.30.19 + - network-object host 10.80.30.20 + parents: ['object-group network OG-MONITORED-SERVERS'] + provider: "{{ cli }}" + +- asa_config: + host: "{{ inventory_hostname }}" + lines: + - message-length maximum client auto + - message-length maximum 512 + match: line + parents: ['policy-map type inspect dns PM-DNS', 'parameters'] + authorize: yes + auth_pass: cisco + username: admin + password: cisco + context: ansible + +- asa_config: + lines: + - ikev1 pre-shared-key MyS3cretVPNK3y + parents: tunnel-group 1.1.1.1 ipsec-attributes + passwords: yes + provider: "{{ cli }}" + +""" + +RETURN = """ +updates: + description: The set of commands that will be pushed to the remote device + returned: always + type: list + sample: ['...', '...'] +backup_path: + description: The full path to the backup file + returned: when backup is yes + type: path + sample: /playbooks/ansible/backup/asa_config.2016-07-16@22:28:34 +responses: + description: The set of responses from issuing the commands on the device + returned: when not check_mode + type: list + sample: ['...', '...'] +""" +import re + +import ansible.module_utils.asa + +from ansible.module_utils.basic import get_exception +from ansible.module_utils.network import NetworkModule, NetworkError +from ansible.module_utils.netcfg import NetworkConfig, dumps + +def get_config(module): + contents = module.params['config'] + if not contents: + if module.params['defaults']: + include = 'defaults' + elif module.params['passwords']: + include = 'passwords' + else: + include = None + contents = module.config.get_config(include=include) + return NetworkConfig(indent=1, contents=contents) + +def get_candidate(module): + candidate = NetworkConfig(indent=1) + if module.params['src']: + candidate.load(module.params['src']) + elif module.params['lines']: + parents = module.params['parents'] or list() + candidate.add(module.params['lines'], parents=parents) + return candidate + +def run(module, result): + match = module.params['match'] + replace = module.params['replace'] + path = module.params['parents'] + + candidate = get_candidate(module) + + if match != 'none': + config = get_config(module) + configobjs = candidate.difference(config, path=path, match=match, + replace=replace) + else: + configobjs = candidate.items + + if configobjs: + commands = dumps(configobjs, 'commands').split('\n') + + if module.params['lines']: + if module.params['before']: + commands[:0] = module.params['before'] + + if module.params['after']: + commands.extend(module.params['after']) + + 
result['updates'] = commands + + # send the configuration commands to the device and merge + # them with the current running config + if not module.check_mode: + module.config.load_config(commands) + result['changed'] = True + + if module.params['save']: + if not module.check_mode: + module.config.save_config() + result['changed'] = True + +def main(): + """ main entry point for module execution + """ + argument_spec = dict( + src=dict(type='path'), + + lines=dict(aliases=['commands'], type='list'), + parents=dict(type='list'), + + before=dict(type='list'), + after=dict(type='list'), + + match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), + replace=dict(default='line', choices=['line', 'block']), + + config=dict(), + defaults=dict(type='bool', default=False), + passwords=dict(type='bool', default=False), + + backup=dict(type='bool', default=False), + save=dict(type='bool', default=False), + ) + + mutually_exclusive = [('lines', 'src'), ('defaults', 'passwords')] + + required_if = [('match', 'strict', ['lines']), + ('match', 'exact', ['lines']), + ('replace', 'block', ['lines'])] + + module = NetworkModule(argument_spec=argument_spec, + connect_on_load=False, + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) + + result = dict(changed=False) + + if module.params['backup']: + result['__backup__'] = module.config.get_config() + + try: + run(module, result) + except NetworkError: + exc = get_exception() + module.fail_json(msg=str(exc), **exc.kwargs) + + module.exit_json(**result) + +if __name__ == '__main__': + main() diff --git a/network/citrix/netscaler.py b/network/citrix/netscaler.py index 384a625bdca..30442ade78c 100644 --- a/network/citrix/netscaler.py +++ b/network/citrix/netscaler.py @@ -21,6 +21,10 @@ along with Ansible. If not, see . 
""" +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: netscaler @@ -87,13 +91,26 @@ EXAMPLES = ''' # Disable the server -ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass" +- netscaler: + nsc_host: nsc.example.com + user: apiuser + password: apipass # Enable the server -ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass action=enable" +- netscaler: + nsc_host: nsc.example.com + user: apiuser + password: apipass + action: enable # Disable the service local:8080 -ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass name=local:8080 type=service action=disable" +- netscaler: + nsc_host: nsc.example.com + user: apiuser + password: apipass + name: 'local:8080' + type: service + action: disable ''' @@ -173,7 +190,8 @@ def main(): rc = 0 try: rc, result = core(module) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg=str(e)) if rc != 0: @@ -186,4 +204,7 @@ def main(): # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/network/cloudflare_dns.py b/network/cloudflare_dns.py new file mode 100644 index 00000000000..621e92ac1f0 --- /dev/null +++ b/network/cloudflare_dns.py @@ -0,0 +1,672 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016 Michael Gruener +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: cloudflare_dns +author: "Michael Gruener (@mgruener)" +requirements: + - "python >= 2.6" +version_added: "2.1" +short_description: manage Cloudflare DNS records +description: + - "Manages dns records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)" +options: + account_api_token: + description: + - "Account API token. You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://www.cloudflare.com/a/account)" + required: true + account_email: + description: + - "Account email." + required: true + port: + description: Service port. Required for C(type=SRV) + required: false + default: null + priority: + description: Record priority. Required for C(type=MX) and C(type=SRV) + required: false + default: "1" + proto: + description: Service protocol. Required for C(type=SRV) + required: false + choices: [ 'tcp', 'udp' ] + default: null + proxied: + description: Proxy through cloudflare network or just use DNS + required: false + default: no + version_added: "2.3" + record: + description: + - Record to add. Required if C(state=present). Default is C(@) (e.g. 
the zone name) + required: false + default: "@" + aliases: [ "name" ] + service: + description: Record service. Required for C(type=SRV) + required: false + default: null + solo: + description: + - Whether the record should be the only one for that record type and record name. Only use with C(state=present) + - This will delete all other records with the same record name and type. + required: false + default: null + state: + description: + - Whether the record(s) should exist or not + required: false + choices: [ 'present', 'absent' ] + default: present + timeout: + description: + - Timeout for Cloudflare API calls + required: false + default: 30 + ttl: + description: + - The TTL to give the new record. Must be between 120 and 2,147,483,647 seconds, or 1 for automatic. + required: false + default: 1 (automatic) + type: + description: + - The type of DNS record to create. Required if C(state=present) + required: false + choices: [ 'A', 'AAAA', 'CNAME', 'TXT', 'SRV', 'MX', 'NS', 'SPF' ] + default: null + value: + description: + - The record value. Required for C(state=present) + required: false + default: null + aliases: [ "content" ] + weight: + description: Service weight. Required for C(type=SRV) + required: false + default: "1" + zone: + description: + - The name of the Zone to work with (e.g. "example.com"). The Zone must already exist. + required: true + aliases: ["domain"] +''' + +EXAMPLES = ''' +# create a test.my.com A record to point to 127.0.0.1 +- cloudflare_dns: + zone: my.com + record: test + type: A + value: 127.0.0.1 + account_email: test@example.com + account_api_token: dummyapitoken + register: record + +# create a my.com CNAME record to example.com +- cloudflare_dns: + zone: my.com + type: CNAME + value: example.com + state: present + account_email: test@example.com + account_api_token: dummyapitoken + +# change it's ttl +- cloudflare_dns: + zone: my.com + type: CNAME + value: example.com + ttl: 600 + state: present + account_email: test@example.com + account_api_token: dummyapitoken + +# and delete the record +- cloudflare_dns: + zone: my.com + type: CNAME + value: example.com + state: absent + account_email: test@example.com + account_api_token: dummyapitoken + +# create a my.com CNAME record to example.com and proxy through cloudflare's network +- cloudflare_dns: + zone: my.com + type: CNAME + value: example.com + state: present + proxied: yes + account_email: test@example.com + account_api_token: dummyapitoken + +# create TXT record "test.my.com" with value "unique value" +# delete all other TXT records named "test.my.com" +- cloudflare_dns: + domain: my.com + record: test + type: TXT + value: unique value + state: present + solo: true + account_email: test@example.com + account_api_token: dummyapitoken + +# create a SRV record _foo._tcp.my.com +- cloudflare_dns: + domain: my.com + service: foo + proto: tcp + port: 3500 + priority: 10 + weight: 20 + type: SRV + value: fooserver.my.com +''' + +RETURN = ''' +record: + description: dictionary containing the record data + returned: success, except on record deletion + type: dictionary + contains: + content: + description: the record content (details depend on record type) + returned: success + type: string + sample: 192.0.2.91 + created_on: + description: the record creation date + returned: success + type: string + sample: 2016-03-25T19:09:42.516553Z + data: + description: additional record data + returned: success, if type is SRV + type: dictionary + sample: { + name: "jabber", + port: 8080, + priority: 10, + proto: 
"_tcp", + service: "_xmpp", + target: "jabberhost.sample.com", + weight: 5, + } + id: + description: the record id + returned: success + type: string + sample: f9efb0549e96abcb750de63b38c9576e + locked: + description: No documentation available + returned: success + type: boolean + sample: False + meta: + description: No documentation available + returned: success + type: dictionary + sample: { auto_added: false } + modified_on: + description: record modification date + returned: success + type: string + sample: 2016-03-25T19:09:42.516553Z + name: + description: the record name as FQDN (including _service and _proto for SRV) + returned: success + type: string + sample: www.sample.com + priority: + description: priority of the MX record + returned: success, if type is MX + type: int + sample: 10 + proxiable: + description: whether this record can be proxied through cloudflare + returned: success + type: boolean + sample: False + proxied: + description: whether the record is proxied through cloudflare + returned: success + type: boolean + sample: False + ttl: + description: the time-to-live for the record + returned: success + type: int + sample: 300 + type: + description: the record type + returned: success + type: string + sample: A + zone_id: + description: the id of the zone containing the record + returned: success + type: string + sample: abcede0bf9f0066f94029d2e6b73856a + zone_name: + description: the name of the zone containing the record + returned: success + type: string + sample: sample.com +''' + +try: + import json +except ImportError: + try: + import simplejson as json + except ImportError: + # Let snippet from module_utils/basic.py return a proper error in this case + pass + +import urllib + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import fetch_url + + +class CloudflareAPI(object): + + cf_api_endpoint = 'https://api.cloudflare.com/client/v4' + changed = False + + def __init__(self, module): + self.module = module + self.account_api_token = module.params['account_api_token'] + self.account_email = module.params['account_email'] + self.port = module.params['port'] + self.priority = module.params['priority'] + self.proto = module.params['proto'] + self.proxied = module.params['proxied'] + self.record = module.params['record'] + self.service = module.params['service'] + self.is_solo = module.params['solo'] + self.state = module.params['state'] + self.timeout = module.params['timeout'] + self.ttl = module.params['ttl'] + self.type = module.params['type'] + self.value = module.params['value'] + self.weight = module.params['weight'] + self.zone = module.params['zone'] + + if self.record == '@': + self.record = self.zone + + if (self.type in ['CNAME','NS','MX','SRV']) and (self.value is not None): + self.value = self.value.rstrip('.') + + if (self.type == 'SRV'): + if (self.proto is not None) and (not self.proto.startswith('_')): + self.proto = '_' + self.proto + if (self.service is not None) and (not self.service.startswith('_')): + self.service = '_' + self.service + + if not self.record.endswith(self.zone): + self.record = self.record + '.' 
+ self.zone + + def _cf_simple_api_call(self,api_call,method='GET',payload=None): + headers = { 'X-Auth-Email': self.account_email, + 'X-Auth-Key': self.account_api_token, + 'Content-Type': 'application/json' } + data = None + if payload: + try: + data = json.dumps(payload) + except Exception: + e = get_exception() + self.module.fail_json(msg="Failed to encode payload as JSON: %s " % str(e)) + + resp, info = fetch_url(self.module, + self.cf_api_endpoint + api_call, + headers=headers, + data=data, + method=method, + timeout=self.timeout) + + if info['status'] not in [200,304,400,401,403,429,405,415]: + self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP code {1}".format(api_call,info['status'])) + + error_msg = '' + if info['status'] == 401: + # Unauthorized + error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(info['status'],method,api_call) + elif info['status'] == 403: + # Forbidden + error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(info['status'],method,api_call) + elif info['status'] == 429: + # Too many requests + error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(info['status'],method,api_call) + elif info['status'] == 405: + # Method not allowed + error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(info['status'],method,api_call) + elif info['status'] == 415: + # Unsupported Media Type + error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(info['status'],method,api_call) + elif info ['status'] == 400: + # Bad Request + error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(info['status'],method,api_call) + + result = None + try: + content = resp.read() + except AttributeError: + if info['body']: + content = info['body'] + else: + error_msg += "; The API response was empty" + + if content: + try: + result = json.loads(content) + except json.JSONDecodeError: + error_msg += "; Failed to parse API response: {0}".format(content) + + # received an error status but no data with details on what failed + if (info['status'] not in [200,304]) and (result is None): + self.module.fail_json(msg=error_msg) + + if not result['success']: + error_msg += "; Error details: " + for error in result['errors']: + error_msg += "code: {0}, error: {1}; ".format(error['code'],error['message']) + if 'error_chain' in error: + for chain_error in error['error_chain']: + error_msg += "code: {0}, error: {1}; ".format(chain_error['code'],chain_error['message']) + self.module.fail_json(msg=error_msg) + + return result, info['status'] + + def _cf_api_call(self,api_call,method='GET',payload=None): + result, status = self._cf_simple_api_call(api_call,method,payload) + + data = result['result'] + + if 'result_info' in result: + pagination = result['result_info'] + if pagination['total_pages'] > 1: + next_page = int(pagination['page']) + 1 + parameters = ['page={0}'.format(next_page)] + # strip "page" parameter from call parameters (if there are any) + if '?' in api_call: + raw_api_call,query = api_call.split('?',1) + parameters += [param for param in query.split('&') if not param.startswith('page')] + else: + raw_api_call = api_call + while next_page <= pagination['total_pages']: + raw_api_call += '?' 
+ '&'.join(parameters) + result, status = self._cf_simple_api_call(raw_api_call,method,payload) + data += result['result'] + next_page += 1 + + return data, status + + def _get_zone_id(self,zone=None): + if not zone: + zone = self.zone + + zones = self.get_zones(zone) + if len(zones) > 1: + self.module.fail_json(msg="More than one zone matches {0}".format(zone)) + + if len(zones) < 1: + self.module.fail_json(msg="No zone found with name {0}".format(zone)) + + return zones[0]['id'] + + def get_zones(self,name=None): + if not name: + name = self.zone + param = '' + if name: + param = '?' + urllib.urlencode({'name' : name}) + zones,status = self._cf_api_call('/zones' + param) + return zones + + def get_dns_records(self,zone_name=None,type=None,record=None,value=''): + if not zone_name: + zone_name = self.zone + if not type: + type = self.type + if not record: + record = self.record + # necessary because None as value means to override user + # set module value + if (not value) and (value is not None): + value = self.value + + zone_id = self._get_zone_id() + api_call = '/zones/{0}/dns_records'.format(zone_id) + query = {} + if type: + query['type'] = type + if record: + query['name'] = record + if value: + query['content'] = value + if query: + api_call += '?' + urllib.urlencode(query) + + records,status = self._cf_api_call(api_call) + return records + + def delete_dns_records(self,**kwargs): + params = {} + for param in ['port','proto','service','solo','type','record','value','weight','zone']: + if param in kwargs: + params[param] = kwargs[param] + else: + params[param] = getattr(self,param) + + records = [] + content = params['value'] + search_record = params['record'] + if params['type'] == 'SRV': + content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value'] + search_record = params['service'] + '.' + params['proto'] + '.' 
+ params['record'] + if params['solo']: + search_value = None + else: + search_value = content + + records = self.get_dns_records(params['zone'],params['type'],search_record,search_value) + + for rr in records: + if params['solo']: + if not ((rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content)): + self.changed = True + if not self.module.check_mode: + result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'],rr['id']),'DELETE') + else: + self.changed = True + if not self.module.check_mode: + result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'],rr['id']),'DELETE') + return self.changed + + def ensure_dns_record(self,**kwargs): + params = {} + for param in ['port','priority','proto','proxied','service','ttl','type','record','value','weight','zone']: + if param in kwargs: + params[param] = kwargs[param] + else: + params[param] = getattr(self,param) + + search_value = params['value'] + search_record = params['record'] + new_record = None + if (params['type'] is None) or (params['record'] is None): + self.module.fail_json(msg="You must provide a type and a record to create a new record") + + if (params['type'] in [ 'A','AAAA','CNAME','TXT','MX','NS','SPF']): + if not params['value']: + self.module.fail_json(msg="You must provide a non-empty value to create this record type") + + # there can only be one CNAME per record + # ignoring the value when searching for existing + # CNAME records allows us to update the value if it + # changes + if params['type'] == 'CNAME': + search_value = None + + new_record = { + "type": params['type'], + "name": params['record'], + "content": params['value'], + "ttl": params['ttl'] + } + + if (params['type'] in [ 'A', 'AAAA', 'CNAME' ]): + new_record["proxied"] = params["proxied"] + + if params['type'] == 'MX': + for attr in [params['priority'],params['value']]: + if (attr is None) or (attr == ''): + self.module.fail_json(msg="You must provide priority and a value to create this record type") + new_record = { + "type": params['type'], + "name": params['record'], + "content": params['value'], + "priority": params['priority'], + "ttl": params['ttl'] + } + + if params['type'] == 'SRV': + for attr in [params['port'],params['priority'],params['proto'],params['service'],params['weight'],params['value']]: + if (attr is None) or (attr == ''): + self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type") + srv_data = { + "target": params['value'], + "port": params['port'], + "weight": params['weight'], + "priority": params['priority'], + "name": params['record'][:-len('.' + params['zone'])], + "proto": params['proto'], + "service": params['service'] + } + new_record = { "type": params['type'], "ttl": params['ttl'], 'data': srv_data } + search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value'] + search_record = params['service'] + '.' + params['proto'] + '.' + params['record'] + + zone_id = self._get_zone_id(params['zone']) + records = self.get_dns_records(params['zone'],params['type'],search_record,search_value) + # in theory this should be impossible as cloudflare does not allow + # the creation of duplicate records but lets cover it anyways + if len(records) > 1: + self.module.fail_json(msg="More than one record already exists for the given attributes. 
That should be impossible, please open an issue!") + # record already exists, check if it must be updated + if len(records) == 1: + cur_record = records[0] + do_update = False + if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl'] ): + do_update = True + if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']): + do_update = True + if ('data' in new_record) and ('data' in cur_record): + if (cur_record['data'] > new_record['data']) - (cur_record['data'] < new_record['data']): + do_update = True + if (type == 'CNAME') and (cur_record['content'] != new_record['content']): + do_update = True + if do_update: + if not self.module.check_mode: + result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id,records[0]['id']),'PUT',new_record) + self.changed = True + return result,self.changed + else: + return records,self.changed + if not self.module.check_mode: + result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id),'POST',new_record) + self.changed = True + return result,self.changed + +def main(): + module = AnsibleModule( + argument_spec = dict( + account_api_token = dict(required=True, no_log=True, type='str'), + account_email = dict(required=True, type='str'), + port = dict(required=False, default=None, type='int'), + priority = dict(required=False, default=1, type='int'), + proto = dict(required=False, default=None, choices=[ 'tcp', 'udp' ], type='str'), + proxied = dict(required=False, default=False, type='bool'), + record = dict(required=False, default='@', aliases=['name'], type='str'), + service = dict(required=False, default=None, type='str'), + solo = dict(required=False, default=None, type='bool'), + state = dict(required=False, default='present', choices=['present', 'absent'], type='str'), + timeout = dict(required=False, default=30, type='int'), + ttl = dict(required=False, default=1, type='int'), + type = dict(required=False, default=None, choices=[ 'A', 'AAAA', 'CNAME', 'TXT', 'SRV', 'MX', 'NS', 'SPF' ], type='str'), + value = dict(required=False, default=None, aliases=['content'], type='str'), + weight = dict(required=False, default=1, type='int'), + zone = dict(required=True, default=None, aliases=['domain'], type='str'), + ), + supports_check_mode = True, + required_if = ([ + ('state','present',['record','type']), + ('type','MX',['priority','value']), + ('type','SRV',['port','priority','proto','service','value','weight']), + ('type','A',['value']), + ('type','AAAA',['value']), + ('type','CNAME',['value']), + ('type','TXT',['value']), + ('type','NS',['value']), + ('type','SPF',['value']) + ] + ), + required_one_of = ( + [['record','value','type']] + ) + ) + + changed = False + cf_api = CloudflareAPI(module) + + # sanity checks + if cf_api.is_solo and cf_api.state == 'absent': + module.fail_json(msg="solo=true can only be used with state=present") + + # perform add, delete or update (only the TTL can be updated) of one or + # more records + if cf_api.state == 'present': + # delete all records matching record name + type + if cf_api.is_solo: + changed = cf_api.delete_dns_records(solo=cf_api.is_solo) + result,changed = cf_api.ensure_dns_record() + if isinstance(result,list): + module.exit_json(changed=changed,result={'record': result[0]}) + else: + module.exit_json(changed=changed,result={'record': result}) + else: + # force solo to False, just to be sure + changed = cf_api.delete_dns_records(solo=False) + module.exit_json(changed=changed) + + +if __name__ == 
'__main__': + main() diff --git a/network/dnsimple.py b/network/dnsimple.py index 5cecfbd8169..3f6c2188b04 100644 --- a/network/dnsimple.py +++ b/network/dnsimple.py @@ -14,6 +14,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: dnsimple @@ -97,36 +101,67 @@ ''' EXAMPLES = ''' -# authenticate using email and API token -- local_action: dnsimple account_email=test@example.com account_api_token=dummyapitoken - -# fetch all domains -- local_action dnsimple - register: domains +# authenticate using email and API token and fetch all domains +- dnsimple: + account_email: test@example.com + account_api_token: dummyapitoken + delegate_to: localhost # fetch my.com domain records -- local_action: dnsimple domain=my.com state=present +- dnsimple: + domain: my.com + state: present + delegate_to: localhost register: records # delete a domain -- local_action: dnsimple domain=my.com state=absent +- dnsimple: + domain: my.com + state: absent + delegate_to: localhost # create a test.my.com A record to point to 127.0.0.01 -- local_action: dnsimple domain=my.com record=test type=A value=127.0.0.1 +- dnsimple: + domain: my.com + record: test + type: A + value: 127.0.0.1 + delegate_to: localhost register: record # and then delete it -- local_action: dnsimple domain=my.com record_ids={{ record['id'] }} +- dnsimple: + domain: my.com + record_ids: '{{ record["id"] }}' + delegate_to: localhost # create a my.com CNAME record to example.com -- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=present +- dnsimple + domain: my.com + record: '' + type: CNAME + value: example.com + state: present + delegate_to: localhost # change it's ttl -- local_action: dnsimple domain=my.com record= type=CNAME value=example.com ttl=600 state=present +- dnsimple: + domain: my.com + record: '' + type: CNAME + value: example.com + ttl: 600 + state: present + delegate_to: localhost # and delete the record -- local_action: dnsimpledomain=my.com record= type=CNAME value=example.com state=absent - +- dnsimple: + domain: my.com + record: '' + type: CNAME + value: example.com + state: absent + delegate_to: localhost ''' import os @@ -159,7 +194,7 @@ def main(): ) if not HAS_DNSIMPLE: - module.fail_json("dnsimple required for this module") + module.fail_json(msg="dnsimple required for this module") account_email = module.params.get('account_email') account_api_token = module.params.get('account_api_token') @@ -294,12 +329,15 @@ def main(): else: module.fail_json(msg="'%s' is an unknown value for the state argument" % state) - except DNSimpleException, e: + except DNSimpleException: + e = get_exception() module.fail_json(msg="Unable to contact DNSimple: %s" % e.message) module.fail_json(msg="Unknown what you wanted me to do") # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.pycompat24 import get_exception -main() +if __name__ == '__main__': + main() diff --git a/network/dnsmadeeasy.py b/network/dnsmadeeasy.py index cce7bd10082..7650960e434 100644 --- a/network/dnsmadeeasy.py +++ b/network/dnsmadeeasy.py @@ -14,23 +14,27 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
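The cloudflare_dns flow above reduces to a search-compare-write cycle against the Cloudflare v4 REST API: look up records matching the name and type, then POST a new record or PUT over the existing one. A minimal standalone sketch of that cycle, assuming plain urllib in place of the module's _cf_api_call() helper and with placeholder credentials; the query parameters are assumed URL-safe here:

```python
import json
import urllib.request

API = 'https://api.cloudflare.com/client/v4'

def api_call(path, method='GET', payload=None,
             email='you@example.com', token='REDACTED'):
    # Cloudflare v4 authenticates with the X-Auth-Email / X-Auth-Key headers.
    req = urllib.request.Request(
        API + path,
        data=json.dumps(payload).encode('utf-8') if payload else None,
        headers={'X-Auth-Email': email, 'X-Auth-Key': token,
                 'Content-Type': 'application/json'},
        method=method)
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)['result']

def ensure_record(zone_id, record):
    # Search for existing records of the same type and name.
    found = api_call('/zones/{0}/dns_records?type={1}&name={2}'.format(
        zone_id, record['type'], record['name']))
    if not found:
        # Nothing matches: create, as in the POST branch above.
        return api_call('/zones/{0}/dns_records'.format(zone_id),
                        'POST', record), True
    current = found[0]
    if (current['content'] == record['content'] and
            current['ttl'] == record['ttl']):
        return current, False  # already in the desired state
    # Otherwise update in place, as in the PUT branch above.
    return api_call('/zones/{0}/dns_records/{1}'.format(zone_id, current['id']),
                    'PUT', record), True
```

The PUT-versus-POST split is what keeps the task idempotent: a second run finds the matching record, sees no difference, and reports no change.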
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: dnsmadeeasy version_added: "1.3" short_description: Interface with dnsmadeeasy.com (a DNS hosting service). description: - - "Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or monitor/account support yet. See: U(http://www.dnsmadeeasy.com/services/rest-api/)" + - "Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/)" options: account_key: description: - - Accout API Key. + - Account API Key. required: true default: null account_secret: description: - - Accout Secret Key. + - Account Secret Key. required: true default: null @@ -92,21 +96,48 @@ EXAMPLES = ''' # fetch my.com domain records -- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present +- dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present register: response # create / ensure the presence of a record -- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test" record_type="A" record_value="127.0.0.1" +- dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_type: A + record_value: 127.0.0.1 # update the previously created record -- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test" record_value="192.168.0.1" +- dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test + record_value: 192.0.2.23 # fetch a specific record -- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test" +- dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: present + record_name: test register: response # delete a record / ensure it is absent -- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=absent record_name="test" +- dnsmadeeasy: + account_key: key + account_secret: secret + domain: my.com + state: absent + record_name: test ''' # ============================================ @@ -121,7 +152,8 @@ from time import strftime, gmtime import hashlib import hmac -except ImportError, e: +except ImportError: + e = get_exception() IMPORT_ERROR = str(e) class DME2: @@ -170,7 +202,7 @@ def query(self, resource, method, data=None): try: return json.load(response) - except Exception, e: + except Exception: return {} def getDomain(self, domain_id): @@ -204,16 +236,17 @@ def getMatchingRecord(self, record_name, record_type, record_value): if not self.all_records: self.all_records = self.getRecords() - # TODO SRV type not yet implemented if record_type in ["A", "AAAA", "CNAME", "HTTPRED", "PTR"]: for result in self.all_records: if result['name'] == record_name and result['type'] == record_type: return result return False - elif record_type in ["MX", "NS", "TXT"]: + elif record_type in ["MX", "NS", "TXT", "SRV"]: for result in self.all_records: if record_type == "MX": value = record_value.split(" ")[1] + elif record_type == "SRV": + value = record_value.split(" ")[3] else: value = record_value if result['name'] == record_name and result['type'] == record_type and result['value'] == value: @@ -309,6 +342,13 @@ def main(): new_record["mxLevel"] 
= new_record["value"].split(" ")[0] new_record["value"] = new_record["value"].split(" ")[1] + # Special handling for SRV records + if new_record["type"] == "SRV": + new_record["priority"] = new_record["value"].split(" ")[0] + new_record["weight"] = new_record["value"].split(" ")[1] + new_record["port"] = new_record["value"].split(" ")[2] + new_record["value"] = new_record["value"].split(" ")[3] + # Compare new record against existing one changed = False if current_record: @@ -357,4 +397,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() +if __name__ == '__main__': + main() diff --git a/network/exoscale/__init__.py b/network/exoscale/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/network/exoscale/exo_dns_domain.py b/network/exoscale/exo_dns_domain.py new file mode 100644 index 00000000000..b0046c803dc --- /dev/null +++ b/network/exoscale/exo_dns_domain.py @@ -0,0 +1,259 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: exo_dns_domain +short_description: Manages domain records on Exoscale DNS API. +description: + - Create and remove domain records. +version_added: "2.2" +author: "René Moser (@resmo)" +options: + name: + description: + - Name of the record. + required: true + state: + description: + - State of the resource. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + api_key: + description: + - API key of the Exoscale DNS API. + required: false + default: null + api_secret: + description: + - Secret key of the Exoscale DNS API. + required: false + default: null + api_timeout: + description: + - HTTP timeout to Exoscale DNS API. + required: false + default: 10 + api_region: + description: + - Name of the ini section in the C(cloustack.ini) file. + required: false + default: cloudstack + validate_certs: + description: + - Validate SSL certs of the Exoscale DNS API. + required: false + default: true +requirements: + - "python >= 2.6" +notes: + - As Exoscale DNS uses the same API key and secret for all services, we reuse the config used for Exscale Compute based on CloudStack. + The config is read from several locations, in the following order. + The C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET) environment variables. + A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file, + A C(cloudstack.ini) file in the current working directory. + A C(.cloudstack.ini) file in the users home directory. + Optionally multiple credentials and endpoints can be specified using ini sections in C(cloudstack.ini). + Use the argument C(api_region) to select the section name, default section is C(cloudstack). + - This module does not support multiple A records and will complain properly if you try. 
+ - More information Exoscale DNS can be found on https://community.exoscale.ch/documentation/dns/. + - This module supports check mode and diff. +''' + +EXAMPLES = ''' +# Create a domain. +- local_action: + module: exo_dns_domain + name: example.com + +# Remove a domain. +- local_action: + module: exo_dns_domain + name: example.com + state: absent +''' + +RETURN = ''' +--- +exo_dns_domain: + description: API domain results + returned: success + type: dictionary + contains: + account_id: + description: Your account ID + returned: success + type: int + sample: 34569 + auto_renew: + description: Whether domain is auto renewed or not + returned: success + type: bool + sample: false + created_at: + description: When the domain was created + returned: success + type: string + sample: "2016-08-12T15:24:23.989Z" + expires_on: + description: When the domain expires + returned: success + type: string + sample: "2016-08-12T15:24:23.989Z" + id: + description: ID of the domain + returned: success + type: int + sample: "2016-08-12T15:24:23.989Z" + lockable: + description: Whether the domain is lockable or not + returned: success + type: bool + sample: true + name: + description: Domain name + returned: success + type: string + sample: example.com + record_count: + description: Number of records related to this domain + returned: success + type: int + sample: 5 + registrant_id: + description: ID of the registrant + returned: success + type: int + sample: null + service_count: + description: Number of services + returned: success + type: int + sample: 0 + state: + description: State of the domain + returned: success + type: string + sample: "hosted" + token: + description: Token + returned: success + type: string + sample: "r4NzTRp6opIeFKfaFYvOd6MlhGyD07jl" + unicode_name: + description: Domain name as unicode + returned: success + type: string + sample: "example.com" + updated_at: + description: When the domain was updated last. 
+ returned: success + type: string + sample: "2016-08-12T15:24:23.989Z" + user_id: + description: ID of the user + returned: success + type: int + sample: null + whois_protected: + description: Wheter the whois is protected or not + returned: success + type: bool + sample: false +''' + +# import exoscale common +from ansible.module_utils.exoscale import * + + +class ExoDnsDomain(ExoDns): + + def __init__(self, module): + super(ExoDnsDomain, self).__init__(module) + self.name = self.module.params.get('name').lower() + + def get_domain(self): + domains = self.api_query("/domains", "GET") + for z in domains: + if z['domain']['name'].lower() == self.name: + return z + return None + + def present_domain(self): + domain = self.get_domain() + data = { + 'domain': { + 'name': self.name, + } + } + if not domain: + self.result['diff']['after'] = data['domain'] + self.result['changed'] = True + if not self.module.check_mode: + domain = self.api_query("/domains", "POST", data) + return domain + + def absent_domain(self): + domain = self.get_domain() + if domain: + self.result['diff']['before'] = domain + self.result['changed'] = True + if not self.module.check_mode: + self.api_query("/domains/%s" % domain['domain']['name'], "DELETE") + return domain + + def get_result(self, resource): + if resource: + self.result['exo_dns_domain'] = resource['domain'] + return self.result + + +def main(): + argument_spec = exo_dns_argument_spec() + argument_spec.update(dict( + name=dict(required=True), + state=dict(choices=['present', 'absent'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=exo_dns_required_together(), + supports_check_mode=True + ) + + exo_dns_domain = ExoDnsDomain(module) + if module.params.get('state') == "present": + resource = exo_dns_domain.present_domain() + else: + resource = exo_dns_domain.absent_domain() + result = exo_dns_domain.get_result(resource) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/network/exoscale/exo_dns_record.py b/network/exoscale/exo_dns_record.py new file mode 100644 index 00000000000..495508d3d47 --- /dev/null +++ b/network/exoscale/exo_dns_record.py @@ -0,0 +1,395 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: exo_dns_record +short_description: Manages DNS records on Exoscale DNS. +description: + - Create, update and delete records. +version_added: "2.2" +author: "René Moser (@resmo)" +options: + name: + description: + - Name of the record. + required: false + default: "" + domain: + description: + - Domain the record is related to. + required: true + record_type: + description: + - Type of the record. 
+ required: false + default: A + choices: ['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL'] + aliases: ['rtype', 'type'] + content: + description: + - Content of the record. + - Required if C(state=present) or C(name="") + required: false + default: null + aliases: ['value', 'address'] + ttl: + description: + - TTL of the record in seconds. + required: false + default: 3600 + prio: + description: + - Priority of the record. + required: false + default: null + aliases: ['priority'] + multiple: + description: + - Whether there are more than one records with similar C(name). + - Only allowed with C(record_type=A). + - C(content) will not be updated as it is used as key to find the record. + required: false + default: null + aliases: ['priority'] + state: + description: + - State of the record. + required: false + default: 'present' + choices: [ 'present', 'absent' ] + api_key: + description: + - API key of the Exoscale DNS API. + required: false + default: null + api_secret: + description: + - Secret key of the Exoscale DNS API. + required: false + default: null + api_timeout: + description: + - HTTP timeout to Exoscale DNS API. + required: false + default: 10 + api_region: + description: + - Name of the ini section in the C(cloustack.ini) file. + required: false + default: cloudstack + validate_certs: + description: + - Validate SSL certs of the Exoscale DNS API. + required: false + default: true +requirements: + - "python >= 2.6" +notes: + - As Exoscale DNS uses the same API key and secret for all services, we reuse the config used for Exscale Compute based on CloudStack. + The config is read from several locations, in the following order. + The C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET) environment variables. + A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file, + A C(cloudstack.ini) file in the current working directory. + A C(.cloudstack.ini) file in the users home directory. + Optionally multiple credentials and endpoints can be specified using ini sections in C(cloudstack.ini). + Use the argument C(api_region) to select the section name, default section is C(cloudstack). + - This module does not support multiple A records and will complain properly if you try. + - More information Exoscale DNS can be found on https://community.exoscale.ch/documentation/dns/. + - This module supports check mode and diff. +''' + +EXAMPLES = ''' +# Create or update an A record. +- local_action: + module: exo_dns_record + name: web-vm-1 + domain: example.com + content: 1.2.3.4 + +# Update an existing A record with a new IP. +- local_action: + module: exo_dns_record + name: web-vm-1 + domain: example.com + content: 1.2.3.5 + +# Create another A record with same name. +- local_action: + module: exo_dns_record + name: web-vm-1 + domain: example.com + content: 1.2.3.6 + multiple: yes + +# Create or update a CNAME record. +- local_action: + module: exo_dns_record + name: www + domain: example.com + record_type: CNAME + content: web-vm-1 + +# Create or update a MX record. +- local_action: + module: exo_dns_record + domain: example.com + record_type: MX + content: mx1.example.com + prio: 10 + +# delete a MX record. +- local_action: + module: exo_dns_record + domain: example.com + record_type: MX + content: mx1.example.com + state: absent + +# Remove a record. 
+- local_action: + module: exo_dns_record + name: www + domain: example.com + state: absent +''' + +RETURN = ''' +--- +exo_dns_record: + description: API record results + returned: success + type: dictionary + contains: + content: + description: value of the record + returned: success + type: string + sample: 1.2.3.4 + created_at: + description: When the record was created + returned: success + type: string + sample: "2016-08-12T15:24:23.989Z" + domain: + description: Name of the domain + returned: success + type: string + sample: example.com + domain_id: + description: ID of the domain + returned: success + type: int + sample: 254324 + id: + description: ID of the record + returned: success + type: int + sample: 254324 + name: + description: name of the record + returned: success + type: string + sample: www + parent_id: + description: ID of the parent + returned: success + type: int + sample: null + prio: + description: Priority of the record + returned: success + type: int + sample: 10 + record_type: + description: Priority of the record + returned: success + type: string + sample: A + system_record: + description: Whether the record is a system record or not + returned: success + type: bool + sample: false + ttl: + description: Time to live of the record + returned: success + type: int + sample: 3600 + updated_at: + description: When the record was updated + returned: success + type: string + sample: "2016-08-12T15:24:23.989Z" +''' + +# import exoscale common +from ansible.module_utils.exoscale import * + + +class ExoDnsRecord(ExoDns): + + def __init__(self, module): + super(ExoDnsRecord, self).__init__(module) + + self.content = self.module.params.get('content') + if self.content: + self.content = self.content.lower() + + self.domain = self.module.params.get('domain').lower() + self.name = self.module.params.get('name').lower() + if self.name == self.domain: + self.name = "" + + self.multiple = self.module.params.get('multiple') + self.record_type = self.module.params.get('record_type') + if self.multiple and self.record_type != 'A': + self.module.fail_json("Multiple is only usable with record_type A") + + + def _create_record(self, record): + self.result['changed'] = True + data = { + 'record': { + 'name': self.name, + 'record_type': self.record_type, + 'content': self.content, + 'ttl': self.module.params.get('ttl'), + 'prio': self.module.params.get('prio'), + } + } + self.result['diff']['after'] = data['record'] + if not self.module.check_mode: + record = self.api_query("/domains/%s/records" % self.domain, "POST", data) + return record + + def _update_record(self, record): + data = { + 'record': { + 'name': self.name, + 'content': self.content, + 'ttl': self.module.params.get('ttl'), + 'prio': self.module.params.get('prio'), + } + } + if self.has_changed(data['record'], record['record']): + self.result['changed'] = True + if not self.module.check_mode: + record = self.api_query("/domains/%s/records/%s" % (self.domain, record['record']['id']), "PUT", data) + return record + + def get_record(self): + domain = self.module.params.get('domain') + records = self.api_query("/domains/%s/records" % domain, "GET") + + record = None + for r in records: + found_record = None + if r['record']['record_type'] == self.record_type: + r_name = r['record']['name'].lower() + r_content = r['record']['content'].lower() + + # there are multiple A records but we found an exact match + if self.multiple and self.name == r_name and self.content == r_content: + record = r + break + + # We do not expect to 
find more than one record with that content
+                if not self.multiple and not self.name and self.content == r_content:
+                    found_record = r
+
+                # We do not expect to find more than one record with that name
+                elif not self.multiple and self.name and self.name == r_name:
+                    found_record = r
+
+            if record and found_record:
+                self.module.fail_json(msg="More than one record with your params. Use multiple=yes for more than one A record.")
+            if found_record:
+                record = found_record
+        return record
+
+    def present_record(self):
+        record = self.get_record()
+        if not record:
+            record = self._create_record(record)
+        else:
+            record = self._update_record(record)
+        return record
+
+    def absent_record(self):
+        record = self.get_record()
+        if record:
+            self.result['diff']['before'] = record
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                self.api_query("/domains/%s/records/%s" % (self.domain, record['record']['id']), "DELETE")
+        return record
+
+    def get_result(self, resource):
+        if resource:
+            self.result['exo_dns_record'] = resource['record']
+            self.result['exo_dns_record']['domain'] = self.domain
+        return self.result
+
+
+def main():
+    argument_spec = exo_dns_argument_spec()
+    argument_spec.update(dict(
+        name=dict(default=""),
+        record_type=dict(choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL'], aliases=['rtype', 'type'], default='A'),
+        content=dict(aliases=['value', 'address']),
+        multiple=dict(type='bool', default=False),
+        ttl=dict(type='int', default=3600),
+        prio=dict(type='int', aliases=['priority']),
+        domain=dict(required=True),
+        state=dict(choices=['present', 'absent'], default='present'),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=exo_dns_required_together(),
+        required_if=[
+            ['state', 'present', ['content']],
+            ['name', '', ['content']],
+        ],
+        required_one_of=[
+            ['content', 'name'],
+        ],
+        supports_check_mode=True,
+    )
+
+    exo_dns_record = ExoDnsRecord(module)
+    if module.params.get('state') == "present":
+        resource = exo_dns_record.present_record()
+    else:
+        resource = exo_dns_record.absent_record()
+
+    result = exo_dns_record.get_result(resource)
+    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
diff --git a/network/f5/bigip_device_dns.py b/network/f5/bigip_device_dns.py
new file mode 100644
index 00000000000..a6c1e8e30d7
--- /dev/null
+++ b/network/f5/bigip_device_dns.py
@@ -0,0 +1,403 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
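The get_record() matching rules in exo_dns_record above are the subtle part: with multiple=yes an A record is keyed by name and content together, apex records (empty name) are keyed by content alone, and everything else is keyed by name so the content can be updated later. A standalone sketch of that decision table, using plain dicts as hypothetical API responses; note the module additionally fails when the match is ambiguous, which this sketch omits:

```python
# Sketch of get_record()'s matching rules with hypothetical record dicts.
def match_record(records, name, content, record_type='A', multiple=False):
    match = None
    for r in records:
        if r['record_type'] != record_type:
            continue
        if multiple:
            # multiple=yes: content is part of the key, so only exact matches count
            if r['name'] == name and r['content'] == content:
                return r
        elif not name:
            # apex record (empty name): match by content
            if r['content'] == content:
                match = r
        elif r['name'] == name:
            # normal case: the name is the key; content may be updated later
            match = r
    return match

records = [
    {'record_type': 'A', 'name': 'web-vm-1', 'content': '1.2.3.4'},
    {'record_type': 'A', 'name': 'web-vm-1', 'content': '1.2.3.6'},
]
print(match_record(records, 'web-vm-1', '1.2.3.6', multiple=True))
```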
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_device_dns
+short_description: Manage BIG-IP device DNS settings
+description:
+  - Manage BIG-IP device DNS settings
+version_added: "2.2"
+options:
+  cache:
+    description:
+      - Specifies whether the system caches DNS lookups or performs the
+        operation each time a lookup is needed. Please note that this applies
+        only to Access Policy Manager features, such as ACLs, web application
+        rewrites, and authentication.
+    required: false
+    default: disable
+    choices:
+      - enable
+      - disable
+  name_servers:
+    description:
+      - A list of name servers that the system uses to validate DNS lookups
+  forwarders:
+    description:
+      - A list of BIND servers that the system can use to perform DNS lookups
+  search:
+    description:
+      - A list of domains that the system searches for local domain lookups,
+        to resolve local host names.
+  ip_version:
+    description:
+      - Specifies whether the resolver uses IPv4 or IPv6 for DNS lookups.
+    required: false
+    choices:
+      - 4
+      - 6
+  state:
+    description:
+      - The state of the DNS settings on the system. When C(present), guarantees
+        that the requested settings are configured on the device.
+    required: false
+    default: present
+    choices:
+      - absent
+      - present
+notes:
+  - Requires the f5-sdk Python package on the host. This is as easy as pip
+    install f5-sdk.
+extends_documentation_fragment: f5
+requirements:
+  - f5-sdk
+author:
+  - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Set the DNS settings on the BIG-IP
+  bigip_device_dns:
+    name_servers:
+      - 208.67.222.222
+      - 208.67.220.220
+    search:
+      - localdomain
+      - lab.local
+    state: present
+    password: "secret"
+    server: "lb.mydomain.com"
+    user: "admin"
+    validate_certs: "no"
+  delegate_to: localhost
+'''
+
+RETURN = '''
+cache:
+  description: The new value of the DNS caching
+  returned: changed
+  type: string
+  sample: "enabled"
+name_servers:
+  description: List of name servers that were added or removed
+  returned: changed
+  type: list
+  sample: "['192.0.2.10', '172.17.12.10']"
+forwarders:
+  description: List of forwarders that were added or removed
+  returned: changed
+  type: list
+  sample: "['192.0.2.10', '172.17.12.10']"
+search:
+  description: List of search domains that were added or removed
+  returned: changed
+  type: list
+  sample: "['192.0.2.10', '172.17.12.10']"
+ip_version:
+  description: IP version that DNS will use when specifying IP addresses
+  returned: changed
+  type: int
+  sample: 4
+'''
+
+try:
+    from f5.bigip.contexts import TransactionContextManager
+    from f5.bigip import ManagementRoot
+    HAS_F5SDK = True
+except ImportError:
+    HAS_F5SDK = False
+
+
+REQUIRED = ['name_servers', 'search', 'forwarders', 'ip_version', 'cache']
+CACHE = ['disable', 'enable']
+IP = [4, 6]
+
+
+class BigIpDeviceDns(object):
+    def __init__(self, *args, **kwargs):
+        if not HAS_F5SDK:
+            raise F5ModuleError("The python f5-sdk module is required")
+
+        # The params that change in the module
+        self.cparams = dict()
+
+        # Stores the params that are sent to the module
+        self.params = kwargs
+        self.api = ManagementRoot(kwargs['server'],
+                                  kwargs['user'],
+                                  kwargs['password'],
+                                  port=kwargs['server_port'])
+
+    def flush(self):
+        result = dict()
+        changed = False
+        state = self.params['state']
+
+        if self.dhcp_enabled():
+            raise F5ModuleError(
+                "DHCP on the mgmt interface must be disabled to make use of " +
+                "this module"
+            )
+
+        if state == 'absent':
+            changed = self.absent()
+        else:
+            changed
= self.present() + + result.update(**self.cparams) + result.update(dict(changed=changed)) + return result + + def dhcp_enabled(self): + r = self.api.tm.sys.dbs.db.load(name='dhclient.mgmt') + if r.value == 'enable': + return True + else: + return False + + def read(self): + result = dict() + + cache = self.api.tm.sys.dbs.db.load(name='dns.cache') + proxy = self.api.tm.sys.dbs.db.load(name='dns.proxy.__iter__') + dns = self.api.tm.sys.dns.load() + + result['cache'] = str(cache.value) + result['forwarders'] = str(proxy.value).split(' ') + + if hasattr(dns, 'nameServers'): + result['name_servers'] = dns.nameServers + if hasattr(dns, 'search'): + result['search'] = dns.search + if hasattr(dns, 'include') and 'options inet6' in dns.include: + result['ip_version'] = 6 + else: + result['ip_version'] = 4 + return result + + def present(self): + params = dict() + current = self.read() + + # Temporary locations to hold the changed params + update = dict( + dns=None, + forwarders=None, + cache=None + ) + + nameservers = self.params['name_servers'] + search_domains = self.params['search'] + ip_version = self.params['ip_version'] + forwarders = self.params['forwarders'] + cache = self.params['cache'] + check_mode = self.params['check_mode'] + + if nameservers: + if 'name_servers' in current: + if nameservers != current['name_servers']: + params['nameServers'] = nameservers + else: + params['nameServers'] = nameservers + + if search_domains: + if 'search' in current: + if search_domains != current['search']: + params['search'] = search_domains + else: + params['search'] = search_domains + + if ip_version: + if 'ip_version' in current: + if ip_version != int(current['ip_version']): + if ip_version == 6: + params['include'] = 'options inet6' + elif ip_version == 4: + params['include'] = '' + else: + if ip_version == 6: + params['include'] = 'options inet6' + elif ip_version == 4: + params['include'] = '' + + if params: + self.cparams.update(camel_dict_to_snake_dict(params)) + + if 'include' in params: + del self.cparams['include'] + if params['include'] == '': + self.cparams['ip_version'] = 4 + else: + self.cparams['ip_version'] = 6 + + update['dns'] = params.copy() + params = dict() + + if forwarders: + if 'forwarders' in current: + if forwarders != current['forwarders']: + params['forwarders'] = forwarders + else: + params['forwarders'] = forwarders + + if params: + self.cparams.update(camel_dict_to_snake_dict(params)) + update['forwarders'] = ' '.join(params['forwarders']) + params = dict() + + if cache: + if 'cache' in current: + if cache != current['cache']: + params['cache'] = cache + + if params: + self.cparams.update(camel_dict_to_snake_dict(params)) + update['cache'] = params['cache'] + params = dict() + + if self.cparams: + changed = True + if check_mode: + return changed + else: + return False + + tx = self.api.tm.transactions.transaction + with TransactionContextManager(tx) as api: + cache = api.tm.sys.dbs.db.load(name='dns.cache') + proxy = api.tm.sys.dbs.db.load(name='dns.proxy.__iter__') + dns = api.tm.sys.dns.load() + + # Empty values can be supplied, but you cannot supply the + # None value, so we check for that specifically + if update['cache'] is not None: + cache.update(value=update['cache']) + if update['forwarders'] is not None: + proxy.update(value=update['forwarders']) + if update['dns'] is not None: + dns.update(**update['dns']) + return changed + + def absent(self): + params = dict() + current = self.read() + + # Temporary locations to hold the changed params + update = dict( + 
dns=None, + forwarders=None + ) + + nameservers = self.params['name_servers'] + search_domains = self.params['search'] + forwarders = self.params['forwarders'] + check_mode = self.params['check_mode'] + + if forwarders and 'forwarders' in current: + set_current = set(current['forwarders']) + set_new = set(forwarders) + + forwarders = set_current - set_new + if forwarders != set_current: + forwarders = list(forwarders) + params['forwarders'] = ' '.join(forwarders) + + if params: + changed = True + self.cparams.update(camel_dict_to_snake_dict(params)) + update['forwarders'] = params['forwarders'] + params = dict() + + if nameservers and 'name_servers' in current: + set_current = set(current['name_servers']) + set_new = set(nameservers) + + nameservers = set_current - set_new + if nameservers != set_current: + params['nameServers'] = list(nameservers) + + if search_domains and 'search' in current: + set_current = set(current['search']) + set_new = set(search_domains) + + search_domains = set_current - set_new + if search_domains != set_current: + params['search'] = list(search_domains) + + if params: + changed = True + self.cparams.update(camel_dict_to_snake_dict(params)) + update['dns'] = params.copy() + params = dict() + + if not self.cparams: + return False + + if check_mode: + return changed + + tx = self.api.tm.transactions.transaction + with TransactionContextManager(tx) as api: + proxy = api.tm.sys.dbs.db.load(name='dns.proxy.__iter__') + dns = api.tm.sys.dns.load() + + if update['forwarders'] is not None: + proxy.update(value=update['forwarders']) + if update['dns'] is not None: + dns.update(**update['dns']) + return changed + + +def main(): + argument_spec = f5_argument_spec() + + meta_args = dict( + cache=dict(required=False, choices=CACHE, default=None), + name_servers=dict(required=False, default=None, type='list'), + forwarders=dict(required=False, default=None, type='list'), + search=dict(required=False, default=None, type='list'), + ip_version=dict(required=False, default=None, choices=IP, type='int') + ) + argument_spec.update(meta_args) + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=[REQUIRED], + supports_check_mode=True + ) + + try: + obj = BigIpDeviceDns(check_mode=module.check_mode, **module.params) + result = obj.flush() + + module.exit_json(**result) + except F5ModuleError as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.f5 import * + +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_device_ntp.py b/network/f5/bigip_device_ntp.py new file mode 100644 index 00000000000..23ed81b7819 --- /dev/null +++ b/network/f5/bigip_device_ntp.py @@ -0,0 +1,263 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2016 F5 Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
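The absent() logic in bigip_device_dns above reduces to set arithmetic: compute current minus requested and only issue a write when the difference actually shrinks the set. The same pattern in isolation, with hypothetical name server lists:

```python
# The set-difference removal pattern used by absent() above, in isolation.
def remove_items(current, to_remove):
    """Return (new_list, changed) after removing to_remove from current."""
    set_current = set(current)
    remainder = set_current - set(to_remove)
    if remainder == set_current:
        return current, False  # nothing to remove -> no API call, no change
    return sorted(remainder), True

servers, changed = remove_items(
    ['208.67.222.222', '208.67.220.220'],  # hypothetical current name servers
    ['208.67.220.220'])
print(servers, changed)  # ['208.67.222.222'] True
```

Because the comparison is against the current set rather than the request, asking to remove a server that is already gone correctly reports changed=false.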
+ +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: bigip_device_ntp +short_description: Manage NTP servers on a BIG-IP +description: + - Manage NTP servers on a BIG-IP +version_added: "2.2" +options: + ntp_servers: + description: + - A list of NTP servers to set on the device. At least one of C(ntp_servers) + or C(timezone) is required. + required: false + default: [] + state: + description: + - The state of the NTP servers on the system. When C(present), guarantees + that the NTP servers are set on the system. When C(absent), removes the + specified NTP servers from the device configuration. + required: false + default: present + choices: + - absent + - present + timezone: + description: + - The timezone to set for NTP lookups. At least one of C(ntp_servers) or + C(timezone) is required. + default: UTC + required: false +notes: + - Requires the f5-sdk Python package on the host. This is as easy as pip + install f5-sdk. +extends_documentation_fragment: f5 +requirements: + - f5-sdk +author: + - Tim Rupp (@caphrim007) +''' + +EXAMPLES = ''' +- name: Set NTP server + bigip_device_ntp: + ntp_servers: + - "192.0.2.23" + password: "secret" + server: "lb.mydomain.com" + user: "admin" + validate_certs: "no" + delegate_to: localhost + +- name: Set timezone + bigip_device_ntp: + password: "secret" + server: "lb.mydomain.com" + timezone: "America/Los_Angeles" + user: "admin" + validate_certs: "no" + delegate_to: localhost +''' + +RETURN = ''' +ntp_servers: + description: The NTP servers that were set on the device + returned: changed + type: list + sample: ["192.0.2.23", "192.0.2.42"] +timezone: + description: The timezone that was set on the device + returned: changed + type: string + sample: "true" +''' + +try: + from f5.bigip import ManagementRoot + from icontrol.session import iControlUnexpectedHTTPError + HAS_F5SDK = True +except ImportError: + HAS_F5SDK = False + + +class BigIpDeviceNtp(object): + def __init__(self, *args, **kwargs): + if not HAS_F5SDK: + raise F5ModuleError("The python f5-sdk module is required") + + # The params that change in the module + self.cparams = dict() + + # Stores the params that are sent to the module + self.params = kwargs + self.api = ManagementRoot(kwargs['server'], + kwargs['user'], + kwargs['password'], + port=kwargs['server_port']) + + def flush(self): + result = dict() + changed = False + state = self.params['state'] + + try: + if state == "present": + changed = self.present() + elif state == "absent": + changed = self.absent() + except iControlUnexpectedHTTPError as e: + raise F5ModuleError(str(e)) + + if 'servers' in self.cparams: + self.cparams['ntp_servers'] = self.cparams.pop('servers') + + result.update(**self.cparams) + result.update(dict(changed=changed)) + return result + + def read(self): + """Read information and transform it + + The values that are returned by BIG-IP in the f5-sdk can have encoding + attached to them as well as be completely missing in some cases. + + Therefore, this method will transform the data from the BIG-IP into a + format that is more easily consumable by the rest of the class and the + parameters that are supported by the module. 
+ """ + p = dict() + r = self.api.tm.sys.ntp.load() + + if hasattr(r, 'servers'): + # Deliberately using sets to supress duplicates + p['servers'] = set([str(x) for x in r.servers]) + if hasattr(r, 'timezone'): + p['timezone'] = str(r.timezone) + return p + + def present(self): + changed = False + params = dict() + current = self.read() + + check_mode = self.params['check_mode'] + ntp_servers = self.params['ntp_servers'] + timezone = self.params['timezone'] + + # NTP servers can be set independently + if ntp_servers is not None: + if 'servers' in current: + items = set(ntp_servers) + if items != current['servers']: + params['servers'] = list(ntp_servers) + else: + params['servers'] = ntp_servers + + # Timezone can be set independently + if timezone is not None: + if 'timezone' in current and current['timezone'] != timezone: + params['timezone'] = timezone + + if params: + changed = True + self.cparams = camel_dict_to_snake_dict(params) + if check_mode: + return changed + else: + return changed + + r = self.api.tm.sys.ntp.load() + r.update(**params) + r.refresh() + + return changed + + def absent(self): + changed = False + params = dict() + current = self.read() + + check_mode = self.params['check_mode'] + ntp_servers = self.params['ntp_servers'] + + if not ntp_servers: + raise F5ModuleError( + "Absent can only be used when removing NTP servers" + ) + + if ntp_servers and 'servers' in current: + servers = current['servers'] + new_servers = [x for x in servers if x not in ntp_servers] + + if servers != new_servers: + params['servers'] = new_servers + + if params: + changed = True + self.cparams = camel_dict_to_snake_dict(params) + if check_mode: + return changed + else: + return changed + + r = self.api.tm.sys.ntp.load() + r.update(**params) + r.refresh() + return changed + + +def main(): + argument_spec = f5_argument_spec() + + meta_args = dict( + ntp_servers=dict(required=False, type='list', default=None), + timezone=dict(default=None, required=False) + ) + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=[ + ['ntp_servers', 'timezone'] + ], + supports_check_mode=True + ) + + try: + obj = BigIpDeviceNtp(check_mode=module.check_mode, **module.params) + result = obj.flush() + + module.exit_json(**result) + except F5ModuleError as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.f5 import * + +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_device_sshd.py b/network/f5/bigip_device_sshd.py new file mode 100644 index 00000000000..87ffeb6bee0 --- /dev/null +++ b/network/f5/bigip_device_sshd.py @@ -0,0 +1,350 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2016 F5 Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: bigip_device_sshd +short_description: Manage the SSHD settings of a BIG-IP +description: + - Manage the SSHD settings of a BIG-IP +version_added: "2.2" +options: + allow: + description: + - Specifies, if you have enabled SSH access, the IP address or address + range for other systems that can use SSH to communicate with this + system. + choices: + - all + - IP address, such as 172.27.1.10 + - IP range, such as 172.27.*.* or 172.27.0.0/255.255.0.0 + banner: + description: + - Whether to enable the banner or not. + required: false + choices: + - enabled + - disabled + banner_text: + description: + - Specifies the text to include on the pre-login banner that displays + when a user attempts to login to the system using SSH. + required: false + inactivity_timeout: + description: + - Specifies the number of seconds before inactivity causes an SSH + session to log out. + required: false + log_level: + description: + - Specifies the minimum SSHD message level to include in the system log. + choices: + - debug + - debug1 + - debug2 + - debug3 + - error + - fatal + - info + - quiet + - verbose + login: + description: + - Specifies, when checked C(enabled), that the system accepts SSH + communications. + choices: + - enabled + - disabled + required: false + port: + description: + - Port that you want the SSH daemon to run on. + required: false +notes: + - Requires the f5-sdk Python package on the host This is as easy as pip + install f5-sdk. + - Requires BIG-IP version 12.0.0 or greater +extends_documentation_fragment: f5 +requirements: + - f5-sdk +author: + - Tim Rupp (@caphrim007) +''' + +EXAMPLES = ''' +- name: Set the banner for the SSHD service from a string + bigip_device_sshd: + banner: "enabled" + banner_text: "banner text goes here" + password: "secret" + server: "lb.mydomain.com" + user: "admin" + delegate_to: localhost + +- name: Set the banner for the SSHD service from a file + bigip_device_sshd: + banner: "enabled" + banner_text: "{{ lookup('file', '/path/to/file') }}" + password: "secret" + server: "lb.mydomain.com" + user: "admin" + delegate_to: localhost + +- name: Set the SSHD service to run on port 2222 + bigip_device_sshd: + password: "secret" + port: 2222 + server: "lb.mydomain.com" + user: "admin" + delegate_to: localhost +''' + +RETURN = ''' +allow: + description: > + Specifies, if you have enabled SSH access, the IP address or address + range for other systems that can use SSH to communicate with this + system. + returned: changed + type: string + sample: "192.0.2.*" +banner: + description: Whether the banner is enabled or not. + returned: changed + type: string + sample: "true" +banner_text: + description: > + Specifies the text included on the pre-login banner that + displays when a user attempts to login to the system using SSH. + returned: changed and success + type: string + sample: "This is a corporate device. Connecting to it without..." +inactivity_timeout: + description: > + The number of seconds before inactivity causes an SSH. + session to log out + returned: changed + type: int + sample: "10" +log_level: + description: The minimum SSHD message level to include in the system log. + returned: changed + type: string + sample: "debug" +login: + description: Specifies that the system accepts SSH communications or not. + return: changed + type: bool + sample: true +port: + description: Port that you want the SSH daemon to run on. 
+ return: changed + type: int + sample: 22 +''' + +try: + from f5.bigip import ManagementRoot + from icontrol.session import iControlUnexpectedHTTPError + HAS_F5SDK = True +except ImportError: + HAS_F5SDK = False + +CHOICES = ['enabled', 'disabled'] +LEVELS = ['debug', 'debug1', 'debug2', 'debug3', 'error', 'fatal', 'info', + 'quiet', 'verbose'] + + +class BigIpDeviceSshd(object): + def __init__(self, *args, **kwargs): + if not HAS_F5SDK: + raise F5ModuleError("The python f5-sdk module is required") + + # The params that change in the module + self.cparams = dict() + + # Stores the params that are sent to the module + self.params = kwargs + self.api = ManagementRoot(kwargs['server'], + kwargs['user'], + kwargs['password'], + port=kwargs['server_port']) + + def update(self): + changed = False + current = self.read() + params = dict() + + allow = self.params['allow'] + banner = self.params['banner'] + banner_text = self.params['banner_text'] + timeout = self.params['inactivity_timeout'] + log_level = self.params['log_level'] + login = self.params['login'] + port = self.params['port'] + check_mode = self.params['check_mode'] + + if allow: + if 'allow' in current: + items = set(allow) + if items != current['allow']: + params['allow'] = list(items) + else: + params['allow'] = allow + + if banner: + if 'banner' in current: + if banner != current['banner']: + params['banner'] = banner + else: + params['banner'] = banner + + if banner_text: + if 'banner_text' in current: + if banner_text != current['banner_text']: + params['bannerText'] = banner_text + else: + params['bannerText'] = banner_text + + if timeout: + if 'inactivity_timeout' in current: + if timeout != current['inactivity_timeout']: + params['inactivityTimeout'] = timeout + else: + params['inactivityTimeout'] = timeout + + if log_level: + if 'log_level' in current: + if log_level != current['log_level']: + params['logLevel'] = log_level + else: + params['logLevel'] = log_level + + if login: + if 'login' in current: + if login != current['login']: + params['login'] = login + else: + params['login'] = login + + if port: + if 'port' in current: + if port != current['port']: + params['port'] = port + else: + params['port'] = port + + if params: + changed = True + if check_mode: + return changed + self.cparams = camel_dict_to_snake_dict(params) + else: + return changed + + r = self.api.tm.sys.sshd.load() + r.update(**params) + r.refresh() + + return changed + + def read(self): + """Read information and transform it + + The values that are returned by BIG-IP in the f5-sdk can have encoding + attached to them as well as be completely missing in some cases. + + Therefore, this method will transform the data from the BIG-IP into a + format that is more easily consumable by the rest of the class and the + parameters that are supported by the module. 
+ """ + p = dict() + r = self.api.tm.sys.sshd.load() + + if hasattr(r, 'allow'): + # Deliberately using sets to supress duplicates + p['allow'] = set([str(x) for x in r.allow]) + if hasattr(r, 'banner'): + p['banner'] = str(r.banner) + if hasattr(r, 'bannerText'): + p['banner_text'] = str(r.bannerText) + if hasattr(r, 'inactivityTimeout'): + p['inactivity_timeout'] = str(r.inactivityTimeout) + if hasattr(r, 'logLevel'): + p['log_level'] = str(r.logLevel) + if hasattr(r, 'login'): + p['login'] = str(r.login) + if hasattr(r, 'port'): + p['port'] = int(r.port) + return p + + def flush(self): + result = dict() + changed = False + + try: + changed = self.update() + except iControlUnexpectedHTTPError as e: + raise F5ModuleError(str(e)) + + result.update(**self.cparams) + result.update(dict(changed=changed)) + return result + + +def main(): + argument_spec = f5_argument_spec() + + meta_args = dict( + allow=dict(required=False, default=None, type='list'), + banner=dict(required=False, default=None, choices=CHOICES), + banner_text=dict(required=False, default=None), + inactivity_timeout=dict(required=False, default=None, type='int'), + log_level=dict(required=False, default=None, choices=LEVELS), + login=dict(required=False, default=None, choices=CHOICES), + port=dict(required=False, default=None, type='int'), + state=dict(default='present', choices=['present']) + ) + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + try: + obj = BigIpDeviceSshd(check_mode=module.check_mode, **module.params) + result = obj.flush() + + module.exit_json(**result) + except F5ModuleError as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.f5 import * + +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_facts.py b/network/f5/bigip_facts.py index 1b106ba0a3e..33d5e1937e6 100644 --- a/network/f5/bigip_facts.py +++ b/network/f5/bigip_facts.py @@ -1,6 +1,6 @@ #!/usr/bin/python # -*- coding: utf-8 -*- - +# # (c) 2013, Matt Hite # # This file is part of Ansible @@ -18,101 +18,85 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: bigip_facts -short_description: "Collect facts from F5 BIG-IP devices" +short_description: Collect facts from F5 BIG-IP devices description: - - "Collect facts from F5 BIG-IP devices via iControl SOAP API" + - Collect facts from F5 BIG-IP devices via iControl SOAP API version_added: "1.6" -author: "Matt Hite (@mhite)" +author: + - Matt Hite (@mhite) + - Tim Rupp (@caphrim007) notes: - - "Requires BIG-IP software version >= 11.4" - - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - - "Best run as a local_action in your playbook" - - "Tested with manager and above account privilege level" - + - Requires BIG-IP software version >= 11.4 + - F5 developed module 'bigsuds' required (see http://devcentral.f5.com) + - Best run as a local_action in your playbook + - Tested with manager and above account privilege level + - C(provision) facts were added in 2.2 requirements: - - bigsuds + - bigsuds options: - server: - description: - - BIG-IP host - required: true - default: null - choices: [] - aliases: [] - user: - description: - - BIG-IP username - required: true - default: null - choices: [] - aliases: [] - password: - description: - - BIG-IP password - required: true - default: null - choices: [] - aliases: [] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 2.0 - session: - description: - - BIG-IP session support; may be useful to avoid concurrency - issues in certain circumstances. - required: false - default: true - choices: [] - aliases: [] - include: - description: - - Fact category or list of categories to collect - required: true - default: null - choices: ['address_class', 'certificate', 'client_ssl_profile', - 'device', 'device_group', 'interface', 'key', 'node', 'pool', - 'rule', 'self_ip', 'software', 'system_info', 'traffic_group', - 'trunk', 'virtual_address', 'virtual_server', 'vlan'] - aliases: [] - filter: - description: - - Shell-style glob matching string used to filter fact keys. Not - applicable for software and system_info fact categories. - required: false - default: null - choices: [] - aliases: [] + session: + description: + - BIG-IP session support; may be useful to avoid concurrency + issues in certain circumstances. + required: false + default: true + choices: [] + aliases: [] + include: + description: + - Fact category or list of categories to collect + required: true + default: null + choices: + - address_class + - certificate + - client_ssl_profile + - device + - device_group + - interface + - key + - node + - pool + - provision + - rule + - self_ip + - software + - system_info + - traffic_group + - trunk + - virtual_address + - virtual_server + - vlan + aliases: [] + filter: + description: + - Shell-style glob matching string used to filter fact keys. Not + applicable for software, provision, and system_info fact categories. + required: false + default: null + choices: [] + aliases: [] +extends_documentation_fragment: f5 ''' EXAMPLES = ''' - -## playbook task examples: - ---- -# file bigip-test.yml -# ... 
-- hosts: bigip-test - tasks: - - name: Collect BIG-IP facts - local_action: > - bigip_facts - server=lb.mydomain.com - user=admin - password=mysecret - include=interface,vlan - +- name: Collect BIG-IP facts + bigip_facts: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + include: "interface,vlan" + delegate_to: localhost ''' try: - import bigsuds from suds import MethodNotFound, WebFault except ImportError: bigsuds_found = False @@ -120,12 +104,9 @@ bigsuds_found = True import fnmatch -import traceback import re +import traceback -# =========================================== -# bigip_facts module specific support methods. -# class F5(object): """F5 iControl class. @@ -136,8 +117,8 @@ class F5(object): api: iControl API instance. """ - def __init__(self, host, user, password, session=False): - self.api = bigsuds.BIGIP(hostname=host, username=user, password=password) + def __init__(self, host, user, password, session=False, validate_certs=True, port=443): + self.api = bigip_api(host, user, password, validate_certs, port) if session: self.start_session() @@ -967,6 +948,7 @@ def get_verification_status(self): def get_definition(self): return [x['rule_definition'] for x in self.api.LocalLB.Rule.query_rule(rule_names=self.rules)] + class Nodes(object): """Nodes class. @@ -1101,7 +1083,7 @@ def get_list(self): def get_address_class(self): key = self.api.LocalLB.Class.get_address_class(self.address_classes) value = self.api.LocalLB.Class.get_address_class_member_data_value(key) - result = map(zip, [x['members'] for x in key], value) + result = list(map(zip, [x['members'] for x in key], value)) return result def get_description(self): @@ -1364,6 +1346,35 @@ def get_uptime(self): return self.api.System.SystemInfo.get_uptime() +class ProvisionInfo(object): + """Provision information class. + + F5 BIG-IP provision information class. + + Attributes: + api: iControl API instance. 
+ """ + + def __init__(self, api): + self.api = api + + def get_list(self): + result = [] + list = self.api.Management.Provision.get_list() + for item in list: + item = item.lower().replace('tmos_module_', '') + result.append(item) + return result + + def get_provisioned_list(self): + result = [] + list = self.api.Management.Provision.get_provisioned_list() + for item in list: + item = item.lower().replace('tmos_module_', '') + result.append(item) + return result + + def generate_dict(api_obj, fields): result_dict = {} lists = [] @@ -1383,6 +1394,7 @@ def generate_dict(api_obj, fields): result_dict[j] = temp return result_dict + def generate_simple_dict(api_obj, fields): result_dict = {} for field in fields: @@ -1394,6 +1406,7 @@ def generate_simple_dict(api_obj, fields): result_dict[field] = api_response return result_dict + def generate_interface_dict(f5, regex): interfaces = Interfaces(f5.get_api(), regex) fields = ['active_media', 'actual_flow_control', 'bundle_state', @@ -1408,6 +1421,7 @@ def generate_interface_dict(f5, regex): 'stp_protocol_detection_reset_state'] return generate_dict(interfaces, fields) + def generate_self_ip_dict(f5, regex): self_ips = SelfIPs(f5.get_api(), regex) fields = ['address', 'allow_access_list', 'description', @@ -1416,6 +1430,7 @@ def generate_self_ip_dict(f5, regex): 'vlan', 'is_traffic_group_inherited'] return generate_dict(self_ips, fields) + def generate_trunk_dict(f5, regex): trunks = Trunks(f5.get_api(), regex) fields = ['active_lacp_state', 'configured_member_count', 'description', @@ -1425,6 +1440,7 @@ def generate_trunk_dict(f5, regex): 'stp_protocol_detection_reset_state'] return generate_dict(trunks, fields) + def generate_vlan_dict(f5, regex): vlans = Vlans(f5.get_api(), regex) fields = ['auto_lasthop', 'cmp_hash_algorithm', 'description', @@ -1436,6 +1452,7 @@ def generate_vlan_dict(f5, regex): 'source_check_state', 'true_mac_address', 'vlan_id'] return generate_dict(vlans, fields) + def generate_vs_dict(f5, regex): virtual_servers = VirtualServers(f5.get_api(), regex) fields = ['actual_hardware_acceleration', 'authentication_profile', @@ -1456,6 +1473,7 @@ def generate_vs_dict(f5, regex): 'translate_port_state', 'type', 'vlan', 'wildmask'] return generate_dict(virtual_servers, fields) + def generate_pool_dict(f5, regex): pools = Pools(f5.get_api(), regex) fields = ['action_on_service_down', 'active_member_count', @@ -1472,6 +1490,7 @@ def generate_pool_dict(f5, regex): 'simple_timeout', 'slow_ramp_time'] return generate_dict(pools, fields) + def generate_device_dict(f5, regex): devices = Devices(f5.get_api(), regex) fields = ['active_modules', 'base_mac_address', 'blade_addresses', @@ -1484,14 +1503,16 @@ def generate_device_dict(f5, regex): 'timelimited_modules', 'timezone', 'unicast_addresses'] return generate_dict(devices, fields) + def generate_device_group_dict(f5, regex): device_groups = DeviceGroups(f5.get_api(), regex) - fields = ['all_preferred_active', 'autosync_enabled_state','description', + fields = ['all_preferred_active', 'autosync_enabled_state', 'description', 'device', 'full_load_on_sync_state', 'incremental_config_sync_size_maximum', 'network_failover_enabled_state', 'sync_status', 'type'] return generate_dict(device_groups, fields) + def generate_traffic_group_dict(f5, regex): traffic_groups = TrafficGroups(f5.get_api(), regex) fields = ['auto_failback_enabled_state', 'auto_failback_time', @@ -1500,12 +1521,14 @@ def generate_traffic_group_dict(f5, regex): 'unit_id'] return generate_dict(traffic_groups, fields) + def 
generate_rule_dict(f5, regex): rules = Rules(f5.get_api(), regex) fields = ['definition', 'description', 'ignore_vertification', 'verification_status'] return generate_dict(rules, fields) + def generate_node_dict(f5, regex): nodes = Nodes(f5.get_api(), regex) fields = ['address', 'connection_limit', 'description', 'dynamic_ratio', @@ -1513,6 +1536,7 @@ def generate_node_dict(f5, regex): 'object_status', 'rate_limit', 'ratio', 'session_status'] return generate_dict(nodes, fields) + def generate_virtual_address_dict(f5, regex): virtual_addresses = VirtualAddresses(f5.get_api(), regex) fields = ['address', 'arp_state', 'auto_delete_state', 'connection_limit', @@ -1521,19 +1545,23 @@ def generate_virtual_address_dict(f5, regex): 'route_advertisement_state', 'traffic_group'] return generate_dict(virtual_addresses, fields) + def generate_address_class_dict(f5, regex): address_classes = AddressClasses(f5.get_api(), regex) fields = ['address_class', 'description'] return generate_dict(address_classes, fields) + def generate_certificate_dict(f5, regex): certificates = Certificates(f5.get_api(), regex) return dict(zip(certificates.get_list(), certificates.get_certificate_list())) + def generate_key_dict(f5, regex): keys = Keys(f5.get_api(), regex) return dict(zip(keys.get_list(), keys.get_key_list())) + def generate_client_ssl_profile_dict(f5, regex): profiles = ProfileClientSSL(f5.get_api(), regex) fields = ['alert_timeout', 'allow_nonssl_state', 'authenticate_depth', @@ -1557,6 +1585,7 @@ def generate_client_ssl_profile_dict(f5, regex): 'unclean_shutdown_state', 'is_base_profile', 'is_system_profile'] return generate_dict(profiles, fields) + def generate_system_info_dict(f5): system_info = SystemInfo(f5.get_api()) fields = ['base_mac_address', @@ -1569,62 +1598,68 @@ def generate_system_info_dict(f5): 'time_zone', 'uptime'] return generate_simple_dict(system_info, fields) + def generate_software_list(f5): software = Software(f5.get_api()) software_list = software.get_all_software_status() return software_list -def disable_ssl_cert_validation(): - # You probably only want to do this for testing and never in production. 
- # From https://www.python.org/dev/peps/pep-0476/#id29 - import ssl - ssl._create_default_https_context = ssl._create_unverified_context + +def generate_provision_dict(f5): + provisioned = ProvisionInfo(f5.get_api()) + fields = ['list', 'provisioned_list'] + return generate_simple_dict(provisioned, fields) def main(): + argument_spec = f5_argument_spec() + + meta_args = dict( + session=dict(type='bool', default=False), + include=dict(type='list', required=True), + filter=dict(type='str', required=False), + ) + argument_spec.update(meta_args) + module = AnsibleModule( - argument_spec = dict( - server = dict(type='str', required=True), - user = dict(type='str', required=True), - password = dict(type='str', required=True), - validate_certs = dict(default='yes', type='bool'), - session = dict(type='bool', default=False), - include = dict(type='list', required=True), - filter = dict(type='str', required=False), - ) + argument_spec=argument_spec ) if not bigsuds_found: - module.fail_json(msg="the python suds and bigsuds modules is required") + module.fail_json(msg="the python suds and bigsuds modules are required") server = module.params['server'] + server_port = module.params['server_port'] user = module.params['user'] password = module.params['password'] validate_certs = module.params['validate_certs'] session = module.params['session'] fact_filter = module.params['filter'] + + if validate_certs: + import ssl + if not hasattr(ssl, 'SSLContext'): + module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task') + if fact_filter: regex = fnmatch.translate(fact_filter) else: regex = None - include = map(lambda x: x.lower(), module.params['include']) + include = [x.lower() for x in module.params['include']] valid_includes = ('address_class', 'certificate', 'client_ssl_profile', 'device', 'device_group', 'interface', 'key', 'node', - 'pool', 'rule', 'self_ip', 'software', 'system_info', - 'traffic_group', 'trunk', 'virtual_address', - 'virtual_server', 'vlan') + 'pool', 'provision', 'rule', 'self_ip', 'software', + 'system_info', 'traffic_group', 'trunk', + 'virtual_address', 'virtual_server', 'vlan') include_test = map(lambda x: x in valid_includes, include) if not all(include_test): module.fail_json(msg="value of include must be one or more of: %s, got: %s" % (",".join(valid_includes), ",".join(include))) - if not validate_certs: - disable_ssl_cert_validation() - try: facts = {} if len(include) > 0: - f5 = F5(server, user, password, session) + f5 = F5(server, user, password, session, validate_certs, server_port) saved_active_folder = f5.get_active_folder() saved_recursive_query_state = f5.get_recursive_query_state() if saved_active_folder != "/": @@ -1644,6 +1679,8 @@ def main(): facts['virtual_server'] = generate_vs_dict(f5, regex) if 'pool' in include: facts['pool'] = generate_pool_dict(f5, regex) + if 'provision' in include: + facts['provision'] = generate_provision_dict(f5) if 'device' in include: facts['device'] = generate_device_dict(f5, regex) if 'device_group' in include: @@ -1678,14 +1715,14 @@ def main(): result = {'ansible_facts': facts} - except Exception, e: + except Exception as e: module.fail_json(msg="received exception: %s\ntraceback: %s" % (e, traceback.format_exc())) module.exit_json(**result) # include magic from lib/ansible/module_common.py from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * if __name__ == '__main__': main() - diff --git 
a/network/f5/bigip_gtm_datacenter.py b/network/f5/bigip_gtm_datacenter.py
new file mode 100644
index 00000000000..fff876007cf
--- /dev/null
+++ b/network/f5/bigip_gtm_datacenter.py
@@ -0,0 +1,372 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_gtm_datacenter
+short_description: Manage Datacenter configuration in BIG-IP
+description:
+  - Manage BIG-IP data center configuration. A data center defines the location
+    where the physical network components reside, such as the server and link
+    objects that share the same subnet on the network. This module is able to
+    manipulate the data center definitions in a BIG-IP.
+version_added: "2.2"
+options:
+  contact:
+    description:
+      - The name of the contact for the data center.
+  description:
+    description:
+      - The description of the data center.
+  enabled:
+    description:
+      - Whether the data center should be enabled. At least one of C(state) and
+        C(enabled) is required.
+    choices:
+      - yes
+      - no
+  location:
+    description:
+      - The location of the data center.
+  name:
+    description:
+      - The name of the data center.
+    required: true
+  state:
+    description:
+      - The state of the datacenter on the BIG-IP. When C(present), guarantees
+        that the data center exists. When C(absent), removes the data center
+        from the BIG-IP. The C(enabled) option will enable the data center and
+        C(enabled=no) will ensure the data center is disabled. At least one of
+        C(state) and C(enabled) is required.
+    choices:
+      - present
+      - absent
+notes:
+  - Requires the f5-sdk Python package on the host. This is as easy as
+    pip install f5-sdk.
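+# Illustrative only (values are hypothetical, and YAML comments are ignored
+# when this documentation is parsed): keeping a data center defined but
+# switched off combines the two options like so:
+#
+#   - bigip_gtm_datacenter:
+#       name: "New York"
+#       state: "present"
+#       enabled: "no"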
+extends_documentation_fragment: f5
+requirements:
+  - f5-sdk
+author:
+  - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Create data center "New York"
+  bigip_gtm_datacenter:
+      server: "big-ip"
+      name: "New York"
+      location: "222 West 23rd"
+  delegate_to: localhost
+'''
+
+RETURN = '''
+contact:
+  description: The contact that was set on the datacenter
+  returned: changed
+  type: string
+  sample: "admin@root.local"
+description:
+  description: The description that was set for the datacenter
+  returned: changed
+  type: string
+  sample: "Datacenter in NYC"
+enabled:
+  description: Whether the datacenter is enabled or not
+  returned: changed
+  type: bool
+  sample: true
+location:
+  description: The location that is set for the datacenter
+  returned: changed
+  type: string
+  sample: "222 West 23rd"
+name:
+  description: Name of the datacenter being manipulated
+  returned: changed
+  type: string
+  sample: "foo"
+'''
+
+try:
+    from f5.bigip import ManagementRoot
+    from icontrol.session import iControlUnexpectedHTTPError
+    HAS_F5SDK = True
+except ImportError:
+    HAS_F5SDK = False
+
+
+class BigIpGtmDatacenter(object):
+    def __init__(self, *args, **kwargs):
+        if not HAS_F5SDK:
+            raise F5ModuleError("The python f5-sdk module is required")
+
+        # The params that change in the module
+        self.cparams = dict()
+
+        # Stores the params that are sent to the module
+        self.params = kwargs
+        self.api = ManagementRoot(kwargs['server'],
+                                  kwargs['user'],
+                                  kwargs['password'],
+                                  port=kwargs['server_port'])
+
+    def create(self):
+        params = dict()
+
+        check_mode = self.params['check_mode']
+        contact = self.params['contact']
+        description = self.params['description']
+        location = self.params['location']
+        name = self.params['name']
+        partition = self.params['partition']
+        enabled = self.params['enabled']
+
+        # Specifically check for None because a person could supply empty
+        # values which would technically still be valid
+        if contact is not None:
+            params['contact'] = contact
+
+        if description is not None:
+            params['description'] = description
+
+        if location is not None:
+            params['location'] = location
+
+        # Mirror the update() logic below: an explicit C(enabled=no) must
+        # disable the data center rather than silently enabling it
+        if enabled is not None:
+            if enabled:
+                params['enabled'] = True
+            else:
+                params['disabled'] = True
+
+        params['name'] = name
+        params['partition'] = partition
+
+        self.cparams = camel_dict_to_snake_dict(params)
+        if check_mode:
+            return True
+
+        d = self.api.tm.gtm.datacenters.datacenter
+        d.create(**params)
+
+        if not self.exists():
+            raise F5ModuleError("Failed to create the datacenter")
+        return True
+
+    def read(self):
+        """Read information and transform it
+
+        The values that are returned by BIG-IP in the f5-sdk can have encoding
+        attached to them as well as be completely missing in some cases.
+
+        Therefore, this method will transform the data from the BIG-IP into a
+        format that is more easily consumable by the rest of the class and the
+        parameters that are supported by the module.
+ """ + p = dict() + name = self.params['name'] + partition = self.params['partition'] + r = self.api.tm.gtm.datacenters.datacenter.load( + name=name, + partition=partition + ) + + if hasattr(r, 'servers'): + # Deliberately using sets to supress duplicates + p['servers'] = set([str(x) for x in r.servers]) + if hasattr(r, 'contact'): + p['contact'] = str(r.contact) + if hasattr(r, 'location'): + p['location'] = str(r.location) + if hasattr(r, 'description'): + p['description'] = str(r.description) + if r.enabled: + p['enabled'] = True + else: + p['enabled'] = False + p['name'] = name + return p + + def update(self): + changed = False + params = dict() + current = self.read() + + check_mode = self.params['check_mode'] + contact = self.params['contact'] + description = self.params['description'] + location = self.params['location'] + name = self.params['name'] + partition = self.params['partition'] + enabled = self.params['enabled'] + + if contact is not None: + if 'contact' in current: + if contact != current['contact']: + params['contact'] = contact + else: + params['contact'] = contact + + if description is not None: + if 'description' in current: + if description != current['description']: + params['description'] = description + else: + params['description'] = description + + if location is not None: + if 'location' in current: + if location != current['location']: + params['location'] = location + else: + params['location'] = location + + if enabled is not None: + if current['enabled'] != enabled: + if enabled is True: + params['enabled'] = True + params['disabled'] = False + else: + params['disabled'] = True + params['enabled'] = False + + if params: + changed = True + if check_mode: + return changed + self.cparams = camel_dict_to_snake_dict(params) + else: + return changed + + r = self.api.tm.gtm.datacenters.datacenter.load( + name=name, + partition=partition + ) + r.update(**params) + r.refresh() + + return True + + def delete(self): + params = dict() + check_mode = self.params['check_mode'] + + params['name'] = self.params['name'] + params['partition'] = self.params['partition'] + + self.cparams = camel_dict_to_snake_dict(params) + if check_mode: + return True + + dc = self.api.tm.gtm.datacenters.datacenter.load(**params) + dc.delete() + + if self.exists(): + raise F5ModuleError("Failed to delete the datacenter") + return True + + def present(self): + changed = False + + if self.exists(): + changed = self.update() + else: + changed = self.create() + + return changed + + def absent(self): + changed = False + + if self.exists(): + changed = self.delete() + + return changed + + def exists(self): + name = self.params['name'] + partition = self.params['partition'] + + return self.api.tm.gtm.datacenters.datacenter.exists( + name=name, + partition=partition + ) + + def flush(self): + result = dict() + state = self.params['state'] + enabled = self.params['enabled'] + + if state is None and enabled is None: + module.fail_json(msg="Neither 'state' nor 'enabled' set") + + try: + if state == "present": + changed = self.present() + + # Ensure that this field is not returned to the user since it + # is not a valid parameter to the module. 
+                if 'disabled' in self.cparams:
+                    del self.cparams['disabled']
+            elif state == "absent":
+                changed = self.absent()
+        except iControlUnexpectedHTTPError as e:
+            raise F5ModuleError(str(e))
+
+        result.update(**self.cparams)
+        result.update(dict(changed=changed))
+        return result
+
+
+def main():
+    argument_spec = f5_argument_spec()
+
+    meta_args = dict(
+        contact=dict(required=False, default=None),
+        description=dict(required=False, default=None),
+        enabled=dict(required=False, type='bool', default=None, choices=BOOLEANS),
+        location=dict(required=False, default=None),
+        name=dict(required=True)
+    )
+    argument_spec.update(meta_args)
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True
+    )
+
+    try:
+        obj = BigIpGtmDatacenter(check_mode=module.check_mode, **module.params)
+        result = obj.flush()
+
+        module.exit_json(**result)
+    except F5ModuleError as e:
+        module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+    main()
diff --git a/network/f5/bigip_gtm_facts.py b/network/f5/bigip_gtm_facts.py
new file mode 100644
index 00000000000..9e3fc8b492f
--- /dev/null
+++ b/network/f5/bigip_gtm_facts.py
@@ -0,0 +1,495 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_gtm_facts
+short_description: Collect facts from F5 BIG-IP GTM devices.
+description:
+  - Collect facts from F5 BIG-IP GTM devices.
+version_added: "2.3"
+options:
+  include:
+    description:
+      - Fact category to collect
+    required: true
+    choices:
+      - pool
+      - wide_ip
+      - virtual_server
+  filter:
+    description:
+      - Perform regex filter of response. Filtering is done on the name of
+        the resource. Valid filters are anything that can be provided to
+        Python's C(re) module.
+    required: false
+    default: None
+notes:
+  - Requires the f5-sdk Python package on the host. This is as easy as
+    pip install f5-sdk
+extends_documentation_fragment: f5
+requirements:
+  - f5-sdk
+author:
+  - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Get pool facts
+  bigip_gtm_facts:
+      server: "lb.mydomain.com"
+      user: "admin"
+      password: "secret"
+      include: "pool"
+      filter: "my_pool"
+  delegate_to: localhost
+'''
+
+RETURN = '''
+wide_ip:
+  description:
+    Contains the lb method for the wide ip and the pools
+    that are within the wide ip.
+ returned: changed + type: dict + sample: + wide_ip: + - enabled: "True" + failure_rcode: "noerror" + failure_rcode_response: "disabled" + failure_rcode_ttl: "0" + full_path: "/Common/foo.ok.com" + last_resort_pool: "" + minimal_response: "enabled" + name: "foo.ok.com" + partition: "Common" + persist_cidr_ipv4: "32" + persist_cidr_ipv6: "128" + persistence: "disabled" + pool_lb_mode: "round-robin" + pools: + - name: "d3qw" + order: "0" + partition: "Common" + ratio: "1" + ttl_persistence: "3600" + type: "naptr" +pool: + description: Contains the pool object status and enabled status. + returned: changed + type: dict + sample: + pool: + - alternate_mode: "round-robin" + dynamic_ratio: "disabled" + enabled: "True" + fallback_mode: "return-to-dns" + full_path: "/Common/d3qw" + load_balancing_mode: "round-robin" + manual_resume: "disabled" + max_answers_returned: "1" + members: + - disabled: "True" + flags: "a" + full_path: "ok3.com" + member_order: "0" + name: "ok3.com" + order: "10" + preference: "10" + ratio: "1" + service: "80" + name: "d3qw" + partition: "Common" + qos_hit_ratio: "5" + qos_hops: "0" + qos_kilobytes_second: "3" + qos_lcs: "30" + qos_packet_rate: "1" + qos_rtt: "50" + qos_topology: "0" + qos_vs_capacity: "0" + qos_vs_score: "0" + ttl: "30" + type: "naptr" + verify_member_availability: "disabled" +virtual_server: + description: + Contains the virtual server enabled and availability + status, and address + returned: changed + type: dict + sample: + virtual_server: + - addresses: + - device_name: "/Common/qweqwe" + name: "10.10.10.10" + translation: "none" + datacenter: "/Common/xfxgh" + enabled: "True" + expose_route_domains: "no" + full_path: "/Common/qweqwe" + iq_allow_path: "yes" + iq_allow_service_check: "yes" + iq_allow_snmp: "yes" + limit_cpu_usage: "0" + limit_cpu_usage_status: "disabled" + limit_max_bps: "0" + limit_max_bps_status: "disabled" + limit_max_connections: "0" + limit_max_connections_status: "disabled" + limit_max_pps: "0" + limit_max_pps_status: "disabled" + limit_mem_avail: "0" + limit_mem_avail_status: "disabled" + link_discovery: "disabled" + monitor: "/Common/bigip " + name: "qweqwe" + partition: "Common" + product: "single-bigip" + virtual_server_discovery: "disabled" + virtual_servers: + - destination: "10.10.10.10:0" + enabled: "True" + full_path: "jsdfhsd" + limit_max_bps: "0" + limit_max_bps_status: "disabled" + limit_max_connections: "0" + limit_max_connections_status: "disabled" + limit_max_pps: "0" + limit_max_pps_status: "disabled" + name: "jsdfhsd" + translation_address: "none" + translation_port: "0" +''' + +try: + from distutils.version import LooseVersion + from f5.bigip.contexts import TransactionContextManager + from f5.bigip import ManagementRoot + from icontrol.session import iControlUnexpectedHTTPError + + HAS_F5SDK = True +except ImportError: + HAS_F5SDK = False + +import re + + +class BigIpGtmFactsCommon(object): + def __init__(self): + self.api = None + self.attributes_to_remove = [ + 'kind', 'generation', 'selfLink', '_meta_data', + 'membersReference', 'datacenterReference', + 'virtualServersReference', 'nameReference' + ] + self.gtm_types = dict( + a_s='a', + aaaas='aaaa', + cnames='cname', + mxs='mx', + naptrs='naptr', + srvs='srv' + ) + self.request_params = dict( + params='expandSubcollections=true' + ) + + def is_version_less_than_12(self): + version = self.api.tmos_version + if LooseVersion(version) < LooseVersion('12.0.0'): + return True + else: + return False + + def format_string_facts(self, parameters): + result = 
dict() + for attribute in self.attributes_to_remove: + parameters.pop(attribute, None) + for key, val in parameters.iteritems(): + result[key] = str(val) + return result + + def filter_matches_name(self, name): + if not self.params['filter']: + return True + matches = re.match(self.params['filter'], str(name)) + if matches: + return True + else: + return False + + def get_facts_from_collection(self, collection, collection_type=None): + results = [] + for item in collection: + if not self.filter_matches_name(item.name): + continue + facts = self.format_facts(item, collection_type) + results.append(facts) + return results + + def connect_to_bigip(self, **kwargs): + return ManagementRoot(kwargs['server'], + kwargs['user'], + kwargs['password'], + port=kwargs['server_port']) + + +class BigIpGtmFactsPools(BigIpGtmFactsCommon): + def __init__(self, *args, **kwargs): + super(BigIpGtmFactsPools, self).__init__() + self.params = kwargs + + def get_facts(self): + self.api = self.connect_to_bigip(**self.params) + return self.get_facts_from_device() + + def get_facts_from_device(self): + try: + if self.is_version_less_than_12(): + return self.get_facts_without_types() + else: + return self.get_facts_with_types() + except iControlUnexpectedHTTPError as e: + raise F5ModuleError(str(e)) + + def get_facts_with_types(self): + result = [] + for key, type in self.gtm_types.iteritems(): + facts = self.get_all_facts_by_type(key, type) + if facts: + result.append(facts) + return result + + def get_facts_without_types(self): + pools = self.api.tm.gtm.pools.get_collection(**self.request_params) + return self.get_facts_from_collection(pools) + + def get_all_facts_by_type(self, key, type): + collection = getattr(self.api.tm.gtm.pools, key) + pools = collection.get_collection(**self.request_params) + return self.get_facts_from_collection(pools, type) + + def format_facts(self, pool, collection_type): + result = dict() + pool_dict = pool.to_dict() + result.update(self.format_string_facts(pool_dict)) + result.update(self.format_member_facts(pool)) + if collection_type: + result['type'] = collection_type + return camel_dict_to_snake_dict(result) + + def format_member_facts(self, pool): + result = [] + if not 'items' in pool.membersReference: + return dict(members=[]) + for member in pool.membersReference['items']: + member_facts = self.format_string_facts(member) + result.append(member_facts) + return dict(members=result) + + +class BigIpGtmFactsWideIps(BigIpGtmFactsCommon): + def __init__(self, *args, **kwargs): + super(BigIpGtmFactsWideIps, self).__init__() + self.params = kwargs + + def get_facts(self): + self.api = self.connect_to_bigip(**self.params) + return self.get_facts_from_device() + + def get_facts_from_device(self): + try: + if self.is_version_less_than_12(): + return self.get_facts_without_types() + else: + return self.get_facts_with_types() + except iControlUnexpectedHTTPError as e: + raise F5ModuleError(str(e)) + + def get_facts_with_types(self): + result = [] + for key, type in self.gtm_types.iteritems(): + facts = self.get_all_facts_by_type(key, type) + if facts: + result.append(facts) + return result + + def get_facts_without_types(self): + wideips = self.api.tm.gtm.wideips.get_collection( + **self.request_params + ) + return self.get_facts_from_collection(wideips) + + def get_all_facts_by_type(self, key, type): + collection = getattr(self.api.tm.gtm.wideips, key) + wideips = collection.get_collection(**self.request_params) + return self.get_facts_from_collection(wideips, type) + + def 
format_facts(self, wideip, collection_type): + result = dict() + wideip_dict = wideip.to_dict() + result.update(self.format_string_facts(wideip_dict)) + result.update(self.format_pool_facts(wideip)) + if collection_type: + result['type'] = collection_type + return camel_dict_to_snake_dict(result) + + def format_pool_facts(self, wideip): + result = [] + if not hasattr(wideip, 'pools'): + return dict(pools=[]) + for pool in wideip.pools: + pool_facts = self.format_string_facts(pool) + result.append(pool_facts) + return dict(pools=result) + + +class BigIpGtmFactsVirtualServers(BigIpGtmFactsCommon): + def __init__(self, *args, **kwargs): + super(BigIpGtmFactsVirtualServers, self).__init__() + self.params = kwargs + + def get_facts(self): + try: + self.api = self.connect_to_bigip(**self.params) + return self.get_facts_from_device() + except iControlUnexpectedHTTPError as e: + raise F5ModuleError(str(e)) + + def get_facts_from_device(self): + servers = self.api.tm.gtm.servers.get_collection( + **self.request_params + ) + return self.get_facts_from_collection(servers) + + def format_facts(self, server, collection_type=None): + result = dict() + server_dict = server.to_dict() + result.update(self.format_string_facts(server_dict)) + result.update(self.format_address_facts(server)) + result.update(self.format_virtual_server_facts(server)) + return camel_dict_to_snake_dict(result) + + def format_address_facts(self, server): + result = [] + if not hasattr(server, 'addresses'): + return dict(addresses=[]) + for address in server.addresses: + address_facts = self.format_string_facts(address) + result.append(address_facts) + return dict(addresses=result) + + def format_virtual_server_facts(self, server): + result = [] + if not 'items' in server.virtualServersReference: + return dict(virtual_servers=[]) + for server in server.virtualServersReference['items']: + server_facts = self.format_string_facts(server) + result.append(server_facts) + return dict(virtual_servers=result) + +class BigIpGtmFactsManager(object): + def __init__(self, *args, **kwargs): + self.params = kwargs + self.api = None + + def get_facts(self): + result = dict() + facts = dict() + + if 'pool' in self.params['include']: + facts['pool'] = self.get_pool_facts() + if 'wide_ip' in self.params['include']: + facts['wide_ip'] = self.get_wide_ip_facts() + if 'virtual_server' in self.params['include']: + facts['virtual_server'] = self.get_virtual_server_facts() + + result.update(**facts) + result.update(dict(changed=True)) + return result + + def get_pool_facts(self): + pools = BigIpGtmFactsPools(**self.params) + return pools.get_facts() + + def get_wide_ip_facts(self): + wide_ips = BigIpGtmFactsWideIps(**self.params) + return wide_ips.get_facts() + + def get_virtual_server_facts(self): + wide_ips = BigIpGtmFactsVirtualServers(**self.params) + return wide_ips.get_facts() + + +class BigIpGtmFactsModuleConfig(object): + def __init__(self): + self.argument_spec = dict() + self.meta_args = dict() + self.supports_check_mode = False + self.valid_includes = ['pool', 'wide_ip', 'virtual_server'] + self.initialize_meta_args() + self.initialize_argument_spec() + + def initialize_meta_args(self): + args = dict( + include=dict(type='list', required=True), + filter=dict(type='str', required=False) + ) + self.meta_args = args + + def initialize_argument_spec(self): + self.argument_spec = f5_argument_spec() + self.argument_spec.update(self.meta_args) + + def create(self): + return AnsibleModule( + argument_spec=self.argument_spec, + 
supports_check_mode=self.supports_check_mode
+        )
+
+
+def main():
+    if not HAS_F5SDK:
+        raise F5ModuleError("The python f5-sdk module is required")
+
+    config = BigIpGtmFactsModuleConfig()
+    module = config.create()
+
+    try:
+        obj = BigIpGtmFactsManager(
+            check_mode=module.check_mode, **module.params
+        )
+        result = obj.get_facts()
+
+        module.exit_json(**result)
+    except F5ModuleError as e:
+        module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+    main()
diff --git a/network/f5/bigip_gtm_virtual_server.py b/network/f5/bigip_gtm_virtual_server.py
new file mode 100644
index 00000000000..03be3a9df64
--- /dev/null
+++ b/network/f5/bigip_gtm_virtual_server.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Michael Perzel
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_gtm_virtual_server
+short_description: "Manages F5 BIG-IP GTM virtual servers"
+description:
+    - "Manages F5 BIG-IP GTM virtual servers"
+version_added: "2.2"
+author:
+    - Michael Perzel (@perzizzle)
+    - Tim Rupp (@caphrim007)
+notes:
+    - "Requires BIG-IP software version >= 11.4"
+    - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
+    - "Best run as a local_action in your playbook"
+    - "Tested with manager and above account privilege level"
+
+requirements:
+    - bigsuds
+options:
+    state:
+        description:
+            - Virtual server state
+        required: false
+        default: present
+        choices: ['present', 'absent', 'enabled', 'disabled']
+    virtual_server_name:
+        description:
+            - Virtual server name
+        required: True
+    virtual_server_server:
+        description:
+            - Virtual server server
+        required: true
+    host:
+        description:
+            - Virtual server host
+        required: false
+        default: None
+        aliases: ['address']
+    port:
+        description:
+            - Virtual server port
+        required: false
+        default: None
+extends_documentation_fragment: f5
+'''
+
+EXAMPLES = '''
+  - name: Enable virtual server
+    local_action: >
+        bigip_gtm_virtual_server
+        server=192.0.2.1
+        user=admin
+        password=mysecret
+        virtual_server_name=myname
+        virtual_server_server=myserver
+        state=enabled
+'''
+
+RETURN = '''# '''
+
+try:
+    import bigsuds
+except ImportError:
+    bigsuds_found = False
+else:
+    bigsuds_found = True
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.f5 import bigip_api, f5_argument_spec
+
+
+def server_exists(api, server):
+    # hack to determine if server exists
+    result = False
+    try:
+        api.GlobalLB.Server.get_object_status([server])
+        result = True
+    except bigsuds.OperationFailed:
+        e = get_exception()
+        if "was not found" in str(e):
+            result = False
+        else:
+            # genuine
exception + raise + return result + + +def virtual_server_exists(api, name, server): + # hack to determine if virtual server exists + result = False + try: + virtual_server_id = {'name': name, 'server': server} + api.GlobalLB.VirtualServerV2.get_object_status([virtual_server_id]) + result = True + except bigsuds.OperationFailed: + e = get_exception() + if "was not found" in str(e): + result = False + else: + # genuine exception + raise + return result + + +def add_virtual_server(api, virtual_server_name, virtual_server_server, address, port): + addresses = {'address': address, 'port': port} + virtual_server_id = {'name': virtual_server_name, 'server': virtual_server_server} + api.GlobalLB.VirtualServerV2.create([virtual_server_id], [addresses]) + + +def remove_virtual_server(api, virtual_server_name, virtual_server_server): + virtual_server_id = {'name': virtual_server_name, 'server': virtual_server_server} + api.GlobalLB.VirtualServerV2.delete_virtual_server([virtual_server_id]) + + +def get_virtual_server_state(api, name, server): + virtual_server_id = {'name': name, 'server': server} + state = api.GlobalLB.VirtualServerV2.get_enabled_state([virtual_server_id]) + state = state[0].split('STATE_')[1].lower() + return state + + +def set_virtual_server_state(api, name, server, state): + virtual_server_id = {'name': name, 'server': server} + state = "STATE_%s" % state.strip().upper() + api.GlobalLB.VirtualServerV2.set_enabled_state([virtual_server_id], [state]) + + +def main(): + argument_spec = f5_argument_spec() + + meta_args = dict( + state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), + host=dict(type='str', default=None, aliases=['address']), + port=dict(type='int', default=None), + virtual_server_name=dict(type='str', required=True), + virtual_server_server=dict(type='str', required=True) + ) + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + if not bigsuds_found: + module.fail_json(msg="the python bigsuds module is required") + + server = module.params['server'] + server_port = module.params['server_port'] + validate_certs = module.params['validate_certs'] + user = module.params['user'] + password = module.params['password'] + virtual_server_name = module.params['virtual_server_name'] + virtual_server_server = module.params['virtual_server_server'] + state = module.params['state'] + address = module.params['host'] + port = module.params['port'] + + result = {'changed': False} # default + + try: + api = bigip_api(server, user, password, validate_certs, port=server_port) + + if state == 'absent': + if virtual_server_exists(api, virtual_server_name, virtual_server_server): + if not module.check_mode: + remove_virtual_server(api, virtual_server_name, virtual_server_server) + result = {'changed': True} + else: + # check-mode return value + result = {'changed': True} + elif state == 'present': + if virtual_server_name and virtual_server_server and address and port: + if not virtual_server_exists(api, virtual_server_name, virtual_server_server): + if not module.check_mode: + if server_exists(api, virtual_server_server): + add_virtual_server(api, virtual_server_name, virtual_server_server, address, port) + result = {'changed': True} + else: + module.fail_json(msg="server does not exist") + else: + # check-mode return value + result = {'changed': True} + else: + # virtual server exists -- potentially modify attributes --future feature + result = {'changed': False} + else: + 
module.fail_json(msg="Address and port are required to create virtual server") + elif state == 'enabled': + if not virtual_server_exists(api, virtual_server_name, virtual_server_server): + module.fail_json(msg="virtual server does not exist") + if state != get_virtual_server_state(api, virtual_server_name, virtual_server_server): + if not module.check_mode: + set_virtual_server_state(api, virtual_server_name, virtual_server_server, state) + result = {'changed': True} + else: + result = {'changed': True} + elif state == 'disabled': + if not virtual_server_exists(api, virtual_server_name, virtual_server_server): + module.fail_json(msg="virtual server does not exist") + if state != get_virtual_server_state(api, virtual_server_name, virtual_server_server): + if not module.check_mode: + set_virtual_server_state(api, virtual_server_name, virtual_server_server, state) + result = {'changed': True} + else: + result = {'changed': True} + + except Exception: + e = get_exception() + module.fail_json(msg="received exception: %s" % e) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_gtm_wide_ip.py b/network/f5/bigip_gtm_wide_ip.py new file mode 100644 index 00000000000..c1712902f40 --- /dev/null +++ b/network/f5/bigip_gtm_wide_ip.py @@ -0,0 +1,167 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, Michael Perzel +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: bigip_gtm_wide_ip +short_description: "Manages F5 BIG-IP GTM wide ip" +description: + - "Manages F5 BIG-IP GTM wide ip" +version_added: "2.0" +author: + - Michael Perzel (@perzizzle) + - Tim Rupp (@caphrim007) +notes: + - "Requires BIG-IP software version >= 11.4" + - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" + - "Best run as a local_action in your playbook" + - "Tested with manager and above account privilege level" + +requirements: + - bigsuds +options: + lb_method: + description: + - LB method of wide ip + required: true + choices: ['return_to_dns', 'null', 'round_robin', + 'ratio', 'topology', 'static_persist', 'global_availability', + 'vs_capacity', 'least_conn', 'lowest_rtt', 'lowest_hops', + 'packet_rate', 'cpu', 'hit_ratio', 'qos', 'bps', + 'drop_packet', 'explicit_ip', 'connection_rate', 'vs_score'] + wide_ip: + description: + - Wide IP name + required: true +extends_documentation_fragment: f5 +''' + +EXAMPLES = ''' + - name: Set lb method + local_action: > + bigip_gtm_wide_ip + server=192.0.2.1 + user=admin + password=mysecret + lb_method=round_robin + wide_ip=my-wide-ip.example.com +''' + +try: + import bigsuds +except ImportError: + bigsuds_found = False +else: + bigsuds_found = True + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.f5 import bigip_api, f5_argument_spec + + +def get_wide_ip_lb_method(api, wide_ip): + lb_method = api.GlobalLB.WideIP.get_lb_method(wide_ips=[wide_ip])[0] + lb_method = lb_method.strip().replace('LB_METHOD_', '').lower() + return lb_method + +def get_wide_ip_pools(api, wide_ip): + try: + return api.GlobalLB.WideIP.get_wideip_pool([wide_ip]) + except Exception: + e = get_exception() + print(e) + +def wide_ip_exists(api, wide_ip): + # hack to determine if wide_ip exists + result = False + try: + api.GlobalLB.WideIP.get_object_status(wide_ips=[wide_ip]) + result = True + except bigsuds.OperationFailed: + e = get_exception() + if "was not found" in str(e): + result = False + else: + # genuine exception + raise + return result + +def set_wide_ip_lb_method(api, wide_ip, lb_method): + lb_method = "LB_METHOD_%s" % lb_method.strip().upper() + api.GlobalLB.WideIP.set_lb_method(wide_ips=[wide_ip], lb_methods=[lb_method]) + +def main(): + argument_spec = f5_argument_spec() + + lb_method_choices = ['return_to_dns', 'null', 'round_robin', + 'ratio', 'topology', 'static_persist', 'global_availability', + 'vs_capacity', 'least_conn', 'lowest_rtt', 'lowest_hops', + 'packet_rate', 'cpu', 'hit_ratio', 'qos', 'bps', + 'drop_packet', 'explicit_ip', 'connection_rate', 'vs_score'] + meta_args = dict( + lb_method = dict(type='str', required=True, choices=lb_method_choices), + wide_ip = dict(type='str', required=True) + ) + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + if not bigsuds_found: + module.fail_json(msg="the python bigsuds module is required") + + server = module.params['server'] + server_port = module.params['server_port'] + user = module.params['user'] + password = module.params['password'] + wide_ip = module.params['wide_ip'] + lb_method = module.params['lb_method'] + validate_certs = module.params['validate_certs'] + + result = {'changed': False} # default + + try: + api = bigip_api(server, user, password, validate_certs, 
port=server_port)
+
+        if not wide_ip_exists(api, wide_ip):
+            module.fail_json(msg="wide ip %s does not exist" % wide_ip)
+
+        if lb_method is not None and lb_method != get_wide_ip_lb_method(api, wide_ip):
+            if not module.check_mode:
+                set_wide_ip_lb_method(api, wide_ip, lb_method)
+                result = {'changed': True}
+            else:
+                result = {'changed': True}
+
+    except Exception:
+        e = get_exception()
+        module.fail_json(msg="received exception: %s" % e)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/network/f5/bigip_hostname.py b/network/f5/bigip_hostname.py
new file mode 100644
index 00000000000..9dc9d085c5a
--- /dev/null
+++ b/network/f5/bigip_hostname.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_hostname
+short_description: Manage the hostname of a BIG-IP.
+description:
+  - Manage the hostname of a BIG-IP.
+version_added: "2.3"
+options:
+  hostname:
+    description:
+      - Hostname of the BIG-IP host.
+    required: true
+notes:
+  - Requires the f5-sdk Python package on the host. This is as easy as pip
+    install f5-sdk.
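+# Illustrative only (the hostname value is hypothetical): the module is
+# idempotent, so re-running the same task reports changed=false once the
+# device hostname already matches:
+#
+#   - bigip_hostname:
+#       hostname: "bigip01.example.com"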
+extends_documentation_fragment: f5 +requirements: + - f5-sdk +author: + - Tim Rupp (@caphrim007) +''' + +EXAMPLES = ''' +- name: Set the hostname of the BIG-IP + bigip_hostname: + hostname: "bigip.localhost.localdomain" + password: "admin" + server: "bigip.localhost.localdomain" + user: "admin" + delegate_to: localhost +''' + +RETURN = ''' +hostname: + description: The new hostname of the device + returned: changed + type: string + sample: "big-ip01.internal" +''' + +try: + from f5.bigip.contexts import TransactionContextManager + from f5.bigip import ManagementRoot + from icontrol.session import iControlUnexpectedHTTPError + HAS_F5SDK = True +except ImportError: + HAS_F5SDK = False + + +class BigIpHostnameManager(object): + def __init__(self, *args, **kwargs): + self.changed_params = dict() + self.params = kwargs + self.api = None + + def connect_to_bigip(self, **kwargs): + return ManagementRoot(kwargs['server'], + kwargs['user'], + kwargs['password'], + port=kwargs['server_port']) + + def ensure_hostname_is_present(self): + self.changed_params['hostname'] = self.params['hostname'] + + if self.params['check_mode']: + return True + + tx = self.api.tm.transactions.transaction + with TransactionContextManager(tx) as api: + r = api.tm.sys.global_settings.load() + r.update(hostname=self.params['hostname']) + + if self.hostname_exists(): + return True + else: + raise F5ModuleError("Failed to set the hostname") + + def hostname_exists(self): + if self.params['hostname'] == self.current_hostname(): + return True + else: + return False + + def present(self): + if self.hostname_exists(): + return False + else: + + return self.ensure_hostname_is_present() + + def current_hostname(self): + r = self.api.tm.sys.global_settings.load() + return r.hostname + + def apply_changes(self): + result = dict() + + changed = self.apply_to_running_config() + if changed: + self.save_running_config() + + result.update(**self.changed_params) + result.update(dict(changed=changed)) + return result + + def apply_to_running_config(self): + try: + self.api = self.connect_to_bigip(**self.params) + return self.present() + except iControlUnexpectedHTTPError as e: + raise F5ModuleError(str(e)) + + def save_running_config(self): + self.api.tm.sys.config.exec_cmd('save') + + +class BigIpHostnameModuleConfig(object): + def __init__(self): + self.argument_spec = dict() + self.meta_args = dict() + self.supports_check_mode = True + + self.initialize_meta_args() + self.initialize_argument_spec() + + def initialize_meta_args(self): + args = dict( + hostname=dict(required=True) + ) + self.meta_args = args + + def initialize_argument_spec(self): + self.argument_spec = f5_argument_spec() + self.argument_spec.update(self.meta_args) + + def create(self): + return AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=self.supports_check_mode + ) + + +def main(): + if not HAS_F5SDK: + raise F5ModuleError("The python f5-sdk module is required") + + config = BigIpHostnameModuleConfig() + module = config.create() + + try: + obj = BigIpHostnameManager( + check_mode=module.check_mode, **module.params + ) + result = obj.apply_changes() + + module.exit_json(**result) + except F5ModuleError as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * + +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_irule.py b/network/f5/bigip_irule.py new file mode 100644 index 00000000000..52b8f30fb58 --- /dev/null +++ b/network/f5/bigip_irule.py @@ -0,0 +1,388 @@ 
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_irule
+short_description: Manage iRules across different modules on a BIG-IP.
+description:
+  - Manage iRules across different modules on a BIG-IP.
+version_added: "2.2"
+options:
+  content:
+    description:
+      - When used instead of C(src), sets the contents of an iRule directly to
+        the specified value. This is for simple values, but can be used with
+        lookup plugins for anything complex or with formatting. Either one
+        of C(src) or C(content) must be provided.
+  module:
+    description:
+      - The BIG-IP module to add the iRule to.
+    required: true
+    choices:
+      - ltm
+      - gtm
+  partition:
+    description:
+      - The partition to create the iRule on.
+    required: false
+    default: Common
+  name:
+    description:
+      - The name of the iRule.
+    required: true
+  src:
+    description:
+      - The iRule file to interpret and upload to the BIG-IP. Either one
+        of C(src) or C(content) must be provided.
+    required: true
+  state:
+    description:
+      - Whether the iRule should exist or not.
+    required: false
+    default: present
+    choices:
+      - present
+      - absent
+notes:
+  - Requires the f5-sdk Python package on the host. This is as easy as
+    pip install f5-sdk.
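+# Illustrative only (the name and TCL snippet are hypothetical): because
+# C(content) accepts any string, a short iRule can also be supplied inline
+# rather than through a template or file:
+#
+#   - bigip_irule:
+#       module: "ltm"
+#       name: "redirect_http"
+#       content: "when HTTP_REQUEST { HTTP::redirect https://[HTTP::host][HTTP::uri] }"
+#       state: "present"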
+extends_documentation_fragment: f5 +requirements: + - f5-sdk +author: + - Tim Rupp (@caphrim007) +''' + +EXAMPLES = ''' +- name: Add the iRule contained in templated irule.tcl to the LTM module + bigip_irule: + content: "{{ lookup('template', 'irule-template.tcl') }}" + module: "ltm" + name: "MyiRule" + password: "secret" + server: "lb.mydomain.com" + state: "present" + user: "admin" + delegate_to: localhost + +- name: Add the iRule contained in static file irule.tcl to the LTM module + bigip_irule: + module: "ltm" + name: "MyiRule" + password: "secret" + server: "lb.mydomain.com" + src: "irule-static.tcl" + state: "present" + user: "admin" + delegate_to: localhost +''' + +RETURN = ''' +module: + description: The module that the iRule was added to + returned: changed and success + type: string + sample: "gtm" +src: + description: The filename that included the iRule source + returned: changed and success, when provided + type: string + sample: "/opt/src/irules/example1.tcl" +name: + description: The name of the iRule that was managed + returned: changed and success + type: string + sample: "my-irule" +content: + description: The content of the iRule that was managed + returned: changed and success + type: string + sample: "when LB_FAILED { set wipHost [LB::server addr] }" +partition: + description: The partition in which the iRule was managed + returned: changed and success + type: string + sample: "Common" +''' + +try: + from f5.bigip import ManagementRoot + from icontrol.session import iControlUnexpectedHTTPError + HAS_F5SDK = True +except ImportError: + HAS_F5SDK = False + +MODULES = ['gtm', 'ltm'] + + +class BigIpiRule(object): + def __init__(self, *args, **kwargs): + if not HAS_F5SDK: + raise F5ModuleError("The python f5-sdk module is required") + + if kwargs['state'] != 'absent': + if not kwargs['content'] and not kwargs['src']: + raise F5ModuleError( + "Either 'content' or 'src' must be provided" + ) + + source = kwargs['src'] + if source: + with open(source) as f: + kwargs['content'] = f.read() + + # The params that change in the module + self.cparams = dict() + + # Stores the params that are sent to the module + self.params = kwargs + self.api = ManagementRoot(kwargs['server'], + kwargs['user'], + kwargs['password'], + port=kwargs['server_port']) + + def flush(self): + result = dict() + state = self.params['state'] + + try: + if state == "present": + changed = self.present() + elif state == "absent": + changed = self.absent() + except iControlUnexpectedHTTPError as e: + raise F5ModuleError(str(e)) + + result.update(**self.cparams) + result.update(dict(changed=changed)) + return result + + def read(self): + """Read information and transform it + + The values that are returned by BIG-IP in the f5-sdk can have encoding + attached to them as well as be completely missing in some cases. + + Therefore, this method will transform the data from the BIG-IP into a + format that is more easily consumable by the rest of the class and the + parameters that are supported by the module. 
+ """ + p = dict() + name = self.params['name'] + partition = self.params['partition'] + module = self.params['module'] + + if module == 'ltm': + r = self.api.tm.ltm.rules.rule.load( + name=name, + partition=partition + ) + elif module == 'gtm': + r = self.api.tm.gtm.rules.rule.load( + name=name, + partition=partition + ) + + if hasattr(r, 'apiAnonymous'): + p['content'] = str(r.apiAnonymous.strip()) + p['name'] = name + return p + + def delete(self): + params = dict() + check_mode = self.params['check_mode'] + module = self.params['module'] + + params['name'] = self.params['name'] + params['partition'] = self.params['partition'] + + self.cparams = camel_dict_to_snake_dict(params) + if check_mode: + return True + + if module == 'ltm': + r = self.api.tm.ltm.rules.rule.load(**params) + r.delete() + elif module == 'gtm': + r = self.api.tm.gtm.rules.rule.load(**params) + r.delete() + + if self.exists(): + raise F5ModuleError("Failed to delete the iRule") + return True + + def exists(self): + name = self.params['name'] + partition = self.params['partition'] + module = self.params['module'] + + if module == 'ltm': + return self.api.tm.ltm.rules.rule.exists( + name=name, + partition=partition + ) + elif module == 'gtm': + return self.api.tm.gtm.rules.rule.exists( + name=name, + partition=partition + ) + + def present(self): + if self.exists(): + return self.update() + else: + return self.create() + + def update(self): + params = dict() + current = self.read() + changed = False + + check_mode = self.params['check_mode'] + content = self.params['content'] + name = self.params['name'] + partition = self.params['partition'] + module = self.params['module'] + + if content is not None: + content = content.strip() + if 'content' in current: + if content != current['content']: + params['apiAnonymous'] = content + else: + params['apiAnonymous'] = content + + if params: + changed = True + params['name'] = name + params['partition'] = partition + self.cparams = camel_dict_to_snake_dict(params) + if 'api_anonymous' in self.cparams: + self.cparams['content'] = self.cparams.pop('api_anonymous') + if self.params['src']: + self.cparams['src'] = self.params['src'] + + if check_mode: + return changed + else: + return changed + + if module == 'ltm': + d = self.api.tm.ltm.rules.rule.load( + name=name, + partition=partition + ) + d.update(**params) + d.refresh() + elif module == 'gtm': + d = self.api.tm.gtm.rules.rule.load( + name=name, + partition=partition + ) + d.update(**params) + d.refresh() + + return True + + def create(self): + params = dict() + + check_mode = self.params['check_mode'] + content = self.params['content'] + name = self.params['name'] + partition = self.params['partition'] + module = self.params['module'] + + if check_mode: + return True + + if content is not None: + params['apiAnonymous'] = content.strip() + + params['name'] = name + params['partition'] = partition + + self.cparams = camel_dict_to_snake_dict(params) + if 'api_anonymous' in self.cparams: + self.cparams['content'] = self.cparams.pop('api_anonymous') + if self.params['src']: + self.cparams['src'] = self.params['src'] + + if check_mode: + return True + + if module == 'ltm': + d = self.api.tm.ltm.rules.rule + d.create(**params) + elif module == 'gtm': + d = self.api.tm.gtm.rules.rule + d.create(**params) + + if not self.exists(): + raise F5ModuleError("Failed to create the iRule") + return True + + def absent(self): + changed = False + + if self.exists(): + changed = self.delete() + + return changed + + +def main(): + argument_spec 
= f5_argument_spec() + + meta_args = dict( + content=dict(required=False, default=None), + src=dict(required=False, default=None), + name=dict(required=True), + module=dict(required=True, choices=MODULES) + ) + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ['content', 'src'] + ] + ) + + try: + obj = BigIpiRule(check_mode=module.check_mode, **module.params) + result = obj.flush() + + module.exit_json(**result) + except F5ModuleError as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.f5 import * + +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_monitor_http.py b/network/f5/bigip_monitor_http.py index ea24e995e27..02017569c8c 100644 --- a/network/f5/bigip_monitor_http.py +++ b/network/f5/bigip_monitor_http.py @@ -1,6 +1,6 @@ #!/usr/bin/python # -*- coding: utf-8 -*- - +# # (c) 2013, serge van Ginderachter # based on Matt Hite's bigip_pool module # (c) 2013, Matt Hite @@ -20,156 +20,141 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: bigip_monitor_http short_description: "Manages F5 BIG-IP LTM http monitors" description: - - "Manages F5 BIG-IP LTM monitors via iControl SOAP API" + - Manages F5 BIG-IP LTM monitors via iControl SOAP API version_added: "1.4" -author: "Serge van Ginderachter (@srvg)" +author: + - Serge van Ginderachter (@srvg) + - Tim Rupp (@caphrim007) notes: - - "Requires BIG-IP software version >= 11" - - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - - "Best run as a local_action in your playbook" - - "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx" + - "Requires BIG-IP software version >= 11" + - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" + - "Best run as a local_action in your playbook" + - "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx" requirements: - - bigsuds + - bigsuds options: - server: - description: - - BIG-IP host - required: true - default: null - user: - description: - - BIG-IP username - required: true - default: null - password: - description: - - BIG-IP password - required: true - default: null - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
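A note on the camel_dict_to_snake_dict calls in the bigip_irule methods above: the REST payload key for iRule content is camelCase ('apiAnonymous'), the helper borrowed from module_utils.ec2 converts payload keys to snake_case, and the module then renames 'api_anonymous' to the user-facing 'content' key. A minimal standalone sketch of that conversion (this reimplements the idea for illustration and is not the ec2 helper itself):

    import re

    def camel_dict_to_snake_dict(camel):
        # Insert an underscore before each interior capital, then lowercase.
        def snake(name):
            return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()
        return dict((snake(k), v) for k, v in camel.items())

    cparams = camel_dict_to_snake_dict({'apiAnonymous': 'when HTTP_REQUEST {}',
                                        'name': 'my_irule'})
    # Mirrors the rename done in update()/create() above.
    cparams['content'] = cparams.pop('api_anonymous')
    assert cparams == {'content': 'when HTTP_REQUEST {}', 'name': 'my_irule'}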
- required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 2.0 - state: - description: - - Monitor state - required: false - default: 'present' - choices: ['present', 'absent'] - name: - description: - - Monitor name - required: true - default: null - aliases: ['monitor'] - partition: - description: - - Partition for the monitor - required: false - default: 'Common' - parent: - description: - - The parent template of this monitor template - required: false - default: 'http' - parent_partition: - description: - - Partition for the parent monitor - required: false - default: 'Common' - send: - description: - - The send string for the monitor call - required: true - default: none - receive: - description: - - The receive string for the monitor call - required: true - default: none - receive_disable: - description: - - The receive disable string for the monitor call - required: true - default: none - ip: - description: - - IP address part of the ipport definition. The default API setting - is "0.0.0.0". - required: false - default: none - port: - description: - - port address part op the ipport definition. The default API - setting is 0. - required: false - default: none - interval: - description: - - The interval specifying how frequently the monitor instance - of this template will run. By default, this interval is used for up and - down states. The default API setting is 5. - required: false - default: none - timeout: - description: - - The number of seconds in which the node or service must respond to - the monitor request. If the target responds within the set time - period, it is considered up. If the target does not respond within - the set time period, it is considered down. You can change this - number to any number you want, however, it should be 3 times the - interval number of seconds plus 1 second. The default API setting - is 16. - required: false - default: none - time_until_up: - description: - - Specifies the amount of time in seconds after the first successful - response before a node will be marked up. A value of 0 will cause a - node to be marked up immediately after a valid response is received - from the node. The default API setting is 0. - required: false - default: none + state: + description: + - Monitor state + required: false + default: 'present' + choices: + - present + - absent + name: + description: + - Monitor name + required: true + default: null + aliases: + - monitor + partition: + description: + - Partition for the monitor + required: false + default: 'Common' + parent: + description: + - The parent template of this monitor template + required: false + default: 'http' + parent_partition: + description: + - Partition for the parent monitor + required: false + default: 'Common' + send: + description: + - The send string for the monitor call + required: false + default: none + receive: + description: + - The receive string for the monitor call + required: false + default: none + receive_disable: + description: + - The receive disable string for the monitor call + required: false + default: none + ip: + description: + - IP address part of the ipport definition. The default API setting + is "0.0.0.0". + required: false + default: none + port: + description: + - Port address part of the ip/port definition. The default API + setting is 0. + required: false + default: none + interval: + description: + - The interval specifying how frequently the monitor instance + of this template will run. By default, this interval is used for up and + down states.
The default API setting is 5. + required: false + default: none + timeout: + description: + - The number of seconds in which the node or service must respond to + the monitor request. If the target responds within the set time + period, it is considered up. If the target does not respond within + the set time period, it is considered down. You can change this + number to any number you want, however, it should be 3 times the + interval number of seconds plus 1 second. The default API setting + is 16. + required: false + default: none + time_until_up: + description: + - Specifies the amount of time in seconds after the first successful + response before a node will be marked up. A value of 0 will cause a + node to be marked up immediately after a valid response is received + from the node. The default API setting is 0. + required: false + default: none +extends_documentation_fragment: f5 ''' EXAMPLES = ''' - name: BIGIP F5 | Create HTTP Monitor - local_action: - module: bigip_monitor_http - state: present - server: "{{ f5server }}" - user: "{{ f5user }}" - password: "{{ f5password }}" - name: "{{ item.monitorname }}" - send: "{{ item.send }}" - receive: "{{ item.receive }}" - with_items: f5monitors + bigip_monitor_http: + state: "present" + server: "lb.mydomain.com" + user: "admin" + password: "secret" + name: "my_http_monitor" + send: "http string to send" + receive: "http string to receive" + delegate_to: localhost + - name: BIGIP F5 | Remove HTTP Monitor - local_action: - module: bigip_monitor_http - state: absent - server: "{{ f5server }}" - user: "{{ f5user }}" - password: "{{ f5password }}" - name: "{{ monitorname }}" + bigip_monitor_http: + state: "absent" + server: "lb.mydomain.com" + user: "admin" + password: "secret" + name: "my_http_monitor" + delegate_to: localhost ''' TEMPLATE_TYPE = 'TTYPE_HTTP' DEFAULT_PARENT_TYPE = 'http' - def check_monitor_exists(module, api, monitor, parent): - # hack to determine if monitor exists result = False try: @@ -179,7 +164,7 @@ def check_monitor_exists(module, api, monitor, parent): result = True else: module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent(%s)' % (ttype, parent)) - except bigsuds.OperationFailed, e: + except bigsuds.OperationFailed as e: if "was not found" in str(e): result = False else: @@ -189,10 +174,15 @@ def check_monitor_exists(module, api, monitor, parent): def create_monitor(api, monitor, template_attributes): - try: - api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes]) - except bigsuds.OperationFailed, e: + api.LocalLB.Monitor.create_template( + templates=[{ + 'template_name': monitor, + 'template_type': TEMPLATE_TYPE + }], + template_attributes=[template_attributes] + ) + except bigsuds.OperationFailed as e: if "already exists" in str(e): return False else: @@ -202,10 +192,9 @@ def create_monitor(api, monitor, template_attributes): def delete_monitor(api, monitor): - try: api.LocalLB.Monitor.delete_template(template_names=[monitor]) - except bigsuds.OperationFailed, e: + except bigsuds.OperationFailed as e: # maybe it was deleted since we checked if "was not found" in str(e): return False @@ -216,10 +205,12 @@ def delete_monitor(api, monitor): def check_string_property(api, monitor, str_property): - try: - return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0] - except bigsuds.OperationFailed, e: + template_prop = 
api.LocalLB.Monitor.get_template_string_property( + [monitor], [str_property['type']] + )[0] + return str_property == template_prop + except bigsuds.OperationFailed as e: # happens in check mode if not created yet if "was not found" in str(e): return True @@ -229,15 +220,19 @@ def check_string_property(api, monitor, str_property): def set_string_property(api, monitor, str_property): - - api.LocalLB.Monitor.set_template_string_property(template_names=[monitor], values=[str_property]) + api.LocalLB.Monitor.set_template_string_property( + template_names=[monitor], + values=[str_property] + ) def check_integer_property(api, monitor, int_property): - try: - return int_property == api.LocalLB.Monitor.get_template_integer_property([monitor], [int_property['type']])[0] - except bigsuds.OperationFailed, e: + template_prop = api.LocalLB.Monitor.get_template_integer_property( + [monitor], [int_property['type']] + )[0] + return int_property == template_prop + except bigsuds.OperationFailed as e: # happens in check mode if not created yet if "was not found" in str(e): return True @@ -246,10 +241,11 @@ def check_integer_property(api, monitor, int_property): raise - def set_integer_property(api, monitor, int_property): - - api.LocalLB.Monitor.set_template_int_property(template_names=[monitor], values=[int_property]) + api.LocalLB.Monitor.set_template_integer_property( + template_names=[monitor], + values=[int_property] + ) def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties): @@ -269,54 +265,53 @@ def update_monitor_properties(api, module, monitor, template_string_properties, def get_ipport(api, monitor): - return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0] def set_ipport(api, monitor, ipport): - try: - api.LocalLB.Monitor.set_template_destination(template_names=[monitor], destinations=[ipport]) + api.LocalLB.Monitor.set_template_destination( + template_names=[monitor], destinations=[ipport] + ) return True, "" - - except bigsuds.OperationFailed, e: + except bigsuds.OperationFailed as e: if "Cannot modify the address type of monitor" in str(e): return False, "Cannot modify the address type of monitor if already assigned to a pool." 
else: # genuine exception raise -# =========================================== -# main loop -# -# writing a module for other monitor types should -# only need an updated main() (and monitor specific functions) def main(): - - # begin monitor specific stuff - argument_spec=f5_argument_spec(); - argument_spec.update( dict( - name = dict(required=True), - parent = dict(default=DEFAULT_PARENT_TYPE), - parent_partition = dict(default='Common'), - send = dict(required=False), - receive = dict(required=False), - receive_disable = dict(required=False), - ip = dict(required=False), - port = dict(required=False, type='int'), - interval = dict(required=False, type='int'), - timeout = dict(required=False, type='int'), - time_until_up = dict(required=False, type='int', default=0) - ) + argument_spec = f5_argument_spec() + + meta_args = dict( + name=dict(required=True), + parent=dict(default=DEFAULT_PARENT_TYPE), + parent_partition=dict(default='Common'), + send=dict(required=False), + receive=dict(required=False), + receive_disable=dict(required=False), + ip=dict(required=False), + port=dict(required=False, type='int'), + interval=dict(required=False, type='int'), + timeout=dict(required=False, type='int'), + time_until_up=dict(required=False, type='int', default=0) ) + argument_spec.update(meta_args) module = AnsibleModule( - argument_spec = argument_spec, + argument_spec=argument_spec, supports_check_mode=True ) - (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) + server = module.params['server'] + server_port = module.params['server_port'] + user = module.params['user'] + password = module.params['password'] + state = module.params['state'] + partition = module.params['partition'] + validate_certs = module.params['validate_certs'] parent_partition = module.params['parent_partition'] name = module.params['name'] @@ -333,18 +328,17 @@ def main(): # end monitor specific stuff - api = bigip_api(server, user, password) + api = bigip_api(server, user, password, validate_certs, port=server_port) monitor_exists = check_monitor_exists(module, api, monitor, parent) - # ipport is a special setting - if monitor_exists: # make sure to not update current settings if not asked + if monitor_exists: cur_ipport = get_ipport(api, monitor) if ip is None: ip = cur_ipport['ipport']['address'] if port is None: port = cur_ipport['ipport']['port'] - else: # use API defaults if not defined to create it + else: if interval is None: interval = 5 if timeout is None: @@ -389,19 +383,26 @@ def main(): {'type': 'STYPE_RECEIVE_DRAIN', 'value': receive_disable}] - template_integer_properties = [{'type': 'ITYPE_INTERVAL', - 'value': interval}, - {'type': 'ITYPE_TIMEOUT', - 'value': timeout}, - {'type': 'ITYPE_TIME_UNTIL_UP', - 'value': time_until_up}] + template_integer_properties = [ + { + 'type': 'ITYPE_INTERVAL', + 'value': interval + }, + { + 'type': 'ITYPE_TIMEOUT', + 'value': timeout + }, + { + 'type': 'ITYPE_TIME_UNTIL_UP', + 'value': time_until_up + } + ] # main logic, monitor generic try: result = {'changed': False} # default - if state == 'absent': if monitor_exists: if not module.check_mode: @@ -410,10 +411,9 @@ def main(): result['changed'] |= delete_monitor(api, monitor) else: result['changed'] |= True - - else: # state present - ## check for monitor itself - if not monitor_exists: # create it + else: + # check for monitor itself + if not monitor_exists: if not module.check_mode: # again, check changed status here b/c race conditions # if other task already created it @@ -421,22 +421,20 
@@ def main(): else: result['changed'] |= True - ## check for monitor parameters + # check for monitor parameters # whether it already existed, or was just created, now update # the update functions need to check for check mode but # cannot update settings if it doesn't exist which happens in check mode result['changed'] |= update_monitor_properties(api, module, monitor, - template_string_properties, - template_integer_properties) + template_string_properties, + template_integer_properties) # we just have to update the ipport if monitor already exists and it's different if monitor_exists and cur_ipport != ipport: set_ipport(api, monitor, ipport) result['changed'] |= True - #else: monitor doesn't exist (check mode) or ipport is already ok - - - except Exception, e: + # else: monitor doesn't exist (check mode) or ipport is already ok + except Exception as e: module.fail_json(msg="received exception: %s" % e) module.exit_json(**result) @@ -444,5 +442,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.f5 import * -main() +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_monitor_tcp.py b/network/f5/bigip_monitor_tcp.py index 0900e95fd20..aedc71f642b 100644 --- a/network/f5/bigip_monitor_tcp.py +++ b/network/f5/bigip_monitor_tcp.py @@ -18,167 +18,154 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: bigip_monitor_tcp short_description: "Manages F5 BIG-IP LTM tcp monitors" description: - - "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API" + - "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API" version_added: "1.4" -author: "Serge van Ginderachter (@srvg)" +author: + - Serge van Ginderachter (@srvg) + - Tim Rupp (@caphrim007) notes: - - "Requires BIG-IP software version >= 11" - - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - - "Best run as a local_action in your playbook" - - "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx" + - "Requires BIG-IP software version >= 11" + - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" + - "Best run as a local_action in your playbook" + - "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx" requirements: - - bigsuds + - bigsuds options: - server: - description: - - BIG-IP host - required: true - default: null - user: - description: - - BIG-IP username - required: true - default: null - password: - description: - - BIG-IP password - required: true - default: null - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
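Both monitor modules decide existence with the same probe-and-classify hack seen in check_monitor_exists above (for bigip_monitor_http) and repeated below for bigip_monitor_tcp: call the API, treat an OperationFailed whose text contains "was not found" as absence, and re-raise anything else as a genuine error. A standalone sketch of that pattern, using a stand-in exception rather than bigsuds.OperationFailed:

    class OperationFailed(Exception):
        """Stand-in for bigsuds.OperationFailed."""

    def exists(probe):
        try:
            probe()
            return True
        except OperationFailed as e:
            if "was not found" in str(e):
                return False
            # genuine exception
            raise

    def missing():
        raise OperationFailed("monitor /Common/example was not found")

    assert exists(missing) is False
    assert exists(lambda: None) is True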
- required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 2.0 - state: - description: - - Monitor state - required: false - default: 'present' - choices: ['present', 'absent'] - name: - description: - - Monitor name - required: true - default: null - aliases: ['monitor'] - partition: - description: - - Partition for the monitor - required: false - default: 'Common' - type: - description: - - The template type of this monitor template - required: false - default: 'tcp' - choices: [ 'TTYPE_TCP', 'TTYPE_TCP_ECHO', 'TTYPE_TCP_HALF_OPEN'] - parent: - description: - - The parent template of this monitor template - required: false - default: 'tcp' - choices: [ 'tcp', 'tcp_echo', 'tcp_half_open'] - parent_partition: - description: - - Partition for the parent monitor - required: false - default: 'Common' - send: - description: - - The send string for the monitor call - required: true - default: none - receive: - description: - - The receive string for the monitor call - required: true - default: none - ip: - description: - - IP address part of the ipport definition. The default API setting - is "0.0.0.0". - required: false - default: none - port: - description: - - port address part op the ipport definition. The default API - setting is 0. - required: false - default: none - interval: - description: - - The interval specifying how frequently the monitor instance - of this template will run. By default, this interval is used for up and - down states. The default API setting is 5. - required: false - default: none - timeout: - description: - - The number of seconds in which the node or service must respond to - the monitor request. If the target responds within the set time - period, it is considered up. If the target does not respond within - the set time period, it is considered down. You can change this - number to any number you want, however, it should be 3 times the - interval number of seconds plus 1 second. The default API setting - is 16. - required: false - default: none - time_until_up: - description: - - Specifies the amount of time in seconds after the first successful - response before a node will be marked up. A value of 0 will cause a - node to be marked up immediately after a valid response is received - from the node. The default API setting is 0. - required: false - default: none + state: + description: + - Monitor state + required: false + default: 'present' + choices: + - present + - absent + name: + description: + - Monitor name + required: true + default: null + aliases: + - monitor + partition: + description: + - Partition for the monitor + required: false + default: 'Common' + type: + description: + - The template type of this monitor template + required: false + default: 'tcp' + choices: + - tcp + - tcp_echo + - tcp_half_open + parent: + description: + - The parent template of this monitor template + required: false + default: 'tcp' + choices: + - tcp + - tcp_echo + - tcp_half_open + parent_partition: + description: + - Partition for the parent monitor + required: false + default: 'Common' + send: + description: + - The send string for the monitor call + required: false + default: none + receive: + description: + - The receive string for the monitor call + required: false + default: none + ip: + description: + - IP address part of the ipport definition. The default API setting + is "0.0.0.0". + required: false + default: none + port: + description: + - Port address part of the ip/port definition. The default API + setting is 0.
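Further down in main(), the ip/port defaults documented here ("0.0.0.0" and 0 mean "any") are mapped onto an iControl ATYPE_* destination constant before the template is created. A sketch of that mapping; ATYPE_UNSET is visible in the module below, while the other constants and the branch ordering follow the iControl destination types and are an assumption here, not the module verbatim:

    def address_type(ip, port):
        if ip == '0.0.0.0' and port == 0:
            return 'ATYPE_STAR_ADDRESS_STAR_PORT'         # any address, any port
        elif ip == '0.0.0.0':
            return 'ATYPE_STAR_ADDRESS_EXPLICIT_PORT'     # any address, one port
        elif port != 0:
            return 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT'
        return 'ATYPE_UNSET'

    assert address_type('0.0.0.0', 0) == 'ATYPE_STAR_ADDRESS_STAR_PORT'
    assert address_type('10.0.0.5', 443) == 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT'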
+ required: false + default: none + interval: + description: + - The interval specifying how frequently the monitor instance + of this template will run. By default, this interval is used for up and + down states. The default API setting is 5. + required: false + default: none + timeout: + description: + - The number of seconds in which the node or service must respond to + the monitor request. If the target responds within the set time + period, it is considered up. If the target does not respond within + the set time period, it is considered down. You can change this + number to any number you want, however, it should be 3 times the + interval number of seconds plus 1 second. The default API setting + is 16. + required: false + default: none + time_until_up: + description: + - Specifies the amount of time in seconds after the first successful + response before a node will be marked up. A value of 0 will cause a + node to be marked up immediately after a valid response is received + from the node. The default API setting is 0. + required: false + default: none +extends_documentation_fragment: f5 ''' EXAMPLES = ''' - -- name: BIGIP F5 | Create TCP Monitor - local_action: - module: bigip_monitor_tcp - state: present - server: "{{ f5server }}" - user: "{{ f5user }}" - password: "{{ f5password }}" - name: "{{ item.monitorname }}" - type: tcp - send: "{{ item.send }}" - receive: "{{ item.receive }}" - with_items: f5monitors-tcp -- name: BIGIP F5 | Create TCP half open Monitor - local_action: - module: bigip_monitor_tcp - state: present - server: "{{ f5server }}" - user: "{{ f5user }}" - password: "{{ f5password }}" - name: "{{ item.monitorname }}" - type: tcp - send: "{{ item.send }}" - receive: "{{ item.receive }}" - with_items: f5monitors-halftcp -- name: BIGIP F5 | Remove TCP Monitor - local_action: - module: bigip_monitor_tcp - state: absent - server: "{{ f5server }}" - user: "{{ f5user }}" - password: "{{ f5password }}" - name: "{{ monitorname }}" - with_flattened: - - f5monitors-tcp - - f5monitors-halftcp - +- name: Create TCP Monitor + bigip_monitor_tcp: + state: "present" + server: "lb.mydomain.com" + user: "admin" + password: "secret" + name: "my_tcp_monitor" + type: "tcp" + send: "tcp string to send" + receive: "tcp string to receive" + delegate_to: localhost + +- name: Create TCP half open Monitor + bigip_monitor_tcp: + state: "present" + server: "lb.mydomain.com" + user: "admin" + password: "secret" + name: "my_tcp_half_open_monitor" + type: "tcp_half_open" + delegate_to: localhost + +- name: Remove TCP Monitor + bigip_monitor_tcp: + state: "absent" + server: "lb.mydomain.com" + user: "admin" + password: "secret" + name: "my_tcp_monitor" + delegate_to: localhost ''' TEMPLATE_TYPE = DEFAULT_TEMPLATE_TYPE = 'TTYPE_TCP' @@ -187,7 +174,6 @@ def check_monitor_exists(module, api, monitor, parent): - # hack to determine if monitor exists result = False try: @@ -197,7 +183,7 @@ def check_monitor_exists(module, api, monitor, parent): result = True else: module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent(%s)' % (ttype, parent)) - except bigsuds.OperationFailed, e: + except bigsuds.OperationFailed as e: if "was not found" in str(e): result = False else: @@ -207,10 +193,15 @@ def create_monitor(api, monitor, template_attributes): - try: - api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}],
template_attributes=[template_attributes]) - except bigsuds.OperationFailed, e: + api.LocalLB.Monitor.create_template( + templates=[{ + 'template_name': monitor, + 'template_type': TEMPLATE_TYPE + }], + template_attributes=[template_attributes] + ) + except bigsuds.OperationFailed as e: if "already exists" in str(e): return False else: @@ -220,10 +211,9 @@ def create_monitor(api, monitor, template_attributes): def delete_monitor(api, monitor): - try: api.LocalLB.Monitor.delete_template(template_names=[monitor]) - except bigsuds.OperationFailed, e: + except bigsuds.OperationFailed as e: # maybe it was deleted since we checked if "was not found" in str(e): return False @@ -234,41 +224,46 @@ def delete_monitor(api, monitor): def check_string_property(api, monitor, str_property): - try: - return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0] - except bigsuds.OperationFailed, e: + template_prop = api.LocalLB.Monitor.get_template_string_property( + [monitor], [str_property['type']] + )[0] + return str_property == template_prop + except bigsuds.OperationFailed as e: # happens in check mode if not created yet if "was not found" in str(e): return True else: # genuine exception raise - return True def set_string_property(api, monitor, str_property): - - api.LocalLB.Monitor.set_template_string_property(template_names=[monitor], values=[str_property]) + api.LocalLB.Monitor.set_template_string_property( + template_names=[monitor], + values=[str_property] + ) def check_integer_property(api, monitor, int_property): - try: - return int_property == api.LocalLB.Monitor.get_template_integer_property([monitor], [int_property['type']])[0] - except bigsuds.OperationFailed, e: + return int_property == api.LocalLB.Monitor.get_template_integer_property( + [monitor], [int_property['type']] + )[0] + except bigsuds.OperationFailed as e: # happens in check mode if not created yet if "was not found" in str(e): return True else: # genuine exception raise - return True def set_integer_property(api, monitor, int_property): - - api.LocalLB.Monitor.set_template_int_property(template_names=[monitor], values=[int_property]) + api.LocalLB.Monitor.set_template_integer_property( + template_names=[monitor], + values=[int_property] + ) def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties): @@ -278,6 +273,7 @@ def update_monitor_properties(api, module, monitor, template_string_properties, if not module.check_mode: set_string_property(api, monitor, str_property) changed = True + for int_property in template_integer_properties: if int_property['value'] is not None and not check_integer_property(api, monitor, int_property): if not module.check_mode: @@ -288,54 +284,59 @@ def update_monitor_properties(api, module, monitor, template_string_properties, def get_ipport(api, monitor): - return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0] def set_ipport(api, monitor, ipport): - try: - api.LocalLB.Monitor.set_template_destination(template_names=[monitor], destinations=[ipport]) + api.LocalLB.Monitor.set_template_destination( + template_names=[monitor], destinations=[ipport] + ) return True, "" - except bigsuds.OperationFailed, e: + except bigsuds.OperationFailed as e: if "Cannot modify the address type of monitor" in str(e): return False, "Cannot modify the address type of monitor if already assigned to a pool." 
else: # genuine exception raise -# =========================================== -# main loop -# -# writing a module for other monitor types should -# only need an updated main() (and monitor specific functions) def main(): - - # begin monitor specific stuff - argument_spec=f5_argument_spec(); - argument_spec.update(dict( - name = dict(required=True), - type = dict(default=DEFAULT_TEMPLATE_TYPE_CHOICE, choices=TEMPLATE_TYPE_CHOICES), - parent = dict(default=DEFAULT_PARENT), - parent_partition = dict(default='Common'), - send = dict(required=False), - receive = dict(required=False), - ip = dict(required=False), - port = dict(required=False, type='int'), - interval = dict(required=False, type='int'), - timeout = dict(required=False, type='int'), - time_until_up = dict(required=False, type='int', default=0) - ) + argument_spec = f5_argument_spec() + + meta_args = dict( + name=dict(required=True), + type=dict(default=DEFAULT_TEMPLATE_TYPE_CHOICE, choices=TEMPLATE_TYPE_CHOICES), + parent=dict(default=DEFAULT_PARENT), + parent_partition=dict(default='Common'), + send=dict(required=False), + receive=dict(required=False), + ip=dict(required=False), + port=dict(required=False, type='int'), + interval=dict(required=False, type='int'), + timeout=dict(required=False, type='int'), + time_until_up=dict(required=False, type='int', default=0) ) + argument_spec.update(meta_args) module = AnsibleModule( - argument_spec = argument_spec, + argument_spec=argument_spec, supports_check_mode=True ) - (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) + if module.params['validate_certs']: + import ssl + if not hasattr(ssl, 'SSLContext'): + module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task') + + server = module.params['server'] + server_port = module.params['server_port'] + user = module.params['user'] + password = module.params['password'] + state = module.params['state'] + partition = module.params['partition'] + validate_certs = module.params['validate_certs'] parent_partition = module.params['parent_partition'] name = module.params['name'] @@ -356,29 +357,30 @@ def main(): # end monitor specific stuff - api = bigip_api(server, user, password) + api = bigip_api(server, user, password, validate_certs, port=server_port) monitor_exists = check_monitor_exists(module, api, monitor, parent) - # ipport is a special setting - if monitor_exists: # make sure to not update current settings if not asked + if monitor_exists: + # make sure to not update current settings if not asked cur_ipport = get_ipport(api, monitor) if ip is None: ip = cur_ipport['ipport']['address'] if port is None: port = cur_ipport['ipport']['port'] - else: # use API defaults if not defined to create it - if interval is None: + else: + # use API defaults if not defined to create it + if interval is None: interval = 5 - if timeout is None: + if timeout is None: timeout = 16 - if ip is None: + if ip is None: ip = '0.0.0.0' - if port is None: + if port is None: port = 0 - if send is None: + if send is None: send = '' - if receive is None: + if receive is None: receive = '' # define and set address type @@ -391,76 +393,90 @@ def main(): else: address_type = 'ATYPE_UNSET' - ipport = {'address_type': address_type, - 'ipport': {'address': ip, - 'port': port}} - - template_attributes = {'parent_template': parent, - 'interval': interval, - 'timeout': timeout, - 'dest_ipport': ipport, - 'is_read_only': False, - 'is_directly_usable': 
True} + ipport = { + 'address_type': address_type, + 'ipport': { + 'address': ip, + 'port': port + } + } + + template_attributes = { + 'parent_template': parent, + 'interval': interval, + 'timeout': timeout, + 'dest_ipport': ipport, + 'is_read_only': False, + 'is_directly_usable': True + } # monitor specific stuff if type == 'TTYPE_TCP': - template_string_properties = [{'type': 'STYPE_SEND', - 'value': send}, - {'type': 'STYPE_RECEIVE', - 'value': receive}] + template_string_properties = [ + { + 'type': 'STYPE_SEND', + 'value': send + }, + { + 'type': 'STYPE_RECEIVE', + 'value': receive + } + ] else: template_string_properties = [] - template_integer_properties = [{'type': 'ITYPE_INTERVAL', - 'value': interval}, - {'type': 'ITYPE_TIMEOUT', - 'value': timeout}, - {'type': 'ITYPE_TIME_UNTIL_UP', - 'value': interval}] + template_integer_properties = [ + { + 'type': 'ITYPE_INTERVAL', + 'value': interval + }, + { + 'type': 'ITYPE_TIMEOUT', + 'value': timeout + }, + { + 'type': 'ITYPE_TIME_UNTIL_UP', + 'value': time_until_up + } + ] # main logic, monitor generic try: result = {'changed': False} # default - if state == 'absent': if monitor_exists: if not module.check_mode: - # possible race condition if same task + # possible race condition if same task # on other node deleted it first result['changed'] |= delete_monitor(api, monitor) else: result['changed'] |= True - - else: # state present - ## check for monitor itself - if not monitor_exists: # create it - if not module.check_mode: + else: + # check for monitor itself + if not monitor_exists: + if not module.check_mode: # again, check changed status here b/c race conditions # if other task already created it result['changed'] |= create_monitor(api, monitor, template_attributes) - else: + else: result['changed'] |= True - ## check for monitor parameters + # check for monitor parameters # whether it already existed, or was just created, now update # the update functions need to check for check mode but # cannot update settings if it doesn't exist which happens in check mode - if monitor_exists and not module.check_mode: - result['changed'] |= update_monitor_properties(api, module, monitor, - template_string_properties, - template_integer_properties) - # else assume nothing changed + result['changed'] |= update_monitor_properties(api, module, monitor, + template_string_properties, + template_integer_properties) # we just have to update the ipport if monitor already exists and it's different if monitor_exists and cur_ipport != ipport: - set_ipport(api, monitor, ipport) + set_ipport(api, monitor, ipport) result['changed'] |= True - #else: monitor doesn't exist (check mode) or ipport is already ok - - - except Exception, e: + # else: monitor doesn't exist (check mode) or ipport is already ok + except Exception as e: module.fail_json(msg="received exception: %s" % e) module.exit_json(**result) @@ -468,5 +484,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.f5 import * -main() +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_node.py b/network/f5/bigip_node.py index 28eacc0d6f5..08107f6e2ce 100644 --- a/network/f5/bigip_node.py +++ b/network/f5/bigip_node.py @@ -1,6 +1,6 @@ #!/usr/bin/python # -*- coding: utf-8 -*- - +# # (c) 2013, Matt Hite # # This file is part of Ansible @@ -18,120 +18,113 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
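Throughout these modules, main() accumulates a changed flag with result['changed'] |= ... and only reports, rather than performs, changes when check mode is active. A minimal sketch of that idiom, detached from any F5 API (the function and parameter names here are illustrative only):

    def apply_settings(desired, current, check_mode=False):
        """Return True when a change is, or would be, made."""
        changed = False
        for key, value in desired.items():
            if current.get(key) != value:
                if not check_mode:
                    current[key] = value
                changed |= True
        return changed

    state = {'interval': 5, 'timeout': 16}
    assert apply_settings({'interval': 10}, state, check_mode=True)  # reported...
    assert state['interval'] == 5                                    # ...not written
    assert apply_settings({'interval': 10}, state)
    assert state['interval'] == 10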
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: bigip_node short_description: "Manages F5 BIG-IP LTM nodes" description: - - "Manages F5 BIG-IP LTM nodes via iControl SOAP API" + - "Manages F5 BIG-IP LTM nodes via iControl SOAP API" version_added: "1.4" -author: "Matt Hite (@mhite)" +author: + - Matt Hite (@mhite) + - Tim Rupp (@caphrim007) notes: - - "Requires BIG-IP software version >= 11" - - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - - "Best run as a local_action in your playbook" + - "Requires BIG-IP software version >= 11" + - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" + - "Best run as a local_action in your playbook" requirements: - - bigsuds + - bigsuds options: - server: - description: - - BIG-IP host - required: true - default: null - choices: [] - aliases: [] - user: - description: - - BIG-IP username - required: true - default: null - choices: [] - aliases: [] - password: - description: - - BIG-IP password - required: true - default: null - choices: [] - aliases: [] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 2.0 - state: - description: - - Pool member state - required: true - default: present - choices: ['present', 'absent'] - aliases: [] - session_state: - description: - - Set new session availability status for node - version_added: "1.9" - required: false - default: null - choices: ['enabled', 'disabled'] - aliases: [] - monitor_state: - description: - - Set monitor availability status for node - version_added: "1.9" - required: false - default: null - choices: ['enabled', 'disabled'] - aliases: [] - partition: - description: - - Partition - required: false - default: 'Common' - choices: [] - aliases: [] - name: - description: - - "Node name" - required: false - default: null - choices: [] - host: - description: - - "Node IP. Required when state=present and node does not exist. Error when state=absent." - required: true - default: null - choices: [] - aliases: ['address', 'ip'] + state: + description: + - Node state + required: false + default: present + choices: ['present', 'absent'] + aliases: [] + session_state: + description: + - Set new session availability status for node + version_added: "1.9" + required: false + default: null + choices: ['enabled', 'disabled'] + aliases: [] + monitor_state: + description: + - Set monitor availability status for node + version_added: "1.9" + required: false + default: null + choices: ['enabled', 'disabled'] + aliases: [] + partition: + description: + - Partition + required: false + default: 'Common' + choices: [] + aliases: [] + name: + description: + - "Node name" + required: false + default: null + choices: [] + monitor_type: + description: + - Monitor rule type when monitors > 1 + version_added: "2.2" + required: False + default: null + choices: ['and_list', 'm_of_n'] + aliases: [] + quorum: + description: + - Monitor quorum value when monitor_type is m_of_n + version_added: "2.2" + required: False + default: null + choices: [] + aliases: [] + monitors: description:
+ version_added: "2.2" + required: False + default: null + choices: [] + aliases: [] + host: + description: + - "Node IP. Required when state=present and node does not exist. Error when state=absent." + required: true + default: null + choices: [] + aliases: ['address', 'ip'] + description: + description: + - "Node description." + required: false + default: null + choices: [] +extends_documentation_fragment: f5 ''' EXAMPLES = ''' - -## playbook task examples: - ---- -# file bigip-test.yml -# ... -- hosts: bigip-test - tasks: - - name: Add node - local_action: > - bigip_node - server=lb.mydomain.com - user=admin - password=mysecret - state=present - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - name="{{ ansible_default_ipv4["address"] }}" +- name: Add node + bigip_node: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + state: "present" + partition: "Common" + host: "10.20.30.40" + name: "10.20.30.40" # Note that the BIG-IP automatically names the node using the # IP address specified in previous play's host parameter. @@ -140,26 +133,38 @@ # Alternatively, you could have specified a name with the # name parameter when state=present. - - name: Modify node description - local_action: > - bigip_node - server=lb.mydomain.com - user=admin - password=mysecret - state=present - partition=matthite - name="{{ ansible_default_ipv4["address"] }}" - description="Our best server yet" - - - name: Delete node - local_action: > - bigip_node - server=lb.mydomain.com - user=admin - password=mysecret - state=absent - partition=matthite - name="{{ ansible_default_ipv4["address"] }}" +- name: Add node with a single 'ping' monitor + bigip_node: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + state: "present" + partition: "Common" + host: "10.20.30.40" + name: "mytestserver" + monitors: + - /Common/icmp + delegate_to: localhost + +- name: Modify node description + bigip_node: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + state: "present" + partition: "Common" + name: "10.20.30.40" + description: "Our best server yet" + delegate_to: localhost + +- name: Delete node + bigip_node: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + state: "absent" + partition: "Common" + name: "10.20.30.40" # The BIG-IP GUI doesn't map directly to the API calls for "Node -> # General Properties -> State". 
The following states map to API monitor @@ -174,27 +179,26 @@ # # See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down - - name: Force node offline - local_action: > - bigip_node - server=lb.mydomain.com - user=admin - password=mysecret - state=present - session_state=disabled - monitor_state=disabled - partition=matthite - name="{{ ansible_default_ipv4["address"] }}" - +- name: Force node offline + bigip_node: + server: "lb.mydomain.com" + user: "admin" + password: "mysecret" + state: "present" + session_state: "disabled" + monitor_state: "disabled" + partition: "Common" + name: "10.20.30.40" ''' + def node_exists(api, address): # hack to determine if node exists result = False try: api.LocalLB.NodeAddressV2.get_object_status(nodes=[address]) result = True - except bigsuds.OperationFailed, e: + except bigsuds.OperationFailed as e: if "was not found" in str(e): result = False else: @@ -202,12 +206,17 @@ def node_exists(api, address): raise return result + def create_node_address(api, address, name): try: - api.LocalLB.NodeAddressV2.create(nodes=[name], addresses=[address], limits=[0]) + api.LocalLB.NodeAddressV2.create( + nodes=[name], + addresses=[address], + limits=[0] + ) result = True desc = "" - except bigsuds.OperationFailed, e: + except bigsuds.OperationFailed as e: if "already exists" in str(e): result = False desc = "referenced name or IP already in use" @@ -216,15 +225,17 @@ def create_node_address(api, address, name): raise return (result, desc) + def get_node_address(api, name): return api.LocalLB.NodeAddressV2.get_address(nodes=[name])[0] + def delete_node_address(api, address): try: api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address]) result = True desc = "" - except bigsuds.OperationFailed, e: + except bigsuds.OperationFailed as e: if "is referenced by a member of pool" in str(e): result = False desc = "node referenced by pool" @@ -233,51 +244,89 @@ def delete_node_address(api, address): raise return (result, desc) + def set_node_description(api, name, description): api.LocalLB.NodeAddressV2.set_description(nodes=[name], descriptions=[description]) + def get_node_description(api, name): return api.LocalLB.NodeAddressV2.get_description(nodes=[name])[0] + def set_node_session_enabled_state(api, name, session_state): session_state = "STATE_%s" % session_state.strip().upper() api.LocalLB.NodeAddressV2.set_session_enabled_state(nodes=[name], states=[session_state]) + def get_node_session_status(api, name): result = api.LocalLB.NodeAddressV2.get_session_status(nodes=[name])[0] result = result.split("SESSION_STATUS_")[-1].lower() return result + def set_node_monitor_state(api, name, monitor_state): monitor_state = "STATE_%s" % monitor_state.strip().upper() api.LocalLB.NodeAddressV2.set_monitor_state(nodes=[name], states=[monitor_state]) + def get_node_monitor_status(api, name): result = api.LocalLB.NodeAddressV2.get_monitor_status(nodes=[name])[0] result = result.split("MONITOR_STATUS_")[-1].lower() return result +def get_monitors(api, name): + result = api.LocalLB.NodeAddressV2.get_monitor_rule(nodes=[name])[0] + monitor_type = result['type'].split("MONITOR_RULE_TYPE_")[-1].lower() + quorum = result['quorum'] + monitor_templates = result['monitor_templates'] + return (monitor_type, quorum, monitor_templates) + + +def set_monitors(api, name, monitor_type, quorum, monitor_templates): + monitor_type = "MONITOR_RULE_TYPE_%s" % monitor_type.strip().upper() + monitor_rule = {'type': monitor_type, 'quorum': quorum, 'monitor_templates': 
monitor_templates} + api.LocalLB.NodeAddressV2.set_monitor_rule(nodes=[name], + monitor_rules=[monitor_rule]) + + def main(): - argument_spec=f5_argument_spec(); - argument_spec.update(dict( - session_state = dict(type='str', choices=['enabled', 'disabled']), - monitor_state = dict(type='str', choices=['enabled', 'disabled']), - name = dict(type='str', required=True), - host = dict(type='str', aliases=['address', 'ip']), - description = dict(type='str') - ) + monitor_type_choices = ['and_list', 'm_of_n'] + + argument_spec = f5_argument_spec() + + meta_args = dict( + session_state=dict(type='str', choices=['enabled', 'disabled']), + monitor_state=dict(type='str', choices=['enabled', 'disabled']), + name=dict(type='str', required=True), + host=dict(type='str', aliases=['address', 'ip']), + description=dict(type='str'), + monitor_type=dict(type='str', choices=monitor_type_choices), + quorum=dict(type='int'), + monitors=dict(type='list') + ) + argument_spec.update(meta_args) module = AnsibleModule( - argument_spec = argument_spec, + argument_spec=argument_spec, supports_check_mode=True ) - (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) + if module.params['validate_certs']: + import ssl + if not hasattr(ssl, 'SSLContext'): + module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task') + + server = module.params['server'] + server_port = module.params['server_port'] + user = module.params['user'] + password = module.params['password'] + state = module.params['state'] + partition = module.params['partition'] + validate_certs = module.params['validate_certs'] session_state = module.params['session_state'] monitor_state = module.params['monitor_state'] @@ -285,12 +334,41 @@ def main(): name = module.params['name'] address = fq_name(partition, name) description = module.params['description'] - + monitor_type = module.params['monitor_type'] + if monitor_type: + monitor_type = monitor_type.lower() + quorum = module.params['quorum'] + monitors = module.params['monitors'] + if monitors: + monitors = [] + for monitor in module.params['monitors']: + monitors.append(fq_name(partition, monitor)) + + # sanity check user supplied values if state == 'absent' and host is not None: module.fail_json(msg="host parameter invalid when state=absent") + if monitors: + if len(monitors) == 1: + # set default required values for single monitor + quorum = 0 + monitor_type = 'single' + elif len(monitors) > 1: + if not monitor_type: + module.fail_json(msg="monitor_type required for monitors > 1") + if monitor_type == 'm_of_n' and not quorum: + module.fail_json(msg="quorum value required for monitor_type m_of_n") + if monitor_type != 'm_of_n': + quorum = 0 + elif monitor_type: + # no monitors specified but monitor_type exists + module.fail_json(msg="monitor_type requires the monitors parameter") + elif quorum is not None: + # no monitors specified but quorum exists + module.fail_json(msg="quorum requires monitors parameter") + try: - api = bigip_api(server, user, password) + api = bigip_api(server, user, password, validate_certs, port=server_port) result = {'changed': False} # default if state == 'absent': @@ -308,7 +386,7 @@ def main(): elif state == 'present': if not node_exists(api, address): if host is None: - module.fail_json(msg="host parameter required when " \ + module.fail_json(msg="host parameter required when " "state=present and node does not exist") if not module.check_mode: created, desc =
create_node_address(api, address=host, name=address) @@ -326,6 +404,8 @@ def main(): if description is not None: set_node_description(api, address, description) result = {'changed': True} + if monitors: + set_monitors(api, address, monitor_type, quorum, monitors) else: # check-mode return value result = {'changed': True} @@ -333,8 +413,8 @@ def main(): # node exists -- potentially modify attributes if host is not None: if get_node_address(api, address) != host: - module.fail_json(msg="Changing the node address is " \ - "not supported by the API; " \ + module.fail_json(msg="Changing the node address is " + "not supported by the API; " "delete and recreate the node.") if session_state is not None: session_status = get_node_session_status(api, address) @@ -345,7 +425,7 @@ def main(): session_state) result = {'changed': True} elif session_state == 'disabled' and \ - session_status != 'force_disabled': + session_status != 'force_disabled': if not module.check_mode: set_node_session_enabled_state(api, address, session_state) @@ -359,7 +439,7 @@ def main(): monitor_state) result = {'changed': True} elif monitor_state == 'disabled' and \ - monitor_status != 'forced_down': + monitor_status != 'forced_down': if not module.check_mode: set_node_monitor_state(api, address, monitor_state) @@ -369,14 +449,19 @@ def main(): if not module.check_mode: set_node_description(api, address, description) result = {'changed': True} - - except Exception, e: + if monitors: + t_monitor_type, t_quorum, t_monitor_templates = get_monitors(api, address) + if (t_monitor_type != monitor_type) or (t_quorum != quorum) or (set(t_monitor_templates) != set(monitors)): + if not module.check_mode: + set_monitors(api, address, monitor_type, quorum, monitors) + result = {'changed': True} + except Exception as e: module.fail_json(msg="received exception: %s" % e) module.exit_json(**result) -# import module snippets from ansible.module_utils.basic import * from ansible.module_utils.f5 import * -main() +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_pool.py b/network/f5/bigip_pool.py index 1628f6c68c9..eb6b8f3adaa 100644 --- a/network/f5/bigip_pool.py +++ b/network/f5/bigip_pool.py @@ -18,223 +18,217 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: bigip_pool short_description: "Manages F5 BIG-IP LTM pools" description: - - "Manages F5 BIG-IP LTM pools via iControl SOAP API" -version_added: "1.2" -author: "Matt Hite (@mhite)" + - Manages F5 BIG-IP LTM pools via iControl SOAP API +version_added: 1.2 +author: + - Matt Hite (@mhite) + - Tim Rupp (@caphrim007) notes: - - "Requires BIG-IP software version >= 11" - - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - - "Best run as a local_action in your playbook" + - Requires BIG-IP software version >= 11 + - F5 developed module 'bigsuds' required (see http://devcentral.f5.com) + - Best run as a local_action in your playbook requirements: - - bigsuds + - bigsuds options: - server: - description: - - BIG-IP host - required: true - default: null - choices: [] - aliases: [] - user: - description: - - BIG-IP username - required: true - default: null - choices: [] - aliases: [] - password: - description: - - BIG-IP password - required: true - default: null - choices: [] - aliases: [] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. 
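The monitor-rule comparison at the end of bigip_node's main() above treats template order as irrelevant: a rule counts as changed only when the rule type, the quorum, or the set of monitor templates differs. A standalone sketch of that comparison (the function name is illustrative, not a helper from the module):

    def monitor_rule_changed(current, desired):
        return (current['type'] != desired['type'] or
                current['quorum'] != desired['quorum'] or
                set(current['monitor_templates']) != set(desired['monitor_templates']))

    current = {'type': 'm_of_n', 'quorum': 1,
               'monitor_templates': ['/Common/http', '/Common/icmp']}
    desired = {'type': 'm_of_n', 'quorum': 1,
               'monitor_templates': ['/Common/icmp', '/Common/http']}
    assert monitor_rule_changed(current, desired) is False  # order is ignored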
This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 2.0 - state: - description: - - Pool/pool member state - required: false - default: present - choices: ['present', 'absent'] - aliases: [] - name: - description: - - Pool name - required: true - default: null - choices: [] - aliases: ['pool'] - partition: - description: - - Partition of pool/pool member - required: false - default: 'Common' - choices: [] - aliases: [] - lb_method: - description: - - Load balancing method - version_added: "1.3" - required: False - default: 'round_robin' - choices: ['round_robin', 'ratio_member', 'least_connection_member', - 'observed_member', 'predictive_member', 'ratio_node_address', - 'least_connection_node_address', 'fastest_node_address', - 'observed_node_address', 'predictive_node_address', - 'dynamic_ratio', 'fastest_app_response', 'least_sessions', - 'dynamic_ratio_member', 'l3_addr', 'unknown', - 'weighted_least_connection_member', - 'weighted_least_connection_node_address', - 'ratio_session', 'ratio_least_connection_member', - 'ratio_least_connection_node_address'] - aliases: [] - monitor_type: - description: - - Monitor rule type when monitors > 1 - version_added: "1.3" - required: False - default: null - choices: ['and_list', 'm_of_n'] - aliases: [] - quorum: - description: - - Monitor quorum value when monitor_type is m_of_n - version_added: "1.3" - required: False - default: null - choices: [] - aliases: [] - monitors: - description: - - Monitor template name list. Always use the full path to the monitor. - version_added: "1.3" - required: False - default: null - choices: [] - aliases: [] - slow_ramp_time: - description: - - Sets the ramp-up time (in seconds) to gradually ramp up the load on newly added or freshly detected up pool members - version_added: "1.3" - required: False - default: null - choices: [] - aliases: [] - service_down_action: - description: - - Sets the action to take when node goes down in pool - version_added: "1.3" - required: False - default: null - choices: ['none', 'reset', 'drop', 'reselect'] - aliases: [] - host: - description: - - "Pool member IP" - required: False - default: null - choices: [] - aliases: ['address'] - port: - description: - - "Pool member port" - required: False - default: null - choices: [] - aliases: [] + state: + description: + - Pool/pool member state + required: false + default: present + choices: + - present + - absent + aliases: [] + name: + description: + - Pool name + required: true + default: null + choices: [] + aliases: + - pool + partition: + description: + - Partition of pool/pool member + required: false + default: 'Common' + choices: [] + aliases: [] + lb_method: + description: + - Load balancing method + version_added: "1.3" + required: False + default: 'round_robin' + choices: + - round_robin + - ratio_member + - least_connection_member + - observed_member + - predictive_member + - ratio_node_address + - least_connection_node_address + - fastest_node_address + - observed_node_address + - predictive_node_address + - dynamic_ratio + - fastest_app_response + - least_sessions + - dynamic_ratio_member + - l3_addr + - weighted_least_connection_member + - weighted_least_connection_node_address + - ratio_session + - ratio_least_connection_member + - ratio_least_connection_node_address + aliases: [] + monitor_type: + description: + - Monitor rule type when monitors > 1 + version_added: "1.3" + required: False + default: null + 
choices: ['and_list', 'm_of_n'] + aliases: [] + quorum: + description: + - Monitor quorum value when monitor_type is m_of_n + version_added: "1.3" + required: False + default: null + choices: [] + aliases: [] + monitors: + description: + - Monitor template name list. Always use the full path to the monitor. + version_added: "1.3" + required: False + default: null + choices: [] + aliases: [] + slow_ramp_time: + description: + - Sets the ramp-up time (in seconds) to gradually ramp up the load on + newly added or freshly detected up pool members + version_added: "1.3" + required: False + default: null + choices: [] + aliases: [] + reselect_tries: + description: + - Sets the number of times the system tries to contact a pool member + after a passive failure + version_added: "2.2" + required: False + default: null + choices: [] + aliases: [] + service_down_action: + description: + - Sets the action to take when node goes down in pool + version_added: "1.3" + required: False + default: null + choices: + - none + - reset + - drop + - reselect + aliases: [] + host: + description: + - "Pool member IP" + required: False + default: null + choices: [] + aliases: + - address + port: + description: + - Pool member port + required: False + default: null + choices: [] + aliases: [] +extends_documentation_fragment: f5 ''' EXAMPLES = ''' +- name: Create pool + bigip_pool: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + state: "present" + name: "my-pool" + partition: "Common" + lb_method: "least_connection_member" + slow_ramp_time: 120 + delegate_to: localhost + +- name: Modify load balancer method + bigip_pool: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + state: "present" + name: "my-pool" + partition: "Common" + lb_method: "round_robin" + +- name: Add pool member + bigip_pool: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + state: "present" + name: "my-pool" + partition: "Common" + host: "{{ ansible_default_ipv4['address'] }}" + port: 80 + +- name: Remove pool member from pool + bigip_pool: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + state: "absent" + name: "my-pool" + partition: "Common" + host: "{{ ansible_default_ipv4['address'] }}" + port: 80 + +- name: Delete pool + bigip_pool: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + state: "absent" + name: "my-pool" + partition: "Common" +''' -## playbook task examples: - ---- -# file bigip-test.yml -# ...
-- hosts: localhost - tasks: - - name: Create pool - local_action: > - bigip_pool - server=lb.mydomain.com - user=admin - password=mysecret - state=present - name=matthite-pool - partition=matthite - lb_method=least_connection_member - slow_ramp_time=120 - - - name: Modify load balancer method - local_action: > - bigip_pool - server=lb.mydomain.com - user=admin - password=mysecret - state=present - name=matthite-pool - partition=matthite - lb_method=round_robin - -- hosts: bigip-test - tasks: - - name: Add pool member - local_action: > - bigip_pool - server=lb.mydomain.com - user=admin - password=mysecret - state=present - name=matthite-pool - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - port=80 - - - name: Remove pool member from pool - local_action: > - bigip_pool - server=lb.mydomain.com - user=admin - password=mysecret - state=absent - name=matthite-pool - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - port=80 - -- hosts: localhost - tasks: - - name: Delete pool - local_action: > - bigip_pool - server=lb.mydomain.com - user=admin - password=mysecret - state=absent - name=matthite-pool - partition=matthite - +RETURN = ''' ''' + def pool_exists(api, pool): # hack to determine if pool exists result = False try: api.LocalLB.Pool.get_object_status(pool_names=[pool]) result = True - except bigsuds.OperationFailed, e: + except bigsuds.OperationFailed as e: if "was not found" in str(e): result = False else: @@ -242,6 +236,7 @@ def pool_exists(api, pool): raise return result + def create_pool(api, pool, lb_method): # create requires lb_method but we don't want to default # to a value on subsequent runs @@ -251,18 +246,22 @@ def create_pool(api, pool, lb_method): api.LocalLB.Pool.create_v2(pool_names=[pool], lb_methods=[lb_method], members=[[]]) + def remove_pool(api, pool): api.LocalLB.Pool.delete_pool(pool_names=[pool]) + def get_lb_method(api, pool): lb_method = api.LocalLB.Pool.get_lb_method(pool_names=[pool])[0] lb_method = lb_method.strip().replace('LB_METHOD_', '').lower() return lb_method + def set_lb_method(api, pool, lb_method): lb_method = "LB_METHOD_%s" % lb_method.strip().upper() api.LocalLB.Pool.set_lb_method(pool_names=[pool], lb_methods=[lb_method]) + def get_monitors(api, pool): result = api.LocalLB.Pool.get_monitor_association(pool_names=[pool])[0]['monitor_rule'] monitor_type = result['type'].split("MONITOR_RULE_TYPE_")[-1].lower() @@ -270,28 +269,43 @@ def get_monitors(api, pool): monitor_templates = result['monitor_templates'] return (monitor_type, quorum, monitor_templates) + def set_monitors(api, pool, monitor_type, quorum, monitor_templates): monitor_type = "MONITOR_RULE_TYPE_%s" % monitor_type.strip().upper() monitor_rule = {'type': monitor_type, 'quorum': quorum, 'monitor_templates': monitor_templates} monitor_association = {'pool_name': pool, 'monitor_rule': monitor_rule} api.LocalLB.Pool.set_monitor_association(monitor_associations=[monitor_association]) + def get_slow_ramp_time(api, pool): result = api.LocalLB.Pool.get_slow_ramp_time(pool_names=[pool])[0] return result + def set_slow_ramp_time(api, pool, seconds): api.LocalLB.Pool.set_slow_ramp_time(pool_names=[pool], values=[seconds]) + +def get_reselect_tries(api, pool): + result = api.LocalLB.Pool.get_reselect_tries(pool_names=[pool])[0] + return result + + +def set_reselect_tries(api, pool, tries): + api.LocalLB.Pool.set_reselect_tries(pool_names=[pool], values=[tries]) + + def get_action_on_service_down(api, pool): result = 
api.LocalLB.Pool.get_action_on_service_down(pool_names=[pool])[0] result = result.split("SERVICE_DOWN_ACTION_")[-1].lower() return result + def set_action_on_service_down(api, pool, action): action = "SERVICE_DOWN_ACTION_%s" % action.strip().upper() api.LocalLB.Pool.set_action_on_service_down(pool_names=[pool], actions=[action]) + def member_exists(api, pool, address, port): # hack to determine if member exists result = False @@ -300,7 +314,7 @@ def member_exists(api, pool, address, port): api.LocalLB.Pool.get_member_object_status(pool_names=[pool], members=[members]) result = True - except bigsuds.OperationFailed, e: + except bigsuds.OperationFailed as e: if "was not found" in str(e): result = False else: @@ -308,12 +322,13 @@ def member_exists(api, pool, address, port): raise return result + def delete_node_address(api, address): result = False try: api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address]) result = True - except bigsuds.OperationFailed, e: + except bigsuds.OperationFailed as e: if "is referenced by a member of pool" in str(e): result = False else: @@ -321,14 +336,17 @@ def delete_node_address(api, address): raise return result + def remove_pool_member(api, pool, address, port): members = [{'address': address, 'port': port}] api.LocalLB.Pool.remove_member_v2(pool_names=[pool], members=[members]) + def add_pool_member(api, pool, address, port): members = [{'address': address, 'port': port}] api.LocalLB.Pool.add_member_v2(pool_names=[pool], members=[members]) + def main(): lb_method_choices = ['round_robin', 'ratio_member', 'least_connection_member', 'observed_member', @@ -337,7 +355,7 @@ def main(): 'fastest_node_address', 'observed_node_address', 'predictive_node_address', 'dynamic_ratio', 'fastest_app_response', 'least_sessions', - 'dynamic_ratio_member', 'l3_addr', 'unknown', + 'dynamic_ratio_member', 'l3_addr', 'weighted_least_connection_member', 'weighted_least_connection_node_address', 'ratio_session', 'ratio_least_connection_member', @@ -347,29 +365,45 @@ def main(): service_down_choices = ['none', 'reset', 'drop', 'reselect'] - argument_spec=f5_argument_spec(); - argument_spec.update(dict( - name = dict(type='str', required=True, aliases=['pool']), - lb_method = dict(type='str', choices=lb_method_choices), - monitor_type = dict(type='str', choices=monitor_type_choices), - quorum = dict(type='int'), - monitors = dict(type='list'), - slow_ramp_time = dict(type='int'), - service_down_action = dict(type='str', choices=service_down_choices), - host = dict(type='str', aliases=['address']), - port = dict(type='int') - ) + argument_spec = f5_argument_spec() + + meta_args = dict( + name=dict(type='str', required=True, aliases=['pool']), + lb_method=dict(type='str', choices=lb_method_choices), + monitor_type=dict(type='str', choices=monitor_type_choices), + quorum=dict(type='int'), + monitors=dict(type='list'), + slow_ramp_time=dict(type='int'), + reselect_tries=dict(type='int'), + service_down_action=dict(type='str', choices=service_down_choices), + host=dict(type='str', aliases=['address']), + port=dict(type='int') ) + argument_spec.update(meta_args) module = AnsibleModule( - argument_spec = argument_spec, + argument_spec=argument_spec, supports_check_mode=True ) - (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) + if not bigsuds_found: + module.fail_json(msg="the python bigsuds module is required") + + if module.params['validate_certs']: + import ssl + if not hasattr(ssl, 'SSLContext'): + module.fail_json(msg='bigsuds does not 
support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task') + + server = module.params['server'] + server_port = module.params['server_port'] + user = module.params['user'] + password = module.params['password'] + state = module.params['state'] + partition = module.params['partition'] + validate_certs = module.params['validate_certs'] name = module.params['name'] - pool = fq_name(partition,name) + pool = fq_name(partition, name) lb_method = module.params['lb_method'] if lb_method: lb_method = lb_method.lower() @@ -383,23 +417,21 @@ def main(): for monitor in module.params['monitors']: monitors.append(fq_name(partition, monitor)) slow_ramp_time = module.params['slow_ramp_time'] + reselect_tries = module.params['reselect_tries'] service_down_action = module.params['service_down_action'] if service_down_action: service_down_action = service_down_action.lower() host = module.params['host'] - address = fq_name(partition,host) + address = fq_name(partition, host) port = module.params['port'] - if not validate_certs: - disable_ssl_cert_validation() - # sanity check user supplied values - if (host and not port) or (port and not host): + if (host and port is None) or (port is not None and not host): module.fail_json(msg="both host and port must be supplied") - if 1 > port > 65535: - module.fail_json(msg="valid ports must be in range 1 - 65535") + if port is not None and (0 > port or port > 65535): + module.fail_json(msg="valid ports must be in range 0 - 65535") if monitors: if len(monitors) == 1: @@ -421,7 +453,7 @@ def main(): module.fail_json(msg="quorum requires monitors parameter") try: - api = bigip_api(server, user, password) + api = bigip_api(server, user, password, validate_certs, port=server_port) result = {'changed': False} # default if state == 'absent': @@ -442,7 +474,7 @@ def main(): try: remove_pool(api, pool) result = {'changed': True} - except bigsuds.OperationFailed, e: + except bigsuds.OperationFailed as e: if "was not found" in str(e): result = {'changed': False} else: @@ -465,7 +497,7 @@ def main(): try: create_pool(api, pool, lb_method) result = {'changed': True} - except bigsuds.OperationFailed, e: + except bigsuds.OperationFailed as e: if "already exists" in str(e): update = True else: @@ -476,6 +508,8 @@ def main(): set_monitors(api, pool, monitor_type, quorum, monitors) if slow_ramp_time: set_slow_ramp_time(api, pool, slow_ramp_time) + if reselect_tries: + set_reselect_tries(api, pool, reselect_tries) if service_down_action: set_action_on_service_down(api, pool, service_down_action) if host and port: @@ -502,6 +536,10 @@ def main(): if not module.check_mode: set_slow_ramp_time(api, pool, slow_ramp_time) result = {'changed': True} + if reselect_tries and reselect_tries != get_reselect_tries(api, pool): + if not module.check_mode: + set_reselect_tries(api, pool, reselect_tries) + result = {'changed': True} if service_down_action and service_down_action != get_action_on_service_down(api, pool): if not module.check_mode: set_action_on_service_down(api, pool, service_down_action) @@ -510,14 +548,18 @@ def main(): if not module.check_mode: add_pool_member(api, pool, address, port) result = {'changed': True} + if (host and port == 0) and not member_exists(api, pool, address, port): + if not module.check_mode: + add_pool_member(api, pool, address, port) + result = {'changed': True} - except Exception, e: + except Exception as e: module.fail_json(msg="received exception: %s" % e) module.exit_json(**result) -# import module 
snippets from ansible.module_utils.basic import * from ansible.module_utils.f5 import * -main() +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_pool_member.py b/network/f5/bigip_pool_member.py index ec2b7135372..42d4538f9f6 100644 --- a/network/f5/bigip_pool_member.py +++ b/network/f5/bigip_pool_member.py @@ -1,6 +1,6 @@ #!/usr/bin/python # -*- coding: utf-8 -*- - +# # (c) 2013, Matt Hite # # This file is part of Ansible @@ -18,191 +18,191 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: bigip_pool_member -short_description: "Manages F5 BIG-IP LTM pool members" +short_description: Manages F5 BIG-IP LTM pool members description: - - "Manages F5 BIG-IP LTM pool members via iControl SOAP API" -version_added: "1.4" -author: "Matt Hite (@mhite)" + - Manages F5 BIG-IP LTM pool members via iControl SOAP API +version_added: 1.4 +author: + - Matt Hite (@mhite) + - Tim Rupp (@caphrim007) notes: - - "Requires BIG-IP software version >= 11" - - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" - - "Best run as a local_action in your playbook" - - "Supersedes bigip_pool for managing pool members" - + - Requires BIG-IP software version >= 11 + - F5 developed module 'bigsuds' required (see http://devcentral.f5.com) + - Best run as a local_action in your playbook + - Supersedes bigip_pool for managing pool members requirements: - - bigsuds + - bigsuds options: - server: - description: - - BIG-IP host - required: true - user: - description: - - BIG-IP username - required: true - password: - description: - - BIG-IP password - required: true - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: 2.0 - state: - description: - - Pool member state - required: true - default: present - choices: ['present', 'absent'] - session_state: - description: - - Set new session availability status for pool member - version_added: "2.0" - required: false - default: null - choices: ['enabled', 'disabled'] - monitor_state: - description: - - Set monitor availability status for pool member - version_added: "2.0" - required: false - default: null - choices: ['enabled', 'disabled'] - pool: - description: - - Pool name. This pool must exist. - required: true - partition: - description: - - Partition - required: false - default: 'Common' - host: - description: - - Pool member IP - required: true - aliases: ['address', 'name'] - port: - description: - - Pool member port - required: true - connection_limit: - description: - - Pool member connection limit. Setting this to 0 disables the limit. - required: false - default: null + state: + description: + - Pool member state + required: true + default: present + choices: + - present + - absent + session_state: + description: + - Set new session availability status for pool member + version_added: 2.0 + required: false + default: null + choices: + - enabled + - disabled + monitor_state: + description: + - Set monitor availability status for pool member + version_added: 2.0 + required: false + default: null + choices: + - enabled + - disabled + pool: + description: + - Pool name. This pool must exist. 
+ required: true + partition: + description: + - Partition + required: false + default: 'Common' + host: + description: + - Pool member IP + required: true + aliases: + - address + - name + port: + description: + - Pool member port + required: true + connection_limit: + description: + - Pool member connection limit. Setting this to 0 disables the limit. + required: false + default: null + description: + description: + - Pool member description + required: false + default: null + rate_limit: description: - description: - - Pool member description - required: false - default: null - rate_limit: - description: - - Pool member rate limit (connections-per-second). Setting this to 0 disables the limit. - required: false - default: null - ratio: - description: - - Pool member ratio weight. Valid values range from 1 through 100. New pool members -- unless overriden with this value -- default to 1. - required: false - default: null + - Pool member rate limit (connections-per-second). Setting this to 0 + disables the limit. + required: false + default: null + ratio: + description: + - Pool member ratio weight. Valid values range from 1 through 100. + New pool members -- unless overridden with this value -- default + to 1. + required: false + default: null + preserve_node: + description: + - When state is absent and the pool member is no longer referenced + in other pools, the default behavior removes the unused node + object. Setting this to 'yes' disables this behavior. + required: false + default: 'no' + choices: + - yes + - no + version_added: 2.1 +extends_documentation_fragment: f5 ''' EXAMPLES = ''' - -## playbook task examples: - ---- -# file bigip-test.yml -# ... -- hosts: bigip-test - tasks: - - name: Add pool member - local_action: > - bigip_pool_member - server=lb.mydomain.com - user=admin - password=mysecret - state=present - pool=matthite-pool - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - port=80 - description="web server" - connection_limit=100 - rate_limit=50 - ratio=2 - - - name: Modify pool member ratio and description - local_action: > - bigip_pool_member - server=lb.mydomain.com - user=admin - password=mysecret - state=present - pool=matthite-pool - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - port=80 - ratio=1 - description="nginx server" - - - name: Remove pool member from pool - local_action: > - bigip_pool_member - server=lb.mydomain.com - user=admin - password=mysecret - state=absent - pool=matthite-pool - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - port=80 - - - # The BIG-IP GUI doesn't map directly to the API calls for "Pool -> - # Members -> State". The following states map to API monitor - # and session states. - # - # Enabled (all traffic allowed): - # monitor_state=enabled, session_state=enabled - # Disabled (only persistent or active connections allowed): - # monitor_state=enabled, session_state=disabled - # Forced offline (only active connections allowed): - # monitor_state=disabled, session_state=disabled - # - # See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down - - - name: Force pool member offline - local_action: > - bigip_pool_member - server=lb.mydomain.com - user=admin - password=mysecret - state=present - session_state=disabled - monitor_state=disabled - pool=matthite-pool - partition=matthite - host="{{ ansible_default_ipv4["address"] }}" - port=80 - +- name: Add pool member + bigip_pool_member: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + state: "present" + pool: "my-pool" + partition: "Common" + host: "{{ ansible_default_ipv4['address'] }}" + port: 80 + description: "web server" + connection_limit: 100 + rate_limit: 50 + ratio: 2 + delegate_to: localhost + +- name: Modify pool member ratio and description + bigip_pool_member: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + state: "present" + pool: "my-pool" + partition: "Common" + host: "{{ ansible_default_ipv4['address'] }}" + port: 80 + ratio: 1 + description: "nginx server" + delegate_to: localhost + +- name: Remove pool member from pool + bigip_pool_member: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + state: "absent" + pool: "my-pool" + partition: "Common" + host: "{{ ansible_default_ipv4['address'] }}" + port: 80 + delegate_to: localhost + + +# The BIG-IP GUI doesn't map directly to the API calls for "Pool -> +# Members -> State". The following states map to API monitor +# and session states. +# +# Enabled (all traffic allowed): +# monitor_state=enabled, session_state=enabled +# Disabled (only persistent or active connections allowed): +# monitor_state=enabled, session_state=disabled +# Forced offline (only active connections allowed): +# monitor_state=disabled, session_state=disabled +# +# See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down + +- name: Force pool member offline + bigip_pool_member: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + state: "present" + session_state: "disabled" + monitor_state: "disabled" + pool: "my-pool" + partition: "Common" + host: "{{ ansible_default_ipv4['address'] }}" + port: 80 + delegate_to: localhost ''' + def pool_exists(api, pool): # hack to determine if pool exists result = False try: api.LocalLB.Pool.get_object_status(pool_names=[pool]) result = True - except bigsuds.OperationFailed, e: + except bigsuds.OperationFailed as e: if "was not found" in str(e): result = False else: @@ -210,6 +210,7 @@ def pool_exists(api, pool): raise return result + def member_exists(api, pool, address, port): # hack to determine if member exists result = False @@ -218,7 +219,7 @@ def member_exists(api, pool, address, port): api.LocalLB.Pool.get_member_object_status(pool_names=[pool], members=[members]) result = True - except bigsuds.OperationFailed, e: + except bigsuds.OperationFailed as e: if "was not found" in str(e): result = False else: @@ -226,12 +227,13 @@ def member_exists(api, pool, address, port): raise return result + def delete_node_address(api, address): result = False try: api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address]) result = True - except bigsuds.OperationFailed, e: + except bigsuds.OperationFailed as e: if "is referenced by a member of
pool" in str(e): result = False else: @@ -239,93 +241,170 @@ def delete_node_address(api, address): raise return result + def remove_pool_member(api, pool, address, port): members = [{'address': address, 'port': port}] - api.LocalLB.Pool.remove_member_v2(pool_names=[pool], members=[members]) + api.LocalLB.Pool.remove_member_v2( + pool_names=[pool], + members=[members] + ) + def add_pool_member(api, pool, address, port): members = [{'address': address, 'port': port}] - api.LocalLB.Pool.add_member_v2(pool_names=[pool], members=[members]) + api.LocalLB.Pool.add_member_v2( + pool_names=[pool], + members=[members] + ) + def get_connection_limit(api, pool, address, port): members = [{'address': address, 'port': port}] - result = api.LocalLB.Pool.get_member_connection_limit(pool_names=[pool], members=[members])[0][0] + result = api.LocalLB.Pool.get_member_connection_limit( + pool_names=[pool], + members=[members] + )[0][0] return result + def set_connection_limit(api, pool, address, port, limit): members = [{'address': address, 'port': port}] - api.LocalLB.Pool.set_member_connection_limit(pool_names=[pool], members=[members], limits=[[limit]]) + api.LocalLB.Pool.set_member_connection_limit( + pool_names=[pool], + members=[members], + limits=[[limit]] + ) + def get_description(api, pool, address, port): members = [{'address': address, 'port': port}] - result = api.LocalLB.Pool.get_member_description(pool_names=[pool], members=[members])[0][0] + result = api.LocalLB.Pool.get_member_description( + pool_names=[pool], + members=[members] + )[0][0] return result + def set_description(api, pool, address, port, description): members = [{'address': address, 'port': port}] - api.LocalLB.Pool.set_member_description(pool_names=[pool], members=[members], descriptions=[[description]]) + api.LocalLB.Pool.set_member_description( + pool_names=[pool], + members=[members], + descriptions=[[description]] + ) + def get_rate_limit(api, pool, address, port): members = [{'address': address, 'port': port}] - result = api.LocalLB.Pool.get_member_rate_limit(pool_names=[pool], members=[members])[0][0] + result = api.LocalLB.Pool.get_member_rate_limit( + pool_names=[pool], + members=[members] + )[0][0] return result + def set_rate_limit(api, pool, address, port, limit): members = [{'address': address, 'port': port}] - api.LocalLB.Pool.set_member_rate_limit(pool_names=[pool], members=[members], limits=[[limit]]) + api.LocalLB.Pool.set_member_rate_limit( + pool_names=[pool], + members=[members], + limits=[[limit]] + ) + def get_ratio(api, pool, address, port): members = [{'address': address, 'port': port}] - result = api.LocalLB.Pool.get_member_ratio(pool_names=[pool], members=[members])[0][0] + result = api.LocalLB.Pool.get_member_ratio( + pool_names=[pool], + members=[members] + )[0][0] return result + def set_ratio(api, pool, address, port, ratio): members = [{'address': address, 'port': port}] - api.LocalLB.Pool.set_member_ratio(pool_names=[pool], members=[members], ratios=[[ratio]]) + api.LocalLB.Pool.set_member_ratio( + pool_names=[pool], + members=[members], + ratios=[[ratio]] + ) + def set_member_session_enabled_state(api, pool, address, port, session_state): members = [{'address': address, 'port': port}] session_state = ["STATE_%s" % session_state.strip().upper()] - api.LocalLB.Pool.set_member_session_enabled_state(pool_names=[pool], members=[members], session_states=[session_state]) + api.LocalLB.Pool.set_member_session_enabled_state( + pool_names=[pool], + members=[members], + session_states=[session_state] + ) + 
def get_member_session_status(api, pool, address, port): members = [{'address': address, 'port': port}] - result = api.LocalLB.Pool.get_member_session_status(pool_names=[pool], members=[members])[0][0] + result = api.LocalLB.Pool.get_member_session_status( + pool_names=[pool], + members=[members] + )[0][0] result = result.split("SESSION_STATUS_")[-1].lower() return result + def set_member_monitor_state(api, pool, address, port, monitor_state): members = [{'address': address, 'port': port}] monitor_state = ["STATE_%s" % monitor_state.strip().upper()] - api.LocalLB.Pool.set_member_monitor_state(pool_names=[pool], members=[members], monitor_states=[monitor_state]) + api.LocalLB.Pool.set_member_monitor_state( + pool_names=[pool], + members=[members], + monitor_states=[monitor_state] + ) + def get_member_monitor_status(api, pool, address, port): members = [{'address': address, 'port': port}] - result = api.LocalLB.Pool.get_member_monitor_status(pool_names=[pool], members=[members])[0][0] + result = api.LocalLB.Pool.get_member_monitor_status( + pool_names=[pool], + members=[members] + )[0][0] result = result.split("MONITOR_STATUS_")[-1].lower() return result + def main(): - argument_spec = f5_argument_spec(); - argument_spec.update(dict( - session_state = dict(type='str', choices=['enabled', 'disabled']), - monitor_state = dict(type='str', choices=['enabled', 'disabled']), - pool = dict(type='str', required=True), - host = dict(type='str', required=True, aliases=['address', 'name']), - port = dict(type='int', required=True), - connection_limit = dict(type='int'), - description = dict(type='str'), - rate_limit = dict(type='int'), - ratio = dict(type='int') - ) + argument_spec = f5_argument_spec() + + meta_args = dict( + session_state=dict(type='str', choices=['enabled', 'disabled']), + monitor_state=dict(type='str', choices=['enabled', 'disabled']), + pool=dict(type='str', required=True), + host=dict(type='str', required=True, aliases=['address', 'name']), + port=dict(type='int', required=True), + connection_limit=dict(type='int'), + description=dict(type='str'), + rate_limit=dict(type='int'), + ratio=dict(type='int'), + preserve_node=dict(type='bool', default=False) ) + argument_spec.update(meta_args) module = AnsibleModule( - argument_spec = argument_spec, + argument_spec=argument_spec, supports_check_mode=True ) - (server,user,password,state,partition,validate_certs) = f5_parse_arguments(module) + if module.params['validate_certs']: + import ssl + if not hasattr(ssl, 'SSLContext'): + module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. 
Either update python or set validate_certs=False on the task') + + server = module.params['server'] + server_port = module.params['server_port'] + user = module.params['user'] + password = module.params['password'] + state = module.params['state'] + partition = module.params['partition'] + validate_certs = module.params['validate_certs'] + session_state = module.params['session_state'] monitor_state = module.params['monitor_state'] pool = fq_name(partition, module.params['pool']) @@ -336,18 +415,16 @@ def main(): host = module.params['host'] address = fq_name(partition, host) port = module.params['port'] + preserve_node = module.params['preserve_node'] - - # sanity check user supplied values - - if (host and not port) or (port and not host): + if (host and port is None) or (port is not None and not host): module.fail_json(msg="both host and port must be supplied") - if 1 > port > 65535: - module.fail_json(msg="valid ports must be in range 1 - 65535") + if 0 > port or port > 65535: + module.fail_json(msg="valid ports must be in range 0 - 65535") try: - api = bigip_api(server, user, password) + api = bigip_api(server, user, password, validate_certs, port=server_port) if not pool_exists(api, pool): module.fail_json(msg="pool %s does not exist" % pool) result = {'changed': False} # default @@ -356,8 +433,11 @@ def main(): if member_exists(api, pool, address, port): if not module.check_mode: remove_pool_member(api, pool, address, port) - deleted = delete_node_address(api, address) - result = {'changed': True, 'deleted': deleted} + if preserve_node: + result = {'changed': True} + else: + deleted = delete_node_address(api, address) + result = {'changed': True, 'deleted': deleted} else: result = {'changed': True} @@ -402,7 +482,7 @@ def main(): if not module.check_mode: set_member_session_enabled_state(api, pool, address, port, session_state) result = {'changed': True} - elif session_state == 'disabled' and session_status != 'force_disabled': + elif session_state == 'disabled' and session_status != 'forced_disabled': if not module.check_mode: set_member_session_enabled_state(api, pool, address, port, session_state) result = {'changed': True} @@ -417,13 +497,13 @@ def main(): set_member_monitor_state(api, pool, address, port, monitor_state) result = {'changed': True} - except Exception, e: + except Exception as e: module.fail_json(msg="received exception: %s" % e) module.exit_json(**result) -# import module snippets from ansible.module_utils.basic import * from ansible.module_utils.f5 import * -main() +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_routedomain.py b/network/f5/bigip_routedomain.py new file mode 100644 index 00000000000..7abe77abac2 --- /dev/null +++ b/network/f5/bigip_routedomain.py @@ -0,0 +1,530 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2016 F5 Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
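Stepping back to the port guard rewritten in the bigip_pool_member hunk above: the old `if 1 > port > 65535` looked plausible but was dead code, because Python chains comparisons. A quick demonstration, assuming nothing beyond the two expressions in the diff:

```python
# Python chains comparisons: "1 > port > 65535" evaluates as
# "(1 > port) and (port > 65535)", which no integer satisfies, so the
# old guard could never fire. The replacement is a plain disjunction,
# and it deliberately admits 0, which bigip_pool treats as a valid
# member port elsewhere in this patch.

def old_check(port):
    return 1 > port > 65535      # always False

def new_check(port):
    return 0 > port or port > 65535

assert not any(old_check(p) for p in (-5, 0, 80, 70000))
assert new_check(-5) and new_check(70000)
assert not (new_check(0) or new_check(80))
```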
+ +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: bigip_routedomain +short_description: Manage route domains on a BIG-IP +description: + - Manage route domains on a BIG-IP +version_added: "2.2" +options: + bwc_policy: + description: + - The bandwidth controller for the route domain. + connection_limit: + description: + - The maximum number of concurrent connections allowed for the + route domain. Setting this to C(0) turns off connection limits. + description: + description: + - Specifies descriptive text that identifies the route domain. + flow_eviction_policy: + description: + - The eviction policy to use with this route domain. Apply an eviction + policy to provide customized responses to flow overflows and slow + flows on the route domain. + id: + description: + - The unique identifying integer representing the route domain. + required: true + parent: + description: + Specifies the route domain the system searches when it cannot + find a route in the configured domain. + required: false + routing_protocol: + description: + - Dynamic routing protocols for the system to use in the route domain. + choices: + - BFD + - BGP + - IS-IS + - OSPFv2 + - OSPFv3 + - PIM + - RIP + - RIPng + service_policy: + description: + - Service policy to associate with the route domain. + state: + description: + - Whether the route domain should exist or not. + required: false + default: present + choices: + - present + - absent + strict: + description: + - Specifies whether the system enforces cross-routing restrictions + or not. + choices: + - enabled + - disabled + vlans: + description: + - VLANs for the system to use in the route domain +notes: + - Requires the f5-sdk Python package on the host. This is as easy as + pip install f5-sdk. 
+extends_documentation_fragment: f5 +requirements: + - f5-sdk +author: + - Tim Rupp (@caphrim007) +''' + +EXAMPLES = ''' +- name: Create a route domain + bigip_routedomain: + id: "1234" + password: "secret" + server: "lb.mydomain.com" + state: "present" + user: "admin" + delegate_to: localhost + +- name: Set VLANs on the route domain + bigip_routedomain: + id: "1234" + password: "secret" + server: "lb.mydomain.com" + state: "present" + user: "admin" + vlans: + - net1 + - foo + delegate_to: localhost +''' + +RETURN = ''' +id: + description: The ID of the route domain that was changed + returned: changed + type: int + sample: 2 +description: + description: The description of the route domain + returned: changed + type: string + sample: "route domain foo" +strict: + description: The new strict isolation setting + returned: changed + type: string + sample: "enabled" +parent: + description: The new parent route domain + returned: changed + type: int + sample: 0 +vlans: + description: List of new VLANs the route domain is applied to + returned: changed + type: list + sample: ['/Common/http-tunnel', '/Common/socks-tunnel'] +routing_protocol: + description: List of routing protocols applied to the route domain + returned: changed + type: list + sample: ['bfd', 'bgp'] +bwc_policy: + description: The new bandwidth controller + returned: changed + type: string + sample: /Common/foo +connection_limit: + description: The new connection limit for the route domain + returned: changed + type: integer + sample: 100 +flow_eviction_policy: + description: The new eviction policy to use with this route domain + returned: changed + type: string + sample: /Common/default-eviction-policy +service_policy: + description: The new service policy to use with this route domain + returned: changed + type: string + sample: /Common-my-service-policy +''' + +try: + from f5.bigip import ManagementRoot + from icontrol.session import iControlUnexpectedHTTPError + HAS_F5SDK = True +except ImportError: + HAS_F5SDK = False + +PROTOCOLS = [ + 'BFD', 'BGP', 'IS-IS', 'OSPFv2', 'OSPFv3', 'PIM', 'RIP', 'RIPng' +] + +STRICTS = ['enabled', 'disabled'] + + +class BigIpRouteDomain(object): + def __init__(self, *args, **kwargs): + if not HAS_F5SDK: + raise F5ModuleError("The python f5-sdk module is required") + + # The params that change in the module + self.cparams = dict() + + kwargs['name'] = str(kwargs['id']) + + # Stores the params that are sent to the module + self.params = kwargs + self.api = ManagementRoot(kwargs['server'], + kwargs['user'], + kwargs['password'], + port=kwargs['server_port']) + + def absent(self): + if not self.exists(): + return False + + if self.params['check_mode']: + return True + + rd = self.api.tm.net.route_domains.route_domain.load( + name=self.params['name'] + ) + rd.delete() + + if self.exists(): + raise F5ModuleError("Failed to delete the route domain") + else: + return True + + def present(self): + if self.exists(): + return self.update() + else: + if self.params['check_mode']: + return True + return self.create() + + def read(self): + """Read information and transform it + + The values that are returned by BIG-IP in the f5-sdk can have encoding + attached to them as well as be completely missing in some cases. + + Therefore, this method will transform the data from the BIG-IP into a + format that is more easily consumable by the rest of the class and the + parameters that are supported by the module. 
+ """ + p = dict() + r = self.api.tm.net.route_domains.route_domain.load( + name=self.params['name'] + ) + + p['id'] = int(r.id) + p['name'] = str(r.name) + + if hasattr(r, 'connectionLimit'): + p['connection_limit'] = int(r.connectionLimit) + if hasattr(r, 'description'): + p['description'] = str(r.description) + if hasattr(r, 'strict'): + p['strict'] = str(r.strict) + if hasattr(r, 'parent'): + p['parent'] = r.parent + if hasattr(r, 'vlans'): + p['vlans'] = list(set([str(x) for x in r.vlans])) + if hasattr(r, 'routingProtocol'): + p['routing_protocol'] = list(set([str(x) for x in r.routingProtocol])) + if hasattr(r, 'flowEvictionPolicy'): + p['flow_eviction_policy'] = str(r.flowEvictionPolicy) + if hasattr(r, 'bwcPolicy'): + p['bwc_policy'] = str(r.bwcPolicy) + if hasattr(r, 'servicePolicy'): + p['service_policy'] = str(r.servicePolicy) + return p + + def domains(self): + result = [] + + domains = self.api.tm.net.route_domains.get_collection() + for domain in domains: + # Just checking for the addition of the partition here for + # different versions of BIG-IP + if '/' + self.params['partition'] + '/' in domain.name: + result.append(domain.name) + else: + full_name = '/%s/%s' % (self.params['partition'], domain.name) + result.append(full_name) + return result + + def create(self): + params = dict() + params['id'] = self.params['id'] + params['name'] = self.params['name'] + + partition = self.params['partition'] + description = self.params['description'] + strict = self.params['strict'] + parent = self.params['parent'] + bwc_policy = self.params['bwc_policy'] + vlans = self.params['vlans'] + routing_protocol = self.params['routing_protocol'] + connection_limit = self.params['connection_limit'] + flow_eviction_policy = self.params['flow_eviction_policy'] + service_policy = self.params['service_policy'] + + if description is not None: + params['description'] = description + + if strict is not None: + params['strict'] = strict + + if parent is not None: + parent = '/%s/%s' % (partition, parent) + if parent in self.domains(): + params['parent'] = parent + else: + raise F5ModuleError( + "The parent route domain was not found" + ) + + if bwc_policy is not None: + policy = '/%s/%s' % (partition, bwc_policy) + params['bwcPolicy'] = policy + + if vlans is not None: + params['vlans'] = [] + for vlan in vlans: + vname = '/%s/%s' % (partition, vlan) + params['vlans'].append(vname) + + if routing_protocol is not None: + params['routingProtocol'] = [] + for protocol in routing_protocol: + if protocol in PROTOCOLS: + params['routingProtocol'].append(protocol) + else: + raise F5ModuleError( + "routing_protocol must be one of: %s" % (PROTOCOLS) + ) + + if connection_limit is not None: + params['connectionLimit'] = connection_limit + + if flow_eviction_policy is not None: + policy = '/%s/%s' % (partition, flow_eviction_policy) + params['flowEvictionPolicy'] = policy + + if service_policy is not None: + policy = '/%s/%s' % (partition, service_policy) + params['servicePolicy'] = policy + + self.api.tm.net.route_domains.route_domain.create(**params) + exists = self.api.tm.net.route_domains.route_domain.exists( + name=self.params['name'] + ) + + if exists: + return True + else: + raise F5ModuleError( + "An error occurred while creating the route domain" + ) + + def update(self): + changed = False + params = dict() + current = self.read() + + check_mode = self.params['check_mode'] + partition = self.params['partition'] + description = self.params['description'] + strict = self.params['strict'] + parent = 
self.params['parent'] + bwc_policy = self.params['bwc_policy'] + vlans = self.params['vlans'] + routing_protocol = self.params['routing_protocol'] + connection_limit = self.params['connection_limit'] + flow_eviction_policy = self.params['flow_eviction_policy'] + service_policy = self.params['service_policy'] + + if description is not None: + if 'description' in current: + if description != current['description']: + params['description'] = description + else: + params['description'] = description + + if strict is not None: + if strict != current['strict']: + params['strict'] = strict + + if parent is not None: + parent = '/%s/%s' % (partition, parent) + if 'parent' in current: + if parent != current['parent']: + params['parent'] = parent + else: + params['parent'] = parent + + if bwc_policy is not None: + policy = '/%s/%s' % (partition, bwc_policy) + if 'bwc_policy' in current: + if policy != current['bwc_policy']: + params['bwcPolicy'] = policy + else: + params['bwcPolicy'] = policy + + if vlans is not None: + tmp = set() + for vlan in vlans: + vname = '/%s/%s' % (partition, vlan) + tmp.add(vname) + tmp = list(tmp) + if 'vlans' in current: + if tmp != current['vlans']: + params['vlans'] = tmp + else: + params['vlans'] = tmp + + if routing_protocol is not None: + tmp = set() + for protocol in routing_protocol: + if protocol in PROTOCOLS: + tmp.add(protocol) + else: + raise F5ModuleError( + "routing_protocol must be one of: %s" % (PROTOCOLS) + ) + tmp = list(tmp) + if 'routing_protocol' in current: + if tmp != current['routing_protocol']: + params['routingProtocol'] = tmp + else: + params['routingProtocol'] = tmp + + if connection_limit is not None: + if connection_limit != current['connection_limit']: + params['connectionLimit'] = connection_limit + + if flow_eviction_policy is not None: + policy = '/%s/%s' % (partition, flow_eviction_policy) + if 'flow_eviction_policy' in current: + if policy != current['flow_eviction_policy']: + params['flowEvictionPolicy'] = policy + else: + params['flowEvictionPolicy'] = policy + + if service_policy is not None: + policy = '/%s/%s' % (partition, service_policy) + if 'service_policy' in current: + if policy != current['service_policy']: + params['servicePolicy'] = policy + else: + params['servicePolicy'] = policy + + if params: + changed = True + self.cparams = camel_dict_to_snake_dict(params) + if check_mode: + return changed + else: + return changed + + try: + rd = self.api.tm.net.route_domains.route_domain.load( + name=self.params['name'] + ) + rd.update(**params) + rd.refresh() + except iControlUnexpectedHTTPError as e: + raise F5ModuleError(e) + + return True + + def exists(self): + return self.api.tm.net.route_domains.route_domain.exists( + name=self.params['name'] + ) + + def flush(self): + result = dict() + state = self.params['state'] + + if self.params['check_mode']: + if value == current: + changed = False + else: + changed = True + else: + if state == "present": + changed = self.present() + current = self.read() + result.update(current) + elif state == "absent": + changed = self.absent() + + result.update(dict(changed=changed)) + return result + + +def main(): + argument_spec = f5_argument_spec() + + meta_args = dict( + id=dict(required=True, type='int'), + description=dict(required=False, default=None), + strict=dict(required=False, default=None, choices=STRICTS), + parent=dict(required=False, type='int', default=None), + vlans=dict(required=False, default=None, type='list'), + routing_protocol=dict(required=False, default=None, 
type='list'), + bwc_policy=dict(required=False, type='str', default=None), + connection_limit=dict(required=False, type='int', default=None), + flow_eviction_policy=dict(required=False, type='str', default=None), + service_policy=dict(required=False, type='str', default=None) + ) + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + try: + obj = BigIpRouteDomain(check_mode=module.check_mode, **module.params) + result = obj.flush() + + module.exit_json(**result) + except F5ModuleError as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.f5 import * + +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_selfip.py b/network/f5/bigip_selfip.py new file mode 100644 index 00000000000..d60dafbf7ce --- /dev/null +++ b/network/f5/bigip_selfip.py @@ -0,0 +1,704 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2016 F5 Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: bigip_selfip +short_description: Manage Self-IPs on a BIG-IP system +description: + - Manage Self-IPs on a BIG-IP system +version_added: "2.2" +options: + address: + description: + - The IP addresses for the new self IP. This value is ignored upon update + as addresses themselves cannot be changed after they are created. + allow_service: + description: + - Configure port lockdown for the Self IP. By default, the Self IP has a + "default deny" policy. This can be changed to allow TCP and UDP ports + as well as specific protocols. This list should contain C(protocol):C(port) + values. + name: + description: + - The self IP to create. + required: true + default: Value of C(address) + netmask: + description: + - The netmasks for the self IP. + required: true + state: + description: + - The state of the variable on the system. When C(present), guarantees + that the Self-IP exists with the provided attributes. When C(absent), + removes the Self-IP from the system. + required: false + default: present + choices: + - absent + - present + traffic_group: + description: + - The traffic group for the self IP addresses in an active-active, + redundant load balancer configuration. + required: false + vlan: + description: + - The VLAN that the new self IPs will be on. + required: true + route_domain: + description: + - The route domain id of the system. + If none, id of the route domain will be "0" (default route domain) + required: false + default: none + version_added: 2.3 +notes: + - Requires the f5-sdk Python package on the host. This is as easy as pip + install f5-sdk. + - Requires the netaddr Python package on the host. 
+extends_documentation_fragment: f5 +requirements: + - netaddr + - f5-sdk +author: + - Tim Rupp (@caphrim007) +''' + +EXAMPLES = ''' +- name: Create Self IP + bigip_selfip: + address: "10.10.10.10" + name: "self1" + netmask: "255.255.255.0" + password: "secret" + server: "lb.mydomain.com" + user: "admin" + validate_certs: "no" + vlan: "vlan1" + delegate_to: localhost + +- name: Create Self IP with a Route Domain + bigip_selfip: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + validate_certs: "no" + name: "self1" + address: "10.10.10.10" + netmask: "255.255.255.0" + vlan: "vlan1" + route_domain: "10" + allow_service: "default" + delegate_to: localhost + +- name: Delete Self IP + bigip_selfip: + name: "self1" + password: "secret" + server: "lb.mydomain.com" + state: "absent" + user: "admin" + validate_certs: "no" + delegate_to: localhost + +- name: Allow management web UI to be accessed on this Self IP + bigip_selfip: + name: "self1" + password: "secret" + server: "lb.mydomain.com" + state: "present" + user: "admin" + validate_certs: "no" + allow_service: + - "tcp:443" + delegate_to: localhost + +- name: Allow HTTPS and SSH access to this Self IP + bigip_selfip: + name: "self1" + password: "secret" + server: "lb.mydomain.com" + state: "present" + user: "admin" + validate_certs: "no" + allow_service: + - "tcp:443" + - "tcp:22" + delegate_to: localhost + +- name: Allow all services access to this Self IP + bigip_selfip: + name: "self1" + password: "secret" + server: "lb.mydomain.com" + state: "present" + user: "admin" + validate_certs: "no" + allow_service: + - all + delegate_to: localhost + +- name: Allow only GRE and IGMP protocols access to this Self IP + bigip_selfip: + name: "self1" + password: "secret" + server: "lb.mydomain.com" + state: "present" + user: "admin" + validate_certs: "no" + allow_service: + - gre:0 + - igmp:0 + delegate_to: localhost + +- name: Allow all TCP, but no other protocols access to this Self IP + bigip_selfip: + name: "self1" + password: "secret" + server: "lb.mydomain.com" + state: "present" + user: "admin" + validate_certs: "no" + allow_service: + - tcp:0 + delegate_to: localhost +''' + +RETURN = ''' +allow_service: + description: Services allowed via this Self IP + returned: changed + type: list + sample: ['igmp:0','tcp:22','udp:53'] +address: + description: The address for the Self IP + returned: created + type: string + sample: "192.0.2.10" +name: + description: The name of the Self IP + returned: + - created + - changed + - deleted + type: string + sample: "self1" +netmask: + description: The netmask of the Self IP + returned: + - changed + - created + type: string + sample: "255.255.255.0" +traffic_group: + description: The traffic group that the Self IP is a member of + returned: + - changed + - created + type: string + sample: "traffic-group-local-only" +vlan: + description: The VLAN set on the Self IP + returned: + - changed + - created + type: string + sample: "vlan1" +''' + +try: + from f5.bigip import ManagementRoot + from icontrol.session import iControlUnexpectedHTTPError + HAS_F5SDK = True +except ImportError: + HAS_F5SDK = False + +try: + from netaddr import IPNetwork, AddrFormatError + HAS_NETADDR = True +except ImportError: + HAS_NETADDR = False + +FLOAT = ['enabled', 'disabled'] +DEFAULT_TG = 'traffic-group-local-only' +ALLOWED_PROTOCOLS = ['eigrp', 'egp', 'gre', 'icmp', 'igmp', 'igp', 'ipip', + 'l2tp', 'ospf', 'pim', 'tcp', 'udp'] + + +class BigIpSelfIp(object): + def __init__(self, *args, **kwargs): + if not HAS_F5SDK: + raise
F5ModuleError("The python f5-sdk module is required") + + # The params that change in the module + self.cparams = dict() + + # Stores the params that are sent to the module + self.params = kwargs + self.api = ManagementRoot(kwargs['server'], + kwargs['user'], + kwargs['password'], + port=kwargs['server_port']) + + def present(self): + changed = False + + if self.exists(): + changed = self.update() + else: + changed = self.create() + + return changed + + def absent(self): + changed = False + + if self.exists(): + changed = self.delete() + + return changed + + def read(self): + """Read information and transform it + + The values that are returned by BIG-IP in the f5-sdk can have encoding + attached to them as well as be completely missing in some cases. + + Therefore, this method will transform the data from the BIG-IP into a + format that is more easily consumable by the rest of the class and the + parameters that are supported by the module. + + :return: List of values currently stored in BIG-IP, formatted for use + in this class. + """ + p = dict() + name = self.params['name'] + partition = self.params['partition'] + r = self.api.tm.net.selfips.selfip.load( + name=name, + partition=partition + ) + + if hasattr(r, 'address'): + p['route_domain'] = str(None) + if '%' in r.address: + ipaddr = [] + ipaddr = r.address.split('%', 1) + rdmask = ipaddr[1].split('/', 1) + r.address = "%s/%s" % (ipaddr[0], rdmask[1]) + p['route_domain'] = str(rdmask[0]) + ipnet = IPNetwork(r.address) + p['address'] = str(ipnet.ip) + p['netmask'] = str(ipnet.netmask) + if hasattr(r, 'trafficGroup'): + p['traffic_group'] = str(r.trafficGroup) + if hasattr(r, 'vlan'): + p['vlan'] = str(r.vlan) + if hasattr(r, 'allowService'): + if r.allowService == 'all': + p['allow_service'] = set(['all']) + else: + p['allow_service'] = set([str(x) for x in r.allowService]) + else: + p['allow_service'] = set(['none']) + p['name'] = name + return p + + def verify_services(self): + """Verifies that a supplied service string has correct format + + The string format for port lockdown is PROTOCOL:PORT. This method + will verify that the provided input matches the allowed protocols + and the port ranges before submitting to BIG-IP. + + The only allowed exceptions to this rule are the following values + + * all + * default + * none + + These are special cases that are handled differently in the API. + "all" is set as a string, "default" is set as a one item list, and + "none" removes the key entirely from the REST API. + + :raises F5ModuleError: + """ + result = [] + for svc in self.params['allow_service']: + if svc in ['all', 'none', 'default']: + result = [svc] + break + + tmp = svc.split(':') + if tmp[0] not in ALLOWED_PROTOCOLS: + raise F5ModuleError( + "The provided protocol '%s' is invalid" % (tmp[0]) + ) + try: + port = int(tmp[1]) + except Exception: + raise F5ModuleError( + "The provided port '%s' is not a number" % (tmp[1]) + ) + + if port < 0 or port > 65535: + raise F5ModuleError( + "The provided port '%s' must be between 0 and 65535" + % (port) + ) + else: + result.append(svc) + return set(result) + + def fmt_services(self, services): + """Returns services formatted for consumption by f5-sdk update + + The BIG-IP endpoint for services takes different values depending on + what you want the "allowed services" to be. 
It can be any of the + following + + - a list containing "protocol:port" values + - the string "all" + - a null value, or None + + This is a convenience function to massage the values the user has + supplied so that they are formatted in such a way that BIG-IP will + accept them and apply the specified policy. + + :param services: The services to format. This is always a Python set + :return: + """ + result = list(services) + if result[0] == 'all': + return 'all' + elif result[0] == 'none': + return None + else: + return list(services) + + def traffic_groups(self): + result = [] + + groups = self.api.tm.cm.traffic_groups.get_collection() + for group in groups: + # Just checking for the addition of the partition here for + # different versions of BIG-IP + if '/' + self.params['partition'] + '/' in group.name: + result.append(group.name) + else: + full_name = '/%s/%s' % (self.params['partition'], group.name) + result.append(str(full_name)) + return result + + def update(self): + changed = False + svcs = [] + params = dict() + current = self.read() + + check_mode = self.params['check_mode'] + address = self.params['address'] + allow_service = self.params['allow_service'] + name = self.params['name'] + netmask = self.params['netmask'] + partition = self.params['partition'] + traffic_group = self.params['traffic_group'] + vlan = self.params['vlan'] + route_domain = self.params['route_domain'] + + if address is not None and address != current['address']: + raise F5ModuleError( + 'Self IP addresses cannot be updated' + ) + + if netmask is not None: + # I ignore the address value here even if they provide it because + # you are not allowed to change it. + try: + address = IPNetwork(current['address']) + + new_addr = "%s/%s" % (address.ip, netmask) + nipnet = IPNetwork(new_addr) + if route_domain is not None: + nipnet = "%s%s%s" % (address.ip, route_domain, netmask) + + cur_addr = "%s/%s" % (current['address'], current['netmask']) + cipnet = IPNetwork(cur_addr) + if route_domain is not None: + cipnet = "%s%s%s" % (current['address'], current['route_domain'], current['netmask']) + + if nipnet != cipnet: + if route_domain is not None: + address = "%s%s%s/%s" % (address.ip, '%', route_domain, netmask) + else: + address = "%s/%s" % (nipnet.ip, nipnet.prefixlen) + params['address'] = address + except AddrFormatError: + raise F5ModuleError( + 'The provided address/netmask value was invalid' + ) + + if traffic_group is not None: + traffic_group = "/%s/%s" % (partition, traffic_group) + if traffic_group not in self.traffic_groups(): + raise F5ModuleError( + 'The specified traffic group was not found' + ) + + if 'traffic_group' in current: + if traffic_group != current['traffic_group']: + params['trafficGroup'] = traffic_group + else: + params['trafficGroup'] = traffic_group + + if vlan is not None: + vlans = self.get_vlans() + vlan = "/%s/%s" % (partition, vlan) + + if 'vlan' in current: + if vlan != current['vlan']: + params['vlan'] = vlan + else: + params['vlan'] = vlan + + if vlan not in vlans: + raise F5ModuleError( + 'The specified VLAN was not found' + ) + + if allow_service is not None: + svcs = self.verify_services() + if 'allow_service' in current: + if svcs != current['allow_service']: + params['allowService'] = self.fmt_services(svcs) + else: + params['allowService'] = self.fmt_services(svcs) + + if params: + changed = True + params['name'] = name + params['partition'] = partition + if check_mode: + return changed + self.cparams = camel_dict_to_snake_dict(params) + if svcs: + 
self.cparams['allow_service'] = list(svcs) + else: + return changed + + r = self.api.tm.net.selfips.selfip.load( + name=name, + partition=partition + ) + r.update(**params) + r.refresh() + + return True + + def get_vlans(self): + """Returns formatted list of VLANs + + VLAN values are stored in BIG-IP using their fully qualified + name, which includes the partition. Therefore, "correct" + values according to BIG-IP look like this + + /Common/vlan1 + + This is in contrast to the format that most users think of VLANs + as being stored in + + vlan1 + + To provide a consistent user experience while not turfing + BIG-IP, we need to massage the values that are provided by the + user so that they include the partition. + + :return: List of vlans formatted with preceding partition + """ + partition = self.params['partition'] + vlans = self.api.tm.net.vlans.get_collection() + return [str("/" + partition + "/" + x.name) for x in vlans] + + def create(self): + params = dict() + + svcs = [] + check_mode = self.params['check_mode'] + address = self.params['address'] + allow_service = self.params['allow_service'] + name = self.params['name'] + netmask = self.params['netmask'] + partition = self.params['partition'] + traffic_group = self.params['traffic_group'] + vlan = self.params['vlan'] + route_domain = self.params['route_domain'] + + if address is None or netmask is None: + raise F5ModuleError( + 'An address and a netmask must be specified' + ) + + if vlan is None: + raise F5ModuleError( + 'A VLAN name must be specified' + ) + else: + vlan = "/%s/%s" % (partition, vlan) + + try: + ipin = "%s/%s" % (address, netmask) + ipnet = IPNetwork(ipin) + if route_domain is not None: + params['address'] = "%s%s%s/%s" % (ipnet.ip, '%', route_domain, ipnet.prefixlen) + else: + params['address'] = "%s/%s" % (ipnet.ip, ipnet.prefixlen) + except AddrFormatError: + raise F5ModuleError( + 'The provided address/netmask value was invalid' + ) + + if traffic_group is None: + params['trafficGroup'] = "/%s/%s" % (partition, DEFAULT_TG) + else: + traffic_group = "/%s/%s" % (partition, traffic_group) + if traffic_group in self.traffic_groups(): + params['trafficGroup'] = traffic_group + else: + raise F5ModuleError( + 'The specified traffic group was not found' + ) + + vlans = self.get_vlans() + if vlan in vlans: + params['vlan'] = vlan + else: + raise F5ModuleError( + 'The specified VLAN was not found' + ) + + if allow_service is not None: + svcs = self.verify_services() + params['allowService'] = self.fmt_services(svcs) + + params['name'] = name + params['partition'] = partition + + self.cparams = camel_dict_to_snake_dict(params) + if svcs: + self.cparams['allow_service'] = list(svcs) + + if check_mode: + return True + + d = self.api.tm.net.selfips.selfip + d.create(**params) + + if self.exists(): + return True + else: + raise F5ModuleError("Failed to create the self IP") + + def delete(self): + params = dict() + check_mode = self.params['check_mode'] + + params['name'] = self.params['name'] + params['partition'] = self.params['partition'] + + self.cparams = camel_dict_to_snake_dict(params) + if check_mode: + return True + + dc = self.api.tm.net.selfips.selfip.load(**params) + dc.delete() + + if self.exists(): + raise F5ModuleError("Failed to delete the self IP") + return True + + def exists(self): + name = self.params['name'] + partition = self.params['partition'] + return self.api.tm.net.selfips.selfip.exists( + name=name, + partition=partition + ) + + def flush(self): + result = dict() + state =
self.params['state'] + + try: + if state == "present": + changed = self.present() + elif state == "absent": + changed = self.absent() + except iControlUnexpectedHTTPError as e: + raise F5ModuleError(str(e)) + + result.update(**self.cparams) + result.update(dict(changed=changed)) + return result + + +def main(): + argument_spec = f5_argument_spec() + + meta_args = dict( + address=dict(required=False, default=None), + allow_service=dict(type='list', default=None), + name=dict(required=True), + netmask=dict(required=False, default=None), + traffic_group=dict(required=False, default=None), + vlan=dict(required=False, default=None), + route_domain=dict(required=False, default=None) + ) + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + try: + if not HAS_NETADDR: + raise F5ModuleError( + "The netaddr python module is required." + ) + + obj = BigIpSelfIp(check_mode=module.check_mode, **module.params) + result = obj.flush() + + module.exit_json(**result) + except F5ModuleError as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.f5 import * + +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_snat_pool.py b/network/f5/bigip_snat_pool.py new file mode 100644 index 00000000000..52341e4dfe8 --- /dev/null +++ b/network/f5/bigip_snat_pool.py @@ -0,0 +1,417 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2016 F5 Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: bigip_snat_pool +short_description: Manage SNAT pools on a BIG-IP. +description: + - Manage SNAT pools on a BIG-IP. +version_added: "2.3" +options: + append: + description: + - When C(yes), will only add members to the SNAT pool. When C(no), will + replace the existing member list with the provided member list. + choices: + - yes + - no + default: no + members: + description: + - List of members to put in the SNAT pool. When a C(state) of present is + provided, this parameter is required. Otherwise, it is optional. + required: false + default: None + aliases: ['member'] + name: + description: The name of the SNAT pool. + required: True + state: + description: + - Whether the SNAT pool should exist or not. + required: false + default: present + choices: + - present + - absent +notes: + - Requires the f5-sdk Python package on the host. This is as easy as + pip install f5-sdk + - Requires the netaddr Python package on the host. 
This is as easy as + pip install netaddr +extends_documentation_fragment: f5 +requirements: + - f5-sdk +author: + - Tim Rupp (@caphrim007) +''' + +EXAMPLES = ''' +- name: Add the SNAT pool 'my-snat-pool' + bigip_snat_pool: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + name: "my-snat-pool" + state: "present" + members: + - 10.10.10.10 + - 20.20.20.20 + delegate_to: localhost + +- name: Change the SNAT pool's members to a single member + bigip_snat_pool: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + name: "my-snat-pool" + state: "present" + member: "30.30.30.30" + delegate_to: localhost + +- name: Append a new list of members to the existing pool + bigip_snat_pool: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + name: "my-snat-pool" + state: "present" + members: + - 10.10.10.10 + - 20.20.20.20 + delegate_to: localhost + +- name: Remove the SNAT pool 'my-snat-pool' + bigip_snat_pool: + server: "lb.mydomain.com" + user: "admin" + password: "secret" + name: "johnd" + state: "absent" + delegate_to: localhost +''' + +RETURN = ''' +members: + description: + - List of members that are part of the SNAT pool. + returned: changed and success + type: list + sample: "['10.10.10.10']" +''' + +try: + from f5.bigip.contexts import TransactionContextManager + from f5.bigip import ManagementRoot + from icontrol.session import iControlUnexpectedHTTPError + + HAS_F5SDK = True +except ImportError: + HAS_F5SDK = False + +try: + from netaddr import IPAddress, AddrFormatError + HAS_NETADDR = True +except ImportError: + HAS_NETADDR = False + + +class BigIpSnatPoolManager(object): + def __init__(self, *args, **kwargs): + self.changed_params = dict() + self.params = kwargs + self.api = None + + def apply_changes(self): + result = dict() + + changed = self.apply_to_running_config() + if changed: + self.save_running_config() + + result.update(**self.changed_params) + result.update(dict(changed=changed)) + return result + + def apply_to_running_config(self): + try: + self.api = self.connect_to_bigip(**self.params) + if self.params['state'] == "present": + return self.present() + elif self.params['state'] == "absent": + return self.absent() + except iControlUnexpectedHTTPError as e: + raise F5ModuleError(str(e)) + + def save_running_config(self): + self.api.tm.sys.config.exec_cmd('save') + + def present(self): + if self.params['members'] is None: + raise F5ModuleError( + "The members parameter must be specified" + ) + + if self.snat_pool_exists(): + return self.update_snat_pool() + else: + return self.ensure_snat_pool_is_present() + + def absent(self): + changed = False + if self.snat_pool_exists(): + changed = self.ensure_snat_pool_is_absent() + return changed + + def connect_to_bigip(self, **kwargs): + return ManagementRoot(kwargs['server'], + kwargs['user'], + kwargs['password'], + port=kwargs['server_port']) + + def read_snat_pool_information(self): + pool = self.load_snat_pool() + return self.format_snat_pool_information(pool) + + def format_snat_pool_information(self, pool): + """Ensure that the pool information is in a standard format + + The SDK provides information back in a format that may change with + the version of BIG-IP being worked with. Therefore, we need to make + sure that the data is formatted in a way that our module expects it. + + Additionally, this takes care of minor variations between Python 2 + and Python 3. 
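+ For example, the SDK may hand member names back as unicode strings
+ on Python 2 but str on Python 3; the str() casts in the helpers
+ below give the module a single consistent type to compare against.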
+ + :param pool: + :return: + """ + result = dict() + result['name'] = str(pool.name) + if hasattr(pool, 'members'): + result['members'] = self.format_current_members(pool) + return result + + def format_current_members(self, pool): + result = set() + partition_prefix = "/{0}/".format(self.params['partition']) + + for member in pool.members: + member = str(member.replace(partition_prefix, '')) + result.update([member]) + return list(result) + + def load_snat_pool(self): + return self.api.tm.ltm.snatpools.snatpool.load( + name=self.params['name'], + partition=self.params['partition'] + ) + + def snat_pool_exists(self): + return self.api.tm.ltm.snatpools.snatpool.exists( + name=self.params['name'], + partition=self.params['partition'] + ) + + def update_snat_pool(self): + params = self.get_changed_parameters() + if params: + self.changed_params = camel_dict_to_snake_dict(params) + if self.params['check_mode']: + return True + else: + return False + params['name'] = self.params['name'] + params['partition'] = self.params['partition'] + self.update_snat_pool_on_device(params) + return True + + def update_snat_pool_on_device(self, params): + tx = self.api.tm.transactions.transaction + with TransactionContextManager(tx) as api: + r = api.tm.ltm.snatpools.snatpool.load( + name=self.params['name'], + partition=self.params['partition'] + ) + r.modify(**params) + + def get_changed_parameters(self): + result = dict() + current = self.read_snat_pool_information() + if self.are_members_changed(current): + result['members'] = self.get_new_member_list(current['members']) + return result + + def are_members_changed(self, current): + if self.params['members'] is None: + return False + if 'members' not in current: + return True + if set(self.params['members']) == set(current['members']): + return False + if not self.params['append']: + return True + + # Checking to see if the supplied list is a subset of the current + # list is only relevant if the `append` parameter is provided. 
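+ # For example (editor's illustration): with current members
+ # ['10.1.1.1', '10.1.1.2'], a request for members=['10.1.1.1'] with
+ # append=yes is a subset of what is already on the device, so no
+ # change is reported; with append=no the same request would replace
+ # the member list and be reported as a change.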
+ new_members = set(self.params['members']) + current_members = set(current['members']) + if new_members.issubset(current_members): + return False + else: + return True + + def get_new_member_list(self, current_members): + result = set() + + if self.params['append']: + result.update(set(current_members)) + result.update(set(self.params['members'])) + else: + result.update(set(self.params['members'])) + return list(result) + + def ensure_snat_pool_is_present(self): + params = self.get_snat_pool_creation_parameters() + self.changed_params = camel_dict_to_snake_dict(params) + if self.params['check_mode']: + return True + self.create_snat_pool_on_device(params) + if self.snat_pool_exists(): + return True + else: + raise F5ModuleError("Failed to create the SNAT pool") + + def get_snat_pool_creation_parameters(self): + members = self.get_formatted_members_list() + return dict( + name=self.params['name'], + partition=self.params['partition'], + members=members + ) + + def get_formatted_members_list(self): + result = set() + try: + for ip in self.params['members']: + address = str(IPAddress(ip)) + result.update([address]) + return list(result) + except AddrFormatError: + raise F5ModuleError( + 'The provided member address is not a valid IP address' + ) + + def create_snat_pool_on_device(self, params): + tx = self.api.tm.transactions.transaction + with TransactionContextManager(tx) as api: + api.tm.ltm.snatpools.snatpool.create(**params) + + def ensure_snat_pool_is_absent(self): + if self.params['check_mode']: + return True + self.delete_snat_pool_from_device() + if self.snat_pool_exists(): + raise F5ModuleError("Failed to delete the SNAT pool") + return True + + def delete_snat_pool_from_device(self): + tx = self.api.tm.transactions.transaction + with TransactionContextManager(tx) as api: + pool = api.tm.ltm.snatpools.snatpool.load( + name=self.params['name'], + partition=self.params['partition'] + ) + pool.delete() + + +class BigIpSnatPoolModuleConfig(object): + def __init__(self): + self.argument_spec = dict() + self.meta_args = dict() + self.supports_check_mode = True + self.states = ['absent', 'present'] + + self.initialize_meta_args() + self.initialize_argument_spec() + + def initialize_meta_args(self): + args = dict( + append=dict( + default=False, + type='bool', + choices=BOOLEANS + ), + name=dict(required=True), + members=dict( + required=False, + default=None, + type='list', + aliases=['member'] + ), + state=dict( + default='present', + choices=self.states + ) + ) + self.meta_args = args + + def initialize_argument_spec(self): + self.argument_spec = f5_argument_spec() + self.argument_spec.update(self.meta_args) + + def create(self): + return AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=self.supports_check_mode + ) + + +def main(): + if not HAS_F5SDK: + raise F5ModuleError("The python f5-sdk module is required") + + if not HAS_NETADDR: + raise F5ModuleError("The python netaddr module is required") + + config = BigIpSnatPoolModuleConfig() + module = config.create() + + try: + obj = BigIpSnatPoolManager( + check_mode=module.check_mode, **module.params + ) + result = obj.apply_changes() + + module.exit_json(**result) + except F5ModuleError as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.f5 import * + +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_ssl_certificate.py b/network/f5/bigip_ssl_certificate.py new file mode 100644 
index 00000000000..fe0a753e834 --- /dev/null +++ b/network/f5/bigip_ssl_certificate.py @@ -0,0 +1,520 @@ +#!/usr/bin/python +# +# (c) 2016, Kevin Coming (@waffie1) +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +module: bigip_ssl_certificate +short_description: Import/Delete certificates from BIG-IP +description: + - This module will import/delete SSL certificates on BIG-IP LTM. + Certificates can be imported from certificate and key files on the local + disk, in PEM format. +version_added: 2.2 +options: + cert_content: + description: + - When used instead of 'cert_src', sets the contents of a certificate directly + to the specified value. This is used with lookup plugins or for anything + with formatting or templating. Either one of C(key_src), + C(key_content), C(cert_src) or C(cert_content) must be provided when + C(state) is C(present). + required: false + key_content: + description: + - When used instead of 'key_src', sets the contents of a certificate key + directly to the specified value. This is used with lookup plugins or for + anything with formatting or templating. Either one of C(key_src), + C(key_content), C(cert_src) or C(cert_content) must be provided when + C(state) is C(present). + required: false + state: + description: + - Certificate and key state. This determines if the provided certificate + and key is to be made C(present) on the device or C(absent). + required: true + default: present + choices: + - present + - absent + partition: + description: + - BIG-IP partition to use when adding/deleting certificate. + required: false + default: Common + name: + description: + - SSL Certificate Name. This is the cert/key pair name used + when importing a certificate/key into the F5. It also + determines the filenames of the objects on the LTM + (:Partition:name.cer_11111_1 and :Partition_name.key_11111_1). + required: true + cert_src: + description: + - This is the local filename of the certificate. Either one of C(key_src), + C(key_content), C(cert_src) or C(cert_content) must be provided when + C(state) is C(present). + required: false + key_src: + description: + - This is the local filename of the private key. Either one of C(key_src), + C(key_content), C(cert_src) or C(cert_content) must be provided when + C(state) is C(present). + required: false + passphrase: + description: + - Passphrase on certificate private key + required: false +notes: + - Requires the f5-sdk Python package on the host. This is as easy as pip + install f5-sdk. + - Requires the netaddr Python package on the host. + - If you use this module, you will not be able to remove the certificates + and keys that are managed, via the web UI. You can only remove them via + tmsh or these modules. 
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk >= 1.5.0
+ - BigIP >= v12
+author:
+ - Kevin Coming (@waffie1)
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Import PEM Certificate from local disk
+ bigip_ssl_certificate:
+ name: "certificate-name"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ cert_src: "/path/to/cert.crt"
+ key_src: "/path/to/key.key"
+ delegate_to: localhost
+
+- name: Use a file lookup to import PEM Certificate
+ bigip_ssl_certificate:
+ name: "certificate-name"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "present"
+ cert_content: "{{ lookup('file', '/path/to/cert.crt') }}"
+ key_content: "{{ lookup('file', '/path/to/key.key') }}"
+ delegate_to: localhost
+
+- name: "Delete Certificate"
+ bigip_ssl_certificate:
+ name: "certificate-name"
+ server: "lb.mydomain.com"
+ user: "admin"
+ password: "secret"
+ state: "absent"
+ delegate_to: localhost
+'''
+
+RETURN = '''
+cert_name:
+ description: >
+ The name of the SSL certificate. The C(cert_name) and
+ C(key_name) will be equal to each other.
+ returned:
+ - created
+ - changed
+ - deleted
+ type: string
+ sample: "cert1"
+key_name:
+ description: >
+ The name of the SSL certificate key. The C(key_name) and
+ C(cert_name) will be equal to each other.
+ returned:
+ - created
+ - changed
+ - deleted
+ type: string
+ sample: "key1"
+partition:
+ description: Partition in which the cert/key was created
+ returned:
+ - changed
+ - created
+ - deleted
+ type: string
+ sample: "Common"
+key_checksum:
+ description: SHA1 checksum of the key that was provided
+ returned:
+ - changed
+ - created
+ type: string
+ sample: "cf23df2207d99a74fbe169e3eba035e633b65d94"
+cert_checksum:
+ description: SHA1 checksum of the cert that was provided
+ returned:
+ - changed
+ - created
+ type: string
+ sample: "f7ff9e8b7bb2e09b70935a5d785e0cc5d9d0abf0"
+'''
+
+
+try:
+ from f5.bigip.contexts import TransactionContextManager
+ from f5.bigip import ManagementRoot
+ from icontrol.session import iControlUnexpectedHTTPError
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+
+import hashlib
+import StringIO
+
+
+class BigIpSslCertificate(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ required_args = ['key_content', 'key_src', 'cert_content', 'cert_src']
+
+ ksource = kwargs['key_src']
+ if ksource:
+ with open(ksource) as f:
+ kwargs['key_content'] = f.read()
+
+ csource = kwargs['cert_src']
+ if csource:
+ with open(csource) as f:
+ kwargs['cert_content'] = f.read()
+
+ if kwargs['state'] == 'present':
+ if not any(kwargs[k] is not None for k in required_args):
+ raise F5ModuleError(
+ "Either 'key_content', 'key_src', 'cert_content' or "
+ "'cert_src' must be provided"
+ )
+
+ # This is the remote BIG-IP path from where it will look for certs
+ # to install.
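+ # (Files pushed through the iControl REST file-transfer worker end
+ # up in this directory on the BIG-IP, which is why the create calls
+ # below reference the uploads with file:// source paths.)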
+ self.dlpath = '/var/config/rest/downloads' + + # The params that change in the module + self.cparams = dict() + + # Stores the params that are sent to the module + self.params = kwargs + self.api = ManagementRoot(kwargs['server'], + kwargs['user'], + kwargs['password'], + port=kwargs['server_port']) + + def exists(self): + cert = self.cert_exists() + key = self.key_exists() + + if cert and key: + return True + else: + return False + + def get_hash(self, content): + k = hashlib.sha1() + s = StringIO.StringIO(content) + while True: + data = s.read(1024) + if not data: + break + k.update(data) + return k.hexdigest() + + def present(self): + current = self.read() + changed = False + do_key = False + do_cert = False + chash = None + khash = None + + check_mode = self.params['check_mode'] + name = self.params['name'] + partition = self.params['partition'] + cert_content = self.params['cert_content'] + key_content = self.params['key_content'] + passphrase = self.params['passphrase'] + + # Technically you dont need to provide us with anything in the form + # of content for your cert, but that's kind of illogical, so we just + # return saying you didn't "do" anything if you left the cert and keys + # empty. + if not cert_content and not key_content: + return False + + if key_content is not None: + if 'key_checksum' in current: + khash = self.get_hash(key_content) + if khash not in current['key_checksum']: + do_key = "update" + else: + do_key = "create" + + if cert_content is not None: + if 'cert_checksum' in current: + chash = self.get_hash(cert_content) + if chash not in current['cert_checksum']: + do_cert = "update" + else: + do_cert = "create" + + if do_cert or do_key: + changed = True + params = dict() + params['cert_name'] = name + params['key_name'] = name + params['partition'] = partition + if khash: + params['key_checksum'] = khash + if chash: + params['cert_checksum'] = chash + self.cparams = params + + if check_mode: + return changed + + if not do_cert and not do_key: + return False + + tx = self.api.tm.transactions.transaction + with TransactionContextManager(tx) as api: + if do_cert: + # Upload the content of a certificate as a StringIO object + cstring = StringIO.StringIO(cert_content) + filename = "%s.crt" % (name) + filepath = os.path.join(self.dlpath, filename) + api.shared.file_transfer.uploads.upload_stringio( + cstring, + filename + ) + + if do_cert == "update": + # Install the certificate + params = { + 'name': name, + 'partition': partition + } + cert = api.tm.sys.file.ssl_certs.ssl_cert.load(**params) + + # This works because, while the source path is the same, + # calling update causes the file to be re-read + cert.update() + changed = True + elif do_cert == "create": + # Install the certificate + params = { + 'sourcePath': "file://" + filepath, + 'name': name, + 'partition': partition + } + api.tm.sys.file.ssl_certs.ssl_cert.create(**params) + changed = True + + if do_key: + # Upload the content of a certificate key as a StringIO object + kstring = StringIO.StringIO(key_content) + filename = "%s.key" % (name) + filepath = os.path.join(self.dlpath, filename) + api.shared.file_transfer.uploads.upload_stringio( + kstring, + filename + ) + + if do_key == "update": + # Install the key + params = { + 'name': name, + 'partition': partition + } + key = api.tm.sys.file.ssl_keys.ssl_key.load(**params) + + params = dict() + + if passphrase: + params['passphrase'] = passphrase + else: + params['passphrase'] = None + + key.update(**params) + changed = True + elif do_key == "create": + 
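# Unlike the "update" branch above, a key that does not yet exist
+ # on the device cannot be updated in place; it has to be created
+ # from the uploaded file instead. +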
# Install the key
+ params = {
+ 'sourcePath': "file://" + filepath,
+ 'name': name,
+ 'partition': partition
+ }
+ if passphrase:
+ params['passphrase'] = self.params['passphrase']
+ else:
+ params['passphrase'] = None
+
+ api.tm.sys.file.ssl_keys.ssl_key.create(**params)
+ changed = True
+ return changed
+
+ def key_exists(self):
+ return self.api.tm.sys.file.ssl_keys.ssl_key.exists(
+ name=self.params['name'],
+ partition=self.params['partition']
+ )
+
+ def cert_exists(self):
+ return self.api.tm.sys.file.ssl_certs.ssl_cert.exists(
+ name=self.params['name'],
+ partition=self.params['partition']
+ )
+
+ def read(self):
+ p = dict()
+ name = self.params['name']
+ partition = self.params['partition']
+
+ if self.key_exists():
+ key = self.api.tm.sys.file.ssl_keys.ssl_key.load(
+ name=name,
+ partition=partition
+ )
+ if hasattr(key, 'checksum'):
+ p['key_checksum'] = str(key.checksum)
+
+ if self.cert_exists():
+ cert = self.api.tm.sys.file.ssl_certs.ssl_cert.load(
+ name=name,
+ partition=partition
+ )
+ if hasattr(cert, 'checksum'):
+ p['cert_checksum'] = str(cert.checksum)
+
+ p['name'] = name
+ return p
+
+ def flush(self):
+ result = dict()
+ state = self.params['state']
+
+ try:
+ if state == "present":
+ changed = self.present()
+ elif state == "absent":
+ changed = self.absent()
+ except iControlUnexpectedHTTPError as e:
+ raise F5ModuleError(str(e))
+
+ result.update(**self.cparams)
+ result.update(dict(changed=changed))
+ return result
+
+ def absent(self):
+ changed = False
+
+ if self.exists():
+ changed = self.delete()
+
+ return changed
+
+ def delete(self):
+ changed = False
+
+ check_mode = self.params['check_mode']
+
+ delete_cert = self.cert_exists()
+ delete_key = self.key_exists()
+
+ if not delete_cert and not delete_key:
+ return changed
+
+ if check_mode:
+ params = dict()
+ params['cert_name'] = self.params['name']
+ params['key_name'] = self.params['name']
+ params['partition'] = self.params['partition']
+ self.cparams = params
+ return True
+
+ tx = self.api.tm.transactions.transaction
+ with TransactionContextManager(tx) as api:
+ if delete_cert:
+ # Delete the certificate
+ c = api.tm.sys.file.ssl_certs.ssl_cert.load(
+ name=self.params['name'],
+ partition=self.params['partition']
+ )
+ c.delete()
+ changed = True
+
+ if delete_key:
+ # Delete the certificate key
+ k = api.tm.sys.file.ssl_keys.ssl_key.load(
+ name=self.params['name'],
+ partition=self.params['partition']
+ )
+ k.delete()
+ changed = True
+ return changed
+
+
+def main():
+ argument_spec = f5_argument_spec()
+
+ meta_args = dict(
+ name=dict(type='str', required=True),
+ cert_content=dict(type='str', default=None),
+ cert_src=dict(type='path', default=None),
+ key_content=dict(type='str', default=None),
+ key_src=dict(type='path', default=None),
+ passphrase=dict(type='str', default=None, no_log=True)
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['key_content', 'key_src'],
+ ['cert_content', 'cert_src']
+ ]
+ )
+
+ try:
+ obj = BigIpSslCertificate(check_mode=module.check_mode,
+ **module.params)
+ result = obj.flush()
+ module.exit_json(**result)
+ except F5ModuleError as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.f5 import *
+
+if __name__ == '__main__':
+ main() diff --git a/network/f5/bigip_sys_db.py b/network/f5/bigip_sys_db.py new file mode 100644 index 00000000000..b451461b9c2 --- /dev/null +++ b/network/f5/bigip_sys_db.py @@ -0,0 +1,227 @@ +#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 F5 Networks Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: bigip_sys_db
+short_description: Manage BIG-IP system database variables
+description:
+ - Manage BIG-IP system database variables
+version_added: "2.2"
+options:
+ key:
+ description:
+ - The database variable to manipulate.
+ required: true
+ state:
+ description:
+ - The state of the variable on the system. When C(present), guarantees
+ that an existing variable is set to C(value). When C(reset), sets the
+ variable back to its default value. At least one of C(value) and state
+ C(reset) is required.
+ required: false
+ default: present
+ choices:
+ - present
+ - reset
+ value:
+ description:
+ - The value to set the key to. At least one of C(value) and state
+ C(reset) is required.
+ required: false
+notes:
+ - Requires the f5-sdk Python package on the host. This is as easy as pip
+ install f5-sdk.
+ - Requires BIG-IP version 12.0.0 or greater
+extends_documentation_fragment: f5
+requirements:
+ - f5-sdk
+author:
+ - Tim Rupp (@caphrim007)
+'''
+
+EXAMPLES = '''
+- name: Set the boot.quiet DB variable on the BIG-IP
+ bigip_sys_db:
+ user: "admin"
+ password: "secret"
+ server: "lb.mydomain.com"
+ key: "boot.quiet"
+ value: "disable"
+ delegate_to: localhost
+
+- name: Disable the initial setup screen
+ bigip_sys_db:
+ user: "admin"
+ password: "secret"
+ server: "lb.mydomain.com"
+ key: "setup.run"
+ value: "false"
+ delegate_to: localhost
+
+- name: Reset the initial setup screen
+ bigip_sys_db:
+ user: "admin"
+ password: "secret"
+ server: "lb.mydomain.com"
+ key: "setup.run"
+ state: "reset"
+ delegate_to: localhost
+'''
+
+RETURN = '''
+name:
+ description: The key in the system database that was specified
+ returned: changed and success
+ type: string
+ sample: "setup.run"
+default_value:
+ description: The default value of the key
+ returned: changed and success
+ type: string
+ sample: "true"
+value:
+ description: The value that you set the key to
+ returned: changed and success
+ type: string
+ sample: "false"
+'''
+
+try:
+ from f5.bigip import ManagementRoot
+ HAS_F5SDK = True
+except ImportError:
+ HAS_F5SDK = False
+
+
+class BigIpSysDb(object):
+ def __init__(self, *args, **kwargs):
+ if not HAS_F5SDK:
+ raise F5ModuleError("The python f5-sdk module is required")
+
+ self.params = kwargs
+ self.api = ManagementRoot(kwargs['server'],
+ kwargs['user'],
+ kwargs['password'],
+ port=kwargs['server_port'])
+
+ def flush(self):
+ result = dict()
+ state = self.params['state']
+ value = self.params['value']
+
+ if state != 'reset' and not value:
+ raise F5ModuleError(
+ "When setting a key, a value must be supplied"
+ )
+
+ current = self.read()
+
+ if self.params['check_mode']:
+ if value == current.value:
+ changed = False
+ else:
+ changed = True
+ else:
+ if
state == "present": + changed = self.present() + elif state == "reset": + changed = self.reset() + current = self.read() + result.update( + name=current.name, + default_value=current.defaultValue, + value=current.value + ) + + result.update(dict(changed=changed)) + return result + + def read(self): + dbs = self.api.tm.sys.dbs.db.load( + name=self.params['key'] + ) + return dbs + + def present(self): + current = self.read() + + if current.value == self.params['value']: + return False + + current.update(value=self.params['value']) + current.refresh() + + if current.value != self.params['value']: + raise F5ModuleError( + "Failed to set the DB variable" + ) + return True + + def reset(self): + current = self.read() + + default = current.defaultValue + if current.value == default: + return False + + current.update(value=default) + current.refresh() + + if current.value != current.defaultValue: + raise F5ModuleError( + "Failed to reset the DB variable" + ) + + return True + + +def main(): + argument_spec = f5_argument_spec() + + meta_args = dict( + key=dict(required=True), + state=dict(default='present', choices=['present', 'reset']), + value=dict(required=False, default=None) + ) + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + try: + obj = BigIpSysDb(check_mode=module.check_mode, **module.params) + result = obj.flush() + + module.exit_json(**result) + except F5ModuleError as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * + +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_sys_global.py b/network/f5/bigip_sys_global.py new file mode 100644 index 00000000000..7e6cfd78064 --- /dev/null +++ b/network/f5/bigip_sys_global.py @@ -0,0 +1,430 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2016 F5 Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: bigip_sys_global +short_description: Manage BIG-IP global settings. +description: + - Manage BIG-IP global settings. +version_added: "2.3" +options: + banner_text: + description: + - Specifies the text to present in the advisory banner. + console_timeout: + description: + - Specifies the number of seconds of inactivity before the system logs + off a user that is logged on. + gui_setup: + description: + - C(enable) or C(disabled) the Setup utility in the browser-based + Configuration utility + choices: + - enabled + - disabled + lcd_display: + description: + - Specifies, when C(enabled), that the system menu displays on the + LCD screen on the front of the unit. This setting has no effect + when used on the VE platform. 
+ choices: + - enabled + - disabled + mgmt_dhcp: + description: + - Specifies whether or not to enable DHCP client on the management + interface + choices: + - enabled + - disabled + net_reboot: + description: + - Specifies, when C(enabled), that the next time you reboot the system, + the system boots to an ISO image on the network, rather than an + internal media drive. + choices: + - enabled + - disabled + quiet_boot: + description: + - Specifies, when C(enabled), that the system suppresses informational + text on the console during the boot cycle. When C(disabled), the + system presents messages and informational text on the console during + the boot cycle. + security_banner: + description: + - Specifies whether the system displays an advisory message on the + login screen. + choices: + - enabled + - disabled + state: + description: + - The state of the variable on the system. When C(present), guarantees + that an existing variable is set to C(value). + required: false + default: present + choices: + - present +notes: + - Requires the f5-sdk Python package on the host. This is as easy as pip + install f5-sdk. +extends_documentation_fragment: f5 +requirements: + - f5-sdk +author: + - Tim Rupp (@caphrim007) +''' + +EXAMPLES = ''' +- name: Disable the setup utility + bigip_sys_global: + gui_setup: "disabled" + password: "secret" + server: "lb.mydomain.com" + user: "admin" + state: "present" + delegate_to: localhost +''' + +RETURN = ''' +banner_text: + description: The new text to present in the advisory banner. + returned: changed + type: string + sample: "This is a corporate device. Do not touch." +console_timeout: + description: > + The new number of seconds of inactivity before the system + logs off a user that is logged on. + returned: changed + type: integer + sample: 600 +gui_setup: + description: The new setting for the Setup utility. + returned: changed + type: string + sample: enabled +lcd_display: + description: The new setting for displaying the system menu on the LCD. + returned: changed + type: string + sample: enabled +mgmt_dhcp: + description: > + The new setting for whether the mgmt interface should DHCP + or not + returned: changed + type: string + sample: enabled +net_reboot: + description: > + The new setting for whether the system should boot to an ISO on the + network or not + returned: changed + type: string + sample: enabled +quiet_boot: + description: > + The new setting for whether the system should suppress information to + the console during boot or not. 
+ returned: changed + type: string + sample: enabled +security_banner: + description: > + The new setting for whether the system should display an advisory message + on the login screen or not + returned: changed + type: string + sample: enabled +''' + +try: + from f5.bigip.contexts import TransactionContextManager + from f5.bigip import ManagementRoot + from icontrol.session import iControlUnexpectedHTTPError + HAS_F5SDK = True +except ImportError: + HAS_F5SDK = False + + +class BigIpSysGlobalManager(object): + def __init__(self, *args, **kwargs): + self.changed_params = dict() + self.params = kwargs + self.api = None + + def apply_changes(self): + result = dict() + + changed = self.apply_to_running_config() + + result.update(**self.changed_params) + result.update(dict(changed=changed)) + return result + + def apply_to_running_config(self): + try: + self.api = self.connect_to_bigip(**self.params) + return self.update_sys_global_settings() + except iControlUnexpectedHTTPError as e: + raise F5ModuleError(str(e)) + + def connect_to_bigip(self, **kwargs): + return ManagementRoot(kwargs['server'], + kwargs['user'], + kwargs['password'], + port=kwargs['server_port']) + + def read_sys_global_information(self): + settings = self.load_sys_global() + return self.format_sys_global_information(settings) + + def load_sys_global(self): + return self.api.tm.sys.global_settings.load() + + def get_changed_parameters(self): + result = dict() + current = self.read_sys_global_information() + if self.security_banner_is_changed(current): + result['guiSecurityBanner'] = self.params['security_banner'] + if self.banner_text_is_changed(current): + result['guiSecurityBannerText'] = self.params['banner_text'] + if self.gui_setup_is_changed(current): + result['guiSetup'] = self.params['gui_setup'] + if self.lcd_display_is_changed(current): + result['lcdDisplay'] = self.params['lcd_display'] + if self.mgmt_dhcp_is_changed(current): + result['mgmtDhcp'] = self.params['mgmt_dhcp'] + if self.net_reboot_is_changed(current): + result['netReboot'] = self.params['net_reboot'] + if self.quiet_boot_is_changed(current): + result['quietBoot'] = self.params['quiet_boot'] + if self.console_timeout_is_changed(current): + result['consoleInactivityTimeout'] = self.params['console_timeout'] + return result + + def security_banner_is_changed(self, current): + if self.params['security_banner'] is None: + return False + if 'security_banner' not in current: + return True + if self.params['security_banner'] == current['security_banner']: + return False + else: + return True + + def banner_text_is_changed(self, current): + if self.params['banner_text'] is None: + return False + if 'banner_text' not in current: + return True + if self.params['banner_text'] == current['banner_text']: + return False + else: + return True + + def gui_setup_is_changed(self, current): + if self.params['gui_setup'] is None: + return False + if 'gui_setup' not in current: + return True + if self.params['gui_setup'] == current['gui_setup']: + return False + else: + return True + + def lcd_display_is_changed(self, current): + if self.params['lcd_display'] is None: + return False + if 'lcd_display' not in current: + return True + if self.params['lcd_display'] == current['lcd_display']: + return False + else: + return True + + def mgmt_dhcp_is_changed(self, current): + if self.params['mgmt_dhcp'] is None: + return False + if 'mgmt_dhcp' not in current: + return True + if self.params['mgmt_dhcp'] == current['mgmt_dhcp']: + return False + else: + return True + + def 
net_reboot_is_changed(self, current): + if self.params['net_reboot'] is None: + return False + if 'net_reboot' not in current: + return True + if self.params['net_reboot'] == current['net_reboot']: + return False + else: + return True + + def quiet_boot_is_changed(self, current): + if self.params['quiet_boot'] is None: + return False + if 'quiet_boot' not in current: + return True + if self.params['quiet_boot'] == current['quiet_boot']: + return False + else: + return True + + def console_timeout_is_changed(self, current): + if self.params['console_timeout'] is None: + return False + if 'console_timeout' not in current: + return True + if self.params['console_timeout'] == current['console_timeout']: + return False + else: + return True + + def format_sys_global_information(self, settings): + result = dict() + if hasattr(settings, 'guiSecurityBanner'): + result['security_banner'] = str(settings.guiSecurityBanner) + if hasattr(settings, 'guiSecurityBannerText'): + result['banner_text'] = str(settings.guiSecurityBannerText) + if hasattr(settings, 'guiSetup'): + result['gui_setup'] = str(settings.guiSetup) + if hasattr(settings, 'lcdDisplay'): + result['lcd_display'] = str(settings.lcdDisplay) + if hasattr(settings, 'mgmtDhcp'): + result['mgmt_dhcp'] = str(settings.mgmtDhcp) + if hasattr(settings, 'netReboot'): + result['net_reboot'] = str(settings.netReboot) + if hasattr(settings, 'quietBoot'): + result['quiet_boot'] = str(settings.quietBoot) + if hasattr(settings, 'consoleInactivityTimeout'): + result['console_timeout'] = int(settings.consoleInactivityTimeout) + return result + + def update_sys_global_settings(self): + params = self.get_changed_parameters() + if params: + self.changed_params = camel_dict_to_snake_dict(params) + if self.params['check_mode']: + return True + else: + return False + self.update_sys_global_settings_on_device(params) + return True + + def update_sys_global_settings_on_device(self, params): + tx = self.api.tm.transactions.transaction + with TransactionContextManager(tx) as api: + r = api.tm.sys.global_settings.load() + r.update(**params) + + +class BigIpSysGlobalModuleConfig(object): + def __init__(self): + self.argument_spec = dict() + self.meta_args = dict() + self.supports_check_mode = True + self.states = ['present'] + self.on_off_choices = ['enabled', 'disabled'] + + self.initialize_meta_args() + self.initialize_argument_spec() + + def initialize_meta_args(self): + args = dict( + security_banner=dict( + required=False, + choices=self.on_off_choices, + default=None + ), + banner_text=dict(required=False, default=None), + gui_setup=dict( + required=False, + choices=self.on_off_choices, + default=None + ), + lcd_display=dict( + required=False, + choices=self.on_off_choices, + default=None + ), + mgmt_dhcp=dict( + required=False, + choices=self.on_off_choices, + default=None + ), + net_reboot=dict( + required=False, + choices=self.on_off_choices, + default=None + ), + quiet_boot=dict( + required=False, + choices=self.on_off_choices, + default=None + ), + console_timeout=dict(required=False, type='int', default=None), + state=dict(default='present', choices=['present']) + ) + self.meta_args = args + + def initialize_argument_spec(self): + self.argument_spec = f5_argument_spec() + self.argument_spec.update(self.meta_args) + + def create(self): + return AnsibleModule( + argument_spec=self.argument_spec, + supports_check_mode=self.supports_check_mode + ) + + +def main(): + if not HAS_F5SDK: + raise F5ModuleError("The python f5-sdk module is required") + + config = 
BigIpSysGlobalModuleConfig() + module = config.create() + + try: + obj = BigIpSysGlobalManager( + check_mode=module.check_mode, **module.params + ) + result = obj.apply_changes() + + module.exit_json(**result) + except F5ModuleError as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.f5 import * + +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_virtual_server.py b/network/f5/bigip_virtual_server.py new file mode 100644 index 00000000000..ddcf2cd0e6a --- /dev/null +++ b/network/f5/bigip_virtual_server.py @@ -0,0 +1,717 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Etienne Carriere +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: bigip_virtual_server +short_description: "Manages F5 BIG-IP LTM virtual servers" +description: + - "Manages F5 BIG-IP LTM virtual servers via iControl SOAP API" +version_added: "2.1" +author: + - Etienne Carriere (@Etienne-Carriere) + - Tim Rupp (@caphrim007) +notes: + - "Requires BIG-IP software version >= 11" + - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" + - "Best run as a local_action in your playbook" +requirements: + - bigsuds +options: + state: + description: + - Virtual Server state + - Absent, delete the VS if present + - C(present) (and its synonym enabled), create if needed the VS and set + state to enabled + - C(disabled), create if needed the VS and set state to disabled + required: false + default: present + choices: + - present + - absent + - enabled + - disabled + aliases: [] + partition: + description: + - Partition + required: false + default: 'Common' + name: + description: + - Virtual server name + required: true + aliases: + - vs + destination: + description: + - Destination IP of the virtual server (only host is currently supported). + Required when state=present and vs does not exist. + required: true + aliases: + - address + - ip + port: + description: + - Port of the virtual server . Required when state=present and vs does not exist + required: false + default: None + all_profiles: + description: + - List of all Profiles (HTTP,ClientSSL,ServerSSL,etc) that must be used + by the virtual server + required: false + default: None + all_rules: + version_added: "2.2" + description: + - List of rules to be applied in priority order + required: false + default: None + enabled_vlans: + version_added: "2.2" + description: + - List of vlans to be enabled. When a VLAN named C(ALL) is used, all + VLANs will be allowed. 
+ required: false + default: None + pool: + description: + - Default pool for the virtual server + required: false + default: None + snat: + description: + - Source network address policy + required: false + choices: + - None + - Automap + - Name of a SNAT pool (eg "/Common/snat_pool_name") to enable SNAT with the specific pool + default: None + default_persistence_profile: + description: + - Default Profile which manages the session persistence + required: false + default: None + route_advertisement_state: + description: + - Enable route advertisement for destination + required: false + default: disabled + version_added: "2.3" + description: + description: + - Virtual server description + required: false + default: None +extends_documentation_fragment: f5 +''' + +EXAMPLES = ''' +- name: Add virtual server + bigip_virtual_server: + server: lb.mydomain.net + user: admin + password: secret + state: present + partition: MyPartition + name: myvirtualserver + destination: "{{ ansible_default_ipv4['address'] }}" + port: 443 + pool: "{{ mypool }}" + snat: Automap + description: Test Virtual Server + all_profiles: + - http + - clientssl + enabled_vlans: + - /Common/vlan2 + delegate_to: localhost + +- name: Modify Port of the Virtual Server + bigip_virtual_server: + server: lb.mydomain.net + user: admin + password: secret + state: present + partition: MyPartition + name: myvirtualserver + port: 8080 + delegate_to: localhost + +- name: Delete virtual server + bigip_virtual_server: + server: lb.mydomain.net + user: admin + password: secret + state: absent + partition: MyPartition + name: myvirtualserver + delegate_to: localhost +''' + +RETURN = ''' +--- +deleted: + description: Name of a virtual server that was deleted + returned: changed + type: string + sample: "my-virtual-server" +''' + + +# map of state values +STATES = { + 'enabled': 'STATE_ENABLED', + 'disabled': 'STATE_DISABLED' +} + +STATUSES = { + 'enabled': 'SESSION_STATUS_ENABLED', + 'disabled': 'SESSION_STATUS_DISABLED', + 'offline': 'SESSION_STATUS_FORCED_DISABLED' +} + + +def vs_exists(api, vs): + # hack to determine if pool exists + result = False + try: + api.LocalLB.VirtualServer.get_object_status(virtual_servers=[vs]) + result = True + except bigsuds.OperationFailed as e: + if "was not found" in str(e): + result = False + else: + # genuine exception + raise + return result + + +def vs_create(api, name, destination, port, pool): + _profiles = [[{'profile_context': 'PROFILE_CONTEXT_TYPE_ALL', 'profile_name': 'tcp'}]] + created = False + # a bit of a hack to handle concurrent runs of this module. + # even though we've checked the vs doesn't exist, + # it may exist by the time we run create_vs(). + # this catches the exception and does something smart + # about it! 
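+ # In outline (editor's illustration, not module code), the idiom is:
+ #
+ # try:
+ # create(resource)
+ # except AlreadyExists:
+ # pass # another run won the race; the desired state exists
+ #
+ # which keeps concurrent playbook runs idempotent.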
+ try: + api.LocalLB.VirtualServer.create( + definitions=[{'name': [name], 'address': [destination], 'port': port, 'protocol': 'PROTOCOL_TCP'}], + wildmasks=['255.255.255.255'], + resources=[{'type': 'RESOURCE_TYPE_POOL', 'default_pool_name': pool}], + profiles=_profiles) + created = True + return created + except bigsuds.OperationFailed as e: + if "already exists" not in str(e): + raise Exception('Error on creating Virtual Server : %s' % e) + + +def vs_remove(api, name): + api.LocalLB.VirtualServer.delete_virtual_server( + virtual_servers=[name] + ) + + +def get_rules(api, name): + return api.LocalLB.VirtualServer.get_rule( + virtual_servers=[name] + )[0] + + +def set_rules(api, name, rules_list): + updated = False + if rules_list is None: + return False + rules_list = list(enumerate(rules_list)) + try: + current_rules = map(lambda x: (x['priority'], x['rule_name']), get_rules(api, name)) + to_add_rules = [] + for i, x in rules_list: + if (i, x) not in current_rules: + to_add_rules.append({'priority': i, 'rule_name': x}) + to_del_rules = [] + for i, x in current_rules: + if (i, x) not in rules_list: + to_del_rules.append({'priority': i, 'rule_name': x}) + if len(to_del_rules) > 0: + api.LocalLB.VirtualServer.remove_rule( + virtual_servers=[name], + rules=[to_del_rules] + ) + updated = True + if len(to_add_rules) > 0: + api.LocalLB.VirtualServer.add_rule( + virtual_servers=[name], + rules=[to_add_rules] + ) + updated = True + return updated + except bigsuds.OperationFailed as e: + raise Exception('Error on setting rules : %s' % e) + + +def get_profiles(api, name): + return api.LocalLB.VirtualServer.get_profile( + virtual_servers=[name] + )[0] + + +def set_profiles(api, name, profiles_list): + updated = False + try: + if profiles_list is None: + return False + current_profiles = list(map(lambda x: x['profile_name'], get_profiles(api, name))) + to_add_profiles = [] + for x in profiles_list: + if x not in current_profiles: + to_add_profiles.append({'profile_context': 'PROFILE_CONTEXT_TYPE_ALL', 'profile_name': x}) + to_del_profiles = [] + for x in current_profiles: + if (x not in profiles_list) and (x != "/Common/tcp"): + to_del_profiles.append({'profile_context': 'PROFILE_CONTEXT_TYPE_ALL', 'profile_name': x}) + if len(to_del_profiles) > 0: + api.LocalLB.VirtualServer.remove_profile( + virtual_servers=[name], + profiles=[to_del_profiles] + ) + updated = True + if len(to_add_profiles) > 0: + api.LocalLB.VirtualServer.add_profile( + virtual_servers=[name], + profiles=[to_add_profiles] + ) + updated = True + return updated + except bigsuds.OperationFailed as e: + raise Exception('Error on setting profiles : %s' % e) + + +def get_vlan(api, name): + return api.LocalLB.VirtualServer.get_vlan( + virtual_servers=[name] + )[0] + + +def set_enabled_vlans(api, name, vlans_enabled_list): + updated = False + to_add_vlans = [] + try: + if vlans_enabled_list is None: + return updated + current_vlans = get_vlan(api, name) + + # Set allowed list back to default ("all") + # + # This case allows you to undo what you may have previously done. + # The default case is "All VLANs and Tunnels". This case will handle + # that situation. 
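+ # For example (editor's illustration): enabled_vlans=['ALL'] clears
+ # the explicit allow list (STATE_DISABLED with an empty VLAN list,
+ # i.e. allow all), while enabled_vlans=['/Common/vlan2'] moves the
+ # virtual server to STATE_ENABLED with only that VLAN allowed.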
+ if 'ALL' in vlans_enabled_list:
+ # The user is coming from a situation where they previously
+ # were specifying a list of allowed VLANs
+ if len(current_vlans['vlans']) > 0 or \
+ current_vlans['state'] == "STATE_ENABLED":
+ api.LocalLB.VirtualServer.set_vlan(
+ virtual_servers=[name],
+ vlans=[{'state': 'STATE_DISABLED', 'vlans': []}]
+ )
+ updated = True
+ else:
+ if current_vlans['state'] == "STATE_DISABLED":
+ to_add_vlans = vlans_enabled_list
+ updated = True
+ else:
+ for vlan in vlans_enabled_list:
+ if vlan not in current_vlans['vlans']:
+ updated = True
+ to_add_vlans = vlans_enabled_list
+ break
+ if updated:
+ api.LocalLB.VirtualServer.set_vlan(
+ virtual_servers=[name],
+ vlans=[{
+ 'state': 'STATE_ENABLED',
+ 'vlans': to_add_vlans
+ }]
+ )
+
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting enabled vlans : %s' % e)
+
+
+def set_snat(api, name, snat):
+ updated = False
+ try:
+ current_state = get_snat_type(api, name)
+ current_snat_pool = get_snat_pool(api, name)
+ if snat is None:
+ return updated
+ elif snat == 'None' and current_state != 'SRC_TRANS_NONE':
+ api.LocalLB.VirtualServer.set_source_address_translation_none(
+ virtual_servers=[name]
+ )
+ updated = True
+ elif snat == 'Automap' and current_state != 'SRC_TRANS_AUTOMAP':
+ api.LocalLB.VirtualServer.set_source_address_translation_automap(
+ virtual_servers=[name]
+ )
+ updated = True
+ elif snat_settings_need_updating(snat, current_state, current_snat_pool):
+ api.LocalLB.VirtualServer.set_source_address_translation_snat_pool(
+ virtual_servers=[name],
+ pools=[snat]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting snat : %s' % e)
+
+
+def get_snat_type(api, name):
+ return api.LocalLB.VirtualServer.get_source_address_translation_type(
+ virtual_servers=[name]
+ )[0]
+
+
+def get_snat_pool(api, name):
+ return api.LocalLB.VirtualServer.get_source_address_translation_snat_pool(
+ virtual_servers=[name]
+ )[0]
+
+
+def snat_settings_need_updating(snat, current_state, current_snat_pool):
+ if snat == 'None' or snat == 'Automap':
+ return False
+ elif snat and current_state != 'SRC_TRANS_SNATPOOL':
+ return True
+ elif snat and current_state == 'SRC_TRANS_SNATPOOL' and current_snat_pool != snat:
+ return True
+ else:
+ return False
+
+
+def get_pool(api, name):
+ return api.LocalLB.VirtualServer.get_default_pool_name(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_pool(api, name, pool):
+ updated = False
+ try:
+ current_pool = get_pool(api, name)
+ if pool is not None and (pool != current_pool):
+ api.LocalLB.VirtualServer.set_default_pool_name(
+ virtual_servers=[name],
+ default_pools=[pool]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting pool : %s' % e)
+
+
+def get_destination(api, name):
+ return api.LocalLB.VirtualServer.get_destination_v2(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_destination(api, name, destination):
+ updated = False
+ try:
+ current_destination = get_destination(api, name)
+ if destination is not None and destination != current_destination['address']:
+ api.LocalLB.VirtualServer.set_destination_v2(
+ virtual_servers=[name],
+ destinations=[{'address': destination, 'port': current_destination['port']}]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting destination : %s' % e)
+
+
+def set_port(api, name, port):
+ updated = False
+ try:
+ current_destination = get_destination(api, name)
+ if
port is not None and port != current_destination['port']:
+ api.LocalLB.VirtualServer.set_destination_v2(
+ virtual_servers=[name],
+ destinations=[{'address': current_destination['address'], 'port': port}]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting port : %s' % e)
+
+
+def get_state(api, name):
+ return api.LocalLB.VirtualServer.get_enabled_state(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_state(api, name, state):
+ updated = False
+ try:
+ current_state = get_state(api, name)
+ # We consider that being present is equivalent to enabled
+ if state == 'present':
+ state = 'enabled'
+ if STATES[state] != current_state:
+ api.LocalLB.VirtualServer.set_enabled_state(
+ virtual_servers=[name],
+ states=[STATES[state]]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting state : %s' % e)
+
+
+def get_description(api, name):
+ return api.LocalLB.VirtualServer.get_description(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_description(api, name, description):
+ updated = False
+ try:
+ current_description = get_description(api, name)
+ if description is not None and current_description != description:
+ api.LocalLB.VirtualServer.set_description(
+ virtual_servers=[name],
+ descriptions=[description]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting description : %s' % e)
+
+
+def get_persistence_profiles(api, name):
+ return api.LocalLB.VirtualServer.get_persistence_profile(
+ virtual_servers=[name]
+ )[0]
+
+
+def set_default_persistence_profiles(api, name, persistence_profile):
+ updated = False
+ if persistence_profile is None:
+ return updated
+ try:
+ current_persistence_profiles = get_persistence_profiles(api, name)
+ default = None
+ for profile in current_persistence_profiles:
+ if profile['default_profile']:
+ default = profile['profile_name']
+ break
+ if default is not None and default != persistence_profile:
+ api.LocalLB.VirtualServer.remove_persistence_profile(
+ virtual_servers=[name],
+ profiles=[[{'profile_name': default, 'default_profile': True}]]
+ )
+ if default != persistence_profile:
+ api.LocalLB.VirtualServer.add_persistence_profile(
+ virtual_servers=[name],
+ profiles=[[{'profile_name': persistence_profile, 'default_profile': True}]]
+ )
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting default persistence profile : %s' % e)
+
+
+def get_route_advertisement_status(api, address):
+ result = api.LocalLB.VirtualAddressV2.get_route_advertisement_state(virtual_addresses=[address]).pop(0)
+ result = result.split("STATE_")[-1].lower()
+ return result
+
+
+def set_route_advertisement_state(api, destination, partition, route_advertisement_state):
+ updated = False
+
+ try:
+ state = "STATE_%s" % route_advertisement_state.strip().upper()
+ address = fq_name(partition, destination)
+ current_route_advertisement_state = get_route_advertisement_status(api, address)
+ if current_route_advertisement_state != route_advertisement_state:
+ api.LocalLB.VirtualAddressV2.set_route_advertisement_state(virtual_addresses=[address], states=[state])
+ updated = True
+ return updated
+ except bigsuds.OperationFailed as e:
+ raise Exception('Error on setting route advertisement state : %s' % e)
+
+
+def main():
+ argument_spec = f5_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present',
+ choices=['present', 'absent', 'disabled',
+
+
+def main():
+    argument_spec = f5_argument_spec()
+    argument_spec.update(dict(
+        state=dict(type='str', default='present',
+                   choices=['present', 'absent', 'disabled', 'enabled']),
+        name=dict(type='str', required=True, aliases=['vs']),
+        destination=dict(type='str', aliases=['address', 'ip']),
+        port=dict(type='int'),
+        all_profiles=dict(type='list'),
+        all_rules=dict(type='list'),
+        enabled_vlans=dict(type='list'),
+        pool=dict(type='str'),
+        description=dict(type='str'),
+        snat=dict(type='str'),
+        route_advertisement_state=dict(type='str', default='disabled', choices=['enabled', 'disabled']),
+        default_persistence_profile=dict(type='str')
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True
+    )
+
+    if not bigsuds_found:
+        module.fail_json(msg="the python bigsuds module is required")
+
+    if module.params['validate_certs']:
+        import ssl
+        if not hasattr(ssl, 'SSLContext'):
+            module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+
+    server = module.params['server']
+    server_port = module.params['server_port']
+    user = module.params['user']
+    password = module.params['password']
+    state = module.params['state']
+    partition = module.params['partition']
+    validate_certs = module.params['validate_certs']
+
+    name = fq_name(partition, module.params['name'])
+    destination = module.params['destination']
+    port = module.params['port']
+    all_profiles = fq_list_names(partition, module.params['all_profiles'])
+    all_rules = fq_list_names(partition, module.params['all_rules'])
+
+    enabled_vlans = module.params['enabled_vlans']
+    if enabled_vlans is None or 'ALL' in enabled_vlans:
+        all_enabled_vlans = enabled_vlans
+    else:
+        all_enabled_vlans = fq_list_names(partition, enabled_vlans)
+
+    pool = fq_name(partition, module.params['pool'])
+    description = module.params['description']
+    snat = module.params['snat']
+    route_advertisement_state = module.params['route_advertisement_state']
+    default_persistence_profile = fq_name(partition, module.params['default_persistence_profile'])
+
+    if port is not None and not (1 <= port <= 65535):
+        module.fail_json(msg="valid ports must be in range 1 - 65535")
+
+    try:
+        api = bigip_api(server, user, password, validate_certs, port=server_port)
+        result = {'changed': False}  # default
+
+        if state == 'absent':
+            if not module.check_mode:
+                if vs_exists(api, name):
+                    # hack to handle concurrent runs of module
+                    # pool might be gone before we actually remove
+                    try:
+                        vs_remove(api, name)
+                        result = {'changed': True, 'deleted': name}
+                    except bigsuds.OperationFailed as e:
+                        if "was not found" in str(e):
+                            result['changed'] = False
+                        else:
+                            raise
+            else:
+                # check-mode return value
+                result = {'changed': True}
+
+        else:
+            update = False
+            if not vs_exists(api, name):
+                if (not destination) or (not port):
+                    module.fail_json(msg="both destination and port must be supplied to create a VS")
+                if not module.check_mode:
+                    # a bit of a hack to handle concurrent runs of this module.
+                    # even though we've checked the virtual_server doesn't exist,
+                    # it may exist by the time we run virtual_server().
+                    # this catches the exception and does something smart
+                    # about it!
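+                    # (the create path below applies largely the same setters
+                    # as the update path further down, but without wrapping
+                    # them in an iControl transaction)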
+ try: + vs_create(api, name, destination, port, pool) + set_profiles(api, name, all_profiles) + set_enabled_vlans(api, name, all_enabled_vlans) + set_rules(api, name, all_rules) + set_snat(api, name, snat) + set_description(api, name, description) + set_default_persistence_profiles(api, name, default_persistence_profile) + set_state(api, name, state) + set_route_advertisement_state(api, destination, partition, route_advertisement_state) + result = {'changed': True} + except bigsuds.OperationFailed as e: + raise Exception('Error on creating Virtual Server : %s' % e) + else: + # check-mode return value + result = {'changed': True} + else: + update = True + if update: + # VS exists + if not module.check_mode: + # Have a transaction for all the changes + try: + api.System.Session.start_transaction() + result['changed'] |= set_destination(api, name, fq_name(partition, destination)) + result['changed'] |= set_port(api, name, port) + result['changed'] |= set_pool(api, name, pool) + result['changed'] |= set_description(api, name, description) + result['changed'] |= set_snat(api, name, snat) + result['changed'] |= set_profiles(api, name, all_profiles) + result['changed'] |= set_enabled_vlans(api, name, all_enabled_vlans) + result['changed'] |= set_rules(api, name, all_rules) + result['changed'] |= set_default_persistence_profiles(api, name, default_persistence_profile) + result['changed'] |= set_state(api, name, state) + result['changed'] |= set_route_advertisement_state(api, destination, partition, route_advertisement_state) + api.System.Session.submit_transaction() + except Exception as e: + raise Exception("Error on updating Virtual Server : %s" % e) + else: + # check-mode return value + result = {'changed': True} + + except Exception as e: + module.fail_json(msg="received exception: %s" % e) + + module.exit_json(**result) +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.f5 import * + +if __name__ == '__main__': + main() diff --git a/network/f5/bigip_vlan.py b/network/f5/bigip_vlan.py new file mode 100644 index 00000000000..40df948f6c6 --- /dev/null +++ b/network/f5/bigip_vlan.py @@ -0,0 +1,451 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2016 F5 Networks Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: bigip_vlan +short_description: Manage VLANs on a BIG-IP system +description: + - Manage VLANs on a BIG-IP system +version_added: "2.2" +options: + description: + description: + - The description to give to the VLAN. + tagged_interfaces: + description: + - Specifies a list of tagged interfaces and trunks that you want to + configure for the VLAN. Use tagged interfaces or trunks when + you want to assign a single interface or trunk to multiple VLANs. 
+ required: false + aliases: + - tagged_interface + untagged_interfaces: + description: + - Specifies a list of untagged interfaces and trunks that you want to + configure for the VLAN. + required: false + aliases: + - untagged_interface + name: + description: + - The VLAN to manage. If the special VLAN C(ALL) is specified with + the C(state) value of C(absent) then all VLANs will be removed. + required: true + state: + description: + - The state of the VLAN on the system. When C(present), guarantees + that the VLAN exists with the provided attributes. When C(absent), + removes the VLAN from the system. + required: false + default: present + choices: + - absent + - present + tag: + description: + - Tag number for the VLAN. The tag number can be any integer between 1 + and 4094. The system automatically assigns a tag number if you do not + specify a value. +notes: + - Requires the f5-sdk Python package on the host. This is as easy as pip + install f5-sdk. + - Requires BIG-IP versions >= 12.0.0 +extends_documentation_fragment: f5 +requirements: + - f5-sdk +author: + - Tim Rupp (@caphrim007) +''' + +EXAMPLES = ''' +- name: Create VLAN + bigip_vlan: + name: "net1" + password: "secret" + server: "lb.mydomain.com" + user: "admin" + validate_certs: "no" + delegate_to: localhost + +- name: Set VLAN tag + bigip_vlan: + name: "net1" + password: "secret" + server: "lb.mydomain.com" + tag: "2345" + user: "admin" + validate_certs: "no" + delegate_to: localhost + +- name: Add VLAN 2345 as tagged to interface 1.1 + bigip_vlan: + tagged_interface: 1.1 + name: "net1" + password: "secret" + server: "lb.mydomain.com" + tag: "2345" + user: "admin" + validate_certs: "no" + delegate_to: localhost + +- name: Add VLAN 1234 as tagged to interfaces 1.1 and 1.2 + bigip_vlan: + tagged_interfaces: + - 1.1 + - 1.2 + name: "net1" + password: "secret" + server: "lb.mydomain.com" + tag: "1234" + user: "admin" + validate_certs: "no" + delegate_to: localhost +''' + +RETURN = ''' +description: + description: The description set on the VLAN + returned: changed + type: string + sample: foo VLAN +interfaces: + description: Interfaces that the VLAN is assigned to + returned: changed + type: list + sample: ['1.1','1.2'] +name: + description: The name of the VLAN + returned: changed + type: string + sample: net1 +partition: + description: The partition that the VLAN was created on + returned: changed + type: string + sample: Common +tag: + description: The ID of the VLAN + returned: changed + type: int + sample: 2345 +''' + +try: + from f5.bigip import ManagementRoot + from icontrol.session import iControlUnexpectedHTTPError + HAS_F5SDK = True +except ImportError: + HAS_F5SDK = False + + +class BigIpVlan(object): + def __init__(self, *args, **kwargs): + if not HAS_F5SDK: + raise F5ModuleError("The python f5-sdk module is required") + + # The params that change in the module + self.cparams = dict() + + # Stores the params that are sent to the module + self.params = kwargs + self.api = ManagementRoot(kwargs['server'], + kwargs['user'], + kwargs['password'], + port=kwargs['server_port']) + + def present(self): + if self.exists(): + return self.update() + else: + return self.create() + + def absent(self): + changed = False + + if self.exists(): + changed = self.delete() + + return changed + + def read(self): + """Read information and transform it + + The values that are returned by BIG-IP in the f5-sdk can have encoding + attached to them as well as be completely missing in some cases. 
+
+        Therefore, this method will transform the data from the BIG-IP into a
+        format that is more easily consumable by the rest of the class and the
+        parameters that are supported by the module.
+        """
+        p = dict()
+        name = self.params['name']
+        partition = self.params['partition']
+        r = self.api.tm.net.vlans.vlan.load(
+            name=name,
+            partition=partition
+        )
+        ifcs = r.interfaces_s.get_collection()
+        if hasattr(r, 'tag'):
+            p['tag'] = int(r.tag)
+        if hasattr(r, 'description'):
+            p['description'] = str(r.description)
+        if ifcs:
+            untagged = []
+            tagged = []
+            for x in ifcs:
+                if hasattr(x, 'tagged'):
+                    tagged.append(str(x.name))
+                elif hasattr(x, 'untagged'):
+                    untagged.append(str(x.name))
+            if untagged:
+                p['untagged_interfaces'] = list(set(untagged))
+            if tagged:
+                p['tagged_interfaces'] = list(set(tagged))
+        p['name'] = name
+        return p
+
+    def create(self):
+        params = dict()
+
+        check_mode = self.params['check_mode']
+        description = self.params['description']
+        name = self.params['name']
+        untagged_interfaces = self.params['untagged_interfaces']
+        tagged_interfaces = self.params['tagged_interfaces']
+        partition = self.params['partition']
+        tag = self.params['tag']
+
+        if tag is not None:
+            params['tag'] = tag
+
+        if untagged_interfaces is not None or tagged_interfaces is not None:
+            tmp = []
+            ifcs = self.api.tm.net.interfaces.get_collection()
+            ifcs = [str(x.name) for x in ifcs]
+
+            if not ifcs:
+                raise F5ModuleError(
+                    'No interfaces were found'
+                )
+
+            pinterfaces = []
+            if untagged_interfaces:
+                interfaces = untagged_interfaces
+            elif tagged_interfaces:
+                interfaces = tagged_interfaces
+
+            for ifc in interfaces:
+                ifc = str(ifc)
+                if ifc in ifcs:
+                    pinterfaces.append(ifc)
+
+            if tagged_interfaces:
+                tmp = [dict(name=x, tagged=True) for x in pinterfaces]
+            elif untagged_interfaces:
+                tmp = [dict(name=x, untagged=True) for x in pinterfaces]
+
+            if tmp:
+                params['interfaces'] = tmp
+
+        if description is not None:
+            params['description'] = self.params['description']
+
+        params['name'] = name
+        params['partition'] = partition
+
+        self.cparams = camel_dict_to_snake_dict(params)
+        if check_mode:
+            return True
+
+        d = self.api.tm.net.vlans.vlan
+        d.create(**params)
+
+        if self.exists():
+            return True
+        else:
+            raise F5ModuleError("Failed to create the VLAN")
+
+    def update(self):
+        changed = False
+        params = dict()
+        current = self.read()
+
+        check_mode = self.params['check_mode']
+        description = self.params['description']
+        name = self.params['name']
+        tag = self.params['tag']
+        partition = self.params['partition']
+        tagged_interfaces = self.params['tagged_interfaces']
+        untagged_interfaces = self.params['untagged_interfaces']
+
+        if untagged_interfaces is not None or tagged_interfaces is not None:
+            ifcs = self.api.tm.net.interfaces.get_collection()
+            ifcs = [str(x.name) for x in ifcs]
+
+            if not ifcs:
+                raise F5ModuleError(
+                    'No interfaces were found'
+                )
+
+            pinterfaces = []
+            if untagged_interfaces:
+                interfaces = untagged_interfaces
+            elif tagged_interfaces:
+                interfaces = tagged_interfaces
+
+            for ifc in interfaces:
+                ifc = str(ifc)
+                if ifc in ifcs:
+                    pinterfaces.append(ifc)
+                else:
+                    raise F5ModuleError(
+                        'The specified interface "%s" was not found' % (ifc)
+                    )
+
+            if tagged_interfaces:
+                tmp = [dict(name=x, tagged=True) for x in pinterfaces]
+                if 'tagged_interfaces' in current:
+                    if pinterfaces != current['tagged_interfaces']:
+                        params['interfaces'] = tmp
+                else:
+                    params['interfaces'] = tmp
+            elif untagged_interfaces:
+                tmp = [dict(name=x, untagged=True)
for x in pinterfaces] + if 'untagged_interfaces' in current: + if pinterfaces != current['untagged_interfaces']: + params['interfaces'] = tmp + else: + params['interfaces'] = tmp + + if description is not None: + if 'description' in current: + if description != current['description']: + params['description'] = description + else: + params['description'] = description + + if tag is not None: + if 'tag' in current: + if tag != current['tag']: + params['tag'] = tag + else: + params['tag'] = tag + + if params: + changed = True + params['name'] = name + params['partition'] = partition + if check_mode: + return changed + self.cparams = camel_dict_to_snake_dict(params) + else: + return changed + + r = self.api.tm.net.vlans.vlan.load( + name=name, + partition=partition + ) + r.update(**params) + r.refresh() + + return True + + def delete(self): + params = dict() + check_mode = self.params['check_mode'] + + params['name'] = self.params['name'] + params['partition'] = self.params['partition'] + + self.cparams = camel_dict_to_snake_dict(params) + if check_mode: + return True + + dc = self.api.tm.net.vlans.vlan.load(**params) + dc.delete() + + if self.exists(): + raise F5ModuleError("Failed to delete the VLAN") + return True + + def exists(self): + name = self.params['name'] + partition = self.params['partition'] + return self.api.tm.net.vlans.vlan.exists( + name=name, + partition=partition + ) + + def flush(self): + result = dict() + state = self.params['state'] + + try: + if state == "present": + changed = self.present() + elif state == "absent": + changed = self.absent() + except iControlUnexpectedHTTPError as e: + raise F5ModuleError(str(e)) + + result.update(**self.cparams) + result.update(dict(changed=changed)) + return result + + +def main(): + argument_spec = f5_argument_spec() + + meta_args = dict( + description=dict(required=False, default=None), + tagged_interfaces=dict(required=False, default=None, type='list', aliases=['tagged_interface']), + untagged_interfaces=dict(required=False, default=None, type='list', aliases=['untagged_interface']), + name=dict(required=True), + tag=dict(required=False, default=None, type='int') + ) + argument_spec.update(meta_args) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[ + ['tagged_interfaces', 'untagged_interfaces'] + ] + ) + + try: + obj = BigIpVlan(check_mode=module.check_mode, **module.params) + result = obj.flush() + + module.exit_json(**result) + except F5ModuleError as e: + module.fail_json(msg=str(e)) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.f5 import * + +if __name__ == '__main__': + main() diff --git a/network/haproxy.py b/network/haproxy.py index cada704e342..5ee3006629e 100644 --- a/network/haproxy.py +++ b/network/haproxy.py @@ -18,11 +18,16 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: haproxy version_added: "1.9" short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands. +author: "Ravi Bhure (@ravibhure)" description: - Enable, disable, and set weights for HAProxy backend servers using socket commands. 
@@ -60,6 +65,12 @@ required: true default: null choices: [ "enabled", "disabled" ] + fail_on_not_found: + description: + - Fail whenever trying to enable/disable a backend host that does not exist + required: false + default: false + version_added: "2.2" wait: description: - Wait until the server reports a status of 'UP' when `state=enabled`, or @@ -91,38 +102,80 @@ EXAMPLES = ''' # disable server in 'www' backend pool -- haproxy: state=disabled host={{ inventory_hostname }} backend=www +- haproxy: + state: disabled + host: '{{ inventory_hostname }}' + backend: www # disable server without backend pool name (apply to all available backend pool) -- haproxy: state=disabled host={{ inventory_hostname }} +- haproxy: + state: disabled + host: '{{ inventory_hostname }}' # disable server, provide socket file -- haproxy: state=disabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock backend=www +- haproxy: + state: disabled + host: '{{ inventory_hostname }}' + socket: /var/run/haproxy.sock + backend: www # disable server, provide socket file, wait until status reports in maintenance -- haproxy: state=disabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock backend=www wait=yes +- haproxy: + state: disabled + host: '{{ inventory_hostname }}' + socket: /var/run/haproxy.sock + backend: www + wait: yes # disable backend server in 'www' backend pool and drop open sessions to it -- haproxy: state=disabled host={{ inventory_hostname }} backend=www socket=/var/run/haproxy.sock shutdown_sessions=true +- haproxy: + state: disabled + host: '{{ inventory_hostname }}' + backend: www + socket: /var/run/haproxy.sock + shutdown_sessions: true + +# disable server without backend pool name (apply to all available backend pool) but fail when the backend host is not found +- haproxy: + state: disabled + host: '{{ inventory_hostname }}' + fail_on_not_found: yes # enable server in 'www' backend pool -- haproxy: state=enabled host={{ inventory_hostname }} backend=www +- haproxy: + state: enabled + host: '{{ inventory_hostname }}' + backend: www # enable server in 'www' backend pool wait until healthy -- haproxy: state=enabled host={{ inventory_hostname }} backend=www wait=yes +- haproxy: + state: enabled + host: '{{ inventory_hostname }}' + backend: www + wait: yes # enable server in 'www' backend pool wait until healthy. 
Retry 10 times with intervals of 5 seconds to retrieve the health -- haproxy: state=enabled host={{ inventory_hostname }} backend=www wait=yes wait_retries=10 wait_interval=5 +- haproxy: + state: enabled + host: '{{ inventory_hostname }}' + backend: www + wait: yes + wait_retries: 10 + wait_interval: 5 # enable server in 'www' backend pool with change server(s) weight -- haproxy: state=enabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock weight=10 backend=www - -author: "Ravi Bhure (@ravibhure)" +- haproxy: + state: enabled + host: '{{ inventory_hostname }}' + socket: /var/run/haproxy.sock + weight: 10 + backend: www ''' import socket import csv import time +from string import Template DEFAULT_SOCKET_LOCATION="/var/run/haproxy.sock" @@ -156,17 +209,17 @@ def __init__(self, module): self.weight = self.module.params['weight'] self.socket = self.module.params['socket'] self.shutdown_sessions = self.module.params['shutdown_sessions'] + self.fail_on_not_found = self.module.params['fail_on_not_found'] self.wait = self.module.params['wait'] self.wait_retries = self.module.params['wait_retries'] self.wait_interval = self.module.params['wait_interval'] - self.command_results = [] + self.command_results = {} def execute(self, cmd, timeout=200, capture_output=True): """ Executes a HAProxy command by sending a message to a HAProxy's local UNIX socket and waiting up to 'timeout' milliseconds for the response. """ - self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.client.connect(self.socket) self.client.sendall('%s\n' % cmd) @@ -177,10 +230,67 @@ def execute(self, cmd, timeout=200, capture_output=True): result += buf buf = self.client.recv(RECV_SIZE) if capture_output: - self.command_results = result.strip() + self.capture_command_output(cmd, result.strip()) self.client.close() return result + + def capture_command_output(self, cmd, output): + """ + Capture the output for a command + """ + if 'command' not in self.command_results: + self.command_results['command'] = [] + self.command_results['command'].append(cmd) + if 'output' not in self.command_results: + self.command_results['output'] = [] + self.command_results['output'].append(output) + + + def discover_all_backends(self): + """ + Discover all entries with svname = 'BACKEND' and return a list of their corresponding + pxnames + """ + data = self.execute('show stat', 200, False).lstrip('# ') + r = csv.DictReader(data.splitlines()) + return tuple(map(lambda d: d['pxname'], filter(lambda d: d['svname'] == 'BACKEND', r))) + + + def execute_for_backends(self, cmd, pxname, svname, wait_for_status = None): + """ + Run some command on the specified backends. If no backends are provided they will + be discovered automatically (all backends) + """ + # Discover backends if none are given + if pxname is None: + backends = self.discover_all_backends() + else: + backends = [pxname] + + # Run the command for each requested backend + for backend in backends: + # Fail when backends were not found + state = self.get_state_for(backend, svname) + if (self.fail_on_not_found or self.wait) and state is None: + self.module.fail_json(msg="The specified backend '%s/%s' was not found!" % (backend, svname)) + + self.execute(Template(cmd).substitute(pxname = backend, svname = svname)) + if self.wait: + self.wait_until_status(backend, svname, wait_for_status) + + + def get_state_for(self, pxname, svname): + """ + Find the state of specific services. When pxname is not set, get all backends for a specific host. 
+ Returns a list of dictionaries containing the status and weight for those services. + """ + data = self.execute('show stat', 200, False).lstrip('# ') + r = csv.DictReader(data.splitlines()) + state = tuple(map(lambda d: { 'status': d['status'], 'weight': d['weight'] }, filter(lambda d: (pxname is None or d['pxname'] == pxname) and d['svname'] == svname, r))) + return state or None + + def wait_until_status(self, pxname, svname, status): """ Wait for a service to reach the specified status. Try RETRIES times @@ -189,55 +299,28 @@ def wait_until_status(self, pxname, svname, status): not found, the module will fail. """ for i in range(1, self.wait_retries): - data = self.execute('show stat', 200, False).lstrip('# ') - r = csv.DictReader(data.splitlines()) - found = False - for row in r: - if row['pxname'] == pxname and row['svname'] == svname: - found = True - if row['status'] == status: - return True; - else: - time.sleep(self.wait_interval) - - if not found: - self.module.fail_json(msg="unable to find server %s/%s" % (pxname, svname)) + state = self.get_state_for(pxname, svname) + + # We can assume there will only be 1 element in state because both svname and pxname are always set when we get here + if state[0]['status'] == status: + return True + else: + time.sleep(self.wait_interval) self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." % (pxname, svname, status, self.wait_retries)) + def enabled(self, host, backend, weight): """ Enabled action, marks server to UP and checks are re-enabled, also supports to get current weight for server (default) and set the weight for haproxy backend server when provides. """ - svname = host - if self.backend is None: - output = self.execute('show stat') - #sanitize and make a list of lines - output = output.lstrip('# ').strip() - output = output.split('\n') - result = output - - for line in result: - if 'BACKEND' in line: - result = line.split(',')[0] - pxname = result - cmd = "get weight %s/%s ; enable server %s/%s" % (pxname, svname, pxname, svname) - if weight: - cmd += "; set weight %s/%s %s" % (pxname, svname, weight) - self.execute(cmd) - if self.wait: - self.wait_until_status(pxname, svname, 'UP') + cmd = "get weight $pxname/$svname; enable server $pxname/$svname" + if weight: + cmd += "; set weight $pxname/$svname %s" % weight + self.execute_for_backends(cmd, backend, host, 'UP') - else: - pxname = backend - cmd = "get weight %s/%s ; enable server %s/%s" % (pxname, svname, pxname, svname) - if weight: - cmd += "; set weight %s/%s %s" % (pxname, svname, weight) - self.execute(cmd) - if self.wait: - self.wait_until_status(pxname, svname, 'UP') def disabled(self, host, backend, shutdown_sessions): """ @@ -245,50 +328,40 @@ def disabled(self, host, backend, shutdown_sessions): performed on the server until it leaves maintenance, also it shutdown sessions while disabling backend host server. 
""" - svname = host - if self.backend is None: - output = self.execute('show stat') - #sanitize and make a list of lines - output = output.lstrip('# ').strip() - output = output.split('\n') - result = output - - for line in result: - if 'BACKEND' in line: - result = line.split(',')[0] - pxname = result - cmd = "get weight %s/%s ; disable server %s/%s" % (pxname, svname, pxname, svname) - if shutdown_sessions: - cmd += "; shutdown sessions server %s/%s" % (pxname, svname) - self.execute(cmd) - if self.wait: - self.wait_until_status(pxname, svname, 'MAINT') + cmd = "get weight $pxname/$svname; disable server $pxname/$svname" + if shutdown_sessions: + cmd += "; shutdown sessions server $pxname/$svname" + self.execute_for_backends(cmd, backend, host, 'MAINT') - else: - pxname = backend - cmd = "get weight %s/%s ; disable server %s/%s" % (pxname, svname, pxname, svname) - if shutdown_sessions: - cmd += "; shutdown sessions server %s/%s" % (pxname, svname) - self.execute(cmd) - if self.wait: - self.wait_until_status(pxname, svname, 'MAINT') def act(self): """ Figure out what you want to do from ansible, and then do it. """ + # Get the state before the run + state_before = self.get_state_for(self.backend, self.host) + self.command_results['state_before'] = state_before # toggle enable/disbale server if self.state == 'enabled': self.enabled(self.host, self.backend, self.weight) - elif self.state == 'disabled': self.disabled(self.host, self.backend, self.shutdown_sessions) - else: self.module.fail_json(msg="unknown state specified: '%s'" % self.state) - self.module.exit_json(stdout=self.command_results, changed=True) + # Get the state after the run + state_after = self.get_state_for(self.backend, self.host) + self.command_results['state_after'] = state_after + + # Report change status + if state_before != state_after: + self.command_results['changed'] = True + self.module.exit_json(**self.command_results) + else: + self.command_results['changed'] = False + self.module.exit_json(**self.command_results) + def main(): @@ -300,12 +373,12 @@ def main(): backend=dict(required=False, default=None), weight=dict(required=False, default=None), socket = dict(required=False, default=DEFAULT_SOCKET_LOCATION), - shutdown_sessions=dict(required=False, default=False), + shutdown_sessions=dict(required=False, default=False, type='bool'), + fail_on_not_found=dict(required=False, default=False, type='bool'), wait=dict(required=False, default=False, type='bool'), wait_retries=dict(required=False, default=WAIT_RETRIES, type='int'), wait_interval=dict(required=False, default=WAIT_INTERVAL, type='int'), ), - ) if not socket: @@ -317,4 +390,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/network/illumos/__init__.py b/network/illumos/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/network/illumos/dladm_etherstub.py b/network/illumos/dladm_etherstub.py new file mode 100644 index 00000000000..861e0a70131 --- /dev/null +++ b/network/illumos/dladm_etherstub.py @@ -0,0 +1,181 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Adam Števko +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: dladm_etherstub
+short_description: Manage etherstubs on Solaris/illumos systems.
+description:
+    - Create or delete etherstubs on Solaris/illumos systems.
+version_added: "2.2"
+author: Adam Števko (@xen0l)
+options:
+    name:
+        description:
+            - Etherstub name.
+        required: true
+    temporary:
+        description:
+            - Specifies that the etherstub is temporary. Temporary etherstubs
+              do not persist across reboots.
+        required: false
+        default: false
+        choices: [ "true", "false" ]
+    state:
+        description:
+            - Create or delete Solaris/illumos etherstub.
+        required: false
+        default: "present"
+        choices: [ "present", "absent" ]
+'''
+
+EXAMPLES = '''
+# Create 'stub0' etherstub
+- dladm_etherstub:
+    name: stub0
+    state: present
+
+# Remove 'stub0' etherstub
+- dladm_etherstub:
+    name: stub0
+    state: absent
+'''
+
+RETURN = '''
+name:
+    description: etherstub name
+    returned: always
+    type: string
+    sample: "switch0"
+state:
+    description: state of the target
+    returned: always
+    type: string
+    sample: "present"
+temporary:
+    description: etherstub's persistence
+    returned: always
+    type: boolean
+    sample: "True"
+'''
+
+
+class Etherstub(object):
+
+    def __init__(self, module):
+        self.module = module
+
+        self.name = module.params['name']
+        self.temporary = module.params['temporary']
+        self.state = module.params['state']
+
+    def etherstub_exists(self):
+        cmd = [self.module.get_bin_path('dladm', True)]
+
+        cmd.append('show-etherstub')
+        cmd.append(self.name)
+
+        (rc, _, _) = self.module.run_command(cmd)
+
+        if rc == 0:
+            return True
+        else:
+            return False
+
+    def create_etherstub(self):
+        cmd = [self.module.get_bin_path('dladm', True)]
+
+        cmd.append('create-etherstub')
+
+        if self.temporary:
+            cmd.append('-t')
+        cmd.append(self.name)
+
+        return self.module.run_command(cmd)
+
+    def delete_etherstub(self):
+        cmd = [self.module.get_bin_path('dladm', True)]
+
+        cmd.append('delete-etherstub')
+
+        if self.temporary:
+            cmd.append('-t')
+        cmd.append(self.name)
+
+        return self.module.run_command(cmd)
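+
+# For reference, the dladm(1M) invocations assembled by the class above look
+# like this (a sketch of the generated argv, not additional functionality):
+#
+#   dladm show-etherstub stub0
+#   dladm create-etherstub [-t] stub0
+#   dladm delete-etherstub [-t] stub0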
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=True),
+            temporary=dict(default=False, type='bool'),
+            state=dict(default='present', choices=['absent', 'present']),
+        ),
+        supports_check_mode=True
+    )
+
+    etherstub = Etherstub(module)
+
+    rc = None
+    out = ''
+    err = ''
+    result = {}
+    result['name'] = etherstub.name
+    result['state'] = etherstub.state
+    result['temporary'] = etherstub.temporary
+
+    if etherstub.state == 'absent':
+        if etherstub.etherstub_exists():
+            if module.check_mode:
+                module.exit_json(changed=True)
+            (rc, out, err) = etherstub.delete_etherstub()
+            if rc != 0:
+                module.fail_json(name=etherstub.name, msg=err, rc=rc)
+    elif etherstub.state == 'present':
+        if not etherstub.etherstub_exists():
+            if module.check_mode:
+                module.exit_json(changed=True)
+            (rc, out, err) = etherstub.create_etherstub()
+
+        if rc is not None and rc != 0:
+            module.fail_json(name=etherstub.name, msg=err, rc=rc)
+
+    if rc is None:
+        result['changed'] = False
+    else:
+        result['changed'] = True
+
+    if out:
+        result['stdout'] = out
+    if err:
+        result['stderr'] = err
+
+    module.exit_json(**result)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
diff --git a/network/illumos/dladm_vnic.py b/network/illumos/dladm_vnic.py
new file mode 100644
index 00000000000..0718517d475
--- /dev/null
+++ b/network/illumos/dladm_vnic.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Števko
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: dladm_vnic
+short_description: Manage VNICs on Solaris/illumos systems.
+description:
+    - Create or delete VNICs on Solaris/illumos systems.
+version_added: "2.2"
+author: Adam Števko (@xen0l)
+options:
+    name:
+        description:
+            - VNIC name.
+        required: true
+    link:
+        description:
+            - VNIC underlying link name.
+        required: true
+    temporary:
+        description:
+            - Specifies that the VNIC is temporary. Temporary VNICs
+              do not persist across reboots.
+        required: false
+        default: false
+        choices: [ "true", "false" ]
+    mac:
+        description:
+            - Sets the VNIC's MAC address. Must be a valid unicast MAC address.
+        required: false
+        default: null
+        aliases: [ "macaddr" ]
+    vlan:
+        description:
+            - Enable VLAN tagging for this VNIC. The VLAN tag will have id
+              I(vlan).
+        required: false
+        default: null
+        aliases: [ "vlan_id" ]
+    state:
+        description:
+            - Create or delete Solaris/illumos VNIC.
+        required: false
+        default: "present"
+        choices: [ "present", "absent" ]
+'''
+
+EXAMPLES = '''
+# Create 'vnic0' VNIC over 'bnx0' link
+- dladm_vnic:
+    name: vnic0
+    link: bnx0
+    state: present
+
+# Create VNIC with specified MAC and VLAN tag over 'aggr0'
+- dladm_vnic:
+    name: vnic1
+    link: aggr0
+    mac: '00:00:5E:00:53:23'
+    vlan: 4
+
+# Remove 'vnic0' VNIC
+- dladm_vnic:
+    name: vnic0
+    link: bnx0
+    state: absent
+'''
+
+RETURN = '''
+name:
+    description: VNIC name
+    returned: always
+    type: string
+    sample: "vnic0"
+link:
+    description: VNIC underlying link name
+    returned: always
+    type: string
+    sample: "igb0"
+state:
+    description: state of the target
+    returned: always
+    type: string
+    sample: "present"
+temporary:
+    description: VNIC's persistence
+    returned: always
+    type: boolean
+    sample: "True"
+mac:
+    description: MAC address to use for VNIC
+    returned: if mac is specified
+    type: string
+    sample: "00:00:5E:00:53:42"
+vlan:
+    description: VLAN to use for VNIC
+    returned: success
+    type: int
+    sample: 42
+'''
+
+import re
+
+
+class VNIC(object):
+
+    # Unicast MACs have the least significant bit of the first octet clear,
+    # so the second hex digit must be even.
+    UNICAST_MAC_REGEX = r'^[a-f0-9][02468ace]:([a-f0-9]{2}:){4}[a-f0-9]{2}$'
+
+    def __init__(self, module):
+        self.module = module
+
+        self.name = module.params['name']
+        self.link = module.params['link']
+        self.mac = module.params['mac']
+        self.vlan = module.params['vlan']
+        self.temporary = module.params['temporary']
+        self.state = module.params['state']
+
+    def vnic_exists(self):
+        cmd = [self.module.get_bin_path('dladm', True)]
+
+        cmd.append('show-vnic')
+        cmd.append(self.name)
+
+        (rc, _, _) = self.module.run_command(cmd)
+
+        if rc == 0:
+            return True
+        else:
+            return False
+
+    def create_vnic(self):
+        cmd = [self.module.get_bin_path('dladm', True)]
+
+        cmd.append('create-vnic')
+
+        if self.temporary:
+            cmd.append('-t')
+
+        if self.mac:
+            cmd.append('-m')
+            cmd.append(self.mac)
+
+        if self.vlan:
+            cmd.append('-v')
+            cmd.append(str(self.vlan))
+
+        cmd.append('-l')
+        cmd.append(self.link)
+        cmd.append(self.name)
+
+        return self.module.run_command(cmd)
+
+    def delete_vnic(self):
+        cmd = [self.module.get_bin_path('dladm', True)]
+
+        cmd.append('delete-vnic')
+
+        if self.temporary:
+            cmd.append('-t')
+        cmd.append(self.name)
+
+        return self.module.run_command(cmd)
+
+    def is_valid_unicast_mac(self):
+
+        mac_re = re.match(self.UNICAST_MAC_REGEX, self.mac, re.IGNORECASE)
+
+        return mac_re is not None
+
+    def is_valid_vlan_id(self):
+
+        return 0 <= self.vlan <= 4095
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=True),
+            link=dict(required=True),
+            mac=dict(default=None, aliases=['macaddr']),
+            vlan=dict(default=None, aliases=['vlan_id'], type='int'),
+            temporary=dict(default=False, type='bool'),
+            state=dict(default='present', choices=['absent', 'present']),
+        ),
+        supports_check_mode=True
+    )
+
+    vnic = VNIC(module)
+
+    rc = None
+    out = ''
+    err = ''
+    result = {}
+    result['name'] = vnic.name
+    result['link'] = vnic.link
+    result['state'] = vnic.state
+    result['temporary'] = vnic.temporary
+
+    if vnic.mac is not None:
+        if not vnic.is_valid_unicast_mac():
+            module.fail_json(msg='Invalid unicast MAC address',
+                             mac=vnic.mac,
+                             name=vnic.name,
+                             state=vnic.state,
+                             link=vnic.link,
+                             vlan=vnic.vlan)
+        result['mac'] = vnic.mac
+
+    if vnic.vlan is not None:
+        if not vnic.is_valid_vlan_id():
+            module.fail_json(msg='Invalid VLAN tag',
+                             mac=vnic.mac,
+                             name=vnic.name,
+                             state=vnic.state,
+                             link=vnic.link,
+                             vlan=vnic.vlan)
+        result['vlan'] = vnic.vlan
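+
+    # For reference, the dladm(1M) command lines built above are roughly
+    # (a sketch; bracketed parts depend on the parameters supplied):
+    #   dladm create-vnic [-t] [-m <mac>] [-v <vlan>] -l <link> <name>
+    #   dladm delete-vnic [-t] <name>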
+
+    if vnic.state == 'absent':
+        if vnic.vnic_exists():
+            if module.check_mode:
+                module.exit_json(changed=True)
+            (rc, out, err) = vnic.delete_vnic()
+            if rc != 0:
+                module.fail_json(name=vnic.name, msg=err, rc=rc)
+    elif vnic.state == 'present':
+        if not vnic.vnic_exists():
+            if module.check_mode:
+                module.exit_json(changed=True)
+            (rc, out, err) = vnic.create_vnic()
+
+        if rc is not None and rc != 0:
+            module.fail_json(name=vnic.name, msg=err, rc=rc)
+
+    if rc is None:
+        result['changed'] = False
+    else:
+        result['changed'] = True
+
+    if out:
+        result['stdout'] = out
+    if err:
+        result['stderr'] = err
+
+    module.exit_json(**result)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
diff --git a/network/illumos/flowadm.py b/network/illumos/flowadm.py
new file mode 100644
index 00000000000..8b5807f7090
--- /dev/null
+++ b/network/illumos/flowadm.py
@@ -0,0 +1,523 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Adam Števko
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: flowadm
+short_description: Manage bandwidth resource control and priority for protocols, services and zones.
+description:
+    - Create/modify/remove networking bandwidth and associated resources for a type of traffic on a particular link.
+version_added: "2.2"
+author: Adam Števko (@xen0l)
+options:
+    name:
+        description: >
+            - A flow is defined as a set of attributes based on Layer 3 and Layer 4
+              headers, which can be used to identify a protocol, service, or a zone.
+        required: true
+        aliases: [ 'flow' ]
+    link:
+        description:
+            - Specifies the link to configure the flow on.
+        required: false
+    local_ip:
+        description:
+            - Identifies a network flow by the local IP address.
+        required: false
+    remote_ip:
+        description:
+            - Identifies a network flow by the remote IP address.
+        required: false
+    transport:
+        description: >
+            - Specifies a Layer 4 protocol to be used. It is typically used in combination with I(local_port) to
+              identify the service that needs special attention.
+        required: false
+    local_port:
+        description:
+            - Identifies a service specified by the local port.
+        required: false
+    dsfield:
+        description: >
+            - Identifies the 8-bit differentiated services field (as defined in
+              RFC 2474). The optional dsfield_mask is used to state the bits of interest in
+              the differentiated services field when comparing with the dsfield
+              value. Both values must be in hexadecimal.
+        required: false
+    maxbw:
+        description: >
+            - Sets the full duplex bandwidth for the flow. The bandwidth is
+              specified as an integer with one of the scale suffixes (K, M, or G
+              for Kbps, Mbps, and Gbps). If no units are specified, the input
+              value will be read as Mbps.
+        required: false
+    priority:
+        description:
+            - Sets the relative priority for the flow.
+        required: false
+        default: 'medium'
+        choices: [ 'low', 'medium', 'high' ]
+    temporary:
+        description:
+            - Specifies that the configured flow is temporary. Temporary
+              flows do not persist across reboots.
+        required: false
+        default: false
+        choices: [ "true", "false" ]
+    state:
+        description:
+            - Create, remove, or reset the flow.
+        required: false
+        default: present
+        choices: [ 'absent', 'present', 'resetted' ]
+'''
+
+EXAMPLES = '''
+# Limit SSH traffic to 100M via vnic0 interface
+- flowadm:
+    link: vnic0
+    flow: ssh_out
+    transport: tcp
+    local_port: 22
+    maxbw: 100M
+    state: present
+
+# Reset flow properties
+- flowadm:
+    name: dns
+    state: resetted
+
+# Configure policy for EF PHB (DSCP value of 101110 from RFC 2598) with a bandwidth of 500 Mbps and a high priority.
+- flowadm:
+    link: bge0
+    dsfield: '0x2e:0xfc'
+    maxbw: 500M
+    priority: high
+    flow: efphb-flow
+    state: present
+'''
+
+RETURN = '''
+name:
+    description: flow name
+    returned: always
+    type: string
+    sample: "http_drop"
+link:
+    description: flow's link
+    returned: if link is defined
+    type: string
+    sample: "vnic0"
+state:
+    description: state of the target
+    returned: always
+    type: string
+    sample: "present"
+temporary:
+    description: flow's persistence
+    returned: always
+    type: boolean
+    sample: "True"
+priority:
+    description: flow's priority
+    returned: if priority is defined
+    type: string
+    sample: "low"
+transport:
+    description: flow's transport
+    returned: if transport is defined
+    type: string
+    sample: "tcp"
+maxbw:
+    description: flow's maximum bandwidth
+    returned: if maxbw is defined
+    type: string
+    sample: "100M"
+local_ip:
+    description: flow's local IP address
+    returned: if local_ip is defined
+    type: string
+    sample: "10.0.0.42"
+local_port:
+    description: flow's local port
+    returned: if local_port is defined
+    type: int
+    sample: 1337
+remote_ip:
+    description: flow's remote IP address
+    returned: if remote_ip is defined
+    type: string
+    sample: "10.0.0.42"
+dsfield:
+    description: flow's differentiated services value
+    returned: if dsfield is defined
+    type: string
+    sample: "0x2e:0xfc"
+'''
+
+
+import socket
+
+SUPPORTED_TRANSPORTS = ['tcp', 'udp', 'sctp', 'icmp', 'icmpv6']
+SUPPORTED_PRIORITIES = ['low', 'medium', 'high']
+
+SUPPORTED_ATTRIBUTES = ['local_ip', 'remote_ip', 'transport', 'local_port', 'dsfield']
+SUPPORTED_PROPERTIES = ['maxbw', 'priority']
+
+
+class Flow(object):
+
+    def __init__(self, module):
+        self.module = module
+
+        self.name = module.params['name']
+        self.link = module.params['link']
+        self.local_ip = module.params['local_ip']
+        self.remote_ip = module.params['remote_ip']
+        self.transport = module.params['transport']
+        self.local_port = module.params['local_port']
+        self.dsfield = module.params['dsfield']
+        self.maxbw = module.params['maxbw']
+        self.priority = module.params['priority']
+        self.temporary = module.params['temporary']
+        self.state = module.params['state']
+
+        self._needs_updating = {
+            'maxbw': False,
+            'priority': False,
+        }
+
+    @classmethod
+    def is_valid_port(cls, port):
+        return 1 <= int(port) <= 65535
+
+    @classmethod
+    def is_valid_address(cls, ip):
+
+        netmask = None
+        if ip.count('/') == 1:
+            ip_address, netmask = ip.split('/')
+        else:
+            ip_address = ip
+
+        if len(ip_address.split('.')) == 4:
+            try:
+                socket.inet_pton(socket.AF_INET, ip_address)
+            except socket.error:
+                return False
+
+            if netmask is not None and (not netmask.isdigit() or not 0 <= int(netmask) <= 32):
+                return False
+        else:
+            try:
+                socket.inet_pton(socket.AF_INET6, ip_address)
+            except socket.error:
+                return False
+
+            if netmask is not None and (not netmask.isdigit() or not 0 <= int(netmask) <= 128):
+                return False
+
+        return True
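+
+    # Illustrative inputs for is_valid_address() above (an optional /prefix
+    # is accepted after the address):
+    #   '10.0.0.42'   -> True
+    #   '10.0.0.0/24' -> True
+    #   '300.1.1.1'   -> False (rejected by socket.inet_pton)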
+
+    @classmethod
+    def is_hex(cls, number):
+        try:
+            int(number, 16)
+        except ValueError:
+            return False
+
+        return True
+
+    @classmethod
+    def is_valid_dsfield(cls, dsfield):
+
+        dsmask = None
+
+        if dsfield.count(':') == 1:
+            dsval, dsmask = dsfield.split(':')
+        else:
+            dsval = dsfield
+
+        if not cls.is_hex(dsval) or (dsmask is not None and not cls.is_hex(dsmask)):
+            return False
+
+        if dsmask is not None and not 0x01 <= int(dsmask, 16) <= 0xff:
+            return False
+        if not 0x01 <= int(dsval, 16) <= 0xff:
+            return False
+
+        return True
+
+    def flow_exists(self):
+        cmd = [self.module.get_bin_path('flowadm')]
+
+        cmd.append('show-flow')
+        cmd.append(self.name)
+
+        (rc, _, _) = self.module.run_command(cmd)
+
+        if rc == 0:
+            return True
+        else:
+            return False
+
+    def delete_flow(self):
+        cmd = [self.module.get_bin_path('flowadm')]
+
+        cmd.append('remove-flow')
+        if self.temporary:
+            cmd.append('-t')
+        cmd.append(self.name)
+
+        return self.module.run_command(cmd)
+
+    def create_flow(self):
+        cmd = [self.module.get_bin_path('flowadm')]
+
+        cmd.append('add-flow')
+        cmd.append('-l')
+        cmd.append(self.link)
+
+        if self.local_ip:
+            cmd.append('-a')
+            cmd.append('local_ip=' + self.local_ip)
+
+        if self.remote_ip:
+            cmd.append('-a')
+            cmd.append('remote_ip=' + self.remote_ip)
+
+        if self.transport:
+            cmd.append('-a')
+            cmd.append('transport=' + self.transport)
+
+        if self.local_port:
+            cmd.append('-a')
+            cmd.append('local_port=' + self.local_port)
+
+        if self.dsfield:
+            cmd.append('-a')
+            cmd.append('dsfield=' + self.dsfield)
+
+        if self.maxbw:
+            cmd.append('-p')
+            cmd.append('maxbw=' + self.maxbw)
+
+        if self.priority:
+            cmd.append('-p')
+            cmd.append('priority=' + self.priority)
+
+        if self.temporary:
+            cmd.append('-t')
+        cmd.append(self.name)
+
+        return self.module.run_command(cmd)
+
+    def _query_flow_props(self):
+        cmd = [self.module.get_bin_path('flowadm')]
+
+        cmd.append('show-flowprop')
+        cmd.append('-c')
+        cmd.append('-o')
+        cmd.append('property,possible')
+        cmd.append(self.name)
+
+        return self.module.run_command(cmd)
+
+    def flow_needs_updating(self):
+        (rc, out, err) = self._query_flow_props()
+
+        needs_updating = False
+
+        if rc == 0:
+            properties = (line.split(':') for line in out.rstrip().split('\n'))
+            for prop, value in properties:
+                if prop == 'maxbw' and self.maxbw != value:
+                    self._needs_updating.update({prop: True})
+                    needs_updating = True
+
+                elif prop == 'priority' and self.priority != value:
+                    self._needs_updating.update({prop: True})
+                    needs_updating = True
+
+            return needs_updating
+        else:
+            self.module.fail_json(msg='Error while checking flow properties: %s' % err,
+                                  stderr=err,
+                                  rc=rc)
+
+    def update_flow(self):
+        cmd = [self.module.get_bin_path('flowadm')]
+
+        cmd.append('set-flowprop')
+
+        if self.maxbw and self._needs_updating['maxbw']:
+            cmd.append('-p')
+            cmd.append('maxbw=' + self.maxbw)
+
+        if self.priority and self._needs_updating['priority']:
+            cmd.append('-p')
+            cmd.append('priority=' + self.priority)
+
+        if self.temporary:
+            cmd.append('-t')
+        cmd.append(self.name)
+
+        return self.module.run_command(cmd)
+
+    def reset_flow(self):
+        # Reset flow properties to their defaults. flowadm(1M) has no
+        # "reset-flow" subcommand; properties are reset with reset-flowprop.
+        cmd = [self.module.get_bin_path('flowadm')]
+
+        cmd.append('reset-flowprop')
+
+        if self.temporary:
+            cmd.append('-t')
+        cmd.append(self.name)
+
+        return self.module.run_command(cmd)
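+
+# For reference, the flowadm(1M) invocations assembled above look roughly
+# like this (a sketch; which options appear depends on the parameters):
+#
+#   flowadm add-flow -l <link> [-a attr=value]... [-p prop=value]... [-t] <flow>
+#   flowadm remove-flow [-t] <flow>
+#   flowadm set-flowprop [-p prop=value]... [-t] <flow>
+#   flowadm reset-flowprop [-t] <flow>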
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            name=dict(required=True, aliases=['flow']),
+            link=dict(required=False),
+            local_ip=dict(required=False),
+            remote_ip=dict(required=False),
+            transport=dict(required=False, choices=SUPPORTED_TRANSPORTS),
+            local_port=dict(required=False),
+            dsfield=dict(required=False),
+            maxbw=dict(required=False),
+            priority=dict(required=False,
+                          default='medium',
+                          choices=SUPPORTED_PRIORITIES),
+            temporary=dict(default=False, type='bool'),
+            state=dict(required=False,
+                       default='present',
+                       choices=['absent', 'present', 'resetted']),
+        ),
+        mutually_exclusive=[
+            ('local_ip', 'remote_ip'),
+            ('local_ip', 'transport'),
+            ('local_ip', 'local_port'),
+            ('local_ip', 'dsfield'),
+            ('remote_ip', 'transport'),
+            ('remote_ip', 'local_port'),
+            ('remote_ip', 'dsfield'),
+            ('transport', 'dsfield'),
+            ('local_port', 'dsfield'),
+        ],
+        supports_check_mode=True
+    )
+
+    flow = Flow(module)
+
+    rc = None
+    out = ''
+    err = ''
+    result = {}
+    result['name'] = flow.name
+    result['state'] = flow.state
+    result['temporary'] = flow.temporary
+
+    if flow.link:
+        result['link'] = flow.link
+
+    if flow.maxbw:
+        result['maxbw'] = flow.maxbw
+
+    if flow.priority:
+        result['priority'] = flow.priority
+
+    if flow.local_ip:
+        if flow.is_valid_address(flow.local_ip):
+            result['local_ip'] = flow.local_ip
+        else:
+            module.fail_json(msg='Invalid IP address: %s' % flow.local_ip,
+                             rc=1)
+
+    if flow.remote_ip:
+        if flow.is_valid_address(flow.remote_ip):
+            result['remote_ip'] = flow.remote_ip
+        else:
+            module.fail_json(msg='Invalid IP address: %s' % flow.remote_ip,
+                             rc=1)
+
+    if flow.transport:
+        result['transport'] = flow.transport
+
+    if flow.local_port:
+        if flow.is_valid_port(flow.local_port):
+            result['local_port'] = flow.local_port
+        else:
+            module.fail_json(msg='Invalid port: %s' % flow.local_port,
+                             rc=1)
+
+    if flow.dsfield:
+        if flow.is_valid_dsfield(flow.dsfield):
+            result['dsfield'] = flow.dsfield
+        else:
+            module.fail_json(msg='Invalid dsfield: %s' % flow.dsfield,
+                             rc=1)
+
+    if flow.state == 'absent':
+        if flow.flow_exists():
+            if module.check_mode:
+                module.exit_json(changed=True)
+
+            (rc, out, err) = flow.delete_flow()
+            if rc != 0:
+                module.fail_json(msg='Error while deleting flow: "%s"' % err,
+                                 name=flow.name,
+                                 stderr=err,
+                                 rc=rc)
+
+    elif flow.state == 'present':
+        if not flow.flow_exists():
+            if module.check_mode:
+                module.exit_json(changed=True)
+
+            (rc, out, err) = flow.create_flow()
+            if rc != 0:
+                module.fail_json(msg='Error while creating flow: "%s"' % err,
+                                 name=flow.name,
+                                 stderr=err,
+                                 rc=rc)
+        else:
+            if flow.flow_needs_updating():
+                (rc, out, err) = flow.update_flow()
+                if rc != 0:
+                    module.fail_json(msg='Error while updating flow: "%s"' % err,
+                                     name=flow.name,
+                                     stderr=err,
+                                     rc=rc)
+
+    elif flow.state == 'resetted':
+        if flow.flow_exists():
+            if module.check_mode:
+                module.exit_json(changed=True)
+
+            (rc, out, err) = flow.reset_flow()
+            if rc != 0:
+                module.fail_json(msg='Error while resetting flow: "%s"' % err,
+                                 name=flow.name,
+                                 stderr=err,
+                                 rc=rc)
+
+    if rc is None:
+        result['changed'] = False
+    else:
+        result['changed'] = True
+
+    if out:
+        result['stdout'] = out
+    if err:
+        result['stderr'] = err
+
+    module.exit_json(**result)
+
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
diff --git a/network/illumos/ipadm_if.py b/network/illumos/ipadm_if.py
new file mode 100644
index 00000000000..d3d0c0af0bd
--- /dev/null
+++ b/network/illumos/ipadm_if.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Števko
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ipadm_if +short_description: Manage IP interfaces on Solaris/illumos systems. +description: + - Create, delete, enable or disable IP interfaces on Solaris/illumos + systems. +version_added: "2.2" +author: Adam Števko (@xen0l) +options: + name: + description: + - IP interface name. + required: true + temporary: + description: + - Specifies that the IP interface is temporary. Temporary IP + interfaces do not persist across reboots. + required: false + default: false + choices: [ "true", "false" ] + state: + description: + - Create or delete Solaris/illumos IP interfaces. + required: false + default: "present" + choices: [ "present", "absent", "enabled", "disabled" ] +''' + +EXAMPLES = ''' +# Create vnic0 interface +- ipadm_if: + name: vnic0 + state: enabled + +# Disable vnic0 interface +- ipadm_if: + name: vnic0 + state: disabled +''' + +RETURN = ''' +name: + description: IP interface name + returned: always + type: string + sample: "vnic0" +state: + description: state of the target + returned: always + type: string + sample: "present" +temporary: + description: persistence of a IP interface + returned: always + type: boolean + sample: "True" +''' + + +class IPInterface(object): + + def __init__(self, module): + self.module = module + + self.name = module.params['name'] + self.temporary = module.params['temporary'] + self.state = module.params['state'] + + def interface_exists(self): + cmd = [self.module.get_bin_path('ipadm', True)] + + cmd.append('show-if') + cmd.append(self.name) + + (rc, _, _) = self.module.run_command(cmd) + if rc == 0: + return True + else: + return False + + def interface_is_disabled(self): + cmd = [self.module.get_bin_path('ipadm', True)] + + cmd.append('show-if') + cmd.append('-o') + cmd.append('state') + cmd.append(self.name) + + (rc, out, err) = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(name=self.name, rc=rc, msg=err) + + return 'disabled' in out + + def create_interface(self): + cmd = [self.module.get_bin_path('ipadm', True)] + + cmd.append('create-if') + + if self.temporary: + cmd.append('-t') + + cmd.append(self.name) + + return self.module.run_command(cmd) + + def delete_interface(self): + cmd = [self.module.get_bin_path('ipadm', True)] + + cmd.append('delete-if') + + if self.temporary: + cmd.append('-t') + + cmd.append(self.name) + + return self.module.run_command(cmd) + + def enable_interface(self): + cmd = [self.module.get_bin_path('ipadm', True)] + + cmd.append('enable-if') + cmd.append('-t') + cmd.append(self.name) + + return self.module.run_command(cmd) + + def disable_interface(self): + cmd = [self.module.get_bin_path('ipadm', True)] + + cmd.append('disable-if') + cmd.append('-t') + cmd.append(self.name) + + return self.module.run_command(cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + temporary=dict(default=False, type='bool'), + state=dict(default='present', choices=['absent', + 'present', + 'enabled', + 'disabled']), + ), + supports_check_mode=True + ) + + interface = IPInterface(module) + + rc = None + out = '' + err = '' + result = {} + result['name'] = interface.name + result['state'] = interface.state + result['temporary'] = interface.temporary + + if interface.state == 'absent': + if interface.interface_exists(): + if 
module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = interface.delete_interface() + if rc != 0: + module.fail_json(name=interface.name, msg=err, rc=rc) + elif interface.state == 'present': + if not interface.interface_exists(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = interface.create_interface() + + if rc is not None and rc != 0: + module.fail_json(name=interface.name, msg=err, rc=rc) + + elif interface.state == 'enabled': + if interface.interface_is_disabled(): + (rc, out, err) = interface.enable_interface() + + if rc is not None and rc != 0: + module.fail_json(name=interface.name, msg=err, rc=rc) + + elif interface.state == 'disabled': + if not interface.interface_is_disabled(): + (rc, out, err) = interface.disable_interface() + + if rc is not None and rc != 0: + module.fail_json(name=interface.name, msg=err, rc=rc) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/network/illumos/ipadm_prop.py b/network/illumos/ipadm_prop.py new file mode 100644 index 00000000000..509ff82b1f7 --- /dev/null +++ b/network/illumos/ipadm_prop.py @@ -0,0 +1,270 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Adam Števko +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ipadm_prop +short_description: Manage protocol properties on Solaris/illumos systems. +description: + - Modify protocol properties on Solaris/illumos systems. +version_added: "2.2" +author: Adam Števko (@xen0l) +options: + protocol: + description: + - Specifies the procotol for which we want to manage properties. + required: true + property: + description: + - Specifies the name of property we want to manage. + required: true + value: + description: + - Specifies the value we want to set for the property. + required: false + temporary: + description: + - Specifies that the property value is temporary. Temporary + property values do not persist across reboots. + required: false + default: false + choices: [ "true", "false" ] + state: + description: + - Set or reset the property value. 
+ required: false + default: present + choices: [ "present", "absent", "reset" ] +''' + +EXAMPLES = ''' +# Set TCP receive buffer size +ipadm_prop: protocol=tcp property=recv_buf value=65536 + +# Reset UDP send buffer size to the default value +ipadm_prop: protocol=udp property=send_buf state=reset +''' + +RETURN = ''' +protocol: + description: property's protocol + returned: always + type: string + sample: "TCP" +property: + description: name of the property + returned: always + type: string + sample: "recv_maxbuf" +state: + description: state of the target + returned: always + type: string + sample: "present" +temporary: + description: property's persistence + returned: always + type: boolean + sample: "True" +value: + description: value of the property + returned: always + type: int/string (depends on property) + sample: 1024/never +''' + +SUPPORTED_PROTOCOLS = ['ipv4', 'ipv6', 'icmp', 'tcp', 'udp', 'sctp'] + + +class Prop(object): + + def __init__(self, module): + self.module = module + + self.protocol = module.params['protocol'] + self.property = module.params['property'] + self.value = module.params['value'] + self.temporary = module.params['temporary'] + self.state = module.params['state'] + + def property_exists(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('show-prop') + cmd.append('-p') + cmd.append(self.property) + cmd.append(self.protocol) + + (rc, _, _) = self.module.run_command(cmd) + + if rc == 0: + return True + else: + self.module.fail_json(msg='Unknown property "%s" for protocol %s' % + (self.property, self.protocol), + protocol=self.protocol, + property=self.property) + + def property_is_modified(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('show-prop') + cmd.append('-c') + cmd.append('-o') + cmd.append('current,default') + cmd.append('-p') + cmd.append(self.property) + cmd.append(self.protocol) + + (rc, out, _) = self.module.run_command(cmd) + + out = out.rstrip() + (value, default) = out.split(':') + + if rc == 0 and value == default: + return True + else: + return False + + def property_is_set(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('show-prop') + cmd.append('-c') + cmd.append('-o') + cmd.append('current') + cmd.append('-p') + cmd.append(self.property) + cmd.append(self.protocol) + + (rc, out, _) = self.module.run_command(cmd) + + out = out.rstrip() + + if rc == 0 and self.value == out: + return True + else: + return False + + def set_property(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('set-prop') + + if self.temporary: + cmd.append('-t') + + cmd.append('-p') + cmd.append(self.property + "=" + self.value) + cmd.append(self.protocol) + + return self.module.run_command(cmd) + + def reset_property(self): + cmd = [self.module.get_bin_path('ipadm')] + + cmd.append('reset-prop') + + if self.temporary: + cmd.append('-t') + + cmd.append('-p') + cmd.append(self.property) + cmd.append(self.protocol) + + return self.module.run_command(cmd) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + protocol=dict(required=True, choices=SUPPORTED_PROTOCOLS), + property=dict(required=True), + value=dict(required=False), + temporary=dict(default=False, type='bool'), + state=dict( + default='present', choices=['absent', 'present', 'reset']), + ), + supports_check_mode=True + ) + + prop = Prop(module) + + rc = None + out = '' + err = '' + result = {} + result['protocol'] = prop.protocol + result['property'] = prop.property + result['state'] = prop.state + result['temporary'] = 
prop.temporary + if prop.value: + result['value'] = prop.value + + if prop.state == 'absent' or prop.state == 'reset': + if prop.property_exists(): + if not prop.property_is_modified(): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = prop.reset_property() + if rc != 0: + module.fail_json(protocol=prop.protocol, + property=prop.property, + msg=err, + rc=rc) + + elif prop.state == 'present': + if prop.value is None: + module.fail_json(msg='Value is mandatory with state "present"') + + if prop.property_exists(): + if not prop.property_is_set(): + if module.check_mode: + module.exit_json(changed=True) + + (rc, out, err) = prop.set_property() + if rc != 0: + module.fail_json(protocol=prop.protocol, + property=prop.property, + msg=err, + rc=rc) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + + +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/network/ipify_facts.py b/network/ipify_facts.py new file mode 100644 index 00000000000..4ffe19d3f5c --- /dev/null +++ b/network/ipify_facts.py @@ -0,0 +1,118 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ipify_facts +short_description: Retrieve the public IP of your internet gateway. +description: + - If behind NAT and need to know the public IP of your internet gateway. +version_added: '2.0' +author: "René Moser (@resmo)" +options: + api_url: + description: + - URL of the ipify.org API service. + - C(?format=json) will be appended per default. + required: false + default: 'https://api.ipify.org' + timeout: + description: + - HTTP connection timeout in seconds. + required: false + default: 10 + version_added: "2.3" +notes: + - "Visit https://www.ipify.org to get more information." +''' + +EXAMPLES = ''' +# Gather IP facts from ipify.org +- name: get my public IP + ipify_facts: + +# Gather IP facts from your own ipify service endpoint with a custom timeout +- name: get my public IP + ipify_facts: + api_url: http://api.example.com/ipify + timeout: 20 +''' + +RETURN = ''' +--- +ipify_public_ip: + description: Public IP of the internet gateway. 
+ returned: success + type: string + sample: 1.2.3.4 +''' + +try: + import json +except ImportError: + try: + import simplejson as json + except ImportError: + # Let snippet from module_utils/basic.py return a proper error in this case + pass + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +class IpifyFacts(object): + + def __init__(self): + self.api_url = module.params.get('api_url') + self.timeout = module.params.get('timeout') + + def run(self): + result = { + 'ipify_public_ip': None + } + (response, info) = fetch_url(module=module, url=self.api_url + "?format=json" , force=True, timeout=self.timeout) + + if not response: + module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.api_url, self.timeout)) + + data = json.loads(response.read()) + result['ipify_public_ip'] = data.get('ip') + return result + +def main(): + global module + module = AnsibleModule( + argument_spec = dict( + api_url=dict(default='https://api.ipify.org'), + timeout=dict(type='int', default=10), + ), + supports_check_mode=True, + ) + + ipify_facts = IpifyFacts().run() + ipify_facts_result = dict(changed=False, ansible_facts=ipify_facts) + module.exit_json(**ipify_facts_result) + +if __name__ == '__main__': + main() diff --git a/network/ipinfoio_facts.py b/network/ipinfoio_facts.py new file mode 100644 index 00000000000..748c49dcc9a --- /dev/null +++ b/network/ipinfoio_facts.py @@ -0,0 +1,141 @@ +#!/usr/bin/python +# +# (c) 2016, Aleksei Kostiuk +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
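The ipify_facts module above reduces to a single HTTP GET with `?format=json` appended, followed by a JSON parse. A minimal standalone sketch of that round trip, using only the standard library; the get_public_ip name and its defaults are illustrative, not part of the module:

```python
# Sketch of the ipify round trip performed by ipify_facts above.
# get_public_ip is an illustrative name, not a function in the module.
import json

try:
    from urllib.request import urlopen   # Python 3
except ImportError:
    from urllib2 import urlopen          # Python 2

def get_public_ip(api_url='https://api.ipify.org', timeout=10):
    # The module appends ?format=json, so the service answers {"ip": "x.x.x.x"}
    response = urlopen(api_url + '?format=json', timeout=timeout)
    return json.loads(response.read().decode('utf-8')).get('ip')

if __name__ == '__main__':
    print(get_public_ip())
```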
+ +ANSIBLE_METADATA = {'status': 'preview', + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ipinfoio_facts +short_description: "Retrieve IP geolocation facts of a host's IP address" +description: + - "Gather IP geolocation facts of a host's IP address using ipinfo.io API" +version_added: "2.3" +author: "Aleksei Kostiuk (@akostyuk)" +options: + timeout: + description: + - HTTP connection timeout in seconds + required: false + default: 10 + http_agent: + description: + - Set http user agent + required: false + default: "ansible-ipinfoio-module/0.0.1" +notes: + - "Check http://ipinfo.io/ for more information" +''' + +EXAMPLES = ''' +# Retrieve geolocation data of a host's IP address +- name: get IP geolocation data + ipinfoio_facts: +''' + +RETURN = ''' +ansible_facts: + description: "Dictionary of ip geolocation facts for a host's IP address" + returned: changed + type: dictionary + contains: + ip: + description: "Public IP address of a host" + type: string + sample: "8.8.8.8" + hostname: + description: Domain name + type: string + sample: "google-public-dns-a.google.com" + country: + description: ISO 3166-1 alpha-2 country code + type: string + sample: "US" + region: + description: State or province name + type: string + sample: "California" + city: + description: City name + type: string + sample: "Mountain View" + loc: + description: Latitude and Longitude of the location + type: string + sample: "37.3860,-122.0838" + org: + description: "organization's name" + type: string + sample: "AS3356 Level 3 Communications, Inc." + postal: + description: Postal code + type: string + sample: "94035" +''' + +USER_AGENT = 'ansible-ipinfoio-module/0.0.1' + + +class IpinfoioFacts(object): + + def __init__(self, module): + self.url = 'https://ipinfo.io/json' + self.timeout = module.params.get('timeout') + self.module = module + + def get_geo_data(self): + response, info = fetch_url(self.module, self.url, force=True, # NOQA + timeout=self.timeout) + try: + info['status'] == 200 + except AssertionError: + self.module.fail_json(msg='Could not get {} page, ' + 'check for connectivity!'.format(self.url)) + else: + try: + content = response.read() + result = self.module.from_json(content.decode('utf8')) + except ValueError: + self.module.fail_json( + msg='Failed to parse the ipinfo.io response: ' + '{0} {1}'.format(self.url, content)) + else: + return result + + +def main(): + module = AnsibleModule( # NOQA + argument_spec=dict( + http_agent=dict(default=USER_AGENT), + timeout=dict(type='int', default=10), + ), + supports_check_mode=True, + ) + + ipinfoio = IpinfoioFacts(module) + ipinfoio_result = dict( + changed=False, ansible_facts=ipinfoio.get_geo_data()) + module.exit_json(**ipinfoio_result) + +from ansible.module_utils.basic import * # NOQA +from ansible.module_utils.urls import * # NOQA + +if __name__ == '__main__': + main() diff --git a/network/lldp.py b/network/lldp.py index fd1b1092d5e..f222d765fe9 100644 --- a/network/lldp.py +++ b/network/lldp.py @@ -16,6 +16,10 @@ import subprocess +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: lldp @@ -36,8 +40,9 @@ lldp: - name: Print each switch/port - debug: msg="{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifalias'] }} - with_items: lldp.keys() + debug: + msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifalias'] }}" + with_items: "{{ lldp.keys() }}" # TASK: [Print each switch/port] 
*********************************************************** # ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"} @@ -82,5 +87,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/network/netconf/__init__.py b/network/netconf/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/network/netconf/netconf_config.py b/network/netconf/netconf_config.py new file mode 100755 index 00000000000..7ed79a908b5 --- /dev/null +++ b/network/netconf/netconf_config.py @@ -0,0 +1,225 @@ +#!/usr/bin/python + +# (c) 2016, Leandro Lisboa Penz +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: netconf_config +author: "Leandro Lisboa Penz (@lpenz)" +short_description: netconf device configuration +description: + - Netconf is a network management protocol developed and standardized by + the IETF. It is documented in RFC 6241. + + - This module allows the user to send a configuration XML file to a netconf + device, and detects if there was a configuration change. +notes: + - This module supports devices with and without the the candidate and + confirmed-commit capabilities. It always use the safer feature. +version_added: "2.2" +options: + host: + description: + - the hostname or ip address of the netconf device + required: true + port: + description: + - the netconf port + default: 830 + required: false + hostkey_verify: + description: + - if true, the ssh host key of the device must match a ssh key present on the host + - if false, the ssh host key of the device is not checked + default: true + required: false + username: + description: + - the username to authenticate with + required: true + password: + description: + - password of the user to authenticate with + required: true + xml: + description: + - the XML content to send to the device + required: true + + +requirements: + - "python >= 2.6" + - "ncclient" +''' + +EXAMPLES = ''' +- name: set ntp server in the device + netconf_config: + host: 10.0.0.1 + username: admin + password: admin + xml: | + + + + true + + ntp1 +
<udp><address>127.0.0.1</address></udp>
+ </server>
+ </ntp>
+ </system>
+ </config>
+ +- name: wipe ntp configuration + netconf_config: + host: 10.0.0.1 + username: admin + password: admin + xml: | + + + + false + + ntp1 + + + + + +''' + +RETURN = ''' +server_capabilities: + description: list of capabilities of the server + returned: success + type: list of strings + sample: ['urn:ietf:params:netconf:base:1.1','urn:ietf:params:netconf:capability:confirmed-commit:1.0','urn:ietf:params:netconf:capability:candidate:1.0'] + +''' + +import xml.dom.minidom +try: + import ncclient.manager + HAS_NCCLIENT = True +except ImportError: + HAS_NCCLIENT = False + + +import logging + + +def netconf_edit_config(m, xml, commit, retkwargs): + if ":candidate" in m.server_capabilities: + datastore = 'candidate' + else: + datastore = 'running' + m.lock(target=datastore) + try: + m.discard_changes() + config_before = m.get_config(source=datastore) + m.edit_config(target=datastore, config=xml) + config_after = m.get_config(source=datastore) + changed = config_before.data_xml != config_after.data_xml + if changed and commit: + if ":confirmed-commit" in m.server_capabilities: + m.commit(confirmed=True) + m.commit() + else: + m.commit() + return changed + finally: + m.unlock(target=datastore) + + +# ------------------------------------------------------------------- # +# Main + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + host=dict(type='str', required=True), + port=dict(type='int', default=830), + hostkey_verify=dict(type='bool', default=True), + username=dict(type='str', required=True, no_log=True), + password=dict(type='str', required=True, no_log=True), + xml=dict(type='str', required=True), + ) + ) + + if not HAS_NCCLIENT: + module.fail_json(msg='could not import the python library ' + 'ncclient required by this module') + + try: + xml.dom.minidom.parseString(module.params['xml']) + except: + e = get_exception() + module.fail_json( + msg='error parsing XML: ' + + str(e) + ) + return + + nckwargs = dict( + host=module.params['host'], + port=module.params['port'], + hostkey_verify=module.params['hostkey_verify'], + username=module.params['username'], + password=module.params['password'], + ) + retkwargs = dict() + + try: + m = ncclient.manager.connect(**nckwargs) + except ncclient.transport.errors.AuthenticationError: + module.fail_json( + msg='authentication failed while connecting to device' + ) + except: + e = get_exception() + module.fail_json( + msg='error connecting to the device: ' + + str(e) + ) + return + retkwargs['server_capabilities'] = list(m.server_capabilities) + try: + changed = netconf_edit_config( + m=m, + xml=module.params['xml'], + commit=True, + retkwargs=retkwargs, + ) + finally: + m.close_session() + module.exit_json(changed=changed, **retkwargs) + + +# import module snippets +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/network/nmcli.py b/network/nmcli.py index c674114a32e..86a844c7ee0 100644 --- a/network/nmcli.py +++ b/network/nmcli.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . 
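netconf_config above always picks the safest feature set the server advertises: the candidate datastore when :candidate is present, and a confirmed commit when :confirmed-commit is present. A rough ncclient sketch of that decision path, with placeholder host and credentials:

```python
# Sketch of the capability-driven datastore/commit choice in netconf_config.
# Host, port and credentials are placeholders for any reachable NETCONF server.
import ncclient.manager

m = ncclient.manager.connect(host='10.0.0.1', port=830,
                             username='admin', password='admin',
                             hostkey_verify=False)
# Fall back to the running datastore when :candidate is not advertised.
datastore = 'candidate' if ':candidate' in m.server_capabilities else 'running'
m.lock(target=datastore)
try:
    before = m.get_config(source=datastore).data_xml
    # m.edit_config(target=datastore, config=xml) would go here, as in the module
    if datastore == 'candidate':
        if ':confirmed-commit' in m.server_capabilities:
            m.commit(confirmed=True)   # device rolls back unless confirmed
        m.commit()                     # confirm (or plain commit)
finally:
    m.unlock(target=datastore)
    m.close_session()
```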
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION=''' --- module: nmcli @@ -73,16 +77,16 @@ required: False default: None description: - - 'The IPv4 address to this interface using this format ie: "192.168.1.24/24"' + - 'The IPv4 address to this interface using this format ie: "192.0.2.24/24"' gw4: required: False description: - - 'The IPv4 gateway for this interface using this format ie: "192.168.100.1"' + - 'The IPv4 gateway for this interface using this format ie: "192.0.2.1"' dns4: required: False default: None description: - - 'A list of upto 3 dns servers, ipv4 format e.g. To add two IPv4 DNS server addresses: ["8.8.8.8 8.8.4.4"]' + - 'A list of upto 3 dns servers, ipv4 format e.g. To add two IPv4 DNS server addresses: ["192.0.2.53", "198.51.100.53"]' ip6: required: False default: None @@ -228,46 +232,89 @@ ```yml --- #devops_os_define_network -storage_gw: "192.168.0.254" -external_gw: "10.10.0.254" -tenant_gw: "172.100.0.254" +storage_gw: "192.0.2.254" +external_gw: "198.51.100.254" +tenant_gw: "203.0.113.254" #Team vars nmcli_team: - - {conn_name: 'tenant', ip4: "{{tenant_ip}}", gw4: "{{tenant_gw}}"} - - {conn_name: 'external', ip4: "{{external_ip}}", gw4: "{{external_gw}}"} - - {conn_name: 'storage', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}"} + - conn_name: tenant + ip4: '{{ tenant_ip }}' + gw4: '{{ tenant_gw }}' + - conn_name: external + ip4: '{{ external_ip }}' + gw4: '{{ external_gw }}' + - conn_name: storage + ip4: '{{ storage_ip }}' + gw4: '{{ storage_gw }}' nmcli_team_slave: - - {conn_name: 'em1', ifname: 'em1', master: 'tenant'} - - {conn_name: 'em2', ifname: 'em2', master: 'tenant'} - - {conn_name: 'p2p1', ifname: 'p2p1', master: 'storage'} - - {conn_name: 'p2p2', ifname: 'p2p2', master: 'external'} + - conn_name: em1 + ifname: em1 + master: tenant + - conn_name: em2 + ifname: em2 + master: tenant + - conn_name: p2p1 + ifname: p2p1 + master: storage + - conn_name: p2p2 + ifname: p2p2 + master: external #bond vars nmcli_bond: - - {conn_name: 'tenant', ip4: "{{tenant_ip}}", gw4: '', mode: 'balance-rr'} - - {conn_name: 'external', ip4: "{{external_ip}}", gw4: '', mode: 'balance-rr'} - - {conn_name: 'storage', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}", mode: 'balance-rr'} + - conn_name: tenant + ip4: '{{ tenant_ip }}' + gw4: '' + mode: balance-rr + - conn_name: external + ip4: '{{ external_ip }}' + gw4: '' + mode: balance-rr + - conn_name: storage + ip4: '{{ storage_ip }}' + gw4: '{{ storage_gw }}' + mode: balance-rr nmcli_bond_slave: - - {conn_name: 'em1', ifname: 'em1', master: 'tenant'} - - {conn_name: 'em2', ifname: 'em2', master: 'tenant'} - - {conn_name: 'p2p1', ifname: 'p2p1', master: 'storage'} - - {conn_name: 'p2p2', ifname: 'p2p2', master: 'external'} + - conn_name: em1 + ifname: em1 + master: tenant + - conn_name: em2 + ifname: em2 + master: tenant + - conn_name: p2p1 + ifname: p2p1 + master: storage + - conn_name: p2p2 + ifname: p2p2 + master: external #ethernet vars nmcli_ethernet: - - {conn_name: 'em1', ifname: 'em1', ip4: "{{tenant_ip}}", gw4: "{{tenant_gw}}"} - - {conn_name: 'em2', ifname: 'em2', ip4: "{{tenant_ip1}}", gw4: "{{tenant_gw}}"} - - {conn_name: 'p2p1', ifname: 'p2p1', ip4: "{{storage_ip}}", gw4: "{{storage_gw}}"} - - {conn_name: 'p2p2', ifname: 'p2p2', ip4: "{{external_ip}}", gw4: "{{external_gw}}"} + - conn_name: em1 + ifname: em1 + ip4: '{{ tenant_ip }}' + gw4: '{{ tenant_gw }}' + - conn_name: em2 + ifname: em2 + ip4: '{{ tenant_ip1 }}' + gw4: '{{ tenant_gw }}' + - conn_name: p2p1 
+ ifname: p2p1 + ip4: '{{ storage_ip }}' + gw4: '{{ storage_gw }}' + - conn_name: p2p2 + ifname: p2p2 + ip4: '{{ external_ip }}' + gw4: '{{ external_gw }}' ``` ### host_vars ```yml --- -storage_ip: "192.168.160.21/23" -external_ip: "10.10.152.21/21" -tenant_ip: "192.168.200.21/23" +storage_ip: "192.0.2.91/23" +external_ip: "198.51.100.23/21" +tenant_ip: "203.0.113.77/23" ``` @@ -280,41 +327,70 @@ remote_user: root tasks: -- name: install needed network manager libs - yum: name={{ item }} state=installed - with_items: - - libnm-qt-devel.x86_64 - - nm-connection-editor.x86_64 - - libsemanage-python - - policycoreutils-python + - name: install needed network manager libs + yum: + name: '{{ item }}' + state: installed + with_items: + - NetworkManager-glib + - libnm-qt-devel.x86_64 + - nm-connection-editor.x86_64 + - libsemanage-python + - policycoreutils-python ##### Working with all cloud nodes - Teaming - name: try nmcli add team - conn_name only & ip4 gw4 - nmcli: type=team conn_name={{item.conn_name}} ip4={{item.ip4}} gw4={{item.gw4}} state=present + nmcli: + type: team + conn_name: '{{ item.conn_name }}' + ip4: '{{ item.ip4 }}' + gw4: '{{ item.gw4 }}' + state: present with_items: - - "{{nmcli_team}}" + - '{{ nmcli_team }}' - name: try nmcli add teams-slave - nmcli: type=team-slave conn_name={{item.conn_name}} ifname={{item.ifname}} master={{item.master}} state=present + nmcli: + type: team-slave + conn_name: '{{ item.conn_name }}' + ifname: '{{ item.ifname }}' + master: '{{ item.master }}' + state: present with_items: - - "{{nmcli_team_slave}}" + - '{{ nmcli_team_slave }}' ###### Working with all cloud nodes - Bonding # - name: try nmcli add bond - conn_name only & ip4 gw4 mode -# nmcli: type=bond conn_name={{item.conn_name}} ip4={{item.ip4}} gw4={{item.gw4}} mode={{item.mode}} state=present +# nmcli: +# type: bond +# conn_name: '{{ item.conn_name }}' +# ip4: '{{ item.ip4 }}' +# gw4: '{{ item.gw4 }}' +# mode: '{{ item.mode }}' +# state: present # with_items: -# - "{{nmcli_bond}}" +# - '{{ nmcli_bond }}' # # - name: try nmcli add bond-slave -# nmcli: type=bond-slave conn_name={{item.conn_name}} ifname={{item.ifname}} master={{item.master}} state=present +# nmcli: +# type: bond-slave +# conn_name: '{{ item.conn_name }}' +# ifname: '{{ item.ifname }}' +# master: '{{ item.master }}' +# state: present # with_items: -# - "{{nmcli_bond_slave}}" +# - '{{ nmcli_bond_slave }}' ##### Working with all cloud nodes - Ethernet # - name: nmcli add Ethernet - conn_name only & ip4 gw4 -# nmcli: type=ethernet conn_name={{item.conn_name}} ip4={{item.ip4}} gw4={{item.gw4}} state=present +# nmcli: +# type: ethernet +# conn_name: '{{ item.conn_name }}' +# ip4: '{{ item.ip4 }}' +# gw4: '{{ item.gw4 }}' +# state: present # with_items: -# - "{{nmcli_ethernet}}" +# - '{{ nmcli_ethernet }}' ``` ## playbook-del.yml example @@ -326,41 +402,77 @@ tasks: - name: try nmcli del team - multiple - nmcli: conn_name={{item.conn_name}} state=absent + nmcli: + conn_name: '{{ item.conn_name }}' + state: absent with_items: - - { conn_name: 'em1'} - - { conn_name: 'em2'} - - { conn_name: 'p1p1'} - - { conn_name: 'p1p2'} - - { conn_name: 'p2p1'} - - { conn_name: 'p2p2'} - - { conn_name: 'tenant'} - - { conn_name: 'storage'} - - { conn_name: 'external'} - - { conn_name: 'team-em1'} - - { conn_name: 'team-em2'} - - { conn_name: 'team-p1p1'} - - { conn_name: 'team-p1p2'} - - { conn_name: 'team-p2p1'} - - { conn_name: 'team-p2p2'} + - conn_name: em1 + - conn_name: em2 + - conn_name: p1p1 + - conn_name: p1p2 + - conn_name: p2p1 + - 
conn_name: p2p2 + - conn_name: tenant + - conn_name: storage + - conn_name: external + - conn_name: team-em1 + - conn_name: team-em2 + - conn_name: team-p1p1 + - conn_name: team-p1p2 + - conn_name: team-p2p1 + - conn_name: team-p2p2 ``` # To add an Ethernet connection with static IP configuration, issue a command as follows -- nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present +- nmcli: + conn_name: my-eth1 + ifname: eth1 + type: ethernet + ip4: 192.0.2.100/24 + gw4: 192.0.2.1 + state: present # To add an Team connection with static IP configuration, issue a command as follows -- nmcli: conn_name=my-team1 ifname=my-team1 type=team ip4=192.168.100.100/24 gw4=192.168.100.1 state=present autoconnect=yes +- nmcli: + conn_name: my-team1 + ifname: my-team1 + type: team + ip4: 192.0.2.100/24 + gw4: 192.0.2.1 + state: present + autoconnect: yes # Optionally, at the same time specify IPv6 addresses for the device as follows: -- nmcli: conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 ip6=abbe::cafe gw6=2001:db8::1 state=present +- nmcli: + conn_name: my-eth1 + ifname: eth1 + type: ethernet + ip4: 192.0.2.100/24 + gw4: 192.0.2.1 + ip6: '2001:db8::cafe' + gw6: '2001:db8::1' + state: present # To add two IPv4 DNS server addresses: --nmcli: conn_name=my-eth1 dns4=["8.8.8.8", "8.8.4.4"] state=present +- nmcli: + conn_name: my-eth1 + dns4: + - 192.0.2.53 + - 198.51.100.53 + state: present # To make a profile usable for all compatible Ethernet interfaces, issue a command as follows -- nmcli: ctype=ethernet name=my-eth1 ifname="*" state=present +- nmcli: + ctype: ethernet + name: my-eth1 + ifname: * + state: present # To change the property of a setting e.g. MTU, issue a command as follows: -- nmcli: conn_name=my-eth1 mtu=9000 state=present +- nmcli: + conn_name: my-eth1 + mtu: 9000 + type: ethernet + state: present Exit Status's: - nmcli exits with status 0 if it succeeds, a value greater than 0 is @@ -379,10 +491,22 @@ ''' # import ansible.module_utils.basic import os -import syslog import sys -import dbus -from gi.repository import NetworkManager, NMClient +HAVE_DBUS=False +try: + import dbus + HAVE_DBUS=True +except ImportError: + pass + +HAVE_NM_CLIENT=False +try: + from gi.repository import NetworkManager, NMClient + HAVE_NM_CLIENT=True +except ImportError: + pass + +from ansible.module_utils.basic import AnsibleModule class Nmcli(object): @@ -466,14 +590,8 @@ def __init__(self, module): self.flags=module.params['flags'] self.ingress=module.params['ingress'] self.egress=module.params['egress'] - # select whether we dump additional debug info through syslog - self.syslogging=True def execute_command(self, cmd, use_unsafe_shell=False, data=None): - if self.syslogging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) - syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd)) - return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) def merge_secrets(self, proxy, config, setting_name): @@ -486,7 +604,7 @@ def merge_secrets(self, proxy, config, setting_name): for setting in secrets: for key in secrets[setting]: config[setting_name][key]=secrets[setting][key] - except Exception, e: + except Exception as e: pass def dict_to_string(self, d): @@ -497,13 +615,13 @@ def dict_to_string(self, d): val=d[key] str_val="" add_string=True - if type(val)==type(dbus.Array([])): + if isinstance(val, dbus.Array): for elt in val: - if type(elt)==type(dbus.Byte(1)): + if isinstance(elt, 
dbus.Byte): str_val+="%s " % int(elt) - elif type(elt)==type(dbus.String("")): + elif isinstance(elt, dbus.String): str_val+="%s" % elt - elif type(val)==type(dbus.Dictionary({})): + elif isinstance(val, dbus.Dictionary): dstr+=self.dict_to_string(val) add_string=False else: @@ -520,6 +638,12 @@ def connection_to_string(self, config): return setting_list # print "" + def bool_to_string(self, boolean): + if boolean: + return "yes" + else: + return "no" + def list_connection_info(self): # Ask the settings service for the list of connections it provides bus=dbus.SystemBus() @@ -608,7 +732,7 @@ def create_connection_team(self): cmd.append(self.gw6) if self.autoconnect is not None: cmd.append('autoconnect') - cmd.append(self.autoconnect) + cmd.append(self.bool_to_string(self.autoconnect)) return cmd def modify_connection_team(self): @@ -631,13 +755,13 @@ def modify_connection_team(self): cmd.append(self.ip6) if self.gw6 is not None: cmd.append('ipv6.gateway') - cmd.append(self.gw4) + cmd.append(self.gw6) if self.dns6 is not None: cmd.append('ipv6.dns') cmd.append(self.dns6) if self.autoconnect is not None: cmd.append('autoconnect') - cmd.append(self.autoconnect) + cmd.append(self.bool_to_string(self.autoconnect)) # Can't use MTU with team return cmd @@ -710,7 +834,7 @@ def create_connection_bond(self): cmd.append(self.gw6) if self.autoconnect is not None: cmd.append('autoconnect') - cmd.append(self.autoconnect) + cmd.append(self.bool_to_string(self.autoconnect)) if self.mode is not None: cmd.append('mode') cmd.append(self.mode) @@ -751,13 +875,13 @@ def modify_connection_bond(self): cmd.append(self.ip6) if self.gw6 is not None: cmd.append('ipv6.gateway') - cmd.append(self.gw4) + cmd.append(self.gw6) if self.dns6 is not None: cmd.append('ipv6.dns') cmd.append(self.dns6) if self.autoconnect is not None: cmd.append('autoconnect') - cmd.append(self.autoconnect) + cmd.append(self.bool_to_string(self.autoconnect)) return cmd def create_connection_bond_slave(self): @@ -796,8 +920,8 @@ def create_connection_ethernet(self): cmd=[self.module.get_bin_path('nmcli', True)] # format for creating ethernet interface # To add an Ethernet connection with static IP configuration, issue a command as follows - # - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present - # nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.168.100.100/24 gw4 192.168.100.1 + # - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present + # nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.0.2.100/24 gw4 192.0.2.1 cmd.append('con') cmd.append('add') cmd.append('type') @@ -826,15 +950,15 @@ def create_connection_ethernet(self): cmd.append(self.gw6) if self.autoconnect is not None: cmd.append('autoconnect') - cmd.append(self.autoconnect) + cmd.append(self.bool_to_string(self.autoconnect)) return cmd def modify_connection_ethernet(self): cmd=[self.module.get_bin_path('nmcli', True)] # format for modifying ethernet interface # To add an Ethernet connection with static IP configuration, issue a command as follows - # - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present - # nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.168.100.100/24 gw4 192.168.100.1 + # - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present + # nmcli con add con-name my-eth1 ifname eth1 type ethernet 
ip4 192.0.2.100/24 gw4 192.0.2.1 cmd.append('con') cmd.append('mod') cmd.append(self.conn_name) @@ -852,7 +976,7 @@ def modify_connection_ethernet(self): cmd.append(self.ip6) if self.gw6 is not None: cmd.append('ipv6.gateway') - cmd.append(self.gw4) + cmd.append(self.gw6) if self.dns6 is not None: cmd.append('ipv6.dns') cmd.append(self.dns6) @@ -861,7 +985,7 @@ def modify_connection_ethernet(self): cmd.append(self.mtu) if self.autoconnect is not None: cmd.append('autoconnect') - cmd.append(self.autoconnect) + cmd.append(self.bool_to_string(self.autoconnect)) return cmd def create_connection_bridge(self): @@ -970,7 +1094,7 @@ def main(): # Parsing argument file module=AnsibleModule( argument_spec=dict( - autoconnect=dict(required=False, default=None, choices=['yes', 'no'], type='str'), + autoconnect=dict(required=False, default=None, type='bool'), state=dict(required=True, choices=['present', 'absent'], type='str'), conn_name=dict(required=True, type='str'), master=dict(required=False, default=None, type='str'), @@ -993,7 +1117,7 @@ def main(): mtu=dict(required=False, default=None, type='str'), mac=dict(required=False, default=None, type='str'), # bridge specific vars - stp=dict(required=False, default='yes', choices=['yes', 'no'], type='str'), + stp=dict(required=False, default=True, type='bool'), priority=dict(required=False, default="128", type='str'), slavepriority=dict(required=False, default="32", type='str'), forwarddelay=dict(required=False, default="15", type='str'), @@ -1010,6 +1134,12 @@ def main(): supports_check_mode=True ) + if not HAVE_DBUS: + module.fail_json(msg="This module requires dbus python bindings") + + if not HAVE_NM_CLIENT: + module.fail_json(msg="This module requires NetworkManager glib API") + nmcli=Nmcli(module) rc=None @@ -1064,7 +1194,5 @@ def main(): module.exit_json(**result) -# import module snippets -from ansible.module_utils.basic import * - -main() +if __name__ == '__main__': + main() diff --git a/network/openvswitch_bridge.py b/network/openvswitch_bridge.py index 8f29735862f..9816e2bff3a 100644 --- a/network/openvswitch_bridge.py +++ b/network/openvswitch_bridge.py @@ -22,6 +22,10 @@ # pylint: disable=C0111 +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: openvswitch_bridge @@ -35,7 +39,19 @@ bridge: required: true description: - - Name of bridge to manage + - Name of bridge or fake bridge to manage + parent: + version_added: "2.3" + required: false + default: None + description: + - Bridge parent of the fake bridge to manage + vlan: + version_added: "2.3" + required: false + default: None + description: + - The VLAN id of the fake bridge to manage (must be between 0 and 4095) state: required: false default: "present" @@ -65,13 +81,25 @@ EXAMPLES = ''' # Create a bridge named br-int -- openvswitch_bridge: bridge=br-int state=present +- openvswitch_bridge: + bridge: br-int + state: present + +# Create a fake bridge named br-int within br-parent on the VLAN 405 +- openvswitch_bridge: + bridge: br-int + parent: br-parent + vlan: 405 + state: present # Create an integration bridge -- openvswitch_bridge: bridge=br-int state=present fail_mode=secure +- openvswitch_bridge: + bridge: br-int + state: present + fail_mode: secure args: external_ids: - bridge-id: "br-int" + bridge-id: br-int ''' @@ -80,10 +108,18 @@ class OVSBridge(object): def __init__(self, module): self.module = module self.bridge = module.params['bridge'] + self.parent = module.params['parent'] + self.vlan = 
module.params['vlan'] self.state = module.params['state'] self.timeout = module.params['timeout'] self.fail_mode = module.params['fail_mode'] + if self.parent: + if self.vlan is None: + self.module.fail_json(msg='VLAN id must be set when parent is defined') + elif self.vlan < 0 or self.vlan > 4095: + self.module.fail_json(msg='Invalid VLAN ID (must be between 0 and 4095)') + def _vsctl(self, command): '''Run ovs-vsctl command''' return self.module.run_command(['ovs-vsctl', '-t', @@ -100,7 +136,11 @@ def exists(self): def add(self): '''Create the bridge''' - rtc, _, err = self._vsctl(['add-br', self.bridge]) + if self.parent and self.vlan: # Add fake bridge + rtc, _, err = self._vsctl(['add-br', self.bridge, self.parent, self.vlan]) + else: + rtc, _, err = self._vsctl(['add-br', self.bridge]) + if rtc != 0: self.module.fail_json(msg=err) if self.fail_mode: @@ -143,7 +183,8 @@ def check(self): changed = True elif self.state == 'present' and not self.exists(): changed = True - except Exception, earg: + except Exception: + earg = get_exception() self.module.fail_json(msg=str(earg)) # pylint: enable=W0703 @@ -167,9 +208,7 @@ def run(self): current_fail_mode = self.get_fail_mode() if self.fail_mode and (self.fail_mode != current_fail_mode): - syslog.syslog(syslog.LOG_NOTICE, - "changing fail mode %s to %s" % - (current_fail_mode, self.fail_mode)) + self.module.log( "changing fail mode %s to %s" % (current_fail_mode, self.fail_mode)) self.set_fail_mode() changed = True @@ -191,7 +230,8 @@ def run(self): self.set_external_id(key, None)): changed = True - except Exception, earg: + except Exception: + earg = get_exception() self.module.fail_json(msg=str(earg)) # pylint: enable=W0703 self.module.exit_json(changed=changed) @@ -249,9 +289,11 @@ def main(): module = AnsibleModule( argument_spec={ 'bridge': {'required': True}, + 'parent': {'default': None}, + 'vlan': {'default': None, 'type': 'int'}, 'state': {'default': 'present', 'choices': ['present', 'absent']}, 'timeout': {'default': 5, 'type': 'int'}, - 'external_ids': {'default': None}, + 'external_ids': {'default': None, 'type': 'dict'}, 'fail_mode': {'default': None}, }, supports_check_mode=True, @@ -269,4 +311,7 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/network/openvswitch_db.py b/network/openvswitch_db.py new file mode 100644 index 00000000000..6d769e43672 --- /dev/null +++ b/network/openvswitch_db.py @@ -0,0 +1,146 @@ +#!/usr/bin/python +# coding: utf-8 -*- + +# pylint: disable=C0111 + +# +# (c) 2015, Mark Hamilton +# +# Portions copyright @ 2015 VMware, Inc. +# +# This file is part of Ansible +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
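The fake-bridge support added to openvswitch_bridge above rests on one rule: a parent bridge implies a VLAN id between 0 and 4095. A small sketch of that check in isolation; validate_fake_bridge is a hypothetical helper, not a function in the module:

```python
# Hypothetical helper mirroring openvswitch_bridge's parent/vlan validation.
def validate_fake_bridge(parent, vlan):
    if parent is None:
        return                     # plain bridge: nothing to validate
    if vlan is None:
        raise ValueError('VLAN id must be set when parent is defined')
    if not 0 <= vlan <= 4095:
        raise ValueError('Invalid VLAN ID (must be between 0 and 4095)')

# When both checks pass, the module runs the fake-bridge form of add-br, e.g.:
#   ovs-vsctl add-br br-int br-parent 405
# (the VLAN id must be stringified before being handed to run_command)
```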
+ +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: openvswitch_db +author: "Mark Hamilton (mhamilton@vmware.com)" +version_added: 2.0 +short_description: Configure open vswitch database. +requirements: [ "ovs-vsctl >= 2.3.3" ] +description: + - Set column values in record in database table. +options: + table: + required: true + description: + - Identifies the table in the database. + record: + required: true + description: + - Identifies the recoard in the table. + column: + required: true + description: + - Identifies the column in the record. + key: + required: true + description: + - Identifies the key in the record column + value: + required: true + description: + - Expected value for the table, record, column and key. + timeout: + required: false + default: 5 + description: + - How long to wait for ovs-vswitchd to respond +""" + +EXAMPLES = ''' +# Increase the maximum idle time to 50 seconds before pruning unused kernel +# rules. +- openvswitch_db: + table: open_vswitch + record: . + col: other_config + key: max-idle + value: 50000 + +# Disable in band copy +- openvswitch_db: + table: Bridge + record: br-int + col: other_config + key: disable-in-band + value: true +''' + + +def cmd_run(module, cmd, check_rc=True): + """ Log and run ovs-vsctl command. """ + return module.run_command(cmd.split(" "), check_rc=check_rc) + + +def params_set(module): + """ Implement the ovs-vsctl set commands. """ + + changed = False + + ## + # Place in params dictionary in order to support the string format below. + module.params["ovs-vsctl"] = module.get_bin_path("ovs-vsctl", True) + + fmt = "%(ovs-vsctl)s -t %(timeout)s get %(table)s %(record)s " \ + "%(col)s:%(key)s" + + cmd = fmt % module.params + + (_, output, _) = cmd_run(module, cmd, False) + if module.params['value'] not in output: + fmt = "%(ovs-vsctl)s -t %(timeout)s set %(table)s %(record)s " \ + "%(col)s:%(key)s=%(value)s" + cmd = fmt % module.params + ## + # Check if flow exists and is the same. + (rtc, _, err) = cmd_run(module, cmd) + if rtc != 0: + module.fail_json(msg=err) + changed = True + module.exit_json(changed=changed) + + +# pylint: disable=E0602 +def main(): + """ Entry point for ansible module. """ + module = AnsibleModule( + argument_spec={ + 'table': {'required': True}, + 'record': {'required': True}, + 'col': {'required': True}, + 'key': {'required': True}, + 'value': {'required': True}, + 'timeout': {'default': 5, 'type': 'int'}, + }, + supports_check_mode=True, + ) + + params_set(module) + + +# pylint: disable=W0614 +# pylint: disable=W0401 +# pylint: disable=W0622 + +# import module snippets +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/network/openvswitch_port.py b/network/openvswitch_port.py index 469d53730da..759a2489c16 100644 --- a/network/openvswitch_port.py +++ b/network/openvswitch_port.py @@ -22,7 +22,9 @@ # You should have received a copy of the GNU General Public License # along with this software. If not, see . 
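openvswitch_db above reads the current value before writing, so re-running a play with an already-correct value reports no change. A standalone sketch of that get-then-set pattern, assuming only that ovs-vsctl is on PATH; set_if_needed is an illustrative name:

```python
# Sketch of openvswitch_db's get-before-set idempotency, via subprocess.
import subprocess

def set_if_needed(table, record, col, key, value, timeout=5):
    base = ['ovs-vsctl', '-t', str(timeout)]
    try:
        current = subprocess.check_output(
            base + ['get', table, record, '%s:%s' % (col, key)])
    except subprocess.CalledProcessError:
        current = b''              # key not set yet
    if str(value).encode() in current:
        return False               # desired value already present: no change
    subprocess.check_call(
        base + ['set', table, record, '%s:%s=%s' % (col, key, value)])
    return True

# e.g. set_if_needed('open_vswitch', '.', 'other_config', 'max-idle', 50000)
```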
-import syslog +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -42,6 +44,11 @@ required: true description: - Name of port to manage on the bridge + tag: + version_added: 2.2 + required: false + description: + - VLAN tag for this port state: required: false default: "present" @@ -69,21 +76,38 @@ EXAMPLES = ''' # Creates port eth2 on bridge br-ex -- openvswitch_port: bridge=br-ex port=eth2 state=present - -# Creates port eth6 and set ofport equal to 6. -- openvswitch_port: bridge=bridge-loop port=eth6 state=present - set Interface eth6 ofport_request=6 - -# Assign interface id server1-vifeth6 and mac address 52:54:00:30:6d:11 +- openvswitch_port: + bridge: br-ex + port: eth2 + state: present + +# Creates port eth6 +- openvswitch_port: + bridge: bridge-loop + port: eth6 + state: present + set: Interface eth6 + +# Creates port vlan10 with tag 10 on bridge br-ex +- openvswitch_port: + bridge: br-ex + port: vlan10 + tag: 10 + state: present + set: Interface vlan10 + +# Assign interface id server1-vifeth6 and mac address 00:00:5E:00:53:23 # to port vifeth6 and setup port to be managed by a controller. -- openvswitch_port: bridge=br-int port=vifeth6 state=present +- openvswitch_port: + bridge: br-int + port: vifeth6 + state: present args: external_ids: - iface-id: "{{inventory_hostname}}-vifeth6" - attached-mac: "52:54:00:30:6d:11" - vm-id: "{{inventory_hostname}}" - iface-status: "active" + iface-id: '{{ inventory_hostname }}-vifeth6' + attached-mac: '00:00:5E:00:53:23' + vm-id: '{{ inventory_hostname }}' + iface-status: active ''' # pylint: disable=W0703 @@ -99,7 +123,7 @@ def truncate_before(value, srch): return value -def _set_to_get(set_cmd): +def _set_to_get(set_cmd, module): """ Convert set command to get command and set value. return tuple (get command, set value) """ @@ -109,7 +133,7 @@ def _set_to_get(set_cmd): set_cmd = truncate_before(set_cmd, " option:") get_cmd = set_cmd.split(" ") (key, value) = get_cmd[-1].split("=") - syslog.syslog(syslog.LOG_NOTICE, "get commands %s " % key) + module.log("get commands %s " % key) return (["--", "get"] + get_cmd[:-1] + [key], value) @@ -120,6 +144,7 @@ def __init__(self, module): self.module = module self.bridge = module.params['bridge'] self.port = module.params['port'] + self.tag = module.params['tag'] self.state = module.params['state'] self.timeout = module.params['timeout'] self.set_opt = module.params.get('set', None) @@ -128,7 +153,6 @@ def _vsctl(self, command, check_rc=True): '''Run ovs-vsctl command''' cmd = ['ovs-vsctl', '-t', str(self.timeout)] + command - syslog.syslog(syslog.LOG_NOTICE, " ".join(cmd)) return self.module.run_command(cmd, check_rc=check_rc) def exists(self): @@ -139,15 +163,15 @@ def exists(self): if rtc != 0: self.module.fail_json(msg=err) - return any(port.rstrip() == self.port for port in out.split('\n')) + return any(port.rstrip() == self.port for port in out.split('\n')) or self.port == self.bridge def set(self, set_opt): """ Set attributes on a port. 
""" - syslog.syslog(syslog.LOG_NOTICE, "set called %s" % set_opt) + self.module.log("set called %s" % set_opt) if (not set_opt): return False - (get_cmd, set_value) = _set_to_get(set_opt) + (get_cmd, set_value) = _set_to_get(set_opt, self.module) (rtc, out, err) = self._vsctl(get_cmd, False) if rtc != 0: ## @@ -170,6 +194,8 @@ def set(self, set_opt): def add(self): '''Add the port''' cmd = ['add-port', self.bridge, self.port] + if self.tag: + cmd += ["tag=" + self.tag] if self.set and self.set_opt: cmd += ["--", "set"] cmd += self.set_opt.split(" ") @@ -195,7 +221,8 @@ def check(self): changed = True else: changed = False - except Exception, earg: + except Exception: + earg = get_exception() self.module.fail_json(msg=str(earg)) self.module.exit_json(changed=changed) @@ -226,7 +253,8 @@ def run(self): external_id = fmt_opt % (self.port, key, value) changed = self.set(external_id) or changed ## - except Exception, earg: + except Exception: + earg = get_exception() self.module.fail_json(msg=str(earg)) self.module.exit_json(changed=changed) @@ -238,10 +266,11 @@ def main(): argument_spec={ 'bridge': {'required': True}, 'port': {'required': True}, + 'tag': {'required': False}, 'state': {'default': 'present', 'choices': ['present', 'absent']}, 'timeout': {'default': 5, 'type': 'int'}, 'set': {'required': False, 'default': None}, - 'external_ids': {'default': {}, 'required': False}, + 'external_ids': {'default': {}, 'required': False, 'type': 'dict'}, }, supports_check_mode=True, ) @@ -259,4 +288,7 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/network/panos/__init__.py b/network/panos/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/network/panos/panos_admin.py b/network/panos/panos_admin.py new file mode 100755 index 00000000000..dd36ac08977 --- /dev/null +++ b/network/panos/panos_admin.py @@ -0,0 +1,204 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Ansible module to manage PaloAltoNetworks Firewall +# (c) 2016, techbizdev +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: panos_admin +short_description: Add or modify PAN-OS user accounts password. +description: + - PanOS module that allows changes to the user account passwords by doing + API calls to the Firewall using pan-api as the protocol. 
+author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)" +version_added: "2.3" +requirements: + - pan-python +options: + ip_address: + description: + - IP address (or hostname) of PAN-OS device + required: true + password: + description: + - password for authentication + required: true + username: + description: + - username for authentication + required: false + default: "admin" + admin_username: + description: + - username for admin user + required: false + default: "admin" + admin_password: + description: + - password for admin user + required: true + role: + description: + - role for admin user + required: false + default: null + commit: + description: + - commit if changed + required: false + default: true +''' + +EXAMPLES = ''' +# Set the password of user admin to "badpassword" +# Doesn't commit the candidate config + - name: set admin password + panos_admin: + ip_address: "192.168.1.1" + password: "admin" + admin_username: admin + admin_password: "badpassword" + commit: False +''' + +RETURN = ''' +status: + description: success status + returned: success + type: string + sample: "okey dokey" +''' +from ansible.module_utils.basic import AnsibleModule + +try: + import pan.xapi + HAS_LIB = True +except ImportError: + HAS_LIB = False + +_ADMIN_XPATH = "/config/mgt-config/users/entry[@name='%s']" + + +def admin_exists(xapi, admin_username): + xapi.get(_ADMIN_XPATH % admin_username) + e = xapi.element_root.find('.//entry') + return e + + +def admin_set(xapi, module, admin_username, admin_password, role): + if admin_password is not None: + xapi.op(cmd='request password-hash password "%s"' % admin_password, + cmd_xml=True) + r = xapi.element_root + phash = r.find('.//phash').text + if role is not None: + rbval = "yes" + if role != "superuser" and role != 'superreader': + rbval = "" + + ea = admin_exists(xapi, admin_username) + if ea is not None: + # user exists + changed = False + + if role is not None: + rb = ea.find('.//role-based') + if rb is not None: + if rb[0].tag != role: + changed = True + xpath = _ADMIN_XPATH % admin_username + xpath += '/permissions/role-based/%s' % rb[0].tag + xapi.delete(xpath=xpath) + + xpath = _ADMIN_XPATH % admin_username + xpath += '/permissions/role-based' + xapi.set(xpath=xpath, + element='<%s>%s' % (role, rbval, role)) + + if admin_password is not None: + xapi.edit(xpath=_ADMIN_XPATH % admin_username+'/phash', + element='%s' % phash) + changed = True + + return changed + + # setup the non encrypted part of the monitor + exml = [] + + exml.append('%s' % phash) + exml.append('<%s>%s' + '' % (role, rbval, role)) + + exml = ''.join(exml) + # module.fail_json(msg=exml) + + xapi.set(xpath=_ADMIN_XPATH % admin_username, element=exml) + + return True + + +def main(): + argument_spec = dict( + ip_address=dict(), + password=dict(no_log=True), + username=dict(default='admin'), + admin_username=dict(default='admin'), + admin_password=dict(no_log=True), + role=dict(), + commit=dict(type='bool', default=True) + ) + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + + if not HAS_LIB: + module.fail_json(msg='pan-python required for this module') + + ip_address = module.params["ip_address"] + if not ip_address: + module.fail_json(msg="ip_address should be specified") + password = module.params["password"] + if not password: + module.fail_json(msg="password is required") + username = module.params['username'] + + xapi = pan.xapi.PanXapi( + hostname=ip_address, + api_username=username, + api_password=password + ) + + admin_username = 
module.params['admin_username'] + if admin_username is None: + module.fail_json(msg="admin_username is required") + admin_password = module.params['admin_password'] + role = module.params['role'] + commit = module.params['commit'] + + changed = admin_set(xapi, module, admin_username, admin_password, role) + + if changed and commit: + xapi.commit(cmd="", sync=True, interval=1) + + module.exit_json(changed=changed, msg="okey dokey") + +if __name__ == '__main__': + main() diff --git a/network/snmp_facts.py b/network/snmp_facts.py index 81a91ee6eb2..7801d0f2955 100644 --- a/network/snmp_facts.py +++ b/network/snmp_facts.py @@ -16,12 +16,16 @@ # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: snmp_facts version_added: "1.9" author: "Patrick Ogenstad (@ogenstad)" -short_description: Retrive facts for a device using SNMP. +short_description: Retrieve facts for a device using SNMP. description: - Retrieve facts for a device using SNMP, the facts will be inserted to the ansible_facts key. @@ -72,19 +76,22 @@ EXAMPLES = ''' # Gather facts with SNMP version 2 -- snmp_facts: host={{ inventory_hostname }} version=2c community=public - connection: local +- snmp_facts: + host: '{{ inventory_hostname }}' + version: 2c + community: public + delegate_to: local # Gather facts using SNMP version 3 - snmp_facts: - host={{ inventory_hostname }} - version=v3 - level=authPriv - integrity=sha - privacy=aes - username=snmp-user - authkey=abc12345 - privkey=def6789 + host: '{{ inventory_hostname }}' + version: v3 + level: authPriv + integrity: sha + privacy: aes + username: snmp-user + authkey: abc12345 + privkey: def6789 delegate_to: localhost ''' @@ -112,7 +119,7 @@ def __init__(self,dotprefix=False): self.sysContact = dp + "1.3.6.1.2.1.1.4.0" self.sysName = dp + "1.3.6.1.2.1.1.5.0" self.sysLocation = dp + "1.3.6.1.2.1.1.6.0" - + # From IF-MIB self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1" self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2" @@ -127,10 +134,10 @@ def __init__(self,dotprefix=False): self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1" self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2" self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3" - + def decode_hex(hexstring): - + if len(hexstring) < 3: return hexstring if hexstring[:2] == "0x": @@ -153,7 +160,7 @@ def lookup_adminstatus(int_adminstatus): 2: 'down', 3: 'testing' } - if int_adminstatus in adminstatus_options.keys(): + if int_adminstatus in adminstatus_options: return adminstatus_options[int_adminstatus] else: return "" @@ -168,7 +175,7 @@ def lookup_operstatus(int_operstatus): 6: 'notPresent', 7: 'lowerLayerDown' } - if int_operstatus in operstatus_options.keys(): + if int_operstatus in operstatus_options: return operstatus_options[int_operstatus] else: return "" @@ -200,7 +207,7 @@ def main(): if m_args['version'] == "v2" or m_args['version'] == "v2c": if m_args['community'] == False: module.fail_json(msg='Community not set when using snmp version 2') - + if m_args['version'] == "v3": if m_args['username'] == None: module.fail_json(msg='Username not set when using snmp version 3') @@ -208,7 +215,7 @@ def main(): if m_args['level'] == "authPriv" and m_args['privacy'] == None: module.fail_json(msg='Privacy algorithm not set when using authPriv') - + if m_args['integrity'] == "sha": integrity_proto = cmdgen.usmHMACSHAAuthProtocol elif m_args['integrity'] == "md5": @@ -218,7 +225,7 @@ def main(): privacy_proto = cmdgen.usmAesCfb128Protocol elif 
m_args['privacy'] == "des": privacy_proto = cmdgen.usmDESPrivProtocol - + # Use SNMP Version 2 if m_args['version'] == "v2" or m_args['version'] == "v2c": snmp_auth = cmdgen.CommunityData(m_args['community']) @@ -237,18 +244,19 @@ def main(): v = DefineOid(dotprefix=False) Tree = lambda: defaultdict(Tree) - + results = Tree() - + errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd( snmp_auth, cmdgen.UdpTransportTarget((m_args['host'], 161)), cmdgen.MibVariable(p.sysDescr,), - cmdgen.MibVariable(p.sysObjectId,), + cmdgen.MibVariable(p.sysObjectId,), cmdgen.MibVariable(p.sysUpTime,), - cmdgen.MibVariable(p.sysContact,), + cmdgen.MibVariable(p.sysContact,), cmdgen.MibVariable(p.sysName,), cmdgen.MibVariable(p.sysLocation,), + lookupMib=False ) @@ -273,7 +281,7 @@ def main(): errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( snmp_auth, - cmdgen.UdpTransportTarget((m_args['host'], 161)), + cmdgen.UdpTransportTarget((m_args['host'], 161)), cmdgen.MibVariable(p.ifIndex,), cmdgen.MibVariable(p.ifDescr,), cmdgen.MibVariable(p.ifMtu,), @@ -281,20 +289,21 @@ def main(): cmdgen.MibVariable(p.ifPhysAddress,), cmdgen.MibVariable(p.ifAdminStatus,), cmdgen.MibVariable(p.ifOperStatus,), - cmdgen.MibVariable(p.ipAdEntAddr,), - cmdgen.MibVariable(p.ipAdEntIfIndex,), - cmdgen.MibVariable(p.ipAdEntNetMask,), + cmdgen.MibVariable(p.ipAdEntAddr,), + cmdgen.MibVariable(p.ipAdEntIfIndex,), + cmdgen.MibVariable(p.ipAdEntNetMask,), cmdgen.MibVariable(p.ifAlias,), + lookupMib=False ) - + if errorIndication: module.fail_json(msg=str(errorIndication)) interface_indexes = [] - - all_ipv4_addresses = [] + + all_ipv4_addresses = [] ipv4_networks = Tree() for varBinds in varTable: @@ -358,9 +367,9 @@ def main(): results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface] results['ansible_all_ipv4_addresses'] = all_ipv4_addresses - + module.exit_json(ansible_facts=results) - -main() +if __name__ == '__main__': + main() diff --git a/network/wakeonlan.py b/network/wakeonlan.py new file mode 100644 index 00000000000..d49118d60ba --- /dev/null +++ b/network/wakeonlan.py @@ -0,0 +1,135 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Dag Wieers +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: wakeonlan +version_added: 2.2 +short_description: Send a magic Wake-on-LAN (WoL) broadcast packet +description: + - The M(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets. 
+options: + mac: + description: + - MAC address to send Wake-on-LAN broadcast packet for + required: true + default: null + broadcast: + description: + - Network broadcast address to use for broadcasting magic Wake-on-LAN packet + required: false + default: 255.255.255.255 + port: + description: + - UDP port to use for magic Wake-on-LAN packet + required: false + default: 7 +author: "Dag Wieers (@dagwieers)" +todo: + - Add arping support to check whether the system is up (before and after) + - Enable check-mode support (when we have arping support) + - Does not have SecureOn password support +notes: + - This module sends a magic packet, without knowing whether it worked + - Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS) + - Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first) when turned off +''' + +EXAMPLES = ''' +# Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66 +- wakeonlan: + mac: '00:00:5E:00:53:66' + broadcast: 192.0.2.23 + delegate_to: localhost + +- wakeonlan: + mac: 00:00:5E:00:53:66 + port: 9 + delegate_to: localhost +''' + +RETURN=''' +# Default return values +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +import socket +import struct + + +def wakeonlan(module, mac, broadcast, port): + """ Send a magic Wake-on-LAN packet. """ + + mac_orig = mac + + # Remove possible separator from MAC address + if len(mac) == 12 + 5: + mac = mac.replace(mac[2], '') + + # If we don't end up with 12 hexadecimal characters, fail + if len(mac) != 12: + module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig) + + # Test if it converts to an integer, otherwise fail + try: + int(mac, 16) + except ValueError: + module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig) + + # Create payload for magic packet + data = '' + padding = ''.join(['FFFFFFFFFFFF', mac * 20]) + for i in range(0, len(padding), 2): + data = ''.join([data, struct.pack('B', int(padding[i: i + 2], 16))]) + + # Broadcast payload to network + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) + try: + sock.sendto(data, (broadcast, port)) + except socket.error: + e = get_exception() + module.fail_json(msg=str(e)) + + +def main(): + module = AnsibleModule( + argument_spec = dict( + mac = dict(required=True, type='str'), + broadcast = dict(required=False, default='255.255.255.255'), + port = dict(required=False, type='int', default=7), + ), + ) + + mac = module.params.get('mac') + broadcast = module.params.get('broadcast') + port = module.params.get('port') + + wakeonlan(module, mac, broadcast, port) + module.exit_json(changed=True) + + +if __name__ == '__main__': + main() diff --git a/notification/campfire.py b/notification/campfire.py index 68e64f1bc94..8a7b44355f4 100644 --- a/notification/campfire.py +++ b/notification/campfire.py @@ -15,6 +15,10 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: campfire @@ -23,7 +27,6 @@ description: - Send a message to Campfire. - Messages with newlines will result in a "Paste" message being sent. -version_added: "1.2" options: subscription: description: @@ -61,10 +64,18 @@ ''' EXAMPLES = ''' -- campfire: subscription=foo token=12345 room=123 msg="Task completed."
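The wakeonlan module added above constructs the standard "magic packet": six 0xFF bytes followed by the target MAC address repeated at least 16 times (the module repeats it 20 times, which receivers tolerate since only the first 16 copies are inspected). A minimal standalone sketch of the same construction, assuming Python 2 as these modules do:

    import socket
    import struct

    def magic_packet(mac):
        # Strip ':' or '-' separators, leaving 12 hex digits
        mac = mac.replace(':', '').replace('-', '')
        # 6 x 0xFF header, then the MAC repeated 16 times
        payload = 'FF' * 6 + mac * 16
        return ''.join(struct.pack('B', int(payload[i:i + 2], 16))
                       for i in range(0, len(payload), 2))

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    sock.sendto(magic_packet('00:00:5E:00:53:66'), ('255.255.255.255', 7))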
+- campfire: + subscription: foo + token: 12345 + room: 123 + msg: Task completed. -- campfire: subscription=foo token=12345 room=123 notify=loggins - msg="Task completed ... with feeling." +- campfire: + subscription: foo + token: 12345 + room: 123 + notify: loggins + msg: Task completed ... with feeling. ''' import cgi @@ -74,7 +85,7 @@ def main(): module = AnsibleModule( argument_spec=dict( subscription=dict(required=True), - token=dict(required=True), + token=dict(required=True, no_log=True), room=dict(required=True), msg=dict(required=True), notify=dict(required=False, @@ -118,14 +129,14 @@ def main(): # Send some audible notification if requested if notify: response, info = fetch_url(module, target_url, data=NSTR % cgi.escape(notify), headers=headers) - if info['status'] != 200: - module.fail_json(msg="unable to send msg: '%s', campfire api" - " returned error code: '%s'" % - (notify, info['status'])) + if info['status'] not in [200, 201]: + module.fail_json(msg="unable to send msg: '%s', campfire api" + " returned error code: '%s'" % + (notify, info['status'])) # Send the message response, info = fetch_url(module, target_url, data=MSTR %cgi.escape(msg), headers=headers) - if info['status'] != 200: + if info['status'] not in [200, 201]: module.fail_json(msg="unable to send msg: '%s', campfire api" " returned error code: '%s'" % (msg, info['status'])) diff --git a/notification/flowdock.py b/notification/flowdock.py index 34dad8db375..e0584295afa 100644 --- a/notification/flowdock.py +++ b/notification/flowdock.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: flowdock @@ -89,18 +93,20 @@ ''' EXAMPLES = ''' -- flowdock: type=inbox - token=AAAAAA - from_address=user@example.com - source='my cool app' - msg='test from ansible' - subject='test subject' - -- flowdock: type=chat - token=AAAAAA - external_user_name=testuser - msg='test from ansible' - tags=tag1,tag2,tag3 +- flowdock: + type: inbox + token: AAAAAA + from_address: user@example.com + source: my cool app + msg: test from ansible + subject: test subject + +- flowdock: + type: chat + token: AAAAAA + external_user_name: testuser + msg: test from ansible + tags: tag1,tag2,tag3 ''' import urllib @@ -113,7 +119,7 @@ def main(): module = AnsibleModule( argument_spec=dict( - token=dict(required=True), + token=dict(required=True, no_log=True), msg=dict(required=True), type=dict(required=True, choices=["inbox","chat"]), external_user_name=dict(required=False), @@ -189,5 +195,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() - +if __name__ == '__main__': + main() diff --git a/notification/grove.py b/notification/grove.py index 4e4a0b5b684..fe16289a220 100644 --- a/notification/grove.py +++ b/notification/grove.py @@ -1,5 +1,24 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -76,7 +95,7 @@ def do_notify_grove(module, channel_token, service, message, url=None, icon_url= def main(): module = AnsibleModule( argument_spec = dict( - channel_token = dict(type='str', required=True), + channel_token = dict(type='str', required=True, no_log=True), message = dict(type='str', required=True), service = dict(type='str', default='ansible'), url = dict(type='str', default=None), @@ -99,4 +118,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() + +if __name__ == '__main__': + main() diff --git a/notification/hall.py b/notification/hall.py index 05c1a981b73..d8766412d01 100755 --- a/notification/hall.py +++ b/notification/hall.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ module: hall short_description: Send notification to Hall @@ -60,7 +64,7 @@ room_token: title: Server Creation msg: "Created EC2 instance {{ item.id }} of type {{ item.instance_type }}.\\nInstance can be reached at {{ item.public_ip }} in the {{ item.region }} region." - with_items: ec2.instances + with_items: "{{ ec2.instances }}" """ HALL_API_ENDPOINT = 'https://hall.com/api/1/services/generic/%s' @@ -94,4 +98,6 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() + +if __name__ == '__main__': + main() diff --git a/notification/hipchat.py b/notification/hipchat.py index f565ca9cdfc..f321a6b9141 100644 --- a/notification/hipchat.py +++ b/notification/hipchat.py @@ -15,6 +15,10 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
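A pattern that recurs throughout these hunks (campfire, grove, hall, and hipchat below) is adding no_log=True to token-like parameters so that AnsibleModule censors their values in syslog and verbose output. A minimal sketch of the declaration, using a hypothetical my_token parameter:

    from ansible.module_utils.basic import AnsibleModule

    module = AnsibleModule(
        argument_spec=dict(
            # no_log=True masks the value in logs and -v output
            my_token=dict(required=True, no_log=True),
            msg=dict(required=True),
        ),
    )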
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: hipchat @@ -81,15 +85,16 @@ ''' EXAMPLES = ''' -- hipchat: room=notify msg="Ansible task finished" +- hipchat: + room: notify + msg: Ansible task finished # Use Hipchat API version 2 - - hipchat: - api: "https://api.hipchat.com/v2/" + api: 'https://api.hipchat.com/v2/' token: OAUTH2_TOKEN room: notify - msg: "Ansible task finished" + msg: Ansible task finished ''' # =========================================== @@ -97,6 +102,15 @@ # import urllib +try: + import json +except ImportError: + import simplejson as json + +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import fetch_url DEFAULT_URI = "https://api.hipchat.com/v1" @@ -104,10 +118,10 @@ NOTIFY_URI_V2 = "/room/{id_or_name}/notification" + def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False, api=MSG_URI_V1): + color='yellow', notify=False, api=MSG_URI_V1): '''sending message to hipchat v1 server''' - print "Sending message to v1 server" params = {} params['room_id'] = room @@ -133,11 +147,10 @@ def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False, api=NOTIFY_URI_V2): + color='yellow', notify=False, api=NOTIFY_URI_V2): '''sending message to hipchat v2 server''' - print "Sending message to v2 server" - headers = {'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'} + headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'} body = dict() body['message'] = msg @@ -147,7 +160,7 @@ def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', POST_URL = api + NOTIFY_URI_V2 - url = POST_URL.replace('{id_or_name}', room) + url = POST_URL.replace('{id_or_name}', urllib.pathname2url(room)) data = json.dumps(body) if module.check_mode: @@ -155,7 +168,10 @@ def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', module.exit_json(changed=False) response, info = fetch_url(module, url, data=data, headers=headers, method='POST') - if info['status'] == 200: + + # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows + # 204 to be the expected result code.
+ if info['status'] in [200, 204]: return response.read() else: module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) @@ -169,7 +185,7 @@ def main(): module = AnsibleModule( argument_spec=dict( - token=dict(required=True), + token=dict(required=True, no_log=True), room=dict(required=True), msg=dict(required=True), msg_from=dict(default="Ansible", aliases=['from']), @@ -184,7 +200,7 @@ def main(): ) token = module.params["token"] - room = module.params["room"] + room = str(module.params["room"]) msg = module.params["msg"] msg_from = module.params["msg_from"] color = module.params["color"] @@ -197,14 +213,12 @@ def main(): send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api) else: send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="unable to send msg: %s" % e) changed = True module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * - -main() +if __name__ == '__main__': + main() diff --git a/notification/irc.py b/notification/irc.py index 7e34049c639..d2fa22a4f52 100644 --- a/notification/irc.py +++ b/notification/irc.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: irc @@ -56,9 +60,11 @@ color: description: - Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none"). + Added 11 more colors in version 2.0. required: false default: "none" - choices: [ "none", "yellow", "red", "green", "blue", "black" ] + choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan", + "light_blue", "pink", "gray", "light_gray"] channel: description: - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them. @@ -89,6 +95,19 @@ - Designates whether TLS/SSL should be used when connecting to the IRC server default: False version_added: "1.8" + part: + description: + - Designates whether user should part from channel after sending message or not. + Useful for when using a faux bot and not wanting join/parts between messages. + default: True + version_added: "2.0" + style: + description: + - Text style for the message. 
Note italic does not work on some clients + default: None + required: False + choices: [ "bold", "underline", "reverse", "italic" ] + version_added: "2.0" # informational: requirements for nodes requirements: [ socket ] @@ -98,22 +117,29 @@ ''' EXAMPLES = ''' -- irc: server=irc.example.net channel="#t1" msg="Hello world" - -- local_action: irc port=6669 - server="irc.example.net" - channel="#t1" - msg="All finished at {{ ansible_date_time.iso8601 }}" - color=red - nick=ansibleIRC - -- local_action: irc port=6669 - server="irc.example.net" - channel="#t1" - nick_to=["nick1", "nick2"] - msg="All finished at {{ ansible_date_time.iso8601 }}" - color=red - nick=ansibleIRC +- irc: + server: irc.example.net + channel: "#t1" + msg: "Hello world" + +- local_action: + module: irc + port: 6669 + server: "irc.example.net" + channel: "#t1" + msg: "All finished at {{ ansible_date_time.iso8601 }}" + color: red + nick: ansibleIRC + +- local_action: + module: irc + port: 6669 + server: "irc.example.net" + channel: "#t1" + nick_to: ["nick1", "nick2"] + msg: "All finished at {{ ansible_date_time.iso8601 }}" + color: red + nick: ansibleIRC ''' # =========================================== @@ -128,24 +154,47 @@ def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=[], key=None, topic=None, - nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False): + nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True, style=None): '''send message to IRC''' colornumbers = { + 'white': "00", 'black': "01", + 'blue': "02", + 'green': "03", 'red': "04", - 'green': "09", + 'brown': "05", + 'purple': "06", + 'orange': "07", 'yellow': "08", - 'blue': "12", + 'light_green': "09", + 'teal': "10", + 'light_cyan': "11", + 'light_blue': "12", + 'pink': "13", + 'gray': "14", + 'light_gray': "15", } + stylechoices = { + 'bold': "\x02", + 'underline': "\x1F", + 'reverse': "\x16", + 'italic': "\x1D", + } + + try: + styletext = stylechoices[style] + except: + styletext = "" + try: colornumber = colornumbers[color] colortext = "\x03" + colornumber except: colortext = "" - message = colortext + msg + message = styletext + colortext + msg irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if use_ssl: @@ -194,9 +243,10 @@ def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=[], key if channel: irc.send('PRIVMSG %s :%s\r\n' % (channel, message)) sleep(1) - irc.send('PART %s\r\n' % channel) - irc.send('QUIT\r\n') - sleep(1) + if part: + irc.send('PART %s\r\n' % channel) + irc.send('QUIT\r\n') + sleep(1) irc.close() # =========================================== @@ -208,17 +258,23 @@ def main(): module = AnsibleModule( argument_spec=dict( server=dict(default='localhost'), - port=dict(default=6667), + port=dict(type='int', default=6667), nick=dict(default='ansible'), nick_to=dict(required=False, type='list'), msg=dict(required=True), - color=dict(default="none", choices=["yellow", "red", "green", - "blue", "black", "none"]), + color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue", + "green", "red", "brown", + "purple", "orange", "yellow", + "light_green", "teal", "light_cyan", + "light_blue", "pink", "gray", + "light_gray", "none"]), + style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]), channel=dict(required=False), - key=dict(), + key=dict(no_log=True), topic=dict(), - passwd=dict(), + passwd=dict(no_log=True), timeout=dict(type='int', default=30), + part=dict(type='bool', default=True), 
use_ssl=dict(type='bool', default=False) ), supports_check_mode=True, @@ -239,10 +295,13 @@ def main(): passwd = module.params["passwd"] timeout = module.params["timeout"] use_ssl = module.params["use_ssl"] + part = module.params["part"] + style = module.params["style"] try: - send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl) - except Exception, e: + send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part, style) + except Exception: + e = get_exception() module.fail_json(msg="unable to send to IRC: %s" % e) module.exit_json(changed=False, channel=channel, nick=nick, @@ -250,4 +309,7 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/notification/jabber.py b/notification/jabber.py index 6d97e4232df..f68790fb296 100644 --- a/notification/jabber.py +++ b/notification/jabber.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- version_added: "1.2" @@ -66,24 +70,27 @@ EXAMPLES = ''' # send a message to a user -- jabber: user=mybot@example.net - password=secret - to=friend@example.net - msg="Ansible task finished" +- jabber: + user: mybot@example.net + password: secret + to: friend@example.net + msg: Ansible task finished # send a message to a room -- jabber: user=mybot@example.net - password=secret - to=mychaps@conference.example.net/ansiblebot - msg="Ansible task finished" +- jabber: + user: mybot@example.net + password: secret + to: mychaps@conference.example.net/ansiblebot + msg: Ansible task finished # send a message, specifying the host and port -- jabber user=mybot@example.net - host=talk.example.net - port=5223 - password=secret - to=mychaps@example.net - msg="Ansible task finished" +- jabber: + user: mybot@example.net + host: talk.example.net + port: 5223 + password: secret + to: mychaps@example.net + msg: Ansible task finished ''' import os @@ -101,7 +108,7 @@ def main(): module = AnsibleModule( argument_spec=dict( user=dict(required=True), - password=dict(required=True), + password=dict(required=True, no_log=True), to=dict(required=True), msg=dict(required=True), host=dict(required=False), @@ -134,7 +141,7 @@ def main(): msg = xmpp.protocol.Message(body=module.params['msg']) try: - conn=xmpp.Client(server) + conn=xmpp.Client(server, debug=[]) if not conn.connect(server=(host,port)): module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server)) if not conn.auth(user,password,'Ansible'): @@ -155,11 +162,15 @@ def main(): conn.send(msg) time.sleep(1) conn.disconnect() - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="unable to send msg: %s" % e) module.exit_json(changed=False, to=to, user=user, msg=msg.getBody()) # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/notification/mail.py b/notification/mail.py index 8be9a589cbf..51902f3f87f 100644 --- a/notification/mail.py +++ b/notification/mail.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see .
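The irc and jabber hunks above replace the Python-2-only except Exception, e: syntax with get_exception() from ansible.module_utils.pycompat24, which reads the current exception via sys.exc_info() and therefore parses on Python 2.4 through 3.x. A minimal sketch of the pattern, with a hypothetical failing send() helper:

    from ansible.module_utils.basic import AnsibleModule
    from ansible.module_utils.pycompat24 import get_exception

    def send():
        raise IOError("connection refused")  # stand-in for a failing operation

    def main():
        module = AnsibleModule(argument_spec=dict())
        try:
            send()
        except Exception:
            e = get_exception()  # portable spelling of 'except Exception as e'
            module.fail_json(msg="unable to send msg: %s" % e)
        module.exit_json(changed=False)

    if __name__ == '__main__':
        main()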
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = """ --- author: "Dag Wieers (@dagwieers)" @@ -120,37 +124,42 @@ EXAMPLES = ''' # Example playbook sending mail to root -- local_action: mail subject='System {{ ansible_hostname }} has been successfully provisioned.' +- mail: + subject: 'System {{ ansible_hostname }} has been successfully provisioned.' + delegate_to: localhost # Sending an e-mail using Gmail SMTP servers -- local_action: mail - host='smtp.gmail.com' - port=587 - username=username@gmail.com - password='mysecret' - to="John Smith " - subject='Ansible-report' - body='System {{ ansible_hostname }} has been successfully provisioned.' +- mail: + host: smtp.gmail.com + port: 587 + username: username@gmail.com + password: mysecret + to: John Smith + subject: Ansible-report + body: 'System {{ ansible_hostname }} has been successfully provisioned.' + delegate_to: localhost # Send e-mail to a bunch of users, attaching files -- local_action: mail - host='127.0.0.1' - port=2025 - subject="Ansible-report" - body="Hello, this is an e-mail. I hope you like it ;-)" - from="jane@example.net (Jane Jolie)" - to="John Doe , Suzie Something " - cc="Charlie Root " - attach="/etc/group /tmp/pavatar2.png" - headers=Reply-To=john@example.com|X-Special="Something or other" - charset=utf8 +- mail: + host: 127.0.0.1 + port: 2025 + subject: Ansible-report + body: Hello, this is an e-mail. I hope you like it ;-) + from: jane@example.net (Jane Jolie) + to: John Doe , Suzie Something + cc: Charlie Root + attach: /etc/group /tmp/pavatar2.png + headers: 'Reply-To=john@example.com|X-Special="Something or other"' + charset: utf8 + delegate_to: localhost + # Sending an e-mail using the remote machine, not the Ansible controller node - mail: - host='localhost' - port=25 - to="John Smith " - subject='Ansible-report' - body='System {{ ansible_hostname }} has been successfully provisioned.' + host: localhost + port: 25 + to: John Smith + subject: Ansible-report + body: 'System {{ ansible_hostname }} has been successfully provisioned.' 
''' import os @@ -178,7 +187,7 @@ def main(): module = AnsibleModule( argument_spec = dict( username = dict(default=None), - password = dict(default=None), + password = dict(default=None, no_log=True), host = dict(default='localhost'), port = dict(default='25'), sender = dict(default='root', aliases=['from']), @@ -218,7 +227,8 @@ def main(): smtp = smtplib.SMTP_SSL(host, port=int(port)) except (smtplib.SMTPException, ssl.SSLError): smtp = smtplib.SMTP(host, port=int(port)) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(rc=1, msg='Failed to send mail to server %s on port %s: %s' % (host, port, e)) smtp.ehlo() @@ -283,15 +293,16 @@ def main(): part.add_header('Content-disposition', 'attachment', filename=os.path.basename(file)) msg.attach(part) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(rc=1, msg="Failed to send mail: can't attach file %s: %s" % (file, e)) - sys.exit() composed = msg.as_string() try: smtp.sendmail(sender_addr, set(addr_list), composed) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(rc=1, msg='Failed to send mail to %s: %s' % (", ".join(addr_list), e)) smtp.quit() @@ -300,4 +311,7 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/notification/mqtt.py b/notification/mqtt.py index c618ab69ae3..b13124b4f01 100644 --- a/notification/mqtt.py +++ b/notification/mqtt.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: mqtt @@ -75,6 +79,36 @@ retained message immediately. required: false default: False + ca_certs: + description: + - The path to the Certificate Authority certificate files that are to be + treated as trusted by this client. If this is the only option given + then the client will operate in a similar manner to a web browser. That + is to say it will require the broker to have a certificate signed by the + Certificate Authorities in ca_certs and will communicate using TLS v1, + but will not attempt any form of authentication. This provides basic + network encryption but may not be sufficient depending on how the broker + is configured. + required: False + default: None + version_added: 2.3 + certfile: + description: + - The path pointing to the PEM encoded client certificate. If this is not + None it will be used as client information for TLS based + authentication. Support for this feature is broker dependent. + required: False + default: None + version_added: 2.3 + keyfile: + description: + - The path pointing to the PEM encoded client private key. If this is not + None it will be used as client information for TLS based + authentication. Support for this feature is broker dependent. 
+ required: False + default: None + version_added: 2.3 + # informational: requirements for nodes requirements: [ mosquitto ] @@ -113,14 +147,17 @@ def main(): module = AnsibleModule( argument_spec=dict( server = dict(default = 'localhost'), - port = dict(default = 1883), + port = dict(default = 1883, type='int'), topic = dict(required = True), payload = dict(required = True), client_id = dict(default = None), qos = dict(default="0", choices=["0", "1", "2"]), retain = dict(default=False, type='bool'), username = dict(default = None), - password = dict(default = None), + password = dict(default = None, no_log=True), + ca_certs = dict(default = None, type='path'), + certfile = dict(default = None, type='path'), + keyfile = dict(default = None, type='path'), ), supports_check_mode=True ) @@ -137,6 +174,9 @@ def main(): retain = module.params.get("retain") username = module.params.get("username", None) password = module.params.get("password", None) + ca_certs = module.params.get("ca_certs", None) + certfile = module.params.get("certfile", None) + keyfile = module.params.get("keyfile", None) if client_id is None: client_id = "%s_%s" % (socket.getfqdn(), os.getpid()) @@ -148,6 +188,11 @@ def main(): if username is not None: auth = { 'username' : username, 'password' : password } + tls=None + if ca_certs is not None: + tls = {'ca_certs': ca_certs, 'certfile': certfile, + 'keyfile': keyfile} + try: rc = mqtt.single(topic, payload, qos=qos, @@ -155,12 +200,17 @@ def main(): client_id=client_id, hostname=server, port=port, - auth=auth) - except Exception, e: + auth=auth, + tls=tls) + except Exception: + e = get_exception() module.fail_json(msg="unable to publish to MQTT broker %s" % (e)) module.exit_json(changed=False, topic=topic) # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.pycompat24 import get_exception + +if __name__ == '__main__': + main() diff --git a/notification/nexmo.py b/notification/nexmo.py index 89a246c0d90..9fafcc03769 100644 --- a/notification/nexmo.py +++ b/notification/nexmo.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ module: nexmo short_description: Send a SMS via nexmo @@ -138,4 +142,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() +if __name__ == '__main__': + main() diff --git a/notification/osx_say.py b/notification/osx_say.py index 7c0ba844583..ff6d3ae0147 100644 --- a/notification/osx_say.py +++ b/notification/osx_say.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
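The mqtt changes above wire the three new ca_certs/certfile/keyfile options into a tls dict that is handed to paho-mqtt's publish.single() (only when ca_certs is set), alongside the existing auth dict. A minimal standalone sketch of the same call; the broker hostname and certificate paths are hypothetical:

    import paho.mqtt.publish as publish

    tls = {
        'ca_certs': '/etc/ssl/certs/ca.pem',  # trusted CA bundle (enables TLS)
        'certfile': '/etc/ssl/client.crt',    # optional client certificate
        'keyfile': '/etc/ssl/client.key',     # optional client private key
    }
    publish.single('deploy/status', 'finished', qos=1, retain=False,
                   hostname='broker.example.com', port=8883,
                   auth={'username': 'ansible', 'password': 'secret'},
                   tls=tls)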
+ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: osx_say @@ -43,7 +47,10 @@ ''' EXAMPLES = ''' -- local_action: osx_say msg="{{inventory_hostname}} is all done" voice=Zarvox +- osx_say: + msg: '{{ inventory_hostname }} is all done' + voice: Zarvox + delegate_to: localhost ''' DEFAULT_VOICE='Trinoids' @@ -73,4 +80,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/notification/pushbullet.py b/notification/pushbullet.py index 52d785306ce..ed09be8f516 100644 --- a/notification/pushbullet.py +++ b/notification/pushbullet.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- author: "Willy Barro (@willybarro)" @@ -108,12 +112,13 @@ def main(): module = AnsibleModule( argument_spec = dict( - api_key = dict(type='str', required=True), + api_key = dict(type='str', required=True, no_log=True), channel = dict(type='str', default=None), device = dict(type='str', default=None), push_type = dict(type='str', default="note", choices=['note', 'link']), title = dict(type='str', required=True), - body = dict(type='str', default=None) + body = dict(type='str', default=None), + url = dict(type='str', default=None), ), mutually_exclusive = ( ['channel', 'device'], @@ -127,6 +132,7 @@ def main(): push_type = module.params['push_type'] title = module.params['title'] body = module.params['body'] + url = module.params['url'] if not pushbullet_found: module.fail_json(msg="Python 'pushbullet.py' module is required. Install via: $ pip install pushbullet.py") @@ -170,7 +176,10 @@ def main(): # Send push notification try: - target.push_note(title, body) + if push_type == "link": + target.push_link(title, url, body) + else: + target.push_note(title, body) module.exit_json(changed=False, msg="OK") except PushError as e: module.fail_json(msg="An error occurred, Pushbullet's response: %s" % str(e)) @@ -179,4 +188,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/notification/pushover.py b/notification/pushover.py index 0c1d6e94ab9..294da075cec 100644 --- a/notification/pushover.py +++ b/notification/pushover.py @@ -20,11 +20,15 @@ ### +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: pushover version_added: "2.0" -short_description: Send notifications via u(https://pushover.net) +short_description: Send notifications via U(https://pushover.net) description: - Send notifications via pushover, to subscriber list of devices, and email addresses. Requires pushover app on devices. @@ -34,26 +38,30 @@ options: msg: description: - What message you wish to send. + - What message you wish to send. required: true app_token: description: - Pushover issued token identifying your pushover app. + - Pushover issued token identifying your pushover app. required: true user_key: description: - Pushover issued authentication key for your user. + - Pushover issued authentication key for your user. required: true pri: - description: Message priority (see u(https://pushover.net) for details.) + description: + - Message priority (see U(https://pushover.net) for details.) 
+ required: false author: "Jim Richardson (@weaselkeeper)" ''' EXAMPLES = ''' -- local_action: pushover msg="{{inventory_hostname}} has exploded in flames, - It is now time to panic" app_token=wxfdksl user_key=baa5fe97f2c5ab3ca8f0bb59 +- pushover: + msg: '{{ inventory_hostname }} has exploded in flames, It is now time to panic' + app_token: wxfdksl + user_key: baa5fe97f2c5ab3ca8f0bb59 + delegate_to: localhost ''' import urllib @@ -94,19 +102,19 @@ def main(): module = AnsibleModule( argument_spec=dict( msg=dict(required=True), - app_token=dict(required=True), - user_key=dict(required=True), - pri=dict(required=False, default=0), + app_token=dict(required=True, no_log=True), + user_key=dict(required=True, no_log=True), + pri=dict(required=False, default='0', choices=['-2','-1','0','1','2']), ), ) msg_object = Pushover(module, module.params['user_key'], module.params['app_token']) try: - msg_object.run(module.params['pri'], module.params['msg']) + response = msg_object.run(module.params['pri'], module.params['msg']) except: module.fail_json(msg='Unable to send msg via pushover') - module.exit_json(msg=msg, changed=False) + module.exit_json(msg='message sent successfully: %s' % response, changed=False) # import module snippets from ansible.module_utils.basic import * diff --git a/notification/rocketchat.py b/notification/rocketchat.py new file mode 100644 index 00000000000..f7089f7984f --- /dev/null +++ b/notification/rocketchat.py @@ -0,0 +1,255 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Deepak Kothandan +# (c) 2015, Stefan Berggren +# (c) 2014, Ramon de la Fuente +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +module: rocketchat +short_description: Send notifications to Rocket Chat +description: + - The M(rocketchat) module sends notifications to Rocket Chat via the Incoming WebHook integration +version_added: "2.2" +author: "Ramon de la Fuente (@ramondelafuente)" +options: + domain: + description: + - The domain for your environment without protocol. (i.e. + C(subdomain.domain.com or chat.domain.tld)) + required: true + token: + description: + - Rocket Chat Incoming Webhook integration token. This provides + authentication to Rocket Chat's Incoming webhook for posting + messages. + required: true + protocol: + description: + - Specify the protocol used to send notification messages before the webhook url. (i.e. http or https) + required: false + default: https + choices: + - 'http' + - 'https' + msg: + description: + - Message to be sent. + required: false + default: None + channel: + description: + - Channel to send the message to. If absent, the message goes to the channel selected for the I(token) + specified during the creation of webhook. + required: false + default: None + username: + description: + - This is the sender of the message.
+ required: false + default: "Ansible" + icon_url: + description: + - URL for the message sender's icon. + required: false + default: "https://www.ansible.com/favicon.ico" + icon_emoji: + description: + - Emoji for the message sender. The representation for the available emojis can be + got from Rocket Chat. (for example :thumbsup:) (if I(icon_emoji) is set, I(icon_url) will not be used) + required: false + default: None + link_names: + description: + - Automatically create links for channels and usernames in I(msg). + required: false + default: 1 + choices: + - 1 + - 0 + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: + - 'yes' + - 'no' + color: + description: + - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message + required: false + default: 'normal' + choices: + - 'normal' + - 'good' + - 'warning' + - 'danger' + attachments: + description: + - Define a list of attachments. + required: false + default: None +""" + +EXAMPLES = """ +- name: Send notification message via Rocket Chat + local_action: + module: rocketchat + token: thetoken/generatedby/rocketchat + domain: chat.example.com + msg: "{{ inventory_hostname }} completed" + +- name: Send notification message via Rocket Chat all options + local_action: + module: rocketchat + domain: chat.example.com + token: thetoken/generatedby/rocketchat + msg: "{{ inventory_hostname }} completed" + channel: "#ansible" + username: "Ansible on {{ inventory_hostname }}" + icon_url: "http://www.example.com/some-image-file.png" + link_names: 0 + +- name: insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in rocketchat + rocketchat: + token: thetoken/generatedby/rocketchat + domain: chat.example.com + msg: "{{ inventory_hostname }} is alive!" + color: good + username: "" + icon_url: "" + +- name: Use the attachments API + rocketchat: + token: thetoken/generatedby/rocketchat + domain: chat.example.com + attachments: + - text: "Display my system load on host A and B" + color: "#ff00dd" + title: "System load" + fields: + - title: "System A" + value: "load average: 0,74, 0,66, 0,63" + short: "true" + - title: "System B" + value: "load average: 5,16, 4,64, 2,43" + short: "true" + +""" + +RETURN = """ +changed: + description: A flag indicating if any change was made or not. 
+ returned: success + type: boolean + sample: false +""" + +ROCKETCHAT_INCOMING_WEBHOOK = '%s://%s/hooks/%s' + +def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments): + payload = {} + if color == "normal" and text is not None: + payload = dict(text=text) + elif text is not None: + payload = dict(attachments=[dict(text=text, color=color)]) + if channel is not None: + if (channel[0] == '#') or (channel[0] == '@'): + payload['channel'] = channel + else: + payload['channel'] = '#' + channel + if username is not None: + payload['username'] = username + if icon_emoji is not None: + payload['icon_emoji'] = icon_emoji + else: + payload['icon_url'] = icon_url + if link_names is not None: + payload['link_names'] = link_names + + if attachments is not None: + if 'attachments' not in payload: + payload['attachments'] = [] + + if attachments is not None: + for attachment in attachments: + if 'fallback' not in attachment: + attachment['fallback'] = attachment['text'] + payload['attachments'].append(attachment) + + payload="payload=" + module.jsonify(payload) + return payload + +def do_notify_rocketchat(module, domain, token, protocol, payload): + + if token.count('/') < 1: + module.fail_json(msg="Invalid Token specified, provide a valid token") + + rocketchat_incoming_webhook = ROCKETCHAT_INCOMING_WEBHOOK % (protocol, domain, token) + + response, info = fetch_url(module, rocketchat_incoming_webhook, data=payload) + if info['status'] != 200: + module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) + +def main(): + module = AnsibleModule( + argument_spec = dict( + domain = dict(type='str', required=True, default=None), + token = dict(type='str', required=True, no_log=True), + protocol = dict(type='str', default='https', choices=['http', 'https']), + msg = dict(type='str', required=False, default=None), + channel = dict(type='str', default=None), + username = dict(type='str', default='Ansible'), + icon_url = dict(type='str', default='https://www.ansible.com/favicon.ico'), + icon_emoji = dict(type='str', default=None), + link_names = dict(type='int', default=1, choices=[0,1]), + validate_certs = dict(default='yes', type='bool'), + color = dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']), + attachments = dict(type='list', required=False, default=None) + ) + ) + + domain = module.params['domain'] + token = module.params['token'] + protocol = module.params['protocol'] + text = module.params['msg'] + channel = module.params['channel'] + username = module.params['username'] + icon_url = module.params['icon_url'] + icon_emoji = module.params['icon_emoji'] + link_names = module.params['link_names'] + color = module.params['color'] + attachments = module.params['attachments'] + + payload = build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments) + do_notify_rocketchat(module, domain, token, protocol, payload) + + module.exit_json(msg="OK") + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +if __name__ == '__main__': + main() diff --git a/notification/sendgrid.py b/notification/sendgrid.py index 2655b4248bb..b0821983dc7 100644 --- a/notification/sendgrid.py +++ b/notification/sendgrid.py @@ -18,41 +18,96 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
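To make the rocketchat flow above concrete: build_payload_for_rocketchat() serializes the payload dict to JSON and prefixes it with payload=, then do_notify_rocketchat() POSTs it to the incoming-webhook URL assembled from protocol, domain, and token. A minimal stdlib-only sketch under Python 2, with a hypothetical domain and token:

    import json
    import urllib2

    token = 'XXXX/YYYY'  # hypothetical incoming-webhook token
    url = 'https://chat.example.com/hooks/%s' % token
    payload = 'payload=' + json.dumps({
        'channel': '#ansible',
        'username': 'Ansible',
        'text': 'deploy finished',
    })
    # urllib2.Request with a data argument issues a POST
    urllib2.urlopen(urllib2.Request(url, payload)).read()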
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- version_added: "2.0" module: sendgrid short_description: Sends an email with the SendGrid API description: - - Sends an email with a SendGrid account through their API, not through - the SMTP service. + - "Sends an email with a SendGrid account through their API, not through + the SMTP service." notes: - - This module is non-idempotent because it sends an email through the - external API. It is idempotent only in the case that the module fails. - - Like the other notification modules, this one requires an external + - "This module is non-idempotent because it sends an email through the + external API. It is idempotent only in the case that the module fails." + - "Like the other notification modules, this one requires an external dependency to work. In this case, you'll need an active SendGrid - account. + account." + - "In order to use api_key, cc, bcc, attachments, from_name, html_body, headers + you must pip install sendgrid" + - "since 2.2 username and password are not required if you supply an api_key" +requirements: + - sendgrid python library options: username: description: - username for logging into the SendGrid account - required: true + - username for logging into the SendGrid account. + - Since 2.2 it is only required if api_key is not supplied. + required: false + default: null password: - description: password that corresponds to the username - required: true + description: + - password that corresponds to the username + - Since 2.2 it is only required if api_key is not supplied. + required: false + default: null from_address: description: - the address in the "from" field for the email + - the address in the "from" field for the email required: true to_addresses: description: - a list with one or more recipient email addresses + - a list with one or more recipient email addresses required: true subject: description: - the desired subject for the email + - the desired subject for the email required: true - + api_key: + description: + - sendgrid API key to use instead of username/password + version_added: 2.2 + required: false + default: null + cc: + description: + - a list of email addresses to cc + version_added: 2.2 + required: false + default: null + bcc: + description: + - a list of email addresses to bcc + version_added: 2.2 + required: false + default: null + attachments: + description: + - a list of relative or explicit paths of files you want to attach (7MB limit as per SendGrid docs) + version_added: 2.2 + required: false + default: null + from_name: + description: + - the name you want to appear in the from field, i.e 'John Doe' + version_added: 2.2 + required: false + default: null + html_body: + description: + - whether the body is html content that should be rendered + version_added: 2.2 + required: false + default: false + headers: + description: + - a dict to pass on as headers + version_added: 2.2 + required: false + default: null author: "Matt Makai (@makaimc)" ''' @@ -86,26 +141,72 @@ # import urllib +try: + import sendgrid + HAS_SENDGRID = True +except ImportError: + HAS_SENDGRID = False + def post_sendgrid_api(module, username, password, from_address, to_addresses, - subject, body): - SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json" - AGENT = "Ansible" - data = {'api_user': username, 'api_key':password, - 'from':from_address, 'subject': subject, 'text': body} - encoded_data = urllib.urlencode(data) - to_addresses_api = '' - 
for recipient in to_addresses: - if isinstance(recipient, unicode): - recipient = recipient.encode('utf-8') - to_addresses_api += '&to[]=%s' % recipient - encoded_data += to_addresses_api - - headers = { 'User-Agent': AGENT, - 'Content-type': 'application/x-www-form-urlencoded', - 'Accept': 'application/json'} - return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method='POST') + subject, body, api_key=None, cc=None, bcc=None, attachments=None, + html_body=False, from_name=None, headers=None): + + if not HAS_SENDGRID: + SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json" + AGENT = "Ansible" + data = {'api_user': username, 'api_key':password, + 'from':from_address, 'subject': subject, 'text': body} + encoded_data = urllib.urlencode(data) + to_addresses_api = '' + for recipient in to_addresses: + if isinstance(recipient, unicode): + recipient = recipient.encode('utf-8') + to_addresses_api += '&to[]=%s' % recipient + encoded_data += to_addresses_api + + headers = { 'User-Agent': AGENT, + 'Content-type': 'application/x-www-form-urlencoded', + 'Accept': 'application/json'} + return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method='POST') + else: + + if api_key: + sg = sendgrid.SendGridClient(api_key) + else: + sg = sendgrid.SendGridClient(username, password) + + message = sendgrid.Mail() + message.set_subject(subject) + + for recip in to_addresses: + message.add_to(recip) + if cc: + for recip in cc: + message.add_cc(recip) + if bcc: + for recip in bcc: + message.add_bcc(recip) + if headers: + message.set_headers(headers) + + if attachments: + for f in attachments: + name = os.path.basename(f) + message.add_attachment(name, f) + + if from_name: + message.set_from('%s <%s>' % (from_name, from_address)) + else: + message.set_from(from_address) + + if html_body: + message.set_html(body) + else: + message.set_text(body) + + return sg.send(message) # ======================================= # Main # @@ -113,28 +214,57 @@ def post_sendgrid_api(module, username, password, from_address, to_addresses, def main(): module = AnsibleModule( argument_spec=dict( - username=dict(required=True), - password=dict(required=True, no_log=True), + username=dict(required=False), + password=dict(required=False, no_log=True), + api_key=dict(required=False, no_log=True), + bcc=dict(required=False, type='list'), + cc=dict(required=False, type='list'), + headers=dict(required=False, type='dict'), from_address=dict(required=True), + from_name=dict(required=False), to_addresses=dict(required=True, type='list'), subject=dict(required=True), body=dict(required=True), + html_body=dict(required=False, default=False, type='bool'), + attachments=dict(required=False, type='list') ), - supports_check_mode=True + supports_check_mode=True, + mutually_exclusive = [ + ['api_key', 'password'], + ['api_key', 'username'] + ], + required_together = [['username', 'password']], ) username = module.params['username'] password = module.params['password'] + api_key = module.params['api_key'] + bcc = module.params['bcc'] + cc = module.params['cc'] + headers = module.params['headers'] + from_name = module.params['from_name'] from_address = module.params['from_address'] to_addresses = module.params['to_addresses'] subject = module.params['subject'] body = module.params['body'] + html_body = module.params['html_body'] + attachments = module.params['attachments'] + + sendgrid_lib_args = [api_key, bcc, cc, headers, from_name, html_body, attachments] + + if any(lib_arg is not None for lib_arg in
sendgrid_lib_args) and not HAS_SENDGRID: + module.fail_json(msg='You must install the sendgrid python library if you want to use any of the following arguments: api_key, bcc, cc, headers, from_name, html_body, attachments') response, info = post_sendgrid_api(module, username, password, - from_address, to_addresses, subject, body) - if info['status'] != 200: - module.fail_json(msg="unable to send email through SendGrid API: %s" % info['msg']) + from_address, to_addresses, subject, body, attachments=attachments, + bcc=bcc, cc=cc, headers=headers, html_body=html_body, api_key=api_key) + if not HAS_SENDGRID: + if info['status'] != 200: + module.fail_json(msg="unable to send email through SendGrid API: %s" % info['msg']) + else: + if response != 200: + module.fail_json(msg="unable to send email through SendGrid API: %s" % info['message']) module.exit_json(msg=subject, changed=False) diff --git a/notification/slack.py b/notification/slack.py index ba4ed2e4c2d..3d50e89df95 100644 --- a/notification/slack.py +++ b/notification/slack.py @@ -1,6 +1,8 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# (c) 2016, René Moser +# (c) 2015, Stefan Berggren # (c) 2014, Ramon de la Fuente # # This file is part of Ansible @@ -18,12 +20,16 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ module: slack short_description: Send Slack notifications description: - The M(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration -version_added: 1.6 +version_added: "1.6" author: "Ramon de la Fuente (@ramondelafuente)" options: domain: @@ -32,6 +38,7 @@ C(future500.slack.com)) In 1.8 and beyond, this is deprecated and may be ignored. See token documentation for information. required: false + default: None token: description: - Slack integration token. This authenticates you to the slack service. @@ -46,25 +53,28 @@ msg: description: - Message to send. - required: true + required: false + default: None channel: description: - Channel to send the message to. If absent, the message goes to the channel selected for the I(token). required: false + default: None username: description: - This is the sender of the message. required: false - default: ansible + default: "Ansible" icon_url: description: - - Url for the message sender's icon (default C(http://www.ansible.com/favicon.ico)) + - Url for the message sender's icon (default C(https://www.ansible.com/favicon.ico)) required: false icon_emoji: description: - Emoji for the message sender. See Slack documentation for options. (if I(icon_emoji) is set, I(icon_url) will not be used) required: false + default: None link_names: description: - Automatically create links for channels and usernames in I(msg). @@ -77,6 +87,7 @@ description: - Setting for the message parser at Slack required: false + default: None choices: - 'full' - 'none' @@ -90,7 +101,7 @@ - 'yes' - 'no' color: - version_added: 2.0 + version_added: "2.0" description: - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message required: false @@ -100,21 +111,24 @@ - 'good' - 'warning' - 'danger' + attachments: + description: + - Define a list of attachments. This list mirrors the Slack JSON API. 
For more information, see https://api.slack.com/docs/attachments + required: false + default: None """ EXAMPLES = """ - name: Send notification message via Slack local_action: module: slack - domain: future500.slack.com - token: thetokengeneratedbyslack + token: thetoken/generatedby/slack msg: "{{ inventory_hostname }} completed" - name: Send notification message via Slack all options local_action: module: slack - domain: future500.slack.com - token: thetokengeneratedbyslack + token: thetoken/generatedby/slack msg: "{{ inventory_hostname }} completed" channel: "#ansible" username: "Ansible on {{ inventory_hostname }}" @@ -124,22 +138,60 @@ - name: insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack slack: - domain: future500.slack.com - token: thetokengeneratedbyslack + token: thetoken/generatedby/slack msg: "{{ inventory_hostname }} is alive!" color: good username: "" icon_url: "" + +- name: Use the attachments API + slack: + token: thetoken/generatedby/slack + attachments: + - text: "Display my system load on host A and B" + color: "#ff00dd" + title: "System load" + fields: + - title: "System A" + value: "load average: 0,74, 0,66, 0,63" + short: "true" + - title: "System B" + value: "load average: 5,16, 4,64, 2,43" + short: "true" + +- name: Send notification message via Slack (deprecated API using domain) + local_action: + module: slack + domain: future500.slack.com + token: thetokengeneratedbyslack + msg: "{{ inventory_hostname }} completed" + """ OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s' SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s' -def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color): - if color == 'normal': - payload = dict(text=text) - else: - payload = dict(attachments=[dict(text=text, color=color)]) +# See https://api.slack.com/docs/message-formatting#how_to_escape_characters +# Escaping quotes and apostrophe however is related to how Ansible handles them. +html_escape_table = { + '&': "&amp;", + '>': "&gt;", + '<': "&lt;", + '"': "\"", + "'": "\'", +} + +def html_escape(text): + '''Produce entities within text.''' + return "".join(html_escape_table.get(c,c) for c in text) + +def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color, attachments): + payload = {} + if color == "normal" and text is not None: + payload = dict(text=html_escape(text)) + elif text is not None: + # With a custom color we have to set the message as attachment, and explicitly turn markdown parsing on for it.
+ payload = dict(attachments=[dict(text=html_escape(text), color=color, mrkdwn_in=["text"])]) if channel is not None: if (channel[0] == '#') or (channel[0] == '@'): payload['channel'] = channel @@ -156,7 +208,29 @@ def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoj if parse is not None: payload['parse'] = parse - payload="payload=" + module.jsonify(payload) + if attachments is not None: + if 'attachments' not in payload: + payload['attachments'] = [] + + if attachments is not None: + keys_to_escape = [ + 'title', + 'text', + 'author_name', + 'pretext', + 'fallback', + ] + for attachment in attachments: + for key in keys_to_escape: + if key in attachment: + attachment[key] = html_escape(attachment[key]) + + if 'fallback' not in attachment: + attachment['fallback'] = attachment['text'] + + payload['attachments'].append(attachment) + + payload=module.jsonify(payload) return payload def do_notify_slack(module, domain, token, payload): @@ -168,7 +242,12 @@ def do_notify_slack(module, domain, token, payload): module.fail_json(msg="Slack has updated its webhook API. You need to specify a token of the form XXXX/YYYY/ZZZZ in your playbook") slack_incoming_webhook = OLD_SLACK_INCOMING_WEBHOOK % (domain, token) - response, info = fetch_url(module, slack_incoming_webhook, data=payload) + headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json', + } + response, info = fetch_url(module=module, url=slack_incoming_webhook, headers=headers, method='POST', data=payload) + if info['status'] != 200: obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % ('[obscured]') module.fail_json(msg=" failed to send %s to %s: %s" % (payload, obscured_incoming_webhook, info['msg'])) @@ -178,15 +257,16 @@ def main(): argument_spec = dict( domain = dict(type='str', required=False, default=None), token = dict(type='str', required=True, no_log=True), - msg = dict(type='str', required=True), + msg = dict(type='str', required=False, default=None), channel = dict(type='str', default=None), username = dict(type='str', default='Ansible'), - icon_url = dict(type='str', default='http://www.ansible.com/favicon.ico'), + icon_url = dict(type='str', default='https://www.ansible.com/favicon.ico'), icon_emoji = dict(type='str', default=None), link_names = dict(type='int', default=1, choices=[0,1]), parse = dict(type='str', default=None, choices=['none', 'full']), validate_certs = dict(default='yes', type='bool'), - color = dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']) + color = dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']), + attachments = dict(type='list', required=False, default=None) ) ) @@ -200,8 +280,9 @@ def main(): link_names = module.params['link_names'] parse = module.params['parse'] color = module.params['color'] + attachments = module.params['attachments'] - payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color) + payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse, color, attachments) do_notify_slack(module, domain, token, payload) module.exit_json(msg="OK") @@ -209,4 +290,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() + +if __name__ == '__main__': + main() diff --git a/notification/sns.py b/notification/sns.py index 70030d66196..8e5a07dad63 100644 --- a/notification/sns.py +++ b/notification/sns.py @@ 
-18,13 +18,17 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ module: sns short_description: Send Amazon Simple Notification Service (SNS) messages description: - The M(sns) module sends notifications to a topic on your Amazon SNS account version_added: 1.6 -author: "Michael J. Schultz (@mjschultz)" +author: "Michael J. Schultz (@mjschultz)" options: msg: description: @@ -61,7 +65,7 @@ required: false aws_secret_key: description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. required: false default: None aliases: ['ec2_secret_key', 'secret_key'] @@ -77,8 +81,8 @@ required: false aliases: ['aws_region', 'ec2_region'] -requirements: [ "boto" ] -author: Michael J. Schultz +requirements: + - "boto" """ EXAMPLES = """ @@ -98,18 +102,22 @@ topic: "deploy" """ -import sys +try: + import json +except ImportError: + import simplejson as json -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import ec2_argument_spec, connect_to_aws, get_aws_connection_info +from ansible.module_utils.pycompat24 import get_exception try: import boto import boto.ec2 import boto.sns + HAS_BOTO = True except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) + HAS_BOTO = False def arn_topic_lookup(connection, short_topic): @@ -140,6 +148,9 @@ def main(): module = AnsibleModule(argument_spec=argument_spec) + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + msg = module.params['msg'] subject = module.params['subject'] topic = module.params['topic'] @@ -154,7 +165,8 @@ def main(): module.fail_json(msg="region must be specified") try: connection = connect_to_aws(boto.sns, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except boto.exception.NoAuthHandlerFound: + e = get_exception() module.fail_json(msg=str(e)) # .publish() takes full ARN topic id, but I'm lazy and type shortnames @@ -183,9 +195,11 @@ def main(): try: connection.publish(topic=arn_topic, subject=subject, message_structure='json', message=json_msg) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError: + e = get_exception() module.fail_json(msg=str(e)) module.exit_json(msg="OK") -main() +if __name__ == '__main__': + main() diff --git a/notification/telegram.py b/notification/telegram.py new file mode 100644 index 00000000000..57746cf06ae --- /dev/null +++ b/notification/telegram.py @@ -0,0 +1,105 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Artem Feofanov +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
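An aside on the sns hunks above: the Python 3-friendly exception handling does not change the module's playbook interface, so a minimal invocation still looks like the sketch below (the topic name and region are illustrative, and AWS credentials are assumed to come from the usual environment variables).

- name: Notify the deploy topic that this host finished (illustrative names)
  sns:
    msg: "{{ inventory_hostname }} deployed"
    subject: deploy
    topic: deploy
    region: us-east-1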
+# + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ + +module: telegram +version_added: "2.2" +author: "Artem Feofanov (@tyouxa)" + +short_description: module for sending notifications via telegram + +description: + - Send notifications via telegram bot, to a verified group or user +notes: + - You will require a telegram account and create telegram bot to use this module. +options: + msg: + description: + - What message you wish to send. + required: true + token: + description: + - Token identifying your telegram bot. + required: true + chat_id: + description: + - Telegram group or user chat_id + required: true + +""" + +EXAMPLES = """ + +send a message to chat in playbook +- telegram: + token: 'bot9999999:XXXXXXXXXXXXXXXXXXXXXXX' + chat_id: 000000 + msg: Ansible task finished +""" + +RETURN = """ + +msg: + description: The message you attempted to send + returned: success + type: string + sample: "Ansible task finished" +""" + +import urllib + +def main(): + + module = AnsibleModule( + argument_spec = dict( + token = dict(type='str',required=True,no_log=True), + chat_id = dict(type='str',required=True,no_log=True), + msg = dict(type='str',required=True)), + supports_check_mode=True + ) + + token = urllib.quote(module.params.get('token')) + chat_id = urllib.quote(module.params.get('chat_id')) + msg = urllib.quote(module.params.get('msg')) + + url = 'https://api.telegram.org/' + token + '/sendMessage?text=' + msg + '&chat_id=' + chat_id + + if module.check_mode: + module.exit_json(changed=False) + + response, info = fetch_url(module, url) + if info['status'] == 200: + module.exit_json(changed=True) + else: + module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) + + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +if __name__ == '__main__': + main() diff --git a/notification/twilio.py b/notification/twilio.py index 9ed1a09e12e..1d7e059e5c8 100644 --- a/notification/twilio.py +++ b/notification/twilio.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- version_added: "1.6" @@ -139,7 +143,7 @@ def main(): module = AnsibleModule( argument_spec=dict( account_sid=dict(required=True), - auth_token=dict(required=True), + auth_token=dict(required=True, no_log=True), msg=dict(required=True), from_number=dict(required=True), to_number=dict(required=True), @@ -161,8 +165,12 @@ def main(): for number in to_number: r, info = post_twilio_api(module, account_sid, auth_token, msg, from_number, number, media_url) - if info['status'] != 200: - module.fail_json(msg="unable to send message to %s" % number) + if info['status'] not in [200, 201]: + body_message = "unknown error" + if 'body' in info: + body = json.loads(info['body']) + body_message = body['message'] + module.fail_json(msg="unable to send message to %s: %s" % (number, body_message)) module.exit_json(msg=msg, changed=False) diff --git a/notification/typetalk.py b/notification/typetalk.py index 4f6ee28130b..f638be09ab2 100644 --- a/notification/typetalk.py +++ b/notification/typetalk.py @@ -15,6 +15,10 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
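A usage sketch for the twilio change above: on a non-2xx response the module now surfaces Twilio's own error message instead of a generic failure. All credentials and phone numbers below are placeholders.

- name: Send an SMS when a task finishes (placeholder credentials and numbers)
  twilio:
    account_sid: ACXXXXXXXXXXXXXXXX
    auth_token: XXXXXXXXXXXXXXXX
    from_number: "+15552014545"
    to_number: "+15553035681"
    msg: "deploy finished on {{ inventory_hostname }}"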
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: typetalk @@ -44,7 +48,11 @@ ''' EXAMPLES = ''' -- typetalk: client_id=12345 client_secret=12345 topic=1 msg="install completed" +- typetalk: + client_id: 12345 + client_secret: 12345 + topic: 1 + msg: install completed ''' import urllib @@ -57,6 +65,11 @@ except ImportError: json = None +# import module snippets +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import fetch_url, ConnectionError + def do_request(module, url, params, headers=None): data = urllib.urlencode(params) @@ -72,14 +85,15 @@ def do_request(module, url, params, headers=None): raise exc return r -def get_access_token(client_id, client_secret): + +def get_access_token(module, client_id, client_secret): params = { 'client_id': client_id, 'client_secret': client_secret, 'grant_type': 'client_credentials', 'scope': 'topic.post' } - res = do_request('https://typetalk.in/oauth2/access_token', params) + res = do_request(module, 'https://typetalk.in/oauth2/access_token', params) return json.load(res)['access_token'] @@ -88,14 +102,15 @@ def send_message(module, client_id, client_secret, topic, msg): send message to typetalk """ try: - access_token = get_access_token(client_id, client_secret) + access_token = get_access_token(module, client_id, client_secret) url = 'https://typetalk.in/api/v1/topics/%d' % topic headers = { 'Authorization': 'Bearer %s' % access_token, } do_request(module, url, {'message': msg}, headers) return True, {'access_token': access_token} - except ConnectionError, e: + except ConnectionError: + e = get_exception() return False, e @@ -104,7 +119,7 @@ def main(): module = AnsibleModule( argument_spec=dict( client_id=dict(required=True), - client_secret=dict(required=True), + client_secret=dict(required=True, no_log=True), topic=dict(required=True, type='int'), msg=dict(required=True), ), @@ -126,8 +141,5 @@ def main(): module.exit_json(changed=True, topic=topic, msg=msg) -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * if __name__ == '__main__': main() diff --git a/packaging/dpkg_selections.py b/packaging/dpkg_selections.py index f09ff9a9f00..f26ad68f02d 100644 --- a/packaging/dpkg_selections.py +++ b/packaging/dpkg_selections.py @@ -1,4 +1,24 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -23,7 +43,9 @@ ''' EXAMPLES = ''' # Prevent python from being upgraded. 
-- dpkg_selections: name=python selection=hold +- dpkg_selections: + name: python + selection: hold ''' def main(): @@ -57,4 +79,6 @@ def main(): from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/packaging/elasticsearch_plugin.py b/packaging/elasticsearch_plugin.py index 7b092a13667..8a165189625 100644 --- a/packaging/elasticsearch_plugin.py +++ b/packaging/elasticsearch_plugin.py @@ -22,6 +22,10 @@ along with Ansible. If not, see . """ +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: elasticsearch_plugin @@ -33,17 +37,17 @@ options: name: description: - - Name of the plugin to install + - Name of the plugin to install. In ES 2.x, the name can be an url or file location required: True state: description: - Desired state of a plugin. required: False - choices: [present, absent] + choices: ["present", "absent"] default: present url: description: - - Set exact URL to download the plugin from + - Set exact URL to download the plugin from (Only works for ES 1.x) required: False default: None timeout: @@ -61,6 +65,18 @@ - Your configured plugin directory specified in Elasticsearch required: False default: /usr/share/elasticsearch/plugins/ + proxy_host: + description: + - Proxy host to use during plugin installation + required: False + default: None + version_added: "2.1" + proxy_port: + description: + - Proxy port to use during plugin installation + required: False + default: None + version_added: "2.1" version: description: - Version of the plugin to be installed. @@ -71,15 +87,26 @@ EXAMPLES = ''' # Install Elasticsearch head plugin -- elasticsearch_plugin: state=present name="mobz/elasticsearch-head" +- elasticsearch_plugin: + state: present + name: mobz/elasticsearch-head # Install specific version of a plugin -- elasticsearch_plugin: state=present name="com.github.kzwang/elasticsearch-image" version="1.2.0" +- elasticsearch_plugin: + state: present + name: com.github.kzwang/elasticsearch-image + version: '1.2.0' # Uninstall Elasticsearch head plugin -- elasticsearch_plugin: state=absent name="mobz/elasticsearch-head" +- elasticsearch_plugin: + state: absent + name: mobz/elasticsearch-head ''' +PACKAGE_STATE_MAP = dict( + present="install", + absent="remove" +) def parse_plugin_repo(string): elements = string.split("/") @@ -99,70 +126,101 @@ def parse_plugin_repo(string): return repo - def is_plugin_present(plugin_dir, working_dir): return os.path.isdir(os.path.join(working_dir, plugin_dir)) - def parse_error(string): reason = "reason: " - return string[string.index(reason) + len(reason):].strip() + try: + return string[string.index(reason) + len(reason):].strip() + except ValueError: + return string +def install_plugin(module, plugin_bin, plugin_name, version, url, proxy_host, proxy_port, timeout): + cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"], plugin_name] -def main(): + if version: + plugin_name = plugin_name + '/' + version - package_state_map = dict( - present="--install", - absent="--remove" - ) + if proxy_host and proxy_port: + cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port)) + + if url: + cmd_args.append("--url %s" % url) + + if timeout: + cmd_args.append("--timeout %s" % timeout) + + cmd = " ".join(cmd_args) + + if module.check_mode: + rc, out, err = 0, "check mode", "" + else: + rc, out, err = module.run_command(cmd) + + if rc != 0: + reason = parse_error(out) + module.fail_json(msg=reason) + + return True, cmd, out, 
err + +def remove_plugin(module, plugin_bin, plugin_name): + cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)] + + cmd = " ".join(cmd_args) + + if module.check_mode: + rc, out, err = 0, "check mode", "" + else: + rc, out, err = module.run_command(cmd) + + if rc != 0: + reason = parse_error(out) + module.fail_json(msg=reason) + + return True, cmd, out, err +def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), - state=dict(default="present", choices=package_state_map.keys()), + state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()), url=dict(default=None), timeout=dict(default="1m"), - plugin_bin=dict(default="/usr/share/elasticsearch/bin/plugin"), - plugin_dir=dict(default="/usr/share/elasticsearch/plugins/"), + plugin_bin=dict(default="/usr/share/elasticsearch/bin/plugin", type="path"), + plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"), + proxy_host=dict(default=None), + proxy_port=dict(default=None), version=dict(default=None) - ) + ), + supports_check_mode=True ) - plugin_bin = module.params["plugin_bin"] - plugin_dir = module.params["plugin_dir"] - name = module.params["name"] - state = module.params["state"] - url = module.params["url"] - timeout = module.params["timeout"] - version = module.params["version"] + name = module.params["name"] + state = module.params["state"] + url = module.params["url"] + timeout = module.params["timeout"] + plugin_bin = module.params["plugin_bin"] + plugin_dir = module.params["plugin_dir"] + proxy_host = module.params["proxy_host"] + proxy_port = module.params["proxy_port"] + version = module.params["version"] present = is_plugin_present(parse_plugin_repo(name), plugin_dir) # skip if the state is correct if (present and state == "present") or (state == "absent" and not present): - module.exit_json(changed=False, name=name) - - if (version): - name = name + '/' + version + module.exit_json(changed=False, name=name, state=state) - cmd_args = [plugin_bin, package_state_map[state], name] + if state == "present": + changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, url, proxy_host, proxy_port, timeout) - if url: - cmd_args.append("--url %s" % url) - - if timeout: - cmd_args.append("--timeout %s" % timeout) - - cmd = " ".join(cmd_args) - - rc, out, err = module.run_command(cmd) - - if rc != 0: - reason = parse_error(out) - module.fail_json(msg=reason) + elif state == "absent": + changed, cmd, out, err = remove_plugin(module, plugin_bin, name) - module.exit_json(changed=True, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) + module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/kibana_plugin.py b/packaging/kibana_plugin.py new file mode 100644 index 00000000000..91e2f23cf57 --- /dev/null +++ b/packaging/kibana_plugin.py @@ -0,0 +1,248 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +""" +Ansible module to manage elasticsearch shield role +(c) 2016, Thierno IB. BARRY @barryib +Sponsored by Polyconseil http://polyconseil.fr. + +This file is part of Ansible + +Ansible is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. 
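A quick sketch of the elasticsearch_plugin proxy support introduced above; the proxy hostname and port are placeholders, and the patch passes them to the plugin tool as -DproxyHost/-DproxyPort.

- name: Install the head plugin from behind a proxy (placeholder proxy values)
  elasticsearch_plugin:
    name: mobz/elasticsearch-head
    state: present
    proxy_host: proxy.example.com
    proxy_port: "8080"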
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import os
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: kibana_plugin
+short_description: Manage Kibana plugins
+description:
+    - Manages Kibana plugins.
+version_added: "2.2"
+author: Thierno IB. BARRY (@barryib)
+options:
+    name:
+        description:
+            - Name of the plugin to install
+        required: True
+    state:
+        description:
+            - Desired state of a plugin.
+        required: False
+        choices: ["present", "absent"]
+        default: present
+    url:
+        description:
+            - Set exact URL to download the plugin from.
+              For a local file, prefix its absolute path with file://
+        required: False
+        default: None
+    timeout:
+        description:
+            - "Timeout setting: 30s, 1m, 1h..."
+        required: False
+        default: 1m
+    plugin_bin:
+        description:
+            - Location of the plugin binary
+        required: False
+        default: /opt/kibana/bin/kibana
+    plugin_dir:
+        description:
+            - Your configured plugin directory specified in Kibana
+        required: False
+        default: /opt/kibana/installedPlugins/
+    version:
+        description:
+            - Version of the plugin to be installed.
+              If the plugin exists at a previous version, it will NOT be updated unless C(force) is set to yes
+        required: False
+        default: None
+    force:
+        description:
+            - Delete and re-install the plugin. Can be useful for plugin updates
+        required: False
+        choices: ["yes", "no"]
+        default: no
+'''
+
+EXAMPLES = '''
+# Install the Marvel plugin for Kibana
+- kibana_plugin:
+    state: present
+    name: elasticsearch/marvel
+
+# Install a specific version of a plugin
+- kibana_plugin:
+    state: present
+    name: elasticsearch/marvel
+    version: '2.3.3'
+
+# Uninstall the Marvel plugin
+- kibana_plugin:
+    state: absent
+    name: elasticsearch/marvel
+'''
+
+RETURN = '''
+cmd:
+    description: the launched command during plugin management (install / remove)
+    returned: success
+    type: string
+name:
+    description: the plugin name to install or remove
+    returned: success
+    type: string
+url:
+    description: the url the plugin is installed from
+    returned: success
+    type: string
+timeout:
+    description: the timeout for the plugin download
+    returned: success
+    type: string
+stdout:
+    description: the command stdout
+    returned: success
+    type: string
+stderr:
+    description: the command stderr
+    returned: success
+    type: string
+state:
+    description: the state for the managed plugin
+    returned: success
+    type: string
+'''
+
+PACKAGE_STATE_MAP = dict(
+    present="--install",
+    absent="--remove"
+)
+
+def parse_plugin_repo(string):
+    elements = string.split("/")
+
+    # We first consider the simplest form: pluginname
+    repo = elements[0]
+
+    # We consider the form: username/pluginname
+    if len(elements) > 1:
+        repo = elements[1]
+
+    # remove elasticsearch- prefix
+    # remove es- prefix
+    for string in ("elasticsearch-", "es-"):
+        if repo.startswith(string):
+            return repo[len(string):]
+
+    return repo
+
+def is_plugin_present(plugin_dir, working_dir):
+    return os.path.isdir(os.path.join(working_dir, plugin_dir))
+
+def parse_error(string):
+    reason = "reason: "
+    try:
+        return string[string.index(reason) + len(reason):].strip()
+    except ValueError:
+        return string
+
+def install_plugin(module, plugin_bin,
plugin_name, url, timeout): + cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name] + + if url: + cmd_args.append("--url %s" % url) + + if timeout: + cmd_args.append("--timeout %s" % timeout) + + cmd = " ".join(cmd_args) + + if module.check_mode: + return True, cmd, "check mode", "" + + rc, out, err = module.run_command(cmd) + if rc != 0: + reason = parse_error(out) + module.fail_json(msg=reason) + + return True, cmd, out, err + +def remove_plugin(module, plugin_bin, plugin_name): + cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name] + + cmd = " ".join(cmd_args) + + if module.check_mode: + return True, cmd, "check mode", "" + + rc, out, err = module.run_command(cmd) + if rc != 0: + reason = parse_error(out) + module.fail_json(msg=reason) + + return True, cmd, out, err + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()), + url=dict(default=None), + timeout=dict(default="1m"), + plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"), + plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"), + version=dict(default=None), + force=dict(default="no", type="bool") + ), + supports_check_mode=True, + ) + + name = module.params["name"] + state = module.params["state"] + url = module.params["url"] + timeout = module.params["timeout"] + plugin_bin = module.params["plugin_bin"] + plugin_dir = module.params["plugin_dir"] + version = module.params["version"] + force = module.params["force"] + + present = is_plugin_present(parse_plugin_repo(name), plugin_dir) + + # skip if the state is correct + if (present and state == "present" and not force) or (state == "absent" and not present and not force): + module.exit_json(changed=False, name=name, state=state) + + if (version): + name = name + '/' + version + + if state == "present": + if force: + remove_plugin(module, plugin_bin, name) + changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout) + + elif state == "absent": + changed, cmd, out, err = remove_plugin(module, plugin_bin, name) + + module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err) + +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/packaging/language/bower.py b/packaging/language/bower.py index bd7d4b26159..489ab3cb804 100644 --- a/packaging/language/bower.py +++ b/packaging/language/bower.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: bower @@ -37,10 +41,23 @@ required: false default: no choices: [ "yes", "no" ] + production: + description: + - Install with --production flag + required: false + default: no + choices: [ "yes", "no" ] + version_added: "2.0" path: description: - The base path where to install the bower packages required: true + relative_execpath: + description: + - Relative path to bower executable from install path + default: null + required: false + version_added: "2.1" state: description: - The state of the bower package @@ -54,20 +71,37 @@ ''' EXAMPLES = ''' -description: Install "bootstrap" bower package. -- bower: name=bootstrap - -description: Install "bootstrap" bower package on version 3.1.1. 
-- bower: name=bootstrap version=3.1.1 - -description: Remove the "bootstrap" bower package. -- bower: name=bootstrap state=absent - -description: Install packages based on bower.json. -- bower: path=/app/location - -description: Update packages based on bower.json to their latest version. -- bower: path=/app/location state=latest +- name: Install "bootstrap" bower package. + bower: + name: bootstrap + +- name: Install "bootstrap" bower package on version 3.1.1. + bower: + name: bootstrap + version: '3.1.1' + +- name: Remove the "bootstrap" bower package. + bower: + name: bootstrap + state: absent + +- name: Install packages based on bower.json. + bower: + path: /app/location + +- name: Update packages based on bower.json to their latest version. + bower: + path: /app/location + state: latest + +# install bower locally and run from there +- npm: + path: /app/location + name: bower + global: no +- bower: + path: /app/location + relative_execpath: node_modules/.bin ''' @@ -76,7 +110,9 @@ def __init__(self, module, **kwargs): self.module = module self.name = kwargs['name'] self.offline = kwargs['offline'] + self.production = kwargs['production'] self.path = kwargs['path'] + self.relative_execpath = kwargs['relative_execpath'] self.version = kwargs['version'] if kwargs['version']: @@ -86,7 +122,17 @@ def __init__(self, module, **kwargs): def _exec(self, args, run_in_check_mode=False, check_rc=True): if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): - cmd = ["bower"] + args + ['--config.interactive=false', '--allow-root'] + cmd = [] + + if self.relative_execpath: + cmd.append(os.path.join(self.path, self.relative_execpath, "bower")) + if not os.path.isfile(cmd[-1]): + self.module.fail_json(msg="bower not found at relative path %s" % self.relative_execpath) + else: + cmd.append("bower") + + cmd.extend(args) + cmd.extend(['--config.interactive=false', '--allow-root']) if self.name: cmd.append(self.name_version) @@ -94,6 +140,9 @@ def _exec(self, args, run_in_check_mode=False, check_rc=True): if self.offline: cmd.append('--offline') + if self.production: + cmd.append('--production') + # If path is specified, cd into that path and run the command. 
cwd = None if self.path: @@ -119,10 +168,9 @@ def list(self): dep_data = data['dependencies'][dep] if dep_data.get('missing', False): missing.append(dep) - elif \ - 'version' in dep_data['pkgMeta'] and \ - 'update' in dep_data and \ - dep_data['pkgMeta']['version'] != dep_data['update']['latest']: + elif ('version' in dep_data['pkgMeta'] and + 'update' in dep_data and + dep_data['pkgMeta']['version'] != dep_data['update']['latest']): outdated.append(dep) elif dep_data.get('incompatible', False): outdated.append(dep) @@ -148,7 +196,9 @@ def main(): arg_spec = dict( name=dict(default=None), offline=dict(default='no', type='bool'), - path=dict(required=True), + production=dict(default='no', type='bool'), + path=dict(required=True, type='path'), + relative_execpath=dict(default=None, required=False, type='path'), state=dict(default='present', choices=['present', 'absent', 'latest', ]), version=dict(default=None), ) @@ -158,14 +208,16 @@ def main(): name = module.params['name'] offline = module.params['offline'] + production = module.params['production'] path = os.path.expanduser(module.params['path']) + relative_execpath = module.params['relative_execpath'] state = module.params['state'] version = module.params['version'] if state == 'absent' and not name: module.fail_json(msg='uninstalling a package is only available for named packages') - bower = Bower(module, name=name, offline=offline, path=path, version=version) + bower = Bower(module, name=name, offline=offline, production=production, path=path, relative_execpath=relative_execpath, version=version) changed = False if state == 'present': @@ -188,4 +240,5 @@ def main(): # Import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/language/bundler.py b/packaging/language/bundler.py index f4aeff4156f..e7950b08548 100644 --- a/packaging/language/bundler.py +++ b/packaging/language/bundler.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . 
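To illustrate the new bower production flag from the hunk above: with production set, the module appends --production and skips devDependencies. The path is illustrative.

- name: Install bower packages without devDependencies (illustrative path)
  bower:
    path: /app/location
    production: yes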
# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION=''' --- module: bundler @@ -112,19 +116,29 @@ EXAMPLES=''' # Installs gems from a Gemfile in the current directory -- bundler: state=present executable=~/.rvm/gems/2.1.5/bin/bundle +- bundler: + state: present + executable: ~/.rvm/gems/2.1.5/bin/bundle # Excludes the production group from installing -- bundler: state=present exclude_groups=production +- bundler: + state: present + exclude_groups: production # Only install gems from the default and production groups -- bundler: state=present deployment=yes +- bundler: + state: present + deployment_mode: yes # Installs gems using a Gemfile in another directory -- bundler: state=present gemfile=../rails_project/Gemfile +- bundler: + state: present + gemfile: ../rails_project/Gemfile # Updates Gemfile in another directory -- bundler: state=latest chdir=~/rails_project +- bundler: + state: latest + chdir: ~/rails_project ''' @@ -140,15 +154,15 @@ def main(): argument_spec=dict( executable=dict(default=None, required=False), state=dict(default='present', required=False, choices=['present', 'latest']), - chdir=dict(default=None, required=False), + chdir=dict(default=None, required=False, type='path'), exclude_groups=dict(default=None, required=False, type='list'), clean=dict(default=False, required=False, type='bool'), - gemfile=dict(default=None, required=False), + gemfile=dict(default=None, required=False, type='path'), local=dict(default=False, required=False, type='bool'), deployment_mode=dict(default=False, required=False, type='bool'), user_install=dict(default=True, required=False, type='bool'), - gem_path=dict(default=None, required=False), - binstub_directory=dict(default=None, required=False), + gem_path=dict(default=None, required=False, type='path'), + binstub_directory=dict(default=None, required=False, type='path'), extra_args=dict(default=None, required=False), ), supports_check_mode=True @@ -163,7 +177,7 @@ def main(): local = module.params.get('local') deployment_mode = module.params.get('deployment_mode') user_install = module.params.get('user_install') - gem_path = module.params.get('gem_install_path') + gem_path = module.params.get('gem_path') binstub_directory = module.params.get('binstub_directory') extra_args = module.params.get('extra_args') diff --git a/packaging/language/composer.py b/packaging/language/composer.py index 8e11d25216b..172acb4ad1c 100644 --- a/packaging/language/composer.py +++ b/packaging/language/composer.py @@ -19,10 +19,16 @@ # along with Ansible. If not, see . 
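A combined usage sketch for the bundler options touched above (paths are illustrative); note that this patch also corrects the old deployment=yes example to the module's real parameter name, deployment_mode.

- name: Install gems for a production deploy (illustrative paths)
  bundler:
    state: present
    chdir: /srv/app
    deployment_mode: yes
    exclude_groups: development,test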
# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: composer -author: "Dimitrios Tydeas Mengidis (@dmtrs)" +author: + - "Dimitrios Tydeas Mengidis (@dmtrs)" + - "René Moser (@resmo)" short_description: Dependency Manager for PHP version_added: "1.6" description: @@ -34,6 +40,12 @@ - Composer command like "install", "update" and so on required: false default: install + arguments: + version_added: "2.0" + description: + - Composer arguments like required package, version and so on + required: false + default: null working_dir: description: - Directory of your project ( see --working-dir ) @@ -94,37 +106,69 @@ - php - composer installed in bin path (recommended /usr/local/bin) notes: - - Default options that are always appended in each execution are --no-ansi, --no-progress, and --no-interaction + - Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available. + - We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid it. ''' EXAMPLES = ''' # Downloads and installs all the libs and dependencies outlined in the /path/to/project/composer.lock -- composer: command=install working_dir=/path/to/project +- composer: + command: install + working_dir: /path/to/project + +- composer: + command: require + arguments: my/package + working_dir: /path/to/project + +# Clone project and install with all dependencies +- composer: + command: create-project + arguments: package/package /path/to/project ~1.0 + working_dir: /path/to/project + prefer_dist: yes ''' import os import re +try: + import json +except ImportError: + try: + import simplejson as json + except ImportError: + # Let snippet from module_utils/basic.py return a proper error in this case + pass + + def parse_out(string): return re.sub("\s+", " ", string).strip() def has_changed(string): - if "Nothing to install or update" in string: - return False - else: - return True + return "Nothing to install or update" not in string + +def get_available_options(module, command='install'): + # get all availabe options from a composer command using composer help to json + rc, out, err = composer_command(module, "help %s --format=json" % command) + if rc != 0: + output = parse_out(err) + module.fail_json(msg=output) -def composer_install(module, command, options): + command_help_json = json.loads(out) + return command_help_json['definition']['options'] + +def composer_command(module, command, arguments = "", options=[]): php_path = module.get_bin_path("php", True, ["/usr/local/bin"]) composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"]) - cmd = "%s %s %s %s" % (php_path, composer_path, command, " ".join(options)) - + cmd = "%s %s %s %s %s" % (php_path, composer_path, command, " ".join(options), arguments) return module.run_command(cmd) def main(): module = AnsibleModule( argument_spec = dict( command = dict(default="install", type="str", required=False), + arguments = dict(default="", type="str", required=False), working_dir = dict(aliases=["working-dir"], required=True), prefer_source = dict(default="no", type="bool", aliases=["prefer-source"]), prefer_dist = dict(default="no", type="bool", aliases=["prefer-dist"]), @@ -137,48 +181,59 @@ def main(): supports_check_mode=True ) + # Get composer command with fallback to default + command = module.params['command'] + if re.search(r"\s", command): + module.fail_json(msg="Use the 
'arguments' param for passing arguments with the 'command'")
+
+    arguments = module.params['arguments']
+    available_options = get_available_options(module=module, command=command)
+
     options = []

     # Default options
-    options.append('--no-ansi')
-    options.append('--no-progress')
-    options.append('--no-interaction')
+    default_options = [
+        'no-ansi',
+        'no-interaction',
+        'no-progress',
+    ]

-    options.extend(['--working-dir', os.path.abspath(module.params['working_dir'])])
+    for option in default_options:
+        if option in available_options:
+            option = "--%s" % option
+            options.append(option)

-    # Get composer command with fallback to default
-    command = module.params['command']
+    options.extend(['--working-dir', os.path.abspath(module.params['working_dir'])])

-    # Prepare options
-    if module.params['prefer_source']:
-        options.append('--prefer-source')
-    if module.params['prefer_dist']:
-        options.append('--prefer-dist')
-    if module.params['no_dev']:
-        options.append('--no-dev')
-    if module.params['no_scripts']:
-        options.append('--no-scripts')
-    if module.params['no_plugins']:
-        options.append('--no-plugins')
-    if module.params['optimize_autoloader']:
-        options.append('--optimize-autoloader')
-    if module.params['ignore_platform_reqs']:
-        options.append('--ignore-platform-reqs')
+    option_params = {
+        'prefer_source': 'prefer-source',
+        'prefer_dist': 'prefer-dist',
+        'no_dev': 'no-dev',
+        'no_scripts': 'no-scripts',
+        'no_plugins': 'no-plugins',
+        'optimize_autoloader': 'optimize-autoloader',
+        'ignore_platform_reqs': 'ignore-platform-reqs',
+    }
+
+    for param, option in option_params.iteritems():
+        if module.params.get(param) and option in available_options:
+            option = "--%s" % option
+            options.append(option)

     if module.check_mode:
         options.append('--dry-run')

-    rc, out, err = composer_install(module, command, options)
+    rc, out, err = composer_command(module, command, arguments, options)

     if rc != 0:
         output = parse_out(err)
-        module.fail_json(msg=output)
+        module.fail_json(msg=output, stdout=err)
     else:
         # Composer version > 1.0.0-alpha9 now uses stderr for standard notification messages
         output = parse_out(out + err)
-        module.exit_json(changed=has_changed(output), msg=output)
+        module.exit_json(changed=has_changed(output), msg=output, stdout=out+err)

 # import module snippets
 from ansible.module_utils.basic import *
-
-main()
+if __name__ == '__main__':
+    main()
diff --git a/packaging/language/cpanm.py b/packaging/language/cpanm.py
index 0bee74de4cc..59677698069 100644
--- a/packaging/language/cpanm.py
+++ b/packaging/language/cpanm.py
@@ -19,6 +19,10 @@
 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
 #

+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: cpanm
@@ -64,6 +68,26 @@
     required: false
     default: false
     version_added: "2.0"
+  version:
+    description:
+      - minimum version of the perl module to consider acceptable
+    required: false
+    default: false
+    version_added: "2.1"
+  system_lib:
+    description:
+      - Use this if you want to install modules to the system perl include path. You must be root or have "passwordless" sudo for this to work.
+      - This uses the cpanm command line option '--sudo', which has nothing to do with Ansible privilege escalation.
+ required: false + default: false + version_added: "2.0" + aliases: ['use_sudo'] + executable: + description: + - Override the path to the cpanm executable + required: false + default: null + version_added: "2.1" notes: - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. author: "Franck Cuny (@franckcuny)" @@ -71,71 +95,108 @@ EXAMPLES = ''' # install Dancer perl package -- cpanm: name=Dancer +- cpanm: + name: Dancer # install version 0.99_05 of the Plack perl package -- cpanm: name=MIYAGAWA/Plack-0.99_05.tar.gz +- cpanm: + name: MIYAGAWA/Plack-0.99_05.tar.gz # install Dancer into the specified locallib -- cpanm: name=Dancer locallib=/srv/webapps/my_app/extlib +- cpanm: + name: Dancer + locallib: /srv/webapps/my_app/extlib # install perl dependencies from local directory -- cpanm: from_path=/srv/webapps/my_app/src/ +- cpanm: + from_path: /srv/webapps/my_app/src/ # install Dancer perl package without running the unit tests in indicated locallib -- cpanm: name=Dancer notest=True locallib=/srv/webapps/my_app/extlib +- cpanm: + name: Dancer + notest: True + locallib: /srv/webapps/my_app/extlib # install Dancer perl package from a specific mirror -- cpanm: name=Dancer mirror=http://cpan.cpantesters.org/ +- cpanm: + name: Dancer + mirror: 'http://cpan.cpantesters.org/' + +# install Dancer perl package into the system root path +- cpanm: + name: Dancer + system_lib: yes + +# install Dancer if it's not already installed +# OR the installed version is older than version 1.0 +- cpanm: + name: Dancer + version: '1.0' ''' -def _is_package_installed(module, name, locallib, cpanm): +def _is_package_installed(module, name, locallib, cpanm, version): cmd = "" if locallib: os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib - cmd = "%s perl -M%s -e '1'" % (cmd, name) + cmd = "%s perl -e ' use %s" % (cmd, name) + if version: + cmd = "%s %s;'" % (cmd, version) + else: + cmd = "%s;'" % cmd res, stdout, stderr = module.run_command(cmd, check_rc=False) if res == 0: return True else: return False -def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, - installdeps, cpanm): +def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo): # this code should use "%s" like everything else and just return early but not fixing all of it now. 
# don't copy stuff like this if from_path: - cmd = "{cpanm} {path}".format(cpanm=cpanm, path=from_path) + cmd = cpanm + " " + from_path else: - cmd = "{cpanm} {name}".format(cpanm=cpanm, name=name) + cmd = cpanm + " " + name if notest is True: - cmd = "{cmd} -n".format(cmd=cmd) + cmd = cmd + " -n" if locallib is not None: - cmd = "{cmd} -l {locallib}".format(cmd=cmd, locallib=locallib) + cmd = cmd + " -l " + locallib if mirror is not None: - cmd = "{cmd} --mirror {mirror}".format(cmd=cmd, mirror=mirror) + cmd = cmd + " --mirror " + mirror if mirror_only is True: - cmd = "{cmd} --mirror-only".format(cmd=cmd) + cmd = cmd + " --mirror-only" if installdeps is True: - cmd = "{cmd} --installdeps".format(cmd=cmd) + cmd = cmd + " --installdeps" + + if use_sudo is True: + cmd = cmd + " --sudo" return cmd +def _get_cpanm_path(module): + if module.params['executable']: + return module.params['executable'] + else: + return module.get_bin_path('cpanm', True) + + def main(): arg_spec = dict( name=dict(default=None, required=False, aliases=['pkg']), - from_path=dict(default=None, required=False), + from_path=dict(default=None, required=False, type='path'), notest=dict(default=False, type='bool'), - locallib=dict(default=None, required=False), + locallib=dict(default=None, required=False, type='path'), mirror=dict(default=None, required=False), mirror_only=dict(default=False, type='bool'), installdeps=dict(default=False, type='bool'), + system_lib=dict(default=False, type='bool', aliases=['use_sudo']), + version=dict(default=None, required=False), + executable=dict(required=False, type='path'), ) module = AnsibleModule( @@ -143,7 +204,7 @@ def main(): required_one_of=[['name', 'from_path']], ) - cpanm = module.get_bin_path('cpanm', True) + cpanm = _get_cpanm_path(module) name = module.params['name'] from_path = module.params['from_path'] notest = module.boolean(module.params.get('notest', False)) @@ -151,22 +212,22 @@ def main(): mirror = module.params['mirror'] mirror_only = module.params['mirror_only'] installdeps = module.params['installdeps'] + use_sudo = module.params['system_lib'] + version = module.params['version'] changed = False - installed = _is_package_installed(module, name, locallib, cpanm) + installed = _is_package_installed(module, name, locallib, cpanm, version) if not installed: - out_cpanm = err_cpanm = '' - cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, - mirror_only, installdeps, cpanm) + cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo) rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False) if rc_cpanm != 0: module.fail_json(msg=err_cpanm, cmd=cmd) - if err_cpanm and 'is up to date' not in err_cpanm: + if (err_cpanm.find('is up to date') == -1 and out_cpanm.find('is up to date') == -1): changed = True module.exit_json(changed=changed, binary=cpanm, name=name) @@ -174,4 +235,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/language/maven_artifact.py b/packaging/language/maven_artifact.py index 5f20a9af169..d4a241d0e9d 100644 --- a/packaging/language/maven_artifact.py +++ b/packaging/language/maven_artifact.py @@ -25,6 +25,19 @@ import os import hashlib import sys +import posixpath +import urlparse +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +try: + import boto3 + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +ANSIBLE_METADATA = {'status': 
['preview'], + 'supported_by': 'community', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -39,48 +52,69 @@ requirements: - "python >= 2.6" - lxml + - boto if using a S3 repository (s3://...) options: group_id: - description: The Maven groupId coordinate + description: + - The Maven groupId coordinate required: true artifact_id: - description: The maven artifactId coordinate + description: + - The maven artifactId coordinate required: true version: - description: The maven version coordinate + description: + - The maven version coordinate required: false default: latest classifier: - description: The maven classifier coordinate + description: + - The maven classifier coordinate required: false default: null extension: - description: The maven type/extension coordinate + description: + - The maven type/extension coordinate required: false default: jar repository_url: - description: The URL of the Maven Repository to download from + description: + - The URL of the Maven Repository to download from. + - Use s3://... if the repository is hosted on Amazon S3, added in version 2.2. required: false default: http://repo1.maven.org/maven2 username: - description: The username to authenticate as to the Maven Repository + description: + - The username to authenticate as to the Maven Repository. Use AWS secret key of the repository is hosted on S3 required: false default: null + aliases: [ "aws_secret_key" ] password: - description: The password to authenticate with to the Maven Repository + description: + - The password to authenticate with to the Maven Repository. Use AWS secret access key of the repository is hosted on S3 required: false default: null + aliases: [ "aws_secret_access_key" ] dest: - description: The path where the artifact should be written to + description: + - The path where the artifact should be written to required: true default: false state: - description: The desired state of the artifact + description: + - The desired state of the artifact required: true default: present choices: [present,absent] + timeout: + description: + - Specifies a timeout in seconds for the connection attempt + required: false + default: 10 + version_added: "2.3" validate_certs: - description: If C(no), SSL certificates will not be validated. This should only be set to C(no) when no other option exists. + description: + - If C(no), SSL certificates will not be validated. This should only be set to C(no) when no other option exists. 
required: false default: 'yes' choices: ['yes', 'no'] @@ -88,17 +122,35 @@ ''' EXAMPLES = ''' -# Download the latest version of the commons-collections artifact from Maven Central -- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections dest=/tmp/commons-collections-latest.jar - -# Download Apache Commons-Collections 3.2 from Maven Central -- maven_artifact: group_id=org.apache.commons artifact_id=commons-collections version=3.2 dest=/tmp/commons-collections-3.2.jar +# Download the latest version of the JUnit framework artifact from Maven Central +- maven_artifact: + group_id: junit + artifact_id: junit + dest: /tmp/junit-latest.jar + +# Download JUnit 4.11 from Maven Central +- maven_artifact: + group_id: junit + artifact_id: junit + version: 4.11 + dest: /tmp/junit-4.11.jar # Download an artifact from a private repository requiring authentication -- maven_artifact: group_id=com.company artifact_id=library-name repository_url=https://repo.company.com/maven username=user password=pass dest=/tmp/library-name-latest.jar +- maven_artifact: + group_id: com.company + artifact_id: library-name + repository_url: 'https://repo.company.com/maven' + username: user + password: pass + dest: /tmp/library-name-latest.jar # Download a WAR File to the Tomcat webapps directory to be deployed -- maven_artifact: group_id=com.company artifact_id=web-app extension=war repository_url=https://repo.company.com/maven dest=/var/lib/tomcat7/webapps/web-app.war +- maven_artifact: + group_id: com.company + artifact_id: web-app + extension: war + repository_url: 'https://repo.company.com/maven' + dest: /var/lib/tomcat7/webapps/web-app.war ''' class Artifact(object): @@ -122,9 +174,9 @@ def is_snapshot(self): return self.version and self.version.endswith("SNAPSHOT") def path(self, with_version=True): - base = self.group_id.replace(".", "/") + "/" + self.artifact_id + base = posixpath.join(self.group_id.replace(".", "/"), self.artifact_id) if with_version and self.version: - return base + "/" + self.version + return posixpath.join(base, self.version) else: return base @@ -184,14 +236,20 @@ def _find_latest_version_available(self, artifact): return v[0] def find_uri_for_artifact(self, artifact): + if artifact.version == "latest": + artifact.version = self._find_latest_version_available(artifact) + if artifact.is_snapshot(): path = "/%s/maven-metadata.xml" % (artifact.path()) xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r)) timestamp = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")[0] buildNumber = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0] + for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"): + if len(snapshotArtifact.xpath("classifier/text()")) > 0 and snapshotArtifact.xpath("classifier/text()")[0] == artifact.classifier and len(snapshotArtifact.xpath("extension/text()")) > 0 and snapshotArtifact.xpath("extension/text()")[0] == artifact.extension: + return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0]) return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + buildNumber)) - else: - return self._uri_for_artifact(artifact) + + return self._uri_for_artifact(artifact, artifact.version) def _uri_for_artifact(self, artifact, version=None): if artifact.is_snapshot() and not version: @@ -199,19 +257,30 @@ def _uri_for_artifact(self, artifact, version=None): elif not artifact.is_snapshot(): version = 
artifact.version if artifact.classifier: - return self.base + "/" + artifact.path() + "/" + artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension + return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension) - return self.base + "/" + artifact.path() + "/" + artifact.artifact_id + "-" + version + "." + artifact.extension + return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "." + artifact.extension) def _request(self, url, failmsg, f): + url_to_use = url + parsed_url = urlparse(url) + if parsed_url.scheme=='s3': + parsed_url = urlparse(url) + bucket_name = parsed_url.netloc + key_name = parsed_url.path[1:] + client = boto3.client('s3',aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', '')) + url_to_use = client.generate_presigned_url('get_object',Params={'Bucket':bucket_name,'Key':key_name},ExpiresIn=10) + + req_timeout = self.module.params.get('timeout') + # Hack to add parameters in the way that fetch_url expects self.module.params['url_username'] = self.module.params.get('username', '') self.module.params['url_password'] = self.module.params.get('password', '') self.module.params['http_agent'] = self.module.params.get('user_agent', None) - response, info = fetch_url(self.module, url) + response, info = fetch_url(self.module, url_to_use, timeout=req_timeout) if info['status'] != 200: - raise ValueError(failmsg + " because of " + info['msg'] + "for URL " + url) + raise ValueError(failmsg + " because of " + info['msg'] + "for URL " + url_to_use) else: return f(response) @@ -226,9 +295,10 @@ def download(self, artifact, filename=None): if not self.verify_md5(filename, url + ".md5"): response = self._request(url, "Failed to download artifact " + str(artifact), lambda r: r) if response: - with open(filename, 'w') as f: - # f.write(response.read()) - self._write_chunks(response, f, report_hook=self.chunk_report) + f = open(filename, 'w') + # f.write(response.read()) + self._write_chunks(response, f, report_hook=self.chunk_report) + f.close() return True else: return False @@ -272,9 +342,10 @@ def verify_md5(self, file, remote_md5): def _local_md5(self, file): md5 = hashlib.md5() - with open(file, 'rb') as f: - for chunk in iter(lambda: f.read(8192), ''): - md5.update(chunk) + f = open(file, 'rb') + for chunk in iter(lambda: f.read(8192), ''): + md5.update(chunk) + f.close() return md5.hexdigest() @@ -283,18 +354,27 @@ def main(): argument_spec = dict( group_id = dict(default=None), artifact_id = dict(default=None), - version = dict(default=None), + version = dict(default="latest"), classifier = dict(default=None), - extension = dict(default=None, required=True), + extension = dict(default='jar'), repository_url = dict(default=None), - username = dict(default=None), - password = dict(default=None), + username = dict(default=None,aliases=['aws_secret_key']), + password = dict(default=None, no_log=True,aliases=['aws_secret_access_key']), state = dict(default="present", choices=["present","absent"]), # TODO - Implement a "latest" state - dest = dict(default=None), + timeout = dict(default=10, type='int'), + dest = dict(type="path", default=None), validate_certs = dict(required=False, default=True, type='bool'), ) ) + try: + parsed_url = urlparse(module.params["repository_url"]) + except AttributeError as e: + module.fail_json(msg='url parsing went wrong %s' % e) + + if 
parsed_url.scheme=='s3' and not HAS_BOTO: + module.fail_json(msg='boto3 required for this module, when using s3:// repository URLs') + group_id = module.params["group_id"] artifact_id = module.params["artifact_id"] version = module.params["version"] @@ -319,8 +399,8 @@ def main(): prev_state = "absent" if os.path.isdir(dest): - dest = dest + "/" + artifact_id + "-" + version + "." + extension - if os.path.lexists(dest): + dest = posixpath.join(dest, artifact_id + "-" + version + "." + extension) + if os.path.lexists(dest) and downloader.verify_md5(dest, downloader.find_uri_for_artifact(artifact) + '.md5'): prev_state = "present" else: path = os.path.dirname(dest) @@ -339,8 +419,6 @@ def main(): module.fail_json(msg=e.args[0]) -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * + if __name__ == '__main__': main() diff --git a/packaging/language/npm.py b/packaging/language/npm.py index d804efff331..b1df88e60a2 100644 --- a/packaging/language/npm.py +++ b/packaging/language/npm.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: npm @@ -78,28 +82,46 @@ EXAMPLES = ''' description: Install "coffee-script" node.js package. -- npm: name=coffee-script path=/app/location +- npm: + name: coffee-script + path: /app/location description: Install "coffee-script" node.js package on version 1.6.1. -- npm: name=coffee-script version=1.6.1 path=/app/location +- npm: + name: coffee-script + version: '1.6.1' + path: /app/location description: Install "coffee-script" node.js package globally. -- npm: name=coffee-script global=yes +- npm: + name: coffee-script + global: yes description: Remove the globally package "coffee-script". -- npm: name=coffee-script global=yes state=absent +- npm: + name: coffee-script + global: yes + state: absent description: Install "coffee-script" node.js package from custom registry. -- npm: name=coffee-script registry=http://registry.mysite.com +- npm: + name: coffee-script + registry: 'http://registry.mysite.com' description: Install packages based on package.json. -- npm: path=/app/location +- npm: + path: /app/location description: Update packages based on package.json to their latest version. -- npm: path=/app/location state=latest +- npm: + path: /app/location + state: latest description: Install packages based on package.json using the npm installed with nvm v0.10.1. -- npm: path=/app/location executable=/opt/nvm/v0.10.1/bin/npm state=present +- npm: + path: /app/location + executable: /opt/nvm/v0.10.1/bin/npm + state: present ''' import os @@ -107,7 +129,12 @@ try: import json except ImportError: - import simplejson as json + try: + import simplejson as json + except ImportError: + # Let snippet from module_utils/basic.py return a proper error in this case + pass + class Npm(object): def __init__(self, module, **kwargs): @@ -126,7 +153,7 @@ def __init__(self, module, **kwargs): self.executable = [module.get_bin_path('npm', True)] if kwargs['version']: - self.name_version = self.name + '@' + self.version + self.name_version = self.name + '@' + str(self.version) else: self.name_version = self.name @@ -149,7 +176,6 @@ def _exec(self, args, run_in_check_mode=False, check_rc=True): #If path is specified, cd into that path and run the command. 
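+        # Note: path is now declared with type='path' in the argument spec, which
+        # already expands '~' and environment variables, so the explicit
+        # expanduser() call below becomes redundant (the abspath normalization
+        # is dropped along with it).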
cwd = None if self.path: - self.path = os.path.abspath(os.path.expanduser(self.path)) if not os.path.exists(self.path): os.makedirs(self.path) if not os.path.isdir(self.path): @@ -207,10 +233,10 @@ def list_outdated(self): def main(): arg_spec = dict( name=dict(default=None), - path=dict(default=None), + path=dict(default=None, type='path'), version=dict(default=None), production=dict(default='no', type='bool'), - executable=dict(default=None), + executable=dict(default=None, type='path'), registry=dict(default=None), state=dict(default='present', choices=['present', 'absent', 'latest']), ignore_scripts=dict(default=False, type='bool'), @@ -248,9 +274,12 @@ def main(): elif state == 'latest': installed, missing = npm.list() outdated = npm.list_outdated() - if len(missing) or len(outdated): + if len(missing): changed = True npm.install() + if len(outdated): + changed = True + npm.update() else: #absent installed, missing = npm.list() if name in installed: @@ -261,4 +290,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/packaging/language/pear.py b/packaging/language/pear.py index 5762f9c815c..0379538874d 100644 --- a/packaging/language/pear.py +++ b/packaging/language/pear.py @@ -20,6 +20,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: pear @@ -45,16 +49,24 @@ EXAMPLES = ''' # Install pear package -- pear: name=Net_URL2 state=present +- pear: + name: Net_URL2 + state: present # Install pecl package -- pear: name=pecl/json_post state=present +- pear: + name: pecl/json_post + state: present # Upgrade package -- pear: name=Net_URL2 state=latest +- pear: + name: Net_URL2 + state: latest # Remove packages -- pear: name=Net_URL2,pecl/json_post state=absent +- pear: + name: Net_URL2,pecl/json_post + state: absent ''' import os @@ -224,4 +236,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/os/apk.py b/packaging/os/apk.py index ec0e3908faf..8d8c5a6f808 100644 --- a/packaging/os/apk.py +++ b/packaging/os/apk.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with this software. If not, see . +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: apk @@ -42,7 +46,7 @@ choices: [ "present", "absent", "latest" ] update_cache: description: - - Update repository indexes. Can be run with other steps or on it's own. + - Update repository indexes. Can be run with other steps or on it's own. required: false default: no choices: [ "yes", "no" ] @@ -52,38 +56,60 @@ required: false default: no choices: [ "yes", "no" ] +notes: + - '"name" and "upgrade" are mutually exclusive.' 
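# [Editor's illustrative sketch -- not part of the diff above.]
# The note just added to the apk documentation is enforced mechanically in
# main() later in this diff: AnsibleModule rejects forbidden parameter
# combinations before any work happens. A minimal sketch of that pattern,
# mirroring the apk argument_spec shown further down:
from ansible.module_utils.basic import AnsibleModule

def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='list'),
            update_cache=dict(default='no', type='bool'),
            upgrade=dict(default='no', type='bool'),
        ),
        # At least one of these has to be supplied...
        required_one_of=[['name', 'update_cache', 'upgrade']],
        # ...and 'name' may not be combined with 'upgrade'.
        mutually_exclusive=[['name', 'upgrade']],
        supports_check_mode=True,
    )
# [End of editor's sketch.]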
''' EXAMPLES = ''' # Update repositories and install "foo" package -- apk: name=foo update_cache=yes +- apk: + name: foo + update_cache: yes # Update repositories and install "foo" and "bar" packages -- apk: name=foo,bar update_cache=yes +- apk: + name: foo,bar + update_cache: yes # Remove "foo" package -- apk: name=foo state=absent +- apk: + name: foo + state: absent # Remove "foo" and "bar" packages -- apk: name=foo,bar state=absent +- apk: + name: foo,bar + state: absent # Install the package "foo" -- apk: name=foo state=present +- apk: + name: foo + state: present # Install the packages "foo" and "bar" -- apk: name=foo,bar state=present +- apk: + name: foo,bar + state: present # Update repositories and update package "foo" to latest version -- apk: name=foo state=latest update_cache=yes +- apk: + name: foo + state: latest + update_cache: yes # Update repositories and update packages "foo" and "bar" to latest versions -- apk: name=foo,bar state=latest update_cache=yes +- apk: + name: foo,bar + state: latest + update_cache: yes # Update all installed packages to the latest versions -- apk: upgrade=yes +- apk: + upgrade: yes # Update repositories as a separate step -- apk: update_cache=yes +- apk: + update_cache: yes ''' import os @@ -114,6 +140,23 @@ def query_latest(module, name): return False return True +def query_virtual(module, name): + cmd = "%s -v info --description %s" % (APK_PATH, name) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + search_pattern = "^%s: virtual meta package" % (name) + if re.search(search_pattern, stdout): + return True + return False + +def get_dependencies(module, name): + cmd = "%s -v info --depends %s" % (APK_PATH, name) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + dependencies = stdout.split() + if len(dependencies) > 1: + return dependencies[1:] + else: + return [] + def upgrade_packages(module): if module.check_mode: cmd = "%s upgrade --simulate" % (APK_PATH) @@ -128,29 +171,40 @@ def upgrade_packages(module): def install_packages(module, names, state): upgrade = False - uninstalled = [] + to_install = [] + to_upgrade = [] for name in names: - if not query_package(module, name): - uninstalled.append(name) - elif state == 'latest' and not query_latest(module, name): - upgrade = True - if not uninstalled and not upgrade: + # Check if virtual package + if query_virtual(module, name): + # Get virtual package dependencies + dependencies = get_dependencies(module, name) + for dependency in dependencies: + if state == 'latest' and not query_latest(module, dependency): + to_upgrade.append(dependency) + else: + if not query_package(module, name): + to_install.append(name) + elif state == 'latest' and not query_latest(module, name): + to_upgrade.append(name) + if to_upgrade: + upgrade = True + if not to_install and not upgrade: module.exit_json(changed=False, msg="package(s) already installed") - names = " ".join(uninstalled) + packages = " ".join(to_install) + " ".join(to_upgrade) if upgrade: if module.check_mode: - cmd = "%s add --upgrade --simulate %s" % (APK_PATH, names) + cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages) else: - cmd = "%s add --upgrade %s" % (APK_PATH, names) + cmd = "%s add --upgrade %s" % (APK_PATH, packages) else: if module.check_mode: - cmd = "%s add --simulate %s" % (APK_PATH, names) + cmd = "%s add --simulate %s" % (APK_PATH, packages) else: - cmd = "%s add %s" % (APK_PATH, names) + cmd = "%s add %s" % (APK_PATH, packages) rc, stdout, stderr = module.run_command(cmd, check_rc=False) if 
rc != 0: - module.fail_json(msg="failed to install %s" % (names)) - module.exit_json(changed=True, msg="installed %s package(s)" % (names)) + module.fail_json(msg="failed to install %s" % (packages)) + module.exit_json(changed=True, msg="installed %s package(s)" % (packages)) def remove_packages(module, names): installed = [] @@ -168,7 +222,7 @@ def remove_packages(module, names): if rc != 0: module.fail_json(msg="failed to remove %s package(s)" % (names)) module.exit_json(changed=True, msg="removed %s package(s)" % (names)) - + # ========================================== # Main control flow. @@ -177,13 +231,17 @@ def main(): argument_spec = dict( state = dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']), name = dict(type='list'), - update_cache = dict(default='no', choices=BOOLEANS, type='bool'), - upgrade = dict(default='no', choices=BOOLEANS, type='bool'), + update_cache = dict(default='no', type='bool'), + upgrade = dict(default='no', type='bool'), ), required_one_of = [['name', 'update_cache', 'upgrade']], + mutually_exclusive = [['name', 'upgrade']], supports_check_mode = True ) + # Set LANG env since we parse stdout + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + global APK_PATH APK_PATH = module.get_bin_path('apk', required=True) diff --git a/packaging/os/dnf.py b/packaging/os/dnf.py index 7afbee44c54..016fdf60453 100644 --- a/packaging/os/dnf.py +++ b/packaging/os/dnf.py @@ -1,7 +1,8 @@ -#!/usr/bin/python -tt +#!/usr/bin/python # -*- coding: utf-8 -*- -# Written by Cristian van Ee +# Copyright 2015 Cristian van Ee +# Copyright 2015 Igor Gnatenko # # This file is part of Ansible # @@ -19,17 +20,9 @@ # along with Ansible. If not, see . # - -import traceback -import os -import dnf - -try: - from dnf import find_unfinished_transactions, find_ts_remaining - from rpmUtils.miscutils import splitFilename - transaction_helpers = True -except: - transaction_helpers = False +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -45,17 +38,20 @@ required: true default: null aliases: [] + list: description: - Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples. required: false default: null + state: description: - Whether to install (C(present), C(latest)), or remove (C(absent)) a package. required: false choices: [ "present", "latest", "absent" ] default: "present" + enablerepo: description: - I(Repoid) of repositories to enable for the install/update operation. 
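# [Editor's illustrative sketch -- not part of the diff above.]
# The apk change above pins the subprocess locale (LANG/LC_* = C) because
# the module parses command output; localized messages would break that
# matching. run_command_environ_update is merged into the environment of
# every module.run_command() call. The same idea with plain subprocess:
import os
import subprocess

def run_unlocalized(cmd):
    env = dict(os.environ, LANG='C', LC_ALL='C',
               LC_MESSAGES='C', LC_CTYPE='C')
    # Output is now stable English, safe to match with string patterns.
    return subprocess.check_output(cmd, env=env)
# [End of editor's sketch.]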
@@ -93,738 +89,391 @@ notes: [] # informational: requirements for nodes requirements: - - dnf - - yum-utils (for repoquery) -author: "Cristian van Ee (@DJMuggs)" + - "python >= 2.6" + - python-dnf +author: + - '"Igor Gnatenko (@ignatenkobrain)" ' + - '"Cristian van Ee (@DJMuggs)" ' ''' EXAMPLES = ''' - name: install the latest version of Apache - dnf: name=httpd state=latest + dnf: + name: httpd + state: latest - name: remove the Apache package - dnf: name=httpd state=absent + dnf: + name: httpd + state: absent - name: install the latest version of Apache from the testing repo - dnf: name=httpd enablerepo=testing state=present + dnf: + name: httpd + enablerepo: testing + state: present - name: upgrade all packages - dnf: name=* state=latest + dnf: + name: * + state: latest - name: install the nginx rpm from a remote repo - dnf: name=http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present + dnf: + name: 'http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm' + state: present - name: install nginx rpm from a local file - dnf: name=/usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present + dnf: + name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm + state: present - name: install the 'Development tools' package group - dnf: name="@Development tools" state=present - + dnf: + name: '@Development tools' + state: present ''' +import os -def_qf = "%{name}-%{version}-%{release}.%{arch}" - -repoquery='/usr/bin/repoquery' -if not os.path.exists(repoquery): - repoquery = None - -dnfbin='/usr/bin/dnf' - -import syslog - -def log(msg): - syslog.openlog('ansible-dnf', 0, syslog.LOG_USER) - syslog.syslog(syslog.LOG_NOTICE, msg) - -def dnf_base(conf_file=None, cachedir=False): - - my = dnf.Base() - my.conf.debuglevel=0 - if conf_file and os.path.exists(conf_file): - my.conf.config_file_path = conf_file - my.conf.read() - my.read_all_repos() - my.fill_sack() - - return my - -def install_dnf_utils(module): - - if not module.check_mode: - dnf_path = module.get_bin_path('dnf') - if dnf_path: - rc, so, se = module.run_command('%s -y install yum-utils' % dnf_path) - if rc == 0: - this_path = module.get_bin_path('repoquery') - global repoquery - repoquery = this_path - -def po_to_nevra(po): - - if hasattr(po, 'ui_nevra'): - return po.ui_nevra - else: - return '%s-%s-%s.%s' % (po.name, po.version, po.release, po.arch) - -def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[], is_pkg=False): - - if not repoq: - - pkgs = [] - try: - my = dnf_base(conf_file) - for rid in en_repos: - my.repos.enableRepo(rid) - for rid in dis_repos: - my.repos.disableRepo(rid) - - e,m,u = my.rpmdb.matchPackageNames([pkgspec]) - pkgs = e + m - if not pkgs: - pkgs.extend(my.returnInstalledPackagesByDep(pkgspec)) - except Exception, e: - module.fail_json(msg="Failure talking to dnf: %s" % e) - - return [ po_to_nevra(p) for p in pkgs ] - - else: - - cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, pkgspec] - rc,out,err = module.run_command(cmd) - if not is_pkg: - cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, "--whatprovides", pkgspec] - rc2,out2,err2 = module.run_command(cmd) - else: - rc2,out2,err2 = (0, '', '') - - if rc == 0 and rc2 == 0: - out += out2 - return [ p for p in out.split('\n') if p.strip() ] +try: + import dnf + import dnf + import dnf.cli + import dnf.const + import dnf.exceptions + import dnf.subject + import dnf.util + HAS_DNF = 
True +except ImportError: + HAS_DNF = False + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six import PY2 + + +def _ensure_dnf(module): + if not HAS_DNF: + if PY2: + package = 'python2-dnf' else: - module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2)) - - return [] - -def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]): + package = 'python3-dnf' - if not repoq: + if module.check_mode: + module.fail_json(msg="`{0}` is not installed, but it is required" + " for the Ansible dnf module.".format(package)) - pkgs = [] + module.run_command(['dnf', 'install', '-y', package], check_rc=True) + global dnf try: - my = dnf_base(conf_file) - for rid in en_repos: - my.repos.enableRepo(rid) - for rid in dis_repos: - my.repos.disableRepo(rid) - - e,m,u = my.pkgSack.matchPackageNames([pkgspec]) - pkgs = e + m - if not pkgs: - pkgs.extend(my.returnPackagesByDep(pkgspec)) - except Exception, e: - module.fail_json(msg="Failure talking to dnf: %s" % e) - - return [ po_to_nevra(p) for p in pkgs ] - - else: - myrepoq = list(repoq) - - for repoid in dis_repos: - r_cmd = ['--disablerepo', repoid] - myrepoq.extend(r_cmd) - - for repoid in en_repos: - r_cmd = ['--enablerepo', repoid] - myrepoq.extend(r_cmd) - - cmd = myrepoq + ["--qf", qf, pkgspec] - rc,out,err = module.run_command(cmd) - if rc == 0: - return [ p for p in out.split('\n') if p.strip() ] + import dnf + import dnf.cli + import dnf.const + import dnf.exceptions + import dnf.subject + import dnf.util + except ImportError: + module.fail_json(msg="Could not import the dnf python module." + " Please install `{0}` package.".format(package)) + + +def _configure_base(module, base, conf_file, disable_gpg_check): + """Configure the dnf Base object.""" + conf = base.conf + + # Turn off debug messages in the output + conf.debuglevel = 0 + + # Set whether to check gpg signatures + conf.gpgcheck = not disable_gpg_check + + # Don't prompt for user confirmations + conf.assumeyes = True + + # Change the configuration file path if provided + if conf_file: + # Fail if we can't read the configuration file. + if not os.access(conf_file, os.R_OK): + module.fail_json( + msg="cannot read configuration file", conf_file=conf_file) else: - module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err)) - - - return [] + conf.config_file_path = conf_file + + # Read the configuration file + conf.read() + + +def _specify_repositories(base, disablerepo, enablerepo): + """Enable and disable repositories matching the provided patterns.""" + base.read_all_repos() + repos = base.repos + + # Disable repositories + for repo_pattern in disablerepo: + for repo in repos.get_matching(repo_pattern): + repo.disable() + + # Enable repositories + for repo_pattern in enablerepo: + for repo in repos.get_matching(repo_pattern): + repo.enable() + + +def _base(module, conf_file, disable_gpg_check, disablerepo, enablerepo): + """Return a fully configured dnf Base object.""" + base = dnf.Base() + _configure_base(module, base, conf_file, disable_gpg_check) + _specify_repositories(base, disablerepo, enablerepo) + base.fill_sack(load_system_repo='auto') + return base + + +def _package_dict(package): + """Return a dictionary of information for the package.""" + # NOTE: This no longer contains the 'dnfstate' field because it is + # already known based on the query type. 
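# [Editor's illustrative sketch -- not part of the diff above.]
# Outside Ansible, the _base() flow defined above plus the transaction calls
# used later in ensure() reduce to a few dnf API steps. A sketch, assuming
# python-dnf is installed and the script runs as root:
import dnf

base = dnf.Base()
base.conf.debuglevel = 0          # quiet, parseable output
base.conf.assumeyes = True        # never prompt for confirmation
base.read_all_repos()             # load repository definitions
base.fill_sack(load_system_repo='auto')
base.install('nano')              # mark a package spec for install
if base.resolve(allow_erasing=False):     # True -> transaction is non-empty
    base.download_packages(base.transaction.install_set)
    base.do_transaction()         # apply the resolved transaction
# [End of editor's sketch.]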
+ result = { + 'name': package.name, + 'arch': package.arch, + 'epoch': str(package.epoch), + 'release': package.release, + 'version': package.version, + 'repo': package.repoid} + result['nevra'] = '{epoch}:{name}-{version}-{release}.{arch}'.format( + **result) + + return result + + +def list_items(module, base, command): + """List package info based on the command.""" + # Rename updates to upgrades + if command == 'updates': + command = 'upgrades' + + # Return the corresponding packages + if command in ['installed', 'upgrades', 'available']: + results = [ + _package_dict(package) + for package in getattr(base.sack.query(), command)()] + # Return the enabled repository ids + elif command in ['repos', 'repositories']: + results = [ + {'repoid': repo.id, 'state': 'enabled'} + for repo in base.repos.iter_enabled()] + # Return any matching packages + else: + packages = dnf.subject.Subject(command).get_best_query(base.sack) + results = [_package_dict(package) for package in packages] -def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]): + module.exit_json(results=results) - if not repoq: - retpkgs = [] - pkgs = [] - updates = [] +def _mark_package_install(module, base, pkg_spec): + """Mark the package for install.""" + try: + base.install(pkg_spec) + except dnf.exceptions.MarkingError: + module.fail_json(msg="No package {} available.".format(pkg_spec)) - try: - my = dnf_base(conf_file) - for rid in en_repos: - my.repos.enableRepo(rid) - for rid in dis_repos: - my.repos.disableRepo(rid) - - pkgs = my.returnPackagesByDep(pkgspec) + my.returnInstalledPackagesByDep(pkgspec) - if not pkgs: - e,m,u = my.pkgSack.matchPackageNames([pkgspec]) - pkgs = e + m - updates = my.doPackageLists(pkgnarrow='updates').updates - except Exception, e: - module.fail_json(msg="Failure talking to dnf: %s" % e) - - for pkg in pkgs: - if pkg in updates: - retpkgs.append(pkg) - - return set([ po_to_nevra(p) for p in retpkgs ]) - else: - myrepoq = list(repoq) - for repoid in dis_repos: - r_cmd = ['--disablerepo', repoid] - myrepoq.extend(r_cmd) - - for repoid in en_repos: - r_cmd = ['--enablerepo', repoid] - myrepoq.extend(r_cmd) - - cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec] - rc,out,err = module.run_command(cmd) - - if rc == 0: - return set([ p for p in out.split('\n') if p.strip() ]) +def _parse_spec_group_file(names): + pkg_specs, grp_specs, filenames = [], [], [] + for name in names: + if name.endswith(".rpm"): + filenames.append(name) + elif name.startswith("@"): + grp_specs.append(name[1:]) else: - module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err)) - - return [] - -def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]): + pkg_specs.append(name) + return pkg_specs, grp_specs, filenames - if not repoq: - pkgs = [] - try: - my = dnf_base(conf_file) - for rid in en_repos: - my.repos.enableRepo(rid) - for rid in dis_repos: - my.repos.disableRepo(rid) - - pkgs = my.returnPackagesByDep(req_spec) + my.returnInstalledPackagesByDep(req_spec) - if not pkgs: - e,m,u = my.pkgSack.matchPackageNames([req_spec]) - pkgs.extend(e) - pkgs.extend(m) - e,m,u = my.rpmdb.matchPackageNames([req_spec]) - pkgs.extend(e) - pkgs.extend(m) - except Exception, e: - module.fail_json(msg="Failure talking to dnf: %s" % e) - - return set([ po_to_nevra(p) for p in pkgs ]) - - else: - myrepoq = list(repoq) - for repoid in dis_repos: - r_cmd = ['--disablerepo', repoid] - myrepoq.extend(r_cmd) - - for repoid in en_repos: - r_cmd = ['--enablerepo', 
repoid] - myrepoq.extend(r_cmd) - - cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec] - rc,out,err = module.run_command(cmd) - cmd = myrepoq + ["--qf", qf, req_spec] - rc2,out2,err2 = module.run_command(cmd) - if rc == 0 and rc2 == 0: - out += out2 - pkgs = set([ p for p in out.split('\n') if p.strip() ]) - if not pkgs: - pkgs = is_installed(module, repoq, req_spec, conf_file, qf=qf) - return pkgs - else: - module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2)) - - return [] - -def transaction_exists(pkglist): - """ - checks the package list to see if any packages are - involved in an incomplete transaction - """ - - conflicts = [] - if not transaction_helpers: - return conflicts - - # first, we create a list of the package 'nvreas' - # so we can compare the pieces later more easily - pkglist_nvreas = [] - for pkg in pkglist: - pkglist_nvreas.append(splitFilename(pkg)) - - # next, we build the list of packages that are - # contained within an unfinished transaction - unfinished_transactions = find_unfinished_transactions() - for trans in unfinished_transactions: - steps = find_ts_remaining(trans) - for step in steps: - # the action is install/erase/etc., but we only - # care about the package spec contained in the step - (action, step_spec) = step - (n,v,r,e,a) = splitFilename(step_spec) - # and see if that spec is in the list of packages - # requested for installation/updating - for pkg in pkglist_nvreas: - # if the name and arch match, we're going to assume - # this package is part of a pending transaction - # the label is just for display purposes - label = "%s-%s" % (n,a) - if n == pkg[0] and a == pkg[4]: - if label not in conflicts: - conflicts.append("%s-%s" % (n,a)) - break - return conflicts - -def local_nvra(module, path): - """return nvra of a local rpm passed in""" - - cmd = ['/bin/rpm', '-qp' ,'--qf', - '%{name}-%{version}-%{release}.%{arch}\n', path ] - rc, out, err = module.run_command(cmd) - if rc != 0: - return None - nvra = out.split('\n')[0] - return nvra - -def pkg_to_dict(pkgstr): - - if pkgstr.strip(): - n,e,v,r,a,repo = pkgstr.split('|') - else: - return {'error_parsing': pkgstr} - - d = { - 'name':n, - 'arch':a, - 'epoch':e, - 'release':r, - 'version':v, - 'repo':repo, - 'nevra': '%s:%s-%s-%s.%s' % (e,n,v,r,a) - } - - if repo == 'installed': - d['dnfstate'] = 'installed' +def _install_remote_rpms(base, filenames): + if int(dnf.__version__.split(".")[0]) >= 2: + pkgs = list(sorted(base.add_remote_rpms(list(filenames)), reverse=True)) else: - d['dnfstate'] = 'available' - - return d - -def repolist(module, repoq, qf="%{repoid}"): - - cmd = repoq + ["--qf", qf, "-a"] - rc,out,err = module.run_command(cmd) - ret = [] - if rc == 0: - ret = set([ p for p in out.split('\n') if p.strip() ]) - return ret - -def list_stuff(module, conf_file, stuff): - - qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}" - repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet', '-q'] - if conf_file and os.path.exists(conf_file): - repoq += ['-c', conf_file] - - if stuff == 'installed': - return [ pkg_to_dict(p) for p in is_installed(module, repoq, '-a', conf_file, qf=qf) if p.strip() ] - elif stuff == 'updates': - return [ pkg_to_dict(p) for p in is_update(module, repoq, '-a', conf_file, qf=qf) if p.strip() ] - elif stuff == 'available': - return [ pkg_to_dict(p) for p in is_available(module, repoq, '-a', conf_file, qf=qf) if p.strip() ] - elif stuff == 'repos': - return [ dict(repoid=name, state='enabled') for name in repolist(module, repoq) if 
name.strip() ] + pkgs = [] + for filename in filenames: + pkgs.append(base.add_remote_rpm(filename)) + for pkg in pkgs: + base.package_install(pkg) + + +def ensure(module, base, state, names): + # Accumulate failures. Package management modules install what they can + # and fail with a message about what they can't. + failures = [] + allow_erasing = False + if names == ['*'] and state == 'latest': + base.upgrade_all() else: - return [ pkg_to_dict(p) for p in is_installed(module, repoq, stuff, conf_file, qf=qf) + is_available(module, repoq, stuff, conf_file, qf=qf) if p.strip() ] - -def install(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos): - - res = {} - res['results'] = [] - res['msg'] = '' - res['rc'] = 0 - res['changed'] = False - - for spec in items: - pkg = None - - # check if pkgspec is installed (if possible for idempotence) - # localpkg - if spec.endswith('.rpm') and '://' not in spec: - # get the pkg name-v-r.arch - if not os.path.exists(spec): - res['msg'] += "No Package file matching '%s' found on system" % spec - module.fail_json(**res) - - nvra = local_nvra(module, spec) - # look for them in the rpmdb - if is_installed(module, repoq, nvra, conf_file, en_repos=en_repos, dis_repos=dis_repos): - # if they are there, skip it - continue - pkg = spec - - # URL - elif '://' in spec: - pkg = spec - - #groups :( - elif spec.startswith('@'): - # complete wild ass guess b/c it's a group - pkg = spec - - # range requires or file-requires or pkgname :( - else: - # most common case is the pkg is already installed and done - # short circuit all the bs - and search for it as a pkg in is_installed - # if you find it then we're done - if not set(['*','?']).intersection(set(spec)): - pkgs = is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True) - if pkgs: - res['results'].append('%s providing %s is already installed' % (pkgs[0], spec)) - continue - - # look up what pkgs provide this - pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos) - if not pkglist: - res['msg'] += "No Package matching '%s' found available, installed or updated" % spec - module.fail_json(**res) - - # if any of the packages are involved in a transaction, fail now - # so that we don't hang on the dnf operation later - conflicts = transaction_exists(pkglist) - if len(conflicts) > 0: - res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts) - module.fail_json(**res) - - # if any of them are installed - # then nothing to do - - found = False - for this in pkglist: - if is_installed(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True): - found = True - res['results'].append('%s providing %s is already installed' % (this, spec)) - break - - # if the version of the pkg you have installed is not in ANY repo, but there are - # other versions in the repos (both higher and lower) then the previous checks won't work. - # so we check one more time. This really only works for pkgname - not for file provides or virt provides - # but virt provides should be all caught in what_provides on its own. 
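# [Editor's illustrative sketch -- not part of the diff above.]
# The new ensure() first routes every requested name through
# _parse_spec_group_file() (defined earlier in this diff), which is plain
# string dispatch. A worked example of that behaviour:
def parse_spec_group_file(names):
    pkg_specs, grp_specs, filenames = [], [], []
    for name in names:
        if name.endswith('.rpm'):        # local or remote rpm file
            filenames.append(name)
        elif name.startswith('@'):       # comps group or environment
            grp_specs.append(name[1:])
        else:                            # ordinary package spec
            pkg_specs.append(name)
    return pkg_specs, grp_specs, filenames

# parse_spec_group_file(['nano', '@Development tools', 'foo.rpm'])
# -> (['nano'], ['Development tools'], ['foo.rpm'])
# [End of editor's sketch.]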
- # highly irritating - if not found: - if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos): - found = True - res['results'].append('package providing %s is already installed' % (spec)) - - if found: - continue - - # if not - then pass in the spec as what to install - # we could get here if nothing provides it but that's not - # the error we're catching here - pkg = spec - - cmd = dnf_basecmd + ['install', pkg] - - if module.check_mode: - module.exit_json(changed=True) - - changed = True - - rc, out, err = module.run_command(cmd) - - # Fail on invalid urls: - if (rc == 1 and '://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)): - err = 'Package at %s could not be installed' % spec - module.fail_json(changed=False,msg=err,rc=1) - elif (rc != 0 and 'Nothing to do' in err) or 'Nothing to do' in out: - # avoid failing in the 'Nothing To Do' case - # this may happen with an URL spec. - # for an already installed group, - # we get rc = 0 and 'Nothing to do' in out, not in err. - rc = 0 - err = '' - out = '%s: Nothing to do' % spec - changed = False - - res['rc'] += rc - res['results'].append(out) - res['msg'] += err - - # FIXME - if we did an install - go and check the rpmdb to see if it actually installed - # look for the pkg in rpmdb - # look for the pkg via obsoletes - - # accumulate any changes - res['changed'] |= changed - - module.exit_json(**res) + pkg_specs, group_specs, filenames = _parse_spec_group_file(names) + if group_specs: + base.read_comps() + + pkg_specs = [p.strip() for p in pkg_specs] + filenames = [f.strip() for f in filenames] + groups = [] + environments = [] + for group_spec in (g.strip() for g in group_specs): + group = base.comps.group_by_pattern(group_spec) + if group: + groups.append(group) + else: + environment = base.comps.environment_by_pattern(group_spec) + if environment: + environments.append(environment.id) + else: + module.fail_json( + msg="No group {} available.".format(group_spec)) + + if state in ['installed', 'present']: + # Install files. + _install_remote_rpms(base, filenames) + + # Install groups. + for group in groups: + try: + base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES) + except dnf.exceptions.Error as e: + # In dnf 2.0 if all the mandatory packages in a group do + # not install, an error is raised. We want to capture + # this but still install as much as possible. + failures.append((group, e)) + + for environment in environments: + try: + base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES) + except dnf.exceptions.Error as e: + failures.append((group, e)) + # Install packages. + for pkg_spec in pkg_specs: + _mark_package_install(module, base, pkg_spec) -def remove(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos): + elif state == 'latest': + # "latest" is same as "installed" for filenames. + _install_remote_rpms(base, filenames) - res = {} - res['results'] = [] - res['msg'] = '' - res['changed'] = False - res['rc'] = 0 + for group in groups: + try: + try: + base.group_upgrade(group) + except dnf.exceptions.CompsError: + # If not already installed, try to install. + base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES) + except dnf.exceptions.Error as e: + failures.append((group, e)) + + for environment in environments: + try: + try: + base.environment_upgrade(environment) + except dnf.exceptions.CompsError: + # If not already installed, try to install. 
+ base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES) + except dnf.exceptions.Error as e: + failures.append((environment, e)) + + for pkg_spec in pkg_specs: + # best effort causes to install the latest package + # even if not previously installed + base.conf.best = True + base.install(pkg_spec) - for pkg in items: - is_group = False - # group remove - this is doom on a stick - if pkg.startswith('@'): - is_group = True else: - if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos): - res['results'].append('%s is not installed' % pkg) - continue + # state == absent + if filenames: + module.fail_json( + msg="Cannot remove paths -- please specify package name.") - # run an actual dnf transaction - cmd = dnf_basecmd + ["remove", pkg] + for group in groups: + try: + base.group_remove(group) + except dnf.exceptions.CompsError: + # Group is already uninstalled. + pass + for environment in environments: + try: + base.environment_remove(environment) + except dnf.exceptions.CompsError: + # Environment is already uninstalled. + pass + + installed = base.sack.query().installed() + for pkg_spec in pkg_specs: + if installed.filter(name=pkg_spec): + base.remove(pkg_spec) + + # Like the dnf CLI we want to allow recursive removal of dependent + # packages + allow_erasing = True + + if not base.resolve(allow_erasing=allow_erasing): + if failures: + module.fail_json(msg='Failed to install some of the specified packages', + failures=failures) + module.exit_json(msg="Nothing to do") + else: if module.check_mode: + if failures: + module.fail_json(msg='Failed to install some of the specified packages', + failures=failures) module.exit_json(changed=True) - rc, out, err = module.run_command(cmd) - - res['rc'] += rc - res['results'].append(out) - res['msg'] += err - - # compile the results into one batch. If anything is changed - # then mark changed - # at the end - if we've end up failed then fail out of the rest - - # of the process - - # at this point we should check to see if the pkg is no longer present - - if not is_group: # we can't sensibly check for a group being uninstalled reliably - # look to see if the pkg shows up from is_installed.
If it doesn't - if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos): - res['changed'] = True - else: - module.fail_json(**res) - - if rc != 0: - module.fail_json(**res) - - module.exit_json(**res) - -def latest(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos): - - res = {} - res['results'] = [] - res['msg'] = '' - res['changed'] = False - res['rc'] = 0 - - for spec in items: - - pkg = None - basecmd = 'update' - cmd = '' - # groups, again - if spec.startswith('@'): - pkg = spec - - elif spec == '*': #update all - # use check-update to see if there is any need - rc,out,err = module.run_command(dnf_basecmd + ['check-update']) - if rc == 100: - cmd = dnf_basecmd + [basecmd] - else: - res['results'].append('All packages up to date') - continue - - # dep/pkgname - find it - else: - if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos): - basecmd = 'update' - else: - basecmd = 'install' - - pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos) - if not pkglist: - res['msg'] += "No Package matching '%s' found available, installed or updated" % spec - module.fail_json(**res) - - nothing_to_do = True - for this in pkglist: - if basecmd == 'install' and is_available(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos): - nothing_to_do = False - break - - if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=en_repos): - nothing_to_do = False - break - - if nothing_to_do: - res['results'].append("All packages providing %s are up to date" % spec) - continue - - # if any of the packages are involved in a transaction, fail now - # so that we don't hang on the dnf operation later - conflicts = transaction_exists(pkglist) - if len(conflicts) > 0: - res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts) - module.fail_json(**res) - - pkg = spec - if not cmd: - cmd = dnf_basecmd + [basecmd, pkg] - - if module.check_mode: - return module.exit_json(changed=True) - - rc, out, err = module.run_command(cmd) - - res['rc'] += rc - res['results'].append(out) - res['msg'] += err - - # FIXME if it is - update it and check to see if it applied - # check to see if there is no longer an update available for the pkgspec - - if rc: - res['failed'] = True - else: - res['changed'] = True - - module.exit_json(**res) - -def ensure(module, state, pkgspec, conf_file, enablerepo, disablerepo, - disable_gpg_check): + base.download_packages(base.transaction.install_set) + base.do_transaction() + response = {'changed': True, 'results': []} + for package in base.transaction.install_set: + response['results'].append("Installed: {0}".format(package)) + for package in base.transaction.remove_set: + response['results'].append("Removed: {0}".format(package)) - # take multiple args comma separated - items = pkgspec.split(',') + if failures: + module.fail_json(msg='Failed to install some of the specified packages', + failures=failures) + module.exit_json(**response) - # need debug level 2 to get 'Nothing to do' for groupinstall. 
- dnf_basecmd = [dnfbin, '-d', '2', '-y'] - - - if not repoquery: - repoq = None - else: - repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet', '-q'] - - if conf_file and os.path.exists(conf_file): - dnf_basecmd += ['-c', conf_file] - if repoq: - repoq += ['-c', conf_file] - - dis_repos =[] - en_repos = [] - if disablerepo: - dis_repos = disablerepo.split(',') - if enablerepo: - en_repos = enablerepo.split(',') - - for repoid in dis_repos: - r_cmd = ['--disablerepo=%s' % repoid] - dnf_basecmd.extend(r_cmd) - - for repoid in en_repos: - r_cmd = ['--enablerepo=%s' % repoid] - dnf_basecmd.extend(r_cmd) - - if state in ['installed', 'present', 'latest']: - my = dnf_base(conf_file) - try: - for r in dis_repos: - my.repos.disableRepo(r) - - current_repos = dnf.yum.config.RepoConf() - for r in en_repos: - try: - my.repos.enableRepo(r) - new_repos = my.repos.repos.keys() - for i in new_repos: - if not i in current_repos: - rid = my.repos.getRepo(i) - a = rid.repoXML.repoid - current_repos = new_repos - except dnf.exceptions.Error, e: - module.fail_json(msg="Error setting/accessing repo %s: %s" % (r, e)) - except dnf.exceptions.Error, e: - module.fail_json(msg="Error accessing repos: %s" % e) - - if state in ['installed', 'present']: - if disable_gpg_check: - dnf_basecmd.append('--nogpgcheck') - install(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos) - elif state in ['removed', 'absent']: - remove(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos) - elif state == 'latest': - if disable_gpg_check: - dnf_basecmd.append('--nogpgcheck') - latest(module, items, repoq, dnf_basecmd, conf_file, en_repos, dis_repos) - - # should be caught by AnsibleModule argument_spec - return dict(changed=False, failed=True, results='', errors='unexpected state') def main(): - - # state=installed name=pkgspec - # state=removed name=pkgspec - # state=latest name=pkgspec - # - # informational commands: - # list=installed - # list=updates - # list=available - # list=repos - # list=pkgspec - + """The main function.""" module = AnsibleModule( - argument_spec = dict( - name=dict(aliases=['pkg']), - # removed==absent, installed==present, these are accepted as aliases - state=dict(default='installed', choices=['absent','present','installed','removed','latest']), - enablerepo=dict(), - disablerepo=dict(), + argument_spec=dict( + name=dict(aliases=['pkg'], type='list'), + state=dict( + default='installed', + choices=[ + 'absent', 'present', 'installed', 'removed', 'latest']), + enablerepo=dict(type='list', default=[]), + disablerepo=dict(type='list', default=[]), list=dict(), - conf_file=dict(default=None), - disable_gpg_check=dict(required=False, default="no", type='bool'), - # this should not be needed, but exists as a failsafe - install_repoquery=dict(required=False, default="yes", type='bool'), + conf_file=dict(default=None, type='path'), + disable_gpg_check=dict(default=False, type='bool'), ), - required_one_of = [['name','list']], - mutually_exclusive = [['name','list']], - supports_check_mode = True - ) - - # this should not be needed, but exists as a failsafe + required_one_of=[['name', 'list']], + mutually_exclusive=[['name', 'list']], + supports_check_mode=True) params = module.params - if params['install_repoquery'] and not repoquery and not module.check_mode: - install_dnf_utils(module) - if not repoquery: - module.fail_json(msg="repoquery is required to use this module at this time. 
Please install the yum-utils package.") - if params['list']: - results = dict(results=list_stuff(module, params['conf_file'], params['list'])) - module.exit_json(**results) + _ensure_dnf(module) + if params['list']: + base = _base( + module, params['conf_file'], params['disable_gpg_check'], + params['disablerepo'], params['enablerepo']) + list_items(module, base, params['list']) else: - pkg = params['name'] - state = params['state'] - enablerepo = params.get('enablerepo', '') - disablerepo = params.get('disablerepo', '') - disable_gpg_check = params['disable_gpg_check'] - res = ensure(module, state, pkg, params['conf_file'], enablerepo, - disablerepo, disable_gpg_check) - module.fail_json(msg="we should never get here unless this all failed", **res) - -# import module snippets -from ansible.module_utils.basic import * -main() + # Note: base takes a long time to run so we want to check for failure + # before running it. + if not dnf.util.am_i_root(): + module.fail_json(msg="This command has to be run under the root user.") + base = _base( + module, params['conf_file'], params['disable_gpg_check'], + params['disablerepo'], params['enablerepo']) + + ensure(module, base, params['state'], params['name']) + +if __name__ == '__main__': + main() diff --git a/packaging/os/homebrew.py b/packaging/os/homebrew.py old mode 100644 new mode 100755 index 91888ba6bca..c44ccabbe6f --- a/packaging/os/homebrew.py +++ b/packaging/os/homebrew.py @@ -3,6 +3,7 @@ # (c) 2013, Andrew Dunham # (c) 2013, Daniel Jaouen +# (c) 2015, Indrajit Raychaudhuri # # Based on macports (Jimmy Tang ) # @@ -19,12 +20,19 @@ # You should have received a copy of the GNU General Public License # along with this software. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: homebrew author: + - "Indrajit Raychaudhuri (@indrajitr)" - "Daniel Jaouen (@danieljaouen)" - "Andrew Dunham (@andrew-d)" +requirements: + - "python >= 2.6" short_description: Package manager for Homebrew description: - Manages Homebrew packages @@ -35,6 +43,12 @@ - name of package to install/remove required: false default: None + aliases: ['pkg', 'package', 'formula'] + path: + description: + - "':' separated list of paths to search for 'brew' executable. Since A package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command, providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system." 
+ required: false + default: '/usr/local/bin' state: description: - state of the package @@ -45,37 +59,82 @@ description: - update homebrew itself first required: false - default: "no" + default: no choices: [ "yes", "no" ] + aliases: ['update-brew'] upgrade_all: description: - upgrade all homebrew packages required: false - default: "no" + default: no choices: [ "yes", "no" ] + aliases: ['upgrade'] install_options: description: - options flags to install a package required: false default: null + aliases: ['options'] version_added: "1.4" notes: [] ''' EXAMPLES = ''' -- homebrew: name=foo state=present -- homebrew: name=foo state=present update_homebrew=yes -- homebrew: name=foo state=latest update_homebrew=yes -- homebrew: update_homebrew=yes upgrade_all=yes -- homebrew: name=foo state=head -- homebrew: name=foo state=linked -- homebrew: name=foo state=absent -- homebrew: name=foo,bar state=absent -- homebrew: name=foo state=present install_options=with-baz,enable-debug +# Install formula foo with 'brew' in default path (C(/usr/local/bin)) +- homebrew: + name: foo + state: present + +# Install formula foo with 'brew' in alternate path C(/my/other/location/bin) +- homebrew: + name: foo + path: /my/other/location/bin + state: present + +# Update homebrew first and install formula foo with 'brew' in default path +- homebrew: + name: foo + state: present + update_homebrew: yes + +# Update homebrew first and upgrade formula foo to latest available with 'brew' in default path +- homebrew: + name: foo + state: latest + update_homebrew: yes + +# Update homebrew and upgrade all packages +- homebrew: + update_homebrew: yes + upgrade_all: yes + +# Miscellaneous other examples +- homebrew: + name: foo + state: head + +- homebrew: + name: foo + state: linked + +- homebrew: + name: foo + state: absent + +- homebrew: + name: foo,bar + state: absent + +- homebrew: + name: foo + state: present + install_options: with-baz,enable-debug ''' import os.path import re +from ansible.module_utils.six import iteritems + # exceptions -------------------------------------------------------------- {{{ class HomebrewException(Exception): @@ -119,6 +178,7 @@ class Homebrew(object): / # slash (for taps) \+ # plusses - # dashes + : # colons (for URLs) ''' INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS) @@ -300,7 +360,7 @@ def current_package(self, package): return package # /class properties -------------------------------------------- }}} - def __init__(self, module, path=None, packages=None, state=None, + def __init__(self, module, path, packages=None, state=None, update_homebrew=False, upgrade_all=False, install_options=None): if not install_options: @@ -322,17 +382,12 @@ def _setup_status_vars(self): self.message = '' def _setup_instance_vars(self, **kwargs): - for key, val in kwargs.iteritems(): + for key, val in iteritems(kwargs): setattr(self, key, val) def _prep(self): - self._prep_path() self._prep_brew_path() - def _prep_path(self): - if not self.path: - self.path = ['/usr/local/bin'] - def _prep_brew_path(self): if not self.module: self.brew_path = None @@ -394,18 +449,17 @@ def _current_package_is_installed(self): return False - def _outdated_packages(self): + def _current_package_is_outdated(self): + if not self.valid_package(self.current_package): + return False + rc, out, err = self.module.run_command([ self.brew_path, 'outdated', + self.current_package, ]) - return [line.split(' ')[0].strip() for line in out.split('\n') if line] - def _current_package_is_outdated(self): - if not 
self.valid_package(self.current_package): - return False - - return self.current_package in self._outdated_packages() + return rc != 0 def _current_package_is_installed_from_head(self): if not Homebrew.valid_package(self.current_package): @@ -763,8 +817,16 @@ def _unlink_packages(self): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(aliases=["pkg"], required=False), - path=dict(required=False), + name=dict( + aliases=["pkg", "package", "formula"], + required=False, + type='list', + ), + path=dict( + default="/usr/local/bin", + required=False, + type='path', + ), state=dict( default="present", choices=[ @@ -775,12 +837,12 @@ def main(): ], ), update_homebrew=dict( - default="no", + default=False, aliases=["update-brew"], type='bool', ), upgrade_all=dict( - default="no", + default=False, aliases=["upgrade"], type='bool', ), @@ -792,18 +854,19 @@ def main(): ), supports_check_mode=True, ) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + p = module.params if p['name']: - packages = p['name'].split(',') + packages = p['name'] else: packages = None path = p['path'] if path: path = path.split(':') - else: - path = ['/usr/local/bin'] state = p['state'] if state in ('present', 'installed'): @@ -839,4 +902,3 @@ def main(): if __name__ == '__main__': main() - diff --git a/packaging/os/homebrew_cask.py b/packaging/os/homebrew_cask.py old mode 100644 new mode 100755 index e1b721a97b4..86d7f35e0ca --- a/packaging/os/homebrew_cask.py +++ b/packaging/os/homebrew_cask.py @@ -2,6 +2,7 @@ # -*- coding: utf-8 -*- # (c) 2013, Daniel Jaouen +# (c) 2016, Indrajit Raychaudhuri # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -16,10 +17,19 @@ # You should have received a copy of the GNU General Public License # along with this software. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: homebrew_cask -author: "Daniel Jaouen (@danieljaouen)" +author: + - "Indrajit Raychaudhuri (@indrajitr)" + - "Daniel Jaouen (@danieljaouen)" + - "Enric Lluelles (@enriclluelles)" +requirements: + - "python >= 2.6" short_description: Install/uninstall homebrew casks. description: - Manages Homebrew casks. @@ -29,21 +39,65 @@ description: - name of cask to install/remove required: true + aliases: ['pkg', 'package', 'cask'] + path: + description: + - "':' separated list of paths to search for 'brew' executable." + required: false + default: '/usr/local/bin' state: description: - state of the cask choices: [ 'present', 'absent' ] required: false default: present + update_homebrew: + description: + - update homebrew itself first. Note that C(brew cask update) is + a synonym for C(brew update). 
+ required: false + default: no + choices: [ "yes", "no" ] + aliases: ['update-brew'] + version_added: "2.2" + install_options: + description: + - options flags to install a package + required: false + default: null + aliases: ['options'] + version_added: "2.2" ''' EXAMPLES = ''' -- homebrew_cask: name=alfred state=present -- homebrew_cask: name=alfred state=absent +- homebrew_cask: + name: alfred + state: present + +- homebrew_cask: + name: alfred + state: absent + +- homebrew_cask: + name: alfred + state: present + install_options: 'appdir=/Applications' + +- homebrew_cask: + name: alfred + state: present + install_options: 'debug,appdir=/Applications' + +- homebrew_cask: + name: alfred + state: absent + install_options: force ''' import os.path import re +from ansible.module_utils.six import iteritems + # exceptions -------------------------------------------------------------- {{{ class HomebrewCaskException(Exception): @@ -69,6 +123,7 @@ class HomebrewCask(object): \s # spaces : # colons {sep} # the OS-specific path separator + . # dots - # dashes '''.format(sep=os.path.sep) @@ -76,11 +131,14 @@ class HomebrewCask(object): \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) \s # spaces {sep} # the OS-specific path separator + . # dots - # dashes '''.format(sep=os.path.sep) VALID_CASK_CHARS = r''' \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + . # dots + / # slash (for taps) - # dashes ''' @@ -98,6 +156,7 @@ def valid_path(cls, path): - a string containing only: - alphanumeric characters - dashes + - dots - spaces - colons - os.path.sep @@ -122,6 +181,7 @@ def valid_brew_path(cls, brew_path): - a string containing only: - alphanumeric characters - dashes + - dots - spaces - os.path.sep ''' @@ -170,6 +230,7 @@ def valid_module(cls, module): '''A valid module is an instance of AnsibleModule.''' return isinstance(module, AnsibleModule) + # /class validations ------------------------------------------- }}} # class properties --------------------------------------------- {{{ @@ -251,10 +312,14 @@ def current_cask(self, cask): return cask # /class properties -------------------------------------------- }}} - def __init__(self, module, path=None, casks=None, state=None): + def __init__(self, module, path=path, casks=None, state=None, + update_homebrew=False, install_options=None): + if not install_options: + install_options = list() self._setup_status_vars() self._setup_instance_vars(module=module, path=path, casks=casks, - state=state) + state=state, update_homebrew=update_homebrew, + install_options=install_options,) self._prep() @@ -267,17 +332,12 @@ def _setup_status_vars(self): self.message = '' def _setup_instance_vars(self, **kwargs): - for key, val in kwargs.iteritems(): + for key, val in iteritems(kwargs): setattr(self, key, val) def _prep(self): - self._prep_path() self._prep_brew_path() - def _prep_path(self): - if not self.path: - self.path = ['/usr/local/bin'] - def _prep_brew_path(self): if not self.module: self.brew_path = None @@ -324,8 +384,12 @@ def _current_cask_is_installed(self): self.message = 'Invalid cask: {0}.'.format(self.current_cask) raise HomebrewCaskException(self.message) - cmd = [self.brew_path, 'cask', 'list'] - rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0]) + cmd = [ + "{brew_path}".format(brew_path=self.brew_path), + "cask", + "list" + ] + rc, out, err = self.module.run_command(cmd) if 'nothing to list' in err: return False @@ -340,6 +404,9 @@ def _current_cask_is_installed(self): # commands 
----------------------------------------------------- {{{ def _run(self): + if self.update_homebrew: + self._update_homebrew() + if self.state == 'installed': return self._install_casks() elif self.state == 'absent': @@ -353,7 +420,7 @@ def _update_homebrew(self): rc, out, err = self.module.run_command([ self.brew_path, 'update', - ], path_prefix=self.path[0]) + ]) if rc == 0: if out and isinstance(out, basestring): already_updated = any( @@ -395,11 +462,13 @@ def _install_current_cask(self): ) raise HomebrewCaskException(self.message) - cmd = [opt - for opt in (self.brew_path, 'cask', 'install', self.current_cask) - if opt] + opts = ( + [self.brew_path, 'cask', 'install', self.current_cask] + + self.install_options + ) - rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0]) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) if self._current_cask_is_installed(): self.changed_count += 1 @@ -444,7 +513,7 @@ def _uninstall_current_cask(self): for opt in (self.brew_path, 'cask', 'uninstall', self.current_cask) if opt] - rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0]) + rc, out, err = self.module.run_command(cmd) if not self._current_cask_is_installed(): self.changed_count += 1 @@ -469,8 +538,16 @@ def _uninstall_casks(self): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(aliases=["cask"], required=False), - path=dict(required=False), + name=dict( + aliases=["pkg", "package", "cask"], + required=False, + type='list', + ), + path=dict( + default="/usr/local/bin", + required=False, + type='path', + ), state=dict( default="present", choices=[ @@ -478,21 +555,32 @@ def main(): "absent", "removed", "uninstalled", ], ), + update_homebrew=dict( + default=False, + aliases=["update-brew"], + type='bool', + ), + install_options=dict( + default=None, + aliases=['options'], + type='list', + ) ), supports_check_mode=True, ) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + p = module.params if p['name']: - casks = p['name'].split(',') + casks = p['name'] else: casks = None path = p['path'] if path: path = path.split(':') - else: - path = ['/usr/local/bin'] state = p['state'] if state in ('present', 'installed'): @@ -500,8 +588,14 @@ def main(): if state in ('absent', 'removed', 'uninstalled'): state = 'absent' + update_homebrew = p['update_homebrew'] + p['install_options'] = p['install_options'] or [] + install_options = ['--{0}'.format(install_option) + for install_option in p['install_options']] + brew_cask = HomebrewCask(module=module, path=path, casks=casks, - state=state) + state=state, update_homebrew=update_homebrew, + install_options=install_options) (failed, changed, message) = brew_cask.run() if failed: module.fail_json(msg=message) @@ -513,4 +607,3 @@ def main(): if __name__ == '__main__': main() - diff --git a/packaging/os/homebrew_tap.py b/packaging/os/homebrew_tap.py index c6511f0c7b2..649a32f1b89 100644 --- a/packaging/os/homebrew_tap.py +++ b/packaging/os/homebrew_tap.py @@ -2,6 +2,8 @@ # -*- coding: utf-8 -*- # (c) 2013, Daniel Jaouen +# (c) 2016, Indrajit Raychaudhuri +# # Based on homebrew (Andrew Dunham ) # # This file is part of Ansible @@ -21,19 +23,36 @@ import re +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: homebrew_tap -author: "Daniel Jaouen (@danieljaouen)" +author: + - "Indrajit Raychaudhuri (@indrajitr)" + - "Daniel Jaouen (@danieljaouen)" short_description: 
Tap a Homebrew repository. description: - Tap external Homebrew repositories. version_added: "1.6" options: - tap: + name: description: - - The repository to tap. + - The GitHub user/organization repository to tap. required: true + aliases: ['tap'] + url: + description: + - The optional git URL of the repository to tap. The URL is not + assumed to be on GitHub, and the protocol doesn't have to be HTTP. + Any location and protocol that git can handle is fine. + required: false + version_added: "2.2" + note: + - I(name) option may not be a list of multiple taps (but a single + tap instead) when this option is provided. state: description: - state of the repository. @@ -44,9 +63,20 @@ ''' EXAMPLES = ''' -homebrew_tap: tap=homebrew/dupes state=present -homebrew_tap: tap=homebrew/dupes state=absent -homebrew_tap: tap=homebrew/dupes,homebrew/science state=present +- homebrew_tap: + name: homebrew/dupes + +- homebrew_tap: + name: homebrew/dupes + state: absent + +- homebrew_tap: + name: homebrew/dupes,homebrew/science + state: present + +- homebrew_tap: + name: telemachus/brew + url: 'https://bitbucket.org/telemachus/brew' ''' @@ -63,11 +93,14 @@ def already_tapped(module, brew_path, tap): brew_path, 'tap', ]) + taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_] - return tap.lower() in taps + tap_name = re.sub('homebrew-', '', tap.lower()) + return tap_name in taps -def add_tap(module, brew_path, tap): + +def add_tap(module, brew_path, tap, url=None): '''Adds a single tap.''' failed, changed, msg = False, False, '' @@ -83,6 +116,7 @@ def add_tap(module, brew_path, tap): brew_path, 'tap', tap, + url, ]) if already_tapped(module, brew_path, tap): changed = True @@ -180,7 +214,8 @@ def remove_taps(module, brew_path, taps): def main(): module = AnsibleModule( argument_spec=dict( - name=dict(aliases=['tap'], required=True), + name=dict(aliases=['tap'], type='list', required=True), + url=dict(default=None, required=False), state=dict(default='present', choices=['present', 'absent']), ), supports_check_mode=True, @@ -192,10 +227,22 @@ def main(): opt_dirs=['/usr/local/bin'] ) - taps = module.params['name'].split(',') + taps = module.params['name'] + url = module.params['url'] if module.params['state'] == 'present': - failed, changed, msg = add_taps(module, brew_path, taps) + if url is None: + # No tap URL provided explicitly, continue with bulk addition + # of all the taps. + failed, changed, msg = add_taps(module, brew_path, taps) + else: + # When a tap URL is provided explicitly, we allow adding + a *single* tap only. Validate and proceed to add single tap. + if len(taps) > 1: + msg = "List of multiple taps may not be provided with 'url' option." + module.fail_json(msg=msg) + else: + failed, changed, msg = add_tap(module, brew_path, taps[0], url) if failed: module.fail_json(msg=msg) diff --git a/packaging/os/layman.py b/packaging/os/layman.py index 62694ee9118..440001b48a0 100644 --- a/packaging/os/layman.py +++ b/packaging/os/layman.py @@ -21,6 +21,10 @@ import shutil from os import path +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: layman @@ -64,19 +68,29 @@ EXAMPLES = ''' # Install the overlay 'mozilla' which is on the central overlays list. -- layman: name=mozilla +- layman: + name: mozilla # Install the overlay 'cvut' from the specified alternative list.
-- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml +- layman: + name: cvut + list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml' # Update (sync) the overlay 'cvut', or install if not installed yet. -- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml state=updated +- layman: + name: cvut + list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml' + state: updated # Update (sync) all of the installed overlays. -- layman: name=ALL state=updated +- layman: + name: ALL + state: updated # Uninstall the overlay 'cvut'. -- layman: name=cvut state=absent +- layman: + name: cvut + state: absent ''' USERAGENT = 'ansible-httpget' @@ -120,7 +134,7 @@ def download_url(module, url, dest): try: with open(dest, 'w') as f: shutil.copyfileobj(response, f) - except IOError, e: + except IOError as e: raise ModuleError("Failed to write: %s" % str(e)) @@ -143,7 +157,11 @@ def install_overlay(module, name, list_url=None): layman = init_layman(layman_conf) if layman.is_installed(name): - return False + return False + + if module.check_mode: + mymsg = 'Would add layman repo \'' + name + '\'' + module.exit_json(changed=True, msg=mymsg) if not layman.is_repo(name): if not list_url: @@ -164,7 +182,7 @@ def install_overlay(module, name, list_url=None): return True -def uninstall_overlay(name): +def uninstall_overlay(module, name): '''Uninstalls the given overlay repository from the system. :param name: the overlay id to uninstall @@ -177,6 +195,10 @@ def uninstall_overlay(name): if not layman.is_installed(name): return False + + if module.check_mode: + mymsg = 'Would remove layman repo \'' + name + '\'' + module.exit_json(changed=True, msg=mymsg) layman.delete_repos(name) if layman.get_errors(): raise ModuleError(layman.get_errors()) @@ -216,7 +238,8 @@ def main(): list_url = dict(aliases=['url']), state = dict(default="present", choices=['present', 'absent', 'updated']), validate_certs = dict(required=False, default=True, type='bool'), - ) + ), + supports_check_mode=True ) if not HAS_LAYMAN_API: @@ -237,9 +260,9 @@ def main(): else: sync_overlay(name) else: - changed = uninstall_overlay(name) + changed = uninstall_overlay(module, name) - except ModuleError, e: + except ModuleError as e: module.fail_json(msg=e.message) else: module.exit_json(changed=changed, name=name) diff --git a/packaging/os/macports.py b/packaging/os/macports.py index ca3a0f97426..ac49f1568e5 100644 --- a/packaging/os/macports.py +++ b/packaging/os/macports.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with this software. If not, see . 
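An aside on the check-mode early exits added to install_overlay() and uninstall_overlay() in the layman hunks above: the module reports the change it would make and stops before touching the system. A minimal, self-contained sketch of that pattern; FakeModule here is a hypothetical stand-in for AnsibleModule, used only so the example runs anywhere:

    class FakeModule(object):
        # Stand-in for AnsibleModule; only the two members the pattern needs.
        def __init__(self, check_mode):
            self.check_mode = check_mode

        def exit_json(self, **result):
            # AnsibleModule.exit_json() emits JSON and exits; SystemExit models that.
            raise SystemExit(result)

    def install_overlay(module, name, installed):
        if installed:
            return False  # already present: no change either way
        if module.check_mode:
            # Report the change that *would* happen, then stop before mutating anything.
            module.exit_json(changed=True, msg="Would add layman repo '%s'" % name)
        # ... the real module would install the overlay here ...
        return True

    try:
        install_overlay(FakeModule(check_mode=True), 'mozilla', installed=False)
    except SystemExit as result:
        print(result)  # {'changed': True, 'msg': "Would add layman repo 'mozilla'"}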
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: macports @@ -46,11 +50,26 @@ notes: [] ''' EXAMPLES = ''' -- macports: name=foo state=present -- macports: name=foo state=present update_cache=yes -- macports: name=foo state=absent -- macports: name=foo state=active -- macports: name=foo state=inactive +- macports: + name: foo + state: present + +- macports: + name: foo + state: present + update_cache: yes + +- macports: + name: foo + state: absent + +- macports: + name: foo + state: active + +- macports: + name: foo + state: inactive ''' import pipes @@ -214,4 +233,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/os/openbsd_pkg.py b/packaging/os/openbsd_pkg.py index 1f331261d98..7d0e9ac9459 100644 --- a/packaging/os/openbsd_pkg.py +++ b/packaging/os/openbsd_pkg.py @@ -18,9 +18,17 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +import os +import platform import re import shlex -import syslog +import sqlite3 + +from distutils.version import StrictVersion + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -30,6 +38,7 @@ short_description: Manage packages on OpenBSD. description: - Manage packages on OpenBSD using the pkg tools. +requirements: [ "python >= 2.5" ] options: name: required: true @@ -42,137 +51,167 @@ - C(present) will make sure the package is installed. C(latest) will make sure the latest version of the package is installed. C(absent) will make sure the specified package is not installed. + build: + required: false + choices: [ yes, no ] + default: no + description: + - Build the package from source instead of downloading and installing + a binary. Requires that the port source tree is already installed. + Automatically builds and installs the 'sqlports' package, if it is + not already installed. + version_added: "2.1" + ports_dir: + required: false + default: /usr/ports + description: + - When used in combination with the 'build' option, allows overriding + the default ports source directory. + version_added: "2.1" ''' EXAMPLES = ''' # Make sure nmap is installed -- openbsd_pkg: name=nmap state=present +- openbsd_pkg: + name: nmap + state: present # Make sure nmap is the latest version -- openbsd_pkg: name=nmap state=latest +- openbsd_pkg: + name: nmap + state: latest # Make sure nmap is not installed -- openbsd_pkg: name=nmap state=absent +- openbsd_pkg: + name: nmap + state: absent + +# Make sure nmap is installed, build it from source if it is not +- openbsd_pkg: + name: nmap + state: present + build: yes # Specify a pkg flavour with '--' -- openbsd_pkg: name=vim--nox11 state=present +- openbsd_pkg: + name: vim--no_x11 + state: present # Specify the default flavour to avoid ambiguity errors -- openbsd_pkg: name=vim-- state=present +- openbsd_pkg: + name: vim-- + state: present + +# Specify a package branch (requires at least OpenBSD 6.0) +- openbsd_pkg: + name: python%3.5 + state: present # Update all packages on the system -- openbsd_pkg: name=* state=latest +- openbsd_pkg: + name: '*' + state: latest ''' -# Control if we write debug information to syslog. -debug = False - # Function used for executing commands. def execute_command(cmd, module): - if debug: - syslog.syslog("execute_command(): cmd = %s" % cmd) # Break command line into arguments.
# This makes run_command() use shell=False which we need to not cause shell # expansion of special characters like '*'. cmd_args = shlex.split(cmd) return module.run_command(cmd_args) -# Function used for getting the name of a currently installed package. -def get_current_name(name, pkg_spec, module): - info_cmd = 'pkg_info' - (rc, stdout, stderr) = execute_command("%s" % (info_cmd), module) - if rc != 0: - return (rc, stdout, stderr) - - if pkg_spec['version']: - pattern = "^%s" % name - elif pkg_spec['flavor']: - pattern = "^%s-.*-%s\s" % (pkg_spec['stem'], pkg_spec['flavor']) - else: - pattern = "^%s-" % pkg_spec['stem'] - - if debug: - syslog.syslog("get_current_name(): pattern = %s" % pattern) - - for line in stdout.splitlines(): - if debug: - syslog.syslog("get_current_name: line = %s" % line) - match = re.search(pattern, line) - if match: - current_name = line.split()[0] - - return current_name - # Function used to find out if a package is currently installed. def get_package_state(name, pkg_spec, module): - info_cmd = 'pkg_info -e' + info_cmd = 'pkg_info -Iq' - if pkg_spec['version']: - command = "%s %s" % (info_cmd, name) - elif pkg_spec['flavor']: - command = "%s %s-*-%s" % (info_cmd, pkg_spec['stem'], pkg_spec['flavor']) - else: - command = "%s %s-*" % (info_cmd, pkg_spec['stem']) + command = "%s inst:%s" % (info_cmd, name) rc, stdout, stderr = execute_command(command, module) - if (stderr): + if stderr: module.fail_json(msg="failed in get_package_state(): " + stderr) - if rc == 0: + if stdout: + # If the requested package name is just a stem, like "python", we may + # find multiple packages with that name. + pkg_spec['installed_names'] = [name for name in stdout.splitlines()] + module.debug("get_package_state(): installed_names = %s" % pkg_spec['installed_names']) return True else: return False # Function used to make sure a package is present. def package_present(name, installed_state, pkg_spec, module): + build = module.params['build'] + if module.check_mode: install_cmd = 'pkg_add -Imn' else: - install_cmd = 'pkg_add -Im' + if build is True: + port_dir = "%s/%s" % (module.params['ports_dir'], get_package_source_path(name, pkg_spec, module)) + if os.path.isdir(port_dir): + if pkg_spec['flavor']: + flavors = pkg_spec['flavor'].replace('-', ' ') + install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors) + elif pkg_spec['subpackage']: + install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir, pkg_spec['subpackage']) + else: + install_cmd = "cd %s && make install && make clean=depends" % (port_dir) + else: + module.fail_json(msg="the port source directory %s does not exist" % (port_dir)) + else: + install_cmd = 'pkg_add -Im' if installed_state is False: # Attempt to install the package - (rc, stdout, stderr) = execute_command("%s %s" % (install_cmd, name), module) + if build is True and not module.check_mode: + (rc, stdout, stderr) = module.run_command(install_cmd, module, use_unsafe_shell=True) + else: + (rc, stdout, stderr) = execute_command("%s %s" % (install_cmd, name), module) # The behaviour of pkg_add is a bit different depending on if a # specific version is supplied or not. # # When a specific version is supplied the return code will be 0 when - # a package is found and 1 when it is not, if a version is not - # supplied the tool will exit 0 in both cases: - if pkg_spec['version']: + # a package is found and 1 when it is not. 
If a version is not + # supplied the tool will exit 0 in both cases. + # + # It is important to note that "version" relates to the + # packages-specs(7) notion of a version. If using the branch syntax + # (like "python%3.5") the version number is considered part of the + # stem, and pkg_add behaves the same as if the name did + # not contain a version (which it strictly speaking does not). + if pkg_spec['version'] or build is True: # Depend on the return code. - if debug: - syslog.syslog("package_present(): depending on return code") + module.debug("package_present(): depending on return code") if rc: changed=False else: # Depend on stderr instead. - if debug: - syslog.syslog("package_present(): depending on stderr") + module.debug("package_present(): depending on stderr") if stderr: # There is a corner case where having an empty directory in # installpath prior to the right location will result in a # "file:/local/package/directory/ is empty" message on stderr # while still installing the package, so we need to look # for a message like "packagename-1.0: ok" just in case. - match = re.search("\W%s-[^:]+: ok\W" % name, stdout) + if pkg_spec['style'] == 'branch': + match = re.search("\W%s-[^:]+: ok\W" % pkg_spec['pkgname'], stdout) + else: + match = re.search("\W%s-[^:]+: ok\W" % name, stdout) + + if match: # It turns out we were able to install the package. - if debug: - syslog.syslog("package_present(): we were able to install package") - pass + module.debug("package_present(): we were able to install the package") else: # We really did fail, fake the return code. - if debug: - syslog.syslog("package_present(): we really did fail") + module.debug("package_present(): we really did fail") rc = 1 changed=False else: - if debug: - syslog.syslog("package_present(): stderr was not set") + module.debug("package_present(): stderr was not set") if rc == 0: if module.check_mode: @@ -190,6 +229,10 @@ def package_present(name, installed_state, pkg_spec, module): # Function used to make sure a package is the latest available version. def package_latest(name, installed_state, pkg_spec, module): + + if module.params['build'] is True: + module.fail_json(msg="the combination of build=%s and state=latest is not supported" % module.params['build']) + if module.check_mode: upgrade_cmd = 'pkg_add -umn' else: @@ -199,26 +242,23 @@ def package_latest(name, installed_state, pkg_spec, module): if installed_state is True: - # Fetch name of currently installed package. - pre_upgrade_name = get_current_name(name, pkg_spec, module) - - if debug: - syslog.syslog("package_latest(): pre_upgrade_name = %s" % pre_upgrade_name) - # Attempt to upgrade the package. (rc, stdout, stderr) = execute_command("%s %s" % (upgrade_cmd, name), module) # Look for output looking something like "nmap-6.01->6.25: ok" to see if # something changed (or would have changed). Use \W to delimit the match # from progress meter output.
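The hunk that follows swaps the single pre_upgrade_name match for a loop over pkg_spec['installed_names']. As a standalone illustration of the match itself (sample output is invented, and re.escape is added here for safety; the module interpolates the installed name directly):

    import re

    stdout = "quirks-2.197 signed on 2016-07-07\nnmap-6.01->6.25: ok\n"
    installed_name = "nmap-6.01"

    # \W on both sides keeps progress-meter noise from producing a false match.
    match = re.search(r"\W%s->.+: ok\W" % re.escape(installed_name), stdout)
    print(bool(match))  # True -> the package was (or would have been) upgraded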
- match = re.search("\W%s->.+: ok\W" % pre_upgrade_name, stdout) - if match: - if module.check_mode: - module.exit_json(changed=True) - - changed = True - else: - changed = False + changed = False + for installed_name in pkg_spec['installed_names']: + module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name) + match = re.search("\W%s->.+: ok\W" % installed_name, stdout) + if match: + module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name) + if module.check_mode: + module.exit_json(changed=True) + + changed = True + break # FIXME: This part is problematic. Based on the issues mentioned (and # handled) in package_present() it is not safe to blindly trust stderr @@ -237,8 +277,7 @@ def package_latest(name, installed_state, pkg_spec, module): else: # If package was not installed at all just make it present. - if debug: - syslog.syslog("package_latest(): package is not installed, calling package_present()") + module.debug("package_latest(): package is not installed, calling package_present()") return package_present(name, installed_state, pkg_spec, module) # Function used to make sure a package is not installed. @@ -271,7 +310,12 @@ def package_absent(name, installed_state, module): # Function used to parse the package name based on packages-specs(7). # The general name structure is "stem-version[-flavors]". +# +# Names containing "%" are a special variation not part of the +# packages-specs(7) syntax. See pkg_add(1) on OpenBSD 6.0 or later for a +# description. def parse_package_name(name, pkg_spec, module): + module.debug("parse_package_name(): parsing name: %s" % name) # Do some initial matches so we can base the more advanced regex on that. version_match = re.search("-[0-9]", name) versionless_match = re.search("--", name) @@ -279,7 +323,7 @@ def parse_package_name(name, pkg_spec, module): # Stop if someone is giving us a name that both has a version and is # version-less at the same time. if version_match and versionless_match: - module.fail_json(msg="Package name both has a version and is version-less: " + name) + module.fail_json(msg="package name both has a version and is version-less: " + name) # If name includes a version. if version_match: @@ -290,8 +334,9 @@ def parse_package_name(name, pkg_spec, module): pkg_spec['version'] = match.group('version') pkg_spec['flavor_separator'] = match.group('flavor_separator') pkg_spec['flavor'] = match.group('flavor') + pkg_spec['style'] = 'version' else: - module.fail_json(msg="Unable to parse package name at version_match: " + name) + module.fail_json(msg="unable to parse package name at version_match: " + name) # If name includes no version but is version-less ("--"). elif versionless_match: @@ -302,8 +347,9 @@ def parse_package_name(name, pkg_spec, module): pkg_spec['version'] = None pkg_spec['flavor_separator'] = '-' pkg_spec['flavor'] = match.group('flavor') + pkg_spec['style'] = 'versionless' else: - module.fail_json(msg="Unable to parse package name at versionless_match: " + name) + module.fail_json(msg="unable to parse package name at versionless_match: " + name) # If name includes no version, and is not version-less, it is all a stem. 
else: @@ -314,15 +360,83 @@ def parse_package_name(name, pkg_spec, module): pkg_spec['version'] = None pkg_spec['flavor_separator'] = None pkg_spec['flavor'] = None + pkg_spec['style'] = 'stem' else: - module.fail_json(msg="Unable to parse package name at else: " + name) + module.fail_json(msg="unable to parse package name at else: " + name) + + # If the stem contains an "%" then it needs special treatment. + branch_match = re.search("%", pkg_spec['stem']) + if branch_match: + + branch_release = "6.0" + + if version_match or versionless_match: + module.fail_json(msg="package name using 'branch' syntax also has a version or is version-less: " + name) + if StrictVersion(platform.release()) < StrictVersion(branch_release): + module.fail_json(msg="package name using 'branch' syntax requires at least OpenBSD %s: %s" % (branch_release, name)) + + pkg_spec['style'] = 'branch' + + # Key names from description in pkg_add(1). + pkg_spec['pkgname'] = pkg_spec['stem'].split('%')[0] + pkg_spec['branch'] = pkg_spec['stem'].split('%')[1] # Sanity check that there are no trailing dashes in flavor. # Try to stop strange stuff early so we can be strict later. if pkg_spec['flavor']: match = re.search("-$", pkg_spec['flavor']) if match: - module.fail_json(msg="Trailing dash in flavor: " + pkg_spec['flavor']) + module.fail_json(msg="trailing dash in flavor: " + pkg_spec['flavor']) + +# Function used for figuring out the port path. +def get_package_source_path(name, pkg_spec, module): + pkg_spec['subpackage'] = None + if pkg_spec['stem'] == 'sqlports': + return 'databases/sqlports' + else: + # try for an exact match first + sqlports_db_file = '/usr/local/share/sqlports' + if not os.path.isfile(sqlports_db_file): + module.fail_json(msg="sqlports file '%s' is missing" % sqlports_db_file) + + conn = sqlite3.connect(sqlports_db_file) + first_part_of_query = 'SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname' + query = first_part_of_query + ' = ?' + module.debug("get_package_source_path(): exact query: %s" % query) + cursor = conn.execute(query, (name,)) + results = cursor.fetchall() + + # next, try for a fuzzier match + if len(results) < 1: + looking_for = pkg_spec['stem'] + (pkg_spec['version_separator'] or '-') + (pkg_spec['version'] or '%') + query = first_part_of_query + ' LIKE ?' + if pkg_spec['flavor']: + looking_for += pkg_spec['flavor_separator'] + pkg_spec['flavor'] + module.debug("get_package_source_path(): fuzzy flavor query: %s" % query) + cursor = conn.execute(query, (looking_for,)) + elif pkg_spec['style'] == 'versionless': + query += ' AND fullpkgname NOT LIKE ?' + module.debug("get_package_source_path(): fuzzy versionless query: %s" % query) + cursor = conn.execute(query, (looking_for, "%s-%%" % looking_for,)) + else: + module.debug("get_package_source_path(): fuzzy query: %s" % query) + cursor = conn.execute(query, (looking_for,)) + results = cursor.fetchall() + + # error if we don't find exactly 1 match + conn.close() + if len(results) < 1: + module.fail_json(msg="could not find a port by the name '%s'" % name) + if len(results) > 1: + matches = map(lambda x:x[1], results) + module.fail_json(msg="too many matches, unsure which to build: %s" % ' OR '.join(matches)) + + # there's exactly 1 match, so figure out the subpackage, if any, then return + fullpkgpath = results[0][0] + parts = fullpkgpath.split(',') + if len(parts) > 1 and parts[1][0] == '-': + pkg_spec['subpackage'] = parts[1] + return parts[0] # Function used for upgrading all installed packages.
def upgrade_packages(module): @@ -363,12 +477,16 @@ def main(): argument_spec = dict( name = dict(required=True), state = dict(required=True, choices=['absent', 'installed', 'latest', 'present', 'removed']), + build = dict(default='no', type='bool'), + ports_dir = dict(default='/usr/ports'), ), supports_check_mode = True ) name = module.params['name'] state = module.params['state'] + build = module.params['build'] + ports_dir = module.params['ports_dir'] rc = 0 stdout = '' @@ -376,6 +494,19 @@ def main(): result = {} result['name'] = name result['state'] = state + result['build'] = build + + if build is True: + if not os.path.isdir(ports_dir): + module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir)) + + # build sqlports if it's not installed yet + pkg_spec = {} + parse_package_name('sqlports', pkg_spec, module) + installed_state = get_package_state('sqlports', pkg_spec, module) + if not installed_state: + module.debug("main(): installing 'sqlports' because build=%s" % module.params['build']) + package_present('sqlports', installed_state, pkg_spec, module) if name == '*': if state != 'latest': @@ -388,6 +519,11 @@ def main(): pkg_spec = {} parse_package_name(name, pkg_spec, module) + # Not sure how the branch syntax is supposed to play together + # with build mode. Disable it for now. + if pkg_spec['style'] == 'branch' and module.params['build'] is True: + module.fail_json(msg="the combination of 'branch' syntax and build=%s is not supported: %s" % (module.params['build'], name)) + # Get package state. installed_state = get_package_state(name, pkg_spec, module) @@ -411,4 +547,6 @@ def main(): # Import module snippets. from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/packaging/os/opkg.py b/packaging/os/opkg.py index 5b75ad1a260..6360f45af33 100644 --- a/packaging/os/opkg.py +++ b/packaging/os/opkg.py @@ -17,6 +17,10 @@ # You should have received a copy of the GNU General Public License # along with this software. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: opkg @@ -36,6 +40,13 @@ choices: [ 'present', 'absent' ] required: false default: present + force: + description: + - The opkg --force option to use; passed to opkg as --force-<value> + choices: ["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove", "checksum", "removal-of-dependent-packages"] + required: false + default: '' + version_added: "2.0" update_cache: description: - update the package db first @@ -45,10 +56,27 @@ notes: [] ''' EXAMPLES = ''' -- opkg: name=foo state=present -- opkg: name=foo state=present update_cache=yes -- opkg: name=foo state=absent -- opkg: name=foo,bar state=absent +- opkg: + name: foo + state: present + +- opkg: + name: foo + state: present + update_cache: yes + +- opkg: + name: foo + state: absent + +- opkg: + name: foo,bar + state: absent + +- opkg: + name: foo + state: present + force: overwrite ''' import pipes @@ -77,6 +105,11 @@ def query_package(module, opkg_path, name, state="present"): def remove_packages(module, opkg_path, packages): """ Uninstalls one or more packages if installed.
""" + p = module.params + force = p["force"] + if force: + force = "--force-%s" % force + remove_c = 0 # Using a for loop incase of error, we can report the package that failed for package in packages: @@ -84,7 +117,7 @@ def remove_packages(module, opkg_path, packages): if not query_package(module, opkg_path, package): continue - rc, out, err = module.run_command("%s remove %s" % (opkg_path, package)) + rc, out, err = module.run_command("%s remove %s %s" % (opkg_path, force, package)) if query_package(module, opkg_path, package): module.fail_json(msg="failed to remove %s: %s" % (package, out)) @@ -101,13 +134,18 @@ def remove_packages(module, opkg_path, packages): def install_packages(module, opkg_path, packages): """ Installs one or more packages if not already installed. """ + p = module.params + force = p["force"] + if force: + force = "--force-%s" % force + install_c = 0 for package in packages: if query_package(module, opkg_path, package): continue - rc, out, err = module.run_command("%s install %s" % (opkg_path, package)) + rc, out, err = module.run_command("%s install %s %s" % (opkg_path, force, package)) if not query_package(module, opkg_path, package): module.fail_json(msg="failed to install %s: %s" % (package, out)) @@ -125,6 +163,7 @@ def main(): argument_spec = dict( name = dict(aliases=["pkg"], required=True), state = dict(default="present", choices=["present", "installed", "absent", "removed"]), + force = dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove", "checksum", "removal-of-dependent-packages"]), update_cache = dict(default="no", aliases=["update-cache"], type='bool') ) ) @@ -147,4 +186,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/os/pacman.py b/packaging/os/pacman.py index 74e29a1f936..89766a49745 100644 --- a/packaging/os/pacman.py +++ b/packaging/os/pacman.py @@ -3,6 +3,7 @@ # (c) 2012, Afterburn # (c) 2013, Aaron Bull Schaefer +# (c) 2015, Indrajit Raychaudhuri # # This file is part of Ansible # @@ -19,6 +20,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: pacman @@ -28,6 +33,7 @@ Arch Linux and its variants. version_added: "1.0" author: + - "Indrajit Raychaudhuri (@indrajitr)" - "'Aaron Bull Schaefer (@elasticdog)' " - "Afterburn" notes: [] @@ -38,6 +44,7 @@ - Name of the package to install, upgrade, or remove. required: false default: null + aliases: [ 'pkg', 'package' ] state: description: @@ -52,15 +59,17 @@ that they are not required by other packages and were not explicitly installed by a user. required: false - default: "no" + default: no choices: ["yes", "no"] version_added: "1.3" force: description: - - Force remove package, without any checks. + - When removing package - force remove package, without any + checks. When update_cache - force redownload repo + databases. required: false - default: "no" + default: no choices: ["yes", "no"] version_added: "2.0" @@ -69,42 +78,62 @@ - Whether or not to refresh the master package lists. This can be run as part of a package installation or as a separate step. 
required: false - default: "no" + default: no choices: ["yes", "no"] + aliases: [ 'update-cache' ] upgrade: description: - Whether or not to upgrade the whole system required: false - default: "no" + default: no choices: ["yes", "no"] version_added: "2.0" ''' EXAMPLES = ''' # Install package foo -- pacman: name=foo state=present +- pacman: + name: foo + state: present # Upgrade package foo -- pacman: name=foo state=latest update_cache=yes +- pacman: + name: foo + state: latest + update_cache: yes # Remove packages foo and bar -- pacman: name=foo,bar state=absent +- pacman: + name: foo,bar + state: absent # Recursively remove package baz -- pacman: name=baz state=absent recurse=yes +- pacman: + name: baz + state: absent + recurse: yes # Run the equivalent of "pacman -Sy" as a separate step -- pacman: update_cache=yes +- pacman: + update_cache: yes # Run the equivalent of "pacman -Su" as a separate step -- pacman: upgrade=yes +- pacman: + upgrade: yes + +# Run the equivalent of "pacman -Syu" as a separate step +- pacman: + update_cache: yes + upgrade: yes # Run the equivalent of "pacman -Rdd", force remove package baz -- pacman: name=baz state=absent force=yes +- pacman: + name: baz + state: absent + force: yes ''' -import json import shlex import os import re @@ -119,13 +148,13 @@ def get_version(pacman_output): return None def query_package(module, pacman_path, name, state="present"): - """Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, and a second boolean to indicate if the package is up-to-date.""" + """Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, a second boolean to indicate if the package is up-to-date, and a third boolean to indicate whether online information was available.""" if state == "present": lcmd = "%s -Qi %s" % (pacman_path, name) lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) if lrc != 0: # package is not installed locally - return False, False + return False, False, False # get the version installed locally (if any) lversion = get_version(lstdout) @@ -138,13 +167,19 @@ def query_package(module, pacman_path, name, state="present"): if rrc == 0: # Return True to indicate that the package is installed locally, and the result of the version number comparison # to determine if the package is up-to-date. - return True, (lversion == rversion) + return True, (lversion == rversion), False - return False, False + # package is installed but we cannot fetch the remote version.
The last True flags the error + return True, True, True def update_package_db(module, pacman_path): - cmd = "%s -Sy" % (pacman_path) + if module.params["force"]: + args = "Syy" + else: + args = "Sy" + + cmd = "%s -%s" % (pacman_path, args) rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc == 0: @@ -158,23 +193,25 @@ def upgrade(module, pacman_path): rc, stdout, stderr = module.run_command(cmdneedrefresh, check_rc=False) if rc == 0: + if module.check_mode: + data = stdout.split('\n') + module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data) - 1)) rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False) if rc == 0: module.exit_json(changed=True, msg='System upgraded') else: - module.fail_json(msg="could not upgrade") + module.fail_json(msg="Could not upgrade") else: module.exit_json(changed=False, msg='Nothing to upgrade') def remove_packages(module, pacman_path, packages): - if module.params["recurse"]: - args = "Rs" - else: - args = "R" - -def remove_packages(module, pacman_path, packages): - if module.params["force"]: - args = "Rdd" + if module.params["recurse"] or module.params["force"]: + if module.params["recurse"]: + args = "Rs" + if module.params["force"]: + args = "Rdd" + if module.params["recurse"] and module.params["force"]: + args = "Rdds" else: args = "R" @@ -182,7 +219,7 @@ def remove_packages(module, pacman_path, packages): # Using a for loop in case of error, we can report the package that failed for package in packages: # Query the package first, to see if we even need to remove - installed, updated = query_package(module, pacman_path, package) + installed, updated, unknown = query_package(module, pacman_path, package) if not installed: continue @@ -203,10 +240,15 @@ def remove_packages(module, pacman_path, packages): def install_packages(module, pacman_path, state, packages, package_files): install_c = 0 + package_err = [] + message = "" for i, package in enumerate(packages): # if the package is installed and state == present or state == latest and is up-to-date then skip - installed, updated = query_package(module, pacman_path, package) + installed, updated, latestError = query_package(module, pacman_path, package) + if latestError and state == 'latest': + package_err.append(package) + if installed and (state == 'present' or (state == 'latest' and updated)): continue @@ -215,7 +257,7 @@ def install_packages(module, pacman_path, state, packages, package_files): else: params = '-S %s' % package - cmd = "%s %s --noconfirm" % (pacman_path, params) + cmd = "%s %s --noconfirm --needed" % (pacman_path, params) rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: @@ -223,16 +265,18 @@ def install_packages(module, pacman_path, state, packages, package_files): install_c += 1 - if install_c > 0: - module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) + if state == 'latest' and len(package_err) > 0: + message = "But could not ensure 'latest' state for %s package(s) as the remote version could not be fetched." % (package_err) - module.exit_json(changed=False, msg="package(s) already installed") + if install_c > 0: + module.exit_json(changed=True, msg="installed %s package(s). %s" % (install_c, message)) + module.exit_json(changed=False, msg="package(s) already installed.
%s" % (message)) def check_packages(module, pacman_path, packages, state): would_be_changed = [] for package in packages: - installed, updated = query_package(module, pacman_path, package) + installed, updated, unknown = query_package(module, pacman_path, package) if ((state in ["present", "latest"] and not installed) or (state == "absent" and installed) or (state == "latest" and not updated)): @@ -243,26 +287,43 @@ def check_packages(module, pacman_path, packages, state): module.exit_json(changed=True, msg="%s package(s) would be %s" % ( len(would_be_changed), state)) else: - module.exit_json(change=False, msg="package(s) already %s" % state) + module.exit_json(changed=False, msg="package(s) already %s" % state) + + +def expand_package_groups(module, pacman_path, pkgs): + expanded = [] + + for pkg in pkgs: + cmd = "%s -Sgq %s" % (pacman_path, pkg) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc == 0: + # A group was found matching the name, so expand it + for name in stdout.split('\n'): + name = name.strip() + if name: + expanded.append(name) + else: + expanded.append(pkg) + + return expanded def main(): module = AnsibleModule( argument_spec = dict( - name = dict(aliases=['pkg']), + name = dict(aliases=['pkg', 'package'], type='list'), state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']), - recurse = dict(default='no', choices=BOOLEANS, type='bool'), - force = dict(default='no', choices=BOOLEANS, type='bool'), - upgrade = dict(default='no', choices=BOOLEANS, type='bool'), - update_cache = dict(default='no', aliases=['update-cache'], choices=BOOLEANS, type='bool')), + recurse = dict(default=False, type='bool'), + force = dict(default=False, type='bool'), + upgrade = dict(default=False, type='bool'), + update_cache = dict(default=False, aliases=['update-cache'], type='bool') + ), required_one_of = [['name', 'update_cache', 'upgrade']], supports_check_mode = True) pacman_path = module.get_bin_path('pacman', True) - if not os.path.exists(pacman_path): - module.fail_json(msg="cannot find pacman, in path %s" % (pacman_path)) - p = module.params # normalize the state parameter @@ -273,17 +334,17 @@ def main(): if p["update_cache"] and not module.check_mode: update_package_db(module, pacman_path) - if not p['name']: - module.exit_json(changed=True, msg='updated the package master lists') + if not (p['name'] or p['upgrade']): + module.exit_json(changed=True, msg='Updated the package master lists') - if p['update_cache'] and module.check_mode and not p['name']: + if p['update_cache'] and module.check_mode and not (p['name'] or p['upgrade']): module.exit_json(changed=True, msg='Would have updated the package cache') if p['upgrade']: upgrade(module, pacman_path) if p['name']: - pkgs = p['name'].split(',') + pkgs = expand_package_groups(module, pacman_path, p['name']) pkg_files = [] for i, pkg in enumerate(pkgs): diff --git a/packaging/os/pkg5.py b/packaging/os/pkg5.py index 837eefd243e..4c02d63821a 100644 --- a/packaging/os/pkg5.py +++ b/packaging/os/pkg5.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
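For clarity, here is the idea behind expand_package_groups() from the pacman hunk above in isolation: "pacman -Sgq <name>" prints one member per line when <name> is a group and fails otherwise, so groups are substituted by their members and plain names pass through. The command runner is faked below so the sketch runs anywhere; the real module calls module.run_command:

    def expand_package_groups(run, pkgs):
        expanded = []
        for pkg in pkgs:
            rc, stdout, stderr = run("pacman -Sgq %s" % pkg)
            if rc == 0:
                # A group matched: substitute its member packages.
                expanded.extend(name.strip() for name in stdout.split('\n') if name.strip())
            else:
                # Not a group: keep the name as a plain package.
                expanded.append(pkg)
        return expanded

    def fake_run(cmd):
        # Pretend only 'base-devel' is a group (illustrative data, not real pacman output).
        if cmd.endswith('base-devel'):
            return 0, "gcc\nmake\npatch\n", ""
        return 1, "", ""

    print(expand_package_groups(fake_run, ['vim', 'base-devel']))
    # ['vim', 'gcc', 'make', 'patch']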
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: pkg5 @@ -49,10 +53,13 @@ ''' EXAMPLES = ''' # Install Vim: -- pkg5: name=editor/vim +- pkg5: + name: editor/vim # Remove finger daemon: -- pkg5: name=service/network/finger state=absent +- pkg5: + name: service/network/finger + state: absent # Install several packages at once: - pkg5: @@ -78,7 +85,7 @@ def main(): ] ), accept_licenses=dict( - choices=BOOLEANS, + type='bool', default=False, aliases=['accept_licences', 'accept'], ), @@ -165,4 +172,6 @@ def is_latest(module, package): from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/packaging/os/pkg5_publisher.py b/packaging/os/pkg5_publisher.py index 3881f5dd0b8..279b40f0090 100644 --- a/packaging/os/pkg5_publisher.py +++ b/packaging/os/pkg5_publisher.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: pkg5_publisher @@ -66,10 +70,15 @@ ''' EXAMPLES = ''' # Fetch packages for the solaris publisher direct from Oracle: -- pkg5_publisher: name=solaris sticky=true origin=https://pkg.oracle.com/solaris/support/ +- pkg5_publisher: + name: solaris + sticky: true + origin: https://pkg.oracle.com/solaris/support/ # Configure a publisher for locally-produced packages: -- pkg5_publisher: name=site origin=https://pkg.example.com/site/ +- pkg5_publisher: + name: site + origin: 'https://pkg.example.com/site/' ''' def main(): @@ -77,8 +86,8 @@ def main(): argument_spec=dict( name=dict(required=True, aliases=['publisher']), state=dict(default='present', choices=['present', 'absent']), - sticky=dict(choices=BOOLEANS), - enabled=dict(choices=BOOLEANS), + sticky=dict(type='bool'), + enabled=dict(type='bool'), # search_after=dict(), # search_before=dict(), origin=dict(type='list'), @@ -180,13 +189,14 @@ def get_publishers(module): publishers[name]['origin'] = [] publishers[name]['mirror'] = [] - publishers[name][values['type']].append(values['uri']) + if values['type'] is not None: + publishers[name][values['type']].append(values['uri']) return publishers def unstringify(val): - if val == "-": + if val == "-" or val == '': return None elif val == "true": return True @@ -197,4 +207,6 @@ def unstringify(val): from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/packaging/os/pkgin.py b/packaging/os/pkgin.py old mode 100644 new mode 100755 index e600026409b..8e75f2d18ce --- a/packaging/os/pkgin.py +++ b/packaging/os/pkgin.py @@ -3,6 +3,7 @@ # Copyright (c) 2013 Shaun Zinck # Copyright (c) 2015 Lawrence Leonard Gilbert +# Copyright (c) 2016 Jasper Lievisse Adriaanse # # Written by Shaun Zinck # Based on pacman module written by Afterburn @@ -22,6 +23,10 @@ # along with this software. If not, see . 
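Before the pkgin diff, a note on the pkg5_publisher change above: unstringify() now treats an empty field like the "-" placeholder when parsing publisher listings. Restated compactly below; the "false" branch is not visible in the hunk and is assumed symmetric to the "true" case:

    def unstringify(val):
        # pkg publisher listings print '-' (and sometimes nothing) for unset fields.
        if val == "-" or val == '':
            return None
        elif val == "true":
            return True
        elif val == "false":  # assumed branch, mirrored from the 'true' case
            return False
        return val

    assert unstringify("-") is None and unstringify("") is None
    assert unstringify("true") is True and unstringify("false") is False
    assert unstringify("https://pkg.example.com/site/") == "https://pkg.example.com/site/"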
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: pkgin @@ -33,6 +38,7 @@ author: - "Larry Gilbert (L2G)" - "Shaun Zinck (@szinck)" + - "Jasper Lievisse Adriaanse (@jasperla)" notes: - "Known bug with pkgin < 0.8.0: if a package is removed and another package depends on it, the other package will be silently removed as @@ -42,34 +48,98 @@ description: - Name of package to install/remove; - multiple names may be given, separated by commas - required: true + required: false + default: null state: description: - Intended state of the package choices: [ 'present', 'absent' ] required: false default: present + update_cache: + description: + - Update repository database. Can be run with other steps or on its own. + required: false + default: no + choices: [ "yes", "no" ] + version_added: "2.1" + upgrade: + description: + - Upgrade main packages to their newer versions + required: false + default: no + choices: [ "yes", "no" ] + version_added: "2.1" + full_upgrade: + description: + - Upgrade all packages to their newer versions + required: false + default: no + choices: [ "yes", "no" ] + version_added: "2.1" + clean: + description: + - Clean packages cache + required: false + default: no + choices: [ "yes", "no" ] + version_added: "2.1" + force: + description: + - Force package reinstall + required: false + default: no + choices: [ "yes", "no" ] + version_added: "2.1" ''' EXAMPLES = ''' # install package foo -- pkgin: name=foo state=present +- pkgin: + name: foo + state: present + +# Update database and install "foo" package +- pkgin: + name: foo + update_cache: yes # remove package foo -- pkgin: name=foo state=absent +- pkgin: + name: foo + state: absent # remove packages foo and bar -- pkgin: name=foo,bar state=absent +- pkgin: + name: foo,bar + state: absent + +# Update repositories as a separate step +- pkgin: + update_cache: yes + +# Upgrade main packages (equivalent to C(pkgin upgrade)) +- pkgin: + upgrade: yes + +# Upgrade all packages (equivalent to C(pkgin full-upgrade)) +- pkgin: + full_upgrade: yes + +# Force-upgrade all packages (equivalent to C(pkgin -F full-upgrade)) +- pkgin: + full_upgrade: yes + force: yes + +# clean packages cache (equivalent to C(pkgin clean)) +- pkgin: + clean: yes ''' -import json -import shlex -import os -import sys -import pipes +import re -def query_package(module, pkgin_path, name): +def query_package(module, name): """Search for the package by name. Possible return values: @@ -79,7 +149,7 @@ """ # test whether '-p' (parsable) flag is supported. - rc, out, err = module.run_command("%s -p -v" % pkgin_path) + rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH) if rc == 0: pflag = '-p' @@ -90,38 +160,51 @@ # Use "pkgin search" to find the package. The regular expression will # only match on the complete name.
'gcc47-libs-4.7.2nb4'), and the second will be the state - # of the package: - # '' - not installed - # '<' - installed but out of date - # '=' - installed and up to date - # '>' - installed but newer than the repository version - pkgname_with_version, raw_state = out.split(splitchar)[0:2] - - # Strip version - # (results in sth like 'gcc47-libs') - pkgname_without_version = '-'.join(pkgname_with_version.split('-')[:-1]) - - if name != pkgname_without_version: - return False - # no fall-through - - # The package was found; now return its state - if raw_state == '<': - return 'outdated' - elif raw_state == '=' or raw_state == '>': - return 'present' - else: - return False + # Search results may contain more than one line (e.g., 'emacs'), so iterate + # through each line to see if we have a match. + packages = out.split('\n') + + for package in packages: + + # Break up line at spaces. The first part will be the package with its + # version (e.g. 'gcc47-libs-4.7.2nb4'), and the second will be the state + # of the package: + # '' - not installed + # '<' - installed but out of date + # '=' - installed and up to date + # '>' - installed but newer than the repository version + pkgname_with_version, raw_state = package.split(splitchar)[0:2] + + # Search for package, stripping version + # (results in sth like 'gcc47-libs' or 'emacs24-nox11') + pkg_search_obj = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M) + + # Do not proceed unless we have a match + if not pkg_search_obj: + continue + + # Grab matched string + pkgname_without_version = pkg_search_obj.group(1) + + if name != pkgname_without_version: + continue + + # The package was found; now return its state + if raw_state == '<': + return 'outdated' + elif raw_state == '=' or raw_state == '>': + return 'present' + else: + return False + # no fall-through + + # No packages were matched, so return False + return False def format_action_message(module, action, count): @@ -139,31 +222,43 @@ def format_action_message(module, action, count): return message + "s" -def format_pkgin_command(module, pkgin_path, command, package): - vars = { "pkgin": pkgin_path, +def format_pkgin_command(module, command, package=None): + # Not all commands take a package argument, so cover this up by passing + # an empty string. Some commands (e.g. 'update') will ignore extra + # arguments, however this behaviour cannot be relied on for others. 
+ if package is None: + package = "" + + if module.params["force"]: + force = "-F" + else: + force = "" + + vars = { "pkgin": PKGIN_PATH, "command": command, - "package": package } + "package": package, + "force": force} if module.check_mode: return "%(pkgin)s -n %(command)s %(package)s" % vars else: - return "%(pkgin)s -y %(command)s %(package)s" % vars + return "%(pkgin)s -y %(force)s %(command)s %(package)s" % vars -def remove_packages(module, pkgin_path, packages): +def remove_packages(module, packages): remove_c = 0 # Using a for loop in case of error, we can report the package that failed for package in packages: # Query the package first, to see if we even need to remove - if not query_package(module, pkgin_path, package): + if not query_package(module, package): continue rc, out, err = module.run_command( - format_pkgin_command(module, pkgin_path, "remove", package)) + format_pkgin_command(module, "remove", package)) - if not module.check_mode and query_package(module, pkgin_path, package): + if not module.check_mode and query_package(module, package): module.fail_json(msg="failed to remove %s: %s" % (package, out)) remove_c += 1 @@ -174,18 +269,18 @@ module.exit_json(changed=False, msg="package(s) already absent") -def install_packages(module, pkgin_path, packages): +def install_packages(module, packages): install_c = 0 for package in packages: - if query_package(module, pkgin_path, package): + if query_package(module, package): continue rc, out, err = module.run_command( - format_pkgin_command(module, pkgin_path, "install", package)) + format_pkgin_command(module, "install", package)) - if not module.check_mode and not query_package(module, pkgin_path, package): + if not module.check_mode and not query_package(module, package): module.fail_json(msg="failed to install %s: %s" % (package, out)) install_c += 1 @@ -195,28 +290,100 @@ module.exit_json(changed=False, msg="package(s) already present") +def update_package_db(module): + rc, out, err = module.run_command( + format_pkgin_command(module, "update")) + + if rc == 0: + if re.search('database for.*is up-to-date\n$', out): + return False, "database is up-to-date" + else: + return True, "updated repository database" + else: + module.fail_json(msg="could not update package db") + +def do_upgrade_packages(module, full=False): + if full: + cmd = "full-upgrade" + else: + cmd = "upgrade" + + rc, out, err = module.run_command( + format_pkgin_command(module, cmd)) + + if rc == 0: + if re.search('^nothing to do.\n$', out): + module.exit_json(changed=False, msg="nothing left to upgrade") + else: + module.fail_json(msg="could not %s packages" % cmd) + +def upgrade_packages(module): + do_upgrade_packages(module) + +def full_upgrade_packages(module): + do_upgrade_packages(module, True) +def clean_cache(module): + rc, out, err = module.run_command( + format_pkgin_command(module, "clean")) + + if rc == 0: + # There's no indication if 'clean' actually removed anything, + # so assume it did.
+ module.exit_json(changed=True, msg="cleaned caches") + else: + module.fail_json(msg="could not clean package cache") def main(): module = AnsibleModule( argument_spec = dict( state = dict(default="present", choices=["present","absent"]), - name = dict(aliases=["pkg"], required=True)), + name = dict(aliases=["pkg"], type='list'), + update_cache = dict(default='no', type='bool'), + upgrade = dict(default='no', type='bool'), + full_upgrade = dict(default='no', type='bool'), + clean = dict(default='no', type='bool'), + force = dict(default='no', type='bool')), + required_one_of = [['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']], supports_check_mode = True) - pkgin_path = module.get_bin_path('pkgin', True, ['/opt/local/bin']) + global PKGIN_PATH + PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin']) + + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') p = module.params - pkgs = p["name"].split(",") + if p["update_cache"]: + c, msg = update_package_db(module) + if not (p['name'] or p["upgrade"] or p["full_upgrade"]): + module.exit_json(changed=c, msg=msg) + + if p["upgrade"]: + upgrade_packages(module) + if not p['name']: + module.exit_json(changed=True, msg='upgraded packages') + + if p["full_upgrade"]: + full_upgrade_packages(module) + if not p['name']: + module.exit_json(changed=True, msg='upgraded all packages') + + if p["clean"]: + clean_cache(module) + if not p['name']: + module.exit_json(changed=True, msg='cleaned caches') + + pkgs = p["name"] if p["state"] == "present": - install_packages(module, pkgin_path, pkgs) + install_packages(module, pkgs) elif p["state"] == "absent": - remove_packages(module, pkgin_path, pkgs) + remove_packages(module, pkgs) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/os/pkgng.py b/packaging/os/pkgng.py index fe0f2687b31..5727b190031 100644 --- a/packaging/os/pkgng.py +++ b/packaging/os/pkgng.py @@ -21,6 +21,10 @@ # along with this software. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: pkgng @@ -32,42 +36,56 @@ options: name: description: - - name of package to install/remove + - Name of package to install/remove. required: true state: description: - - state of the package + - State of the package. choices: [ 'present', 'absent' ] required: false default: present cached: description: - - use local package base or try to fetch an updated one + - Use local package base instead of fetching an updated one. choices: [ 'yes', 'no' ] required: false default: no annotation: description: - - a comma-separated list of keyvalue-pairs of the form - <+/-/:>[=]. A '+' denotes adding an annotation, a - '-' denotes removing an annotation, and ':' denotes modifying an + - A comma-separated list of key/value pairs of the form + C(<+/-/:>[=]). A C(+) denotes adding an annotation, a + C(-) denotes removing an annotation, and C(:) denotes modifying an annotation. If setting or modifying annotations, a value must be provided. required: false version_added: "1.6" pkgsite: description: - - for pkgng versions before 1.1.4, specify packagesite to use - for downloading packages, if not specified, use settings from - /usr/local/etc/pkg.conf - for newer pkgng versions, specify a the name of a repository - configured in /usr/local/etc/pkg/repos + - For pkgng versions before 1.1.4, specify packagesite to use + for downloading packages.
If not specified, use settings from + C(/usr/local/etc/pkg.conf). + - For newer pkgng versions, specify the name of a repository + configured in C(/usr/local/etc/pkg/repos). required: false rootdir: description: - - for pkgng versions 1.5 and later, pkg will install all packages - within the specified root directory + - For pkgng versions 1.5 and later, pkg will install all packages + within the specified root directory. + - Cannot be used together with the I(chroot) option. + required: false + chroot: + version_added: "2.1" + description: + - Pkg will chroot into the specified environment. + - Cannot be used together with the I(rootdir) option. required: false + autoremove: + version_added: "2.2" + description: + - Remove automatically installed packages which are no longer needed. + required: false + choices: [ "yes", "no" ] + default: no author: "bleader (@bleader)" notes: - When using pkgsite, be careful that already in cache packages won't be downloaded again. @@ -75,25 +93,28 @@ EXAMPLES = ''' # Install package foo -- pkgng: name=foo state=present +- pkgng: + name: foo + state: present # Annotate package foo and bar -- pkgng: name=foo,bar annotation=+test1=baz,-test2,:test3=foobar +- pkgng: + name: foo,bar + annotation: '+test1=baz,-test2,:test3=foobar' # Remove packages foo and bar -- pkgng: name=foo,bar state=absent +- pkgng: + name: foo,bar + state: absent ''' -import json -import shlex -import os import re -import sys +from ansible.module_utils.basic import AnsibleModule -def query_package(module, pkgng_path, name, rootdir_arg): +def query_package(module, pkgng_path, name, dir_arg): - rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, rootdir_arg, name)) + rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name)) if rc == 0: return True @@ -103,7 +124,7 @@ def pkgng_older_than(module, pkgng_path, compare_version): rc, out, err = module.run_command("%s -v" % pkgng_path) - version = map(lambda x: int(x), re.split(r'[\._]', out)) + version = [int(x) for x in re.split(r'[\._]', out)] i = 0 new_pkgng = True @@ -117,21 +138,21 @@ def pkgng_older_than(module, pkgng_path, compare_version): return not new_pkgng -def remove_packages(module, pkgng_path, packages, rootdir_arg): - +def remove_packages(module, pkgng_path, packages, dir_arg): + remove_c = 0 # Using a for loop in case of error, we can report the package that failed for package in packages: # Query the package first, to see if we even need to remove - if not query_package(module, pkgng_path, package, rootdir_arg): + if not query_package(module, pkgng_path, package, dir_arg): continue if not module.check_mode: - rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, rootdir_arg, package)) + rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, dir_arg, package)) - if not module.check_mode and query_package(module, pkgng_path, package, rootdir_arg): + if not module.check_mode and query_package(module, pkgng_path, package, dir_arg): module.fail_json(msg="failed to remove %s: %s" % (package, out)) - + remove_c += 1 if remove_c > 0: @@ -141,7 +162,7 @@ def remove_packages(module, pkgng_path, packages, rootdir_arg): return (False, "package(s) already absent") -def install_packages(module, pkgng_path, packages, cached, pkgsite, rootdir_arg): +def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg): install_c = 0 @@ -161,44 +182,44 @@ def install_packages(module, pkgng_path, packages,
cached, pkgsite, rootdir_arg) if old_pkgng: rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path)) else: - rc, out, err = module.run_command("%s update" % (pkgng_path)) + rc, out, err = module.run_command("%s %s update" % (pkgng_path, dir_arg)) if rc != 0: module.fail_json(msg="Could not update catalogue") for package in packages: - if query_package(module, pkgng_path, package, rootdir_arg): + if query_package(module, pkgng_path, package, dir_arg): continue if not module.check_mode: if old_pkgng: rc, out, err = module.run_command("%s %s %s install -g -U -y %s" % (batch_var, pkgsite, pkgng_path, package)) else: - rc, out, err = module.run_command("%s %s %s install %s -g -U -y %s" % (batch_var, pkgng_path, rootdir_arg, pkgsite, package)) + rc, out, err = module.run_command("%s %s %s install %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, pkgsite, package)) - if not module.check_mode and not query_package(module, pkgng_path, package, rootdir_arg): + if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg): module.fail_json(msg="failed to install %s: %s" % (package, out), stderr=err) install_c += 1 - + if install_c > 0: return (True, "added %s package(s)" % (install_c)) return (False, "package(s) already present") -def annotation_query(module, pkgng_path, package, tag, rootdir_arg): - rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, rootdir_arg, package)) +def annotation_query(module, pkgng_path, package, tag, dir_arg): + rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package)) match = re.search(r'^\s*(?P%s)\s*:\s*(?P\w+)' % tag, out, flags=re.MULTILINE) if match: return match.group('value') return False -def annotation_add(module, pkgng_path, package, tag, value, rootdir_arg): - _value = annotation_query(module, pkgng_path, package, tag, rootdir_arg) +def annotation_add(module, pkgng_path, package, tag, value, dir_arg): + _value = annotation_query(module, pkgng_path, package, tag, dir_arg) if not _value: # Annotation does not exist, add it. 
rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"' - % (pkgng_path, rootdir_arg, package, tag, value)) + % (pkgng_path, dir_arg, package, tag, value)) if rc != 0: module.fail_json("could not annotate %s: %s" % (package, out), stderr=err) @@ -213,19 +234,19 @@ def annotation_add(module, pkgng_path, package, tag, value, rootdir_arg): # Annotation exists, nothing to do return False -def annotation_delete(module, pkgng_path, package, tag, value, rootdir_arg): - _value = annotation_query(module, pkgng_path, package, tag, rootdir_arg) +def annotation_delete(module, pkgng_path, package, tag, value, dir_arg): + _value = annotation_query(module, pkgng_path, package, tag, dir_arg) if _value: rc, out, err = module.run_command('%s %s annotate -y -D %s %s' - % (pkgng_path, rootdir_arg, package, tag)) + % (pkgng_path, dir_arg, package, tag)) if rc != 0: module.fail_json("could not delete annotation to %s: %s" % (package, out), stderr=err) return True return False -def annotation_modify(module, pkgng_path, package, tag, value, rootdir_arg): - _value = annotation_query(module, pkgng_path, package, tag, rootdir_arg) +def annotation_modify(module, pkgng_path, package, tag, value, dir_arg): + _value = annotation_query(module, pkgng_path, package, tag, dir_arg) if not value: # No such tag module.fail_json("could not change annotation to %s: tag %s does not exist" @@ -235,14 +256,14 @@ def annotation_modify(module, pkgng_path, package, tag, value, rootdir_arg): return False else: rc,out,err = module.run_command('%s %s annotate -y -M %s %s "%s"' - % (pkgng_path, rootdir_arg, package, tag, value)) + % (pkgng_path, dir_arg, package, tag, value)) if rc != 0: module.fail_json("could not change annotation annotation to %s: %s" % (package, out), stderr=err) return True -def annotate_packages(module, pkgng_path, packages, annotation, rootdir_arg): +def annotate_packages(module, pkgng_path, packages, annotation, dir_arg): annotate_c = 0 annotations = map(lambda _annotation: re.match(r'(?P[\+-:])(?P\w+)(=(?P\w+))?', @@ -264,54 +285,79 @@ def annotate_packages(module, pkgng_path, packages, annotation, rootdir_arg): return (True, "added %s annotations." 
% annotate_c) return (False, "changed no annotations") +def autoremove_packages(module, pkgng_path, dir_arg): + rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg)) + + autoremove_c = 0 + + match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE) + if match: + autoremove_c = int(match.group(1)) + + if autoremove_c == 0: + return False, "no package(s) to autoremove" + + if not module.check_mode: + rc, out, err = module.run_command("%s %s autoremove -y" % (pkgng_path, dir_arg)) + + return True, "autoremoved %d package(s)" % (autoremove_c) + def main(): module = AnsibleModule( argument_spec = dict( state = dict(default="present", choices=["present","absent"], required=False), - name = dict(aliases=["pkg"], required=True), + name = dict(aliases=["pkg"], required=True, type='list'), cached = dict(default=False, type='bool'), annotation = dict(default="", required=False), pkgsite = dict(default="", required=False), - rootdir = dict(default="", required=False)), - supports_check_mode = True) + rootdir = dict(default="", required=False, type='path'), + chroot = dict(default="", required=False, type='path'), + autoremove = dict(default=False, type='bool')), + supports_check_mode = True, + mutually_exclusive =[["rootdir", "chroot"]]) pkgng_path = module.get_bin_path('pkg', True) p = module.params - pkgs = p["name"].split(",") + pkgs = p["name"] changed = False msgs = [] - rootdir_arg = "" + dir_arg = "" if p["rootdir"] != "": old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0]) if old_pkgng: module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater") else: - rootdir_arg = "--rootdir %s" % (p["rootdir"]) + dir_arg = "--rootdir %s" % (p["rootdir"]) + + if p["chroot"] != "": + dir_arg = '--chroot %s' % (p["chroot"]) if p["state"] == "present": - _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], rootdir_arg) + _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], dir_arg) changed = changed or _changed msgs.append(_msg) elif p["state"] == "absent": - _changed, _msg = remove_packages(module, pkgng_path, pkgs, rootdir_arg) + _changed, _msg = remove_packages(module, pkgng_path, pkgs, dir_arg) + changed = changed or _changed + msgs.append(_msg) + + if p["autoremove"]: + _changed, _msg = autoremove_packages(module, pkgng_path, dir_arg) changed = changed or _changed msgs.append(_msg) if p["annotation"]: - _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], rootdir_arg) + _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg) changed = changed or _changed msgs.append(_msg) module.exit_json(changed=changed, msg=", ".join(msgs)) - -# import module snippets -from ansible.module_utils.basic import * - -main() +if __name__ == '__main__': + main() diff --git a/packaging/os/pkgutil.py b/packaging/os/pkgutil.py index 3a4720630cf..a54e96eeb08 100644 --- a/packaging/os/pkgutil.py +++ b/packaging/os/pkgutil.py @@ -2,8 +2,8 @@ # -*- coding: utf-8 -*- # (c) 2013, Alexander Winkler -# based on svr4pkg by -# Boyd Adamson (2012) +# based on svr4pkg by +# Boyd Adamson (2012) # # This file is part of Ansible # @@ -21,6 +21,10 @@ # along with Ansible. If not, see . 
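A quick illustration of the autoremove counting logic added to pkgng above — a minimal sketch, not part of the module; the sample dry-run output string is invented but assumed to follow the format the module's regex matches:

import re

sample = "Deinstallation has been requested for the following 2 packages:\n\tfoo-1.0\n\tbar-2.1\n"
match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', sample, re.MULTILINE)
autoremove_c = int(match.group(1)) if match else 0
print(autoremove_c)  # 2 -> the module would then run "pkg ... autoremove -y"; 0 means nothing to autoremove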
# +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: pkgutil @@ -42,6 +46,7 @@ description: - Specifies the repository path to install the package from. - Its global definition is done in C(/etc/opt/csw/pkgutil.conf). + required: false state: description: - Whether to install (C(present)), or remove (C(absent)) a package. @@ -49,24 +54,35 @@ - "Note: The module has a limitation that (C(latest)) only works for one package, not lists of them." required: true choices: ["present", "absent", "latest"] + update_catalog: + description: + - If you want to refresh your catalog from the mirror, set this to C(yes). + required: false + default: False + version_added: "2.1" ''' EXAMPLES = ''' # Install a package -pkgutil: name=CSWcommon state=present +- pkgutil: + name: CSWcommon + state: present # Install a package from a specific repository -pkgutil: name=CSWnrpe site='ftp://myinternal.repo/opencsw/kiel state=latest' +- pkgutil: + name: CSWnrpe + site: 'ftp://myinternal.repo/opencsw/kiel' + state: latest ''' import os import pipes def package_installed(module, name): - cmd = [module.get_bin_path('pkginfo', True)] + cmd = ['pkginfo'] cmd.append('-q') cmd.append(name) - rc, out, err = module.run_command(' '.join(cmd)) + rc, out, err = run_command(module, cmd) if rc == 0: return True else: @@ -74,24 +90,25 @@ def package_installed(module, name): def package_latest(module, name, site): # Only supports one package - cmd = [ 'pkgutil', '--single', '-c' ] + cmd = [ 'pkgutil', '-U', '--single', '-c' ] if site is not None: - cmd += [ '-t', pipes.quote(site) ] - cmd.append(pipes.quote(name)) - cmd += [ '| tail -1 | grep -v SAME' ] - rc, out, err = module.run_command(' '.join(cmd), use_unsafe_shell=True) - if rc == 1: - return True - else: - return False + cmd += [ '-t', site] + cmd.append(name) + rc, out, err = run_command(module, cmd) + # replace | tail -1 |grep -v SAME + # use -2, because splitting on \n creates an empty line + # at the end of the list + return 'SAME' in out.split('\n')[-2] -def run_command(module, cmd): progname = cmd[0] - cmd[0] = module.get_bin_path(progname, True) - return module.run_command(cmd) +def run_command(module, cmd, **kwargs): progname = cmd[0] + cmd[0] = module.get_bin_path(progname, True, ['/opt/csw/bin']) + return module.run_command(cmd, **kwargs) -def package_install(module, state, name, site): +def package_install(module, state, name, site, update_catalog): cmd = [ 'pkgutil', '-iy' ] + if update_catalog: + cmd += [ '-U' ] if site is not None: cmd += [ '-t', site ] if state == 'latest': @@ -100,8 +117,10 @@ def package_install(module, state, name, site): (rc, out, err) = run_command(module, cmd) return (rc, out, err) -def package_upgrade(module, name, site): +def package_upgrade(module, name, site, update_catalog): cmd = [ 'pkgutil', '-ufy' ] + if update_catalog: + cmd += [ '-U' ] if site is not None: cmd += [ '-t', site ] cmd.append(name) @@ -119,12 +138,14 @@ def main(): name = dict(required = True), state = dict(required = True, choices=['present', 'absent','latest']), site = dict(default = None), + update_catalog = dict(required = False, default = False, type='bool'), ), supports_check_mode=True ) name = module.params['name'] state = module.params['state'] site = module.params['site'] + update_catalog = module.params['update_catalog'] rc = None out = '' err = '' @@ -136,31 +157,59 @@ def main(): if state == 'present': if not package_installed(module, name): if module.check_mode: module.exit_json(changed=True) - (rc, out, err)
= package_install(module, state, name, site) + (rc, out, err) = package_install(module, state, name, site, update_catalog) # Stdout is normally empty but for some packages can be # very long and is not often useful if len(out) > 75: out = out[:75] + '...' + if rc != 0: + if err: + msg = err + else: + msg = out + module.fail_json(msg=msg) elif state == 'latest': if not package_installed(module, name): if module.check_mode: module.exit_json(changed=True) - (rc, out, err) = package_install(module, state, name, site) + (rc, out, err) = package_install(module, state, name, site, update_catalog) + if len(out) > 75: + out = out[:75] + '...' + if rc != 0: + if err: + msg = err + else: + msg = out + module.fail_json(msg=msg) + else: if not package_latest(module, name, site): if module.check_mode: module.exit_json(changed=True) - (rc, out, err) = package_upgrade(module, name, site) + (rc, out, err) = package_upgrade(module, name, site, update_catalog) if len(out) > 75: out = out[:75] + '...' + if rc != 0: + if err: + msg = err + else: + msg = out + module.fail_json(msg=msg) elif state == 'absent': if package_installed(module, name): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = package_uninstall(module, name) - out = out[:75] + if len(out) > 75: + out = out[:75] + '...' + if rc != 0: + if err: + msg = err + else: + msg = out + module.fail_json(msg=msg) if rc is None: # pkgutil was not executed because the package was already present/absent @@ -180,4 +229,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/packaging/os/portage.py b/packaging/os/portage.py index 7be55db3ca8..5debeda058c 100644 --- a/packaging/os/portage.py +++ b/packaging/os/portage.py @@ -1,8 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# (c) 2016, William L Thomson Jr # (c) 2013, Yap Sok Ann # Written by Yap Sok Ann +# Modified by William L. Thomson Jr. # Based on apt module written by Matthew Williams # # This module is free software: you can redistribute it and/or modify @@ -19,6 +21,10 @@ # along with this software. If not, see . 
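To see why the rewritten pkgutil package_latest() above indexes out.split('\n')[-2], here is a minimal sketch; the sample output line is invented for illustration:

out = "CSWnrpe  nrpe  2.15,REV=2014  2.15,REV=2014  SAME\n"
lines = out.split('\n')
# The trailing newline leaves an empty string at the end of the list,
# so lines[-1] is '' and lines[-2] is the last real line of output.
print(lines[-1] == '')      # True
print('SAME' in lines[-2])  # True -> the installed package is already the latest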
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: portage @@ -76,29 +82,29 @@ description: - Do not add the packages to the world file (--oneshot) required: false - default: null - choices: [ "yes" ] + default: False + choices: [ "yes", "no" ] noreplace: description: - Do not re-emerge installed packages (--noreplace) required: false - default: null - choices: [ "yes" ] + default: False + choices: [ "yes", "no" ] nodeps: description: - Only merge packages but not their dependencies (--nodeps) required: false - default: null - choices: [ "yes" ] + default: False + choices: [ "yes", "no" ] onlydeps: description: - Only merge packages' dependencies but not the packages (--onlydeps) required: false - default: null - choices: [ "yes" ] + default: False + choices: [ "yes", "no" ] depclean: description: @@ -106,22 +112,22 @@ - If no package is specified, clean up the world's dependencies - Otherwise, --depclean serves as a dependency aware version of --unmerge required: false - default: null - choices: [ "yes" ] + default: False + choices: [ "yes", "no" ] quiet: description: - Run emerge in quiet mode (--quiet) required: false - default: null - choices: [ "yes" ] + default: False + choices: [ "yes", "no" ] verbose: description: - Run emerge in verbose mode (--verbose) required: false - default: null - choices: [ "yes" ] + default: False + choices: [ "yes", "no" ] sync: description: @@ -130,24 +136,50 @@ - If web, perform "emerge-webrsync" required: false default: null - choices: [ "yes", "web" ] + choices: [ "yes", "web", "no" ] getbinpkg: description: - Prefer packages specified at PORTAGE_BINHOST in make.conf required: false - default: null - choices: [ "yes" ] + default: False + choices: [ "yes", "no" ] usepkgonly: description: - Merge only binaries (no compiling). This sets getbinpkg=yes. required: false - deafult: null - choices: [ "yes" ] + default: False + choices: [ "yes", "no" ] + + keepgoing: + description: + - Continue as much as possible after an error. + required: false + default: False + choices: [ "yes", "no" ] + version_added: 2.3 + + jobs: + description: + - Specifies the number of packages to build simultaneously. 
+ required: false + default: None + type: int + version_added: 2.3 + + loadavg: + description: + - Specifies that no new builds should be started if there are + - other builds running and the load average is at least LOAD + required: false + default: None + type: float + version_added: 2.3 requirements: [ gentoolkit ] -author: +author: + - "William L Thomson Jr (@wltjr)" - "Yap Sok Ann (@sayap)" - "Andrew Udvare" notes: [] @@ -155,28 +187,46 @@ EXAMPLES = ''' # Make sure package foo is installed -- portage: package=foo state=present +- portage: + package: foo + state: present # Make sure package foo is not installed -- portage: package=foo state=absent +- portage: + package: foo + state: absent # Update package foo to the "best" version -- portage: package=foo update=yes +- portage: + package: foo + update: yes # Install package foo using PORTAGE_BINHOST setup -- portage: package=foo getbinpkg=yes +- portage: + package: foo + getbinpkg: yes # Re-install world from binary packages only and do not allow any compiling -- portage: package=@world usepkgonly=yes +- portage: + package: '@world' + usepkgonly: yes # Sync repositories and update world -- portage: package=@world update=yes deep=yes sync=yes +- portage: + package: '@world' + update: yes + deep: yes + sync: yes # Remove unneeded packages -- portage: depclean=yes +- portage: + depclean: yes # Remove package foo if it is not explicitly needed -- portage: package=foo state=absent depclean=yes +- portage: + package: foo + state: absent + depclean: yes ''' @@ -272,14 +322,24 @@ def emerge_packages(module, packages): 'getbinpkg': '--getbinpkg', 'usepkgonly': '--usepkgonly', 'usepkg': '--usepkg', + 'keepgoing': '--keep-going', } - for flag, arg in emerge_flags.iteritems(): + for flag, arg in emerge_flags.items(): if p[flag]: args.append(arg) if p['usepkg'] and p['usepkgonly']: module.fail_json(msg='Use only one of usepkg, usepkgonly') + emerge_flags = { + 'jobs': '--jobs=', + 'loadavg': '--load-average ', + } + + for flag, arg in emerge_flags.items(): + if p[flag] is not None: + args.append(arg + str(p[flag])) + cmd, (rc, out, err) = run_emerge(module, packages, *args) if rc != 0: module.fail_json( @@ -396,26 +456,29 @@ def run_emerge(module, packages, *args): def main(): module = AnsibleModule( argument_spec=dict( - package=dict(default=None, aliases=['name']), + package=dict(default=None, aliases=['name'], type='list'), state=dict( default=portage_present_states[0], choices=portage_present_states + portage_absent_states, ), - update=dict(default=None, choices=['yes']), - deep=dict(default=None, choices=['yes']), - newuse=dict(default=None, choices=['yes']), - changed_use=dict(default=None, choices=['yes']), - oneshot=dict(default=None, choices=['yes']), - noreplace=dict(default=None, choices=['yes']), - nodeps=dict(default=None, choices=['yes']), - onlydeps=dict(default=None, choices=['yes']), - depclean=dict(default=None, choices=['yes']), - quiet=dict(default=None, choices=['yes']), - verbose=dict(default=None, choices=['yes']), + update=dict(default=False, type='bool'), + deep=dict(default=False, type='bool'), + newuse=dict(default=False, type='bool'), + changed_use=dict(default=False, type='bool'), + oneshot=dict(default=False, type='bool'), + noreplace=dict(default=False, type='bool'), + nodeps=dict(default=False, type='bool'), + onlydeps=dict(default=False, type='bool'), + depclean=dict(default=False, type='bool'), + quiet=dict(default=False, type='bool'), + verbose=dict(default=False, type='bool'), sync=dict(default=None, choices=['yes',
'web']), - getbinpkg=dict(default=None, choices=['yes']), - usepkgonly=dict(default=None, choices=['yes']), - usepkg=dict(default=None, choices=['yes']), + getbinpkg=dict(default=False, type='bool'), + usepkgonly=dict(default=False, type='bool'), + usepkg=dict(default=False, type='bool'), + keepgoing=dict(default=False, type='bool'), + jobs=dict(default=None, type='int'), + loadavg=dict(default=None, type='float'), ), required_one_of=[['package', 'sync', 'depclean']], mutually_exclusive=[['nodeps', 'onlydeps'], ['quiet', 'verbose']], @@ -434,7 +497,7 @@ def main(): packages = [] if p['package']: - packages.extend(p['package'].split(',')) + packages.extend(p['package']) if p['depclean']: if packages and p['state'] not in portage_absent_states: @@ -454,4 +517,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/os/portinstall.py b/packaging/os/portinstall.py index b4e3044167e..ccd301e526a 100644 --- a/packaging/os/portinstall.py +++ b/packaging/os/portinstall.py @@ -19,6 +19,10 @@ # along with this software. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: portinstall @@ -48,17 +52,22 @@ EXAMPLES = ''' # Install package foo -- portinstall: name=foo state=present +- portinstall: + name: foo + state: present # Install package security/cyrus-sasl2-saslauthd -- portinstall: name=security/cyrus-sasl2-saslauthd state=present +- portinstall: + name: security/cyrus-sasl2-saslauthd + state: present # Remove packages foo and bar -- portinstall: name=foo,bar state=absent +- portinstall: + name: foo,bar + state: absent ''' -import json import shlex import os import sys @@ -204,4 +213,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/os/slackpkg.py b/packaging/os/slackpkg.py index 674de538efe..3c4ee4f62e2 100644 --- a/packaging/os/slackpkg.py +++ b/packaging/os/slackpkg.py @@ -22,6 +22,10 @@ # along with this software. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: slackpkg @@ -56,14 +60,19 @@ EXAMPLES = ''' # Install package foo -- slackpkg: name=foo state=present +- slackpkg: + name: foo + state: present # Remove packages foo and bar -- slackpkg: name=foo,bar state=absent +- slackpkg: + name: foo,bar + state: absent # Make sure that it is the most updated package -- slackpkg: name=foo state=latest - +- slackpkg: + name: foo + state: latest ''' @@ -196,4 +205,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/os/svr4pkg.py b/packaging/os/svr4pkg.py index 5d8bac17eaa..81409e3b2dd 100644 --- a/packaging/os/svr4pkg.py +++ b/packaging/os/svr4pkg.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . 
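As a small sanity check of the two-stage flag mapping added to portage's emerge_packages() above, the value-carrying flags are appended with their arguments like this (the parameter values here are made up for illustration):

p = {'jobs': 4, 'loadavg': 2.5}  # hypothetical module params
args = []
for flag, arg in {'jobs': '--jobs=', 'loadavg': '--load-average '}.items():
    if p[flag] is not None:
        # concatenate the option prefix and the stringified value
        args.append(arg + str(p[flag]))
print(args)  # ['--jobs=4', '--load-average 2.5']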
# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: svr4pkg @@ -75,19 +79,35 @@ EXAMPLES = ''' # Install a package from an already copied file -- svr4pkg: name=CSWcommon src=/tmp/cswpkgs.pkg state=present +- svr4pkg: + name: CSWcommon + src: /tmp/cswpkgs.pkg + state: present # Install a package directly from an http site -- svr4pkg: name=CSWpkgutil src=http://get.opencsw.org/now state=present zone=current +- svr4pkg: + name: CSWpkgutil + src: 'http://get.opencsw.org/now' + state: present + zone: current # Install a package with a response file -- svr4pkg: name=CSWggrep src=/tmp/third-party.pkg response_file=/tmp/ggrep.response state=present +- svr4pkg: + name: CSWggrep + src: /tmp/third-party.pkg + response_file: /tmp/ggrep.response + state: present # Ensure that a package is not installed. -- svr4pkg: name=SUNWgnome-sound-recorder state=absent +- svr4pkg: + name: SUNWgnome-sound-recorder + state: absent # Ensure that a category is not installed. -- svr4pkg: name=FIREFOX state=absent category=true +- svr4pkg: + name: FIREFOX + state: absent + category: true ''' @@ -225,9 +245,10 @@ def main(): else: result['changed'] = False + # rc will be none when the package already was installed and no action took place # Only return failed=False when the returncode is known to be good as there may be more # undocumented failure return codes - if rc not in (0, 2, 10, 20): + if rc not in (None, 0, 2, 10, 20): result['failed'] = True else: result['failed'] = False @@ -241,4 +262,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/packaging/os/swdepot.py b/packaging/os/swdepot.py index 157fa212c17..6ea7d1059be 100644 --- a/packaging/os/swdepot.py +++ b/packaging/os/swdepot.py @@ -21,6 +21,10 @@ import re import pipes +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: swdepot @@ -58,9 +62,19 @@ ''' EXAMPLES = ''' -- swdepot: name=unzip-6.0 state=installed depot=repository:/path -- swdepot: name=unzip state=latest depot=repository:/path -- swdepot: name=unzip state=absent +- swdepot: + name: unzip-6.0 + state: installed + depot: 'repository:/path' + +- swdepot: + name: unzip + state: latest + depot: 'repository:/path' + +- swdepot: + name: unzip + state: absent ''' def compare_package(version1, version2): @@ -147,7 +161,7 @@ def main(): if not rc: changed = True - msg = "Packaged installed" + msg = "Package installed" else: module.fail_json(name=name, msg=output, rc=rc) @@ -192,5 +206,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() - +if __name__ == '__main__': + main() diff --git a/packaging/os/urpmi.py b/packaging/os/urpmi.py index 7b7aaefbd1d..e995f1d4894 100644 --- a/packaging/os/urpmi.py +++ b/packaging/os/urpmi.py @@ -19,6 +19,10 @@ # along with this software. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: urpmi @@ -44,9 +48,9 @@ required: false default: no choices: [ "yes", "no" ] - no-suggests: + no-recommends: description: - - Corresponds to the C(--no-suggests) option for I(urpmi). + - Corresponds to the C(--no-recommends) option for I(urpmi). 
required: false default: yes choices: [ "yes", "no" ] @@ -63,17 +67,28 @@ EXAMPLES = ''' # install package foo -- urpmi: pkg=foo state=present +- urpmi: + pkg: foo + state: present + # remove package foo -- urpmi: pkg=foo state=absent +- urpmi: + pkg: foo + state: absent + # description: remove packages foo and bar -- urpmi: pkg=foo,bar state=absent +- urpmi: + pkg: foo,bar + state: absent + # description: update the package database (urpmi.update -a -q) and install bar (bar will be updated if a newer version exists) -- urpmi: name=bar, state=present, update_cache=yes +- urpmi: + name: bar + state: present + update_cache: yes ''' -import json import shlex import os import sys @@ -130,7 +145,7 @@ def remove_packages(module, packages): module.exit_json(changed=False, msg="package(s) already absent") -def install_packages(module, pkgspec, force=True, no_suggests=True): +def install_packages(module, pkgspec, force=True, no_recommends=True): packages = "" for package in pkgspec: @@ -138,17 +153,17 @@ packages += "'%s' " % package if len(packages) != 0: - if no_suggests: - no_suggests_yes = '--no-suggests' + if no_recommends: + no_recommends_yes = '--no-recommends' else: - no_suggests_yes = '' + no_recommends_yes = '' if force: force_yes = '--force' else: force_yes = '' - cmd = ("%s --auto %s --quiet %s %s" % (URPMI_PATH, force_yes, no_suggests_yes, packages)) + cmd = ("%s --auto %s --quiet %s %s" % (URPMI_PATH, force_yes, no_recommends_yes, packages)) rc, out, err = module.run_command(cmd) @@ -168,12 +183,12 @@ def install_packages(module, pkgspec, force=True, no_suggests=True): def main(): module = AnsibleModule( - argument_spec = dict( - state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']), - update_cache = dict(default=False, aliases=['update-cache'], type='bool'), - force = dict(default=True, type='bool'), - no_suggests = dict(default=True, aliases=['no-suggests'], type='bool'), - package = dict(aliases=['pkg', 'name'], required=True))) + argument_spec = dict( + state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']), + update_cache = dict(default=False, aliases=['update-cache'], type='bool'), + force = dict(default=True, type='bool'), + no_recommends = dict(default=True, aliases=['no-recommends'], type='bool'), + package = dict(aliases=['pkg', 'name'], required=True))) if not os.path.exists(URPMI_PATH): @@ -182,7 +197,7 @@ def main(): p = module.params force_yes = p['force'] - no_suggest_yes = p['no_suggests'] + no_recommends_yes = p['no_recommends'] if p['update_cache']: update_package_db(module) @@ -190,7 +205,7 @@ packages = p['package'].split(',') if p['state'] in [ 'installed', 'present' ]: - install_packages(module, packages, force_yes, no_suggest_yes) + install_packages(module, packages, force_yes, no_recommends_yes) elif p['state'] in [ 'removed', 'absent' ]: remove_packages(module, packages) @@ -198,4 +213,5 @@ # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/os/xbps.py b/packaging/os/xbps.py new file mode 100644 index 00000000000..0bfe678ab89 --- /dev/null +++ b/packaging/os/xbps.py @@ -0,0 +1,303 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2016 Dino Occhialini +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public
License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: xbps +short_description: Manage packages with XBPS +description: + - Manage packages with the XBPS package manager. +author: + - "Dino Occhialini (@dinoocch)" + - "Michael Aldridge (@the-maldridge)" +version_added: "2.3" +options: + name: + description: + - Name of the package to install, upgrade, or remove. + required: false + default: null + state: + description: + - Desired state of the package. + required: false + default: "present" + choices: ["present", "absent", "latest"] + recurse: + description: + - When removing a package, also remove its dependencies, provided + that they are not required by other packages and were not + explicitly installed by a user. + required: false + default: no + choices: ["yes", "no"] + update_cache: + description: + - Whether or not to refresh the master package lists. This can be + run as part of a package installation or as a separate step. + required: false + default: yes + choices: ["yes", "no"] + upgrade: + description: + - Whether or not to upgrade whole system + required: false + default: no + choices: ["yes", "no"] +''' + +EXAMPLES = ''' +# Install package foo +- xbps: name=foo state=present +# Upgrade package foo +- xbps: name=foo state=latest update_cache=yes +# Remove packages foo and bar +- xbps: name=foo,bar state=absent +# Recursively remove package foo +- xbps: name=foo state=absent recurse=yes +# Update package cache +- xbps: update_cache=yes +# Upgrade packages +- xbps: upgrade=yes +''' + +RETURN = ''' +msg: + description: Message about results + returned: success + type: string + sample: "System Upgraded" +packages: + description: Packages that are affected/would be affected + type: list + sample: ["ansible"] +''' + + +import os + +from ansible.module_utils.basic import AnsibleModule + + +def is_installed(xbps_output): + """Returns package install state""" + return bool(len(xbps_output)) + + +def query_package(module, xbps_path, name, state="present"): + """Returns Package info""" + if state == "present": + lcmd = "%s %s" % (xbps_path['query'], name) + lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) + if not is_installed(lstdout): + # package is not installed locally + return False, False + + rcmd = "%s -Sun" % (xbps_path['install']) + rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) + if rrc == 0 or rrc == 17: + """Return True to indicate that the package is installed locally, + and the result of the version number comparison to determine if the + package is up-to-date""" + return True, name not in rstdout + + return False, False + + +def update_package_db(module, xbps_path): + """Returns True if update_package_db changed""" + cmd = "%s -S" % (xbps_path['install']) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="Could not update package db") + if "avg rate" in stdout: + return True + else: + return False + + +def upgrade(module, xbps_path): 
+ """Returns true is full upgrade succeeds""" + cmdupgrade = "%s -uy" % (xbps_path['install']) + cmdneedupgrade = "%s -un" % (xbps_path['install']) + + rc, stdout, stderr = module.run_command(cmdneedupgrade, check_rc=False) + if rc == 0: + if(len(stdout.splitlines()) == 0): + module.exit_json(changed=False, msg='Nothing to upgrade') + else: + rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False) + if rc == 0: + module.exit_json(changed=True, msg='System upgraded') + else: + module.fail_json(msg="Could not upgrade") + else: + module.fail_json(msg="Could not upgrade") + + +def remove_packages(module, xbps_path, packages): + """Returns true if package removal succeeds""" + changed_packages = [] + # Using a for loop incase of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + installed, updated = query_package(module, xbps_path, package) + if not installed: + continue + + cmd = "%s -y %s" % (xbps_path['remove'], package) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="failed to remove %s" % (package)) + + changed_packages.append(package) + + if len(changed_packages) > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % + len(changed_packages), packages=changed_packages) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, xbps_path, state, packages): + """Returns true if package install succeeds.""" + toInstall = [] + for i, package in enumerate(packages): + """If the package is installed and state == present or state == latest + and is up-to-date then skip""" + installed, updated = query_package(module, xbps_path, package) + if installed and (state == 'present' or + (state == 'latest' and updated)): + continue + + toInstall.append(package) + + if len(toInstall) == 0: + module.exit_json(changed=False, msg="Nothing to Install") + + cmd = "%s -y %s" % (xbps_path['install'], " ".join(toInstall)) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0 and not (state == 'latest' and rc == 17): + module.fail_json(msg="failed to install %s" % (package)) + + module.exit_json(changed=True, msg="installed %s package(s)" + % (len(toInstall)), + packages=toInstall) + + module.exit_json(changed=False, msg="package(s) already installed", + packages=[]) + + +def check_packages(module, xbps_path, packages, state): + """Returns change status of command""" + would_be_changed = [] + for package in packages: + installed, updated = query_package(module, xbps_path, package) + if ((state in ["present", "latest"] and not installed) or + (state == "absent" and installed) or + (state == "latest" and not updated)): + would_be_changed.append(package) + if would_be_changed: + if state == "absent": + state = "removed" + module.exit_json(changed=True, msg="%s package(s) would be %s" % ( + len(would_be_changed), state), + packages=would_be_changed) + else: + module.exit_json(changed=False, msg="package(s) already %s" % state, + packages=[]) + + +def main(): + """Returns, calling appropriate command""" + + module = AnsibleModule( + argument_spec=dict( + name=dict(default=None, aliases=['pkg', 'package'], type='list'), + state=dict(default='present', choices=['present', 'installed', + 'latest', 'absent', + 'removed']), + recurse=dict(default=False, type='bool'), + force=dict(default=False, type='bool'), + upgrade=dict(default=False, type='bool'), + update_cache=dict(default=True, 
aliases=['update-cache'], + type='bool') + ), + required_one_of=[['name', 'update_cache', 'upgrade']], + supports_check_mode=True) + + xbps_path = dict() + xbps_path['install'] = module.get_bin_path('xbps-install', True) + xbps_path['query'] = module.get_bin_path('xbps-query', True) + xbps_path['remove'] = module.get_bin_path('xbps-remove', True) + + if not os.path.exists(xbps_path['install']): + module.fail_json(msg="cannot find xbps, in path %s" + % (xbps_path['install'])) + + p = module.params + + # normalize the state parameter + if p['state'] in ['present', 'installed']: + p['state'] = 'present' + elif p['state'] in ['absent', 'removed']: + p['state'] = 'absent' + + if p["update_cache"] and not module.check_mode: + changed = update_package_db(module, xbps_path) + if p['name'] is None and not p['upgrade']: + if changed: + module.exit_json(changed=True, + msg='Updated the package master lists') + else: + module.exit_json(changed=False, + msg='Package list already up to date') + + if (p['update_cache'] and module.check_mode and not + (p['name'] or p['upgrade'])): + module.exit_json(changed=True, + msg='Would have updated the package cache') + + if p['upgrade']: + upgrade(module, xbps_path) + + if p['name']: + pkgs = p['name'] + + if module.check_mode: + check_packages(module, xbps_path, pkgs, p['state']) + + if p['state'] in ['present', 'latest']: + install_packages(module, xbps_path, p['state'], pkgs) + elif p['state'] == 'absent': + remove_packages(module, xbps_path, pkgs) + + +if __name__ == "__main__": + main() diff --git a/packaging/os/yum_repository.py b/packaging/os/yum_repository.py new file mode 100644 index 00000000000..1d00d26f682 --- /dev/null +++ b/packaging/os/yum_repository.py @@ -0,0 +1,761 @@ +#!/usr/bin/python +# encoding: utf-8 + +# (c) 2015-2016, Jiri Tyr +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +import os +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.six.moves import configparser + + +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: yum_repository +author: Jiri Tyr (@jtyr) +version_added: '2.1' +short_description: Add and remove YUM repositories +description: + - Add or remove YUM repositories in RPM-based Linux distributions. + +options: + async: + required: false + choices: ['yes', 'no'] + default: 'yes' + description: + - If set to C(yes) Yum will download packages and metadata from this + repo in parallel, if possible. + bandwidth: + required: false + default: 0 + description: + - Maximum available network bandwidth in bytes/second. Used with the + I(throttle) option. + - If I(throttle) is a percentage and bandwidth is C(0) then bandwidth + throttling will be disabled. If I(throttle) is expressed as a data rate + (bytes/sec) then this option is ignored. Default is C(0) (no bandwidth + throttling). 
+ baseurl: + required: false + default: null + description: + - URL to the directory where the yum repository's 'repodata' directory + lives. + - This or the I(mirrorlist) parameter is required if I(state) is set to + C(present). + cost: + required: false + default: 1000 + description: + - Relative cost of accessing this repository. Useful for weighing one + repo's packages as greater/less than any other. + deltarpm_metadata_percentage: + required: false + default: 100 + description: + - When the relative size of deltarpm metadata vs pkgs is larger than + this, deltarpm metadata is not downloaded from the repo. Note that you + can give values over C(100), so C(200) means that the metadata is + required to be half the size of the packages. Use C(0) to turn off + this check, and always download metadata. + deltarpm_percentage: + required: false + default: 75 + description: + - When the relative size of delta vs pkg is larger than this, delta is + not used. Use C(0) to turn off delta rpm processing. Local repositories + (with file:// I(baseurl)) have delta rpms turned off by default. + description: + required: false + default: null + description: + - A human readable string describing the repository. + - This parameter is only required if I(state) is set to C(present). + enabled: + required: false + choices: ['yes', 'no'] + default: 'yes' + description: + - This tells yum whether or not to use this repository. + enablegroups: + required: false + choices: ['yes', 'no'] + default: 'yes' + description: + - Determines whether yum will allow the use of package groups for this + repository. + exclude: + required: false + default: null + description: + - List of packages to exclude from updates or installs. This should be a + space separated list. Shell globs using wildcards (e.g. C(*) and C(?)) + are allowed. + - The list can also be a regular YAML array. + failovermethod: + required: false + choices: [roundrobin, priority] + default: roundrobin + description: + - C(roundrobin) randomly selects a URL out of the list of URLs to start + with and proceeds through each of them as it encounters a failure + contacting the host. + - C(priority) starts from the first I(baseurl) listed and reads through + them sequentially. + file: + required: false + default: null + description: + - File to use to save the repo in. Defaults to the value of I(name). + gpgcakey: + required: false + default: null + description: + - A URL pointing to the ASCII-armored CA key file for the repository. + gpgcheck: + required: false + choices: ['yes', 'no'] + default: 'no' + description: + - Tells yum whether or not it should perform a GPG signature check on + packages. + gpgkey: + required: false + default: null + description: + - A URL pointing to the ASCII-armored GPG key file for the repository. + http_caching: + required: false + choices: [all, packages, none] + default: all + description: + - Determines how upstream HTTP caches are instructed to handle any HTTP + downloads that Yum does. + - C(all) means that all HTTP downloads should be cached. + - C(packages) means that only RPM package downloads should be cached (but + not repository metadata downloads). + - C(none) means that no HTTP downloads should be cached. + include: + required: false + default: null + description: + - Include external configuration file. Both a local path and a URL are + supported. Configuration file will be inserted at the position of the + I(include=) line. Included files may contain further include lines.
+ Yum will abort with an error if an inclusion loop is detected. + includepkgs: + required: false + default: null + description: + - List of packages you want to use only from this repository. This should be + a space separated list. Shell globs using wildcards (e.g. C(*) and C(?)) + are allowed. Substitution variables (e.g. C($releasever)) are honored + here. + - The list can also be a regular YAML array. + ip_resolve: + required: false + choices: [4, 6, IPv4, IPv6, whatever] + default: whatever + description: + - Determines how yum resolves host names. + - C(4) or C(IPv4) - resolve to IPv4 addresses only. + - C(6) or C(IPv6) - resolve to IPv6 addresses only. + keepalive: + required: false + choices: ['yes', 'no'] + default: 'no' + description: + - This tells yum whether or not HTTP/1.1 keepalive should be used with + this repository. This can improve transfer speeds by using one + connection when downloading multiple files from a repository. + keepcache: + required: false + choices: ['0', '1'] + default: '1' + description: + - Either C(1) or C(0). Determines whether or not yum keeps the cache of + headers and packages after successful installation. + metadata_expire: + required: false + default: 21600 + description: + - Time (in seconds) after which the metadata will expire. + - Default value is 6 hours. + metadata_expire_filter: + required: false + choices: [never, 'read-only:past', 'read-only:present', 'read-only:future'] + default: 'read-only:present' + description: + - Filter the I(metadata_expire) time, allowing a trade of speed for + accuracy if a command doesn't require it. Each yum command can specify + that it requires a certain level of timeliness quality from the remote + repos, from "I'm about to install/upgrade, so this better be current" + to "Anything that's available is good enough". + - C(never) - Nothing is filtered, always obey I(metadata_expire). + - C(read-only:past) - Commands that only care about past information are + filtered from metadata expiring. E.g. I(yum history) info (if history + needs to look up anything about a previous transaction, then by + definition the remote package was available in the past). + - C(read-only:present) - Commands that are balanced between past and + future. E.g. I(yum list yum). + - C(read-only:future) - Commands that are likely to result in running + other commands which will require the latest metadata. E.g. + I(yum check-update). + - Note that this option does not override "yum clean expire-cache". + metalink: + required: false + default: null + description: + - Specifies a URL to a metalink file for the repomd.xml, a list of + mirrors for the entire repository is generated by converting the + mirrors for the repomd.xml file to a I(baseurl). + mirrorlist: + required: false + default: null + description: + - Specifies a URL to a file containing a list of baseurls. + - This or the I(baseurl) parameter is required if I(state) is set to + C(present). + mirrorlist_expire: + required: false + default: 21600 + description: + - Time (in seconds) after which the locally cached mirrorlist will + expire. + - Default value is 6 hours. + name: + required: true + description: + - Unique repository ID. + - This parameter is only required if I(state) is set to C(present) or + C(absent). + params: + required: false + default: null + description: + - Option used to allow the user to overwrite any of the other options. + To remove an option, set the value of the option to C(null).
+ password: + required: false + default: null + description: + - Password to use with the username for basic authentication. + priority: + required: false + default: 99 + description: + - Enforce ordered protection of repositories. The value is an integer + from 1 to 99. + - This option only works if the YUM Priorities plugin is installed. + protect: + required: false + choices: ['yes', 'no'] + default: 'no' + description: + - Protect packages from updates from other repositories. + proxy: + required: false + default: null + description: + - URL to the proxy server that yum should use. Set to C(_none_) to + disable the global proxy setting. + proxy_password: + required: false + default: null + description: + - Password for this proxy. + proxy_username: + required: false + default: null + description: + - Username to use for proxy. + repo_gpgcheck: + required: false + choices: ['yes', 'no'] + default: 'no' + description: + - This tells yum whether or not it should perform a GPG signature check + on the repodata from this repository. + reposdir: + required: false + default: /etc/yum.repos.d + description: + - Directory where the C(.repo) files will be stored. + retries: + required: false + default: 10 + description: + - Set the number of times any attempt to retrieve a file should retry + before returning an error. Setting this to C(0) makes yum try forever. + s3_enabled: + required: false + choices: ['yes', 'no'] + default: 'no' + description: + - Enables support for S3 repositories. + - This option only works if the YUM S3 plugin is installed. + skip_if_unavailable: + required: false + choices: ['yes', 'no'] + default: 'no' + description: + - If set to C(yes) yum will continue running if this repository cannot be + contacted for any reason. This should be set carefully as all repos are + consulted for any given command. + ssl_check_cert_permissions: + required: false + choices: ['yes', 'no'] + default: 'no' + description: + - Whether yum should check the permissions on the paths for the + certificates on the repository (both remote and local). + - If we can't read any of the files then yum will force + I(skip_if_unavailable) to be C(yes). This is most useful for non-root + processes which use yum on repos that have client cert files which are + readable only by root. + sslcacert: + required: false + default: null + description: + - Path to the directory containing the databases of the certificate + authorities yum should use to verify SSL certificates. + sslclientcert: + required: false + default: null + description: + - Path to the SSL client certificate yum should use to connect to + repos/remote sites. + sslclientkey: + required: false + default: null + description: + - Path to the SSL client key yum should use to connect to repos/remote + sites. + sslverify: + required: false + choices: ['yes', 'no'] + default: 'yes' + description: + - Defines whether yum should verify SSL certificates/hosts at all. + state: + required: false + choices: [absent, present] + default: present + description: + - State of the repo file. + throttle: + required: false + default: null + description: + - Enable bandwidth throttling for downloads. + - This option can be expressed as an absolute data rate in bytes/sec. An + SI prefix (k, M or G) may be appended to the bandwidth value. + timeout: + required: false + default: 30 + description: + - Number of seconds to wait for a connection before timing out.
+ ui_repoid_vars: + required: false + default: releasever basearch + description: + - When a repository id is displayed, append these yum variables to the + string if they are used in the I(baseurl)/etc. Variables are appended + in the order listed (and found). + username: + required: false + default: null + description: + - Username to use for basic authentication to a repo or really any url. + +extends_documentation_fragment: + - files + +notes: + - All comments will be removed if modifying an existing repo file. + - Section order is preserved in an existing repo file. + - Parameters in a section are ordered alphabetically in an existing repo + file. + - The repo file will be automatically deleted if it contains no repository. +''' + +EXAMPLES = ''' +- name: Add repository + yum_repository: + name: epel + description: EPEL YUM repo + baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/ + +- name: Add multiple repositories into the same file (1/2) + yum_repository: + name: epel + description: EPEL YUM repo + file: external_repos + baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/ + gpgcheck: no + +- name: Add multiple repositories into the same file (2/2) + yum_repository: + name: rpmforge + description: RPMforge YUM repo + file: external_repos + baseurl: http://apt.sw.be/redhat/el7/en/$basearch/rpmforge + mirrorlist: http://mirrorlist.repoforge.org/el7/mirrors-rpmforge + enabled: no + +- name: Remove repository + yum_repository: + name: epel + state: absent + +- name: Remove repository from a specific repo file + yum_repository: + name: epel + file: external_repos + state: absent + +# +# Allow to overwrite the yum_repository parameters by defining the parameters +# as a variable in the defaults or vars file: +# +# my_role_somerepo_params: +# # Disable GPG checking +# gpgcheck: no +# # Remove the gpgkey option +# gpgkey: null +# +- name: Add Some repo + yum_repository: + name: somerepo + description: Some YUM repo + baseurl: http://server.com/path/to/the/repo + gpgkey: http://server.com/keys/somerepo.pub + gpgcheck: yes + params: "{{ my_role_somerepo_params }}" +''' + +RETURN = ''' +repo: + description: repository name + returned: success + type: string + sample: "epel" +state: + description: state of the target, after execution + returned: success + type: string + sample: "present" +''' + + +class YumRepo(object): + # Class global variables + module = None + params = None + section = None + repofile = configparser.RawConfigParser() + + # List of parameters which will be allowed in the repo file output + allowed_params = [ + 'async', + 'bandwidth', + 'baseurl', + 'cost', + 'deltarpm_metadata_percentage', + 'deltarpm_percentage', + 'enabled', + 'enablegroups', + 'exclude', + 'failovermethod', + 'gpgcakey', + 'gpgcheck', + 'gpgkey', + 'http_caching', + 'include', + 'includepkgs', + 'ip_resolve', + 'keepalive', + 'keepcache', + 'metadata_expire', + 'metadata_expire_filter', + 'metalink', + 'mirrorlist', + 'mirrorlist_expire', + 'name', + 'password', + 'priority', + 'protect', + 'proxy', + 'proxy_password', + 'proxy_username', + 'repo_gpgcheck', + 'retries', + 's3_enabled', + 'skip_if_unavailable', + 'sslcacert', + 'ssl_check_cert_permissions', + 'sslclientcert', + 'sslclientkey', + 'sslverify', + 'throttle', + 'timeout', + 'ui_repoid_vars', + 'username'] + + # List of parameters which can be a list + list_params = ['exclude', 'includepkgs'] + + def __init__(self, module): + # To be able to use fail_json + self.module = module + # Shortcut for 
the params + self.params = self.module.params + # Section is always the repoid + self.section = self.params['repoid'] + + # Check if repo directory exists + repos_dir = self.params['reposdir'] + if not os.path.isdir(repos_dir): + self.module.fail_json( + msg="Repo directory '%s' does not exist." % repos_dir) + + # Set dest; also used to set dest parameter for the FS attributes + self.params['dest'] = os.path.join( + repos_dir, "%s.repo" % self.params['file']) + + # Read the repo file if it exists + if os.path.isfile(self.params['dest']): + self.repofile.read(self.params['dest']) + + def add(self): + # Remove already existing repo and create a new one + if self.repofile.has_section(self.section): + self.repofile.remove_section(self.section) + + # Add section + self.repofile.add_section(self.section) + + # Baseurl/mirrorlist is not required because for removal we need only + # the repo name. This is why we check if the baseurl/mirrorlist is + # defined. + if (self.params['baseurl'], self.params['mirrorlist']) == (None, None): + self.module.fail_json( + msg='Parameter "baseurl" or "mirrorlist" is required for ' + 'adding a new repo.') + + # Set options + for key, value in sorted(self.params.items()): + if key in self.list_params and isinstance(value, list): + # Join items into one string for specific parameters + value = ' '.join(value) + elif isinstance(value, bool): + # Convert boolean value to integer + value = int(value) + + # Set the value only if it was defined (default is None) + if value is not None and key in self.allowed_params: + self.repofile.set(self.section, key, value) + + def save(self): + if len(self.repofile.sections()): + # Write data into the file + try: + fd = open(self.params['dest'], 'w') + except IOError: + e = get_exception() + self.module.fail_json( + msg="Cannot open repo file %s." % self.params['dest'], + details=str(e)) + + self.repofile.write(fd) + + try: + fd.close() + except IOError: + e = get_exception() + self.module.fail_json( + msg="Cannot write repo file %s." % self.params['dest'], + details=str(e)) + else: + # Remove the file if there are no repos + try: + os.remove(self.params['dest']) + except OSError: + e = get_exception() + self.module.fail_json( + msg=( + "Cannot remove empty repo file %s."
% + self.params['dest']), + details=str(e)) + + def remove(self): + # Remove section if exists + if self.repofile.has_section(self.section): + self.repofile.remove_section(self.section) + + def dump(self): + repo_string = "" + + # Compose the repo file + for section in sorted(self.repofile.sections()): + repo_string += "[%s]\n" % section + + for key, value in sorted(self.repofile.items(section)): + repo_string += "%s = %s\n" % (key, value) + + repo_string += "\n" + + return repo_string + + +def main(): + # Module settings + module = AnsibleModule( + argument_spec=dict( + async=dict(type='bool'), + bandwidth=dict(), + baseurl=dict(), + cost=dict(), + deltarpm_metadata_percentage=dict(), + deltarpm_percentage=dict(), + description=dict(), + enabled=dict(type='bool'), + enablegroups=dict(type='bool'), + exclude=dict(), + failovermethod=dict(choices=['roundrobin', 'priority']), + file=dict(), + gpgcakey=dict(), + gpgcheck=dict(type='bool'), + gpgkey=dict(), + http_caching=dict(choices=['all', 'packages', 'none']), + include=dict(), + includepkgs=dict(), + ip_resolve=dict(choices=['4', '6', 'IPv4', 'IPv6', 'whatever']), + keepalive=dict(type='bool'), + keepcache=dict(choices=['0', '1']), + metadata_expire=dict(), + metadata_expire_filter=dict( + choices=[ + 'never', + 'read-only:past', + 'read-only:present', + 'read-only:future']), + metalink=dict(), + mirrorlist=dict(), + mirrorlist_expire=dict(), + name=dict(required=True), + params=dict(type='dict'), + password=dict(no_log=True), + priority=dict(), + protect=dict(type='bool'), + proxy=dict(), + proxy_password=dict(no_log=True), + proxy_username=dict(), + repo_gpgcheck=dict(type='bool'), + reposdir=dict(default='/etc/yum.repos.d', type='path'), + retries=dict(), + s3_enabled=dict(type='bool'), + skip_if_unavailable=dict(type='bool'), + sslcacert=dict(), + ssl_check_cert_permissions=dict(type='bool'), + sslclientcert=dict(), + sslclientkey=dict(), + sslverify=dict(type='bool'), + state=dict(choices=['present', 'absent'], default='present'), + throttle=dict(), + timeout=dict(), + ui_repoid_vars=dict(), + username=dict(), + ), + add_file_common_args=True, + supports_check_mode=True, + ) + + # Update module parameters by user's parameters if defined + if 'params' in module.params and isinstance(module.params['params'], dict): + module.params.update(module.params['params']) + # Remove the params + module.params.pop('params', None) + + name = module.params['name'] + state = module.params['state'] + + # Check if required parameters are present + if state == 'present': + if ( + module.params['baseurl'] is None and + module.params['mirrorlist'] is None): + module.fail_json( + msg="Parameter 'baseurl' or 'mirrorlist' is required.") + if module.params['description'] is None: + module.fail_json( + msg="Parameter 'description' is required.") + + # Rename "name" and "description" to ensure correct key sorting + module.params['repoid'] = module.params['name'] + module.params['name'] = module.params['description'] + del module.params['description'] + + # Define repo file name if it doesn't exist + if module.params['file'] is None: + module.params['file'] = module.params['repoid'] + + # Instantiate the YumRepo object + yumrepo = YumRepo(module) + + # Get repo status before change + diff = { + 'before_header': yumrepo.params['dest'], + 'before': yumrepo.dump(), + 'after_header': yumrepo.params['dest'], + 'after': '' + } + + # Perform action depending on the state + if state == 'present': + yumrepo.add() + elif state == 'absent': + yumrepo.remove() + + # Get 
repo status after change + diff['after'] = yumrepo.dump() + + # Compare repo states + changed = diff['before'] != diff['after'] + + # Save the file only if not in check mode and if there was a change + if not module.check_mode and changed: + yumrepo.save() + + # Change file attributes if needed + if os.path.isfile(module.params['dest']): + file_args = module.load_file_common_arguments(module.params) + changed = module.set_fs_attributes_if_different(file_args, changed) + + # Print status of the change + module.exit_json(changed=changed, repo=name, state=state, diff=diff) + + +if __name__ == '__main__': + main() diff --git a/packaging/os/zypper.py b/packaging/os/zypper.py index 5cf2f742f3c..837a7ef4774 100644 --- a/packaging/os/zypper.py +++ b/packaging/os/zypper.py @@ -26,14 +26,22 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from xml.dom.minidom import parseString as parseXML import re +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: zypper author: - "Patrick Callahan (@dirtyharrycallahan)" - "Alexander Gubin (@alxgu)" + - "Thomas O'Donnell (@andytom)" + - "Robin Roth (@robinro)" + - "Andrii Radyk (@AnderEnder)" version_added: "1.2" short_description: Manage packages on SUSE and openSUSE description: @@ -41,7 +49,10 @@ options: name: description: - - package name or package specifier with version C(name) or C(name-1.0). You can also pass a url or a local path to a rpm file. + - Package name C(name) or package specifier. + - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to update the package within the version range given. + - You can also pass a url or a local path to a rpm file. + - When using state=latest, this can be '*', which updates all installed packages. required: true aliases: [ 'pkg' ] state: @@ -56,7 +67,7 @@ description: - The type of package to be operated on. required: false - choices: [ package, patch, pattern, product, srcpackage ] + choices: [ package, patch, pattern, product, srcpackage, application ] default: "package" version_added: "2.0" disable_gpg_check: @@ -67,198 +78,343 @@ required: false default: "no" choices: [ "yes", "no" ] - aliases: [] disable_recommends: version_added: "1.8" description: - - Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (C(yes)) modifies zypper's default behavior; C(no) does install recommended packages. + - Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (C(yes)) modifies zypper's default behavior; C(no) does install recommended packages. required: false default: "yes" choices: [ "yes", "no" ] + force: + version_added: "2.2" + description: + - Adds C(--force) option to I(zypper). Allows to downgrade packages and change vendor or architecture. + required: false + default: "no" + choices: [ "yes", "no" ] + update_cache: + version_added: "2.2" + description: + - Run the equivalent of C(zypper refresh) before the operation. + required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [ "refresh" ] + oldpackage: + version_added: "2.2" + description: + - Adds C(--oldpackage) option to I(zypper). Allows to downgrade packages with less side-effects than force. This is implied as soon as a version is specified as part of the package name. 
+ required: false + default: "no" + choices: [ "yes", "no" ] -notes: [] # informational: requirements for nodes -requirements: [ zypper, rpm ] -author: Patrick Callahan +requirements: + - "zypper >= 1.0 # included in openSuSE >= 11.1 or SuSE Linux Enterprise Server/Desktop >= 11.0" + - python-xml + - rpm ''' EXAMPLES = ''' # Install "nmap" -- zypper: name=nmap state=present +- zypper: + name: nmap + state: present # Install apache2 with recommended packages -- zypper: name=apache2 state=present disable_recommends=no +- zypper: + name: apache2 + state: present + disable_recommends: no + +# Apply a given patch +- zypper: + name: openSUSE-2016-128 + state: present + type: patch # Remove the "nmap" package -- zypper: name=nmap state=absent +- zypper: + name: nmap + state: absent # Install the nginx rpm from a remote repo -- zypper: name=http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm state=present +- zypper: + name: 'http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm' + state: present # Install local rpm file -- zypper: name=/tmp/fancy-software.rpm state=present +- zypper: + name: /tmp/fancy-software.rpm + state: present + +# Update all packages +- zypper: + name: '*' + state: latest + +# Apply all available patches +- zypper: + name: '*' + state: latest + type: patch + +# Refresh repositories and update package "openssl" +- zypper: + name: openssl + state: present + update_cache: yes + +# Install specific version (possible comparisons: <, >, <=, >=, =) +- zypper: + name: 'docker>=1.10' + state: installed ''' -# Function used for getting zypper version -def zypper_version(module): - """Return (rc, message) tuple""" - cmd = ['/usr/bin/zypper', '-V'] - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - if rc == 0: - return rc, stdout - else: - return rc, stderr -# Function used for getting versions of currently installed packages. -def get_current_version(m, packages): - cmd = ['/bin/rpm', '-q', '--qf', '%{NAME} %{VERSION}-%{RELEASE}\n'] - cmd.extend(packages) +def split_name_version(name): + """splits off the package name and desired version - rc, stdout, stderr = m.run_command(cmd, check_rc=False) + example formats: + - docker>=1.10 + - apache=2.4 + + Allowed version specifiers: <, >, <=, >=, = + Allowed version format: [0-9.-]* + + Also allows a prefix indicating remove "-", "~" or install "+" + """ + + prefix = '' + if name[0] in ['-', '~', '+']: + prefix = name[0] + name = name[1:] - current_version = {} - rpmoutput_re = re.compile('^(\S+) (\S+)$') - - for stdoutline in stdout.splitlines(): - match = rpmoutput_re.match(stdoutline) - if match == None: - return None - package = match.group(1) - version = match.group(2) - current_version[package] = version - - for package in packages: - if package not in current_version: - print package + ' was not returned by rpm \n' - return None - - return current_version - - -# Function used to find out if a package is currently installed. 
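The specifier grammar documented in split_name_version() above is easiest to see in action. The following is a minimal standalone sketch, not part of the patch, that copies the same prefix handling and regular expression to show what the helper returns for typical inputs:

```python
import re

# Same pattern as split_name_version() above: an optional comparison
# operator followed by a version made of digits, dots and dashes.
version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$')

def demo(name):
    # optional leading "-"/"~" (remove) or "+" (install) prefix
    prefix = ''
    if name[0] in ['-', '~', '+']:
        prefix = name[0]
        name = name[1:]
    pkg, version = version_check.match(name).groups()
    return prefix, pkg, version

assert demo('docker>=1.10') == ('', 'docker', '>=1.10')
assert demo('apache=2.4') == ('', 'apache', '=2.4')
assert demo('-exim') == ('-', 'exim', None)
assert demo('nmap') == ('', 'nmap', None)
```

Note that '<=' and '>=' still match even though '<' and '>' come first in the alternation: the trailing '$' forces the regex engine to backtrack into the longer operator.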
-def get_package_state(m, packages): - for i in range(0, len(packages)): - # Check state of a local rpm-file - if ".rpm" in packages[i]: - # Check if rpm file is available - package = packages[i] - if not os.path.isfile(package) and not '://' in package: - stderr = "No Package file matching '%s' found on system" % package - m.fail_json(msg=stderr) - # Get packagename from rpm file - cmd = ['/bin/rpm', '--query', '--qf', '%{NAME}', '--package'] - cmd.append(package) - rc, stdout, stderr = m.run_command(cmd, check_rc=False) - packages[i] = stdout - - cmd = ['/bin/rpm', '--query', '--qf', 'package %{NAME} is installed\n'] + version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$') + try: + reres = version_check.match(name) + name, version = reres.groups() + return prefix, name, version + except: + return prefix, name, None + + +def get_want_state(m, names, remove=False): + packages_install = {} + packages_remove = {} + urls = [] + for name in names: + if '://' in name or name.endswith('.rpm'): + urls.append(name) + else: + prefix, pname, version = split_name_version(name) + if prefix in ['-', '~']: + packages_remove[pname] = version + elif prefix == '+': + packages_install[pname] = version + else: + if remove: + packages_remove[pname] = version + else: + packages_install[pname] = version + return packages_install, packages_remove, urls + + +def get_installed_state(m, packages): + "get installed state of packages" + + cmd = get_cmd(m, 'search') + cmd.extend(['--match-exact', '--details', '--installed-only']) cmd.extend(packages) + return parse_zypper_xml(m, cmd, fail_not_found=False)[0] + +def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None): rc, stdout, stderr = m.run_command(cmd, check_rc=False) - installed_state = {} - rpmoutput_re = re.compile('^package (\S+) (.*)$') - for stdoutline in stdout.splitlines(): - match = rpmoutput_re.match(stdoutline) - if match == None: - return None - package = match.group(1) - result = match.group(2) - if result == 'is installed': - installed_state[package] = True + dom = parseXML(stdout) + if rc == 104: + # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found) + if fail_not_found: + errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data + m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) else: - installed_state[package] = False - - for package in packages: - if package not in installed_state: - print package + ' was not returned by rpm \n' - return None - - return installed_state - -# Function used to make sure a package is present. 
-def package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper): - packages = [] - for package in name: - if installed_state[package] is False: - packages.append(package) - if len(packages) != 0: - cmd = ['/usr/bin/zypper', '--non-interactive'] - # add global options before zypper command - if disable_gpg_check: - cmd.append('--no-gpg-checks') - cmd.extend(['install', '--auto-agree-with-licenses', '-t', package_type]) - # add install parameter - if disable_recommends and not old_zypper: - cmd.append('--no-recommends') - cmd.extend(packages) - rc, stdout, stderr = m.run_command(cmd, check_rc=False) + return {}, rc, stdout, stderr + elif rc in [0, 106, 103]: + # zypper exit codes + # 0: success + # 106: signature verification failed + # 103: zypper was upgraded, run same command again + if packages is None: + firstrun = True + packages = {} + solvable_list = dom.getElementsByTagName('solvable') + for solvable in solvable_list: + name = solvable.getAttribute('name') + packages[name] = {} + packages[name]['version'] = solvable.getAttribute('edition') + packages[name]['oldversion'] = solvable.getAttribute('edition-old') + status = solvable.getAttribute('status') + packages[name]['installed'] = status == "installed" + packages[name]['group'] = solvable.parentNode.nodeName + if rc == 103 and firstrun: + # if this was the first run and it failed with 103 + # run zypper again with the same command to complete update + return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages) + + return packages, rc, stdout, stderr + m.fail_json(msg='Zypper run command failed with return code %s.'%rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) + + +def get_cmd(m, subcommand): + "puts together the basic zypper command arguments with those passed to the module" + is_install = subcommand in ['install', 'update', 'patch'] + is_refresh = subcommand == 'refresh' + cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout'] + + # add global options before zypper command + if (is_install or is_refresh) and m.params['disable_gpg_check']: + cmd.append('--no-gpg-checks') - if rc == 0: - changed=True - else: - changed=False + cmd.append(subcommand) + if subcommand != 'patch' and not is_refresh: + cmd.extend(['--type', m.params['type']]) + if m.check_mode and subcommand != 'search': + cmd.append('--dry-run') + if is_install: + cmd.append('--auto-agree-with-licenses') + if m.params['disable_recommends']: + cmd.append('--no-recommends') + if m.params['force']: + cmd.append('--force') + if m.params['oldpackage']: + cmd.append('--oldpackage') + return cmd + + +def set_diff(m, retvals, result): + # TODO: if there is only one package, set before/after to version numbers + packages = {'installed': [], 'removed': [], 'upgraded': []} + if result: + for p in result: + group = result[p]['group'] + if group == 'to-upgrade': + versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')' + packages['upgraded'].append(p + versions) + elif group == 'to-install': + packages['installed'].append(p) + elif group == 'to-remove': + packages['removed'].append(p) + + output = '' + for state in packages: + if packages[state]: + output += state + ': ' + ', '.join(packages[state]) + '\n' + if 'diff' not in retvals: + retvals['diff'] = {} + if 'prepared' not in retvals['diff']: + retvals['diff']['prepared'] = output else: - rc = 0 - stdout = '' - stderr = '' - changed=False + retvals['diff']['prepared'] += '\n' + output + + +def package_present(m, name, 
want_latest): + "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove" + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + name_install, name_remove, urls = get_want_state(m, name) + + # if a version string is given, pass it to zypper + install_version = [p+name_install[p] for p in name_install if name_install[p]] + remove_version = [p+name_remove[p] for p in name_remove if name_remove[p]] + + # add oldpackage flag when a version is given to allow downgrades + if install_version or remove_version: + m.params['oldpackage'] = True + + if not want_latest: + # for state=present: filter out already installed packages + install_and_remove = name_install.copy() + install_and_remove.update(name_remove) + prerun_state = get_installed_state(m, install_and_remove) + # generate lists of packages to install or remove + name_install = [p for p in name_install if p not in prerun_state] + name_remove = [p for p in name_remove if p in prerun_state] + if not any((name_install, name_remove, urls, install_version, remove_version)): + # nothing to install/remove and nothing to update + return None, retvals + + # zypper install also updates packages + cmd = get_cmd(m, 'install') + cmd.append('--') + cmd.extend(urls) + + # pass packages with version information + cmd.extend(install_version) + cmd.extend(['-%s' % p for p in remove_version]) + + # allow for + or - prefixes in install/remove lists + # do this in one zypper run to allow for dependency-resolution + # for example "-exim postfix" runs without removing packages depending on mailserver + cmd.extend(name_install) + cmd.extend(['-%s' % p for p in name_remove]) + + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + + return result, retvals + + +def package_update_all(m): + "run update or patch on all available packages" + + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + if m.params['type'] == 'patch': + cmdname = 'patch' + else: + cmdname = 'update' - return (rc, stdout, stderr, changed) + cmd = get_cmd(m, cmdname) + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + return result, retvals -# Function used to make sure a package is the latest available version. 
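package_present() above deliberately folds installs and removals into a single zypper transaction so that dependency resolution sees both sides at once. A minimal sketch of that folding, assuming the command prefix that get_cmd() in this patch produces (the package names are only illustrative):

```python
# "-exim postfix" should swap MTAs in one dependency-resolution pass,
# instead of first removing exim (plus everything that depends on a
# mailserver) and only then installing postfix.
names = ['-exim', 'postfix']

install, remove = [], []
for n in names:
    if n[0] in ['-', '~']:           # remove prefixes
        remove.append(n[1:])
    else:                            # bare name or "+" prefix
        install.append(n.lstrip('+'))

cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout',
       'install', '--type', 'package', '--auto-agree-with-licenses', '--']
cmd.extend(install)
cmd.extend(['-%s' % p for p in remove])
# cmd now ends with: install ... -- postfix -exim
```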
-def package_latest(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper): - # first of all, make sure all the packages are installed - (rc, stdout, stderr, changed) = package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper) +def package_absent(m, name): + "remove the packages in name" + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} + # Get package state + name_install, name_remove, urls = get_want_state(m, name, remove=True) + if name_install: + m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.") + if urls: + m.fail_json(msg="Can not remove via URL.") + if m.params['type'] == 'patch': + m.fail_json(msg="Can not remove patches.") + prerun_state = get_installed_state(m, name_remove) + remove_version = [p+name_remove[p] for p in name_remove if name_remove[p]] + name_remove = [p for p in name_remove if p in prerun_state] + if not name_remove and not remove_version: + return None, retvals - # if we've already made a change, we don't have to check whether a version changed - if not changed: - pre_upgrade_versions = get_current_version(m, name) + cmd = get_cmd(m, 'remove') + cmd.extend(name_remove) + cmd.extend(remove_version) - cmd = ['/usr/bin/zypper', '--non-interactive'] + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) + return result, retvals - if disable_gpg_check: - cmd.append('--no-gpg-checks') - if old_zypper: - cmd.extend(['install', '--auto-agree-with-licenses', '-t', package_type]) - else: - cmd.extend(['update', '--auto-agree-with-licenses', '-t', package_type]) +def repo_refresh(m): + "update the repositories" + retvals = {'rc': 0, 'stdout': '', 'stderr': ''} - cmd.extend(name) - rc, stdout, stderr = m.run_command(cmd, check_rc=False) + cmd = get_cmd(m, 'refresh') - # if we've already made a change, we don't have to check whether a version changed - if not changed: - post_upgrade_versions = get_current_version(m, name) - if pre_upgrade_versions != post_upgrade_versions: - changed = True - - return (rc, stdout, stderr, changed) - -# Function used to make sure a package is not installed. 
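parse_zypper_xml() above also encodes zypper's exit-code contract: 0 is success, 104 means no matching capability was found, 106 is a repository signature problem, and 103 means zypper itself was upgraded mid-run and the same command must be executed again, merging the results of both runs. A rough sketch of just that retry shape; parse_solvables() here is a hypothetical stand-in for the minidom walk in the patch:

```python
def run_with_retry(run_command, cmd, packages=None):
    # run_command returns (rc, xml_output); parse_solvables() is assumed
    # to turn the <solvable> elements into a {name: attributes} dict.
    rc, xml_output = run_command(cmd)
    firstrun = packages is None
    packages = {} if firstrun else packages
    packages.update(parse_solvables(xml_output))
    if rc == 103 and firstrun:
        # zypper replaced itself; rerun once and merge both result sets
        return run_with_retry(run_command, cmd, packages)
    return packages, rc
```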
-def package_absent(m, name, installed_state, package_type, old_zypper): - packages = [] - for package in name: - if installed_state[package] is True: - packages.append(package) - if len(packages) != 0: - cmd = ['/usr/bin/zypper', '--non-interactive', 'remove', '-t', package_type] - cmd.extend(packages) - rc, stdout, stderr = m.run_command(cmd) - - if rc == 0: - changed=True - else: - changed=False - else: - rc = 0 - stdout = '' - stderr = '' - changed=False + retvals['cmd'] = cmd + result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) - return (rc, stdout, stderr, changed) + return retvals # =========================================== # Main control flow @@ -268,57 +424,54 @@ def main(): argument_spec = dict( name = dict(required=True, aliases=['pkg'], type='list'), state = dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']), - type = dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage']), + type = dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']), disable_gpg_check = dict(required=False, default='no', type='bool'), disable_recommends = dict(required=False, default='yes', type='bool'), + force = dict(required=False, default='no', type='bool'), + update_cache = dict(required=False, aliases=['refresh'], default='no', type='bool'), + oldpackage = dict(required=False, default='no', type='bool'), ), - supports_check_mode = False + supports_check_mode = True ) + name = module.params['name'] + state = module.params['state'] + update_cache = module.params['update_cache'] - params = module.params + # remove empty strings from package list + name = filter(None, name) - name = params['name'] - state = params['state'] - type_ = params['type'] - disable_gpg_check = params['disable_gpg_check'] - disable_recommends = params['disable_recommends'] + # Refresh repositories + if update_cache: + retvals = repo_refresh(module) - rc = 0 - stdout = '' - stderr = '' - result = {} - result['name'] = name - result['state'] = state + if retvals['rc'] != 0: + module.fail_json(msg="Zypper refresh run failed.", **retvals) - rc, out = zypper_version(module) - match = re.match(r'zypper\s+(\d+)\.(\d+)\.(\d+)', out) - if not match or int(match.group(1)) > 0: - old_zypper = False + # Perform requested action + if name == ['*'] and state == 'latest': + packages_changed, retvals = package_update_all(module) else: - old_zypper = True + if state in ['absent', 'removed']: + packages_changed, retvals = package_absent(module, name) + elif state in ['installed', 'present', 'latest']: + packages_changed, retvals = package_present(module, name, state == 'latest') - # Get package state - installed_state = get_package_state(module, name) + retvals['changed'] = retvals['rc'] == 0 and bool(packages_changed) - # Perform requested action - if state in ['installed', 'present']: - (rc, stdout, stderr, changed) = package_present(module, name, installed_state, type_, disable_gpg_check, disable_recommends, old_zypper) - elif state in ['absent', 'removed']: - (rc, stdout, stderr, changed) = package_absent(module, name, installed_state, type_, old_zypper) - elif state == 'latest': - (rc, stdout, stderr, changed) = package_latest(module, name, installed_state, type_, disable_gpg_check, disable_recommends, old_zypper) - - if rc != 0: - if stderr: - module.fail_json(msg=stderr) - else: - module.fail_json(msg=stdout) + if module._diff: + 
set_diff(module, retvals, packages_changed) + + if retvals['rc'] != 0: + module.fail_json(msg="Zypper run failed.", **retvals) - result['changed'] = changed + if not retvals['changed']: + del retvals['stdout'] + del retvals['stderr'] - module.exit_json(**result) + module.exit_json(name=name, state=state, update_cache=update_cache, **retvals) # import module snippets -from ansible.module_utils.basic import * -main() +from ansible.module_utils.basic import AnsibleModule +if __name__ == "__main__": + main() diff --git a/packaging/os/zypper_repository.py b/packaging/os/zypper_repository.py index 446723ef042..187e5803674 100644 --- a/packaging/os/zypper_repository.py +++ b/packaging/os/zypper_repository.py @@ -20,6 +20,10 @@ # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: zypper_repository @@ -55,46 +59,114 @@ - Whether to disable GPG signature checking of all packages. Has an effect only if state is I(present). + - Needs zypper version >= 1.6.2. required: false default: "no" choices: [ "yes", "no" ] - aliases: [] - refresh: + autorefresh: description: - Enable autorefresh of the repository. required: false default: "yes" choices: [ "yes", "no" ] - aliases: [] -notes: [] -requirements: [ zypper ] + aliases: [ "refresh" ] + priority: + description: + - Set priority of repository. Packages will always be installed + from the repository with the smallest priority number. + - Needs zypper version >= 1.12.25. + required: false + version_added: "2.1" + overwrite_multiple: + description: + - Overwrite multiple repository entries, if repositories with both name and + URL already exist. + required: false + default: "no" + choices: [ "yes", "no" ] + version_added: "2.1" + auto_import_keys: + description: + - Automatically import the gpg signing key of the new or changed repository. + - Has an effect only if state is I(present). Has no effect on existing (unchanged) repositories or in combination with I(absent). + - Implies runrefresh. + required: false + default: "no" + choices: ["yes", "no"] + version_added: "2.2" + runrefresh: + description: + - Refresh the package list of the given repository. + - Can be used with repo=* to refresh all repositories. + required: false + default: "no" + choices: ["yes", "no"] + version_added: "2.2" + enabled: + description: + - Set repository to enabled (or disabled). 
+ required: false + default: "yes" + choices: ["yes", "no"] + version_added: "2.2" + + +requirements: + - "zypper >= 1.0 # included in openSuSE >= 11.1 or SuSE Linux Enterprise Server/Desktop >= 11.0" + - python-xml ''' EXAMPLES = ''' # Add NVIDIA repository for graphics drivers -- zypper_repository: name=nvidia-repo repo='ftp://download.nvidia.com/opensuse/12.2' state=present +- zypper_repository: + name: nvidia-repo + repo: 'ftp://download.nvidia.com/opensuse/12.2' + state: present # Remove NVIDIA repository -- zypper_repository: name=nvidia-repo repo='ftp://download.nvidia.com/opensuse/12.2' state=absent +- zypper_repository: + name: nvidia-repo + repo: 'ftp://download.nvidia.com/opensuse/12.2' + state: absent # Add python development repository -- zypper_repository: repo=http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo +- zypper_repository: + repo: 'http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo' + +# Refresh all repos +- zypper_repository: + repo: '*' + runrefresh: yes + +# Add a repo and add its gpg key +- zypper_repository: + repo: 'http://download.opensuse.org/repositories/systemsmanagement/openSUSE_Leap_42.1/' + auto_import_keys: yes + +# Force refresh of a repository +- zypper_repository: + repo: 'http://my_internal_ci_repo/repo' + name: my_ci_repo + state: present + runrefresh: yes ''' REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck'] -def zypper_version(module): - """Return (rc, message) tuple""" - cmd = ['/usr/bin/zypper', '-V'] - rc, stdout, stderr = module.run_command(cmd, check_rc=False) - if rc == 0: - return rc, stdout - else: - return rc, stderr +from distutils.version import LooseVersion + +def _get_cmd(*args): + """Combines the non-interactive zypper command with arguments/subcommands""" + cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive'] + cmd.extend(args) + + return cmd + def _parse_repos(module): - """parses the output of zypper -x lr and returns a parse repo dictionary""" - cmd = ['/usr/bin/zypper', '-x', 'lr'] + """parses the output of zypper --xmlout repos and returns a parsed repo dictionary""" + cmd = _get_cmd('--xmlout', 'repos') + from xml.dom.minidom import parseString as parseXML rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc == 0: @@ -113,119 +185,129 @@ def _parse_repos(module): elif rc == 6: return [] else: - d = { 'zypper_exit_code': rc } - if stderr: - d['stderr'] = stderr - if stdout: - d['stdout'] = stdout - module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), **d) - -def _parse_repos_old(module): - """parses the output of zypper sl and returns a parse repo dictionary""" - cmd = ['/usr/bin/zypper', 'sl'] - repos = [] - rc, stdout, stderr = module.run_command(cmd, check_rc=True) - for line in stdout.split('\n'): - matched = re.search(r'\d+\s+\|\s+(?P\w+)\s+\|\s+(?P\w+)\s+\|\s+(?P\w+)\s+\|\s+(?P\w+)\s+\|\s+(?P.*)', line) - if matched == None: - continue - - m = matched.groupdict() - m['alias']= m['name'] - m['priority'] = 100 - m['gpgcheck'] = 1 - repos.append(m) - - return repos - -def repo_exists(module, old_zypper, **kwargs): - - def repo_subset(realrepo, repocmp): - for k in repocmp: - if k not in realrepo: - return False - - for k, v in realrepo.items(): - if k in repocmp: - if v.rstrip("/") != repocmp[k].rstrip("/"): - return False - return True - - if old_zypper: - repos = _parse_repos_old(module) - else: - repos = _parse_repos(module) + module.fail_json(msg='Failed to 
execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr) - for repo in repos: - if repo_subset(repo, kwargs): +def _repo_changes(realrepo, repocmp): + "Check whether the 2 given repos have different settings." + for k in repocmp: + if repocmp[k] and k not in realrepo: return True + + for k, v in realrepo.items(): + if k in repocmp and repocmp[k]: + valold = str(repocmp[k] or "") + valnew = v or "" + if k == "url": + valold, valnew = valold.rstrip("/"), valnew.rstrip("/") + if valold != valnew: + return True return False +def repo_exists(module, repodata, overwrite_multiple): + """Check whether the repository already exists. -def add_repo(module, repo, alias, description, disable_gpg_check, old_zypper, refresh): - if old_zypper: - cmd = ['/usr/bin/zypper', 'sa'] - else: - cmd = ['/usr/bin/zypper', 'ar', '--check'] + returns (exists, mod, old_repos) + exists: whether a matching (name, URL) repo exists + mod: whether there are changes compared to the existing repo + old_repos: list of matching repos + """ + existing_repos = _parse_repos(module) - if repo.startswith("file:/") and old_zypper: - cmd.extend(['-t', 'Plaindir']) - else: - cmd.extend(['-t', 'plaindir']) + # look for repos that have matching alias or url to the one searched + repos = [] + for kw in ['alias', 'url']: + name = repodata[kw] + for oldr in existing_repos: + if repodata[kw] == oldr[kw] and oldr not in repos: + repos.append(oldr) + + if len(repos) == 0: + # Repo does not exist yet + return (False, False, None) + elif len(repos) == 1: + # Found an existing repo, look for changes + has_changes = _repo_changes(repos[0], repodata) + return (True, has_changes, repos) + elif len(repos) >= 2: + if overwrite_multiple: + # Found two repos and want to overwrite_multiple + return (True, True, repos) + else: + errmsg = 'More than one repo matched "%s": "%s".' % (name, repos) + errmsg += ' Use overwrite_multiple to allow more than one repo to be overwritten' + module.fail_json(msg=errmsg) + + +def addmodify_repo(module, repodata, old_repos, zypper_version, warnings): + "Adds the repo, removes old repos before, that would conflict." + repo = repodata['url'] + cmd = _get_cmd('addrepo', '--check') + if repodata['name']: + cmd.extend(['--name', repodata['name']]) + + # priority on addrepo available since 1.12.25 + # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L327-L336 + if repodata['priority']: + if zypper_version >= LooseVersion('1.12.25'): + cmd.extend(['--priority', str(repodata['priority'])]) + else: + warnings.append("Setting priority only available for zypper >= 1.12.25. Ignoring priority argument.") - if description: - cmd.extend(['--name', description]) + if repodata['enabled'] == '0': + cmd.append('--disable') - if disable_gpg_check and not old_zypper: - cmd.append('--no-gpgcheck') + # gpgcheck available since 1.6.2 + # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L2446-L2449 + # the default changed in the past, so don't assume a default here and show warning for old zypper versions + if zypper_version >= LooseVersion('1.6.2'): + if repodata['gpgcheck'] == '1': + cmd.append('--gpgcheck') + else: + cmd.append('--no-gpgcheck') + else: + warnings.append("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. 
Using zypper default value.") - if refresh: + if repodata['autorefresh'] == '1': cmd.append('--refresh') cmd.append(repo) if not repo.endswith('.repo'): - cmd.append(alias) + cmd.append(repodata['alias']) + + if old_repos is not None: + for oldrepo in old_repos: + remove_repo(module, oldrepo['url']) rc, stdout, stderr = module.run_command(cmd, check_rc=False) - changed = rc == 0 - if rc == 0: - changed = True - elif 'already exists. Please use another alias' in stderr: - changed = False - else: - #module.fail_json(msg=stderr if stderr else stdout) - if stderr: - module.fail_json(msg=stderr) - else: - module.fail_json(msg=stdout) + return rc, stdout, stderr - return changed +def remove_repo(module, repo): + "Removes the repo." + cmd = _get_cmd('removerepo', repo) -def remove_repo(module, repo, alias, old_zypper): + rc, stdout, stderr = module.run_command(cmd, check_rc=True) + return rc, stdout, stderr - if old_zypper: - cmd = ['/usr/bin/zypper', 'sd'] - else: - cmd = ['/usr/bin/zypper', 'rr'] - if alias: - cmd.append(alias) - else: - cmd.append(repo) - rc, stdout, stderr = module.run_command(cmd, check_rc=True) - changed = rc == 0 - return changed +def get_zypper_version(module): + rc, stdout, stderr = module.run_command(['/usr/bin/zypper', '--version']) + if rc != 0 or not stdout.startswith('zypper '): + return LooseVersion('1.0') + return LooseVersion(stdout.split()[1]) +def runrefreshrepo(module, auto_import_keys=False, shortname=None): + "Forces zypper to refresh repo metadata." + if auto_import_keys: + cmd = _get_cmd('--gpg-auto-import-keys', 'refresh', '--force') + else: + cmd = _get_cmd('refresh', '--force') + if shortname is not None: + cmd.extend(['-r', shortname]) -def fail_if_rc_is_null(module, rc, stdout, stderr): - if rc != 0: - #module.fail_json(msg=stderr if stderr else stdout) - if stderr: - module.fail_json(msg=stderr) - else: - module.fail_json(msg=stdout) + rc, stdout, stderr = module.run_command(cmd, check_rc=True) + return rc, stdout, stderr def main(): @@ -234,64 +316,99 @@ def main(): name=dict(required=False), repo=dict(required=False), state=dict(choices=['present', 'absent'], default='present'), + runrefresh=dict(required=False, default='no', type='bool'), description=dict(required=False), - disable_gpg_check = dict(required=False, default='no', type='bool'), - refresh = dict(required=False, default='yes', type='bool'), + disable_gpg_check = dict(required=False, default=False, type='bool'), + autorefresh = dict(required=False, default=True, type='bool', aliases=['refresh']), + priority = dict(required=False, type='int'), + enabled = dict(required=False, default=True, type='bool'), + overwrite_multiple = dict(required=False, default=False, type='bool'), + auto_import_keys = dict(required=False, default=False, type='bool'), ), supports_check_mode=False, + required_one_of = [['state','runrefresh']], ) repo = module.params['repo'] + alias = module.params['name'] state = module.params['state'] - name = module.params['name'] - description = module.params['description'] - disable_gpg_check = module.params['disable_gpg_check'] - refresh = module.params['refresh'] + overwrite_multiple = module.params['overwrite_multiple'] + auto_import_keys = module.params['auto_import_keys'] + runrefresh = module.params['runrefresh'] + + zypper_version = get_zypper_version(module) + warnings = [] # collect warning messages for final output + + repodata = { + 'url': repo, + 'alias': alias, + 'name': module.params['description'], + 'priority': module.params['priority'], + } + # rewrite bools 
in the language that zypper lr -x provides for easier comparison + if module.params['enabled']: + repodata['enabled'] = '1' + else: + repodata['enabled'] = '0' + if module.params['disable_gpg_check']: + repodata['gpgcheck'] = '0' + else: + repodata['gpgcheck'] = '1' + if module.params['autorefresh']: + repodata['autorefresh'] = '1' + else: + repodata['autorefresh'] = '0' def exit_unchanged(): - module.exit_json(changed=False, repo=repo, state=state, name=name) - - rc, out = zypper_version(module) - match = re.match(r'zypper\s+(\d+)\.(\d+)\.(\d+)', out) - if not match or int(match.group(1)) > 0: - old_zypper = False - else: - old_zypper = True + module.exit_json(changed=False, repodata=repodata, state=state) # Check run-time module parameters + if repo == '*' or alias == '*': + if runrefresh: + runrefreshrepo(module, auto_import_keys) + module.exit_json(changed=False, runrefresh=True) + else: + module.fail_json(msg='repo=* can only be used with the runrefresh option.') + if state == 'present' and not repo: module.fail_json(msg='Module option state=present requires repo') - if state == 'absent' and not repo and not name: + if state == 'absent' and not repo and not alias: module.fail_json(msg='Alias or repo parameter required when state=absent') if repo and repo.endswith('.repo'): - if name: - module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding repo files') + if alias: + module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding .repo files') else: - if not name and state == "present": - module.fail_json(msg='Name required when adding non-repo files:') + if not alias and state == "present": + module.fail_json(msg='Name required when adding non-repo files.') - if repo and repo.endswith('.repo'): - exists = repo_exists(module, old_zypper, url=repo, alias=name) - elif repo: - exists = repo_exists(module, old_zypper, url=repo) + exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple) + + if repo: + shortname = repo else: - exists = repo_exists(module, old_zypper, alias=name) + shortname = alias if state == 'present': - if exists: + if exists and not mod: + if runrefresh: + runrefreshrepo(module, auto_import_keys, shortname) exit_unchanged() - - changed = add_repo(module, repo, name, description, disable_gpg_check, old_zypper, refresh) + rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version, warnings) + if rc == 0 and (runrefresh or auto_import_keys): + runrefreshrepo(module, auto_import_keys, shortname) elif state == 'absent': if not exists: exit_unchanged() + rc, stdout, stderr = remove_repo(module, shortname) - changed = remove_repo(module, repo, name, old_zypper) - - module.exit_json(changed=changed, repo=repo, state=state) + if rc == 0: + module.exit_json(changed=True, repodata=repodata, state=state, warnings=warnings) + else: + module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state, warnings=warnings) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/remote_management/__init__.py b/remote_management/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/remote_management/ipmi/__init__.py b/remote_management/ipmi/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/remote_management/ipmi/ipmi_boot.py b/remote_management/ipmi/ipmi_boot.py new file mode 100644 index 00000000000..06281d4d46f --- /dev/null +++ 
b/remote_management/ipmi/ipmi_boot.py @@ -0,0 +1,200 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +try: + from pyghmi.ipmi import command +except ImportError: + command = None + +from ansible.module_utils.basic import * + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ipmi_boot +short_description: Management of order of boot devices +description: + - Use this module to manage the order of boot devices +version_added: "2.2" +options: + name: + description: + - Hostname or IP address of the BMC. + required: true + port: + description: + - Remote RMCP port. + required: false + type: int + default: 623 + user: + description: + - Username to use to connect to the BMC. + required: true + password: + description: + - Password to connect to the BMC. + required: true + default: null + bootdev: + description: + - Set boot device to use on next reboot. + required: true + choices: + - network -- Request network boot + - hd -- Boot from hard drive + - safe -- Boot from hard drive, requesting 'safe mode' + - optical -- Boot from CD/DVD/BD drive + - setup -- Boot into setup utility + - default -- Remove any IPMI-directed boot device request + state: + description: + - Whether to ensure that the given boot device is requested. + default: present + choices: + - present -- Ensure the requested boot device is used + - absent -- Ensure the requested boot device is not used + persistent: + description: + - If set, ask that system firmware uses this device beyond next boot. + Be aware many systems do not honor this. + required: false + type: boolean + default: false + uefiboot: + description: + - If set, request UEFI boot explicitly. + Strictly speaking, the spec suggests that if not set, the system should BIOS boot and offers no "don't care" option. + In practice, this flag not being set does not preclude UEFI boot on any system I've encountered. + required: false + type: boolean + default: false +requirements: + - "python >= 2.6" + - pyghmi +author: "Bulat Gaifullin (gaifullinbf@gmail.com)" +''' + +RETURN = ''' +bootdev: + description: The boot device name which will be used beyond next boot. + returned: success + type: string + sample: default +persistent: + description: If True, system firmware will use this device beyond next boot. + returned: success + type: bool + sample: false +uefimode: + description: If True, system firmware will use UEFI boot explicitly beyond next boot. + returned: success + type: bool + sample: false +''' + +EXAMPLES = ''' +# Ensure bootdevice is HD. 
+- ipmi_boot: + name: test.testdomain.com + user: admin + password: password + bootdev: hd + +# Ensure bootdevice is not Network +- ipmi_boot: + name: test.testdomain.com + user: admin + password: password + bootdev: network + state: absent +''' + +# ================================================== + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + port=dict(default=623, type='int'), + user=dict(required=True, no_log=True), + password=dict(required=True, no_log=True), + state=dict(default='present', choices=['present', 'absent']), + bootdev=dict(required=True, choices=['network', 'hd', 'safe', 'optical', 'setup', 'default']), + persistent=dict(default=False, type='bool'), + uefiboot=dict(default=False, type='bool') + ), + supports_check_mode=True, + ) + + if command is None: + module.fail_json(msg='the python pyghmi module is required') + + name = module.params['name'] + port = module.params['port'] + user = module.params['user'] + password = module.params['password'] + state = module.params['state'] + bootdev = module.params['bootdev'] + persistent = module.params['persistent'] + uefiboot = module.params['uefiboot'] + request = dict() + + if state == 'absent' and bootdev == 'default': + module.fail_json(msg="The bootdev 'default' cannot be used with state 'absent'.") + + # --- run command --- + try: + ipmi_cmd = command.Command( + bmc=name, userid=user, password=password, port=port + ) + module.debug('ipmi instantiated - name: "%s"' % name) + current = ipmi_cmd.get_bootdev() + # uefimode may not supported by BMC, so use desired value as default + current.setdefault('uefimode', uefiboot) + if state == 'present' and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot): + request = dict(bootdev=bootdev, uefiboot=uefiboot, persist=persistent) + elif state == 'absent' and current['bootdev'] == bootdev: + request = dict(bootdev='default') + else: + module.exit_json(changed=False, **current) + + if module.check_mode: + response = dict(bootdev=request['bootdev']) + else: + response = ipmi_cmd.set_bootdev(**request) + + if 'error' in response: + module.fail_json(msg=response['error']) + + if 'persist' in request: + response['persistent'] = request['persist'] + if 'uefiboot' in request: + response['uefimode'] = request['uefiboot'] + + module.exit_json(changed=True, **response) + except Exception as e: + module.fail_json(msg=str(e)) + +if __name__ == '__main__': + main() diff --git a/remote_management/ipmi/ipmi_power.py b/remote_management/ipmi/ipmi_power.py new file mode 100644 index 00000000000..b661be4c535 --- /dev/null +++ b/remote_management/ipmi/ipmi_power.py @@ -0,0 +1,146 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
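The ipmi_boot module above stays idempotent by reading the BMC's current boot device first and writing only on a real difference; uefimode is defaulted to the desired value because not every BMC reports it. A standalone sketch of that read-compare-write cycle using the same pyghmi calls (host and credentials below are placeholders):

```python
from pyghmi.ipmi import command

# Placeholder BMC address and credentials.
ipmi_cmd = command.Command(bmc='bmc.example.com', userid='admin',
                           password='secret', port=623)

desired = dict(bootdev='hd', persistent=False, uefimode=False)
current = ipmi_cmd.get_bootdev()
# some BMCs omit uefimode; assume the desired value so the compare works
current.setdefault('uefimode', desired['uefimode'])

if current != desired:
    # write only when the BMC state actually differs
    ipmi_cmd.set_bootdev(bootdev='hd', persist=False, uefiboot=False)
```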
+ +try: + from pyghmi.ipmi import command +except ImportError: + command = None + +from ansible.module_utils.basic import * + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: ipmi_power +short_description: Power management for machine +description: + - Use this module for power management +version_added: "2.2" +options: + name: + description: + - Hostname or IP address of the BMC. + required: true + port: + description: + - Remote RMCP port. + required: false + type: int + default: 623 + user: + description: + - Username to use to connect to the BMC. + required: true + password: + description: + - Password to connect to the BMC. + required: true + default: null + state: + description: + - Whether to ensure that the machine is in the desired state. + required: true + choices: + - on -- Request system turn on + - off -- Request system turn off without waiting for OS to shutdown + - shutdown -- Have system request OS proper shutdown + - reset -- Request system reset without waiting for OS + - boot -- If system is off, then 'on', else 'reset' + timeout: + description: + - Maximum number of seconds before interrupt request. + required: false + type: int + default: 300 +requirements: + - "python >= 2.6" + - pyghmi +author: "Bulat Gaifullin (gaifullinbf@gmail.com)" +''' + +RETURN = ''' +powerstate: + description: The current power state of the machine. + returned: success + type: string + sample: on +''' + +EXAMPLES = ''' +# Ensure machine is powered on. +- ipmi_power: + name: test.testdomain.com + user: admin + password: password + state: 'on' +''' + +# ================================================== + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + port=dict(default=623, type='int'), + state=dict(required=True, choices=['on', 'off', 'shutdown', 'reset', 'boot']), + user=dict(required=True, no_log=True), + password=dict(required=True, no_log=True), + timeout=dict(default=300, type='int'), + ), + supports_check_mode=True, + ) + + if command is None: + module.fail_json(msg='the python pyghmi module is required') + + name = module.params['name'] + port = module.params['port'] + user = module.params['user'] + password = module.params['password'] + state = module.params['state'] + timeout = module.params['timeout'] + + # --- run command --- + try: + ipmi_cmd = command.Command( + bmc=name, userid=user, password=password, port=port + ) + module.debug('ipmi instantiated - name: "%s"' % name) + + current = ipmi_cmd.get_power() + if current['powerstate'] != state: + response = {'powerstate': state} if module.check_mode else ipmi_cmd.set_power(state, wait=timeout) + changed = True + else: + response = current + changed = False + + if 'error' in response: + module.fail_json(msg=response['error']) + + module.exit_json(changed=changed, **response) + except Exception as e: + module.fail_json(msg=str(e)) + +if __name__ == '__main__': + main() diff --git a/source_control/bzr.py b/source_control/bzr.py index 0fc6ac28584..f66c00abf82 100644 --- a/source_control/bzr.py +++ b/source_control/bzr.py @@ -19,6 +19,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = u''' --- module: bzr @@ -62,7 +66,10 @@ EXAMPLES = ''' # Example bzr checkout from Ansible Playbooks -- bzr: name=bzr+ssh://foosball.example.org/path/to/branch dest=/srv/checkout version=22 +- bzr: + name: 'bzr+ssh://foosball.example.org/path/to/branch' + dest: /srv/checkout + version: 22 ''' import re @@ -143,7 +150,7 @@ def switch_version(self): def main(): module = AnsibleModule( argument_spec = dict( - dest=dict(required=True), + dest=dict(required=True, type='path'), name=dict(required=True, aliases=['parent']), version=dict(default='head'), force=dict(default='no', type='bool'), @@ -151,7 +158,7 @@ def main(): ) ) - dest = os.path.abspath(os.path.expanduser(module.params['dest'])) + dest = module.params['dest'] parent = module.params['name'] version = module.params['version'] force = module.params['force'] @@ -196,4 +203,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/source_control/git_config.py b/source_control/git_config.py new file mode 100644 index 00000000000..16f2457dd98 --- /dev/null +++ b/source_control/git_config.py @@ -0,0 +1,258 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Marius Gedminas +# (c) 2016, Matthew Gamble +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: git_config +author: + - "Matthew Gamble" + - "Marius Gedminas" +version_added: 2.1 +requirements: ['git'] +short_description: Read and write git configuration +description: + - The M(git_config) module changes git configuration by invoking 'git config'. + This is needed if you don't want to use M(template) for the entire git + config file (e.g. because you need to change just C(user.email) in + /etc/.git/config). Solutions involving M(command) are cumbersome or + don't work correctly in check mode. +options: + list_all: + description: + - List all settings (optionally limited to a given I(scope)) + required: false + choices: [ "yes", "no" ] + default: no + name: + description: + - The name of the setting. If no value is supplied, the value will + be read from the config if it has been set. + required: false + default: null + repo: + description: + - Path to a git repository for reading and writing values from a + specific repo. + required: false + default: null + scope: + description: + - Specify which scope to read/set values from. This is required + when setting config values. If this is set to local, you must + also specify the repo parameter. It defaults to system only when + not using I(list_all)=yes. 
+ required: false + choices: [ "local", "global", "system" ] + default: null + value: + description: + - When specifying the name of a single setting, supply a value to + set that setting to the given value. + required: false + default: null +''' + +EXAMPLES = ''' +# Set some settings in ~/.gitconfig +- git_config: + name: alias.ci + scope: global + value: commit + +- git_config: + name: alias.st + scope: global + value: status + +# Or system-wide: +- git_config: + name: alias.remotev + scope: system + value: remote -v + +- git_config: + name: core.editor + scope: global + value: vim + +# scope=system is the default +- git_config: + name: alias.diffc + value: diff --cached + +- git_config: + name: color.ui + value: auto + +# Make etckeeper not complain when invoked by cron +- git_config: + name: user.email + repo: /etc + scope: local + value: 'root@{{ ansible_fqdn }}' + +# Read individual values from git config +- git_config: + name: alias.ci + scope: global + +# scope: system is also assumed when reading values, unless list_all=yes +- git_config: + name: alias.diffc + +# Read all values from git config +- git_config: + list_all: yes + scope: global + +# When list_all=yes and no scope is specified, you get configuration from all scopes +- git_config: + list_all: yes + +# Specify a repository to include local settings +- git_config: + list_all: yes + repo: /path/to/repo.git +''' + +RETURN = ''' +--- +config_value: + description: When list_all=no and value is not set, a string containing the value of the setting in name + returned: success + type: string + sample: "vim" + +config_values: + description: When list_all=yes, a dict containing key/value pairs of multiple configuration settings + returned: success + type: dictionary + sample: + core.editor: "vim" + color.ui: "auto" + alias.diffc: "diff --cached" + alias.remotev: "remote -v" +''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + list_all=dict(required=False, type='bool', default=False), + name=dict(type='str'), + repo=dict(type='path'), + scope=dict(required=False, type='str', choices=['local', 'global', 'system']), + value=dict(required=False) + ), + mutually_exclusive=[['list_all', 'name'], ['list_all', 'value']], + required_if=[('scope', 'local', ['repo'])], + required_one_of=[['list_all', 'name']], + supports_check_mode=True, + ) + git_path = module.get_bin_path('git') + if not git_path: + module.fail_json(msg="Could not find git. Please ensure it is installed.") + + params = module.params + # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting. + # Set the locale to C to ensure consistent messages. 
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + if params['name']: + name = params['name'] + else: + name = None + + if params['scope']: + scope = params['scope'] + elif params['list_all']: + scope = None + else: + scope = 'system' + + if params['value']: + new_value = params['value'] + else: + new_value = None + + args = [git_path, "config", "--includes"] + if params['list_all']: + args.append('-l') + if scope: + args.append("--" + scope) + if name: + args.append(name) + + if scope == 'local': + dir = params['repo'] + elif params['list_all'] and params['repo']: + # Include local settings from a specific repo when listing all available settings + dir = params['repo'] + else: + # Run from root directory to avoid accidentally picking up any local config settings + dir = "/" + + (rc, out, err) = module.run_command(' '.join(args), cwd=dir) + if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err: + # This just means nothing has been set at the given scope + module.exit_json(changed=False, msg='', config_values={}) + elif rc >= 2: + # If the return code is 1, it just means the option hasn't been set yet, which is fine. + module.fail_json(rc=rc, msg=err, cmd=' '.join(args)) + + if params['list_all']: + values = out.rstrip().splitlines() + config_values = {} + for value in values: + k, v = value.split('=', 1) + config_values[k] = v + module.exit_json(changed=False, msg='', config_values=config_values) + elif not new_value: + module.exit_json(changed=False, msg='', config_value=out.rstrip()) + else: + old_value = out.rstrip() + if old_value == new_value: + module.exit_json(changed=False, msg="") + + if not module.check_mode: + new_value_quoted = "'" + new_value + "'" + (rc, out, err) = module.run_command(' '.join(args + [new_value_quoted]), cwd=dir) + if err: + module.fail_json(rc=rc, msg=err, cmd=' '.join(args + [new_value_quoted])) + module.exit_json( + msg='setting changed', + diff=dict( + before_header=' '.join(args), + before=old_value + "\n", + after_header=' '.join(args), + after=new_value + "\n" + ), + changed=True + ) + +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/source_control/github_hooks.py b/source_control/github_hooks.py index d75fcb1573d..ce76b503c23 100644 --- a/source_control/github_hooks.py +++ b/source_control/github_hooks.py @@ -18,9 +18,21 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -import json +try: + import json +except ImportError: + try: + import simplejson as json + except ImportError: + # Let snippet from module_utils/basic.py return a proper error in this case + pass + import base64 +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: github_hooks @@ -49,7 +61,7 @@ description: - This tells the githooks module what you want it to do. required: true - choices: [ "create", "cleanall" ] + choices: [ "create", "cleanall", "list", "clean504" ] validate_certs: description: - If C(no), SSL certificates for the target repo will not be validated. This should only be used @@ -69,10 +81,20 @@ EXAMPLES = ''' # Example creating a new service hook. It ignores duplicates. 
-- github_hooks: action=create hookurl=http://11.111.111.111:2222 user={{ gituser }} oauthkey={{ oauthkey }} repo=https://api.github.com/repos/pcgentry/Github-Auto-Deploy +- github_hooks: + action: create + hookurl: 'http://11.111.111.111:2222' + user: '{{ gituser }}' + oauthkey: '{{ oauthkey }}' + repo: 'https://api.github.com/repos/pcgentry/Github-Auto-Deploy' # Cleaning all hooks for this repo that had an error on the last update. Since this works for all hooks in a repo it is probably best that this would be called from a handler. -- local_action: github_hooks action=cleanall user={{ gituser }} oauthkey={{ oauthkey }} repo={{ repo }} +- github_hooks: + action: cleanall + user: '{{ gituser }}' + oauthkey: '{{ oauthkey }}' + repo: '{{ repo }}' + delegate_to: localhost ''' def _list(module, hookurl, oauthkey, repo, user): @@ -144,9 +166,9 @@ def _delete(module, hookurl, oauthkey, repo, user, hookid): def main(): module = AnsibleModule( argument_spec=dict( - action=dict(required=True), + action=dict(required=True, choices=['list','clean504','cleanall','create']), hookurl=dict(required=False), - oauthkey=dict(required=True), + oauthkey=dict(required=True, no_log=True), repo=dict(required=True), user=dict(required=True), validate_certs=dict(default='yes', type='bool'), @@ -183,4 +205,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() +if __name__ == '__main__': + main() diff --git a/source_control/github_key.py b/source_control/github_key.py new file mode 100644 index 00000000000..cc54734e004 --- /dev/null +++ b/source_control/github_key.py @@ -0,0 +1,247 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +module: github_key +short_description: Manage GitHub access keys. +description: + - Creates, removes, or updates GitHub access keys. +version_added: "2.2" +options: + token: + description: + - GitHub Access Token with permission to list and create public keys. + required: true + name: + description: + - SSH key name + required: true + pubkey: + description: + - SSH public key value. Required when C(state=present). + required: false + default: none + state: + description: + - Whether to remove a key, ensure that it exists, or update its value. + choices: ['present', 'absent'] + default: 'present' + required: false + force: + description: + - The default is C(yes), which will replace the existing remote key + if it's different than C(pubkey). If C(no), the key will only be + set if no key with the given C(name) exists. + required: false + choices: ['yes', 'no'] + default: 'yes' + +author: Robert Estelle (@erydo) +''' + +RETURN = ''' +deleted_keys: + description: An array of key objects that were deleted. 
Only present on state=absent + type: list + returned: When state=absent + sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}] +matching_keys: + description: An array of keys matching the specified name. Only present on state=present + type: list + returned: When state=present + sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}] +key: + description: Metadata about the key just created. Only present on state=present + type: dict + returned: success + sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False} +''' + +EXAMPLES = ''' +- name: Read SSH public key to authorize + shell: cat /home/foo/.ssh/id_rsa.pub + register: ssh_pub_key + +- name: Authorize key with GitHub + local_action: + module: github_key + name: Access Key for Some Machine + token: '{{ github_access_token }}' + pubkey: '{{ ssh_pub_key.stdout }}' +''' + + +import sys # noqa +import json +import re + + +API_BASE = 'https://api.github.com' + + +class GitHubResponse(object): + def __init__(self, response, info): + self.content = response.read() + self.info = info + + def json(self): + return json.loads(self.content) + + def links(self): + links = {} + if 'link' in self.info: + link_header = self.info['link'] + matches = re.findall('<([^>]+)>; rel="([^"]+)"', link_header) + for url, rel in matches: + links[rel] = url + return links + + +class GitHubSession(object): + def __init__(self, module, token): + self.module = module + self.token = token + + def request(self, method, url, data=None): + headers = { + 'Authorization': 'token %s' % self.token, + 'Content-Type': 'application/json', + 'Accept': 'application/vnd.github.v3+json', + } + response, info = fetch_url( + self.module, url, method=method, data=data, headers=headers) + if not (200 <= info['status'] < 400): + self.module.fail_json( + msg=(" failed to send request %s to %s: %s" + % (method, url, info['msg']))) + return GitHubResponse(response, info) + + +def get_all_keys(session): + url = API_BASE + '/user/keys' + while url: + r = session.request('GET', url) + for key in r.json(): + yield key + + url = r.links().get('next') + + +def create_key(session, name, pubkey, check_mode): + if check_mode: + from datetime import datetime + now = datetime.utcnow() + return { + 'id': 0, + 'key': pubkey, + 'title': name, + 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY', + 'created_at': datetime.strftime(now, '%Y-%m-%dT%H:%M:%SZ'), + 'read_only': False, + 'verified': False + } + else: + return session.request( + 'POST', + API_BASE + '/user/keys', + data=json.dumps({'title': name, 'key': pubkey})).json() + + +def delete_keys(session, to_delete, check_mode): + if check_mode: + return + + for key in to_delete: + session.request('DELETE', API_BASE + '/user/keys/%s' % key['id']) + + +def ensure_key_absent(session, name, check_mode): + to_delete = [key for key in get_all_keys(session) if key['title'] == name] + delete_keys(session, to_delete, check_mode=check_mode) + + return {'changed': bool(to_delete), + 'deleted_keys': to_delete} + + +def ensure_key_present(session, name, pubkey, force, check_mode): + matching_keys = [k for k in get_all_keys(session) if k['title'] == name] + deleted_keys = [] + + if matching_keys and force and matching_keys[0]['key'] != pubkey: + delete_keys(session, matching_keys, 
check_mode=check_mode) + (deleted_keys, matching_keys) = (matching_keys, []) + + if not matching_keys: + key = create_key(session, name, pubkey, check_mode=check_mode) + else: + key = matching_keys[0] + + return { + 'changed': bool(deleted_keys or not matching_keys), + 'deleted_keys': deleted_keys, + 'matching_keys': matching_keys, + 'key': key + } + + +def main(): + argument_spec = { + 'token': {'required': True, 'no_log': True}, + 'name': {'required': True}, + 'pubkey': {}, + 'state': {'choices': ['present', 'absent'], 'default': 'present'}, + 'force': {'default': True, 'type': 'bool'}, + } + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + token = module.params['token'] + name = module.params['name'] + state = module.params['state'] + force = module.params['force'] + pubkey = module.params.get('pubkey') + + if pubkey: + pubkey_parts = pubkey.split(' ') + # Keys consist of a protocol, the key data, and an optional comment. + if len(pubkey_parts) < 2: + module.fail_json(msg='"pubkey" parameter has an invalid format') + + # Strip out comment so we can compare to the keys GitHub returns. + pubkey = ' '.join(pubkey_parts[:2]) + elif state == 'present': + module.fail_json(msg='"pubkey" is required when state=present') + + session = GitHubSession(module, token) + if state == 'present': + result = ensure_key_present(session, name, pubkey, force=force, + check_mode=module.check_mode) + elif state == 'absent': + result = ensure_key_absent(session, name, check_mode=module.check_mode) + + module.exit_json(**result) + +from ansible.module_utils.basic import * # noqa +from ansible.module_utils.urls import * # noqa + +if __name__ == '__main__': + main() diff --git a/source_control/github_release.py b/source_control/github_release.py new file mode 100644 index 00000000000..ac59e6b69ae --- /dev/null +++ b/source_control/github_release.py @@ -0,0 +1,125 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: github_release
+short_description: Interact with GitHub Releases
+description:
+    - Fetch metadata about Github Releases
+version_added: 2.2
+options:
+    token:
+        required: true
+        description:
+            - Github Personal Access Token for authenticating
+    user:
+        required: true
+        description:
+            - The GitHub account that owns the repository
+    repo:
+        required: true
+        description:
+            - Repository name
+    action:
+        required: true
+        description:
+            - Action to perform
+        choices: [ 'latest_release' ]
+
+author:
+    - "Adrian Moisey (@adrianmoisey)"
+requirements:
+    - "github3.py >= 1.0.0a3"
+'''
+
+EXAMPLES = '''
+- name: Get latest release of test/test
+  github_release:
+    token: tokenabc1234567890
+    user: testuser
+    repo: testrepo
+    action: latest_release
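+
+# A follow-up task could consume the result; the module exits with the tag in
+# a 'tag' return key (the register name below is illustrative):
+- name: Get latest release and remember it
+  github_release:
+    token: tokenabc1234567890
+    user: testuser
+    repo: testrepo
+    action: latest_release
+  register: release
+
+- debug:
+    msg: 'Latest release tag is {{ release.tag }}'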
+'''
+
+RETURN = '''
+latest_release:
+    description: Version of the latest release
+    type: string
+    returned: success
+    sample: 1.1.0
+'''
+
+try:
+    import github3
+
+    HAS_GITHUB_API = True
+except ImportError:
+    HAS_GITHUB_API = False
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            repo=dict(required=True),
+            user=dict(required=True),
+            token=dict(required=True, no_log=True),
+            action=dict(required=True, choices=['latest_release']),
+        ),
+        supports_check_mode=True
+    )
+
+    if not HAS_GITHUB_API:
+        module.fail_json(msg='Missing required github3 module (check docs or install with: pip install github3)')
+
+    repo = module.params['repo']
+    user = module.params['user']
+    login_token = module.params['token']
+    action = module.params['action']
+
+    # login to github
+    try:
+        gh = github3.login(token=str(login_token))
+        # test if we're actually logged in
+        gh.me()
+    except github3.AuthenticationFailed:
+        e = get_exception()
+        module.fail_json(msg='Failed to connect to Github: %s' % e)
+
+    repository = gh.repository(str(user), str(repo))
+
+    if not repository:
+        module.fail_json(msg="Repository %s/%s doesn't exist" % (user, repo))
+
+    if action == 'latest_release':
+        release = repository.latest_release()
+        if release:
+            module.exit_json(tag=release.tag_name)
+        else:
+            module.exit_json(tag=None)
+
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+    main()
diff --git a/source_control/gitlab_group.py b/source_control/gitlab_group.py
new file mode 100644
index 00000000000..4c133028474
--- /dev/null
+++ b/source_control/gitlab_group.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: gitlab_group
+short_description: Creates/updates/deletes Gitlab Groups
+description:
+   - When the group does not exist in Gitlab, it will be created.
+   - When the group does exist and state=absent, the group will be deleted.
+version_added: "2.1"
+author: "Werner Dijkerman (@dj-wasabi)"
+requirements:
+    - pyapi-gitlab python module
+options:
+    server_url:
+        description:
+            - Url of Gitlab server, with protocol (http or https).
+        required: true
+    validate_certs:
+        description:
+            - Whether the SSL certificate should be verified when using https.
+        required: false
+        default: true
+        aliases:
+            - verify_ssl
+    login_user:
+        description:
+            - Gitlab user name.
+        required: false
+        default: null
+    login_password:
+        description:
+            - Gitlab password for login_user
+        required: false
+        default: null
+    login_token:
+        description:
+            - Gitlab token for logging in.
+        required: false
+        default: null
+    name:
+        description:
+            - Name of the group you want to create.
+        required: true
+    path:
+        description:
+            - The path of the group you want to create, this will be server_url/group_path
+            - If not supplied, the group_name will be used.
+        required: false
+        default: null
+    state:
+        description:
+            - Create or delete the group.
+            - Possible values are present and absent.
+        required: false
+        default: "present"
+        choices: ["present", "absent"]
+'''
+
+EXAMPLES = '''
+- name: "Delete Gitlab Group"
+  local_action: gitlab_group
+                server_url="http://gitlab.dj-wasabi.local"
+                validate_certs=false
+                login_token="WnUzDsxjy8230-Dy_k"
+                name=my_first_group
+                state=absent
+
+- name: "Create Gitlab Group"
+  local_action: gitlab_group
+                server_url="https://gitlab.dj-wasabi.local"
+                validate_certs=true
+                login_user=dj-wasabi
+                login_password="MySecretPassword"
+                name=my_first_group
+                path=my_first_group
+                state=present
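+
+# The same create task in YAML dictionary syntax (an equivalent sketch,
+# matching the style used elsewhere in this repository):
+- gitlab_group:
+    server_url: 'https://gitlab.dj-wasabi.local'
+    validate_certs: true
+    login_user: dj-wasabi
+    login_password: 'MySecretPassword'
+    name: my_first_group
+    path: my_first_group
+    state: present
+  delegate_to: localhost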
+'''
+
+RETURN = '''# '''
+
+try:
+    import gitlab
+    HAS_GITLAB_PACKAGE = True
+except:
+    HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
+class GitLabGroup(object):
+    def __init__(self, module, git):
+        self._module = module
+        self._gitlab = git
+
+    def createGroup(self, group_name, group_path):
+        if self._module.check_mode:
+            self._module.exit_json(changed=True)
+        return self._gitlab.creategroup(group_name, group_path)
+
+    def deleteGroup(self, group_name):
+        is_group_empty = True
+        group_id = self.idGroup(group_name)
+
+        for project in self._gitlab.getall(self._gitlab.getprojects):
+            owner = project['namespace']['name']
+            if owner == group_name:
+                is_group_empty = False
+
+        if is_group_empty:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True)
+            return self._gitlab.deletegroup(group_id)
+        else:
+            self._module.fail_json(msg="There are still projects in this group. These need to be moved or deleted before this group can be removed.")
+
+    def existsGroup(self, group_name):
+        for group in self._gitlab.getall(self._gitlab.getgroups):
+            if group['name'] == group_name:
+                return True
+        return False
+
+    def idGroup(self, group_name):
+        for group in self._gitlab.getall(self._gitlab.getgroups):
+            if group['name'] == group_name:
+                return group['id']
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            server_url=dict(required=True),
+            validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
+            login_user=dict(required=False, no_log=True),
+            login_password=dict(required=False, no_log=True),
+            login_token=dict(required=False, no_log=True),
+            name=dict(required=True),
+            path=dict(required=False),
+            state=dict(default="present", choices=["present", "absent"]),
+        ),
+        supports_check_mode=True
+    )
+
+    if not HAS_GITLAB_PACKAGE:
+        module.fail_json(msg="Missing required gitlab module (check docs or install with: pip install pyapi-gitlab)")
+
+    server_url = module.params['server_url']
+    verify_ssl = module.params['validate_certs']
+    login_user = module.params['login_user']
+    login_password = module.params['login_password']
+    login_token = module.params['login_token']
+    group_name = module.params['name']
+    group_path = module.params['path']
+    state = module.params['state']
+
+    # We need both login_user and login_password, or login_token; otherwise we fail.
+    if login_user is not None and login_password is not None:
+        use_credentials = True
+    elif login_token is not None:
+        use_credentials = False
+    else:
+        module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token")
+
+    # Set group_path to group_name if it is empty.
+    if group_path is None:
+        group_path = group_name.replace(" ", "_")
+
+    # Let's make a connection to the Gitlab server_url, with either login_user and login_password
+    # or with login_token
+    try:
+        if use_credentials:
+            git = gitlab.Gitlab(host=server_url, verify_ssl=verify_ssl)
+            git.login(user=login_user, password=login_password)
+        else:
+            git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl)
+    except Exception:
+        e = get_exception()
+        module.fail_json(msg="Failed to connect to Gitlab server: %s " % e)
+
+    # Validate if group exists and take action based on "state"
+    group = GitLabGroup(module, git)
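+    # Group names are compared in lower case because GitLab treats them
+    # case-insensitively (the gitlab_project module uses the same convention).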
+    group_name = group_name.lower()
+    group_exists = group.existsGroup(group_name)
+
+    if group_exists and state == "absent":
+        group.deleteGroup(group_name)
+        module.exit_json(changed=True, result="Successfully deleted group %s" % group_name)
+    else:
+        if state == "absent":
+            module.exit_json(changed=False, result="Group deleted or does not exist")
+        else:
+            if group_exists:
+                module.exit_json(changed=False)
+            else:
+                if group.createGroup(group_name, group_path):
+                    module.exit_json(changed=True, result="Successfully created or updated the group %s" % group_name)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/source_control/gitlab_project.py b/source_control/gitlab_project.py
new file mode 100644
index 00000000000..94852afac86
--- /dev/null
+++ b/source_control/gitlab_project.py
@@ -0,0 +1,405 @@
+#!/usr/bin/python
+# (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: gitlab_project
+short_description: Creates/updates/deletes Gitlab Projects
+description:
+   - When the project does not exist in Gitlab, it will be created.
+   - When the project does exist and state=absent, the project will be deleted.
+   - When changes are made to the project, the project will be updated.
+version_added: "2.1"
+author: "Werner Dijkerman (@dj-wasabi)"
+requirements:
+    - pyapi-gitlab python module
+options:
+    server_url:
+        description:
+            - Url of Gitlab server, with protocol (http or https).
+        required: true
+    validate_certs:
+        description:
+            - Whether the SSL certificate should be verified when using https.
+        required: false
+        default: true
+        aliases:
+            - verify_ssl
+    login_user:
+        description:
+            - Gitlab user name.
+        required: false
+        default: null
+    login_password:
+        description:
+            - Gitlab password for login_user
+        required: false
+        default: null
+    login_token:
+        description:
+            - Gitlab token for logging in.
+        required: false
+        default: null
+    group:
+        description:
+            - The name of the group the project belongs to.
+            - When not provided, the project will belong to the user configured in 'login_user' or 'login_token'
+            - When provided with a username, the project will be created for this user. 'login_user' or 'login_token' needs admin rights.
+        required: false
+        default: null
+    name:
+        description:
+            - The name of the project
+        required: true
+    path:
+        description:
+            - The path of the project you want to create, this will be server_url//path
+            - If not supplied, name will be used.
+        required: false
+        default: null
+    description:
+        description:
+            - A description for the project.
+        required: false
+        default: null
+    issues_enabled:
+        description:
+            - Whether you want to create issues or not.
+            - Possible values are true and false.
+        required: false
+        default: true
+    merge_requests_enabled:
+        description:
+            - If merge requests can be made or not.
+            - Possible values are true and false.
+        required: false
+        default: true
+    wiki_enabled:
+        description:
+            - If a wiki for this project should be available or not.
+            - Possible values are true and false.
+        required: false
+        default: true
+    snippets_enabled:
+        description:
+            - If creating snippets should be available or not.
+            - Possible values are true and false.
+        required: false
+        default: true
+    public:
+        description:
+            - If the project is publicly available or not.
+            - Setting this to true is the same as setting visibility_level to 20.
+            - Possible values are true and false.
+        required: false
+        default: false
+    visibility_level:
+        description:
+            - Private. visibility_level is 0. Project access must be granted explicitly for each user.
+            - Internal. visibility_level is 10. The project can be cloned by any logged in user.
+            - Public. visibility_level is 20. The project can be cloned without any authentication.
+            - Possible values are 0, 10 and 20.
+        required: false
+        default: 0
+    import_url:
+        description:
+            - Git repository which will be imported into gitlab.
+            - Gitlab server needs read access to this git repository.
+        required: false
+        default: false
+    state:
+        description:
+            - Create or delete the project.
+            - Possible values are present and absent.
+        required: false
+        default: "present"
+        choices: ["present", "absent"]
+'''
+
+EXAMPLES = '''
+- name: "Delete Gitlab Project"
+  local_action: gitlab_project
+                server_url="http://gitlab.dj-wasabi.local"
+                validate_certs=false
+                login_token="WnUzDsxjy8230-Dy_k"
+                name=my_first_project
+                state=absent
+
+- name: "Create Gitlab Project in group Ansible"
+  local_action: gitlab_project
+                server_url="https://gitlab.dj-wasabi.local"
+                validate_certs=true
+                login_user=dj-wasabi
+                login_password="MySecretPassword"
+                name=my_first_project
+                group=ansible
+                issues_enabled=false
+                wiki_enabled=true
+                snippets_enabled=true
+                import_url="http://git.example.com/example/lab.git"
+                state=present
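+
+# The same create task in YAML dictionary syntax (an equivalent sketch):
+- gitlab_project:
+    server_url: 'https://gitlab.dj-wasabi.local'
+    validate_certs: true
+    login_user: dj-wasabi
+    login_password: 'MySecretPassword'
+    name: my_first_project
+    group: ansible
+    issues_enabled: false
+    wiki_enabled: true
+    snippets_enabled: true
+    import_url: 'http://git.example.com/example/lab.git'
+    state: present
+  delegate_to: localhost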
+'''
+
+RETURN = '''# '''
+
+try:
+    import gitlab
+    HAS_GITLAB_PACKAGE = True
+except:
+    HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
+
+class GitLabProject(object):
+    def __init__(self, module, git):
+        self._module = module
+        self._gitlab = git
+
+    def createOrUpdateProject(self, project_exists, group_name, import_url, arguments):
+        is_user = False
+        group_id = self.getGroupId(group_name)
+        if not group_id:
+            group_id = self.getUserId(group_name)
+            is_user = True
+
+        if project_exists:
+            # Edit project
+            return self.updateProject(group_name, arguments)
+        else:
+            # Create project
+            if self._module.check_mode:
+                self._module.exit_json(changed=True)
+            return self.createProject(is_user, group_id, import_url, arguments)
+
+    def createProject(self, is_user, user_id, import_url, arguments):
+        if is_user:
+            return self._gitlab.createprojectuser(user_id=user_id, import_url=import_url, **arguments)
+        else:
+            group_id = user_id
+            return self._gitlab.createproject(namespace_id=group_id, import_url=import_url, **arguments)
+
+    def deleteProject(self, group_name, project_name):
+        if self.existsGroup(group_name):
+            project_owner = group_name
+        else:
+            project_owner = self._gitlab.currentuser()['username']
+
+        search_results = self._gitlab.searchproject(search=project_name)
+        for result in search_results:
+            owner = result['namespace']['name']
+            if owner == project_owner:
+                return self._gitlab.deleteproject(result['id'])
+
+    def existsProject(self, group_name, project_name):
+        if self.existsGroup(group_name):
+            project_owner = group_name
+        else:
+            project_owner = self._gitlab.currentuser()['username']
+
+        search_results = self._gitlab.searchproject(search=project_name)
+        for result in search_results:
+            owner = result['namespace']['name']
+            if owner == project_owner:
+                return True
+        return False
+
+    def existsGroup(self, group_name):
+        if group_name is not None:
+            # Find the group; if the group doesn't exist we try for a user
+            for group in self._gitlab.getall(self._gitlab.getgroups):
+                if group['name'] == group_name:
+                    return True
+
+            user_name = group_name
+            user_data = self._gitlab.getusers(search=user_name)
+            for data in user_data:
+                if 'id' in data:
+                    return True
+        return False
+
+    def getGroupId(self, group_name):
+        if group_name is not None:
+            # Find the group; if the group doesn't exist we try for a user
+            for group in self._gitlab.getall(self._gitlab.getgroups):
+                if group['name'] == group_name:
+                    return group['id']
+
+    def getProjectId(self, group_name, project_name):
+        if self.existsGroup(group_name):
+            project_owner = group_name
+        else:
+            project_owner = self._gitlab.currentuser()['username']
+
+        search_results = self._gitlab.searchproject(search=project_name)
+        for result in search_results:
+            owner = result['namespace']['name']
+            if owner == project_owner:
+                return result['id']
+
+    def getUserId(self, user_name):
+        user_data = self._gitlab.getusers(search=user_name)
+
+        for data in user_data:
+            if 'id' in data:
+                return data['id']
+        return self._gitlab.currentuser()['id']
+
+    def to_bool(self, value):
+        if value:
+            return 1
+        else:
+            return 0
+
+    def updateProject(self, group_name, arguments):
+        project_changed = False
+        project_name = arguments['name']
+        project_id = self.getProjectId(group_name, project_name)
+        project_data = self._gitlab.getproject(project_id=project_id)
+
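+        # The GitLab API reports these settings as booleans while our payload
+        # uses 0/1 flags, so normalise through to_bool() before comparing.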
+        for arg_key, arg_value in arguments.items():
+            project_data_value = project_data[arg_key]
+
+            if isinstance(project_data_value, bool) or project_data_value is None:
+                to_bool = self.to_bool(project_data_value)
+                if to_bool != arg_value:
+                    project_changed = True
+                    continue
+            else:
+                if project_data_value != arg_value:
+                    project_changed = True
+
+        if project_changed:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True)
+            return self._gitlab.editproject(project_id=project_id, **arguments)
+        else:
+            return False
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            server_url=dict(required=True),
+            validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
+            login_user=dict(required=False, no_log=True),
+            login_password=dict(required=False, no_log=True),
+            login_token=dict(required=False, no_log=True),
+            group=dict(required=False),
+            name=dict(required=True),
+            path=dict(required=False),
+            description=dict(required=False),
+            issues_enabled=dict(default=True, type='bool'),
+            merge_requests_enabled=dict(default=True, type='bool'),
+            wiki_enabled=dict(default=True, type='bool'),
+            snippets_enabled=dict(default=True, type='bool'),
+            public=dict(default=False, type='bool'),
+            visibility_level=dict(default="0", choices=["0", "10", "20"]),
+            import_url=dict(required=False),
+            state=dict(default="present", choices=["present", "absent"]),
+        ),
+        supports_check_mode=True
+    )
+
+    if not HAS_GITLAB_PACKAGE:
+        module.fail_json(msg="Missing required gitlab module (check docs or install with: pip install pyapi-gitlab)")
+
+    server_url = module.params['server_url']
+    verify_ssl = module.params['validate_certs']
+    login_user = module.params['login_user']
+    login_password = module.params['login_password']
+    login_token = module.params['login_token']
+    group_name = module.params['group']
+    project_name = module.params['name']
+    project_path = module.params['path']
+    description = module.params['description']
+    issues_enabled = module.params['issues_enabled']
+    merge_requests_enabled = module.params['merge_requests_enabled']
+    wiki_enabled = module.params['wiki_enabled']
+    snippets_enabled = module.params['snippets_enabled']
+    public = module.params['public']
+    visibility_level = module.params['visibility_level']
+    import_url = module.params['import_url']
+    state = module.params['state']
+
+    # We need both login_user and login_password, or login_token; otherwise we fail.
+    if login_user is not None and login_password is not None:
+        use_credentials = True
+    elif login_token is not None:
+        use_credentials = False
+    else:
+        module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token")
+
+    # Set project_path to project_name if it is empty.
+    if project_path is None:
+        project_path = project_name.replace(" ", "_")
+
+    # The Gitlab API makes no difference between upper and lower case, so we lower them.
+    project_name = project_name.lower()
+    project_path = project_path.lower()
+    if group_name is not None:
+        group_name = group_name.lower()
+
+    # Let's make a connection to the Gitlab server_url, with either login_user and login_password
+    # or with login_token
+    try:
+        if use_credentials:
+            git = gitlab.Gitlab(host=server_url, verify_ssl=verify_ssl)
+            git.login(user=login_user, password=login_password)
+        else:
+            git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl)
+    except Exception:
+        e = get_exception()
+        module.fail_json(msg="Failed to connect to Gitlab server: %s " % e)
+
+    # Validate if project exists and take action based on "state"
+    project = GitLabProject(module, git)
+    project_exists = project.existsProject(group_name, project_name)
+
+    # Build the project arguments dict; boolean options become the 0/1 flags pyapi-gitlab expects.
+    arguments = {"name": project_name,
+                 "path": project_path,
+                 "description": description,
+                 "issues_enabled": project.to_bool(issues_enabled),
+                 "merge_requests_enabled": project.to_bool(merge_requests_enabled),
+                 "wiki_enabled": project.to_bool(wiki_enabled),
+                 "snippets_enabled": project.to_bool(snippets_enabled),
+                 "public": project.to_bool(public),
+                 "visibility_level": int(visibility_level)}
+
+    if project_exists and state == "absent":
+        project.deleteProject(group_name, project_name)
+        module.exit_json(changed=True, result="Successfully deleted project %s" % project_name)
+    else:
+        if state == "absent":
+            module.exit_json(changed=False, result="Project deleted or does not exist")
+        else:
+            if project.createOrUpdateProject(project_exists, group_name, import_url, arguments):
+                module.exit_json(changed=True, result="Successfully created or updated the project %s" % project_name)
+            else:
+                module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/source_control/gitlab_user.py b/source_control/gitlab_user.py
new file mode 100644
index 00000000000..e289d70e2c0
--- /dev/null
+++ b/source_control/gitlab_user.py
@@ -0,0 +1,355 @@
+#!/usr/bin/python
+# (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: gitlab_user
+short_description: Creates/updates/deletes Gitlab Users
+description:
+   - When the user does not exist in Gitlab, it will be created.
+   - When the user does exist and state=absent, the user will be deleted.
+   - When changes are made to the user, the user will be updated.
+version_added: "2.1"
+author: "Werner Dijkerman (@dj-wasabi)"
+requirements:
+    - pyapi-gitlab python module
+options:
+    server_url:
+        description:
+            - Url of Gitlab server, with protocol (http or https).
+        required: true
+    validate_certs:
+        description:
+            - Whether the SSL certificate should be verified when using https.
+        required: false
+        default: true
+        aliases:
+            - verify_ssl
+    login_user:
+        description:
+            - Gitlab user name.
+        required: false
+        default: null
+    login_password:
+        description:
+            - Gitlab password for login_user
+        required: false
+        default: null
+    login_token:
+        description:
+            - Gitlab token for logging in.
+        required: false
+        default: null
+    name:
+        description:
+            - Name of the user you want to create
+        required: true
+    username:
+        description:
+            - The username of the user.
+        required: true
+    password:
+        description:
+            - The password of the user.
+        required: true
+    email:
+        description:
+            - The email that belongs to the user.
+        required: true
+    sshkey_name:
+        description:
+            - The name of the sshkey
+        required: false
+        default: null
+    sshkey_file:
+        description:
+            - The ssh key itself.
+        required: false
+        default: null
+    group:
+        description:
+            - Add user as a member to this group.
+        required: false
+        default: null
+    access_level:
+        description:
+            - The access level to the group. One of the following can be used.
+            - guest
+            - reporter
+            - developer
+            - master
+            - owner
+        required: false
+        default: null
+    state:
+        description:
+            - Create or delete the user.
+            - Possible values are present and absent.
+        required: false
+        default: present
+        choices: ["present", "absent"]
+'''
+
+EXAMPLES = '''
+- name: "Delete Gitlab User"
+  local_action: gitlab_user
+                server_url="http://gitlab.dj-wasabi.local"
+                validate_certs=false
+                login_token="WnUzDsxjy8230-Dy_k"
+                username=myusername
+                state=absent
+
+- name: "Create Gitlab User"
+  local_action: gitlab_user
+                server_url="https://gitlab.dj-wasabi.local"
+                validate_certs=true
+                login_user=dj-wasabi
+                login_password="MySecretPassword"
+                name=My Name
+                username=myusername
+                password=mysecretpassword
+                email=me@home.com
+                sshkey_name=MySSH
+                sshkey_file=ssh-rsa AAAAB3NzaC1yc...
+ state=present +''' + +RETURN = '''# ''' + +try: + import gitlab + HAS_GITLAB_PACKAGE = True +except: + HAS_GITLAB_PACKAGE = False + +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.basic import * + + +class GitLabUser(object): + def __init__(self, module, git): + self._module = module + self._gitlab = git + + def addToGroup(self, group_id, user_id, access_level): + if access_level == "guest": + level = 10 + elif access_level == "reporter": + level = 20 + elif access_level == "developer": + level = 30 + elif access_level == "master": + level = 40 + elif access_level == "owner": + level = 50 + return self._gitlab.addgroupmember(group_id, user_id, level) + + def createOrUpdateUser(self, user_name, user_username, user_password, user_email, user_sshkey_name, user_sshkey_file, group_name, access_level): + group_id = '' + arguments = {"name": user_name, + "username": user_username, + "email": user_email} + + if group_name is not None: + if self.existsGroup(group_name): + group_id = self.getGroupId(group_name) + + if self.existsUser(user_username): + self.updateUser(group_id, user_sshkey_name, user_sshkey_file, access_level, arguments) + else: + if self._module.check_mode: + self._module.exit_json(changed=True) + self.createUser(group_id, user_password, user_sshkey_name, user_sshkey_file, access_level, arguments) + + def createUser(self, group_id, user_password, user_sshkey_name, user_sshkey_file, access_level, arguments): + user_changed = False + + # Create the user + user_username = arguments['username'] + user_name = arguments['name'] + user_email = arguments['email'] + if self._gitlab.createuser(password=user_password, **arguments): + user_id = self.getUserId(user_username) + if self._gitlab.addsshkeyuser(user_id=user_id, title=user_sshkey_name, key=user_sshkey_file): + user_changed = True + # Add the user to the group if group_id is not empty + if group_id != '': + if self.addToGroup(group_id, user_id, access_level): + user_changed = True + user_changed = True + + # Exit with change to true or false + if user_changed: + self._module.exit_json(changed=True, result="Created the user") + else: + self._module.exit_json(changed=False) + + def deleteUser(self, user_username): + user_id = self.getUserId(user_username) + + if self._gitlab.deleteuser(user_id): + self._module.exit_json(changed=True, result="Successfully deleted user %s" % user_username) + else: + self._module.exit_json(changed=False, result="User %s already deleted or something went wrong" % user_username) + + def existsGroup(self, group_name): + for group in self._gitlab.getall(self._gitlab.getgroups): + if group['name'] == group_name: + return True + return False + + def existsUser(self, username): + found_user = self._gitlab.getusers(search=username) + for user in found_user: + if user['id'] != '': + return True + return False + + def getGroupId(self, group_name): + for group in self._gitlab.getall(self._gitlab.getgroups): + if group['name'] == group_name: + return group['id'] + + def getUserId(self, username): + found_user = self._gitlab.getusers(search=username) + for user in found_user: + if user['id'] != '': + return user['id'] + + def updateUser(self, group_id, user_sshkey_name, user_sshkey_file, access_level, arguments): + user_changed = False + user_username = arguments['username'] + user_id = self.getUserId(user_username) + user_data = self._gitlab.getuser(user_id=user_id) + + # Lets check if we need to update the user + for arg_key, arg_value in arguments.items(): + if 
user_data[arg_key] != arg_value:
+                user_changed = True
+
+        if user_changed:
+            if self._module.check_mode:
+                self._module.exit_json(changed=True)
+            self._gitlab.edituser(user_id=user_id, **arguments)
+            user_changed = True
+        if self._module.check_mode or self._gitlab.addsshkeyuser(user_id=user_id, title=user_sshkey_name, key=user_sshkey_file):
+            user_changed = True
+        if group_id != '':
+            if self._module.check_mode or self.addToGroup(group_id, user_id, access_level):
+                user_changed = True
+        if user_changed:
+            self._module.exit_json(changed=True, result="The user %s is updated" % user_username)
+        else:
+            self._module.exit_json(changed=False, result="The user %s is already up to date" % user_username)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            server_url=dict(required=True),
+            validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
+            login_user=dict(required=False, no_log=True),
+            login_password=dict(required=False, no_log=True),
+            login_token=dict(required=False, no_log=True),
+            name=dict(required=True),
+            username=dict(required=True),
+            password=dict(required=True, no_log=True),
+            email=dict(required=True),
+            sshkey_name=dict(required=False),
+            sshkey_file=dict(required=False),
+            group=dict(required=False),
+            access_level=dict(required=False, choices=["guest", "reporter", "developer", "master", "owner"]),
+            state=dict(default="present", choices=["present", "absent"]),
+        ),
+        supports_check_mode=True
+    )
+
+    if not HAS_GITLAB_PACKAGE:
+        module.fail_json(msg="Missing required gitlab module (check docs or install with: pip install pyapi-gitlab)")
+
+    server_url = module.params['server_url']
+    verify_ssl = module.params['validate_certs']
+    login_user = module.params['login_user']
+    login_password = module.params['login_password']
+    login_token = module.params['login_token']
+    user_name = module.params['name']
+    user_username = module.params['username']
+    user_password = module.params['password']
+    user_email = module.params['email']
+    user_sshkey_name = module.params['sshkey_name']
+    user_sshkey_file = module.params['sshkey_file']
+    group_name = module.params['group']
+    access_level = module.params['access_level']
+    state = module.params['state']
+
+    # We need both login_user and login_password, or login_token; otherwise we fail.
+    if login_user is not None and login_password is not None:
+        use_credentials = True
+    elif login_token is not None:
+        use_credentials = False
+    else:
+        module.fail_json(msg="No login credentials are given. Use login_user with login_password, or login_token")
+
+    # An SSH key is only handled when both its name and the key itself are given.
+    if user_sshkey_file is not None and user_sshkey_name is not None:
+        use_sshkey = True
+    else:
+        use_sshkey = False
+
+    if group_name is not None and access_level is not None:
+        add_to_group = True
+        group_name = group_name.lower()
+    else:
+        add_to_group = False
+
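+    # Usernames are matched in lower case; GitLab treats them
+    # case-insensitively, as the other gitlab_* modules in this directory do.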
+    user_username = user_username.lower()
+
+    # Let's make a connection to the Gitlab server_url, with either login_user and login_password
+    # or with login_token
+    try:
+        if use_credentials:
+            git = gitlab.Gitlab(host=server_url, verify_ssl=verify_ssl)
+            git.login(user=login_user, password=login_password)
+        else:
+            git = gitlab.Gitlab(server_url, token=login_token, verify_ssl=verify_ssl)
+    except Exception:
+        e = get_exception()
+        module.fail_json(msg="Failed to connect to Gitlab server: %s " % e)
+
+    # Validate if the user exists and take action based on "state"
+    user = GitLabUser(module, git)
+
+    # If the user does not exist and state=absent, we exit nicely.
+    if not user.existsUser(user_username) and state == "absent":
+        module.exit_json(changed=False, result="User already deleted or does not exist")
+    else:
+        # User exists
+        if state == "absent":
+            user.deleteUser(user_username)
+        else:
+            user.createOrUpdateUser(user_name, user_username, user_password, user_email, user_sshkey_name, user_sshkey_file, group_name, access_level)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/storage/__init__.py b/storage/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/storage/netapp/README.md b/storage/netapp/README.md
new file mode 100644
index 00000000000..8d5ab2fd4cf
--- /dev/null
+++ b/storage/netapp/README.md
@@ -0,0 +1,454 @@
+# NetApp Storage Modules
+This directory contains modules that support the storage platforms in the NetApp portfolio.
+
+## SANtricity Modules
+The modules prefixed with *netapp\_e* are built to support the SANtricity storage platform. They require the SANtricity
+WebServices Proxy. The WebServices Proxy is free software available at the [NetApp Software Download site](http://mysupport.netapp.com/NOW/download/software/eseries_webservices/1.40.X000.0009/).
+Starting with the E2800 platform (11.30 OS), the modules can also work directly with the storage array, which handles
+the REST API requests on the box itself; such arrays can still be managed by proxy for large-scale deployments.
+The modules provide idempotent provisioning for volume groups, disk pools, standard volumes, thin volumes, LUN mapping,
+hosts, host groups (clusters), volume snapshots, consistency groups, and asynchronous mirroring.
+
+### Prerequisites
+| Software | Version |
+| -------- |:-------:|
+| SANtricity Web Services Proxy\* | 1.4 or 2.0 |
+| Ansible | 2.2\*\* |
+
+\* Not required for *E2800 with 11.30 OS*
+\*\* The modules were developed with this version; Ansible's usual forward and backward compatibility applies.
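+
+Before working through the full example below, a minimal playbook can verify connectivity to the proxy (the host,
+ssid and credentials here are placeholders taken from the larger example):
+
+```yml
+- name: NetApp connectivity smoke test
+  hosts: localhost
+  connection: local
+  tasks:
+    - name: Gather array facts
+      netapp_e_facts:
+        ssid: ansible1
+        api_url: http://10.251.230.29/devmgr/v2
+        api_username: rw
+        api_password: rw
+        validate_certs: no
+```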
+
+### Questions and Contribution
+Please feel free to submit pull requests with improvements. Issues for these modules should be routed to @hulquest, but
+we also try to keep an eye on the list for issues specific to these modules. General questions can be sent to our [development team](mailto:ng-hsg-engcustomer-esolutions-support@netapp.com)
+
+### Examples
+These examples are not comprehensive, but they are intended to help you get started when integrating storage provisioning into
+your playbooks.
+```yml
+- name: NetApp Test All Modules
+  hosts: proxy20
+  gather_facts: yes
+  connection: local
+  vars:
+    storage_systems:
+      ansible1:
+        address1: "10.251.230.41"
+        address2: "10.251.230.42"
+      ansible2:
+        address1: "10.251.230.43"
+        address2: "10.251.230.44"
+      ansible3:
+        address1: "10.251.230.45"
+        address2: "10.251.230.46"
+      ansible4:
+        address1: "10.251.230.47"
+        address2: "10.251.230.48"
+    storage_pools:
+      Disk_Pool_1:
+        raid_level: raidDiskPool
+        criteria_drive_count: 11
+      Disk_Pool_2:
+        raid_level: raidDiskPool
+        criteria_drive_count: 11
+      Disk_Pool_3:
+        raid_level: raid0
+        criteria_drive_count: 2
+    volumes:
+      vol_1:
+        storage_pool_name: Disk_Pool_1
+        size: 10
+        thin_provision: false
+        thin_volume_repo_size: 7
+      vol_2:
+        storage_pool_name: Disk_Pool_2
+        size: 10
+        thin_provision: false
+        thin_volume_repo_size: 7
+      vol_3:
+        storage_pool_name: Disk_Pool_3
+        size: 10
+        thin_provision: false
+        thin_volume_repo_size: 7
+      thin_vol_1:
+        storage_pool_name: Disk_Pool_1
+        size: 10
+        thin_provision: true
+        thin_volume_repo_size: 7
+    hosts:
+      ANSIBLE-1:
+        host_type: 1
+        index: 1
+        ports:
+          - type: 'fc'
+            label: 'fpPort1'
+            port: '2100000E1E191B01'
+
+    netapp_api_host: 10.251.230.29
+    netapp_api_url: http://{{ netapp_api_host }}/devmgr/v2
+    netapp_api_username: rw
+    netapp_api_password: rw
+    ssid: ansible1
+    auth: no
+    lun_mapping: no
+    netapp_api_validate_certs: False
+    snapshot: no
+    gather_facts: no
+    amg_create: no
+    remove_volume: no
+    make_volume: no
+    check_thins: no
+    remove_storage_pool: yes
+    check_storage_pool: yes
+    remove_storage_system: no
+    check_storage_system: yes
+    change_role: no
+    flash_cache: False
+    configure_hostgroup: no
+    configure_async_mirror: False
+    configure_snapshot: no
+    copy_volume: False
+    volume_copy_source_volume_id:
+    volume_destination_source_volume_id:
+    snapshot_volume_storage_pool_name: Disk_Pool_3
+    snapshot_volume_image_id: 3400000060080E5000299B640063074057BC5C5E
+    snapshot_volume: no
+    snapshot_volume_name: vol_1_snap_vol
+    host_type_index: 1
+    host_name: ANSIBLE-1
+    set_host: no
+    remove_host: no
+    amg_member_target_array:
+    amg_member_primary_pool:
+    amg_member_secondary_pool:
+    amg_member_primary_volume:
+    amg_member_secondary_volume:
+    set_amg_member: False
+    amg_array_name: foo
+    amg_name: amg_made_by_ansible
+    amg_secondaryArrayId: ansible2
+    amg_sync_name: foo
+    amg_sync: no
+
+  tasks:
+
+    - name: Get array facts
+      netapp_e_facts:
+        ssid: "{{ item.key }}"
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: "{{ netapp_api_validate_certs }}"
+      with_dict: "{{ storage_systems }}"
+      when: gather_facts
+
+    - name: Presence of storage system
+      netapp_e_storage_system:
+        ssid: "{{ item.key }}"
+        state: present
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: "{{
netapp_api_validate_certs }}" + controller_addresses: + - "{{ item.value.address1 }}" + - "{{ item.value.address2 }}" + with_dict: "{{ storage_systems }}" + when: check_storage_system + + - name: Create Snapshot + netapp_e_snapshot_images: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + snapshot_group: "ansible_snapshot_group" + state: 'create' + when: snapshot + + - name: Auth Module Example + netapp_e_auth: + ssid: "{{ ssid }}" + current_password: 'Infinit2' + new_password: 'Infinit1' + set_admin: yes + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + when: auth + + - name: No disk groups + netapp_e_storagepool: + ssid: "{{ ssid }}" + name: "{{ item }}" + state: absent + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + remove_volumes: yes + with_items: + - Disk_Pool_1 + - Disk_Pool_2 + - Disk_Pool_3 + when: remove_storage_pool + + - name: Make disk groups + netapp_e_storagepool: + ssid: "{{ ssid }}" + name: "{{ item.key }}" + state: present + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + raid_level: "{{ item.value.raid_level }}" + criteria_drive_count: "{{ item.value.criteria_drive_count }}" + with_dict: " {{ storage_pools }}" + when: check_storage_pool + + - name: No thin volume + netapp_e_volume: + ssid: "{{ ssid }}" + name: NewThinVolumeByAnsible + state: absent + thin_provision: yes + log_path: /tmp/volume.log + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + when: check_thins + + - name: Make a thin volume + netapp_e_volume: + ssid: "{{ ssid }}" + name: NewThinVolumeByAnsible + state: present + thin_provision: yes + thin_volume_repo_size: 7 + size: 10 + log_path: /tmp/volume.log + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + storage_pool_name: Disk_Pool_1 + when: check_thins + + - name: Remove standard/thick volumes + netapp_e_volume: + ssid: "{{ ssid }}" + name: "{{ item.key }}" + state: absent + log_path: /tmp/volume.log + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + with_dict: "{{ volumes }}" + when: remove_volume + + - name: Make a volume + netapp_e_volume: + ssid: "{{ ssid }}" + name: "{{ item.key }}" + state: present + storage_pool_name: "{{ item.value.storage_pool_name }}" + size: "{{ item.value.size }}" + thin_provision: "{{ item.value.thin_provision }}" + thin_volume_repo_size: "{{ item.value.thin_volume_repo_size }}" + log_path: /tmp/volume.log + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + with_dict: "{{ volumes }}" + when: make_volume + + - name: No storage system + netapp_e_storage_system: + ssid: "{{ item.key }}" + state: absent + api_url: "{{ netapp_api_url }}" + api_username: "{{ 
netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + with_dict: "{{ storage_systems }}" + when: remove_storage_system + + - name: Update the role of a storage array + netapp_e_amg_role: + name: "{{ amg_name }}" + role: primary + force: true + noSync: true + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + when: change_role + + - name: Flash Cache + netapp_e_flashcache: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + name: SSDCacheBuiltByAnsible + when: flash_cache + + - name: Configure Hostgroup + netapp_e_hostgroup: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + state: absent + name: "ansible-host-group" + when: configure_hostgroup + + - name: Configure Snapshot group + netapp_e_snapshot_group: + ssid: "{{ ssid }}" + state: present + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + base_volume_name: vol_3 + name: ansible_snapshot_group + repo_pct: 20 + warning_threshold: 85 + delete_limit: 30 + full_policy: purgepit + storage_pool_name: Disk_Pool_3 + rollback_priority: medium + when: configure_snapshot + + - name: Copy volume + netapp_e_volume_copy: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + status: present + source_volume_id: "{{ volume_copy_source_volume_id }}" + destination_volume_id: "{{ volume_destination_source_volume_id }}" + when: copy_volume + + - name: Snapshot volume + netapp_e_snapshot_volume: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + state: present + storage_pool_name: "{{ snapshot_volume_storage_pool_name }}" + snapshot_image_id: "{{ snapshot_volume_image_id }}" + name: "{{ snapshot_volume_name }}" + when: snapshot_volume + + - name: Remove hosts + netapp_e_host: + ssid: "{{ ssid }}" + state: absent + name: "{{ item.key }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + host_type_index: "{{ host_type_index }}" + with_dict: "{{hosts}}" + when: remove_host + + - name: Ensure/add hosts + netapp_e_host: + ssid: "{{ ssid }}" + state: present + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + name: "{{ item.key }}" + host_type_index: "{{ item.value.index }}" + ports: + - type: 'fc' + label: 'fpPort1' + port: '2100000E1E191B01' + with_dict: "{{hosts}}" + when: set_host + + - name: Unmap a volume + netapp_e_lun_mapping: + state: absent + ssid: "{{ ssid }}" + lun: 2 + target: "{{ host_name }}" + volume_name: "thin_vol_1" + target_type: host + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + when: lun_mapping + + - name: Map a volume + netapp_e_lun_mapping: + state: present + ssid: "{{ ssid }}" + lun: 16 + target: "{{ host_name }}" + 
volume_name: "thin_vol_1" + target_type: host + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + when: lun_mapping + + - name: Update LUN Id + netapp_e_lun_mapping: + state: present + ssid: "{{ ssid }}" + lun: 2 + target: "{{ host_name }}" + volume_name: "thin_vol_1" + target_type: host + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + when: lun_mapping + + - name: AMG removal + netapp_e_amg: + state: absent + ssid: "{{ ssid }}" + secondaryArrayId: "{{amg_secondaryArrayId}}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + new_name: "{{amg_array_name}}" + name: "{{amg_name}}" + when: amg_create + + - name: AMG create + netapp_e_amg: + state: present + ssid: "{{ ssid }}" + secondaryArrayId: "{{amg_secondaryArrayId}}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + new_name: "{{amg_array_name}}" + name: "{{amg_name}}" + when: amg_create + + - name: start AMG async + netapp_e_amg_sync: + name: "{{ amg_name }}" + state: running + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + when: amg_sync +``` diff --git a/storage/netapp/__init__.py b/storage/netapp/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/storage/netapp/netapp_e_amg.py b/storage/netapp/netapp_e_amg.py new file mode 100644 index 00000000000..e5f60b29454 --- /dev/null +++ b/storage/netapp/netapp_e_amg.py @@ -0,0 +1,332 @@ +#!/usr/bin/python +# (c) 2016, NetApp, Inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: netapp_e_amg +short_description: Create, Remove, and Update Asynchronous Mirror Groups +description: + - Allows for the creation, removal and updating of Asynchronous Mirror Groups for NetApp E-series storage arrays +version_added: '2.2' +author: Kevin Hulquest (@hulquest) +options: + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + example: + - https://prod-1.wahoo.acme.com/devmgr/v2 + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + name: + description: + - The name of the async array you wish to target, or create. + - If C(state) is present and the name isn't found, it will attempt to create. 
+    required: yes
+  secondaryArrayId:
+    description:
+      - The ID of the secondary array to be used in the mirroring process
+    required: yes
+  syncIntervalMinutes:
+    description:
+      - The synchronization interval in minutes
+    required: no
+    default: 10
+  manualSync:
+    description:
+      - Setting this to true will cause other synchronization values to be ignored
+    required: no
+    default: no
+  recoveryWarnThresholdMinutes:
+    description:
+      - Recovery point warning threshold (minutes). The user will be warned when the age of the last good recovery point exceeds this value
+    required: no
+    default: 20
+  repoUtilizationWarnThreshold:
+    description:
+      - Repository utilization warning threshold (percent)
+    required: no
+    default: 80
+  interfaceType:
+    description:
+      - The intended protocol to use if both Fibre and iSCSI are available.
+    choices:
+      - iscsi
+      - fibre
+    required: no
+    default: null
+  syncWarnThresholdMinutes:
+    description:
+      - The threshold (in minutes) for notifying the user that periodic synchronization has taken too long to complete.
+    required: no
+    default: 10
+  ssid:
+    description:
+      - The ID of the primary storage array for the async mirror action
+    required: yes
+  state:
+    description:
+      - A C(state) of present will either create or update the async mirror group.
+      - A C(state) of absent will remove the async mirror group.
+    required: yes
+"""
+
+EXAMPLES = """
+    - name: AMG removal
+      netapp_e_amg:
+        state: absent
+        ssid: "{{ ssid }}"
+        secondaryArrayId: "{{amg_secondaryArrayId}}"
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        new_name: "{{amg_array_name}}"
+        name: "{{amg_name}}"
+      when: amg_create
+
+    - name: AMG create
+      netapp_e_amg:
+        state: present
+        ssid: "{{ ssid }}"
+        secondaryArrayId: "{{amg_secondaryArrayId}}"
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        new_name: "{{amg_array_name}}"
+        name: "{{amg_name}}"
+      when: amg_create
+"""
+
+RETURN = """
+msg:
+    description: Successful removal
+    returned: success
+    type: string
+    sample: "Async mirror group removed."
+ +msg: + description: Successful creation + returned: success + type: string + sample: '{"changed": true, "connectionType": "fc", "groupRef": "3700000060080E5000299C24000006E857AC7EEC", "groupState": "optimal", "id": "3700000060080E5000299C24000006E857AC7EEC", "label": "amg_made_by_ansible", "localRole": "primary", "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC", "orphanGroup": false, "recoveryPointAgeAlertThresholdMinutes": 20, "remoteRole": "secondary", "remoteTarget": {"nodeName": {"ioInterfaceType": "fc", "iscsiNodeName": null, "remoteNodeWWN": "20040080E5299F1C"}, "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC", "scsiinitiatorTargetBaseProperties": {"ioInterfaceType": "fc", "iscsiinitiatorTargetBaseParameters": null}}, "remoteTargetId": "ansible2", "remoteTargetName": "Ansible2", "remoteTargetWwn": "60080E5000299F880000000056A25D56", "repositoryUtilizationWarnThreshold": 80, "roleChangeProgress": "none", "syncActivity": "idle", "syncCompletionTimeAlertThresholdMinutes": 10, "syncIntervalMinutes": 10, "worldWideName": "60080E5000299C24000006E857AC7EEC"}' +""" + +import json + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule, get_exception +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.error import HTTPError + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=False, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError: + err = get_exception() + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + data = None + except: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +def has_match(module, ssid, api_url, api_pwd, api_usr, body): + compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes', + 'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold'] + desired_state = dict((x, (body.get(x))) for x in compare_keys) + label_exists = False + matches_spec = False + current_state = None + async_id = None + api_data = None + desired_name = body.get('name') + endpoint = 'storage-systems/%s/async-mirrors' % ssid + url = api_url + endpoint + try: + rc, data = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS) + except Exception: + error = get_exception() + module.exit_json(exception="Error finding a match. 
Message: %s" % str(error))
+
+    for async_group in data:
+        if async_group['label'] == desired_name:
+            label_exists = True
+            api_data = async_group
+            async_id = async_group['groupRef']
+            current_state = dict(
+                syncIntervalMinutes=async_group['syncIntervalMinutes'],
+                syncWarnThresholdMinutes=async_group['syncCompletionTimeAlertThresholdMinutes'],
+                recoveryWarnThresholdMinutes=async_group['recoveryPointAgeAlertThresholdMinutes'],
+                repoUtilizationWarnThreshold=async_group['repositoryUtilizationWarnThreshold'],
+            )
+
+            if current_state == desired_state:
+                matches_spec = True
+
+    return label_exists, matches_spec, api_data, async_id
+
+
+def create_async(module, ssid, api_url, api_pwd, api_usr, body):
+    endpoint = 'storage-systems/%s/async-mirrors' % ssid
+    url = api_url + endpoint
+    post_data = json.dumps(body)
+    try:
+        rc, data = request(url, data=post_data, method='POST', url_username=api_usr, url_password=api_pwd,
+                           headers=HEADERS)
+    except Exception:
+        error = get_exception()
+        module.exit_json(exception="Exception while creating async mirror group. Message: %s" % str(error))
+    return data
+
+
+def update_async(module, ssid, api_url, pwd, user, body, new_name, async_id):
+    endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
+    url = api_url + endpoint
+    compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
+                    'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
+    desired_state = dict((x, (body.get(x))) for x in compare_keys)
+
+    if new_name:
+        desired_state['new_name'] = new_name
+
+    post_data = json.dumps(desired_state)
+
+    try:
+        rc, data = request(url, data=post_data, method='POST', headers=HEADERS,
+                           url_username=user, url_password=pwd)
+    except Exception:
+        error = get_exception()
+        module.exit_json(exception="Exception while updating async mirror group. Message: %s" % str(error))
+
+    return data
+
+
+def remove_amg(module, ssid, api_url, pwd, user, async_id):
+    endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
+    url = api_url + endpoint
+    try:
+        rc, data = request(url, method='DELETE', url_username=user, url_password=pwd,
+                           headers=HEADERS)
+    except Exception:
+        error = get_exception()
+        module.exit_json(exception="Exception while removing async mirror group. Message: %s" % str(error))
+
+    return
+
+
+def main():
+    argument_spec = basic_auth_argument_spec()
+    argument_spec.update(dict(
+        api_username=dict(type='str', required=True),
+        api_password=dict(type='str', required=True, no_log=True),
+        api_url=dict(type='str', required=True),
+        name=dict(required=True, type='str'),
+        new_name=dict(required=False, type='str'),
+        secondaryArrayId=dict(required=True, type='str'),
+        syncIntervalMinutes=dict(required=False, default=10, type='int'),
+        manualSync=dict(required=False, default=False, type='bool'),
+        recoveryWarnThresholdMinutes=dict(required=False, default=20, type='int'),
+        repoUtilizationWarnThreshold=dict(required=False, default=80, type='int'),
+        interfaceType=dict(required=False, choices=['fibre', 'iscsi'], type='str'),
+        ssid=dict(required=True, type='str'),
+        state=dict(required=True, choices=['present', 'absent']),
+        syncWarnThresholdMinutes=dict(required=False, default=10, type='int')
+    ))
+
+    module = AnsibleModule(argument_spec=argument_spec)
+
+    p = module.params
+
+    ssid = p.pop('ssid')
+    api_url = p.pop('api_url')
+    user = p.pop('api_username')
+    pwd = p.pop('api_password')
+    new_name = p.pop('new_name')
+    state = p.pop('state')
+
+    if not api_url.endswith('/'):
+        api_url += '/'
+
+    name_exists, spec_matches, api_data, async_id = has_match(module, ssid, api_url, pwd, user, p)
+
+    if state == 'present':
+        if name_exists and spec_matches:
+            module.exit_json(changed=False, msg="Desired state met", **api_data)
+        elif name_exists and not spec_matches:
+            results = update_async(module, ssid, api_url, pwd, user,
+                                   p, new_name, async_id)
+            module.exit_json(changed=True,
+                             msg="Async mirror group updated", async_id=async_id,
+                             **results)
+        elif not name_exists:
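+            # create_async() takes the password before the username in its
+            # signature, so the credentials are passed as (pwd, user).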
Message: %s" % str(error)) + + return + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_username=dict(type='str', required=True), + api_password=dict(type='str', required=True, no_log=True), + api_url=dict(type='str', required=True), + name=dict(required=True, type='str'), + new_name=dict(required=False, type='str'), + secondaryArrayId=dict(required=True, type='str'), + syncIntervalMinutes=dict(required=False, default=10, type='int'), + manualSync=dict(required=False, default=False, type='bool'), + recoveryWarnThresholdMinutes=dict(required=False, default=20, type='int'), + repoUtilizationWarnThreshold=dict(required=False, default=80, type='int'), + interfaceType=dict(required=False, choices=['fibre', 'iscsi'], type='str'), + ssid=dict(required=True, type='str'), + state=dict(required=True, choices=['present', 'absent']), + syncWarnThresholdMinutes=dict(required=False, default=10, type='int') + )) + + module = AnsibleModule(argument_spec=argument_spec) + + p = module.params + + ssid = p.pop('ssid') + api_url = p.pop('api_url') + user = p.pop('api_username') + pwd = p.pop('api_password') + new_name = p.pop('new_name') + state = p.pop('state') + + if not api_url.endswith('/'): + api_url += '/' + + name_exists, spec_matches, api_data, async_id = has_match(module, ssid, api_url, pwd, user, p) + + if state == 'present': + if name_exists and spec_matches: + module.exit_json(changed=False, msg="Desired state met", **api_data) + elif name_exists and not spec_matches: + results = update_async(module, ssid, api_url, pwd, user, + p, new_name, async_id) + module.exit_json(changed=True, + msg="Async mirror group updated", async_id=async_id, + **results) + elif not name_exists: + results = create_async(module, ssid, api_url, user, pwd, p) + module.exit_json(changed=True, **results) + + elif state == 'absent': + if name_exists: + remove_amg(module, ssid, api_url, pwd, user, async_id) + module.exit_json(changed=True, msg="Async mirror group removed.", + async_id=async_id) + else: + module.exit_json(changed=False, + msg="Async Mirror group: %s already absent" % p['name']) + + +if __name__ == '__main__': + main() diff --git a/storage/netapp/netapp_e_amg_role.py b/storage/netapp/netapp_e_amg_role.py new file mode 100644 index 00000000000..bfe3c4b8334 --- /dev/null +++ b/storage/netapp/netapp_e_amg_role.py @@ -0,0 +1,243 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: netapp_e_amg_role +short_description: Update the role of a storage array within an Asynchronous Mirror Group (AMG). 
+description: + - Update a storage array to become the primary or secondary instance in an asynchronous mirror group +version_added: '2.2' +author: Kevin Hulquest (@hulquest) +options: + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + example: + - https://prod-1.wahoo.acme.com/devmgr/v2 + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + ssid: + description: + - The ID of the primary storage array for the async mirror action + required: yes + role: + description: + - Whether the array should be the primary or secondary array for the AMG + required: yes + choices: ['primary', 'secondary'] + noSync: + description: + - Whether to avoid synchronization prior to role reversal + required: no + default: no + choices: [yes, no] + force: + description: + - Whether to force the role reversal regardless of the online-state of the primary + required: no + default: no +""" + +EXAMPLES = """ + - name: Update the role of a storage array + netapp_e_amg_role: + name: updating amg role + role: primary + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" +""" + +RETURN = """ +msg: + description: Failure message + returned: failure + type: string + sample: "No Async Mirror Group with the name." +""" +import json + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule + +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.error import HTTPError + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError: + err = get_exception() + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +def has_match(module, ssid, api_url, api_pwd, api_usr, body, name): + amg_exists = False + has_desired_role = False + amg_id = None + amg_data = None + get_amgs = 'storage-systems/%s/async-mirrors' % ssid + url = api_url + get_amgs + try: + amg_rc, amgs = request(url, url_username=api_usr, url_password=api_pwd, + headers=HEADERS) + except: + module.fail_json(msg="Failed to find AMGs on storage array. 
Id [%s]" % (ssid)) + + for amg in amgs: + if amg['label'] == name: + amg_exists = True + amg_id = amg['id'] + amg_data = amg + if amg['localRole'] == body.get('role'): + has_desired_role = True + + return amg_exists, has_desired_role, amg_id, amg_data + + +def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id): + endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id) + url = api_url + endpoint + post_data = json.dumps(body) + try: + request(url, data=post_data, method='POST', url_username=api_usr, + url_password=api_pwd, headers=HEADERS) + except: + err = get_exception() + module.fail_json( + msg="Failed to change role of AMG. Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err))) + + status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id) + status_url = api_url + status_endpoint + try: + rc, status = request(status_url, method='GET', url_username=api_usr, + url_password=api_pwd, headers=HEADERS) + except: + err = get_exception() + module.fail_json( + msg="Failed to check status of AMG after role reversal. " + + "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err))) + + # Here we wait for the role reversal to complete + if 'roleChangeProgress' in status: + while status['roleChangeProgress'] != "none": + try: + rc, status = request(status_url, method='GET', + url_username=api_usr, url_password=api_pwd, headers=HEADERS) + except: + err = get_exception() + module.fail_json( + msg="Failed to check status of AMG after role reversal. " + + "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err))) + return status + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + name=dict(required=True, type='str'), + role=dict(required=True, choices=['primary', 'secondary']), + noSync=dict(required=False, type='bool', default=False), + force=dict(required=False, type='bool', default=False), + ssid=dict(required=True, type='str'), + api_url=dict(required=True), + api_username=dict(required=False), + api_password=dict(required=False, no_log=True), + )) + + module = AnsibleModule(argument_spec=argument_spec) + + p = module.params + + ssid = p.pop('ssid') + api_url = p.pop('api_url') + user = p.pop('api_username') + pwd = p.pop('api_password') + name = p.pop('name') + + if not api_url.endswith('/'): + api_url += '/' + + agm_exists, has_desired_role, async_id, amg_data = has_match(module, ssid, api_url, pwd, user, p, name) + + if not agm_exists: + module.fail_json(msg="No Async Mirror Group with the name: '%s' was found" % name) + elif has_desired_role: + module.exit_json(changed=False, **amg_data) + + else: + amg_data = update_amg(module, ssid, api_url, user, pwd, p, async_id) + if amg_data: + module.exit_json(changed=True, **amg_data) + else: + module.exit_json(changed=True, msg="AMG role changed.") + + +if __name__ == '__main__': + main() diff --git a/storage/netapp/netapp_e_amg_sync.py b/storage/netapp/netapp_e_amg_sync.py new file mode 100644 index 00000000000..548b115ff0a --- /dev/null +++ b/storage/netapp/netapp_e_amg_sync.py @@ -0,0 +1,273 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: netapp_e_amg_sync +short_description: Conduct synchronization actions on asynchronous member groups. +description: + - Allows for the initialization, suspension and resumption of an asynchronous mirror group's synchronization for NetApp E-series storage arrays. +version_added: '2.2' +author: Kevin Hulquest (@hulquest) +options: + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + example: + - https://prod-1.wahoo.acme.com/devmgr/v2 + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + ssid: + description: + - The ID of the storage array containing the AMG you wish to target + name: + description: + - The name of the async mirror group you wish to target + required: yes + state: + description: + - The synchronization action you'd like to take. + - If C(running) then it will begin syncing if there is no active sync or will resume a suspended sync. If there is already a sync in progress, it will return with an OK status. + - If C(suspended) it will suspend any ongoing sync action, but return OK if there is no active sync or if the sync is already suspended + choices: + - running + - suspended + required: yes + delete_recovery_point: + description: + - Indicates whether the failures point can be deleted on the secondary if necessary to achieve the synchronization. + - If true, and if the amount of unsynchronized data exceeds the CoW repository capacity on the secondary for any member volume, the last failures point will be deleted and synchronization will continue. + - If false, the synchronization will be suspended if the amount of unsynchronized data exceeds the CoW Repository capacity on the secondary and the failures point will be preserved. + - "NOTE: This only has impact for newly launched syncs." + choices: + - yes + - no + default: no +""" +EXAMPLES = """ + - name: start AMG async + netapp_e_amg_sync: + name: "{{ amg_sync_name }}" + state: running + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" +""" +RETURN = """ +json: + description: The object attributes of the AMG. 
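Each module in this patch carries a near-identical copy of request(). Its essential move is that when open_url() raises HTTPError, the exception object is itself file-like, so the wrapper keeps reading the proxy's JSON error body instead of losing it. A pared-down sketch of just that pattern; the modules above use get_exception() for old-Python compatibility, while the modern except ... as form is used here:

```python
import json

from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError


def fetch(url, **kwargs):
    try:
        r = open_url(url=url, **kwargs)
    except HTTPError as err:
        r = err.fp  # the error response still carries the JSON error details
    raw = r.read()
    return r.getcode(), json.loads(raw) if raw else None
```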
+ returned: success + type: string + example: + { + "changed": false, + "connectionType": "fc", + "groupRef": "3700000060080E5000299C24000006EF57ACAC70", + "groupState": "optimal", + "id": "3700000060080E5000299C24000006EF57ACAC70", + "label": "made_with_ansible", + "localRole": "primary", + "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC", + "orphanGroup": false, + "recoveryPointAgeAlertThresholdMinutes": 20, + "remoteRole": "secondary", + "remoteTarget": { + "nodeName": { + "ioInterfaceType": "fc", + "iscsiNodeName": null, + "remoteNodeWWN": "20040080E5299F1C" + }, + "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC", + "scsiinitiatorTargetBaseProperties": { + "ioInterfaceType": "fc", + "iscsiinitiatorTargetBaseParameters": null + } + }, + "remoteTargetId": "ansible2", + "remoteTargetName": "Ansible2", + "remoteTargetWwn": "60080E5000299F880000000056A25D56", + "repositoryUtilizationWarnThreshold": 80, + "roleChangeProgress": "none", + "syncActivity": "idle", + "syncCompletionTimeAlertThresholdMinutes": 10, + "syncIntervalMinutes": 10, + "worldWideName": "60080E5000299C24000006EF57ACAC70" + } +""" +import json + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule + +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.error import HTTPError + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError: + err = get_exception() + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +class AMGsync(object): + def __init__(self): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_username=dict(type='str', required=True), + api_password=dict(type='str', required=True, no_log=True), + api_url=dict(type='str', required=True), + name=dict(required=True, type='str'), + ssid=dict(required=True, type='str'), + state=dict(required=True, type='str', choices=['running', 'suspended']), + delete_recovery_point=dict(required=False, type='bool', default=False) + )) + self.module = AnsibleModule(argument_spec=argument_spec) + args = self.module.params + self.name = args['name'] + self.ssid = args['ssid'] + self.state = args['state'] + self.delete_recovery_point = args['delete_recovery_point'] + try: + self.user = args['api_username'] + self.pwd = args['api_password'] + self.url = args['api_url'] + except KeyError: + self.module.fail_json(msg="You must pass in api_username" + "and api_password and api_url to the module.") + self.certs = args['validate_certs'] + + self.post_headers = { + "Accept": "application/json", + "Content-Type": "application/json" + } + self.amg_id, self.amg_obj = self.get_amg() + + def get_amg(self): 
+ endpoint = self.url + '/storage-systems/%s/async-mirrors' % self.ssid + (rc, amg_objs) = request(endpoint, url_username=self.user, url_password=self.pwd, validate_certs=self.certs, + headers=self.post_headers) + try: + amg_id = filter(lambda d: d['label'] == self.name, amg_objs)[0]['id'] + amg_obj = filter(lambda d: d['label'] == self.name, amg_objs)[0] + except IndexError: + self.module.fail_json( + msg="There is no async mirror group %s associated with storage array %s" % (self.name, self.ssid)) + return amg_id, amg_obj + + @property + def current_state(self): + amg_id, amg_obj = self.get_amg() + return amg_obj['syncActivity'] + + def run_sync_action(self): + # If we get to this point we know that the states differ, and there is no 'err' state, + # so no need to revalidate + + post_body = dict() + if self.state == 'running': + if self.current_state == 'idle': + if self.delete_recovery_point: + post_body.update(dict(deleteRecoveryPointIfNecessary=self.delete_recovery_point)) + suffix = 'sync' + else: + # In a suspended state + suffix = 'resume' + else: + suffix = 'suspend' + + endpoint = self.url + "/storage-systems/%s/async-mirrors/%s/%s" % (self.ssid, self.amg_id, suffix) + + (rc, resp) = request(endpoint, method='POST', url_username=self.user, url_password=self.pwd, + validate_certs=self.certs, data=json.dumps(post_body), headers=self.post_headers, + ignore_errors=True) + + if not str(rc).startswith('2'): + self.module.fail_json(msg=str(resp['errorMessage'])) + + return resp + + def apply(self): + state_map = dict( + running=['active'], + suspended=['userSuspended', 'internallySuspended', 'paused'], + err=['unkown', '_UNDEFINED']) + + if self.current_state not in state_map[self.state]: + if self.current_state in state_map['err']: + self.module.fail_json( + msg="The sync is a state of '%s', this requires manual intervention. " + + "Please investigate and try again" % self.current_state) + else: + self.amg_obj = self.run_sync_action() + + (ret, amg) = self.get_amg() + self.module.exit_json(changed=False, **amg) + + +def main(): + sync = AMGsync() + sync.apply() + + +if __name__ == '__main__': + main() diff --git a/storage/netapp/netapp_e_auth.py b/storage/netapp/netapp_e_auth.py new file mode 100644 index 00000000000..19bdb0bfea5 --- /dev/null +++ b/storage/netapp/netapp_e_auth.py @@ -0,0 +1,273 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: netapp_e_auth +short_description: Sets or updates the password for a storage array. +description: + - Sets or updates the password for a storage array. When the password is updated on the storage array, it must be updated on the SANtricity Web Services proxy. Note, all storage arrays do not have a Monitor or RO role. 
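AMGsync.get_amg() above indexes into filter(...), which works on Python 2, where filter returns a list, but fails on Python 3, where it returns an iterator; the same idiom recurs in netapp_e_host below. A version-agnostic sketch of the same label lookup:

```python
# Python-2/3-safe version of the lookup in AMGsync.get_amg() above.
def find_amg(amg_objs, name):
    amg = next((d for d in amg_objs if d['label'] == name), None)
    if amg is None:
        raise LookupError("no async mirror group labeled %r" % name)
    return amg['id'], amg
```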
+version_added: "2.2" +author: Kevin Hulquest (@hulquest) +options: + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + name: + description: + - The name of the storage array. Note that if more than one storage array with this name is detected, the task will fail and you'll have to use the ID instead. + required: False + ssid: + description: + - the identifier of the storage array in the Web Services Proxy. + required: False + set_admin: + description: + - Boolean value on whether to update the admin password. If set to false then the RO account is updated. + default: False + current_password: + description: + - The current admin password. This is not required if the password hasn't been set before. + required: False + new_password: + description: + - The password you would like to set. Cannot be more than 30 characters. + required: True + api_url: + description: + - The full API url. + - "Example: http://ENDPOINT:8080/devmgr/v2" + - This can optionally be set via an environment variable, API_URL + required: False + api_username: + description: + - The username used to authenticate against the API + - This can optionally be set via an environment variable, API_USERNAME + required: False + api_password: + description: + - The password used to authenticate against the API + - This can optionally be set via an environment variable, API_PASSWORD + required: False +''' + +EXAMPLES = ''' +- name: Test module + netapp_e_auth: + name: trex + current_password: OldPasswd + new_password: NewPasswd + set_admin: yes + api_url: '{{ netapp_api_url }}' + api_username: '{{ netapp_api_username }}' + api_password: '{{ netapp_api_password }}' +''' + +RETURN = ''' +msg: + description: Success message + returned: success + type: string + sample: "Password Updated Successfully" +''' +import json + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule + +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.error import HTTPError + + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json" +} + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError: + err = get_exception() + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +def get_ssid(module, name, api_url, user, pwd): + count = 0 + all_systems = 'storage-systems' + systems_url = api_url + all_systems + rc, data = request(systems_url, headers=HEADERS, url_username=user, url_password=pwd) + for system in data: + if system['name'] == name: + count += 1 + if count > 1: + module.fail_json( + msg="You supplied a name for the Storage Array but 
more than 1 array was found with that name. " + + "Use the id instead") + else: + ssid = system['id'] + else: + continue + + if count == 0: + module.fail_json(msg="No storage array with the name %s was found" % name) + + else: + return ssid + + +def get_pwd_status(module, ssid, api_url, user, pwd): + pwd_status = "storage-systems/%s/passwords" % ssid + url = api_url + pwd_status + try: + rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd) + return data['readOnlyPasswordSet'], data['adminPasswordSet'] + except HTTPError: + error = get_exception() + module.fail_json(msg="There was an issue with connecting, please check that your " + "endpoint is properly defined and your credentials are correct: %s" % str(error)) + + +def update_storage_system_pwd(module, ssid, pwd, api_url, api_usr, api_pwd): + update_pwd = 'storage-systems/%s' % ssid + url = api_url + update_pwd + post_body = json.dumps(dict(storedPassword=pwd)) + try: + rc, data = request(url, data=post_body, method='POST', headers=HEADERS, url_username=api_usr, + url_password=api_pwd) + except: + err = get_exception() + module.fail_json(msg="Failed to update system password. Id [%s]. Error [%s]" % (ssid, str(err))) + return data + + +def set_password(module, ssid, api_url, user, pwd, current_password=None, new_password=None, set_admin=False): + set_pass = "storage-systems/%s/passwords" % ssid + url = api_url + set_pass + + if not current_password: + current_password = "" + + post_body = json.dumps( + dict(currentAdminPassword=current_password, adminPassword=set_admin, newPassword=new_password)) + + try: + rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd, + ignore_errors=True) + except: + err = get_exception() + module.fail_json(msg="Failed to set system password. Id [%s]. Error [%s]" % (ssid, str(err))) + + if rc == 422: + post_body = json.dumps(dict(currentAdminPassword='', adminPassword=set_admin, newPassword=new_password)) + try: + rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd) + except Exception: + module.fail_json(msg="Wrong or no admin password supplied. 
Please update your playbook and try again") + + update_data = update_storage_system_pwd(module, ssid, new_password, api_url, user, pwd) + + if int(rc) == 204: + return update_data + else: + module.fail_json(msg="%s:%s" % (rc, data)) + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + name=dict(required=False, type='str'), + ssid=dict(required=False, type='str'), + current_password=dict(required=False, no_log=True), + new_password=dict(required=True, no_log=True), + set_admin=dict(required=True, type='bool'), + api_url=dict(required=True), + api_username=dict(required=False), + api_password=dict(required=False, no_log=True) + ) + ) + module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['name', 'ssid']], + required_one_of=[['name', 'ssid']]) + + name = module.params['name'] + ssid = module.params['ssid'] + current_password = module.params['current_password'] + new_password = module.params['new_password'] + set_admin = module.params['set_admin'] + user = module.params['api_username'] + pwd = module.params['api_password'] + api_url = module.params['api_url'] + + if not api_url.endswith('/'): + api_url += '/' + + if name: + ssid = get_ssid(module, name, api_url, user, pwd) + + ro_pwd, admin_pwd = get_pwd_status(module, ssid, api_url, user, pwd) + + if admin_pwd and not current_password: + module.fail_json( + msg="Admin account has a password set. " + + "You must supply current_password in order to update the RO or Admin passwords") + + if len(new_password) > 30: + module.fail_json(msg="Passwords must not be greater than 30 characters in length") + + success = set_password(module, ssid, api_url, user, pwd, current_password=current_password, + new_password=new_password, + set_admin=set_admin) + + module.exit_json(changed=True, msg="Password Updated Successfully", **success) + + +if __name__ == '__main__': + main() diff --git a/storage/netapp/netapp_e_facts.py b/storage/netapp/netapp_e_facts.py new file mode 100644 index 00000000000..5a877afab61 --- /dev/null +++ b/storage/netapp/netapp_e_facts.py @@ -0,0 +1,205 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +module: netapp_e_facts +version_added: '2.2' +short_description: Get facts about NetApp E-Series arrays +options: + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. 
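set_password() in netapp_e_auth above first posts the supplied current admin password and, if the array answers 422, retries once with an empty currentAdminPassword before giving up; only a 204 counts as success. A simplified sketch of that control flow, where post stands in for the POST to storage-systems/<ssid>/passwords (the real function also refreshes the password stored in the Web Services Proxy afterwards):

```python
def change_password(post, current, new, set_admin):
    body = dict(currentAdminPassword=current or '', adminPassword=set_admin,
                newPassword=new)
    rc, data = post(body, ignore_errors=True)
    if rc == 422:
        # the array rejected the current password; retry once with a blank one
        body['currentAdminPassword'] = ''
        rc, data = post(body)
    if int(rc) != 204:
        raise RuntimeError('password change failed: %s %s' % (rc, data))
    return data
```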
+ example: + - https://prod-1.wahoo.acme.com/devmgr/v2 + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + ssid: + required: true + description: + - The ID of the array to manage. This value must be unique for each array. + +description: + - Return various information about NetApp E-Series storage arrays (eg, configuration, disks) + +author: Kevin Hulquest (@hulquest) +''' + +EXAMPLES = """ +--- + - name: Get array facts + netapp_e_facts: + array_id: "{{ netapp_array_id }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" +""" + +RETURN = """ +msg: Gathered facts for . +""" +import json + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule, get_exception +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.error import HTTPError + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError: + err = get_exception() + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + data = None + except: + if ignore_errors: + pass + else: + raise + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update( + api_username=dict(type='str', required=True), + api_password=dict(type='str', required=True, no_log=True), + api_url=dict(type='str', required=True), + ssid=dict(required=True)) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True + ) + + p = module.params + + ssid = p['ssid'] + validate_certs = p['validate_certs'] + + api_usr = p['api_username'] + api_pwd = p['api_password'] + api_url = p['api_url'] + + facts = dict(ssid=ssid) + + # fetch the list of storage-pool objects and look for one with a matching name + try: + (rc, resp) = request(api_url + "/storage-systems/%s/graph" % ssid, + headers=dict(Accept="application/json"), + url_username=api_usr, url_password=api_pwd, validate_certs=validate_certs) + except: + error = get_exception() + module.fail_json( + msg="Failed to obtain facts from storage array with id [%s]. 
Error [%s]" % (ssid, str(error))) + + facts['snapshot_images'] = [ + dict( + id=d['id'], + status=d['status'], + pit_capacity=d['pitCapacity'], + creation_method=d['creationMethod'], + reposity_cap_utilization=d['repositoryCapacityUtilization'], + active_cow=d['activeCOW'], + rollback_source=d['isRollbackSource'] + ) for d in resp['highLevelVolBundle']['pit']] + + facts['netapp_disks'] = [ + dict( + id=d['id'], + available=d['available'], + media_type=d['driveMediaType'], + status=d['status'], + usable_bytes=d['usableCapacity'], + tray_ref=d['physicalLocation']['trayRef'], + product_id=d['productID'], + firmware_version=d['firmwareVersion'], + serial_number=d['serialNumber'].lstrip() + ) for d in resp['drive']] + + facts['netapp_storage_pools'] = [ + dict( + id=sp['id'], + name=sp['name'], + available_capacity=sp['freeSpace'], + total_capacity=sp['totalRaidedSpace'], + used_capacity=sp['usedSpace'] + ) for sp in resp['volumeGroup']] + + all_volumes = list(resp['volume']) + # all_volumes.extend(resp['thinVolume']) + + # TODO: exclude thin-volume repo volumes (how to ID?) + facts['netapp_volumes'] = [ + dict( + id=v['id'], + name=v['name'], + parent_storage_pool_id=v['volumeGroupRef'], + capacity=v['capacity'], + is_thin_provisioned=v['thinProvisioned'] + ) for v in all_volumes] + + features = [f for f in resp['sa']['capabilities']] + features.extend([f['capability'] for f in resp['sa']['premiumFeatures'] if f['isEnabled']]) + features = list(set(features)) # ensure unique + features.sort() + facts['netapp_enabled_features'] = features + + # TODO: include other details about the storage pool (size, type, id, etc) + result = dict(ansible_facts=facts, changed=False) + module.exit_json(msg="Gathered facts for %s." % ssid, **result) + + +if __name__ == "__main__": + main() diff --git a/storage/netapp/netapp_e_flashcache.py b/storage/netapp/netapp_e_flashcache.py new file mode 100644 index 00000000000..da7d520542b --- /dev/null +++ b/storage/netapp/netapp_e_flashcache.py @@ -0,0 +1,424 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +module: netapp_e_flashcache +author: Kevin Hulquest (@hulquest) +version_added: '2.2' +short_description: Manage NetApp SSD caches +description: +- Create or remove SSD caches on a NetApp E-Series storage array. +options: + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. 
+ example: + - https://prod-1.wahoo.acme.com/devmgr/v2 + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + ssid: + required: true + description: + - The ID of the array to manage (as configured on the web services proxy). + state: + required: true + description: + - Whether the specified SSD cache should exist or not. + choices: ['present', 'absent'] + default: present + name: + required: true + description: + - The name of the SSD cache to manage + io_type: + description: + - The type of workload to optimize the cache for. + choices: ['filesystem','database','media'] + default: filesystem + disk_count: + description: + - The minimum number of disks to use for building the cache. The cache will be expanded if this number exceeds the number of disks already in place + size_unit: + description: + - The unit to be applied to size arguments + choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'] + default: gb + cache_size_min: + description: + - The minimum size (in size_units) of the ssd cache. The cache will be expanded if this exceeds the current size of the cache. +''' + +EXAMPLES = """ + - name: Flash Cache + netapp_e_flashcache: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + name: SSDCacheBuiltByAnsible +""" + +RETURN = """ +msg: + description: Success message + returned: success + type: string + sample: json for newly created flash cache +""" +import json +import logging +import sys + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import open_url + +from ansible.module_utils.six.moves.urllib.error import HTTPError + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError: + err = get_exception() + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +class NetAppESeriesFlashCache(object): + def __init__(self): + self.name = None + self.log_mode = None + self.log_path = None + self.api_url = None + self.api_username = None + self.api_password = None + self.ssid = None + self.validate_certs = None + self.disk_count = None + self.size_unit = None + self.cache_size_min = None + self.io_type = None + self.driveRefs = None + self.state = None + self._size_unit_map = dict( + bytes=1, + b=1, + kb=1024, + mb=1024 ** 2, + gb=1024 ** 3, + tb=1024 ** 4, + pb=1024 ** 5, + eb=1024 ** 6, + zb=1024 ** 7, + yb=1024 ** 8 + ) + + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_username=dict(type='str', required=True), 
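NetAppESeriesFlashCache above translates cache_size_min into bytes by multiplying with the binary-unit table in _size_unit_map, so gb means 1024 ** 3. A quick worked sketch of that conversion:

```python
# Same unit table as _size_unit_map above (binary units, not SI).
SIZE_UNIT_MAP = dict(bytes=1, b=1, kb=1024, mb=1024 ** 2, gb=1024 ** 3,
                     tb=1024 ** 4, pb=1024 ** 5, eb=1024 ** 6,
                     zb=1024 ** 7, yb=1024 ** 8)


def requested_size_bytes(cache_size_min, size_unit='gb'):
    return (cache_size_min or 0) * SIZE_UNIT_MAP[size_unit]


assert requested_size_bytes(5, 'gb') == 5 * 1024 ** 3 == 5368709120
```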
+ api_password=dict(type='str', required=True, no_log=True), + api_url=dict(type='str', required=True), + state=dict(default='present', choices=['present', 'absent'], type='str'), + ssid=dict(required=True, type='str'), + name=dict(required=True, type='str'), + disk_count=dict(type='int'), + disk_refs=dict(type='list'), + cache_size_min=dict(type='int'), + io_type=dict(default='filesystem', choices=['filesystem', 'database', 'media']), + size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'], + type='str'), + criteria_disk_phy_type=dict(choices=['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'], + type='str'), + log_mode=dict(type='str'), + log_path=dict(type='str'), + )) + self.module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + + ], + mutually_exclusive=[ + + ], + # TODO: update validation for various selection criteria + supports_check_mode=True + ) + + self.__dict__.update(self.module.params) + + # logging setup + self._logger = logging.getLogger(self.__class__.__name__) + self.debug = self._logger.debug + + if self.log_mode == 'file' and self.log_path: + logging.basicConfig(level=logging.DEBUG, filename=self.log_path) + elif self.log_mode == 'stderr': + logging.basicConfig(level=logging.DEBUG, stream=sys.stderr) + + self.post_headers = dict(Accept="application/json") + self.post_headers['Content-Type'] = 'application/json' + + def get_candidate_disks(self, disk_count, size_unit='gb', capacity=None): + self.debug("getting candidate disks...") + + drives_req = dict( + driveCount=disk_count, + sizeUnit=size_unit, + driveType='ssd', + ) + + if capacity: + drives_req['targetUsableCapacity'] = capacity + + (rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), + data=json.dumps(drives_req), headers=self.post_headers, method='POST', + url_username=self.api_username, url_password=self.api_password, + validate_certs=self.validate_certs) + + if rc == 204: + self.module.fail_json(msg='Cannot find disks to match requested criteria for ssd cache') + + disk_ids = [d['id'] for d in drives_resp] + bytes = reduce(lambda s, d: s + int(d['usableCapacity']), drives_resp, 0) + + return (disk_ids, bytes) + + def create_cache(self): + (disk_ids, bytes) = self.get_candidate_disks(disk_count=self.disk_count, size_unit=self.size_unit, + capacity=self.cache_size_min) + + self.debug("creating ssd cache...") + + create_fc_req = dict( + driveRefs=disk_ids, + name=self.name + ) + + (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid), + data=json.dumps(create_fc_req), headers=self.post_headers, method='POST', + url_username=self.api_username, url_password=self.api_password, + validate_certs=self.validate_certs) + + def update_cache(self): + self.debug('updating flash cache config...') + update_fc_req = dict( + name=self.name, + configType=self.io_type + ) + + (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/configure" % (self.ssid), + data=json.dumps(update_fc_req), headers=self.post_headers, method='POST', + url_username=self.api_username, url_password=self.api_password, + validate_certs=self.validate_certs) + + def delete_cache(self): + self.debug('deleting flash cache...') + (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid), method='DELETE', + url_username=self.api_username, url_password=self.api_password, + validate_certs=self.validate_certs, ignore_errors=True) + + @property + def needs_more_disks(self): 
+ if len(self.cache_detail['driveRefs']) < self.disk_count: + self.debug("needs resize: current disk count %s < requested requested count %s" % ( + len(self.cache_detail['driveRefs']), self.disk_count)) + return True + + @property + def needs_less_disks(self): + if len(self.cache_detail['driveRefs']) > self.disk_count: + self.debug("needs resize: current disk count %s < requested requested count %s" % ( + len(self.cache_detail['driveRefs']), self.disk_count)) + return True + + @property + def current_size_bytes(self): + return int(self.cache_detail['fcDriveInfo']['fcWithDrives']['usedCapacity']) + + @property + def requested_size_bytes(self): + if self.cache_size_min: + return self.cache_size_min * self._size_unit_map[self.size_unit] + else: + return 0 + + @property + def needs_more_capacity(self): + if self.current_size_bytes < self.requested_size_bytes: + self.debug("needs resize: current capacity %sb is less than requested minimum %sb" % ( + self.current_size_bytes, self.requested_size_bytes)) + return True + + @property + def needs_resize(self): + return self.needs_more_disks or self.needs_more_capacity or self.needs_less_disks + + def resize_cache(self): + # increase up to disk count first, then iteratively add disks until we meet requested capacity + + # TODO: perform this calculation in check mode + current_disk_count = len(self.cache_detail['driveRefs']) + proposed_new_disks = 0 + + proposed_additional_bytes = 0 + proposed_disk_ids = [] + + if self.needs_more_disks: + proposed_disk_count = self.disk_count - current_disk_count + + (disk_ids, bytes) = self.get_candidate_disks(disk_count=proposed_disk_count) + proposed_additional_bytes = bytes + proposed_disk_ids = disk_ids + + while self.current_size_bytes + proposed_additional_bytes < self.requested_size_bytes: + proposed_new_disks += 1 + (disk_ids, bytes) = self.get_candidate_disks(disk_count=proposed_new_disks) + proposed_disk_ids = disk_ids + proposed_additional_bytes = bytes + + add_drives_req = dict( + driveRef=proposed_disk_ids + ) + + self.debug("adding drives to flash-cache...") + (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/addDrives" % (self.ssid), + data=json.dumps(add_drives_req), headers=self.post_headers, method='POST', + url_username=self.api_username, url_password=self.api_password, + validate_certs=self.validate_certs) + + elif self.needs_less_disks and self.driveRefs: + rm_drives = dict(driveRef=self.driveRefs) + (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/removeDrives" % (self.ssid), + data=json.dumps(rm_drives), headers=self.post_headers, method='POST', + url_username=self.api_username, url_password=self.api_password, + validate_certs=self.validate_certs) + + def apply(self): + result = dict(changed=False) + (rc, cache_resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid), + url_username=self.api_username, url_password=self.api_password, + validate_certs=self.validate_certs, ignore_errors=True) + + if rc == 200: + self.cache_detail = cache_resp + else: + self.cache_detail = None + + if rc not in [200, 404]: + raise Exception( + "Unexpected error code %s fetching flash cache detail. 
Response data was %s" % (rc, cache_resp)) + + if self.state == 'present': + if self.cache_detail: + # TODO: verify parameters against detail for changes + if self.cache_detail['name'] != self.name: + self.debug("CHANGED: name differs") + result['changed'] = True + if self.cache_detail['flashCacheBase']['configType'] != self.io_type: + self.debug("CHANGED: io_type differs") + result['changed'] = True + if self.needs_resize: + self.debug("CHANGED: resize required") + result['changed'] = True + else: + self.debug("CHANGED: requested state is 'present' but cache does not exist") + result['changed'] = True + else: # requested state is absent + if self.cache_detail: + self.debug("CHANGED: requested state is 'absent' but cache exists") + result['changed'] = True + + if not result['changed']: + self.debug("no changes, exiting...") + self.module.exit_json(**result) + + if self.module.check_mode: + self.debug("changes pending in check mode, exiting early...") + self.module.exit_json(**result) + + if self.state == 'present': + if not self.cache_detail: + self.create_cache() + else: + if self.needs_resize: + self.resize_cache() + + # run update here as well, since io_type can't be set on creation + self.update_cache() + + elif self.state == 'absent': + self.delete_cache() + + # TODO: include other details about the storage pool (size, type, id, etc) + self.module.exit_json(changed=result['changed'], **self.resp) + + +def main(): + sp = NetAppESeriesFlashCache() + try: + sp.apply() + except Exception: + e = get_exception() + sp.debug("Exception in apply(): \n%s" % str(e)) + sp.module.fail_json(msg="Failed to create flash cache. Error[%s]" % str(e)) + +if __name__ == '__main__': + main() diff --git a/storage/netapp/netapp_e_host.py b/storage/netapp/netapp_e_host.py new file mode 100644 index 00000000000..458bb6fb8b6 --- /dev/null +++ b/storage/netapp/netapp_e_host.py @@ -0,0 +1,429 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: netapp_e_host +short_description: manage eseries hosts +description: + - Create, update, remove hosts on NetApp E-series storage arrays +version_added: '2.2' +author: Kevin Hulquest (@hulquest) +options: + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + example: + - https://prod-1.wahoo.acme.com/devmgr/v2 + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? 
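get_candidate_disks() in netapp_e_flashcache above totals candidate-drive capacity with reduce, which is a builtin on Python 2 but must be imported from functools on Python 3; resize_cache() then grows the cache by first topping up to the requested disk count and, if the projected capacity still falls short, asking the proxy for progressively larger candidate sets. A standalone sketch of that planning loop, where candidates(n) stands in for get_candidate_disks(disk_count=n) and returns (disk_ids, total_bytes):

```python
from functools import reduce  # builtin on Python 2, functools on Python 3


def total_capacity(drives):
    # the same summation get_candidate_disks() performs over usableCapacity
    return reduce(lambda s, d: s + int(d['usableCapacity']), drives, 0)


def plan_expansion(candidates, current_disks, current_bytes, want_disks, want_bytes):
    ids, extra = [], 0
    if want_disks > current_disks:
        ids, extra = candidates(want_disks - current_disks)
    n = 0
    while current_bytes + extra < want_bytes:
        n += 1
        ids, extra = candidates(n)  # request a progressively larger set
    return ids
```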
+ ssid: + description: + - the id of the storage array you wish to act against + required: True + name: + description: + - If the host doesnt yet exist, the label to assign at creation time. + - If the hosts already exists, this is what is used to identify the host to apply any desired changes + required: True + host_type_index: + description: + - The index that maps to host type you wish to create. It is recommended to use the M(netapp_e_facts) module to gather this information. Alternatively you can use the WSP portal to retrieve the information. + required: True + ports: + description: + - a list of of dictionaries of host ports you wish to associate with the newly created host + required: False + group: + description: + - the group you want the host to be a member of + required: False + +""" + +EXAMPLES = """ + - name: Set Host Info + netapp_e_host: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + name: "{{ host_name }}" + host_type_index: "{{ host_type_index }}" +""" + +RETURN = """ +msg: + description: Success message + returned: success + type: string + sample: The host has been created. +""" +import json + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.error import HTTPError + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError: + err = get_exception() + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data is None + except: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +class Host(object): + def __init__(self): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_username=dict(type='str', required=True), + api_password=dict(type='str', required=True, no_log=True), + api_url=dict(type='str', required=True), + ssid=dict(type='str', required=True), + state=dict(type='str', required=True, choices=['absent', 'present']), + group=dict(type='str', required=False), + ports=dict(type='list', required=False), + force_port=dict(type='bool', default=False), + name=dict(type='str', required=True), + host_type_index=dict(type='int', required=True) + )) + + self.module = AnsibleModule(argument_spec=argument_spec) + args = self.module.params + self.group = args['group'] + self.ports = args['ports'] + self.force_port = args['force_port'] + self.name = args['name'] + self.host_type_index = args['host_type_index'] + self.state = args['state'] + self.ssid = args['ssid'] + self.url = args['api_url'] + self.user = args['api_username'] + self.pwd = 
args['api_password'] + self.certs = args['validate_certs'] + self.ports = args['ports'] + self.post_body = dict() + + if not self.url.endswith('/'): + self.url += '/' + + @property + def valid_host_type(self): + try: + (rc, host_types) = request(self.url + 'storage-systems/%s/host-types' % self.ssid, url_password=self.pwd, + url_username=self.user, validate_certs=self.certs, headers=HEADERS) + except Exception: + err = get_exception() + self.module.fail_json( + msg="Failed to get host types. Array Id [%s]. Error [%s]." % (self.ssid, str(err))) + + try: + match = filter(lambda host_type: host_type['index'] == self.host_type_index, host_types)[0] + return True + except IndexError: + self.module.fail_json(msg="There is no host type with index %s" % self.host_type_index) + + @property + def hostports_available(self): + used_ids = list() + try: + (rc, self.available_ports) = request(self.url + 'storage-systems/%s/unassociated-host-ports' % self.ssid, + url_password=self.pwd, url_username=self.user, + validate_certs=self.certs, + headers=HEADERS) + except: + err = get_exception() + self.module.fail_json( + msg="Failed to get unassociated host ports. Array Id [%s]. Error [%s]." % (self.ssid, str(err))) + + if len(self.available_ports) > 0 and len(self.ports) <= len(self.available_ports): + for port in self.ports: + for free_port in self.available_ports: + # Desired Type matches but also make sure we havent already used the ID + if not free_port['id'] in used_ids: + # update the port arg to have an id attribute + used_ids.append(free_port['id']) + break + + if len(used_ids) != len(self.ports) and not self.force_port: + self.module.fail_json( + msg="There are not enough free host ports with the specified port types to proceed") + else: + return True + + else: + self.module.fail_json(msg="There are no host ports available OR there are not enough unassigned host ports") + + @property + def group_id(self): + if self.group: + try: + (rc, all_groups) = request(self.url + 'storage-systems/%s/host-groups' % self.ssid, + url_password=self.pwd, + url_username=self.user, validate_certs=self.certs, headers=HEADERS) + except: + err = get_exception() + self.module.fail_json( + msg="Failed to get host groups. Array Id [%s]. Error [%s]." % (self.ssid, str(err))) + + try: + group_obj = filter(lambda group: group['name'] == self.group, all_groups)[0] + return group_obj['id'] + except IndexError: + self.module.fail_json(msg="No group with the name: %s exists" % self.group) + else: + # Return the value equivalent of no group + return "0000000000000000000000000000000000000000" + + @property + def host_exists(self): + try: + (rc, all_hosts) = request(self.url + 'storage-systems/%s/hosts' % self.ssid, url_password=self.pwd, + url_username=self.user, validate_certs=self.certs, headers=HEADERS) + except: + err = get_exception() + self.module.fail_json( + msg="Failed to determine host existence. Array Id [%s]. Error [%s]." 
% (self.ssid, str(err))) + + self.all_hosts = all_hosts + try: # Try to grab the host object + self.host_obj = filter(lambda host: host['label'] == self.name, all_hosts)[0] + return True + except IndexError: + # Host with the name passed in does not exist + return False + + @property + def needs_update(self): + needs_update = False + self.force_port_update = False + + if self.host_obj['clusterRef'] != self.group_id or \ + self.host_obj['hostTypeIndex'] != self.host_type_index: + needs_update = True + + if self.ports: + if not self.host_obj['ports']: + needs_update = True + for arg_port in self.ports: + # First a quick check to see if the port is mapped to a different host + if not self.port_on_diff_host(arg_port): + for obj_port in self.host_obj['ports']: + if arg_port['label'] == obj_port['label']: + # Confirmed that port arg passed in exists on the host + # port_id = self.get_port_id(obj_port['label']) + if arg_port['type'] != obj_port['portId']['ioInterfaceType']: + needs_update = True + if 'iscsiChapSecret' in arg_port: + # No way to know the current secret attr, so always return True just in case + needs_update = True + else: + # If the user wants the ports to be reassigned, do it + if self.force_port: + self.force_port_update = True + needs_update = True + else: + self.module.fail_json( + msg="The port you specified:\n%s\n is associated with a different host. Specify force_port as True or try a different port spec" % arg_port) + + return needs_update + + def port_on_diff_host(self, arg_port): + """ Checks to see if a passed in port arg is present on a different host """ + for host in self.all_hosts: + # Only check 'other' hosts + if self.host_obj['name'] != self.name: + for port in host['ports']: + # Check if the port label is found in the port dict list of each host + if arg_port['label'] == port['label']: + self.other_host = host + return True + return False + + def reassign_ports(self, apply=True): + if not self.post_body: + self.post_body = dict( + portsToUpdate=dict() + ) + + for port in self.ports: + if self.port_on_diff_host(port): + self.post_body['portsToUpdate'].update(dict( + portRef=self.other_host['hostPortRef'], + hostRef=self.host_obj['id'], + # Doesnt yet address port identifier or chap secret + )) + + if apply: + try: + (rc, self.host_obj) = request( + self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, self.host_obj['id']), + url_username=self.user, url_password=self.pwd, headers=HEADERS, + validate_certs=self.certs, method='POST', data=json.dumps(self.post_body)) + except: + err = get_exception() + self.module.fail_json( + msg="Failed to reassign host port. Host Id [%s]. Array Id [%s]. Error [%s]." 
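port_on_diff_host() above intends to scan only *other* hosts, but its guard tests self.host_obj['name'], which never varies across the loop; presumably the comparison was meant to run against each iterated host. A sketch of that apparent intent, offered as a hedged reading rather than the module's current behavior:

```python
def port_on_diff_host(all_hosts, my_label, arg_port):
    for host in all_hosts:
        if host['label'] == my_label:
            continue  # skip the host being managed; only inspect others
        for port in host.get('ports', []):
            if arg_port['label'] == port['label']:
                return host  # the port is already claimed elsewhere
    return None
```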
% (
+                self.host_obj['id'], self.ssid, str(err)))
+
+    def update_host(self):
+        if self.ports:
+            if self.hostports_available:
+                if self.force_port_update:
+                    self.reassign_ports(apply=False)
+                    # Make sure that only ports that aren't being reassigned are passed into the ports attr
+                    self.ports = [port for port in self.ports if not self.port_on_diff_host(port)]
+
+            self.post_body['ports'] = self.ports
+
+        if self.group:
+            self.post_body['groupId'] = self.group_id
+
+        self.post_body['hostType'] = dict(index=self.host_type_index)
+
+        try:
+            (rc, self.host_obj) = request(self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, self.host_obj['id']),
+                                          url_username=self.user, url_password=self.pwd, headers=HEADERS,
+                                          validate_certs=self.certs, method='POST', data=json.dumps(self.post_body))
+        except Exception:
+            err = get_exception()
+            self.module.fail_json(msg="Failed to update host. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
+
+        self.module.exit_json(changed=True, **self.host_obj)
+
+    def create_host(self):
+        post_body = dict(
+            name=self.name,
+            host_type=dict(index=self.host_type_index),
+            groupId=self.group_id,
+            ports=self.ports
+        )
+        if self.ports:
+            # Check that all supplied port args are valid
+            if self.hostports_available:
+                post_body.update(ports=self.ports)
+            elif not self.force_port:
+                self.module.fail_json(
+                    msg="You supplied ports that are already in use. Set force_port to true if you wish to reassign the ports")
+
+        if not self.host_exists:
+            try:
+                (rc, create_resp) = request(self.url + "storage-systems/%s/hosts" % self.ssid, method='POST',
+                                            url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
+                                            data=json.dumps(post_body), headers=HEADERS)
+            except Exception:
+                err = get_exception()
+                self.module.fail_json(
+                    msg="Failed to create host. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
+        else:
+            self.module.exit_json(changed=False,
+                                  msg="Host already exists. Id [%s]. Host [%s]." % (self.ssid, self.name))
+
+        self.host_obj = create_resp
+
+        if self.ports and self.force_port:
+            self.reassign_ports()
+
+        self.module.exit_json(changed=True, **self.host_obj)
+
+    def remove_host(self):
+        try:
+            (rc, resp) = request(self.url + "storage-systems/%s/hosts/%s" % (self.ssid, self.host_obj['id']),
+                                 method='DELETE',
+                                 url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
+        except Exception:
+            err = get_exception()
+            self.module.fail_json(
+                msg="Failed to remove host. Host [%s]. Array Id [%s]. Error [%s]."
% (self.host_obj['id'], + self.ssid, + str(err))) + + def apply(self): + if self.state == 'present': + if self.host_exists: + if self.needs_update and self.valid_host_type: + self.update_host() + else: + self.module.exit_json(changed=False, msg="Host already present.", id=self.ssid, label=self.name) + elif self.valid_host_type: + self.create_host() + else: + if self.host_exists: + self.remove_host() + self.module.exit_json(changed=True, msg="Host removed.") + else: + self.module.exit_json(changed=False, msg="Host already absent.", id=self.ssid, label=self.name) + + +def main(): + host = Host() + host.apply() + + +if __name__ == '__main__': + main() diff --git a/storage/netapp/netapp_e_hostgroup.py b/storage/netapp/netapp_e_hostgroup.py new file mode 100644 index 00000000000..f89397af59d --- /dev/null +++ b/storage/netapp/netapp_e_hostgroup.py @@ -0,0 +1,417 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, NetApp, Inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: netapp_e_hostgroup +version_added: "2.2" +short_description: Manage NetApp Storage Array Host Groups +author: Kevin Hulquest (@hulquest) +description: +- Create, update or destroy host groups on a NetApp E-Series storage array. +options: + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + example: + - https://prod-1.wahoo.acme.com/devmgr/v2 + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + ssid: + required: true + description: + - The ID of the array to manage (as configured on the web services proxy). + state: + required: true + description: + - Whether the specified host group should exist or not. + choices: ['present', 'absent'] + name: + required: false + description: + - The name of the host group to manage. Either this or C(id_num) must be supplied. + new_name: + required: false + description: + - specify this when you need to update the name of a host group + id: + required: false + description: + - The id number of the host group to manage. Either this or C(name) must be supplied. 
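Since C(name) and C(id) are mutually exclusive and exactly one is required, the module resolves the target group by whichever key was supplied (see group_exists() further down). A minimal sketch of that dispatch, assuming a plain list of group dicts as returned by the host-groups endpoint; find_group() is a hypothetical helper, not part of the module:

# Sketch only: 'groups' stands in for the JSON list returned by
# GET storage-systems/<ssid>/host-groups.
def find_group(groups, name=None, group_id=None):
    """Return the first group whose 'name' or 'id' matches, else None."""
    key, wanted = ('name', name) if name is not None else ('id', group_id)
    for group in groups:
        if group.get(key) == wanted:
            return group
    return None


# Example with hypothetical data:
groups = [{'name': 'dbservers', 'id': '3233343536373839303132333100000000000000'}]
assert find_group(groups, name='dbservers') is not None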
+  hosts:
+    required: false
+    description:
+      - A list of host names/labels to add to the group.
+'''
+EXAMPLES = '''
+    - name: Configure Hostgroup
+      netapp_e_hostgroup:
+        ssid: "{{ ssid }}"
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: "{{ netapp_api_validate_certs }}"
+        name: MyHostGroup
+        state: present
+'''
+RETURN = '''
+clusterRef:
+    description: The unique identification value for this object. Other objects may use this reference value to refer to the cluster.
+    returned: always except when state is absent
+    type: string
+    sample: "3233343536373839303132333100000000000000"
+confirmLUNMappingCreation:
+    description: If true, indicates that creation of LUN-to-volume mappings should require careful confirmation from the end-user, since such a mapping will alter the volume access rights of other clusters, in addition to this one.
+    returned: always
+    type: boolean
+    sample: false
+hosts:
+    description: A list of the hosts that are part of the host group after all operations.
+    returned: always except when state is absent
+    type: list
+    sample: ["HostA", "HostB"]
+id:
+    description: The ID number of the host group.
+    returned: always except when state is absent
+    type: string
+    sample: "3233343536373839303132333100000000000000"
+isSAControlled:
+    description: If true, indicates that I/O accesses from this cluster are subject to the storage array's default LUN-to-volume mappings. If false, indicates that I/O accesses from the cluster are subject to cluster-specific LUN-to-volume mappings.
+    returned: always except when state is absent
+    type: boolean
+    sample: false
+label:
+    description: The user-assigned, descriptive label string for the cluster.
+    returned: always
+    type: string
+    sample: "MyHostGroup"
+name:
+    description: Same as label.
+    returned: always except when state is absent
+    type: string
+    sample: "MyHostGroup"
+protectionInformationCapableAccessMethod:
+    description: This field is true if the host has a PI capable access method.
+ returned: always except when state is absent + type: boolean + sample: true +''' + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json" +} + +import json + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception + +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.error import HTTPError + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError: + err = get_exception() + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +def group_exists(module, id_type, ident, ssid, api_url, user, pwd): + rc, data = get_hostgroups(module, ssid, api_url, user, pwd) + for group in data: + if group[id_type] == ident: + return True, data + else: + continue + + return False, data + + +def get_hostgroups(module, ssid, api_url, user, pwd): + groups = "storage-systems/%s/host-groups" % ssid + url = api_url + groups + try: + rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd) + return rc, data + except HTTPError: + err = get_exception() + module.fail_json(msg="Failed to get host groups. Id [%s]. Error [%s]." % (ssid, str(err))) + + +def get_hostref(module, ssid, name, api_url, user, pwd): + all_hosts = 'storage-systems/%s/hosts' % ssid + url = api_url + all_hosts + try: + rc, data = request(url, method='GET', headers=HEADERS, url_username=user, url_password=pwd) + except Exception: + err = get_exception() + module.fail_json(msg="Failed to get hosts. Id [%s]. Error [%s]." % (ssid, str(err))) + + for host in data: + if host['name'] == name: + return host['hostRef'] + else: + continue + + module.fail_json(msg="No host with the name %s could be found" % name) + + +def create_hostgroup(module, ssid, name, api_url, user, pwd, hosts=None): + groups = "storage-systems/%s/host-groups" % ssid + url = api_url + groups + hostrefs = [] + + if hosts: + for host in hosts: + href = get_hostref(module, ssid, host, api_url, user, pwd) + hostrefs.append(href) + + post_data = json.dumps(dict(name=name, hosts=hostrefs)) + try: + rc, data = request(url, method='POST', data=post_data, headers=HEADERS, url_username=user, url_password=pwd) + except Exception: + err = get_exception() + module.fail_json(msg="Failed to create host group. Id [%s]. Error [%s]." 
% (ssid, str(err))) + + return rc, data + + +def update_hostgroup(module, ssid, name, api_url, user, pwd, hosts=None, new_name=None): + gid = get_hostgroup_id(module, ssid, name, api_url, user, pwd) + groups = "storage-systems/%s/host-groups/%s" % (ssid, gid) + url = api_url + groups + hostrefs = [] + + if hosts: + for host in hosts: + href = get_hostref(module, ssid, host, api_url, user, pwd) + hostrefs.append(href) + + if new_name: + post_data = json.dumps(dict(name=new_name, hosts=hostrefs)) + else: + post_data = json.dumps(dict(hosts=hostrefs)) + + try: + rc, data = request(url, method='POST', data=post_data, headers=HEADERS, url_username=user, url_password=pwd) + except Exception: + err = get_exception() + module.fail_json(msg="Failed to update host group. Group [%s]. Id [%s]. Error [%s]." % (gid, ssid, + str(err))) + + return rc, data + + +def delete_hostgroup(module, ssid, group_id, api_url, user, pwd): + groups = "storage-systems/%s/host-groups/%s" % (ssid, group_id) + url = api_url + groups + # TODO: Loop through hosts, do mapping to href, make new list to pass to data + try: + rc, data = request(url, method='DELETE', headers=HEADERS, url_username=user, url_password=pwd) + except Exception: + err = get_exception() + module.fail_json(msg="Failed to delete host group. Group [%s]. Id [%s]. Error [%s]." % (group_id, ssid, str(err))) + + return rc, data + + +def get_hostgroup_id(module, ssid, name, api_url, user, pwd): + all_groups = 'storage-systems/%s/host-groups' % ssid + url = api_url + all_groups + rc, data = request(url, method='GET', headers=HEADERS, url_username=user, url_password=pwd) + for hg in data: + if hg['name'] == name: + return hg['id'] + else: + continue + + module.fail_json(msg="A hostgroup with the name %s could not be found" % name) + + +def get_hosts_in_group(module, ssid, group_name, api_url, user, pwd): + all_groups = 'storage-systems/%s/host-groups' % ssid + g_url = api_url + all_groups + try: + g_rc, g_data = request(g_url, method='GET', headers=HEADERS, url_username=user, url_password=pwd) + except Exception: + err = get_exception() + module.fail_json( + msg="Failed in first step getting hosts from group. Group: [%s]. Id [%s]. Error [%s]." % (group_name, + ssid, + str(err))) + + all_hosts = 'storage-systems/%s/hosts' % ssid + h_url = api_url + all_hosts + try: + h_rc, h_data = request(h_url, method='GET', headers=HEADERS, url_username=user, url_password=pwd) + except Exception: + err = get_exception() + module.fail_json( + msg="Failed in second step getting hosts from group. Group: [%s]. Id [%s]. Error [%s]." 
% ( + group_name, + ssid, + str(err))) + + hosts_in_group = [] + + for hg in g_data: + if hg['name'] == group_name: + clusterRef = hg['clusterRef'] + + for host in h_data: + if host['clusterRef'] == clusterRef: + hosts_in_group.append(host['name']) + + return hosts_in_group + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=False), + new_name=dict(required=False), + ssid=dict(required=True), + id=dict(required=False), + state=dict(required=True, choices=['present', 'absent']), + hosts=dict(required=False, type='list'), + api_url=dict(required=True), + api_username=dict(required=True), + validate_certs=dict(required=False, default=True), + api_password=dict(required=True, no_log=True) + ), + supports_check_mode=False, + mutually_exclusive=[['name', 'id']], + required_one_of=[['name', 'id']] + ) + + name = module.params['name'] + new_name = module.params['new_name'] + ssid = module.params['ssid'] + id_num = module.params['id'] + state = module.params['state'] + hosts = module.params['hosts'] + user = module.params['api_username'] + pwd = module.params['api_password'] + api_url = module.params['api_url'] + + if not api_url.endswith('/'): + api_url += '/' + + if name: + id_type = 'name' + id_key = name + elif id_num: + id_type = 'id' + id_key = id_num + + exists, group_data = group_exists(module, id_type, id_key, ssid, api_url, user, pwd) + + if state == 'present': + if not exists: + try: + rc, data = create_hostgroup(module, ssid, name, api_url, user, pwd, hosts) + except Exception: + err = get_exception() + module.fail_json(msg="Failed to create a host group. Id [%s]. Error [%s]." % (ssid, str(err))) + + hosts = get_hosts_in_group(module, ssid, name, api_url, user, pwd) + module.exit_json(changed=True, hosts=hosts, **data) + else: + current_hosts = get_hosts_in_group(module, ssid, name, api_url, user, pwd) + + if not current_hosts: + current_hosts = [] + + if not hosts: + hosts = [] + + if set(current_hosts) != set(hosts): + try: + rc, data = update_hostgroup(module, ssid, name, api_url, user, pwd, hosts, new_name) + except Exception: + err = get_exception() + module.fail_json( + msg="Failed to update host group. Group: [%s]. Id [%s]. Error [%s]." % (name, ssid, str(err))) + module.exit_json(changed=True, hosts=hosts, **data) + else: + for group in group_data: + if group['name'] == name: + module.exit_json(changed=False, hosts=current_hosts, **group) + + elif state == 'absent': + if exists: + hg_id = get_hostgroup_id(module, ssid, name, api_url, user, pwd) + try: + rc, data = delete_hostgroup(module, ssid, hg_id, api_url, user, pwd) + except Exception: + err = get_exception() + module.fail_json( + msg="Failed to delete host group. Group: [%s]. Id [%s]. Error [%s]." % (name, ssid, str(err))) + + module.exit_json(changed=True, msg="Host Group deleted") + else: + module.exit_json(changed=False, msg="Host Group is already absent") + + +if __name__ == '__main__': + main() diff --git a/storage/netapp/netapp_e_lun_mapping.py b/storage/netapp/netapp_e_lun_mapping.py new file mode 100644 index 00000000000..5c9d71973b4 --- /dev/null +++ b/storage/netapp/netapp_e_lun_mapping.py @@ -0,0 +1,354 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
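Back in the host group module's main() above, the update decision hinges on set(current_hosts) != set(hosts): membership matters, order does not. A small illustration of why sets rather than lists are compared, with hypothetical host labels:

current_hosts = ['HostB', 'HostA']   # as reported by the array
desired_hosts = ['HostA', 'HostB']   # as supplied to the module

# A list comparison sees a difference and would trigger a needless update:
assert current_hosts != desired_hosts
# The set comparison the module uses treats these as equal, keeping it idempotent:
assert set(current_hosts) == set(desired_hosts)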
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: netapp_e_lun_mapping +author: Kevin Hulquest (@hulquest) +short_description: Create or Remove LUN Mappings +description: + - Allows for the creation and removal of volume to host mappings for NetApp E-series storage arrays. +version_added: "2.2" +options: + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + ssid: + description: + - "The storage system array identifier." + required: False + lun: + description: + - The LUN number you wish to give the mapping + - If the supplied I(volume_name) is associated with a different LUN, it will be updated to what is supplied here. + required: False + default: 0 + target: + description: + - The name of host or hostgroup you wish to assign to the mapping + - If omitted, the default hostgroup is used. + - If the supplied I(volume_name) is associated with a different target, it will be updated to what is supplied here. + required: False + volume_name: + description: + - The name of the volume you wish to include in the mapping. + required: True + target_type: + description: + - Whether the target is a host or group. + - Required if supplying an explicit target. + required: False + choices: ["host", "group"] + state: + description: + - Present will ensure the mapping exists, absent will remove the mapping. + - All parameters I(lun), I(target), I(target_type) and I(volume_name) must still be supplied. + required: True + choices: ["present", "absent"] + api_url: + description: + - "The full API url. Example: http://ENDPOINT:8080/devmgr/v2" + - This can optionally be set via an environment variable, API_URL + required: False + api_username: + description: + - The username used to authenticate against the API. This can optionally be set via an environment variable, API_USERNAME + required: False + api_password: + description: + - The password used to authenticate against the API. This can optionally be set via an environment variable, API_PASSWORD + required: False +''' + +EXAMPLES = ''' +--- + - name: Lun Mapping Example + netapp_e_lun_mapping: + state: present + ssid: 1 + lun: 12 + target: Wilson + volume_name: Colby1 + target_type: group + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" +''' +RETURN = ''' +msg: Mapping exists. +msg: Mapping removed. 
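The two messages above are the module's only terminal msg values; which one applies is decided by an exact dict comparison against the array's existing volume-mappings (see main() below): mapRef, lun, and volumeRef must all match. In miniature, with hypothetical reference values:

desired = dict(mapRef='host-ref-1', lun=12, volumeRef='volume-ref-1')
existing_mappings = [
    dict(mapRef='host-ref-1', lun=11, volumeRef='volume-ref-1'),  # same volume, different LUN
]

# A LUN (or target) change means the desired mapping is "absent", which is what
# drives the create/move path rather than a no-op:
assert desired not in existing_mappings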
+''' +import json + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import open_url + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import HTTPError + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json" +} + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError: + err = get_exception() + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +def get_host_and_group_map(module, ssid, api_url, user, pwd): + mapping = dict(host=dict(), group=dict()) + + hostgroups = 'storage-systems/%s/host-groups' % ssid + groups_url = api_url + hostgroups + try: + hg_rc, hg_data = request(groups_url, headers=HEADERS, url_username=user, url_password=pwd) + except: + err = get_exception() + module.fail_json(msg="Failed to get host groups. Id [%s]. Error [%s]" % (ssid, str(err))) + + for group in hg_data: + mapping['group'][group['name']] = group['id'] + + hosts = 'storage-systems/%s/hosts' % ssid + hosts_url = api_url + hosts + try: + h_rc, h_data = request(hosts_url, headers=HEADERS, url_username=user, url_password=pwd) + except: + err = get_exception() + module.fail_json(msg="Failed to get hosts. Id [%s]. Error [%s]" % (ssid, str(err))) + + for host in h_data: + mapping['host'][host['name']] = host['id'] + + return mapping + + +def get_volume_id(module, data, ssid, name, api_url, user, pwd): + qty = 0 + for volume in data: + if volume['name'] == name: + qty += 1 + + if qty > 1: + module.fail_json(msg="More than one volume with the name: %s was found, " + "please use the volume WWN instead" % name) + else: + wwn = volume['wwn'] + + try: + return wwn + except NameError: + module.fail_json(msg="No volume with the name: %s, was found" % (name)) + + +def get_hostgroups(module, ssid, api_url, user, pwd): + groups = "storage-systems/%s/host-groups" % ssid + url = api_url + groups + try: + rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd) + return data + except Exception: + module.fail_json(msg="There was an issue with connecting, please check that your" + "endpoint is properly defined and your credentials are correct") + + +def get_volumes(module, ssid, api_url, user, pwd, mappable): + volumes = 'storage-systems/%s/%s' % (ssid, mappable) + url = api_url + volumes + try: + rc, data = request(url, url_username=user, url_password=pwd) + except Exception: + err = get_exception() + module.fail_json( + msg="Failed to mappable objects. Type[%s. Id [%s]. Error [%s]." 
% (mappable, ssid, str(err))) + return data + + +def get_lun_mappings(ssid, api_url, user, pwd, get_all=None): + mappings = 'storage-systems/%s/volume-mappings' % ssid + url = api_url + mappings + rc, data = request(url, url_username=user, url_password=pwd) + + if not get_all: + remove_keys = ('ssid', 'perms', 'lunMappingRef', 'type', 'id') + + for key in remove_keys: + for mapping in data: + del mapping[key] + + return data + + +def create_mapping(module, ssid, lun_map, vol_name, api_url, user, pwd): + mappings = 'storage-systems/%s/volume-mappings' % ssid + url = api_url + mappings + post_body = json.dumps(dict( + mappableObjectId=lun_map['volumeRef'], + targetId=lun_map['mapRef'], + lun=lun_map['lun'] + )) + + rc, data = request(url, data=post_body, method='POST', url_username=user, url_password=pwd, headers=HEADERS, + ignore_errors=True) + + if rc == 422: + data = move_lun(module, ssid, lun_map, vol_name, api_url, user, pwd) + # module.fail_json(msg="The volume you specified '%s' is already " + # "part of a different LUN mapping. If you " + # "want to move it to a different host or " + # "hostgroup, then please use the " + # "netapp_e_move_lun module" % vol_name) + return data + + +def move_lun(module, ssid, lun_map, vol_name, api_url, user, pwd): + lun_id = get_lun_id(module, ssid, lun_map, api_url, user, pwd) + move_lun = "storage-systems/%s/volume-mappings/%s/move" % (ssid, lun_id) + url = api_url + move_lun + post_body = json.dumps(dict(targetId=lun_map['mapRef'], lun=lun_map['lun'])) + rc, data = request(url, data=post_body, method='POST', url_username=user, url_password=pwd, headers=HEADERS) + return data + + +def get_lun_id(module, ssid, lun_mapping, api_url, user, pwd): + data = get_lun_mappings(ssid, api_url, user, pwd, get_all=True) + + for lun_map in data: + if lun_map['volumeRef'] == lun_mapping['volumeRef']: + return lun_map['id'] + # This shouldn't ever get called + module.fail_json(msg="No LUN map found.") + + +def remove_mapping(module, ssid, lun_mapping, api_url, user, pwd): + lun_id = get_lun_id(module, ssid, lun_mapping, api_url, user, pwd) + lun_del = "storage-systems/%s/volume-mappings/%s" % (ssid, lun_id) + url = api_url + lun_del + rc, data = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS) + return data + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_username=dict(type='str', required=True), + api_password=dict(type='str', required=True, no_log=True), + api_url=dict(type='str', required=True), + state=dict(required=True, choices=['present', 'absent']), + target=dict(required=False, default=None), + target_type=dict(required=False, choices=['host', 'group']), + lun=dict(required=False, type='int', default=0), + ssid=dict(required=False), + volume_name=dict(required=True), + )) + + module = AnsibleModule(argument_spec=argument_spec) + + state = module.params['state'] + target = module.params['target'] + target_type = module.params['target_type'] + lun = module.params['lun'] + ssid = module.params['ssid'] + vol_name = module.params['volume_name'] + user = module.params['api_username'] + pwd = module.params['api_password'] + api_url = module.params['api_url'] + + if not api_url.endswith('/'): + api_url += '/' + + volume_map = get_volumes(module, ssid, api_url, user, pwd, "volumes") + thin_volume_map = get_volumes(module, ssid, api_url, user, pwd, "thin-volumes") + volref = None + + for vol in volume_map: + if vol['label'] == vol_name: + volref = vol['volumeRef'] + + if not volref: + 
for vol in thin_volume_map: + if vol['label'] == vol_name: + volref = vol['volumeRef'] + + if not volref: + module.fail_json(changed=False, msg="No volume with the name %s was found" % vol_name) + + host_and_group_mapping = get_host_and_group_map(module, ssid, api_url, user, pwd) + + desired_lun_mapping = dict( + mapRef=host_and_group_mapping[target_type][target], + lun=lun, + volumeRef=volref + ) + + lun_mappings = get_lun_mappings(ssid, api_url, user, pwd) + + if state == 'present': + if desired_lun_mapping in lun_mappings: + module.exit_json(changed=False, msg="Mapping exists") + else: + result = create_mapping(module, ssid, desired_lun_mapping, vol_name, api_url, user, pwd) + module.exit_json(changed=True, **result) + + elif state == 'absent': + if desired_lun_mapping in lun_mappings: + result = remove_mapping(module, ssid, desired_lun_mapping, api_url, user, pwd) + module.exit_json(changed=True, msg="Mapping removed") + else: + module.exit_json(changed=False, msg="Mapping absent") + + +if __name__ == '__main__': + main() diff --git a/storage/netapp/netapp_e_snapshot_group.py b/storage/netapp/netapp_e_snapshot_group.py new file mode 100644 index 00000000000..f0464bbf7c8 --- /dev/null +++ b/storage/netapp/netapp_e_snapshot_group.py @@ -0,0 +1,386 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: netapp_e_snapshot_group +short_description: Manage snapshot groups +description: + - Create, update, delete snapshot groups for NetApp E-series storage arrays +version_added: '2.2' +author: Kevin Hulquest (@hulquest) +options: + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + example: + - https://prod-1.wahoo.acme.com/devmgr/v2 + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + state: + description: + - Whether to ensure the group is present or absent. + required: True + choices: + - present + - absent + name: + description: + - The name to give the snapshot group + required: True + base_volume_name: + description: + - The name of the base volume or thin volume to use as the base for the new snapshot group. + - If a snapshot group with an identical C(name) already exists but with a different base volume + an error will be returned. 
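Returning to the netapp_e_lun_mapping main() above: the volume lookup scans regular volumes first and then falls back to thin volumes. The same fallback written as a small helper; find_volume_ref() is a hypothetical name, the data below is made up, and unlike the module's loop this version returns on the first match:

def find_volume_ref(volumes, thin_volumes, label):
    """Return the volumeRef for the first volume matching 'label', else None."""
    for vol in list(volumes) + list(thin_volumes):
        if vol['label'] == label:
            return vol['volumeRef']
    return None


volumes = [{'label': 'Colby1', 'volumeRef': 'volume-ref-1'}]
assert find_volume_ref(volumes, [], 'Colby1') == 'volume-ref-1'
assert find_volume_ref(volumes, [], 'missing') is None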
+    required: True
+  repo_pct:
+    description:
+      - The size of the repository in relation to the size of the base volume.
+    required: False
+    default: 20
+  warning_threshold:
+    description:
+      - The repository utilization warning threshold, as a percentage of the repository volume capacity.
+    required: False
+    default: 80
+  delete_limit:
+    description:
+      - The automatic deletion indicator.
+      - If non-zero, the oldest snapshot image will be automatically deleted when creating a new snapshot image to keep the total number of snapshot images limited to the number specified.
+      - This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group.
+    required: False
+    default: 30
+  full_policy:
+    description:
+      - The behavior when the data repository becomes full.
+      - This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group.
+    required: False
+    default: purgepit
+    choices:
+      - purgepit
+      - unknown
+      - failbasewrites
+      - __UNDEFINED
+  storage_pool_name:
+    required: True
+    description:
+      - The name of the storage pool on which to allocate the repository volume.
+  rollback_priority:
+    required: False
+    description:
+      - The importance of the rollback operation.
+      - This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group.
+    choices:
+      - highest
+      - high
+      - medium
+      - low
+      - lowest
+      - __UNDEFINED
+    default: medium
+"""
+
+EXAMPLES = """
+    - name: Configure Snapshot group
+      netapp_e_snapshot_group:
+        ssid: "{{ ssid }}"
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: "{{ netapp_api_validate_certs }}"
+        base_volume_name: SSGroup_test
+        name: OOSS_Group
+        repo_pct: 20
+        warning_threshold: 85
+        delete_limit: 30
+        full_policy: purgepit
+        storage_pool_name: Disk_Pool_1
+        rollback_priority: medium
+"""
+RETURN = """
+msg:
+    description: Success message
+    returned: success
+    type: string
+    sample: JSON facts for the newly created snapshot group.
+""" +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} +import json + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule + +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.error import HTTPError + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError: + err = get_exception() + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +class SnapshotGroup(object): + def __init__(self): + + argument_spec = basic_auth_argument_spec() + argument_spec.update( + api_username=dict(type='str', required=True), + api_password=dict(type='str', required=True, no_log=True), + api_url=dict(type='str', required=True), + state=dict(required=True, choices=['present', 'absent']), + base_volume_name=dict(required=True), + name=dict(required=True), + repo_pct=dict(default=20, type='int'), + warning_threshold=dict(default=80, type='int'), + delete_limit=dict(default=30, type='int'), + full_policy=dict(default='purgepit', choices=['unknown', 'failbasewrites', 'purgepit']), + rollback_priority=dict(default='medium', choices=['highest', 'high', 'medium', 'low', 'lowest']), + storage_pool_name=dict(type='str'), + ssid=dict(required=True), + ) + + self.module = AnsibleModule(argument_spec=argument_spec) + + self.post_data = dict() + self.warning_threshold = self.module.params['warning_threshold'] + self.base_volume_name = self.module.params['base_volume_name'] + self.name = self.module.params['name'] + self.repo_pct = self.module.params['repo_pct'] + self.delete_limit = self.module.params['delete_limit'] + self.full_policy = self.module.params['full_policy'] + self.rollback_priority = self.module.params['rollback_priority'] + self.storage_pool_name = self.module.params['storage_pool_name'] + self.state = self.module.params['state'] + + self.url = self.module.params['api_url'] + self.user = self.module.params['api_username'] + self.pwd = self.module.params['api_password'] + self.certs = self.module.params['validate_certs'] + self.ssid = self.module.params['ssid'] + + if not self.url.endswith('/'): + self.url += '/' + + self.changed = False + + @property + def pool_id(self): + pools = 'storage-systems/%s/storage-pools' % self.ssid + url = self.url + pools + try: + (rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd) + except: + err = get_exception() + self.module.fail_json(msg="Snapshot group module - Failed to fetch storage pools. " + + "Id [%s]. Error [%s]." 
% (self.ssid, str(err))) + + for pool in data: + if pool['name'] == self.storage_pool_name: + self.pool_data = pool + return pool['id'] + + self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.name) + + @property + def volume_id(self): + volumes = 'storage-systems/%s/volumes' % self.ssid + url = self.url + volumes + try: + rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd, + validate_certs=self.certs) + except: + err = get_exception() + self.module.fail_json(msg="Snapshot group module - Failed to fetch volumes. " + + "Id [%s]. Error [%s]." % (self.ssid, str(err))) + qty = 0 + for volume in data: + if volume['name'] == self.base_volume_name: + qty += 1 + + if qty > 1: + self.module.fail_json(msg="More than one volume with the name: %s was found, " + "please ensure your volume has a unique name" % self.base_volume_name) + else: + Id = volume['id'] + self.volume = volume + + try: + return Id + except NameError: + self.module.fail_json(msg="No volume with the name: %s, was found" % self.base_volume_name) + + @property + def snapshot_group_id(self): + url = self.url + 'storage-systems/%s/snapshot-groups' % self.ssid + try: + rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd, + validate_certs=self.certs) + except: + err = get_exception() + self.module.fail_json(msg="Failed to fetch snapshot groups. " + + "Id [%s]. Error [%s]." % (self.ssid, str(err))) + for ssg in data: + if ssg['name'] == self.name: + self.ssg_data = ssg + return ssg['id'] + + return None + + @property + def ssg_needs_update(self): + if self.ssg_data['fullWarnThreshold'] != self.warning_threshold or \ + self.ssg_data['autoDeleteLimit'] != self.delete_limit or \ + self.ssg_data['repFullPolicy'] != self.full_policy or \ + self.ssg_data['rollbackPriority'] != self.rollback_priority: + return True + else: + return False + + def create_snapshot_group(self): + self.post_data = dict( + baseMappableObjectId=self.volume_id, + name=self.name, + repositoryPercentage=self.repo_pct, + warningThreshold=self.warning_threshold, + autoDeleteLimit=self.delete_limit, + fullPolicy=self.full_policy, + storagePoolId=self.pool_id, + ) + snapshot = 'storage-systems/%s/snapshot-groups' % self.ssid + url = self.url + snapshot + try: + rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS, + url_username=self.user, url_password=self.pwd, validate_certs=self.certs) + except: + err = get_exception() + self.module.fail_json(msg="Failed to create snapshot group. " + + "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name, + self.ssid, + str(err))) + + if not self.snapshot_group_id: + self.snapshot_group_id = self.ssg_data['id'] + + if self.ssg_needs_update: + self.update_ssg() + else: + self.module.exit_json(changed=True, **self.ssg_data) + + def update_ssg(self): + self.post_data = dict( + warningThreshold=self.warning_threshold, + autoDeleteLimit=self.delete_limit, + fullPolicy=self.full_policy, + rollbackPriority=self.rollback_priority + ) + + url = self.url + "storage-systems/%s/snapshot-groups/%s" % (self.ssid, self.snapshot_group_id) + try: + rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS, + url_username=self.user, url_password=self.pwd, validate_certs=self.certs) + except: + err = get_exception() + self.module.fail_json(msg="Failed to update snapshot group. " + + "Snapshot group [%s]. Id [%s]. Error [%s]." 
% (self.name, + self.ssid, + str(err))) + + def apply(self): + if self.state == 'absent': + if self.snapshot_group_id: + try: + rc, resp = request( + self.url + 'storage-systems/%s/snapshot-groups/%s' % (self.ssid, self.snapshot_group_id), + method='DELETE', headers=HEADERS, url_password=self.pwd, url_username=self.user, + validate_certs=self.certs) + except: + err = get_exception() + self.module.fail_json(msg="Failed to delete snapshot group. " + + "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name, + self.ssid, + str(err))) + self.module.exit_json(changed=True, msg="Snapshot group removed", **self.ssg_data) + else: + self.module.exit_json(changed=False, msg="Snapshot group absent") + + elif self.snapshot_group_id: + if self.ssg_needs_update: + self.update_ssg() + self.module.exit_json(changed=True, **self.ssg_data) + else: + self.module.exit_json(changed=False, **self.ssg_data) + else: + self.create_snapshot_group() + + +def main(): + vg = SnapshotGroup() + vg.apply() + + +if __name__ == '__main__': + main() diff --git a/storage/netapp/netapp_e_snapshot_images.py b/storage/netapp/netapp_e_snapshot_images.py new file mode 100644 index 00000000000..460d1a2a0c1 --- /dev/null +++ b/storage/netapp/netapp_e_snapshot_images.py @@ -0,0 +1,254 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: netapp_e_snapshot_images +short_description: Create and delete snapshot images +description: + - Create and delete snapshots images on snapshot groups for NetApp E-series storage arrays. + - Only the oldest snapshot image can be deleted so consistency is preserved. + - "Related: Snapshot volumes are created from snapshot images." +version_added: '2.2' +author: Kevin Hulquest (@hulquest) +options: + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + example: + - https://prod-1.wahoo.acme.com/devmgr/v2 + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + snapshot_group: + description: + - The name of the snapshot group in which you want to create a snapshot image. + required: True + state: + description: + - Whether a new snapshot image should be created or oldest be deleted. 
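As the state description above says, only the oldest image may be deleted. "Oldest" is decided by min() over pitSequenceNumber, exactly as in the oldest_image() helper further down; reduced to its core with hypothetical data:

images = [
    {'pitRef': 'image-ref-2', 'pitSequenceNumber': 7},
    {'pitRef': 'image-ref-1', 'pitSequenceNumber': 3},
]

# The lowest sequence number is the oldest point-in-time image:
oldest = min(images, key=lambda image: image['pitSequenceNumber'])
assert oldest['pitRef'] == 'image-ref-1'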
+ required: True + choices: ['create', 'remove'] +""" +EXAMPLES = """ + - name: Create Snapshot + netapp_e_snapshot_images: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ validate_certs }}" + snapshot_group: "3300000060080E5000299C24000005B656D9F394" + state: 'create' +""" +RETURN = """ +--- + changed: true + msg: "Created snapshot image" + image_id: "3400000060080E5000299B640063074057BC5C5E " +""" + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} +import json + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule + +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.error import HTTPError + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError: + err = get_exception() + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +def snapshot_group_from_name(module, ssid, api_url, api_pwd, api_usr, name): + snap_groups = 'storage-systems/%s/snapshot-groups' % ssid + snap_groups_url = api_url + snap_groups + (ret, snapshot_groups) = request(snap_groups_url, url_username=api_usr, url_password=api_pwd, headers=HEADERS, + validate_certs=module.params['validate_certs']) + + snapshot_group_id = None + for snapshot_group in snapshot_groups: + if name == snapshot_group['label']: + snapshot_group_id = snapshot_group['pitGroupRef'] + break + if snapshot_group_id is None: + module.fail_json(msg="Failed to lookup snapshot group. Group [%s]. Id [%s]." % (name, ssid)) + + return snapshot_group + + +def oldest_image(module, ssid, api_url, api_pwd, api_usr, name): + get_status = 'storage-systems/%s/snapshot-images' % ssid + url = api_url + get_status + + try: + (ret, images) = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS, + validate_certs=module.params['validate_certs']) + except: + err = get_exception() + module.fail_json(msg="Failed to get snapshot images for group. Group [%s]. Id [%s]. Error [%s]" % + (name, ssid, str(err))) + if not images: + module.exit_json(msg="There are no snapshot images to remove. Group [%s]. Id [%s]." % (name, ssid)) + + oldest = min(images, key=lambda x: x['pitSequenceNumber']) + if oldest is None or "pitRef" not in oldest: + module.fail_json(msg="Failed to lookup oldest snapshot group. Group [%s]. Id [%s]." 
% (name, ssid))
+
+    return oldest
+
+
+def create_image(module, ssid, api_url, pwd, user, p, snapshot_group):
+    snapshot_group_obj = snapshot_group_from_name(module, ssid, api_url, pwd, user, snapshot_group)
+    snapshot_group_id = snapshot_group_obj['pitGroupRef']
+    endpoint = 'storage-systems/%s/snapshot-images' % ssid
+    url = api_url + endpoint
+    post_data = json.dumps({'groupId': snapshot_group_id})
+
+    image_data = request(url, data=post_data, method='POST', url_username=user, url_password=pwd, headers=HEADERS,
+                         validate_certs=module.params['validate_certs'])
+
+    if image_data[1]['status'] == 'optimal':
+        status = True
+        image_id = image_data[1]['id']
+    else:
+        status = False
+        image_id = ''
+
+    return status, image_id
+
+
+def delete_image(module, ssid, api_url, pwd, user, snapshot_group):
+    image = oldest_image(module, ssid, api_url, pwd, user, snapshot_group)
+    image_id = image['pitRef']
+    endpoint = 'storage-systems/%s/snapshot-images/%s' % (ssid, image_id)
+    url = api_url + endpoint
+
+    try:
+        (ret, image_data) = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS,
+                                    validate_certs=module.params['validate_certs'])
+    except Exception:
+        e = get_exception()
+        # request() raises Exception(resp_code, data), so recover both values here
+        ret = e.args[0]
+        image_data = (e.args[0], e.args[1])
+
+    if ret == 204:
+        deleted_status = True
+        error_message = ''
+    else:
+        deleted_status = False
+        error_message = image_data[1]['errorMessage']
+
+    return deleted_status, error_message
+
+
+def main():
+    argument_spec = basic_auth_argument_spec()
+    argument_spec.update(dict(
+        snapshot_group=dict(required=True, type='str'),
+        ssid=dict(required=True, type='str'),
+        api_url=dict(required=True),
+        api_username=dict(required=False),
+        api_password=dict(required=False, no_log=True),
+        validate_certs=dict(required=False, default=True),
+        state=dict(required=True, choices=['create', 'remove'], type='str'),
+    ))
+    module = AnsibleModule(argument_spec)
+
+    p = module.params
+
+    ssid = p.pop('ssid')
+    api_url = p.pop('api_url')
+    user = p.pop('api_username')
+    pwd = p.pop('api_password')
+    snapshot_group = p.pop('snapshot_group')
+    desired_state = p.pop('state')
+
+    if not api_url.endswith('/'):
+        api_url += '/'
+
+    if desired_state == 'create':
+        created_status, snapshot_id = create_image(module, ssid, api_url, pwd, user, p, snapshot_group)
+
+        if created_status:
+            module.exit_json(changed=True, msg='Created snapshot image', image_id=snapshot_id)
+        else:
+            module.fail_json(
+                msg="Could not create snapshot image on system %s, in snapshot group %s" % (ssid, snapshot_group))
+    else:
+        deleted, error_msg = delete_image(module, ssid, api_url, pwd, user, snapshot_group)
+
+        if deleted:
+            module.exit_json(changed=True, msg='Deleted snapshot image for snapshot group [%s]' % snapshot_group)
+        else:
+            module.fail_json(
+                msg="Could not delete snapshot image on system %s, in snapshot group %s --- %s" % (
+                    ssid, snapshot_group, error_msg))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/storage/netapp/netapp_e_snapshot_volume.py b/storage/netapp/netapp_e_snapshot_volume.py
new file mode 100644
index 00000000000..afc6e340aaf
--- /dev/null
+++ b/storage/netapp/netapp_e_snapshot_volume.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
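A note on the success check in netapp_e_snapshot_images' create_image() above: only a response whose status is 'optimal' counts as a created image. The same check isolated as a sketch; image_result() is a hypothetical helper and the payload is made up:

def image_result(response_payload):
    """Mirror the check in create_image(): 'optimal' means success."""
    if response_payload.get('status') == 'optimal':
        return True, response_payload['id']
    return False, ''


# Hypothetical payload from the POST to storage-systems/<ssid>/snapshot-images:
created, image_id = image_result({'status': 'optimal', 'id': 'image-ref-1'})
assert created and image_id == 'image-ref-1'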
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_snapshot_volume
+short_description: Manage E/EF-Series snapshot volumes.
+description:
+    - Create, update, remove snapshot volumes for NetApp E/EF-Series storage arrays.
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+notes:
+  - Only I(full_threshold) is supported for update operations. If the snapshot volume already exists and the threshold matches, then an C(ok) status will be returned; no other changes can be made to a pre-existing snapshot volume.
+options:
+  api_username:
+    required: true
+    description:
+    - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+  api_password:
+    required: true
+    description:
+    - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+  api_url:
+    required: true
+    description:
+    - The url to the SANtricity WebServices Proxy or embedded REST API.
+    example:
+    - https://prod-1.wahoo.acme.com/devmgr/v2
+  validate_certs:
+    required: false
+    default: true
+    description:
+    - Should https certificates be validated?
+  ssid:
+    description:
+    - Storage array ID.
+    required: True
+  snapshot_image_id:
+    required: True
+    description:
+    - The identifier of the snapshot image used to create the new snapshot volume.
+    - "Note: You'll likely want to use the M(netapp_e_facts) module to find the ID of the image you want."
+  full_threshold:
+    description:
+    - The repository utilization warning threshold percentage.
+    default: 85
+  name:
+    required: True
+    description:
+    - The name you wish to give the snapshot volume.
+  view_mode:
+    required: False
+    default: readOnly
+    description:
+    - The snapshot volume access mode.
+    choices:
+    - modeUnknown
+    - readWrite
+    - readOnly
+    - __UNDEFINED
+  repo_percentage:
+    description:
+    - The size of the view in relation to the size of the base volume.
+    default: 20
+  storage_pool_name:
+    description:
+    - Name of the storage pool on which to allocate the repository volume.
+    required: True
+  state:
+    description:
+    - Whether to create or remove the snapshot volume.
+    required: True
+    choices:
+    - absent
+    - present
+"""
+EXAMPLES = """
+    - name: Snapshot volume
+      netapp_e_snapshot_volume:
+        ssid: "{{ ssid }}"
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        state: present
+        storage_pool_name: "{{ snapshot_volume_storage_pool_name }}"
+        snapshot_image_id: "{{ snapshot_volume_image_id }}"
+        name: "{{ snapshot_volume_name }}"
+"""
+RETURN = """
+msg:
+    description: Success message
+    returned: success
+    type: string
+    sample: JSON facts for the volume that was created.
+""" +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} +import json + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule + +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.error import HTTPError + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError: + err = get_exception() + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +class SnapshotVolume(object): + def __init__(self): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_username=dict(type='str', required=True), + api_password=dict(type='str', required=True, no_log=True), + api_url=dict(type='str', required=True), + ssid=dict(type='str', required=True), + snapshot_image_id=dict(type='str', required=True), + full_threshold=dict(type='int', default=85), + name=dict(type='str', required=True), + view_mode=dict(type='str', default='readOnly', + choices=['readOnly', 'readWrite', 'modeUnknown', '__Undefined']), + repo_percentage=dict(type='int', default=20), + storage_pool_name=dict(type='str', required=True), + state=dict(type='str', required=True, choices=['absent', 'present']) + )) + + self.module = AnsibleModule(argument_spec=argument_spec) + args = self.module.params + self.state = args['state'] + self.ssid = args['ssid'] + self.snapshot_image_id = args['snapshot_image_id'] + self.full_threshold = args['full_threshold'] + self.name = args['name'] + self.view_mode = args['view_mode'] + self.repo_percentage = args['repo_percentage'] + self.storage_pool_name = args['storage_pool_name'] + self.url = args['api_url'] + self.user = args['api_username'] + self.pwd = args['api_password'] + self.certs = args['validate_certs'] + + if not self.url.endswith('/'): + self.url += '/' + + @property + def pool_id(self): + pools = 'storage-systems/%s/storage-pools' % self.ssid + url = self.url + pools + (rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd, + validate_certs=self.certs) + + for pool in data: + if pool['name'] == self.storage_pool_name: + self.pool_data = pool + return pool['id'] + + self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.name) + + @property + def ss_vol_exists(self): + rc, ss_vols = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid, headers=HEADERS, + url_username=self.user, url_password=self.pwd, validate_certs=self.certs) + if ss_vols: + for ss_vol in ss_vols: + if ss_vol['name'] == self.name: + self.ss_vol = ss_vol + return True + else: + return False + + return False + + @property + def ss_vol_needs_update(self): + if 
self.ss_vol['fullWarnThreshold'] != self.full_threshold: + return True + else: + return False + + def create_ss_vol(self): + post_data = dict( + snapshotImageId=self.snapshot_image_id, + fullThreshold=self.full_threshold, + name=self.name, + viewMode=self.view_mode, + repositoryPercentage=self.repo_percentage, + repositoryPoolId=self.pool_id + ) + + rc, create_resp = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid, + data=json.dumps(post_data), headers=HEADERS, url_username=self.user, + url_password=self.pwd, validate_certs=self.certs, method='POST') + + self.ss_vol = create_resp + # Doing a check after creation because the creation call fails to set the specified warning threshold + if self.ss_vol_needs_update: + self.update_ss_vol() + else: + self.module.exit_json(changed=True, **create_resp) + + def update_ss_vol(self): + post_data = dict( + fullThreshold=self.full_threshold, + ) + + rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']), + data=json.dumps(post_data), headers=HEADERS, url_username=self.user, url_password=self.pwd, + method='POST', validate_certs=self.certs) + + self.module.exit_json(changed=True, **resp) + + def remove_ss_vol(self): + rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']), + headers=HEADERS, url_username=self.user, url_password=self.pwd, validate_certs=self.certs, + method='DELETE') + self.module.exit_json(changed=True, msg="Volume successfully deleted") + + def apply(self): + if self.state == 'present': + if self.ss_vol_exists: + if self.ss_vol_needs_update: + self.update_ss_vol() + else: + self.module.exit_json(changed=False, **self.ss_vol) + else: + self.create_ss_vol() + else: + if self.ss_vol_exists: + self.remove_ss_vol() + else: + self.module.exit_json(changed=False, msg="Volume already absent") + + +def main(): + sv = SnapshotVolume() + sv.apply() + + +if __name__ == '__main__': + main() diff --git a/storage/netapp/netapp_e_storage_system.py b/storage/netapp/netapp_e_storage_system.py new file mode 100644 index 00000000000..64414af6f1e --- /dev/null +++ b/storage/netapp/netapp_e_storage_system.py @@ -0,0 +1,310 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +module: netapp_e_storage_system +version_added: "2.2" +short_description: Add/remove arrays from the Web Services Proxy +description: +- Manage the arrays accessible via a NetApp Web Services Proxy for NetApp E-series storage arrays. +options: + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. 
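Worth calling out from netapp_e_snapshot_volume's create_ss_vol() above: the module re-checks fullWarnThreshold after creation and re-applies it, since the create call may not honor the requested threshold. In outline, with ensure_threshold() and update_fn as hypothetical stand-ins for the module's update path:

def ensure_threshold(ss_vol, wanted, update_fn):
    """Re-apply the warning threshold if creation did not set it."""
    if ss_vol.get('fullWarnThreshold') != wanted:
        return update_fn(dict(fullThreshold=wanted))
    return ss_vol


# Hypothetical objects: the created volume came back with a default threshold.
created = {'fullWarnThreshold': 85}
fixed = ensure_threshold(created, 90,
                         lambda body: dict(created, fullWarnThreshold=body['fullThreshold']))
assert fixed['fullWarnThreshold'] == 90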
+    api_url:
+        required: true
+        description:
+        - The url to the SANtricity WebServices Proxy or embedded REST API.
+        example:
+        - https://prod-1.wahoo.acme.com/devmgr/v2
+    validate_certs:
+        required: false
+        default: true
+        description:
+        - Should https certificates be validated?
+    ssid:
+        required: true
+        description:
+        - The ID of the array to manage. This value must be unique for each array.
+    state:
+        required: true
+        description:
+        - Whether the specified array should be configured on the Web Services Proxy or not.
+        choices: ['present', 'absent']
+    controller_addresses:
+        required: true
+        description:
+        - The list of addresses for the out-of-band management adapter or the agent host. Mutually exclusive with the array_wwn parameter.
+    array_wwn:
+        required: false
+        description:
+        - The WWN of the array to manage. Only necessary if in-band managing multiple arrays on the same agent host. Mutually exclusive with the controller_addresses parameter.
+    array_password:
+        required: false
+        description:
+        - The management password of the array to manage, if set.
+    enable_trace:
+        required: false
+        default: false
+        description:
+        - Enable trace logging for SYMbol calls to the storage system.
+    meta_tags:
+        required: false
+        default: None
+        description:
+        - Optional meta tags to associate with this storage system.
+author: Kevin Hulquest (@hulquest)
+'''
+
+EXAMPLES = '''
+---
+    - name: Presence of storage system
+      netapp_e_storage_system:
+        ssid: "{{ item.key }}"
+        state: present
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: "{{ netapp_api_validate_certs }}"
+        controller_addresses:
+        - "{{ item.value.address1 }}"
+        - "{{ item.value.address2 }}"
+      with_dict: "{{ storage_systems }}"
+      when: check_storage_system
+'''
+
+RETURN = '''
+msg: Storage system removed.
+msg: Storage system added.
+''' +import json +from datetime import datetime as dt, timedelta +from time import sleep + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.error import HTTPError + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError: + err = get_exception() + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +def do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_body, timeout): + (rc, resp) = request(api_url + "/storage-systems", data=request_body, headers=post_headers, + method='POST', url_username=api_usr, url_password=api_pwd, + validate_certs=validate_certs) + status = None + return_resp = resp + if 'status' in resp: + status = resp['status'] + + if rc == 201: + status = 'neverContacted' + fail_after_time = dt.utcnow() + timedelta(seconds=timeout) + + while status == 'neverContacted': + if dt.utcnow() > fail_after_time: + raise Exception("web proxy timed out waiting for array status") + + sleep(1) + (rc, system_resp) = request(api_url + "/storage-systems/%s" % ssid, + headers=dict(Accept="application/json"), url_username=api_usr, + url_password=api_pwd, validate_certs=validate_certs, + ignore_errors=True) + status = system_resp['status'] + return_resp = system_resp + + return status, return_resp + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + ssid=dict(required=True, type='str'), + controller_addresses=dict(type='list'), + array_wwn=dict(required=False, type='str'), + array_password=dict(required=False, type='str', no_log=True), + array_status_timeout_sec=dict(default=60, type='int'), + enable_trace=dict(default=False, type='bool'), + meta_tags=dict(type='list') + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[['controller_addresses', 'array_wwn']], + required_if=[('state', 'present', ['controller_addresses'])] + ) + + p = module.params + + state = p['state'] + ssid = p['ssid'] + controller_addresses = p['controller_addresses'] + array_wwn = p['array_wwn'] + array_password = p['array_password'] + array_status_timeout_sec = p['array_status_timeout_sec'] + validate_certs = p['validate_certs'] + meta_tags = p['meta_tags'] + enable_trace = p['enable_trace'] + + api_usr = p['api_username'] + api_pwd = p['api_password'] + api_url = p['api_url'] + + changed = False + array_exists = False + + try: + (rc, resp) = request(api_url + "/storage-systems/%s" % ssid, headers=dict(Accept="application/json"), + 
url_username=api_usr, url_password=api_pwd, validate_certs=validate_certs,
+                             ignore_errors=True)
+    except:
+        err = get_exception()
+        module.fail_json(msg="Error accessing storage-system with id [%s]. Error [%s]" % (ssid, str(err)))
+
+    array_exists = True
+    array_detail = resp
+
+    if rc == 200:
+        if state == 'absent':
+            changed = True
+            array_exists = False
+        elif state == 'present':
+            current_addresses = frozenset(i for i in (array_detail['ip1'], array_detail['ip2']) if i)
+            if set(controller_addresses) != current_addresses:
+                changed = True
+            if array_detail['wwn'] != array_wwn and array_wwn is not None:
+                module.fail_json(
+                    msg='It seems you may have specified a bad WWN. The storage system ID you specified, %s, currently has the WWN of %s' % (ssid, array_detail['wwn']))
+    elif rc == 404:
+        if state == 'present':
+            changed = True
+            array_exists = False
+        else:
+            changed = False
+            module.exit_json(changed=changed, msg="Storage system was not present.")
+
+    if changed and not module.check_mode:
+        if state == 'present':
+            if not array_exists:
+                # add the array
+                array_add_req = dict(
+                    id=ssid,
+                    controllerAddresses=controller_addresses,
+                    metaTags=meta_tags,
+                    enableTrace=enable_trace
+                )
+
+                if array_wwn:
+                    array_add_req['wwn'] = array_wwn
+
+                if array_password:
+                    array_add_req['password'] = array_password
+
+                post_headers = dict(Accept="application/json")
+                post_headers['Content-Type'] = 'application/json'
+                request_data = json.dumps(array_add_req)
+
+                try:
+                    (rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_data,
+                                         array_status_timeout_sec)
+                except:
+                    err = get_exception()
+                    module.fail_json(msg="Failed to add storage system. Id[%s]. Request body [%s]. Error[%s]." %
+                                         (ssid, request_data, str(err)))
+
+            else:  # array exists, modify...
+                post_headers = dict(Accept="application/json")
+                post_headers['Content-Type'] = 'application/json'
+                post_body = dict(
+                    controllerAddresses=controller_addresses,
+                    removeAllTags=True,
+                    enableTrace=enable_trace,
+                    metaTags=meta_tags
+                )
+
+                try:
+                    (rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, post_body,
+                                         array_status_timeout_sec)
+                except:
+                    err = get_exception()
+                    module.fail_json(msg="Failed to update storage system. Id[%s]. Request body [%s]. Error[%s]." %
+                                         (ssid, post_body, str(err)))
+
+        elif state == 'absent':
+            # delete the array
+            try:
+                (rc, resp) = request(api_url + "/storage-systems/%s" % ssid, method='DELETE',
+                                     url_username=api_usr,
+                                     url_password=api_pwd, validate_certs=validate_certs)
+            except:
+                err = get_exception()
+                module.fail_json(msg="Failed to remove storage array. Id[%s]. Error[%s]." % (ssid, str(err)))
+
+            if rc == 422:
+                module.exit_json(changed=changed, msg="Storage system was not present.")
+            if rc == 204:
+                module.exit_json(changed=changed, msg="Storage system removed.")
+
+    module.exit_json(changed=changed, **resp)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/storage/netapp/netapp_e_storagepool.py b/storage/netapp/netapp_e_storagepool.py
new file mode 100644
index 00000000000..89309708efd
--- /dev/null
+++ b/storage/netapp/netapp_e_storagepool.py
@@ -0,0 +1,888 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_storagepool
+short_description: Manage disk groups and disk pools
+version_added: '2.2'
+description:
+    - Create or remove disk groups and disk pools for NetApp E-series storage arrays.
+options:
+    api_username:
+        required: true
+        description:
+        - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+    api_password:
+        required: true
+        description:
+        - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+    api_url:
+        required: true
+        description:
+        - The url to the SANtricity WebServices Proxy or embedded REST API.
+        example:
+        - https://prod-1.wahoo.acme.com/devmgr/v2
+    validate_certs:
+        required: false
+        default: true
+        description:
+        - Should https certificates be validated?
+    ssid:
+        required: true
+        description:
+        - The ID of the array to manage (as configured on the web services proxy).
+    state:
+        required: true
+        description:
+        - Whether the specified storage pool should exist or not.
+        - Note that removing a storage pool currently requires the removal of all defined volumes first.
+        choices: ['present', 'absent']
+    name:
+        required: true
+        description:
+        - The name of the storage pool to manage.
+    criteria_drive_count:
+        description:
+        - The number of disks to use for building the storage pool. The pool will be expanded if this number exceeds the number of disks already in place.
+    criteria_drive_type:
+        description:
+        - The type of disk (hdd or ssd) to use when searching for candidates to use.
+        choices: ['hdd', 'ssd']
+    criteria_size_unit:
+        description:
+        - The unit used to interpret size parameters.
+        choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+        default: 'gb'
+    criteria_drive_min_size:
+        description:
+        - The minimum individual drive size (in size_unit) to consider when choosing drives for the storage pool.
+    criteria_min_usable_capacity:
+        description:
+        - The minimum size of the storage pool (in size_unit). The pool will be expanded if this value exceeds its current size.
+    criteria_drive_interface_type:
+        description:
+        - The interface type to use when selecting drives for the storage pool (no value means all interface types will be considered).
+        choices: ['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata']
+    criteria_drive_require_fde:
+        description:
+        - Whether full disk encryption ability is required for drives to be added to the storage pool.
+    raid_level:
+        required: true
+        choices: ['raidAll', 'raid0', 'raid1', 'raid3', 'raid5', 'raid6', 'raidDiskPool']
+        description:
+        - "Only required when the requested state is 'present'. The RAID level of the storage pool to be created."
+    erase_secured_drives:
+        required: false
+        choices: ['true', 'false']
+        description:
+        - Whether to erase secured disks before adding to storage pool.
+    secure_pool:
+        required: false
+        choices: ['true', 'false']
+        description:
+        - Whether to convert to a secure storage pool. Will only work if all drives in the pool are security capable.
+    reserve_drive_count:
+        required: false
+        description:
+        - Set the number of drives reserved by the storage pool for reconstruction operations. Only valid on raid disk pools.
+    remove_volumes:
+        required: false
+        default: False
+        description:
+        - Prior to removing a storage pool, delete all volumes in the pool.
+author: Kevin Hulquest (@hulquest)
+
+'''
+EXAMPLES = '''
+    - name: No disk groups
+      netapp_e_storagepool:
+        ssid: "{{ ssid }}"
+        name: "{{ item }}"
+        state: absent
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: "{{ netapp_api_validate_certs }}"
+'''
+RETURN = '''
+msg:
+    description: Success message
+    returned: success
+    type: string
+    sample: Json facts for the pool that was created.
+'''
+
+import json
+import logging
+from traceback import format_exc
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+            force=False, last_mod_time=None, timeout=10, validate_certs=True,
+            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+    try:
+        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+                     url_username=url_username, url_password=url_password, http_agent=http_agent,
+                     force_basic_auth=force_basic_auth)
+    except HTTPError:
+        err = get_exception()
+        r = err.fp
+
+    try:
+        raw_data = r.read()
+        if raw_data:
+            data = json.loads(raw_data)
+        else:
+            raw_data = None
+    except:
+        if ignore_errors:
+            pass
+        else:
+            raise Exception(raw_data)
+
+    resp_code = r.getcode()
+
+    if resp_code >= 400 and not ignore_errors:
+        raise Exception(resp_code, data)
+    else:
+        return resp_code, data
+
+
+def select(predicate, iterable):
+    # python 2, 3 generic filtering.
+    if predicate is None:
+        predicate = bool
+    for x in iterable:
+        if predicate(x):
+            yield x
+
+
+class groupby(object):
+    # python 2, 3 generic grouping.
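The `groupby` body that follows defines `next()` but not `__next__`, so iterating it on Python 3 would fail even though the comment advertises 2/3 compatibility; `itertools.groupby` has shipped since Python 2.4 and covers both interpreters. A minimal sketch with made-up drive dicts, which also shows that `groupby` only merges adjacent items, so the input is sorted on the grouping key first:

```python
from itertools import groupby

# made-up drive records shaped like the proxy's /drives response
drives = [
    {"rawCapacity": "2000", "driveMediaType": "hdd"},
    {"rawCapacity": "1000", "driveMediaType": "ssd"},
    {"rawCapacity": "1000", "driveMediaType": "hdd"},
]

def capacity(d):
    return int(d["rawCapacity"])

# groupby only merges adjacent items, so sort on the grouping key first
for cap, group in groupby(sorted(drives, key=capacity), key=capacity):
    print(cap, [d["driveMediaType"] for d in group])
# 1000 ['ssd', 'hdd']
# 2000 ['hdd']
```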
+ def __init__(self, iterable, key=None): + if key is None: + key = lambda x: x + self.keyfunc = key + self.it = iter(iterable) + self.tgtkey = self.currkey = self.currvalue = object() + + def __iter__(self): + return self + + def next(self): + while self.currkey == self.tgtkey: + self.currvalue = next(self.it) # Exit on StopIteration + self.currkey = self.keyfunc(self.currvalue) + self.tgtkey = self.currkey + return (self.currkey, self._grouper(self.tgtkey)) + + def _grouper(self, tgtkey): + while self.currkey == tgtkey: + yield self.currvalue + self.currvalue = next(self.it) # Exit on StopIteration + self.currkey = self.keyfunc(self.currvalue) + + +class NetAppESeriesStoragePool(object): + def __init__(self): + self._sp_drives_cached = None + + self._size_unit_map = dict( + bytes=1, + b=1, + kb=1024, + mb=1024 ** 2, + gb=1024 ** 3, + tb=1024 ** 4, + pb=1024 ** 5, + eb=1024 ** 6, + zb=1024 ** 7, + yb=1024 ** 8 + ) + + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_username=dict(type='str', required=True), + api_password=dict(type='str', required=True, no_log=True), + api_url=dict(type='str', required=True), + state=dict(required=True, choices=['present', 'absent'], type='str'), + ssid=dict(required=True, type='str'), + name=dict(required=True, type='str'), + criteria_size_unit=dict(default='gb', type='str'), + criteria_drive_count=dict(type='int'), + criteria_drive_interface_type=dict(choices=['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'], + type='str'), + criteria_drive_type=dict(choices=['ssd', 'hdd'], type='str'), + criteria_drive_min_size=dict(type='int'), + criteria_drive_require_fde=dict(type='bool'), + criteria_min_usable_capacity=dict(type='int'), + raid_level=dict( + choices=['raidUnsupported', 'raidAll', 'raid0', 'raid1', 'raid3', 'raid5', 'raid6', 'raidDiskPool']), + erase_secured_drives=dict(type='bool'), + log_path=dict(type='str'), + remove_drives=dict(type='list'), + secure_pool=dict(type='bool', default=False), + reserve_drive_count=dict(type='int'), + remove_volumes=dict(type='bool', default=False) + )) + + self.module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ('state', 'present', ['raid_level']) + ], + mutually_exclusive=[ + + ], + # TODO: update validation for various selection criteria + supports_check_mode=True + ) + + p = self.module.params + + log_path = p['log_path'] + + # logging setup + self._logger = logging.getLogger(self.__class__.__name__) + self.debug = self._logger.debug + + if log_path: + logging.basicConfig(level=logging.DEBUG, filename=log_path) + + self.state = p['state'] + self.ssid = p['ssid'] + self.name = p['name'] + self.validate_certs = p['validate_certs'] + + self.criteria_drive_count = p['criteria_drive_count'] + self.criteria_drive_type = p['criteria_drive_type'] + self.criteria_size_unit = p['criteria_size_unit'] + self.criteria_drive_min_size = p['criteria_drive_min_size'] + self.criteria_min_usable_capacity = p['criteria_min_usable_capacity'] + self.criteria_drive_interface_type = p['criteria_drive_interface_type'] + self.criteria_drive_require_fde = p['criteria_drive_require_fde'] + + self.raid_level = p['raid_level'] + self.erase_secured_drives = p['erase_secured_drives'] + self.remove_drives = p['remove_drives'] + self.secure_pool = p['secure_pool'] + self.reserve_drive_count = p['reserve_drive_count'] + self.remove_volumes = p['remove_volumes'] + + try: + self.api_usr = p['api_username'] + self.api_pwd = p['api_password'] + self.api_url = p['api_url'] + except 
KeyError: + self.module.fail_json(msg="You must pass in api_username " + "and api_password and api_url to the module.") + + self.post_headers = dict(Accept="application/json") + self.post_headers['Content-Type'] = 'application/json' + + # Quick and dirty drive selector, since the one provided by web service proxy is broken for min_disk_size as of 2016-03-12. + # Doesn't really need to be a class once this is in module_utils or retired- just groups everything together so we + # can copy/paste to other modules more easily. + # Filters all disks by specified criteria, then groups remaining disks by capacity, interface and disk type, and selects + # the first set that matches the specified count and/or aggregate capacity. + # class DriveSelector(object): + def filter_drives( + self, + drives, # raw drives resp + interface_type=None, # sas, sata, fibre, etc + drive_type=None, # ssd/hdd + spindle_speed=None, # 7200, 10000, 15000, ssd (=0) + min_drive_size=None, + max_drive_size=None, + fde_required=None, + size_unit='gb', + min_total_capacity=None, + min_drive_count=None, + exact_drive_count=None, + raid_level=None + ): + if min_total_capacity is None and exact_drive_count is None: + raise Exception("One of criteria_min_total_capacity or criteria_drive_count must be specified.") + + if min_total_capacity: + min_total_capacity = min_total_capacity * self._size_unit_map[size_unit] + + # filter clearly invalid/unavailable drives first + drives = select(lambda d: self._is_valid_drive(d), drives) + + if interface_type: + drives = select(lambda d: d['phyDriveType'] == interface_type, drives) + + if drive_type: + drives = select(lambda d: d['driveMediaType'] == drive_type, drives) + + if spindle_speed is not None: # 0 is valid for ssds + drives = select(lambda d: d['spindleSpeed'] == spindle_speed, drives) + + if min_drive_size: + min_drive_size_bytes = min_drive_size * self._size_unit_map[size_unit] + drives = select(lambda d: int(d['rawCapacity']) >= min_drive_size_bytes, drives) + + if max_drive_size: + max_drive_size_bytes = max_drive_size * self._size_unit_map[size_unit] + drives = select(lambda d: int(d['rawCapacity']) <= max_drive_size_bytes, drives) + + if fde_required: + drives = select(lambda d: d['fdeCapable'], drives) + + # initial implementation doesn't have a preference for any of these values... 
+ # just return the first set we find that matches the requested disk count and/or minimum total capacity + for (cur_capacity, drives_by_capacity) in groupby(drives, lambda d: int(d['rawCapacity'])): + for (cur_interface_type, drives_by_interface_type) in groupby(drives_by_capacity, + lambda d: d['phyDriveType']): + for (cur_drive_type, drives_by_drive_type) in groupby(drives_by_interface_type, + lambda d: d['driveMediaType']): + # listify so we can consume more than once + drives_by_drive_type = list(drives_by_drive_type) + candidate_set = list() # reset candidate list on each iteration of the innermost loop + + if exact_drive_count: + if len(drives_by_drive_type) < exact_drive_count: + continue # we know this set is too small, move on + + for drive in drives_by_drive_type: + candidate_set.append(drive) + if self._candidate_set_passes(candidate_set, min_capacity_bytes=min_total_capacity, + min_drive_count=min_drive_count, + exact_drive_count=exact_drive_count, raid_level=raid_level): + return candidate_set + + raise Exception("couldn't find an available set of disks to match specified criteria") + + def _is_valid_drive(self, d): + is_valid = d['available'] \ + and d['status'] == 'optimal' \ + and not d['pfa'] \ + and not d['removed'] \ + and not d['uncertified'] \ + and not d['invalidDriveData'] \ + and not d['nonRedundantAccess'] + + return is_valid + + def _candidate_set_passes(self, candidate_set, min_capacity_bytes=None, min_drive_count=None, + exact_drive_count=None, raid_level=None): + if not self._is_drive_count_valid(len(candidate_set), min_drive_count=min_drive_count, + exact_drive_count=exact_drive_count, raid_level=raid_level): + return False + # TODO: this assumes candidate_set is all the same size- if we want to allow wastage, need to update to use min size of set + if min_capacity_bytes is not None and self._calculate_usable_capacity(int(candidate_set[0]['rawCapacity']), + len(candidate_set), + raid_level=raid_level) < min_capacity_bytes: + return False + + return True + + def _calculate_usable_capacity(self, disk_size_bytes, disk_count, raid_level=None): + if raid_level in [None, 'raid0']: + return disk_size_bytes * disk_count + if raid_level == 'raid1': + return (disk_size_bytes * disk_count) / 2 + if raid_level in ['raid3', 'raid5']: + return (disk_size_bytes * disk_count) - disk_size_bytes + if raid_level in ['raid6', 'raidDiskPool']: + return (disk_size_bytes * disk_count) - (disk_size_bytes * 2) + raise Exception("unsupported raid_level: %s" % raid_level) + + def _is_drive_count_valid(self, drive_count, min_drive_count=0, exact_drive_count=None, raid_level=None): + if exact_drive_count and exact_drive_count != drive_count: + return False + if raid_level == 'raidDiskPool': + if drive_count < 11: + return False + if raid_level == 'raid1': + if drive_count % 2 != 0: + return False + if raid_level in ['raid3', 'raid5']: + if drive_count < 3: + return False + if raid_level == 'raid6': + if drive_count < 4: + return False + if min_drive_count and drive_count < min_drive_count: + return False + + return True + + def get_storage_pool(self, storage_pool_name): + # global ifilter + self.debug("fetching storage pools") + # map the storage pool name to its id + try: + (rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid), + headers=dict(Accept="application/json"), url_username=self.api_usr, + url_password=self.api_pwd, validate_certs=self.validate_certs) + except Exception: + err = get_exception() + rc = err.args[0] + if rc == 404 and self.state == 
'absent': + self.module.exit_json( + msg="Storage pool [%s] did not exist." % (self.name)) + else: + err = get_exception() + self.module.exit_json( + msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]. RC[%s]." % + (self.ssid, str(err), self.state, rc)) + + self.debug("searching for storage pool '%s'" % storage_pool_name) + + pool_detail = next(select(lambda a: a['name'] == storage_pool_name, resp), None) + + if pool_detail: + found = 'found' + else: + found = 'not found' + self.debug(found) + + return pool_detail + + def get_candidate_disks(self): + self.debug("getting candidate disks...") + + # driveCapacityMin is broken on /drives POST. Per NetApp request we built our own + # switch back to commented code below if it gets fixed + # drives_req = dict( + # driveCount = self.criteria_drive_count, + # sizeUnit = 'mb', + # raidLevel = self.raid_level + # ) + # + # if self.criteria_drive_type: + # drives_req['driveType'] = self.criteria_drive_type + # if self.criteria_disk_min_aggregate_size_mb: + # drives_req['targetUsableCapacity'] = self.criteria_disk_min_aggregate_size_mb + # + # # TODO: this arg appears to be ignored, uncomment if it isn't + # #if self.criteria_disk_min_size_gb: + # # drives_req['driveCapacityMin'] = self.criteria_disk_min_size_gb * 1024 + # (rc,drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), data=json.dumps(drives_req), headers=self.post_headers, method='POST', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs) + # + # if rc == 204: + # self.module.fail_json(msg='Cannot find disks to match requested criteria for storage pool') + + # disk_ids = [d['id'] for d in drives_resp] + + try: + (rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), method='GET', + url_username=self.api_usr, url_password=self.api_pwd, + validate_certs=self.validate_certs) + except: + err = get_exception() + self.module.exit_json( + msg="Failed to fetch disk drives. Array id [%s]. Error[%s]." % (self.ssid, str(err))) + + try: + candidate_set = self.filter_drives(drives_resp, + exact_drive_count=self.criteria_drive_count, + drive_type=self.criteria_drive_type, + min_drive_size=self.criteria_drive_min_size, + raid_level=self.raid_level, + size_unit=self.criteria_size_unit, + min_total_capacity=self.criteria_min_usable_capacity, + interface_type=self.criteria_drive_interface_type, + fde_required=self.criteria_drive_require_fde + ) + except: + err = get_exception() + self.module.fail_json( + msg="Failed to allocate adequate drive count. Id [%s]. Error [%s]." % (self.ssid, str(err))) + + disk_ids = [d['id'] for d in candidate_set] + + return disk_ids + + def create_storage_pool(self): + self.debug("creating storage pool...") + + sp_add_req = dict( + raidLevel=self.raid_level, + diskDriveIds=self.disk_ids, + name=self.name + ) + + if self.erase_secured_drives: + sp_add_req['eraseSecuredDrives'] = self.erase_secured_drives + + try: + (rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid), + data=json.dumps(sp_add_req), headers=self.post_headers, method='POST', + url_username=self.api_usr, url_password=self.api_pwd, + validate_certs=self.validate_certs, + timeout=120) + except: + err = get_exception() + pool_id = self.pool_detail['id'] + self.module.exit_json( + msg="Failed to create storage pool. Pool id [%s]. Array id [%s]. Error[%s]." 
% (pool_id, + self.ssid, + str(err))) + + self.pool_detail = self.get_storage_pool(self.name) + + if self.secure_pool: + secure_pool_data = dict(securePool=True) + try: + (retc, r) = request( + self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail['id']), + data=json.dumps(secure_pool_data), headers=self.post_headers, method='POST', + url_username=self.api_usr, + url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120, ignore_errors=True) + except: + err = get_exception() + pool_id = self.pool_detail['id'] + self.module.exit_json( + msg="Failed to update storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id, + self.ssid, + str(err))) + + @property + def needs_raid_level_migration(self): + current_raid_level = self.pool_detail['raidLevel'] + needs_migration = self.raid_level != current_raid_level + + if needs_migration: # sanity check some things so we can fail early/check-mode + if current_raid_level == 'raidDiskPool': + self.module.fail_json(msg="raid level cannot be changed for disk pools") + + return needs_migration + + def migrate_raid_level(self): + self.debug("migrating storage pool to raid level '%s'..." % self.raid_level) + sp_raid_migrate_req = dict( + raidLevel=self.raid_level + ) + try: + (rc, resp) = request( + self.api_url + "/storage-systems/%s/storage-pools/%s/raid-type-migration" % (self.ssid, + self.name), + data=json.dumps(sp_raid_migrate_req), headers=self.post_headers, method='POST', + url_username=self.api_usr, + url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120) + except: + err = get_exception() + pool_id = self.pool_detail['id'] + self.module.exit_json( + msg="Failed to change the raid level of storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % ( + pool_id, self.ssid, str(err))) + + @property + def sp_drives(self, exclude_hotspares=True): + if not self._sp_drives_cached: + + self.debug("fetching drive list...") + try: + (rc, resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), method='GET', + url_username=self.api_usr, url_password=self.api_pwd, + validate_certs=self.validate_certs) + except: + err = get_exception() + pool_id = self.pool_detail['id'] + self.module.exit_json( + msg="Failed to fetch disk drives. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id, self.ssid, str(err))) + + sp_id = self.pool_detail['id'] + if exclude_hotspares: + self._sp_drives_cached = [d for d in resp if d['currentVolumeGroupRef'] == sp_id and not d['hotSpare']] + else: + self._sp_drives_cached = [d for d in resp if d['currentVolumeGroupRef'] == sp_id] + + return self._sp_drives_cached + + @property + def reserved_drive_count_differs(self): + if int(self.pool_detail['volumeGroupData']['diskPoolData'][ + 'reconstructionReservedDriveCount']) != self.reserve_drive_count: + return True + return False + + @property + def needs_expansion(self): + if self.criteria_drive_count > len(self.sp_drives): + return True + # TODO: is totalRaidedSpace the best attribute for "how big is this SP"? 
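+        # criteria_min_usable_capacity is expressed in the playbook's size_unit, so it is
+        # scaled to bytes via _size_unit_map before being compared with the pool's
+        # totalRaidedSpace (cast to int, since the API may return capacities as strings).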
+ if self.criteria_min_usable_capacity and \ + (self.criteria_min_usable_capacity * self._size_unit_map[self.criteria_size_unit]) > int(self.pool_detail['totalRaidedSpace']): + return True + + return False + + def get_expansion_candidate_drives(self): + # sanity checks; don't call this if we can't/don't need to expand + if not self.needs_expansion: + self.module.fail_json(msg="can't get expansion candidates when pool doesn't need expansion") + + self.debug("fetching expansion candidate drives...") + try: + (rc, resp) = request( + self.api_url + "/storage-systems/%s/storage-pools/%s/expand" % (self.ssid, + self.pool_detail['id']), + method='GET', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs, + timeout=120) + except: + err = get_exception() + pool_id = self.pool_detail['id'] + self.module.exit_json( + msg="Failed to fetch candidate drives for storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % ( + pool_id, self.ssid, str(err))) + + current_drive_count = len(self.sp_drives) + current_capacity_bytes = int(self.pool_detail['totalRaidedSpace']) # TODO: is this the right attribute to use? + + if self.criteria_min_usable_capacity: + requested_capacity_bytes = self.criteria_min_usable_capacity * self._size_unit_map[self.criteria_size_unit] + else: + requested_capacity_bytes = current_capacity_bytes + + if self.criteria_drive_count: + minimum_disks_to_add = max((self.criteria_drive_count - current_drive_count), 1) + else: + minimum_disks_to_add = 1 + + minimum_bytes_to_add = max(requested_capacity_bytes - current_capacity_bytes, 0) + + # FUTURE: allow more control over expansion candidate selection? + # loop over candidate disk sets and add until we've met both criteria + + added_drive_count = 0 + added_capacity_bytes = 0 + + drives_to_add = set() + + for s in resp: + # don't trust the API not to give us duplicate drives across candidate sets, especially in multi-drive sets + candidate_drives = s['drives'] + if len(drives_to_add.intersection(candidate_drives)) != 0: + # duplicate, skip + continue + drives_to_add.update(candidate_drives) + added_drive_count += len(candidate_drives) + added_capacity_bytes += int(s['usableCapacity']) + + if added_drive_count >= minimum_disks_to_add and added_capacity_bytes >= minimum_bytes_to_add: + break + + if (added_drive_count < minimum_disks_to_add) or (added_capacity_bytes < minimum_bytes_to_add): + self.module.fail_json( + msg="unable to find at least %s drives to add that would add at least %s bytes of capacity" % ( + minimum_disks_to_add, minimum_bytes_to_add)) + + return list(drives_to_add) + + def expand_storage_pool(self): + drives_to_add = self.get_expansion_candidate_drives() + + self.debug("adding %s drives to storage pool..." % len(drives_to_add)) + sp_expand_req = dict( + drives=drives_to_add + ) + try: + request( + self.api_url + "/storage-systems/%s/storage-pools/%s/expand" % (self.ssid, + self.pool_detail['id']), + data=json.dumps(sp_expand_req), headers=self.post_headers, method='POST', url_username=self.api_usr, + url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120) + except: + err = get_exception() + pool_id = self.pool_detail['id'] + self.module.exit_json( + msg="Failed to add drives to storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id, + self.ssid, + str( + err))) + + # TODO: check response + # TODO: support blocking wait? 
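The two methods above implement pool expansion: get_expansion_candidate_drives walks the candidate sets returned by the /expand endpoint, skipping any set that overlaps drives already taken, until both the drive-count and capacity floors are met, and expand_storage_pool posts the result back. A standalone sketch of that greedy selection, with invented candidate sets standing in for the proxy response:

```python
def pick_expansion_drives(candidate_sets, min_drives, min_bytes):
    """Greedily union non-overlapping candidate sets until both minimums are met."""
    chosen = set()
    added_bytes = 0
    for s in candidate_sets:
        drives = s['drives']
        if chosen.intersection(drives):
            continue  # set overlaps drives we already took; skip it
        chosen.update(drives)
        added_bytes += int(s['usableCapacity'])
        if len(chosen) >= min_drives and added_bytes >= min_bytes:
            return list(chosen)
    raise Exception("unable to find enough non-overlapping candidate drives")

# invented candidate sets shaped like the proxy's expand response
sets = [
    dict(drives=['d1', 'd2'], usableCapacity='2000000000000'),
    dict(drives=['d2', 'd3'], usableCapacity='2000000000000'),  # overlaps d2; skipped
    dict(drives=['d4'], usableCapacity='1000000000000'),
]
print(pick_expansion_drives(sets, min_drives=3, min_bytes=2500000000000))
# ['d1', 'd2', 'd4'] (set order is arbitrary)
```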
+ + def reduce_drives(self, drive_list): + if all(drive in drive_list for drive in self.sp_drives): + # all the drives passed in are present in the system + pass + else: + self.module.fail_json( + msg="One of the drives you wish to remove does not currently exist in the storage pool you specified") + + try: + (rc, resp) = request( + self.api_url + "/storage-systems/%s/storage-pools/%s/reduction" % (self.ssid, + self.pool_detail['id']), + data=json.dumps(drive_list), headers=self.post_headers, method='POST', url_username=self.api_usr, + url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120) + except: + err = get_exception() + pool_id = self.pool_detail['id'] + self.module.exit_json( + msg="Failed to remove drives from storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % ( + pool_id, self.ssid, str(err))) + + def update_reserve_drive_count(self, qty): + data = dict(reservedDriveCount=qty) + try: + (rc, resp) = request( + self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail['id']), + data=json.dumps(data), headers=self.post_headers, method='POST', url_username=self.api_usr, + url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120) + except: + err = get_exception() + pool_id = self.pool_detail['id'] + self.module.exit_json( + msg="Failed to update reserve drive count. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id, + self.ssid, + str( + err))) + + def apply(self): + changed = False + pool_exists = False + + self.pool_detail = self.get_storage_pool(self.name) + + if self.pool_detail: + pool_exists = True + pool_id = self.pool_detail['id'] + + if self.state == 'absent': + self.debug("CHANGED: storage pool exists, but requested state is 'absent'") + changed = True + elif self.state == 'present': + # sanity checks first- we can't change these, so we'll bomb if they're specified + if self.criteria_drive_type and self.criteria_drive_type != self.pool_detail['driveMediaType']: + self.module.fail_json( + msg="drive media type %s cannot be changed to %s" % (self.pool_detail['driveMediaType'], + self.criteria_drive_type)) + + # now the things we can change... + if self.needs_expansion: + self.debug("CHANGED: storage pool needs expansion") + changed = True + + if self.needs_raid_level_migration: + self.debug( + "CHANGED: raid level migration required; storage pool uses '%s', requested is '%s'" % ( + self.pool_detail['raidLevel'], self.raid_level)) + changed = True + + # if self.reserved_drive_count_differs: + # changed = True + + # TODO: validate other state details? (pool priority, alert threshold) + + # per FPoole and others, pool reduce operations will not be supported. Automatic "smart" reduction + # presents a difficult parameter issue, as the disk count can increase due to expansion, so we + # can't just use disk count > criteria_drive_count. + + else: # pool does not exist + if self.state == 'present': + self.debug("CHANGED: storage pool does not exist, but requested state is 'present'") + changed = True + + # ensure we can get back a workable set of disks + # (doing this early so candidate selection runs under check mode) + self.disk_ids = self.get_candidate_disks() + else: + self.module.exit_json(msg="Storage pool [%s] did not exist." % (self.name)) + + if changed and not self.module.check_mode: + # apply changes + if self.state == 'present': + if not pool_exists: + self.create_storage_pool() + else: # pool exists but differs, modify... 
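+                    # modify path: expand first, then any requested drive removals, then
+                    # RAID-level migration, and finally the secure-pool flag.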
+ if self.needs_expansion: + self.expand_storage_pool() + + if self.remove_drives: + self.reduce_drives(self.remove_drives) + + if self.needs_raid_level_migration: + self.migrate_raid_level() + + # if self.reserved_drive_count_differs: + # self.update_reserve_drive_count(self.reserve_drive_count) + + if self.secure_pool: + secure_pool_data = dict(securePool=True) + try: + (retc, r) = request( + self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid, + self.pool_detail[ + 'id']), + data=json.dumps(secure_pool_data), headers=self.post_headers, method='POST', + url_username=self.api_usr, url_password=self.api_pwd, + validate_certs=self.validate_certs, timeout=120, ignore_errors=True) + except: + err = get_exception() + self.module.exit_json( + msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % ( + pool_id, self.ssid, str(err))) + + if int(retc) == 422: + self.module.fail_json( + msg="Error in enabling secure pool. One of the drives in the specified storage pool is likely not security capable") + + elif self.state == 'absent': + # delete the storage pool + try: + remove_vol_opt = '' + if self.remove_volumes: + remove_vol_opt = '?delete-volumes=true' + (rc, resp) = request( + self.api_url + "/storage-systems/%s/storage-pools/%s%s" % (self.ssid, pool_id, + remove_vol_opt), + method='DELETE', + url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs, + timeout=120) + except: + err = get_exception() + self.module.exit_json( + msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id, + self.ssid, + str(err))) + + self.module.exit_json(changed=changed, **self.pool_detail) + + +def main(): + sp = NetAppESeriesStoragePool() + try: + sp.apply() + except Exception: + e = get_exception() + sp.debug("Exception in apply(): \n%s" % format_exc(e)) + raise + + +if __name__ == '__main__': + main() diff --git a/storage/netapp/netapp_e_volume.py b/storage/netapp/netapp_e_volume.py new file mode 100644 index 00000000000..26107965855 --- /dev/null +++ b/storage/netapp/netapp_e_volume.py @@ -0,0 +1,622 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +from ansible.module_utils.api import basic_auth_argument_spec + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: netapp_e_volume +version_added: "2.2" +short_description: Manage storage volumes (standard and thin) +description: + - Create or remove volumes (standard and thin) for NetApp E/EF-series storage arrays. +options: + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. 
+ api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + example: + - https://prod-1.wahoo.acme.com/devmgr/v2 + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + ssid: + required: true + description: + - The ID of the array to manage (as configured on the web services proxy). + state: + required: true + description: + - Whether the specified volume should exist or not. + choices: ['present', 'absent'] + name: + required: true + description: + - The name of the volume to manage + storage_pool_name: + required: true + description: + - "Required only when requested state is 'present'. The name of the storage pool the volume should exist on." + size_unit: + description: + - The unit used to interpret the size parameter + choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'] + default: 'gb' + size: + required: true + description: + - "Required only when state = 'present'. The size of the volume in (size_unit)." + segment_size_kb: + description: + - The segment size of the new volume + default: 512 + thin_provision: + description: + - Whether the volume should be thin provisioned. Thin volumes can only be created on disk pools (raidDiskPool). + default: False + choices: ['yes','no','true','false'] + thin_volume_repo_size: + description: + - Initial size of the thin volume repository volume (in size_unit) + required: True + thin_volume_max_repo_size: + description: + - Maximum size that the thin volume repository volume will automatically expand to + default: same as size (in size_unit) + ssd_cache_enabled: + description: + - Whether an existing SSD cache should be enabled on the volume (fails if no SSD cache defined) + default: None (ignores existing SSD cache setting) + choices: ['yes','no','true','false'] + data_assurance_enabled: + description: + - If data assurance should be enabled for the volume + default: false + +# TODO: doc thin volume parameters + +author: Kevin Hulquest (@hulquest) + +''' +EXAMPLES = ''' + - name: No thin volume + netapp_e_volume: + ssid: "{{ ssid }}" + name: NewThinVolumeByAnsible + state: absent + log_path: /tmp/volume.log + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + when: check_volume + + + - name: No fat volume + netapp_e_volume: + ssid: "{{ ssid }}" + name: NewVolumeByAnsible + state: absent + log_path: /tmp/volume.log + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + when: check_volume +''' +RETURN = ''' +--- +msg: "Standard volume [workload_vol_1] has been created." +msg: "Thin volume [workload_thin_vol] has been created." +msg: "Volume [workload_vol_1] has been expanded." +msg: "Volume [workload_vol_1] has been deleted." +msg: "Volume [workload_vol_1] did not exist." +msg: "Volume [workload_vol_1] already exists." 
+'''
+
+import json
+import logging
+import time
+from traceback import format_exc
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+            force=False, last_mod_time=None, timeout=10, validate_certs=True,
+            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+    try:
+        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+                     url_username=url_username, url_password=url_password, http_agent=http_agent,
+                     force_basic_auth=force_basic_auth)
+    except HTTPError:
+        err = get_exception()
+        r = err.fp
+
+    try:
+        raw_data = r.read()
+        if raw_data:
+            data = json.loads(raw_data)
+        else:
+            raw_data = None
+    except:
+        if ignore_errors:
+            pass
+        else:
+            raise Exception(raw_data)
+
+    resp_code = r.getcode()
+
+    if resp_code >= 400 and not ignore_errors:
+        raise Exception(resp_code, data)
+    else:
+        return resp_code, data
+
+
+def ifilter(predicate, iterable):
+    # python 2, 3 generic filtering.
+    if predicate is None:
+        predicate = bool
+    for x in iterable:
+        if predicate(x):
+            yield x
+
+
+class NetAppESeriesVolume(object):
+    def __init__(self):
+        self._size_unit_map = dict(
+            bytes=1,
+            b=1,
+            kb=1024,
+            mb=1024 ** 2,
+            gb=1024 ** 3,
+            tb=1024 ** 4,
+            pb=1024 ** 5,
+            eb=1024 ** 6,
+            zb=1024 ** 7,
+            yb=1024 ** 8
+        )
+
+        self._post_headers = dict(Accept="application/json")
+        self._post_headers['Content-Type'] = 'application/json'
+
+        argument_spec = basic_auth_argument_spec()
+        argument_spec.update(dict(
+            state=dict(required=True, choices=['present', 'absent']),
+            ssid=dict(required=True, type='str'),
+            name=dict(required=True, type='str'),
+            storage_pool_name=dict(type='str'),
+            size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'],
+                           type='str'),
+            size=dict(type='int'),
+            segment_size_kb=dict(default=128, choices=[8, 16, 32, 64, 128, 256, 512], type='int'),
+            ssd_cache_enabled=dict(type='bool'),  # no default, leave existing setting alone
+            data_assurance_enabled=dict(default=False, type='bool'),
+            thin_provision=dict(default=False, type='bool'),
+            thin_volume_repo_size=dict(type='int'),
+            thin_volume_max_repo_size=dict(type='int'),
+            # TODO: add cache, owning controller support, thin expansion policy, etc
+            log_path=dict(type='str'),
+            api_url=dict(type='str'),
+            api_username=dict(type='str'),
+            api_password=dict(type='str'),
+            validate_certs=dict(type='bool'),
+        ))
+
+        self.module = AnsibleModule(argument_spec=argument_spec,
+                                    required_if=[
+                                        ('state', 'present', ['storage_pool_name', 'size']),
+                                        ('thin_provision', 'true', ['thin_volume_repo_size'])
+                                    ],
+                                    supports_check_mode=True)
+        p = self.module.params
+
+        log_path = p['log_path']
+
+        # logging setup
+        self._logger = logging.getLogger(self.__class__.__name__)
+        self.debug = self._logger.debug
+
+        if log_path:
+            logging.basicConfig(level=logging.DEBUG, filename=log_path)
+
+        self.state = p['state']
+        self.ssid = p['ssid']
+        self.name = p['name']
+        self.storage_pool_name = p['storage_pool_name']
+        self.size_unit = p['size_unit']
+        self.size = p['size']
+        self.segment_size_kb = p['segment_size_kb']
+        self.ssd_cache_enabled = p['ssd_cache_enabled']
+        self.data_assurance_enabled =
p['data_assurance_enabled'] + self.thin_provision = p['thin_provision'] + self.thin_volume_repo_size = p['thin_volume_repo_size'] + self.thin_volume_max_repo_size = p['thin_volume_max_repo_size'] + + if not self.thin_volume_max_repo_size: + self.thin_volume_max_repo_size = self.size + + self.validate_certs = p['validate_certs'] + + try: + self.api_usr = p['api_username'] + self.api_pwd = p['api_password'] + self.api_url = p['api_url'] + except KeyError: + self.module.fail_json(msg="You must pass in api_username " + "and api_password and api_url to the module.") + + def get_volume(self, volume_name): + self.debug('fetching volumes') + # fetch the list of volume objects and look for one with a matching name (we'll need to merge volumes and thin-volumes) + try: + (rc, volumes) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid), + headers=dict(Accept="application/json"), url_username=self.api_usr, + url_password=self.api_pwd, validate_certs=self.validate_certs) + except Exception: + err = get_exception() + self.module.fail_json( + msg="Failed to obtain list of standard/thick volumes. Array Id [%s]. Error[%s]." % (self.ssid, + str(err))) + + try: + self.debug('fetching thin-volumes') + (rc, thinvols) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid), + headers=dict(Accept="application/json"), url_username=self.api_usr, + url_password=self.api_pwd, validate_certs=self.validate_certs) + except Exception: + err = get_exception() + self.module.fail_json( + msg="Failed to obtain list of thin volumes. Array Id [%s]. Error[%s]." % (self.ssid, str(err))) + + volumes.extend(thinvols) + + self.debug("searching for volume '%s'" % volume_name) + volume_detail = next(ifilter(lambda a: a['name'] == volume_name, volumes), None) + + if volume_detail: + self.debug('found') + else: + self.debug('not found') + + return volume_detail + + def get_storage_pool(self, storage_pool_name): + self.debug("fetching storage pools") + # map the storage pool name to its id + try: + (rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid), + headers=dict(Accept="application/json"), url_username=self.api_usr, + url_password=self.api_pwd, validate_certs=self.validate_certs) + except Exception: + err = get_exception() + self.module.fail_json( + msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]." % (self.ssid, str(err))) + + self.debug("searching for storage pool '%s'" % storage_pool_name) + pool_detail = next(ifilter(lambda a: a['name'] == storage_pool_name, resp), None) + + if pool_detail: + self.debug('found') + else: + self.debug('not found') + + return pool_detail + + def create_volume(self, pool_id, name, size_unit, size, segment_size_kb, data_assurance_enabled): + volume_add_req = dict( + name=name, + poolId=pool_id, + sizeUnit=size_unit, + size=size, + segSize=segment_size_kb, + dataAssuranceEnabled=data_assurance_enabled, + ) + + self.debug("creating volume '%s'" % name) + try: + (rc, resp) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid), + data=json.dumps(volume_add_req), headers=self._post_headers, method='POST', + url_username=self.api_usr, url_password=self.api_pwd, + validate_certs=self.validate_certs, + timeout=120) + except Exception: + err = get_exception() + self.module.fail_json( + msg="Failed to create volume. Volume [%s]. Array Id [%s]. Error[%s]." 
% (self.name, self.ssid, + str(err))) + + def create_thin_volume(self, pool_id, name, size_unit, size, thin_volume_repo_size, + thin_volume_max_repo_size, data_assurance_enabled): + thin_volume_add_req = dict( + name=name, + poolId=pool_id, + sizeUnit=size_unit, + virtualSize=size, + repositorySize=thin_volume_repo_size, + maximumRepositorySize=thin_volume_max_repo_size, + dataAssuranceEnabled=data_assurance_enabled, + ) + + self.debug("creating thin-volume '%s'" % name) + try: + (rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid), + data=json.dumps(thin_volume_add_req), headers=self._post_headers, method='POST', + url_username=self.api_usr, url_password=self.api_pwd, + validate_certs=self.validate_certs, + timeout=120) + except Exception: + err = get_exception() + self.module.fail_json( + msg="Failed to create thin volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name, + self.ssid, + str(err))) + + def delete_volume(self): + # delete the volume + self.debug("deleting volume '%s'" % self.volume_detail['name']) + try: + (rc, resp) = request( + self.api_url + "/storage-systems/%s/%s/%s" % (self.ssid, self.volume_resource_name, + self.volume_detail['id']), + method='DELETE', url_username=self.api_usr, url_password=self.api_pwd, + validate_certs=self.validate_certs, timeout=120) + except Exception: + err = get_exception() + self.module.fail_json( + msg="Failed to delete volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name, self.ssid, + str(err))) + + @property + def volume_resource_name(self): + if self.volume_detail['thinProvisioned']: + return 'thin-volumes' + else: + return 'volumes' + + @property + def volume_properties_changed(self): + return self.volume_ssdcache_setting_changed # or with other props here when extended + + # TODO: add support for r/w cache settings, owning controller, scan settings, expansion policy, growth alert threshold + + @property + def volume_ssdcache_setting_changed(self): + # None means ignore existing setting + if self.ssd_cache_enabled is not None and self.ssd_cache_enabled != self.volume_detail['flashCached']: + self.debug("flash cache setting changed") + return True + + def update_volume_properties(self): + update_volume_req = dict() + + # conditionally add values so we ignore unspecified props + if self.volume_ssdcache_setting_changed: + update_volume_req['flashCache'] = self.ssd_cache_enabled + + self.debug("updating volume properties...") + try: + (rc, resp) = request( + self.api_url + "/storage-systems/%s/%s/%s/" % (self.ssid, self.volume_resource_name, + self.volume_detail['id']), + data=json.dumps(update_volume_req), headers=self._post_headers, method='POST', + url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs, + timeout=120) + except Exception: + err = get_exception() + self.module.fail_json( + msg="Failed to update volume properties. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name, + self.ssid, + str(err))) + + @property + def volume_needs_expansion(self): + current_size_bytes = int(self.volume_detail['capacity']) + requested_size_bytes = self.size * self._size_unit_map[self.size_unit] + + # TODO: check requested/current repo volume size for thin-volumes as well + + # TODO: do we need to build any kind of slop factor in here? 
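+        # strict greater-than: expansion only triggers when the requested size exceeds
+        # the current capacity; equal or smaller requests leave the volume untouched
+        # (there is no shrink path).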
+ return requested_size_bytes > current_size_bytes + + def expand_volume(self): + is_thin = self.volume_detail['thinProvisioned'] + if is_thin: + # TODO: support manual repo expansion as well + self.debug('expanding thin volume') + thin_volume_expand_req = dict( + newVirtualSize=self.size, + sizeUnit=self.size_unit + ) + try: + (rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes/%s/expand" % (self.ssid, + self.volume_detail[ + 'id']), + data=json.dumps(thin_volume_expand_req), headers=self._post_headers, method='POST', + url_username=self.api_usr, url_password=self.api_pwd, + validate_certs=self.validate_certs, timeout=120) + except Exception: + err = get_exception() + self.module.fail_json( + msg="Failed to expand thin volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name, + self.ssid, + str(err))) + + # TODO: check return code + else: + self.debug('expanding volume') + volume_expand_req = dict( + expansionSize=self.size, + sizeUnit=self.size_unit + ) + try: + (rc, resp) = request( + self.api_url + "/storage-systems/%s/volumes/%s/expand" % (self.ssid, + self.volume_detail['id']), + data=json.dumps(volume_expand_req), headers=self._post_headers, method='POST', + url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs, + timeout=120) + except Exception: + err = get_exception() + self.module.fail_json( + msg="Failed to expand volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name, + self.ssid, + str(err))) + + self.debug('polling for completion...') + + while True: + try: + (rc, resp) = request(self.api_url + "/storage-systems/%s/volumes/%s/expand" % (self.ssid, + self.volume_detail[ + 'id']), + method='GET', url_username=self.api_usr, url_password=self.api_pwd, + validate_certs=self.validate_certs) + except Exception: + err = get_exception() + self.module.fail_json( + msg="Failed to get volume expansion progress. Volume [%s]. Array Id [%s]. Error[%s]." % ( + self.name, self.ssid, str(err))) + + action = resp['action'] + percent_complete = resp['percentComplete'] + + self.debug('expand action %s, %s complete...' 
% (action, percent_complete)) + + if action == 'none': + self.debug('expand complete') + break + else: + time.sleep(5) + + def apply(self): + changed = False + volume_exists = False + msg = None + + self.volume_detail = self.get_volume(self.name) + + if self.volume_detail: + volume_exists = True + + if self.state == 'absent': + self.debug("CHANGED: volume exists, but requested state is 'absent'") + changed = True + elif self.state == 'present': + # check requested volume size, see if expansion is necessary + if self.volume_needs_expansion: + self.debug( + "CHANGED: requested volume size %s%s is larger than current size %sb" % (self.size, + self.size_unit, + self.volume_detail[ + 'capacity'])) + changed = True + + if self.volume_properties_changed: + self.debug("CHANGED: one or more volume properties have changed") + changed = True + + else: + if self.state == 'present': + self.debug("CHANGED: volume does not exist, but requested state is 'present'") + changed = True + + if changed: + if self.module.check_mode: + self.debug('skipping changes due to check mode') + else: + if self.state == 'present': + if not volume_exists: + pool_detail = self.get_storage_pool(self.storage_pool_name) + + if not pool_detail: + self.module.fail_json(msg='Requested storage pool (%s) not found' % self.storage_pool_name) + + if self.thin_provision and not pool_detail['diskPool']: + self.module.fail_json( + msg='Thin provisioned volumes can only be located on disk pools (not volume groups)') + + pool_id = pool_detail['id'] + + if not self.thin_provision: + self.create_volume(pool_id, self.name, self.size_unit, self.size, self.segment_size_kb, + self.data_assurance_enabled) + msg = "Standard volume [%s] has been created." % (self.name) + + else: + self.create_thin_volume(pool_id, self.name, self.size_unit, self.size, + self.thin_volume_repo_size, self.thin_volume_max_repo_size, + self.data_assurance_enabled) + msg = "Thin volume [%s] has been created." % (self.name) + + else: # volume exists but differs, modify... + if self.volume_needs_expansion: + self.expand_volume() + msg = "Volume [%s] has been expanded." % (self.name) + + # this stuff always needs to run on present (since props can't be set on creation) + if self.volume_properties_changed: + self.update_volume_properties() + msg = "Properties of volume [%s] has been updated." % (self.name) + + elif self.state == 'absent': + self.delete_volume() + msg = "Volume [%s] has been deleted." % (self.name) + else: + self.debug("exiting with no changes") + if self.state == 'absent': + msg = "Volume [%s] did not exist." % (self.name) + else: + msg = "Volume [%s] already exists." % (self.name) + + self.module.exit_json(msg=msg, changed=changed) + + +def main(): + v = NetAppESeriesVolume() + + try: + v.apply() + except Exception: + e = get_exception() + v.debug("Exception in apply(): \n%s" % format_exc(e)) + v.module.fail_json(msg="Module failed. Error [%s]." % (str(e))) + + +if __name__ == '__main__': + main() diff --git a/storage/netapp/netapp_e_volume_copy.py b/storage/netapp/netapp_e_volume_copy.py new file mode 100644 index 00000000000..179ee8ff5ad --- /dev/null +++ b/storage/netapp/netapp_e_volume_copy.py @@ -0,0 +1,443 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
diff --git a/storage/netapp/netapp_e_volume_copy.py b/storage/netapp/netapp_e_volume_copy.py
new file mode 100644
index 00000000000..179ee8ff5ad
--- /dev/null
+++ b/storage/netapp/netapp_e_volume_copy.py
@@ -0,0 +1,443 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+#
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_volume_copy
+short_description: Create volume copy pairs
+description:
+    - Create and delete volume copy pairs on NetApp E-series storage arrays.
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+    api_username:
+        required: true
+        description:
+        - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+    api_password:
+        required: true
+        description:
+        - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+    api_url:
+        required: true
+        description:
+        - The URL to the SANtricity WebServices Proxy or embedded REST API.
+        example:
+        - https://prod-1.wahoo.acme.com/devmgr/v2
+    validate_certs:
+        required: false
+        default: true
+        description:
+        - Should https certificates be validated?
+    source_volume_id:
+        description:
+            - The id of the volume copy source.
+            - If used, must be paired with destination_volume_id
+            - Mutually exclusive with volume_copy_pair_id, and search_volume_id
+    destination_volume_id:
+        description:
+            - The id of the volume copy destination.
+            - If used, must be paired with source_volume_id
+            - Mutually exclusive with volume_copy_pair_id, and search_volume_id
+    volume_copy_pair_id:
+        description:
+            - The id of a given volume copy pair
+            - Mutually exclusive with destination_volume_id, source_volume_id, and search_volume_id
+            - Can be used to delete or check the presence of volume pairs
+            - Must specify this or (destination_volume_id and source_volume_id)
+    status:
+        description:
+            - Whether the specified volume copy pair should exist or not.
+        required: True
+        choices: ['present', 'absent']
+    create_copy_pair_if_does_not_exist:
+        description:
+            - Defines if a copy pair will be created if it does not exist.
+            - If set to True destination_volume_id and source_volume_id are required.
+        choices: [True, False]
+        default: True
+    start_stop_copy:
+        description:
+            - Starts a re-copy or stops a copy in progress
+            - "Note: If you stop the initial file copy before it is done the copy pair will be destroyed"
+            - Requires volume_copy_pair_id
+    search_volume_id:
+        description:
+            - Searches for all valid potential target and source volumes that could be used in a copy_pair
+            - Mutually exclusive with volume_copy_pair_id, destination_volume_id and source_volume_id
+"""
+EXAMPLES = """
+---
+- name: Create a volume copy pair
+  netapp_e_volume_copy:
+    ssid: "{{ ssid }}"
+    api_url: "{{ netapp_api_url }}"
+    api_username: "{{ netapp_api_username }}"
+    api_password: "{{ netapp_api_password }}"
+    validate_certs: false
+    status: present
+    source_volume_id: "{{ source_volume_id }}"
+    destination_volume_id: "{{ destination_volume_id }}"
+"""
+RETURN = """
+msg:
+    description: Success message
+    returned: success
+    type: string
+    sample: Created Volume Copy Pair with ID
+"""
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+HEADERS = {
+    "Content-Type": "application/json",
+    "Accept": "application/json",
+}
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+            force=False, last_mod_time=None, timeout=10, validate_certs=True,
+            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+    try:
+        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+                     url_username=url_username, url_password=url_password, http_agent=http_agent,
+                     force_basic_auth=force_basic_auth)
+    except HTTPError:
+        err = get_exception()
+        r = err.fp
+
+    try:
+        raw_data = r.read()
+        if raw_data:
+            data = json.loads(raw_data)
+        else:
+            raw_data = None
+    except:
+        if ignore_errors:
+            pass
+        else:
+            raise Exception(raw_data)
+
+    resp_code = r.getcode()
+
+    if resp_code >= 400 and not ignore_errors:
+        raise Exception(resp_code, data)
+    else:
+        return resp_code, data
+
+
+def find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(params):
+    get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
+    url = params['api_url'] + get_status
+
+    (rc, resp) = request(url, method='GET', url_username=params['api_username'],
+                         url_password=params['api_password'], headers=HEADERS,
+                         validate_certs=params['validate_certs'])
+
+    volume_copy_pair_id = None
+    for potential_copy_pair in resp:
+        if potential_copy_pair['sourceVolume'] == params['source_volume_id']:
+            # match the destination side of the pair as well (exposed as
+            # 'targetVolume' in the volume-copy-jobs response)
+            if potential_copy_pair['targetVolume'] == params['destination_volume_id']:
+                volume_copy_pair_id = potential_copy_pair['id']
+
+    return volume_copy_pair_id
+
+
+def create_copy_pair(params):
+    get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
+    url = params['api_url'] + get_status
+
+    rData = {
+        "sourceId": params['source_volume_id'],
+        "targetId": params['destination_volume_id']
+    }
+
+    (rc, resp) = request(url, data=json.dumps(rData), ignore_errors=True, method='POST',
+                         url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
+                         validate_certs=params['validate_certs'])
+    if rc != 200:
+        return False, (rc, resp)
+    else:
+        return True, (rc, resp)
+
+
+def delete_copy_pair_by_copy_pair_id(params):
+    get_status = 'storage-systems/%s/volume-copy-jobs/%s?retainRepositories=false' % (
+        params['ssid'], params['volume_copy_pair_id'])
+    url = params['api_url'] + get_status
+
+    (rc, resp) = request(url, ignore_errors=True, method='DELETE',
+                         url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
+                         validate_certs=params['validate_certs'])
+    if rc != 204:
+        return False, (rc, resp)
+    else:
+        return True, (rc, resp)
+
+
+def find_volume_copy_pair_id_by_volume_copy_pair_id(params):
+    # read-only lookup of the pair
+    get_status = 'storage-systems/%s/volume-copy-jobs/%s' % (
+        params['ssid'], params['volume_copy_pair_id'])
+    url = params['api_url'] + get_status
+
+    (rc, resp) = request(url, ignore_errors=True, method='GET',
+                         url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs']) + if rc != 200: + return False, (rc, resp) + else: + return True, (rc, resp) + + +def start_stop_copy(params): + get_status = 'storage-systems/%s/volume-copy-jobs-control/%s?control=%s' % ( + params['ssid'], params['volume_copy_pair_id'], params['start_stop_copy']) + url = params['api_url'] + get_status + + (response_code, response_data) = request(url, ignore_errors=True, method='POST', + url_username=params['api_username'], url_password=params['api_password'], + headers=HEADERS, + validate_certs=params['validate_certs']) + + if response_code == 200: + return True, response_data[0]['percentComplete'] + else: + return False, response_data + + +def check_copy_status(params): + get_status = 'storage-systems/%s/volume-copy-jobs-control/%s' % ( + params['ssid'], params['volume_copy_pair_id']) + url = params['api_url'] + get_status + + (response_code, response_data) = request(url, ignore_errors=True, method='GET', + url_username=params['api_username'], url_password=params['api_password'], + headers=HEADERS, + validate_certs=params['validate_certs']) + + if response_code == 200: + if response_data['percentComplete'] != -1: + + return True, response_data['percentComplete'] + else: + return False, response_data['percentComplete'] + else: + return False, response_data + + +def find_valid_copy_pair_targets_and_sources(params): + get_status = 'storage-systems/%s/volumes' % params['ssid'] + url = params['api_url'] + get_status + + (response_code, response_data) = request(url, ignore_errors=True, method='GET', + url_username=params['api_username'], url_password=params['api_password'], + headers=HEADERS, + validate_certs=params['validate_certs']) + + if response_code == 200: + source_capacity = None + candidates = [] + for volume in response_data: + if volume['id'] == params['search_volume_id']: + source_capacity = volume['capacity'] + else: + candidates.append(volume) + + potential_sources = [] + potential_targets = [] + + for volume in candidates: + if volume['capacity'] > source_capacity: + if volume['volumeCopyTarget'] is False: + if volume['volumeCopySource'] is False: + potential_targets.append(volume['id']) + else: + if volume['volumeCopyTarget'] is False: + if volume['volumeCopySource'] is False: + potential_sources.append(volume['id']) + + return potential_targets, potential_sources + + else: + raise Exception("Response [%s]" % response_code) + + +def main(): + module = AnsibleModule(argument_spec=dict( + source_volume_id=dict(type='str'), + destination_volume_id=dict(type='str'), + copy_priority=dict(required=False, default=0, type='int'), + ssid=dict(required=True, type='str'), + api_url=dict(required=True), + api_username=dict(required=False), + api_password=dict(required=False, no_log=True), + validate_certs=dict(required=False, default=True), + targetWriteProtected=dict(required=False, default=True, type='bool'), + onlineCopy=dict(required=False, default=False, type='bool'), + volume_copy_pair_id=dict(type='str'), + status=dict(required=True, choices=['present', 'absent'], type='str'), + create_copy_pair_if_does_not_exist=dict(required=False, default=True, type='bool'), + start_stop_copy=dict(required=False, choices=['start', 'stop'], type='str'), + search_volume_id=dict(type='str'), + ), + mutually_exclusive=[['volume_copy_pair_id', 'destination_volume_id'], + ['volume_copy_pair_id', 'source_volume_id'], + ['volume_copy_pair_id', 'search_volume_id'], + ['search_volume_id', 'destination_volume_id'], + ['search_volume_id', 'source_volume_id'], + ], + 
required_together=[['source_volume_id', 'destination_volume_id'],
+                           ],
+        required_if=[["create_copy_pair_if_does_not_exist", True, ['source_volume_id', 'destination_volume_id'], ],
+                     ["start_stop_copy", 'stop', ['volume_copy_pair_id'], ],
+                     ["start_stop_copy", 'start', ['volume_copy_pair_id'], ],
+                     ]
+
+    )
+    params = module.params
+
+    if not params['api_url'].endswith('/'):
+        params['api_url'] += '/'
+
+    # Check if we want to search
+    if params['search_volume_id'] is not None:
+        try:
+            potential_targets, potential_sources = find_valid_copy_pair_targets_and_sources(params)
+        except:
+            e = get_exception()
+            module.fail_json(msg="Failed to find valid copy pair candidates. Error [%s]" % str(e))
+
+        module.exit_json(changed=False,
+                         msg='Valid source devices found: %s Valid target devices found: %s' % (len(potential_sources), len(potential_targets)),
+                         search_volume_id=params['search_volume_id'],
+                         valid_targets=potential_targets,
+                         valid_sources=potential_sources)
+
+    # Check if we want to start or stop a copy operation
+    if params['start_stop_copy'] == 'start' or params['start_stop_copy'] == 'stop':
+
+        # Get the current status info
+        currently_running, status_info = check_copy_status(params)
+
+        # If we want to start
+        if params['start_stop_copy'] == 'start':
+
+            # If we have already started
+            if currently_running is True:
+                module.exit_json(changed=False, msg='Volume Copy Pair copy has started.',
+                                 volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=status_info)
+            # If we need to start
+            else:
+
+                start_status, info = start_stop_copy(params)
+
+                if start_status is True:
+                    module.exit_json(changed=True, msg='Volume Copy Pair copy has started.',
+                                     volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=info)
+                else:
+                    module.fail_json(msg="Could not start volume copy pair. Error: %s" % info)
+
+        # If we want to stop
+        else:
+            # If it has already stopped
+            if currently_running is False:
+                module.exit_json(changed=False, msg='Volume Copy Pair copy is stopped.',
+                                 volume_copy_pair_id=params['volume_copy_pair_id'])
+
+            # If we need to stop it
+            else:
+                start_status, info = start_stop_copy(params)
+
+                if start_status is True:
+                    module.exit_json(changed=True, msg='Volume Copy Pair copy has been stopped.',
+                                     volume_copy_pair_id=params['volume_copy_pair_id'])
+                else:
+                    module.fail_json(msg="Could not stop volume copy pair. Error: %s" % info)
+
+    # If we want the copy pair to exist we do this stuff
+    if params['status'] == 'present':
+
+        # We need to check if it exists first
+        if params['volume_copy_pair_id'] is None:
+            params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
+                params)
+
+        # If no volume copy pair is found we need to make it.
+        if params['volume_copy_pair_id'] is None:
+
+            # In order to create we cannot do so with just a volume_copy_pair_id
+
+            copy_began_status, (rc, resp) = create_copy_pair(params)
+
+            if copy_began_status is True:
+                module.exit_json(changed=True, msg='Created Volume Copy Pair with ID: %s' % resp['id'])
+            else:
+                module.fail_json(msg="Could not create volume copy pair. Code: %s Error: %s" % (rc, resp))
+
+        # If it does exist we do nothing
+        else:
+            # We verify that it exists
+            exist_status, (exist_status_code, exist_status_data) = find_volume_copy_pair_id_by_volume_copy_pair_id(
+                params)
+
+            if exist_status:
+                module.exit_json(changed=False,
+                                 msg='Volume Copy Pair with ID: %s exists' % params['volume_copy_pair_id'])
+            else:
+                if exist_status_code == 404:
+                    module.fail_json(
+                        msg='Volume Copy Pair with ID: %s does not exist. Cannot create without source_volume_id and destination_volume_id' %
+                            params['volume_copy_pair_id'])
+                else:
+                    module.fail_json(msg="Could not find volume copy pair. Code: %s Error: %s" % (
+                        exist_status_code, exist_status_data))
+
+        module.fail_json(msg="Done")
+
+    # If we want it to not exist we do this
+    else:
+
+        if params['volume_copy_pair_id'] is None:
+            params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
+                params)
+
+        # We delete it by the volume_copy_pair_id
+        delete_status, (delete_status_code, delete_status_data) = delete_copy_pair_by_copy_pair_id(params)
+
+        if delete_status is True:
+            module.exit_json(changed=True,
+                             msg='Volume Copy Pair with ID: %s was deleted' % params['volume_copy_pair_id'])
+        else:
+            if delete_status_code == 404:
+                module.exit_json(changed=False,
+                                 msg='Volume Copy Pair with ID: %s does not exist' % params['volume_copy_pair_id'])
+            else:
+                module.fail_json(msg="Could not delete volume copy pair. Code: %s Error: %s" % (
+                    delete_status_code, delete_status_data))
+
+
+if __name__ == '__main__':
+    main()
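The start/stop branch of main() above can be exercised on its own with the helpers this file defines; a minimal sketch, assuming a reachable WebServices proxy (the URL, credentials and pair id below are illustrative placeholders):

    params = {
        'api_url': 'https://localhost:8443/devmgr/v2/',     # placeholder proxy URL
        'api_username': 'admin', 'api_password': 'secret',  # placeholder credentials
        'validate_certs': False, 'ssid': '1',
        'volume_copy_pair_id': 'copy-pair-id',              # placeholder pair id
    }
    currently_running, progress = check_copy_status(params)
    if not currently_running:
        params['start_stop_copy'] = 'start'
        started, progress = start_stop_copy(params)
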
diff --git a/system/alternatives.py b/system/alternatives.py
index 90e2237f86c..833ef27aaa5 100644
--- a/system/alternatives.py
+++ b/system/alternatives.py
@@ -22,6 +22,10 @@
 along with Ansible. If not, see .
 """
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: alternatives
@@ -47,28 +51,48 @@
     - The path to the symbolic link that should point to the real executable.
     - This option is required on RHEL-based distributions
   required: false
+  priority:
+    description:
+      - The priority of the alternative
+    required: false
+    default: 50
+    version_added: "2.2"
 requirements: [ update-alternatives ]
 '''

 EXAMPLES = '''
 - name: correct java version selected
-  alternatives: name=java path=/usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
+  alternatives:
+    name: java
+    path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java

 - name: alternatives link created
-  alternatives: name=hadoop-conf link=/etc/hadoop/conf path=/etc/hadoop/conf.ansible
+  alternatives:
+    name: hadoop-conf
+    link: /etc/hadoop/conf
+    path: /etc/hadoop/conf.ansible
+
+- name: make java 32 bit an alternative with low priority
+  alternatives:
+    name: java
+    path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java
+    priority: -10
 '''

-DEFAULT_LINK_PRIORITY = 50
-
 import re

+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+

 def main():

     module = AnsibleModule(
         argument_spec = dict(
             name = dict(required=True),
-            path = dict(required=True),
-            link = dict(required=False),
+            path = dict(required=True, type='path'),
+            link = dict(required=False, type='path'),
+            priority = dict(required=False, type='int',
+                            default=50),
         ),
         supports_check_mode=True,
     )
@@ -77,6 +101,7 @@ def main():
     name = params['name']
     path = params['path']
     link = params['link']
+    priority = params['priority']

     UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives',True)
@@ -124,7 +149,7 @@ def main():
                 module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link")

             module.run_command(
-                [UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)],
+                [UPDATE_ALTERNATIVES, '--install', link, name, path, str(priority)],
                 check_rc=True
             )
@@ -135,12 +160,11 @@ def main():
             )

             module.exit_json(changed=True)
-    except subprocess.CalledProcessError, cpe:
-        module.fail_json(msg=str(dir(cpe)))
+    except subprocess.CalledProcessError:
+        e = get_exception()
+        module.fail_json(msg=str(e))
     else:
         module.exit_json(changed=False)
-
-# import module snippets
-from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+    main()
diff --git a/system/at.py b/system/at.py
index 0ce9ff2c7d4..2c01c5d3195 100644
--- a/system/at.py
+++ b/system/at.py
@@ -18,6 +18,10 @@
 # You should have received a copy of the GNU General Public License
 # along with Ansible. If not, see .

+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'core',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: at
@@ -64,13 +68,22 @@

 EXAMPLES = '''
 # Schedule a command to execute in 20 minutes as root.
-- at: command="ls -d / > /dev/null" count=20 units="minutes"
+- at:
+    command: "ls -d / > /dev/null"
+    count: 20
+    units: minutes

 # Match a command to an existing job and delete the job.
-- at: command="ls -d / > /dev/null" state="absent"
+- at:
+    command: "ls -d / > /dev/null"
+    state: absent

 # Schedule a command to execute in 20 minutes making sure it is unique in the queue.
-- at: command="ls -d / > /dev/null" unique=true count=20 units="minutes"
+- at:
+    command: "ls -d / > /dev/null"
+    unique: true
+    count: 20
+    units: minutes
 '''

 import os
@@ -197,4 +210,6 @@ def main():

 # import module snippets
 from ansible.module_utils.basic import *
-main()
+
+if __name__ == '__main__':
+    main()
diff --git a/system/capabilities.py b/system/capabilities.py
index ce8ffcfa632..27f3c7519cc 100644
--- a/system/capabilities.py
+++ b/system/capabilities.py
@@ -19,6 +19,10 @@
 # along with Ansible. If not, see .
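The priority option added to alternatives above maps one-to-one onto the update-alternatives command line; a minimal standalone sketch of the equivalent call (the binary location and example values are illustrative):

    import subprocess

    # update-alternatives --install <link> <name> <path> <priority>
    subprocess.check_call([
        'update-alternatives', '--install',
        '/usr/bin/java',                                   # link
        'java',                                            # name
        '/usr/lib/jvm/java-7-openjdk-i386/jre/bin/java',   # path
        str(-10),                                          # priority, may be negative
    ])
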
# +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: capabilities @@ -55,10 +59,16 @@ EXAMPLES = ''' # Set cap_sys_chroot+ep on /foo -- capabilities: path=/foo capability=cap_sys_chroot+ep state=present +- capabilities: + path: /foo + capability: cap_sys_chroot+ep + state: present # Remove cap_net_bind_service from /bar -- capabilities: path=/bar capability=cap_net_bind_service state=absent +- capabilities: + path: /bar + capability: cap_net_bind_service + state: absent ''' @@ -180,8 +190,9 @@ def main(): CapabilitiesModule(module) - sys.exit(0) # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/system/cronvar.py b/system/cronvar.py index fe337752d59..a65610811b7 100644 --- a/system/cronvar.py +++ b/system/cronvar.py @@ -26,6 +26,10 @@ # This module is based on the crontab module. # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ --- module: cronvar @@ -70,7 +74,9 @@ default: root cron_file: description: - - If specified, uses this file in cron.d instead of an individual user's crontab. + - If specified, uses this file instead of an individual user's crontab. + Without a leading /, this is assumed to be in /etc/cron.d. With a leading + /, this is taken as absolute. required: false default: null backup: @@ -87,15 +93,22 @@ EXAMPLES = ''' # Ensure a variable exists. # Creates an entry like "EMAIL=doug@ansibmod.con.com" -- cronvar: name="EMAIL" value="doug@ansibmod.con.com" +- cronvar: + name: EMAIL + value: doug@ansibmod.con.com # Make sure a variable is gone. This will remove any variable named # "LEGACY" -- cronvar: name="LEGACY" state=absent +- cronvar: + name: LEGACY + state: absent # Adds a variable to a file under /etc/cron.d -- cronvar: name="LOGFILE" value="/var/log/yum-autoupdate.log" - user="root" cron_file=ansible_yum-autoupdate +- cronvar: + name: LOGFILE + value: /var/log/yum-autoupdate.log + user: root + cron_file: ansible_yum-autoupdate ''' import os @@ -104,6 +117,8 @@ import platform import pipes import shlex +from ansible.module_utils.basic import * +from ansible.module_utils.pycompat24 import get_exception CRONCMD = "/usr/bin/crontab" @@ -124,11 +139,13 @@ def __init__(self, module, user=None, cron_file=None): self.user = 'root' self.lines = None self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"', )) - # select whether we dump additional debug info through syslog - self.syslogging = False if cron_file: - self.cron_file = '/etc/cron.d/%s' % cron_file + self.cron_file = "" + if os.path.isabs(cron_file): + self.cron_file = cron_file + else: + self.cron_file = os.path.join('/etc/cron.d', cron_file) else: self.cron_file = None @@ -143,7 +160,8 @@ def read(self): f = open(self.cron_file, 'r') self.lines = f.read().splitlines() f.close() - except IOError, e: + except IOError: + e = get_exception() # cron file does not exist return except: @@ -165,8 +183,7 @@ def read(self): count += 1 def log_message(self, message): - if self.syslogging: - syslog.syslog(syslog.LOG_NOTICE, 'ansible: "%s"' % message) + self.module.debug('ansible: "%s"' % message) def write(self, backup_file=None): """ @@ -200,7 +217,8 @@ def remove_variable_file(self): try: os.unlink(self.cron_file) return True - except OSError, e: + except OSError: + e = get_exception() # cron file does not exist return False except: @@ -360,12 +378,10 @@ def main(): res_args = 
dict()

     # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
-    os.umask(022)
+    os.umask(int('022',8))

     cronvar = CronVar(module, user, cron_file)

-    if cronvar.syslogging:
-        syslog.openlog('ansible-%s' % os.path.basename(__file__))
-        syslog.syslog(syslog.LOG_NOTICE, 'cronvar instantiated - name: "%s"' % name)
+    module.debug('cronvar instantiated - name: "%s"' % name)

     # --- user input validation ---
@@ -424,7 +440,6 @@
     # --- should never get here
     module.exit_json(msg="Unable to execute cronvar task.")

-# import module snippets
-from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+    main()
diff --git a/system/crypttab.py b/system/crypttab.py
index 44d9f859791..f957a51293a 100644
--- a/system/crypttab.py
+++ b/system/crypttab.py
@@ -18,6 +18,10 @@
 # You should have received a copy of the GNU General Public License
 # along with Ansible. If not, see .

+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: crypttab
@@ -29,7 +33,7 @@
   name:
     description:
       - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
-        optionaly prefixed with C(/dev/mapper), as it appears in the filesystem. I(/dev/mapper)
+        optionally prefixed with C(/dev/mapper/), as it appears in the filesystem. I(/dev/mapper/)
         will be stripped from I(name).
     required: true
     default: null
@@ -52,7 +56,7 @@
     default: null
   password:
     description:
-      - Encryption password, the path to a file containing the pasword, or
+      - Encryption password, the path to a file containing the password, or
         'none' or '-' if the password should be entered at boot.
     required: false
     default: "none"
@@ -73,15 +77,26 @@
 '''

 EXAMPLES = '''
-- name: Set the options explicitly a deivce which must already exist
-  crypttab: name=luks-home state=present opts=discard,cipher=aes-cbc-essiv:sha256
+
+# Since colon is a special character in YAML, if your string contains a colon, it's better to use quotes around the string
+- name: Set the options explicitly for a device which must already exist
+  crypttab:
+    name: luks-home
+    state: present
+    opts: 'discard,cipher=aes-cbc-essiv:sha256'

 - name: Add the 'discard' option to any existing options for all devices
-  crypttab: name={{ item.device }} state=opts_present opts=discard
-  with_items: ansible_mounts
+  crypttab:
+    name: '{{ item.device }}'
+    state: opts_present
+    opts: discard
+  with_items: '{{ ansible_mounts }}'
   when: '/dev/mapper/luks-' in {{ item.device }}
 '''

+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+

 def main():

     module = AnsibleModule(
@@ -89,19 +104,22 @@
             name = dict(required=True),
             state = dict(required=True, choices=['present', 'absent', 'opts_present', 'opts_absent']),
             backing_device = dict(default=None),
-            password = dict(default=None),
+            password = dict(default=None, type='path'),
             opts = dict(default=None),
-            path = dict(default='/etc/crypttab')
+            path = dict(default='/etc/crypttab', type='path')
         ),
         supports_check_mode = True
     )

-    name = module.params['name'].lstrip('/dev/mapper')
     backing_device = module.params['backing_device']
     password = module.params['password']
     opts = module.params['opts']
     state = module.params['state']
     path = module.params['path']
+    name = module.params['name']
+    if name.startswith('/dev/mapper/'):
+        name = name[len('/dev/mapper/'):]
+

     if state != 'absent' and backing_device is None and password is None and opts is None:
module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'", @@ -123,7 +141,8 @@ def main(): try: crypttab = Crypttab(path) existing_line = crypttab.match(name) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg="failed to open and parse crypttab file: %s" % e, **module.params) @@ -202,6 +221,8 @@ def __str__(self): for line in self._lines: lines.append(str(line)) crypttab = '\n'.join(lines) + if len(crypttab) == 0: + crypttab += '\n' if crypttab[-1] != '\n': crypttab += '\n' return crypttab @@ -249,18 +270,18 @@ def _line_valid(self, line): def _split_line(self, line): fields = line.split() try: - field2 = field[2] + field2 = fields[2] except IndexError: field2 = None try: - field3 = field[3] + field3 = fields[3] except IndexError: field3 = None return (fields[0], fields[1], field2, - fields3) + field3) def remove(self): self.line, self.name, self.backing_device = '', None, None @@ -303,7 +324,7 @@ def __init__(self, opts_string): def add(self, opts_string): changed = False for k, v in Options(opts_string).items(): - if self.has_key(k): + if k in self: if self[k] != v: changed = True else: @@ -314,7 +335,7 @@ def add(self, opts_string): def remove(self, opts_string): changed = False for k in Options(opts_string): - if self.has_key(k): + if k in self: del self[k] changed = True return changed, 'removed options' @@ -332,7 +353,7 @@ def __iter__(self): return iter(self.itemlist) def __setitem__(self, key, value): - if not self.has_key(key): + if key not in self: self.itemlist.append(key) super(Options, self).__setitem__(key, value) @@ -353,6 +374,5 @@ def __str__(self): ret.append('%s=%s' % (k, v)) return ','.join(ret) -# import module snippets -from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/system/debconf.py b/system/debconf.py index b249986a947..224f2fbcb9b 100644 --- a/system/debconf.py +++ b/system/debconf.py @@ -21,6 +21,10 @@ along with Ansible. If not, see . """ +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: debconf @@ -51,11 +55,11 @@ aliases: ['setting', 'selection'] vtype: description: - - The type of the value supplied + - The type of the value supplied. + - C(seen) was added in 2.2. 
required: false default: null - choices: [string, password, boolean, select, multiselect, note, error, title, text] - aliases: [] + choices: [string, password, boolean, select, multiselect, note, error, title, text, seen] value: description: - Value to set the configuration to @@ -67,23 +71,35 @@ - Do not set 'seen' flag when pre-seeding required: false default: False - aliases: [] author: "Brian Coca (@bcoca)" ''' EXAMPLES = ''' # Set default locale to fr_FR.UTF-8 -debconf: name=locales question='locales/default_environment_locale' value=fr_FR.UTF-8 vtype='select' +- debconf: + name: locales + question: locales/default_environment_locale + value: fr_FR.UTF-8 + vtype: select # set to generate locales: -debconf: name=locales question='locales/locales_to_be_generated' value='en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8' vtype='multiselect' +- debconf: + name: locales + question: locales/locales_to_be_generated + value: en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8 + vtype: multiselect # Accept oracle license -debconf: name='oracle-java7-installer' question='shared/accepted-oracle-license-v1-1' value='true' vtype='select' +- debconf: + name: oracle-java7-installer + question: shared/accepted-oracle-license-v1-1 + value: true + vtype: select # Specifying package you can register/return the list of questions and current values -debconf: name='tzdata' +- debconf: + name: tzdata ''' def get_selections(module, pkg): @@ -109,6 +125,11 @@ def set_selection(module, pkg, question, vtype, value, unseen): if unseen: cmd.append('-u') + if vtype == 'boolean': + if value == 'True': + value = 'true' + elif value == 'False': + value = 'false' data = ' '.join([pkg, question, vtype, value]) return module.run_command(cmd, data=data) @@ -119,8 +140,8 @@ def main(): argument_spec = dict( name = dict(required=True, aliases=['pkg'], type='str'), question = dict(required=False, aliases=['setting', 'selection'], type='str'), - vtype = dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title', 'text']), - value= dict(required=False, type='str'), + vtype = dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title', 'text', 'seen']), + value = dict(required=False, type='str', aliases=['answer']), unseen = dict(required=False, type='bool'), ), required_together = ( ['question','vtype', 'value'],), @@ -157,12 +178,19 @@ def main(): prev = {question: prev[question]} else: prev[question] = '' + if module._diff: + after = prev.copy() + after.update(curr) + diff_dict = {'before': prev, 'after': after} + else: + diff_dict = {} - module.exit_json(changed=changed, msg=msg, current=curr, previous=prev) + module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict) module.exit_json(changed=changed, msg=msg, current=prev) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/system/facter.py b/system/facter.py index 6c09877fcbe..5ae13ab7371 100644 --- a/system/facter.py +++ b/system/facter.py @@ -20,6 +20,10 @@ # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: facter @@ -47,12 +51,15 @@ def main(): argument_spec = dict() ) - cmd = ["/usr/bin/env", "facter", "--puppet", "--json"] + facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin']) + + cmd = [facter_path, "--puppet", "--json"] + rc, out, err = module.run_command(cmd, 
check_rc=True) module.exit_json(**json.loads(out)) # import module snippets from ansible.module_utils.basic import * -main() - +if __name__ == '__main__': + main() diff --git a/system/filesystem.py b/system/filesystem.py index b44168a0e06..d49360f09bc 100644 --- a/system/filesystem.py +++ b/system/filesystem.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- author: "Alexander Bulimov (@abulimov)" @@ -30,6 +34,7 @@ fstype: description: - File System type to be created. + - reiserfs support was added in 2.2. required: true dev: description: @@ -57,12 +62,61 @@ EXAMPLES = ''' # Create a ext2 filesystem on /dev/sdb1. -- filesystem: fstype=ext2 dev=/dev/sdb1 +- filesystem: + fstype: ext2 + dev: /dev/sdb1 # Create a ext4 filesystem on /dev/sdb1 and check disk blocks. -- filesystem: fstype=ext4 dev=/dev/sdb1 opts="-cc" +- filesystem: + fstype: ext4 + dev: /dev/sdb1 + opts: -cc ''' +def _get_dev_size(dev, module): + """ Return size in bytes of device. Returns int """ + blockdev_cmd = module.get_bin_path("blockdev", required=True) + rc, devsize_in_bytes, err = module.run_command("%s %s %s" % (blockdev_cmd, "--getsize64", dev)) + return int(devsize_in_bytes) + + +def _get_fs_size(fssize_cmd, dev, module): + """ Return size in bytes of filesystem on device. Returns int """ + cmd = module.get_bin_path(fssize_cmd, required=True) + if 'tune2fs' == fssize_cmd: + # Get Block count and Block size + rc, size, err = module.run_command("%s %s %s" % (cmd, '-l', dev)) + if rc == 0: + for line in size.splitlines(): + if 'Block count:' in line: + block_count = int(line.split(':')[1].strip()) + elif 'Block size:' in line: + block_size = int(line.split(':')[1].strip()) + break + else: + module.fail_json(msg="Failed to get block count and block size of %s with %s" % (dev, cmd), rc=rc, err=err ) + elif 'xfs_info' == fssize_cmd: + # Get Block count and Block size + rc, size, err = module.run_command("%s %s" % (cmd, dev)) + if rc == 0: + for line in size.splitlines(): + #if 'data' in line: + if 'data ' in line: + block_size = int(line.split('=')[2].split()[0]) + block_count = int(line.split('=')[3].split(',')[0]) + break + else: + module.fail_json(msg="Failed to get block count and block size of %s with %s" % (dev, cmd), rc=rc, err=err ) + elif 'btrfs' == fssize_cmd: + #ToDo + # There is no way to get the blocksize and blockcount for btrfs filesystems + block_size = 1 + block_count = 1 + + + return block_size*block_count + + def main(): module = AnsibleModule( argument_spec = dict( @@ -82,36 +136,49 @@ def main(): 'grow' : 'resize2fs', 'grow_flag' : None, 'force_flag' : '-F', + 'fsinfo': 'tune2fs', }, 'ext3' : { 'mkfs' : 'mkfs.ext3', 'grow' : 'resize2fs', 'grow_flag' : None, 'force_flag' : '-F', + 'fsinfo': 'tune2fs', }, 'ext4' : { 'mkfs' : 'mkfs.ext4', 'grow' : 'resize2fs', 'grow_flag' : None, 'force_flag' : '-F', + 'fsinfo': 'tune2fs', + }, + 'reiserfs' : { + 'mkfs' : 'mkfs.reiserfs', + 'grow' : 'resize_reiserfs', + 'grow_flag' : None, + 'force_flag' : '-f', + 'fsinfo': 'reiserfstune', }, 'ext4dev' : { 'mkfs' : 'mkfs.ext4', 'grow' : 'resize2fs', 'grow_flag' : None, 'force_flag' : '-F', + 'fsinfo': 'tune2fs', }, 'xfs' : { 'mkfs' : 'mkfs.xfs', 'grow' : 'xfs_growfs', 'grow_flag' : None, 'force_flag' : '-f', + 'fsinfo': 'xfs_info', }, 'btrfs' : { 'mkfs' : 'mkfs.btrfs', 'grow' : 'btrfs', 'grow_flag' : 'filesystem resize', 
'force_flag' : '-f', + 'fsinfo': 'btrfs', } } @@ -131,6 +198,7 @@ def main(): mkfscmd = fs_cmd_map[fstype]['mkfs'] force_flag = fs_cmd_map[fstype]['force_flag'] growcmd = fs_cmd_map[fstype]['grow'] + fssize_cmd = fs_cmd_map[fstype]['fsinfo'] if not os.path.exists(dev): module.fail_json(msg="Device %s not found."%dev) @@ -140,13 +208,24 @@ def main(): rc,raw_fs,err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev)) fs = raw_fs.strip() - if fs == fstype and resizefs == False: + if fs == fstype and resizefs == False and not force: module.exit_json(changed=False) elif fs == fstype and resizefs == True: - cmd = module.get_bin_path(growcmd, required=True) - if module.check_mode: - module.exit_json(changed=True, msg="May resize filesystem") + # Get dev and fs size and compare + devsize_in_bytes = _get_dev_size(dev, module) + fssize_in_bytes = _get_fs_size(fssize_cmd, dev, module) + if fssize_in_bytes < devsize_in_bytes: + fs_smaller = True else: + fs_smaller = False + + + if module.check_mode and fs_smaller: + module.exit_json(changed=True, msg="Resizing filesystem %s on device %s" % (fstype,dev)) + elif module.check_mode and not fs_smaller: + module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (fstype, dev)) + elif fs_smaller: + cmd = module.get_bin_path(growcmd, required=True) rc,out,err = module.run_command("%s %s" % (cmd, dev)) # Sadly there is no easy way to determine if this has changed. For now, just say "true" and move on. # in the future, you would have to parse the output to determine this. @@ -155,6 +234,8 @@ def main(): module.exit_json(changed=True, msg=out) else: module.fail_json(msg="Resizing filesystem %s on device '%s' failed"%(fstype,dev), rc=rc, err=err) + else: + module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (fstype, dev)) elif fs and not force: module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite"%(dev,fs), rc=rc, err=err) @@ -180,4 +261,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/system/firewalld.py b/system/firewalld.py index 9a63da3a544..8324069b1b3 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: firewalld @@ -28,7 +32,7 @@ options: service: description: - - "Name of a service to add/remove to/from firewalld - service must be listed in /etc/services." + - "Name of a service to add/remove to/from firewalld - service must be listed in output of firewall-cmd --get-services." required: false default: null port: @@ -47,6 +51,12 @@ required: false default: null version_added: "2.0" + interface: + description: + - 'The interface you would like to add/remove to/from a zone in firewalld' + required: false + default: null + version_added: "2.1" zone: description: - 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).' 
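The zone fallback described above is resolved from the running daemon; a minimal sketch of that lookup with the python firewalld bindings, mirroring the FirewallClient usage later in this patch (assumes firewalld is installed and running):

    from firewall.client import FirewallClient

    fw = FirewallClient()
    # same fallback the module applies when no zone parameter is given
    zone = fw.getDefaultZone()
    print(zone)  # typically 'public' on a stock configuration
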
@@ -55,8 +65,9 @@
     choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block" ]
   permanent:
     description:
-      - "Should this configuration be in the running firewalld configuration or persist across reboots."
-    required: true
+      - "Should this configuration be in the running firewalld configuration or persist across reboots. As of Ansible version 2.3, permanent operations can operate on firewalld configs when it's not running (requires firewalld >= 0.3.9)"
+    required: false
+    default: null
   immediate:
     description:
       - "Should this configuration be applied immediately, if set as permanent"
@@ -73,39 +84,167 @@
       - "The amount of time the rule should be in effect for when non-permanent."
     required: false
     default: 0
+  masquerade:
+    description:
+      - 'The masquerade setting you would like to enable/disable to/from zones within firewalld'
+    required: false
+    default: null
+    version_added: "2.1"
 notes:
   - Not tested on any Debian based system.
+  - Requires the python2 bindings of firewalld, which may not be installed by default if the distribution switched to python 3
 requirements: [ 'firewalld >= 0.2.11' ]
-author: "Adam Miller (@maxamillion)"
+author: "Adam Miller (@maxamillion)"
 '''

 EXAMPLES = '''
-- firewalld: service=https permanent=true state=enabled
-- firewalld: port=8081/tcp permanent=true state=disabled
-- firewalld: port=161-162/udp permanent=true state=enabled
-- firewalld: zone=dmz service=http permanent=true state=enabled
-- firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled
-- firewalld: source='192.168.1.0/24' zone=internal state=enabled
+- firewalld:
+    service: https
+    permanent: true
+    state: enabled
+
+- firewalld:
+    port: 8081/tcp
+    permanent: true
+    state: disabled
+
+- firewalld:
+    port: 161-162/udp
+    permanent: true
+    state: enabled
+
+- firewalld:
+    zone: dmz
+    service: http
+    permanent: true
+    state: enabled
+
+- firewalld:
+    rich_rule: 'rule service name="ftp" audit limit value="1/m" accept'
+    permanent: true
+    state: enabled
+
+- firewalld:
+    source: 192.0.2.0/24
+    zone: internal
+    state: enabled
+
+- firewalld:
+    zone: trusted
+    interface: eth2
+    permanent: true
+    state: enabled
+
+- firewalld:
+    masquerade: yes
+    state: enabled
+    permanent: true
+    zone: dmz
 '''

-import os
-import re
+from ansible.module_utils.basic import AnsibleModule
+
+import sys
+
+#####################
+# Globals
+#
+fw = None
+module = None
+fw_offline = False
+Rich_Rule = None
+FirewallClientZoneSettings = None
+
+#####################
+# exception handling
+#
+def action_handler(action_func, action_func_args):
+    """
+    Function to wrap calls to make actions on firewalld in try/except
+    logic and emit (hopefully) useful error messages
+    """
+
+    msgs = []
+
+    try:
+        return action_func(*action_func_args)
+    except Exception:
+        # Make python 2.4 shippable ci tests happy
+        e = sys.exc_info()[1]
+
+        # If there are any commonly known errors that we should provide more
+        # context for to help the users diagnose what's wrong.
Handle that here + if "INVALID_SERVICE" in "%s" % e: + msgs.append("Services are defined by port/tcp relationship and named as they are in /etc/services (on most systems)") + + if len(msgs) > 0: + module.fail_json( + msg='ERROR: Exception caught: %s %s' % (e, ', '.join(msgs)) + ) + else: + module.fail_json(msg='ERROR: Exception caught: %s' % e) + +##################### +# fw_offline helpers +# +def get_fw_zone_settings(zone): + if fw_offline: + fw_zone = fw.config.get_zone(zone) + fw_settings = FirewallClientZoneSettings( + list(fw.config.get_zone_config(fw_zone)) + ) + else: + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + + return (fw_zone, fw_settings) + +def update_fw_settings(fw_zone, fw_settings): + if fw_offline: + fw.config.set_zone_config(fw_zone, fw_settings.settings) + else: + fw_zone.update(fw_settings) + +##################### +# masquerade handling +# +def get_masquerade_enabled(zone): + if fw.queryMasquerade(zone) == True: + return True + else: + return False -try: - import firewall.config - FW_VERSION = firewall.config.VERSION +def get_masquerade_enabled_permanent(zone): + fw_zone, fw_settings = get_fw_zone_settings(zone) + if fw_settings.getMasquerade() == True: + return True + else: + return False - from firewall.client import FirewallClient - fw = FirewallClient() - HAS_FIREWALLD = True -except ImportError: - HAS_FIREWALLD = False +def set_masquerade_enabled(zone): + fw.addMasquerade(zone) + +def set_masquerade_disabled(zone): + fw.removeMasquerade(zone) + +def set_masquerade_permanent(zone, masquerade): + fw_zone, fw_settings = get_fw_zone_settings(zone) + fw_settings.setMasquerade(masquerade) + update_fw_settings(fw_zone, fw_settings) ################ # port handling # def get_port_enabled(zone, port_proto): - if port_proto in fw.getPorts(zone): + if fw_offline: + fw_zone, fw_settings = get_fw_zone_settings(zone) + ports_list = fw_settings.getPorts() + else: + ports_list = fw.getPorts(zone) + + if port_proto in ports_list: return True else: return False @@ -117,45 +256,113 @@ def set_port_disabled(zone, port, protocol): fw.removePort(zone, port, protocol) def get_port_enabled_permanent(zone, port_proto): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() + fw_zone, fw_settings = get_fw_zone_settings(zone) + if tuple(port_proto) in fw_settings.getPorts(): return True else: return False def set_port_enabled_permanent(zone, port, protocol): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() + fw_zone, fw_settings = get_fw_zone_settings(zone) fw_settings.addPort(port, protocol) - fw_zone.update(fw_settings) + update_fw_settings(fw_zone, fw_settings) def set_port_disabled_permanent(zone, port, protocol): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() + fw_zone, fw_settings = get_fw_zone_settings(zone) fw_settings.removePort(port, protocol) - fw_zone.update(fw_settings) + update_fw_settings(fw_zone, fw_settings) #################### # source handling -# +# def get_source(zone, source): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() + fw_zone, fw_settings = get_fw_zone_settings(zone) if source in fw_settings.getSources(): return True else: return False def add_source(zone, source): - fw_zone = fw.config().getZoneByName(zone) - fw_settings = fw_zone.getSettings() + fw_zone, fw_settings = get_fw_zone_settings(zone) fw_settings.addSource(source) + update_fw_settings(fw_zone, fw_settings) def remove_source(zone, 
source):
-    fw_zone = fw.config().getZoneByName(zone)
-    fw_settings = fw_zone.getSettings()
+    fw_zone, fw_settings = get_fw_zone_settings(zone)
     fw_settings.removeSource(source)
+    update_fw_settings(fw_zone, fw_settings)
+
+####################
+# interface handling
+#
+def get_interface(zone, interface):
+    if fw_offline:
+        fw_zone, fw_settings = get_fw_zone_settings(zone)
+        interface_list = fw_settings.getInterfaces()
+    else:
+        interface_list = fw.getInterfaces(zone)
+    if interface in interface_list:
+        return True
+    else:
+        return False
+
+def change_zone_of_interface(zone, interface):
+    fw.changeZoneOfInterface(zone, interface)
+
+def remove_interface(zone, interface):
+    fw.removeInterface(zone, interface)
+
+def get_interface_permanent(zone, interface):
+    fw_zone, fw_settings = get_fw_zone_settings(zone)
+
+    if interface in fw_settings.getInterfaces():
+        return True
+    else:
+        return False
+
+def change_zone_of_interface_permanent(zone, interface):
+    fw_zone, fw_settings = get_fw_zone_settings(zone)
+    if fw_offline:
+        iface_zone_objs = [ ]
+        # do not shadow the zone parameter while scanning the zone XML files
+        for zone_name in fw.config.get_zones():
+            old_zone_obj = fw.config.get_zone(zone_name)
+            if interface in old_zone_obj.interfaces:
+                iface_zone_objs.append(old_zone_obj)
+        if len(iface_zone_objs) > 1:
+            # Even though it shouldn't happen, it's actually possible that
+            # the same interface is in several zone XML files
+            module.fail_json(
+                msg = 'ERROR: interface {} is in {} zone XML files, can only be in one'.format(
+                    interface,
+                    len(iface_zone_objs)
+                )
+            )
+        if iface_zone_objs:
+            old_zone_obj = iface_zone_objs[0]
+            if old_zone_obj.name != zone:
+                old_zone_settings = FirewallClientZoneSettings(
+                    fw.config.get_zone_config(old_zone_obj)
+                )
+                old_zone_settings.removeInterface(interface)    # remove from old
+                fw.config.set_zone_config(old_zone_obj, old_zone_settings.settings)
+
+                fw_settings.addInterface(interface)             # add to new
+                fw.config.set_zone_config(fw_zone, fw_settings.settings)
+        else:
+            # interface is not attached to any zone yet
+            fw_settings.addInterface(interface)
+            fw.config.set_zone_config(fw_zone, fw_settings.settings)
+    else:
+        old_zone_name = fw.config().getZoneOfInterface(interface)
+        if old_zone_name != zone:
+            if old_zone_name:
+                old_zone_obj = fw.config().getZoneByName(old_zone_name)
+                old_zone_settings = old_zone_obj.getSettings()
+                old_zone_settings.removeInterface(interface)  # remove from old
+                old_zone_obj.update(old_zone_settings)
+            fw_settings.addInterface(interface)               # add to new
+            fw_zone.update(fw_settings)
+
+def remove_interface_permanent(zone, interface):
+    fw_zone, fw_settings = get_fw_zone_settings(zone)
+    fw_settings.removeInterface(interface)
+    update_fw_settings(fw_zone, fw_settings)

 ####################
 # service handling
@@ -173,30 +380,30 @@
 def set_service_disabled(zone, service):
     fw.removeService(zone, service)

 def get_service_enabled_permanent(zone, service):
-    fw_zone = fw.config().getZoneByName(zone)
-    fw_settings = fw_zone.getSettings()
+    fw_zone, fw_settings = get_fw_zone_settings(zone)
+
     if service in fw_settings.getServices():
         return True
     else:
         return False

 def set_service_enabled_permanent(zone, service):
-    fw_zone = fw.config().getZoneByName(zone)
-    fw_settings = fw_zone.getSettings()
+    fw_zone, fw_settings = get_fw_zone_settings(zone)
     fw_settings.addService(service)
-    fw_zone.update(fw_settings)
+    update_fw_settings(fw_zone, fw_settings)

 def set_service_disabled_permanent(zone, service):
-    fw_zone = fw.config().getZoneByName(zone)
-    fw_settings = fw_zone.getSettings()
+    fw_zone, fw_settings = get_fw_zone_settings(zone)
     fw_settings.removeService(service)
-    fw_zone.update(fw_settings)
-
+    update_fw_settings(fw_zone, fw_settings)
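The rich-rule getters below first normalize the user-supplied rule through Rich_Rule, because firewalld stores and returns rules in canonical form and a plain string compare would otherwise miss equivalent rules. A minimal sketch of that normalization (assumes the python firewalld bindings are available):

    from firewall.client import Rich_Rule

    raw = 'rule service name="ftp" audit limit value="1/m" accept'
    canonical = str(Rich_Rule(rule_str=raw))
    # membership tests against fw.getRichRules(zone) use the canonical string
    print(canonical)
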
 ####################
 # rich rule handling
 #
 def get_rich_rule_enabled(zone, rule):
+    # Convert the rule string to standard format
+    # before checking whether it is present
+    rule = str(Rich_Rule(rule_str=rule))
     if rule in fw.getRichRules(zone):
         return True
     else:
@@ -209,28 +416,31 @@
 def set_rich_rule_disabled(zone, rule):
     fw.removeRichRule(zone, rule)

 def get_rich_rule_enabled_permanent(zone, rule):
-    fw_zone = fw.config().getZoneByName(zone)
-    fw_settings = fw_zone.getSettings()
+    fw_zone, fw_settings = get_fw_zone_settings(zone)
+    # Convert the rule string to standard format
+    # before checking whether it is present
+    rule = str(Rich_Rule(rule_str=rule))
     if rule in fw_settings.getRichRules():
         return True
     else:
         return False

 def set_rich_rule_enabled_permanent(zone, rule):
-    fw_zone = fw.config().getZoneByName(zone)
-    fw_settings = fw_zone.getSettings()
+    fw_zone, fw_settings = get_fw_zone_settings(zone)
     fw_settings.addRichRule(rule)
-    fw_zone.update(fw_settings)
+    update_fw_settings(fw_zone, fw_settings)

 def set_rich_rule_disabled_permanent(zone, rule):
-    fw_zone = fw.config().getZoneByName(zone)
-    fw_settings = fw_zone.getSettings()
+    fw_zone, fw_settings = get_fw_zone_settings(zone)
     fw_settings.removeRichRule(rule)
-    fw_zone.update(fw_settings)
-
+    update_fw_settings(fw_zone, fw_settings)

 def main():
+    ## make module global so we don't have to pass it to action_handler every
+    ## function call
+    global module
     module = AnsibleModule(
         argument_spec = dict(
             service=dict(required=False,default=None),
@@ -242,18 +452,77 @@
             permanent=dict(type='bool',required=False,default=None),
             state=dict(choices=['enabled', 'disabled'], required=True),
             timeout=dict(type='int',required=False,default=0),
+            interface=dict(required=False,default=None),
+            masquerade=dict(required=False,default=None),
+            offline=dict(type='bool',required=False,default=None),
         ),
         supports_check_mode=True
     )
+
+    ## Handle running (online) daemon vs non-running (offline) daemon
+    global fw
+    global fw_offline
+    global Rich_Rule
+    global FirewallClientZoneSettings
+
+    ## Imports
+    try:
+        import firewall.config
+        FW_VERSION = firewall.config.VERSION
+
+        from firewall.client import Rich_Rule
+        from firewall.client import FirewallClient
+        fw = None
+        fw_offline = False
+
+        try:
+            fw = FirewallClient()
+            fw.getDefaultZone()
+        except AttributeError:
+            ## Firewalld is not currently running, permanent-only operations
+
+            ## Import other required parts of the firewalld API
+            ##
+            ## NOTE:
+            ##  online and offline operations do not share a common firewalld API
+            from firewall.core.fw_test import Firewall_test
+            from firewall.client import FirewallClientZoneSettings
+            fw = Firewall_test()
+            fw.start()
+            fw_offline = True
+
+    except ImportError:
+        ## Make python 2.4 shippable ci tests happy
+        e = sys.exc_info()[1]
+        module.fail_json(msg='firewalld and its python 2 module are required for this module, version 0.2.11 or newer required (0.3.9 or newer for offline operations) \n %s' % e)
+
+    if fw_offline:
+        ## Pre-run version checking
+        if FW_VERSION < "0.3.9":
+            module.fail_json(msg='unsupported version of firewalld, offline operations require >= 0.3.9')
+    else:
+        ## Pre-run version checking
+        if FW_VERSION < "0.2.11":
+            module.fail_json(msg='unsupported version of firewalld, requires >= 0.2.11')
+
+        ## Check for firewalld running
+        try:
+            if fw.connected == False:
+                module.fail_json(msg='firewalld service must be running, or try with offline=true')
+        except AttributeError:
+            module.fail_json(msg="firewalld connection can't be established,\
+                installed version (%s) likely too old. Requires firewalld >= 0.2.11" % FW_VERSION)
+
+
+    ## Verify required params are provided
     if module.params['source'] == None and module.params['permanent'] == None:
-        module.fail(msg='permanent is a required parameter')
+        module.fail_json(msg='permanent is a required parameter')

-    if not HAS_FIREWALLD:
-        module.fail_json(msg='firewalld required for this module')
+    if module.params['interface'] != None and module.params['zone'] == None:
+        module.fail_json(msg='zone is a required parameter')

-    ## Pre-run version checking
-    if FW_VERSION < "0.2.11":
-        module.fail_json(msg='unsupported version of firewalld, requires >= 2.0.11')
+    if module.params['immediate'] and fw_offline:
+        module.fail_json(msg='firewall is not currently running, unable to perform immediate actions without a running firewall daemon')

     ## Global Vars
     changed=False
@@ -272,20 +541,17 @@
     if module.params['zone'] != None:
         zone = module.params['zone']
     else:
-        zone = fw.getDefaultZone()
+        if fw_offline:
+            zone = fw.get_default_zone()
+        else:
+            zone = fw.getDefaultZone()

     permanent = module.params['permanent']
     desired_state = module.params['state']
     immediate = module.params['immediate']
     timeout = module.params['timeout']
-
-    ## Check for firewalld running
-    try:
-        if fw.connected == False:
-            module.fail_json(msg='firewalld service must be running')
-    except AttributeError:
-        module.fail_json(msg="firewalld connection can't be established,\
-            version likely too old. Requires firewalld >= 2.0.11" % FW_VERSION)
+    interface = module.params['interface']
+    masquerade = module.params['masquerade']

     modification_count = 0
     if service != None:
@@ -294,13 +560,66 @@
         modification_count += 1
     if rich_rule != None:
         modification_count += 1
+    if interface != None:
+        modification_count += 1
+    if masquerade != None:
+        modification_count += 1

     if modification_count > 1:
-        module.fail_json(msg='can only operate on port, service or rich_rule at once')
+        module.fail_json(msg='can only operate on port, service, rich_rule or interface at once')

     if service != None:
-        if permanent:
-            is_enabled = get_service_enabled_permanent(zone, service)
+        if immediate and permanent:
+            is_enabled_permanent = action_handler(
+                get_service_enabled_permanent,
+                (zone, service)
+            )
+            is_enabled_immediate = action_handler(
+                get_service_enabled,
+                (zone, service)
+            )
+            msgs.append('Permanent and Non-Permanent(immediate) operation')
+
+            if desired_state == "enabled":
+                if not is_enabled_permanent or not is_enabled_immediate:
+                    if module.check_mode:
+                        module.exit_json(changed=True)
+                    if not is_enabled_permanent:
+                        action_handler(
+                            set_service_enabled_permanent,
+                            (zone, service)
+                        )
changed=True elif desired_state == "disabled": if is_enabled == True: if module.check_mode: module.exit_json(changed=True) - set_service_disabled_permanent(zone, service) + action_handler( + set_service_disabled_permanent, + (zone, service) + ) changed=True - if immediate or not permanent: - is_enabled = get_service_enabled(zone, service) + elif immediate and not permanent: + is_enabled = action_handler( + get_service_enabled, + (zone, service) + ) msgs.append('Non-permanent operation') @@ -327,27 +655,35 @@ def main(): if module.check_mode: module.exit_json(changed=True) - set_service_enabled(zone, service, timeout) + action_handler( + set_service_enabled, + (zone, service, timeout) + ) changed=True elif desired_state == "disabled": if is_enabled == True: if module.check_mode: module.exit_json(changed=True) - set_service_disabled(zone, service) + action_handler( + set_service_disabled, + (zone, service) + ) changed=True if changed == True: msgs.append("Changed service %s to %s" % (service, desired_state)) + # FIXME - source type does not handle non-permanent mode, this was an + # oversight in the past. if source != None: - is_enabled = get_source(zone, source) + is_enabled = action_handler(get_source, (zone, source)) if desired_state == "enabled": if is_enabled == False: if module.check_mode: module.exit_json(changed=True) - add_source(zone, source) + action_handler(add_source, (zone, source)) changed=True msgs.append("Added %s to zone %s" % (source, zone)) elif desired_state == "disabled": @@ -355,12 +691,61 @@ def main(): if module.check_mode: module.exit_json(changed=True) - remove_source(zone, source) + action_handler(remove_source, (zone, source)) changed=True msgs.append("Removed %s from zone %s" % (source, zone)) + if port != None: - if permanent: - is_enabled = get_port_enabled_permanent(zone, [port, protocol]) + if immediate and permanent: + is_enabled_permanent = action_handler( + get_port_enabled_permanent, + (zone,[port, protocol]) + ) + is_enabled_immediate = action_handler( + get_port_enabled, + (zone, [port, protocol]) + ) + msgs.append('Permanent and Non-Permanent(immediate) operation') + + if desired_state == "enabled": + if not is_enabled_permanent or not is_enabled_immediate: + if module.check_mode: + module.exit_json(changed=True) + if not is_enabled_permanent: + action_handler( + set_port_enabled_permanent, + (zone, port, protocol) + ) + changed=True + if not is_enabled_immediate: + action_handler( + set_port_enabled, + (zone, port, protocol, timeout) + ) + changed=True + + elif desired_state == "disabled": + if is_enabled_permanent or is_enabled_immediate: + if module.check_mode: + module.exit_json(changed=True) + if is_enabled_permanent: + action_handler( + set_port_disabled_permanent, + (zone, port, protocol) + ) + changed=True + if is_enabled_immediate: + action_handler( + set_port_disabled, + (zone, port, protocol) + ) + changed=True + + elif permanent and not immediate: + is_enabled = action_handler( + get_port_enabled_permanent, + (zone, [port, protocol]) + ) msgs.append('Permanent operation') if desired_state == "enabled": @@ -368,17 +753,26 @@ def main(): if module.check_mode: module.exit_json(changed=True) - set_port_enabled_permanent(zone, port, protocol) + action_handler( + set_port_enabled_permanent, + (zone, port, protocol) + ) changed=True elif desired_state == "disabled": if is_enabled == True: if module.check_mode: module.exit_json(changed=True) - set_port_disabled_permanent(zone, port, protocol) + action_handler( + set_port_disabled_permanent, + 
(zone, port, protocol) + ) changed=True - if immediate or not permanent: - is_enabled = get_port_enabled(zone, [port,protocol]) + if immediate and not permanent: + is_enabled = action_handler( + get_port_enabled, + (zone, [port,protocol]) + ) msgs.append('Non-permanent operation') if desired_state == "enabled": @@ -386,14 +780,20 @@ def main(): if module.check_mode: module.exit_json(changed=True) - set_port_enabled(zone, port, protocol, timeout) + action_handler( + set_port_enabled, + (zone, port, protocol, timeout) + ) changed=True elif desired_state == "disabled": if is_enabled == True: if module.check_mode: module.exit_json(changed=True) - set_port_disabled(zone, port, protocol) + action_handler( + set_port_disabled, + (zone, port, protocol) + ) changed=True if changed == True: @@ -401,8 +801,55 @@ def main(): desired_state)) if rich_rule != None: - if permanent: - is_enabled = get_rich_rule_enabled_permanent(zone, rich_rule) + if immediate and permanent: + is_enabled_permanent = action_handler( + get_rich_rule_enabled_permanent, + (zone, rich_rule) + ) + is_enabled_immediate = action_handler( + get_rich_rule_enabled, + (zone, rich_rule) + ) + msgs.append('Permanent and Non-Permanent(immediate) operation') + + if desired_state == "enabled": + if not is_enabled_permanent or not is_enabled_immediate: + if module.check_mode: + module.exit_json(changed=True) + if not is_enabled_permanent: + action_handler( + set_rich_rule_enabled_permanent, + (zone, rich_rule) + ) + changed=True + if not is_enabled_immediate: + action_handler( + set_rich_rule_enabled, + (zone, rich_rule, timeout) + ) + changed=True + + elif desired_state == "disabled": + if is_enabled_permanent or is_enabled_immediate: + if module.check_mode: + module.exit_json(changed=True) + if is_enabled_permanent: + action_handler( + set_rich_rule_disabled_permanent, + (zone, rich_rule) + ) + changed=True + if is_enabled_immediate: + action_handler( + set_rich_rule_disabled, + (zone, rich_rule) + ) + changed=True + if permanent and not immediate: + is_enabled = action_handler( + get_rich_rule_enabled_permanent, + (zone, rich_rule) + ) msgs.append('Permanent operation') if desired_state == "enabled": @@ -410,17 +857,26 @@ def main(): if module.check_mode: module.exit_json(changed=True) - set_rich_rule_enabled_permanent(zone, rich_rule) + action_handler( + set_rich_rule_enabled_permanent, + (zone, rich_rule) + ) changed=True elif desired_state == "disabled": if is_enabled == True: if module.check_mode: module.exit_json(changed=True) - set_rich_rule_disabled_permanent(zone, rich_rule) + action_handler( + set_rich_rule_disabled_permanent, + (zone, rich_rule) + ) changed=True - if immediate or not permanent: - is_enabled = get_rich_rule_enabled(zone, rich_rule) + if immediate and not permanent: + is_enabled = action_handler( + get_rich_rule_enabled, + (zone, rich_rule) + ) msgs.append('Non-permanent operation') if desired_state == "enabled": @@ -428,23 +884,189 @@ def main(): if module.check_mode: module.exit_json(changed=True) - set_rich_rule_enabled(zone, rich_rule, timeout) + action_handler( + set_rich_rule_enabled, + (zone, rich_rule, timeout) + ) changed=True elif desired_state == "disabled": if is_enabled == True: if module.check_mode: module.exit_json(changed=True) - set_rich_rule_disabled(zone, rich_rule) + action_handler( + set_rich_rule_disabled, + (zone, rich_rule) + ) changed=True if changed == True: msgs.append("Changed rich_rule %s to %s" % (rich_rule, desired_state)) + if interface != None: + if immediate and permanent: + 
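+ # interface: check both the permanent config and the runtime state, mirroring the service and port branches above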
is_enabled_permanent = action_handler( + get_interface_permanent, + (zone, interface) + ) + is_enabled_immediate = action_handler( + get_interface, + (zone, interface) + ) + msgs.append('Permanent and Non-Permanent(immediate) operation') + + if desired_state == "enabled": + if not is_enabled_permanent or not is_enabled_immediate: + if module.check_mode: + module.exit_json(changed=True) + if not is_enabled_permanent: + change_zone_of_interface_permanent(zone, interface) + changed=True + if not is_enabled_immediate: + change_zone_of_interface(zone, interface) + changed=True + if changed: + msgs.append("Changed %s to zone %s" % (interface, zone)) + + elif desired_state == "disabled": + if is_enabled_permanent or is_enabled_immediate: + if module.check_mode: + module.exit_json(changed=True) + if is_enabled_permanent: + remove_interface_permanent(zone, interface) + changed=True + if is_enabled_immediate: + remove_interface(zone, interface) + changed=True + if changed: + msgs.append("Removed %s from zone %s" % (interface, zone)) + + elif permanent and not immediate: + is_enabled = action_handler( + get_interface_permanent, + (zone, interface) + ) + msgs.append('Permanent operation') + if desired_state == "enabled": + if is_enabled == False: + if module.check_mode: + module.exit_json(changed=True) + + change_zone_of_interface_permanent(zone, interface) + changed=True + msgs.append("Changed %s to zone %s" % (interface, zone)) + elif desired_state == "disabled": + if is_enabled == True: + if module.check_mode: + module.exit_json(changed=True) + + remove_interface_permanent(zone, interface) + changed=True + msgs.append("Removed %s from zone %s" % (interface, zone)) + elif immediate and not permanent: + is_enabled = action_handler( + get_interface, + (zone, interface) + ) + msgs.append('Non-permanent operation') + if desired_state == "enabled": + if is_enabled == False: + if module.check_mode: + module.exit_json(changed=True) + + change_zone_of_interface(zone, interface) + changed=True + msgs.append("Changed %s to zone %s" % (interface, zone)) + elif desired_state == "disabled": + if is_enabled == True: + if module.check_mode: + module.exit_json(changed=True) + + remove_interface(zone, interface) + changed=True + msgs.append("Removed %s from zone %s" % (interface, zone))
+ + if masquerade != None: + + if immediate and permanent: + is_enabled_permanent = action_handler( + get_masquerade_enabled_permanent, + (zone, ) + ) + is_enabled_immediate = action_handler(get_masquerade_enabled, (zone, )) + msgs.append('Permanent and Non-Permanent(immediate) operation') + + if desired_state == "enabled": + if not is_enabled_permanent or not is_enabled_immediate: + if module.check_mode: + module.exit_json(changed=True) + if not is_enabled_permanent: + action_handler(set_masquerade_permanent, (zone, True)) + changed=True + if not is_enabled_immediate: + action_handler(set_masquerade_enabled, (zone, )) + changed=True + if changed: + msgs.append("Added masquerade to zone %s" % (zone)) + + elif desired_state == "disabled": + if is_enabled_permanent or is_enabled_immediate: + if module.check_mode: + module.exit_json(changed=True) + if is_enabled_permanent: + action_handler(set_masquerade_permanent, (zone, False)) + changed=True + if is_enabled_immediate: + action_handler(set_masquerade_disabled, (zone, )) + changed=True + if changed: + msgs.append("Removed masquerade from zone %s" % (zone)) + + elif permanent and not immediate: + is_enabled = action_handler(get_masquerade_enabled_permanent, (zone, )) + msgs.append('Permanent operation') + + if desired_state == "enabled": + if is_enabled == False: + if module.check_mode: + module.exit_json(changed=True) + + action_handler(set_masquerade_permanent, (zone, True)) + changed=True + msgs.append("Added masquerade to zone %s" % (zone)) + elif desired_state == "disabled": + if is_enabled == True: + if module.check_mode: + module.exit_json(changed=True) + + action_handler(set_masquerade_permanent, (zone, False)) + changed=True + msgs.append("Removed masquerade from zone %s" % (zone)) + elif immediate and not permanent: + is_enabled = action_handler(get_masquerade_enabled, (zone, )) + msgs.append('Non-permanent operation') + + if desired_state == "enabled": + if is_enabled == False: + if module.check_mode: + module.exit_json(changed=True) + + action_handler(set_masquerade_enabled, (zone, )) + changed=True + msgs.append("Added masquerade to zone %s" % (zone)) + elif desired_state == "disabled": + if is_enabled == True: + if module.check_mode: + module.exit_json(changed=True) + + action_handler(set_masquerade_disabled, (zone, )) + changed=True + msgs.append("Removed masquerade from zone %s" % (zone)) + + if fw_offline: + msgs.append("(offline operation: only on-disk configs were altered)") module.exit_json(changed=changed, msg=', '.join(msgs)) -################################################# -# import module snippets -from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/system/getent.py b/system/getent.py index 7df9e1d795f..960a1221f70 100644 --- a/system/getent.py +++ b/system/getent.py @@ -20,6 +20,10 @@ # +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: getent @@ -59,27 +63,46 @@ EXAMPLES = ''' # get root user info -- getent: database=passwd key=root -- debug: var=getent_passwd +- getent: + database: passwd + key: root +- debug: + var: getent_passwd # get all groups -- getent: database=group split=':' -- debug: var=getent_group +- getent: + database: group + split: ':' +- debug: + var: getent_group # get all hosts, split by tab -- getent: database=hosts -- debug: var=getent_hosts +- getent: + database: hosts +- debug: + var: getent_hosts # get http service info, no error if missing -- getent: database=services key=http fail_key=False -- debug: var=getent_services +- getent: + database: services + key: http + fail_key: False +- debug: + var: getent_services # get user password hash (requires sudo/root) -- getent: database=shadow key=www-data split=: -- debug: var=getent_shadow +- getent: + database: shadow + key: www-data + split: ':' +- debug: + var: getent_shadow ''' +from ansible.module_utils.basic import * +from ansible.module_utils.pycompat24 import get_exception + def main(): module = AnsibleModule( argument_spec = dict( @@ -110,7 +133,8 @@ def main(): try: rc, out, err = module.run_command(cmd) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg=str(e)) msg = "Unexpected failure!" @@ -136,8 +160,6 @@ def main(): module.fail_json(msg=msg) -# import module snippets -from ansible.module_utils.basic import * - -main() +if __name__ == '__main__': + main() diff --git a/system/gluster_volume.py b/system/gluster_volume.py index ff1ce9831db..7fcca45886d 100644 --- a/system/gluster_volume.py +++ b/system/gluster_volume.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ module: gluster_volume short_description: Manage GlusterFS volumes @@ -56,6 +60,18 @@ default: null description: - Stripe count for volume + disperses: + required: false + default: null + description: + - Disperse count for volume + version_added: "2.2" + redundancies: + required: false + default: null + description: + - Redundancy count for volume + version_added: "2.2" transport: required: false choices: [ 'tcp', 'rdma', 'tcp,rdma' ] @@ -71,6 +87,7 @@ start_on_create: choices: [ 'yes', 'no'] required: false + default: 'yes' description: - Controls whether the volume is started after creation or not, defaults to yes rebalance: @@ -108,32 +125,61 @@ EXAMPLES = """ - name: create gluster volume - gluster_volume: state=present name=test1 bricks=/bricks/brick1/g1 rebalance=yes cluster="192.168.1.10,192.168.1.11" + gluster_volume: + state: present + name: test1 + bricks: /bricks/brick1/g1 + rebalance: yes + cluster: + - 192.0.2.10 + - 192.0.2.11 run_once: true - name: tune - gluster_volume: state=present name=test1 options='{performance.cache-size: 256MB}' + gluster_volume: + state: present + name: test1 + options: + performance.cache-size: 256MB - name: start gluster volume - gluster_volume: state=started name=test1 + gluster_volume: + state: started + name: test1 - name: limit usage - gluster_volume: state=present name=test1 directory=/foo quota=20.0MB + gluster_volume: + state: present + name: test1 + directory: /foo + quota: 20.0MB - name: stop gluster volume - gluster_volume: state=stopped name=test1 + gluster_volume: + state: stopped + name: test1 - name: remove gluster volume - gluster_volume: state=absent name=test1 + gluster_volume: + state: absent + name: test1 - name: create gluster volume with multiple bricks - gluster_volume: state=present name=test2 bricks="/bricks/brick1/g2,/bricks/brick2/g2" cluster="192.168.1.10,192.168.1.11" + gluster_volume: + state: present + name: test2 + bricks: /bricks/brick1/g2,/bricks/brick2/g2 + cluster: + - 192.0.2.10 + - 192.0.2.11 run_once: true """ import shutil import time import socket +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.basic import * glusterbin = '' @@ -146,7 +192,8 @@ def run_gluster(gargs, **kwargs): rc, out, err = module.run_command(args, **kwargs) if rc != 0: module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err)) - except Exception, e: + except Exception: + e = get_exception() module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args), str(e))) return out @@ -177,16 +224,24 @@ def get_peers(): hostname = None uuid = None state = None + shortNames = False for row in out.split('\n'): if ': ' in row: key, value = row.split(': ') if key.lower() == 'hostname': hostname = value + shortNames = False if key.lower() == 'uuid': uuid = value if key.lower() == 'state': state = value peers[hostname] = [ uuid, state ] + elif row.lower() == 'other names:': + shortNames = True + elif row != '' and shortNames == True: + peers[row] = [ uuid, state ] + elif row == '': + shortNames = False return peers def get_volumes(): @@ -249,8 +304,8 @@ def wait_for_peer(host): def probe(host, myhostname): global module - run_gluster([ 'peer', 'probe', host ]) - if not wait_for_peer(host): + out = run_gluster([ 'peer', 'probe', host ]) + if out.find('localhost') == -1 and not wait_for_peer(host): module.fail_json(msg='failed to probe 
peer %s on %s' % (host, myhostname)) changed = True @@ -258,11 +313,9 @@ def probe_all_peers(hosts, peers, myhostname): for host in hosts: host = host.strip() # Clean up any extra space for exact comparison if host not in peers: - # dont probe ourselves - if myhostname != host: - probe(host, myhostname) + probe(host, myhostname) -def create_volume(name, stripe, replica, transport, hosts, bricks, force): +def create_volume(name, stripe, replica, disperse, redundancy, transport, hosts, bricks, force): args = [ 'volume', 'create' ] args.append(name) if stripe: @@ -271,6 +324,12 @@ def create_volume(name, stripe, replica, transport, hosts, bricks, force): if replica: args.append('replica') args.append(str(replica)) + if disperse: + args.append('disperse') + args.append(str(disperse)) + if redundancy: + args.append('redundancy') + args.append(str(redundancy)) args.append('transport') args.append(transport) for brick in bricks: @@ -289,8 +348,15 @@ def stop_volume(name): def set_volume_option(name, option, parameter): run_gluster([ 'volume', 'set', name, option, parameter ]) -def add_brick(name, brick, force): - args = [ 'volume', 'add-brick', name, brick ] +def add_bricks(name, new_bricks, stripe, replica, force): + args = [ 'volume', 'add-brick', name ] + if stripe: + args.append('stripe') + args.append(str(stripe)) + if replica: + args.append('replica') + args.append(str(replica)) + args.extend(new_bricks) if force: args.append('force') run_gluster(args) @@ -317,6 +383,8 @@ def main(): host=dict(required=False, default=None), stripes=dict(required=False, default=None, type='int'), replicas=dict(required=False, default=None, type='int'), + disperses=dict(required=False, default=None, type='int'), + redundancies=dict(required=False, default=None, type='int'), transport=dict(required=False, default='tcp', choices=[ 'tcp', 'rdma', 'tcp,rdma' ]), bricks=dict(required=False, default=None, aliases=['brick']), start_on_create=dict(required=False, default=True, type='bool'), @@ -339,6 +407,8 @@ def main(): brick_paths = module.params['bricks'] stripes = module.params['stripes'] replicas = module.params['replicas'] + disperses = module.params['disperses'] + redundancies = module.params['redundancies'] transport = module.params['transport'] myhostname = module.params['host'] start_on_create = module.boolean(module.params['start_on_create']) @@ -350,9 +420,12 @@ def main(): # Clean up if last element is empty. 
Consider that yml can look like this: # cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}" - if cluster != None and cluster[-1] == '': + if cluster != None and len(cluster) > 1 and cluster[-1] == '': cluster = cluster[0:-1] + if cluster == None or cluster[0] == '': + cluster = [myhostname] + if brick_paths != None and "," in brick_paths: brick_paths = brick_paths.split(",") else: @@ -383,7 +456,7 @@ def main(): # create if it doesn't exist if volume_name not in volumes: - create_volume(volume_name, stripes, replicas, transport, cluster, brick_paths, force) + create_volume(volume_name, stripes, replicas, disperses, redundancies, transport, cluster, brick_paths, force) volumes = get_volumes() changed = True @@ -408,8 +481,8 @@ def main(): if brick not in all_bricks: removed_bricks.append(brick) - for brick in new_bricks: - add_brick(volume_name, brick, force) + if new_bricks: + add_bricks(volume_name, new_bricks, stripes, replicas, force) changed = True # handle quotas @@ -430,7 +503,7 @@ def main(): else: module.fail_json(msg='failed to create volume %s' % volume_name) - if volume_name not in volumes: + if action != 'delete' and volume_name not in volumes: module.fail_json(msg='volume not found %s' % volume_name) if action == 'started': @@ -453,6 +526,5 @@ def main(): module.exit_json(changed=changed, ansible_facts=facts) -# import module snippets -from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/system/iptables.py b/system/iptables.py new file mode 100644 index 00000000000..521ad6b043a --- /dev/null +++ b/system/iptables.py @@ -0,0 +1,564 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, Linus Unnebäck +# +# This file is part of Ansible +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +BINS = dict( + ipv4='iptables', + ipv6='ip6tables', +) + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: iptables +short_description: Modify the systems iptables +requirements: [] +version_added: "2.0" +author: Linus Unnebäck (@LinusU) +description: + - Iptables is used to set up, maintain, and inspect the tables of IP packet + filter rules in the Linux kernel. This module does not handle the saving + and/or loading of rules, but rather only manipulates the current rules + that are present in memory. This is the same as the behaviour of the + "iptables" and "ip6tables" command which this module uses internally. +notes: + - This module just deals with individual rules. If you need advanced + chaining of rules the recommended way is to template the iptables restore + file. +options: + table: + description: + - This option specifies the packet matching table which the command + should operate on. If the kernel is configured with automatic module + loading, an attempt will be made to load the appropriate module for + that table if it is not already there. 
+ required: false + default: filter + choices: [ "filter", "nat", "mangle", "raw", "security" ] + state: + description: + - Whether the rule should be absent or present. + required: false + default: present + choices: [ "present", "absent" ] + action: + version_added: "2.2" + description: + - Whether the rule should be appended at the bottom or inserted at the + top. If the rule already exists the chain won't be modified. + required: false + default: append + choices: [ "append", "insert" ] + ip_version: + description: + - Which version of the IP protocol this rule should apply to. + required: false + default: ipv4 + choices: [ "ipv4", "ipv6" ] + chain: + description: + - "Chain to operate on. This option can either be the name of a user + defined chain or any of the builtin chains: 'INPUT', 'FORWARD', + 'OUTPUT', 'PREROUTING', 'POSTROUTING', 'SECMARK', 'CONNSECMARK'." + required: false + protocol: + description: + - The protocol of the rule or of the packet to check. The specified + protocol can be one of tcp, udp, udplite, icmp, esp, ah, sctp or the + special keyword "all", or it can be a numeric value, representing one + of these protocols or a different one. A protocol name from + /etc/protocols is also allowed. A "!" argument before the protocol + inverts the test. The number zero is equivalent to all. "all" will + match with all protocols and is taken as default when this option is + omitted. + required: false + default: null + source: + description: + - Source specification. Address can be either a network name, + a hostname, a network IP address (with /mask), or a plain IP address. + Hostnames will be resolved once only, before the rule is submitted to + the kernel. Please note that specifying any name to be resolved with + a remote query such as DNS is a really bad idea. The mask can be + either a network mask or a plain number, specifying the number of 1's + at the left side of the network mask. Thus, a mask of 24 is equivalent + to 255.255.255.0. A "!" argument before the address specification + inverts the sense of the address. + required: false + default: null + destination: + description: + - Destination specification. Address can be either a network name, + a hostname, a network IP address (with /mask), or a plain IP address. + Hostnames will be resolved once only, before the rule is submitted to + the kernel. Please note that specifying any name to be resolved with + a remote query such as DNS is a really bad idea. The mask can be + either a network mask or a plain number, specifying the number of 1's + at the left side of the network mask. Thus, a mask of 24 is equivalent + to 255.255.255.0. A "!" argument before the address specification + inverts the sense of the address. + required: false + default: null + match: + description: + - Specifies a match to use, that is, an extension module that tests for + a specific property. The set of matches make up the condition under + which a target is invoked. Matches are evaluated first to last if + specified as an array and work in short-circuit fashion, i.e. if one + extension yields false, evaluation will stop. + required: false + default: [] + jump: + description: + - This specifies the target of the rule; i.e., what to do if the packet + matches it. The target can be a user-defined chain (other than the one + this rule is in), one of the special builtin targets which decide the + fate of the packet immediately, or an extension (see EXTENSIONS + below). 
If this option is omitted in a rule (and the goto parameter + is not used), then matching the rule will have no effect on the + packet's fate, but the counters on the rule will be incremented. + required: false + default: null + goto: + description: + - This specifies that the processing should continue in a user specified + chain. Unlike the jump argument return will not continue processing in + this chain but instead in the chain that called us via jump. + required: false + default: null + in_interface: + description: + - Name of an interface via which a packet was received (only for packets + entering the INPUT, FORWARD and PREROUTING chains). When the "!" + argument is used before the interface name, the sense is inverted. If + the interface name ends in a "+", then any interface which begins with + this name will match. If this option is omitted, any interface name + will match. + required: false + default: null + out_interface: + description: + - Name of an interface via which a packet is going to be sent (for + packets entering the FORWARD, OUTPUT and POSTROUTING chains). When the + "!" argument is used before the interface name, the sense is inverted. + If the interface name ends in a "+", then any interface which begins + with this name will match. If this option is omitted, any interface + name will match. + required: false + default: null + fragment: + description: + - This means that the rule only refers to second and further fragments + of fragmented packets. Since there is no way to tell the source or + destination ports of such a packet (or ICMP type), such a packet will + not match any rules which specify them. When the "!" argument precedes + fragment argument, the rule will only match head fragments, or + unfragmented packets. + required: false + default: null + set_counters: + description: + - This enables the administrator to initialize the packet and byte + counters of a rule (during INSERT, APPEND, REPLACE operations). + required: false + default: null + source_port: + description: + - "Source port or port range specification. This can either be a service + name or a port number. An inclusive range can also be specified, using + the format first:last. If the first port is omitted, '0' is assumed; + if the last is omitted, '65535' is assumed. If the first port is + greater than the second one they will be swapped." + required: false + default: null + destination_port: + description: + - "Destination port or port range specification. This can either be + a service name or a port number. An inclusive range can also be + specified, using the format first:last. If the first port is omitted, + '0' is assumed; if the last is omitted, '65535' is assumed. If the + first port is greater than the second one they will be swapped." + required: false + default: null + to_ports: + description: + - "This specifies a destination port or range of ports to use: without + this, the destination port is never altered. This is only valid if the + rule also specifies one of the following protocols: tcp, udp, dccp or + sctp." + required: false + default: null + to_destination: + version_added: "2.1" + description: + - "This specifies a destination address to use with DNAT: without + this, the destination address is never altered." + required: false + default: null + to_source: + version_added: "2.2" + description: + - "This specifies a source address to use with SNAT: without + this, the source address is never altered."
+ required: false + default: null + set_dscp_mark: + version_added: "2.1" + description: + - "This allows specifying a DSCP mark to be added to packets. + It takes either an integer or hex value. Mutually exclusive with + C(set_dscp_mark_class)." + required: false + default: null + set_dscp_mark_class: + version_added: "2.1" + description: + - "This allows specifying a predefined DiffServ class which will be + translated to the corresponding DSCP mark. Mutually exclusive with + C(set_dscp_mark)." + required: false + default: null + comment: + description: + - "This specifies a comment that will be added to the rule" + required: false + default: null + ctstate: + description: + - "ctstate is a list of the connection states to match in the conntrack + module. + Possible states are: 'INVALID', 'NEW', 'ESTABLISHED', 'RELATED', + 'UNTRACKED', 'SNAT', 'DNAT'" + required: false + default: [] + limit: + description: + - "Specifies the maximum average number of matches to allow per second. + The number can specify units explicitly, using `/second', `/minute', + `/hour' or `/day', or parts of them (so `5/second' is the same as + `5/s')." + required: false + default: null + limit_burst: + version_added: "2.1" + description: + - "Specifies the maximum burst before the above limit kicks in." + required: false + default: null + uid_owner: + version_added: "2.1" + description: + - "Specifies the UID or username to use in match by owner rule." + required: false + reject_with: + version_added: "2.1" + description: + - "Specifies the error packet type to return while rejecting." + required: false + icmp_type: + version_added: "2.2" + description: + - "This allows specification of the ICMP type, which can be a numeric + ICMP type, type/code pair, or one of the ICMP type names shown by the + command 'iptables -p icmp -h'" + required: false + flush: + version_added: "2.2" + description: + - "Flushes the specified table and chain of all rules. If no chain is + specified then the entire table is purged. Ignores all other + parameters." + required: false + policy: + version_added: "2.2" + description: + - "Set the policy for the chain to the given target. Valid targets are + ACCEPT, DROP, QUEUE, RETURN. Only built in chains can have policies. + This parameter requires the chain parameter. Ignores all other + parameters." 
+''' + +EXAMPLES = ''' +# Block specific IP +- iptables: + chain: INPUT + source: 8.8.8.8 + jump: DROP + become: yes + +# Forward port 80 to 8600 +- iptables: + table: nat + chain: PREROUTING + in_interface: eth0 + protocol: tcp + match: tcp + destination_port: 80 + jump: REDIRECT + to_ports: 8600 + comment: Redirect web traffic to port 8600 + become: yes + +# Allow related and established connections +- iptables: + chain: INPUT + ctstate: ESTABLISHED,RELATED + jump: ACCEPT + become: yes + +# Tag all outbound tcp packets with DSCP mark 8 +- iptables: + chain: OUTPUT + jump: DSCP + table: mangle + set_dscp_mark: 8 + protocol: tcp + +# Tag all outbound tcp packets with DSCP DiffServ class CS1 +- iptables: + chain: OUTPUT + jump: DSCP + table: mangle + set_dscp_mark_class: CS1 + protocol: tcp +''' + +def append_param(rule, param, flag, is_list): + if is_list: + for item in param: + append_param(rule, item, flag, False) + else: + if param is not None: + rule.extend([flag, param]) + + +def append_csv(rule, param, flag): + if param: + rule.extend([flag, ','.join(param)]) + + +def append_match(rule, param, match): + if param: + rule.extend(['-m', match]) + + +def append_jump(rule, param, jump): + if param: + rule.extend(['-j', jump]) + + +def construct_rule(params): + rule = [] + append_param(rule, params['protocol'], '-p', False) + append_param(rule, params['source'], '-s', False) + append_param(rule, params['destination'], '-d', False) + append_param(rule, params['match'], '-m', True) + append_param(rule, params['jump'], '-j', False) + append_param(rule, params['to_destination'], '--to-destination', False) + append_param(rule, params['to_source'], '--to-source', False) + append_param(rule, params['goto'], '-g', False) + append_param(rule, params['in_interface'], '-i', False) + append_param(rule, params['out_interface'], '-o', False) + append_param(rule, params['fragment'], '-f', False) + append_param(rule, params['set_counters'], '-c', False) + append_param(rule, params['source_port'], '--source-port', False) + append_param(rule, params['destination_port'], '--destination-port', False) + append_param(rule, params['to_ports'], '--to-ports', False) + append_param(rule, params['set_dscp_mark'], '--set-dscp', False) + append_param( + rule, + params['set_dscp_mark_class'], + '--set-dscp-class', + False) + append_match(rule, params['comment'], 'comment') + append_param(rule, params['comment'], '--comment', False) + append_match(rule, params['ctstate'], 'state') + append_csv(rule, params['ctstate'], '--state') + append_match(rule, params['limit'] or params['limit_burst'], 'limit') + append_param(rule, params['limit'], '--limit', False) + append_param(rule, params['limit_burst'], '--limit-burst', False) + append_match(rule, params['uid_owner'], 'owner') + append_param(rule, params['uid_owner'], '--uid-owner', False) + append_jump(rule, params['reject_with'], 'REJECT') + append_param(rule, params['reject_with'], '--reject-with', False) + append_param(rule, params['icmp_type'], '--icmp-type', False) + return rule + + +def push_arguments(iptables_path, action, params, make_rule=True): + cmd = [iptables_path] + cmd.extend(['-t', params['table']]) + cmd.extend([action, params['chain']]) + if make_rule: + cmd.extend(construct_rule(params)) + return cmd + + +def check_present(iptables_path, module, params): + cmd = push_arguments(iptables_path, '-C', params) + rc, _, __ = module.run_command(cmd, check_rc=False) + return (rc == 0) + + +def append_rule(iptables_path, module, params): + cmd = 
push_arguments(iptables_path, '-A', params) + module.run_command(cmd, check_rc=True) + + +def insert_rule(iptables_path, module, params): + cmd = push_arguments(iptables_path, '-I', params) + module.run_command(cmd, check_rc=True) + + +def remove_rule(iptables_path, module, params): + cmd = push_arguments(iptables_path, '-D', params) + module.run_command(cmd, check_rc=True) + + +def flush_table(iptables_path, module, params): + cmd = push_arguments(iptables_path, '-F', params, make_rule=False) + module.run_command(cmd, check_rc=True) + + +def set_chain_policy(iptables_path, module, params): + cmd = push_arguments(iptables_path, '-P', params, make_rule=False) + cmd.append(params['policy']) + module.run_command(cmd, check_rc=True) + + +def main(): + module = AnsibleModule( + supports_check_mode=True, + argument_spec=dict( + table=dict( + required=False, + default='filter', + choices=['filter', 'nat', 'mangle', 'raw', 'security']), + state=dict( + required=False, + default='present', + choices=['present', 'absent']), + action=dict( + required=False, + default='append', + type='str', + choices=['append', 'insert']), + ip_version=dict( + required=False, + default='ipv4', + choices=['ipv4', 'ipv6']), + chain=dict(required=False, default=None, type='str'), + protocol=dict(required=False, default=None, type='str'), + source=dict(required=False, default=None, type='str'), + to_source=dict(required=False, default=None, type='str'), + destination=dict(required=False, default=None, type='str'), + to_destination=dict(required=False, default=None, type='str'), + match=dict(required=False, default=[], type='list'), + jump=dict(required=False, default=None, type='str'), + goto=dict(required=False, default=None, type='str'), + in_interface=dict(required=False, default=None, type='str'), + out_interface=dict(required=False, default=None, type='str'), + fragment=dict(required=False, default=None, type='str'), + set_counters=dict(required=False, default=None, type='str'), + source_port=dict(required=False, default=None, type='str'), + destination_port=dict(required=False, default=None, type='str'), + to_ports=dict(required=False, default=None, type='str'), + set_dscp_mark=dict(required=False, default=None, type='str'), + set_dscp_mark_class=dict(required=False, default=None, type='str'), + comment=dict(required=False, default=None, type='str'), + ctstate=dict(required=False, default=[], type='list'), + limit=dict(required=False, default=None, type='str'), + limit_burst=dict(required=False, default=None, type='str'), + uid_owner=dict(required=False, default=None, type='str'), + reject_with=dict(required=False, default=None, type='str'), + icmp_type=dict(required=False, default=None, type='str'), + flush=dict(required=False, default=False, type='bool'), + policy=dict( + required=False, + default=None, + type='str', + choices=['ACCEPT', 'DROP', 'QUEUE', 'RETURN']), + ), + mutually_exclusive=( + ['set_dscp_mark', 'set_dscp_mark_class'], + ['flush', 'policy'], + ), + ) + args = dict( + changed=False, + failed=False, + ip_version=module.params['ip_version'], + table=module.params['table'], + chain=module.params['chain'], + flush=module.params['flush'], + rule=' '.join(construct_rule(module.params)), + state=module.params['state'], + ) + + ip_version = module.params['ip_version'] + iptables_path = module.get_bin_path(BINS[ip_version], True) + + # Check if chain option is required + if args['flush'] is False and args['chain'] is None: + module.fail_json( + msg="Either chain or flush parameter must be specified.") + + 
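+ # flush and policy are standalone actions: each applies its change and exits immediately, without building a rule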
# Flush the table + if args['flush'] is True: + flush_table(iptables_path, module, module.params) + module.exit_json(**args) + + # Set the policy + if module.params['policy']: + set_chain_policy(iptables_path, module, module.params) + module.exit_json(**args) + + insert = (module.params['action'] == 'insert') + rule_is_present = check_present(iptables_path, module, module.params) + should_be_present = (args['state'] == 'present') + + # Check if target is up to date + args['changed'] = (rule_is_present != should_be_present) + + # Check only; don't modify + if module.check_mode: + module.exit_json(changed=args['changed']) + + # Target is already up to date + if args['changed'] is False: + module.exit_json(**args) + + if should_be_present: + if insert: + insert_rule(iptables_path, module, module.params) + else: + append_rule(iptables_path, module, module.params) + else: + remove_rule(iptables_path, module, module.params) + + module.exit_json(**args) + +# import module snippets +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/system/kernel_blacklist.py b/system/kernel_blacklist.py index 296a082a2ea..5498f10b3a1 100644 --- a/system/kernel_blacklist.py +++ b/system/kernel_blacklist.py @@ -22,6 +22,10 @@ import re +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: kernel_blacklist @@ -52,7 +56,9 @@ EXAMPLES = ''' # Blacklist the nouveau driver module -- kernel_blacklist: name=nouveau state=present +- kernel_blacklist: + name: nouveau + state: present ''' @@ -138,4 +144,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/system/known_hosts.py b/system/known_hosts.py index 7592574d4e7..69210d9fdf2 100644 --- a/system/known_hosts.py +++ b/system/known_hosts.py @@ -18,14 +18,19 @@ along with this module. If not, see <http://www.gnu.org/licenses/>. """ +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: known_hosts short_description: Add or remove a host from the C(known_hosts) file description: - - The M(known_hosts) module lets you add or remove a host from the C(known_hosts) file. - This is useful if you're going to want to use the M(git) module over ssh, for example. - If you have a very large number of host keys to manage, you will find the M(template) module more useful. + - The M(known_hosts) module lets you add or remove host keys from the C(known_hosts) file. + - Starting at Ansible 2.2, multiple entries per host are allowed, but only one for each key type supported by ssh. + This is useful if you're going to want to use the M(git) module over ssh, for example. + - If you have a very large number of host keys to manage, you will find the M(template) module more useful. version_added: "1.9" options: name: @@ -36,7 +41,7 @@ default: null key: description: - - The SSH public host key, as a string (required if state=present, optional when state=absent, in which case all keys for the host are removed) + - The SSH public host key, as a string (required if state=present, optional when state=absent, in which case all keys for the host are removed).
The key must be in the right format for ssh (see sshd(1), section "SSH_KNOWN_HOSTS FILE FORMAT") required: false default: null path: @@ -44,9 +49,15 @@ - The known_hosts file to edit required: no default: "(homedir)+/.ssh/known_hosts" + hash_host: + description: + - Hash the hostname in the known_hosts file + required: no + default: no + version_added: "2.3" state: description: - - I(present) to add the host, I(absent) to remove it. + - I(present) to add the host key, I(absent) to remove it. choices: [ "present", "absent" ] required: no default: present @@ -55,11 +66,11 @@ ''' EXAMPLES = ''' -# Example using with_file to set the system known_hosts file - name: tell the host about our servers it might want to ssh to - known_hosts: path='/etc/ssh/ssh_known_hosts' - name='foo.com.invalid' - key="{{ lookup('file', 'pubkeys/foo.com.invalid') }}" + known_hosts: + path: /etc/ssh/ssh_known_hosts + name: foo.com.invalid + key: "{{ lookup('file', 'pubkeys/foo.com.invalid') }}" ''' # Makes sure public host keys are present or absent in the given known_hosts @@ -70,12 +81,16 @@ # name = hostname whose key should be added (alias: host) # key = line(s) to add to known_hosts file # path = the known_hosts file to edit (default: ~/.ssh/known_hosts) +# hash_host = yes|no (default: no) hash the hostname in the known_hosts file # state = absent|present (default: present) import os import os.path import tempfile import errno +import re +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.basic import * def enforce_state(module, params): """ @@ -85,15 +100,14 @@ def enforce_state(module, params): host = params["name"] key = params.get("key",None) port = params.get("port",None) - #expand the path parameter; otherwise module.add_path_info - #(called by exit_json) unhelpfully says the unexpanded path is absent. - path = os.path.expanduser(params.get("path")) + path = params.get("path") + hash_host = params.get("hash_host") state = params.get("state") #Find the ssh-keygen binary sshkeygen = module.get_bin_path("ssh-keygen",True) - #trailing newline in files gets lost, so re-add if necessary - if key is not None and key[-1]!='\n': + # Trailing newline in files gets lost, so re-add if necessary + if key and key[-1] != '\n': key+='\n' if key is None and state != "absent": @@ -101,27 +115,28 @@ def enforce_state(module, params): sanity_check(module,host,key,sshkeygen) - current,replace=search_for_host_key(module,host,key,path,sshkeygen) + found,replace_or_add,found_line,key=search_for_host_key(module,host,key,hash_host,path,sshkeygen) - #We will change state if current==True & state!="present" - #or current==False & state=="present" - #i.e (current) XOR (state=="present") + #We will change state if found==True & state!="present" + #or found==False & state=="present" + #i.e found XOR (state=="present") #Alternatively, if replace is true (i.e. key present, and we must change it) if module.check_mode: - module.exit_json(changed = replace or ((state=="present") != current)) + module.exit_json(changed = replace_or_add or (state=="present") != found) #Now do the work. 
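+ #A matching key of the same type is replaced by rewriting the file below and skipping the old line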
- #First, remove an extant entry if required - if replace==True or (current==True and state=="absent"): - module.run_command([sshkeygen,'-R',host,'-f',path], - check_rc=True) + #Only remove whole host if found and no key provided + if found and key is None and state=="absent": + module.run_command([sshkeygen,'-R',host,'-f',path], check_rc=True) params['changed'] = True + #Next, add a new (or replacing) entry - if replace==True or (current==False and state=="present"): + if replace_or_add or found != (state=="present"): try: inf=open(path,"r") - except IOError, e: + except IOError: + e = get_exception() if e.errno == errno.ENOENT: inf=None else: @@ -130,13 +145,17 @@ def enforce_state(module, params): try: outf=tempfile.NamedTemporaryFile(dir=os.path.dirname(path)) if inf is not None: - for line in inf: + for line_number, line in enumerate(inf, start=1): + if found_line==line_number and (replace_or_add or state=='absent'): + continue # skip this line to replace its key outf.write(line) inf.close() - outf.write(key) + if state == 'present': + outf.write(key) outf.flush() module.atomic_move(outf.name,path) - except (IOError,OSError),e: + except (IOError,OSError): + e = get_exception() module.fail_json(msg="Failed to write to file %s: %s" % \ (path,str(e))) @@ -170,7 +189,8 @@ def sanity_check(module,host,key,sshkeygen): outf=tempfile.NamedTemporaryFile() outf.write(key) outf.flush() - except IOError,e: + except IOError: + e = get_exception() module.fail_json(msg="Failed to write to temporary file %s: %s" % \ (outf.name,str(e))) rc,stdout,stderr=module.run_command([sshkeygen,'-F',host, @@ -184,55 +204,94 @@ def sanity_check(module,host,key,sshkeygen): if stdout=='': #host not found module.fail_json(msg="Host parameter does not match hashed host field in supplied key") -def search_for_host_key(module,host,key,path,sshkeygen): - '''search_for_host_key(module,host,key,path,sshkeygen) -> (current,replace) +def search_for_host_key(module,host,key,hash_host,path,sshkeygen): + '''search_for_host_key(module,host,key,path,sshkeygen) -> (found,replace_or_add,found_line) - Looks up host in the known_hosts file path; if it's there, looks to see + Looks up host and keytype in the known_hosts file path; if it's there, looks to see if one of those entries matches key. Returns: - current (Boolean): is host found in path? - replace (Boolean): is the key in path different to that supplied by user? - if current=False, then replace is always False. + found (Boolean): is host found in path? + replace_or_add (Boolean): is the key in path different to that supplied by user? + found_line (int or None): the line where a key of the same type was found + if found=False, then replace is always False. 
sshkeygen is the path to ssh-keygen, found earlier with get_bin_path ''' - replace=False if os.path.exists(path)==False: - return False, False + return False, False, None, key + + sshkeygen_command=[sshkeygen,'-F',host,'-f',path] + #openssh >=6.4 has changed ssh-keygen behaviour such that it returns #1 if no host is found, whereas previously it returned 0 - rc,stdout,stderr=module.run_command([sshkeygen,'-F',host,'-f',path], + rc,stdout,stderr=module.run_command(sshkeygen_command, check_rc=False) if stdout=='' and stderr=='' and (rc==0 or rc==1): - return False, False #host not found, no other errors + return False, False, None, key #host not found, no other errors if rc!=0: #something went wrong module.fail_json(msg="ssh-keygen failed (rc=%d,stdout='%s',stderr='%s')" % (rc,stdout,stderr)) -#If user supplied no key, we don't want to try and replace anything with it + #If user supplied no key, we don't want to try and replace anything with it if key is None: - return True, False + return True, False, None, key lines=stdout.split('\n') - k=key.strip() #trim trailing newline - #ssh-keygen returns only the host we ask about in the host field, - #even if the key entry has multiple hosts. Emulate this behaviour here, - #otherwise we get false negatives. - #Only necessary for unhashed entries. - if k[0] !='|': - k=k.split() - #The optional "marker" field, used for @cert-authority or @revoked - if k[0][0] == '@': - k[1]=host - else: - k[0]=host - k=' '.join(k) - for l in lines: - if l=='': - continue - if l[0]=='#': #comment + new_key = normalize_known_hosts_key(key) + + sshkeygen_command.insert(1,'-H') + rc,stdout,stderr=module.run_command(sshkeygen_command,check_rc=False) + if rc!=0: #something went wrong + module.fail_json(msg="ssh-keygen failed to hash host (rc=%d,stdout='%s',stderr='%s')" % (rc,stdout,stderr)) + hashed_lines=stdout.split('\n') + + for lnum,l in enumerate(lines): + if l=='': continue - if k==l: #found a match - return True, False #current, not-replace - #No match found, return current and replace - return True, True + elif l[0]=='#': # info output from ssh-keygen; contains the line number where key was found + try: + # This output format has been hardcoded in ssh-keygen since at least OpenSSH 4.0 + # It always outputs the non-localized comment before the found key + found_line = int(re.search(r'found: line (\d+)', l).group(1)) + except IndexError: + e = get_exception() + module.fail_json(msg="failed to parse output of ssh-keygen for line number: '%s'" % l) + else: + found_key = normalize_known_hosts_key(l) + if hash_host==True: + if found_key['host'][:3]=='|1|': + new_key['host']=found_key['host'] + else: + hashed_host=normalize_known_hosts_key(hashed_lines[lnum]) + found_key['host']=hashed_host['host'] + key=key.replace(host,found_key['host']) + if new_key==found_key: #found a match + return True, False, found_line, key #found exactly the same key, don't replace + elif new_key['type'] == found_key['type']: # found a different key for the same key type + return True, True, found_line, key + #No match found, return found and replace, but no line + return True, True, None, key + +def normalize_known_hosts_key(key): + ''' + Transform a key, either taken from a known_host file or provided by the + user, into a normalized form. + The host part (which might include multiple hostnames or be hashed) gets + replaced by the provided host. 
Also, any spurious information gets removed + from the end (like the username@host tag usually present in hostkeys, but + absent in known_hosts files) + ''' + k=key.strip() #trim trailing newline + k=key.split() + d = dict() + #The optional "marker" field, used for @cert-authority or @revoked + if k[0][0] == '@': + d['options'] = k[0] + d['host']=k[1] + d['type']=k[2] + d['key']=k[3] + else: + d['host']=k[0] + d['type']=k[1] + d['key']=k[2] + return d def main(): @@ -240,7 +299,8 @@ def main(): argument_spec = dict( name = dict(required=True, type='str', aliases=['host']), key = dict(required=False, type='str'), - path = dict(default="~/.ssh/known_hosts", type='str'), + path = dict(default="~/.ssh/known_hosts", type='path'), + hash_host = dict(required=False, type='bool' ,default=False), state = dict(default='present', choices=['absent','present']), ), supports_check_mode = True @@ -249,6 +309,5 @@ def main(): results = enforce_state(module,module.params) module.exit_json(**results) -# import module snippets -from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/system/locale_gen.py b/system/locale_gen.py index 410f1dfc23d..b56a5e498e2 100644 --- a/system/locale_gen.py +++ b/system/locale_gen.py @@ -15,10 +15,10 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -import os -import os.path -from subprocess import Popen, PIPE, call -import re + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} DOCUMENTATION = ''' --- @@ -45,13 +45,30 @@ EXAMPLES = ''' # Ensure a locale exists. -- locale_gen: name=de_CH.UTF-8 state=present +- locale_gen: + name: de_CH.UTF-8 + state: present ''' +import os +import os.path +from subprocess import Popen, PIPE, call +import re + +from ansible.module_utils.basic import * +from ansible.module_utils.pycompat24 import get_exception + LOCALE_NORMALIZATION = { ".utf8": ".UTF-8", ".eucjp": ".EUC-JP", ".iso885915": ".ISO-8859-15", + ".cp1251": ".CP1251", + ".koi8r": ".KOI8-R", + ".armscii8": ".ARMSCII-8", + ".euckr": ".EUC-KR", + ".gbk": ".GBK", + ".gb18030": ".GB18030", + ".euctw": ".EUC-TW", } # =========================================== @@ -87,7 +104,7 @@ def is_present(name): def fix_case(name): """locale -a might return the encoding in either lower or upper case. Passing through this function makes them uniform for comparisons.""" - for s, r in LOCALE_NORMALIZATION.iteritems(): + for s, r in LOCALE_NORMALIZATION.items(): name = name.replace(s, r) return name @@ -218,12 +235,12 @@ def main(): apply_change(state, name) else: apply_change_ubuntu(state, name) - except EnvironmentError, e: + except EnvironmentError: + e = get_exception() module.fail_json(msg=e.strerror, exitValue=e.errno) module.exit_json(name=name, changed=changed, msg="OK") -# import module snippets -from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/system/lvg.py b/system/lvg.py index 9e3ba2d2931..9c638f4d317 100644 --- a/system/lvg.py +++ b/system/lvg.py @@ -19,6 +19,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- author: "Alexander Bulimov (@abulimov)" @@ -35,6 +39,7 @@ pvs: description: - List of comma-separated devices to use as physical devices in this volume group. Required when creating or resizing volume group. 
+ - The module will take care of running pvcreate if needed. required: false pesize: description: @@ -65,17 +70,24 @@ EXAMPLES = ''' # Create a volume group on top of /dev/sda1 with physical extent size = 32MB. -- lvg: vg=vg.services pvs=/dev/sda1 pesize=32 +- lvg: + vg: vg.services + pvs: /dev/sda1 + pesize: 32 # Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5. # If, for example, we already have VG vg.services on top of /dev/sdb1, # this VG will be extended by /dev/sdc5. Or if vg.services was created on # top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5, # and then reduce by /dev/sda5. -- lvg: vg=vg.services pvs=/dev/sdb1,/dev/sdc5 +- lvg: + vg: vg.services + pvs: /dev/sdb1,/dev/sdc5 # Remove a volume group with name vg.services. -- lvg: vg=vg.services state=absent +- lvg: + vg: vg.services + state: absent ''' def parse_vgs(data): @@ -130,6 +142,7 @@ def main(): pesize = module.params['pesize'] vgoptions = module.params['vg_options'].split() + dev_list = [] if module.params['pvs']: dev_list = module.params['pvs'] elif state == 'present': @@ -183,7 +196,7 @@ def main(): ### create PV pvcreate_cmd = module.get_bin_path('pvcreate', True) for current_dev in dev_list: - rc,_,err = module.run_command("%s %s" % (pvcreate_cmd,current_dev)) + rc,_,err = module.run_command("%s -f %s" % (pvcreate_cmd,current_dev)) if rc == 0: changed = True else: @@ -224,7 +237,7 @@ def main(): ### create PV pvcreate_cmd = module.get_bin_path('pvcreate', True) for current_dev in devs_to_add: - rc,_,err = module.run_command("%s %s" % (pvcreate_cmd, current_dev)) + rc,_,err = module.run_command("%s -f %s" % (pvcreate_cmd, current_dev)) if rc == 0: changed = True else: @@ -251,4 +264,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/system/lvol.py b/system/lvol.py index 7a01d83829c..3ab60cb40ac 100644 --- a/system/lvol.py +++ b/system/lvol.py @@ -18,11 +18,15 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- author: - - "Jeroen Hoekx (@jhoekx)" - - "Alexander Bulimov (@abulimov)" + - "Jeroen Hoekx (@jhoekx)" + - "Alexander Bulimov (@abulimov)" module: lvol short_description: Configure LVM logical volumes description: @@ -42,12 +46,21 @@ - The size of the logical volume, according to lvcreate(8) --size, by default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE]; - resizing is not supported with percentages. + Float values must begin with a digit. + Resizing using percentage values was not supported prior to 2.1. state: choices: [ "present", "absent" ] default: present description: - - Control if the logical volume exists. + - Control if the logical volume exists. If C(present) and the + volume does not already exist then the C(size) option is required. + required: false + active: + version_added: "2.2" + choices: [ "yes", "no" ] + default: "yes" + description: + - Whether the volume is active and visible to the host.
required: false force: version_added: "1.5" @@ -61,36 +74,136 @@ version_added: "2.0" description: - Free-form options to be passed to the lvcreate command + snapshot: + version_added: "2.1" + description: + - The name of the snapshot volume + required: false + pvs: + version_added: "2.2" + description: + - Comma separated list of physical volumes e.g. /dev/sda,/dev/sdb + required: false + shrink: + version_added: "2.2" + description: + - shrink if current size is higher than size requested + required: false + default: yes notes: - Filesystems on top of the volume are not resized. ''' EXAMPLES = ''' # Create a logical volume of 512m. -- lvol: vg=firefly lv=test size=512 +- lvol: + vg: firefly + lv: test + size: 512 + +# Create a logical volume of 512m with disks /dev/sda and /dev/sdb +- lvol: + vg: firefly + lv: test + size: 512 + pvs: /dev/sda,/dev/sdb + +# Create cache pool logical volume +- lvol: + vg: firefly + lv: lvcache + size: 512m + opts: --type cache-pool # Create a logical volume of 512g. -- lvol: vg=firefly lv=test size=512g +- lvol: + vg: firefly + lv: test + size: 512g # Create a logical volume the size of all remaining space in the volume group -- lvol: vg=firefly lv=test size=100%FREE +- lvol: + vg: firefly + lv: test + size: 100%FREE # Create a logical volume with special options -- lvol: vg=firefly lv=test size=512g opts="-r 16" +- lvol: + vg: firefly + lv: test + size: 512g + opts: -r 16 # Extend the logical volume to 1024m. -- lvol: vg=firefly lv=test size=1024 +- lvol: + vg: firefly + lv: test + size: 1024 + +# Extend the logical volume to consume all remaining space in the volume group +- lvol: + vg: firefly + lv: test + size: +100%FREE + +# Extend the logical volume to take all remaining space of the PVs +- lvol: + vg: firefly + lv: test + size: 100%PVS + +# Resize the logical volume to % of VG +- lvol: + vg: firefly + lv: test + size: 80%VG + force: yes # Reduce the logical volume to 512m -- lvol: vg=firefly lv=test size=512 force=yes +- lvol: + vg: firefly + lv: test + size: 512 + force: yes + +# Set the logical volume to 512m and do not try to shrink if size is lower than current one +- lvol: + vg: firefly + lv: test + size: 512 + shrink: no # Remove the logical volume. -- lvol: vg=firefly lv=test state=absent force=yes +- lvol: + vg: firefly + lv: test + state: absent + force: yes + +# Create a snapshot volume of the test logical volume. 
+- lvol: + vg: firefly + lv: test + snapshot: snap1 + size: 100m + +# Deactivate a logical volume +- lvol: + vg: firefly + lv: test + active: false + +# Create a deactivated logical volume +- lvol: + vg: firefly + lv: test + size: 512g + active: false ''' import re -decimal_point = re.compile(r"(\.|,)") +decimal_point = re.compile(r"(\d+)") def mkversion(major, minor, patch): return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch) @@ -100,11 +213,24 @@ def parse_lvs(data): for line in data.splitlines(): parts = line.strip().split(';') lvs.append({ - 'name': parts[0], - 'size': int(decimal_point.split(parts[1])[0]), + 'name': parts[0].replace('[','').replace(']',''), + 'size': int(decimal_point.match(parts[1]).group(1)), + 'active': (parts[2][4] == 'a') }) return lvs +def parse_vgs(data): + vgs = [] + for line in data.splitlines(): + parts = line.strip().split(';') + vgs.append({ + 'name': parts[0], + 'size': int(decimal_point.match(parts[1]).group(1)), + 'free': int(decimal_point.match(parts[2]).group(1)), + 'ext_size': int(decimal_point.match(parts[3]).group(1)) + }) + return vgs + def get_lvm_version(module): ver_cmd = module.get_bin_path("lvm", required=True) @@ -122,10 +248,14 @@ def main(): argument_spec=dict( vg=dict(required=True), lv=dict(required=True), - size=dict(), + size=dict(type='str'), opts=dict(type='str'), state=dict(choices=["absent", "present"], default='present'), force=dict(type='bool', default='no'), + shrink=dict(type='bool', default='yes'), + active=dict(type='bool', default='yes'), + snapshot=dict(type='str', default=None), + pvs=dict(type='str') ), supports_check_mode=True, ) @@ -146,12 +276,27 @@ def main(): opts = module.params['opts'] state = module.params['state'] force = module.boolean(module.params['force']) + shrink = module.boolean(module.params['shrink']) + active = module.boolean(module.params['active']) size_opt = 'L' size_unit = 'm' + snapshot = module.params['snapshot'] + pvs = module.params['pvs'] + + if pvs is None: + pvs = "" + else: + pvs = pvs.replace(",", " ") if opts is None: opts = "" + # Add --test option when running in check-mode + if module.check_mode: + test_opt = ' --test' + else: + test_opt = '' + if size: # LVCREATE(8) -l --extents option with percentage if '%' in size: @@ -167,35 +312,46 @@ def main(): size_opt = 'l' size_unit = '' + if not '%' in size: # LVCREATE(8) -L --size option unit - elif size[-1].isalpha(): if size[-1].lower() in 'bskmgtpe': - size_unit = size[-1].lower() - if size[0:-1].isdigit(): - size = int(size[0:-1]) - else: - module.fail_json(msg="Bad size specification for unit %s" % size_unit) - size_opt = 'L' - else: - module.fail_json(msg="Size unit should be one of [bBsSkKmMgGtTpPeE]") - # when no unit, megabytes by default - elif size.isdigit(): - size = int(size) - else: - module.fail_json(msg="Bad size specification") + size_unit = size[-1].lower() + size = size[0:-1] + + try: + float(size) + if not size[0].isdigit(): raise ValueError() + except ValueError: + module.fail_json(msg="Bad size specification of '%s'" % size) + # when no unit, megabytes by default if size_opt == 'l': unit = 'm' else: unit = size_unit + # Get information on volume group requested + vgs_cmd = module.get_bin_path("vgs", required=True) + rc, current_vgs, err = module.run_command( + "%s --noheadings -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg)) + + if rc != 0: + if state == 'absent': + module.exit_json(changed=False, stdout="Volume group %s does not exist." 
% vg) + else: + module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err) + + vgs = parse_vgs(current_vgs) + this_vg = vgs[0] + + # Get information on logical volume requested lvs_cmd = module.get_bin_path("lvs", required=True) rc, current_lvs, err = module.run_command( - "%s --noheadings --nosuffix -o lv_name,size --units %s --separator ';' %s" % (lvs_cmd, unit, vg)) + "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg)) if rc != 0: if state == 'absent': - module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg, stderr=False) + module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg) else: module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err) @@ -203,8 +359,12 @@ def main(): lvs = parse_lvs(current_lvs) + if snapshot is None: + check_lv = lv + else: + check_lv = snapshot for test_lv in lvs: - if test_lv['name'] == lv: + if test_lv['name'] == check_lv: this_lv = test_lv break else: @@ -213,61 +373,118 @@ def main(): if state == 'present' and not size: if this_lv is None: module.fail_json(msg="No size given.") - else: - module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) msg = '' if this_lv is None: if state == 'present': ### create LV - if module.check_mode: + lvcreate_cmd = module.get_bin_path("lvcreate", required=True) + if snapshot is not None: + cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv) + else: + cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs) + rc, _, err = module.run_command(cmd) + if rc == 0: changed = True else: - lvcreate_cmd = module.get_bin_path("lvcreate", required=True) - cmd = "%s %s -n %s -%s %s%s %s %s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, opts, vg) - rc, _, err = module.run_command(cmd) - if rc == 0: - changed = True - else: - module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err) + module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err) else: if state == 'absent': ### remove LV - if module.check_mode: - module.exit_json(changed=True) if not force: module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name'])) lvremove_cmd = module.get_bin_path("lvremove", required=True) - rc, _, err = module.run_command("%s --force %s/%s" % (lvremove_cmd, vg, this_lv['name'])) + rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name'])) if rc == 0: module.exit_json(changed=True) else: module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err) + elif not size: + pass + elif size_opt == 'l': - module.exit_json(changed=False, msg="Resizing extents with percentage not supported.") + ### Resize LV based on % value + tool = None + size_free = this_vg['free'] + if size_whole == 'VG' or size_whole == 'PVS': + size_requested = size_percent * this_vg['size'] / 100 + else: # size_whole == 'FREE': + size_requested = size_percent * this_vg['free'] / 100 + if '+' in size: + size_requested += this_lv['size'] + if this_lv['size'] < size_requested: + if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))): + tool = module.get_bin_path("lvextend", required=True) + else: + module.fail_json(msg="Logical Volume %s could not be extended. 
Not enough free space left (%s%s required / %s%s available)" % (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit)) + elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large + if size_requested == 0: + module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name'])) + elif not force: + module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name'])) + else: + tool = module.get_bin_path("lvreduce", required=True) + tool = '%s %s' % (tool, '--force') + + if tool: + cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs) + rc, out, err = module.run_command(cmd) + if "Reached maximum COW size" in out: + module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out) + elif rc == 0: + changed = True + msg="Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit) + elif "matches existing size" in err: + module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) + elif "not larger than existing size" in err: + module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err) + else: + module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) + else: - ### resize LV + ### resize LV based on absolute values tool = None - if size > this_lv['size']: + if int(size) > this_lv['size']: tool = module.get_bin_path("lvextend", required=True) - elif size < this_lv['size']: + elif shrink and int(size) < this_lv['size']: + if int(size) == 0: + module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name'])) if not force: module.fail_json(msg="Sorry, no shrinking of %s without force=yes." 
% (this_lv['name'])) - tool = module.get_bin_path("lvreduce", required=True) - tool = '%s %s' % (tool, '--force') + else: + tool = module.get_bin_path("lvreduce", required=True) + tool = '%s %s' % (tool, '--force') if tool: - if module.check_mode: + cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs) + rc, out, err = module.run_command(cmd) + if "Reached maximum COW size" in out: + module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out) + elif rc == 0: changed = True + elif "matches existing size" in err: + module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) + elif "not larger than existing size" in err: + module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err) else: - rc, _, err = module.run_command("%s -%s %s%s %s/%s" % (tool, size_opt, size, size_unit, vg, this_lv['name'])) - if rc == 0: - changed = True - elif "matches existing size" in err: - module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) - else: - module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) + module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) + + if this_lv is not None: + if active: + lvchange_cmd = module.get_bin_path("lvchange", required=True) + rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name'])) + if rc == 0: + module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) + else: + module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err) + else: + lvchange_cmd = module.get_bin_path("lvchange", required=True) + rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name'])) + if rc == 0: + module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size']) + else: + module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err) module.exit_json(changed=changed, msg=msg) diff --git a/system/make.py b/system/make.py new file mode 100644 index 00000000000..2b618db9fac --- /dev/null +++ b/system/make.py @@ -0,0 +1,161 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Linus Unnebäck +# +# This file is part of Ansible +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: make +short_description: Run targets in a Makefile +requirements: [ make ] +version_added: "2.1" +author: Linus Unnebäck (@LinusU) +description: + - Run targets in a Makefile. 
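One thing worth noting before the option list: the implementation further down decides whether anything changed by probing with make's --question flag, which builds nothing and signals staleness purely through its exit status (0 means the target is already up to date). A minimal standalone sketch of that probe, assuming GNU make on PATH and a hypothetical helper name:

import subprocess

def target_needs_rebuild(makefile_dir, target=None):
    # 'make --question' performs no work; a non-zero exit status
    # means the target (or default goal) would have to be rebuilt.
    cmd = ['make', '--question']
    if target is not None:
        cmd.append(target)
    return subprocess.call(cmd, cwd=makefile_dir) != 0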
+options:
+ target:
+ description:
+ - The target to run
+ required: false
+ default: none
+ params:
+ description:
+ - Any extra parameters to pass to make
+ required: false
+ default: none
+ chdir:
+ description:
+ - cd into this directory before running make
+ required: true
+'''
+
+EXAMPLES = '''
+# Build the default target
+- make:
+ chdir: /home/ubuntu/cool-project
+
+# Run `install` target as root
+- make:
+ chdir: /home/ubuntu/cool-project
+ target: install
+ become: yes
+
+# Pass in extra arguments to build
+- make:
+ chdir: /home/ubuntu/cool-project
+ target: all
+ params:
+ NUM_THREADS: 4
+ BACKEND: lapack
+'''
+
+# TODO: Disabled the RETURN as it was breaking docs building. Someone needs to
+# fix this
+RETURN = '''# '''
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_command(command, module, check_rc=True):
+ """
+ Run a command using the module, return
+ the result code and std{err,out} content.
+
+ :param command: list of command arguments
+ :param module: Ansible make module instance
+ :return: return code, stdout content, stderr content
+ """
+ rc, out, err = module.run_command(command, check_rc=check_rc, cwd=module.params['chdir'])
+ return rc, sanitize_output(out), sanitize_output(err)
+
+
+def sanitize_output(output):
+ """
+ Sanitize the output string before we
+ pass it to module.fail_json. Defaults
+ the string to empty if it is None, else
+ strips trailing newlines.
+
+ :param output: output to sanitize
+ :return: sanitized output
+ """
+ if output is None:
+ return ''
+ else:
+ return output.rstrip("\r\n")
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ target=dict(required=False, default=None, type='str'),
+ params=dict(required=False, default=None, type='dict'),
+ chdir=dict(required=True, default=None, type='path'),
+ ),
+ )
+ # Build up the invocation of `make` we are going to use
+ make_path = module.get_bin_path('make', True)
+ make_target = module.params['target']
+ if module.params['params'] is not None:
+ make_parameters = [k + '=' + str(v) for k, v in iteritems(module.params['params'])]
+ else:
+ make_parameters = []
+
+ base_command = [make_path, make_target]
+ base_command.extend(make_parameters)
+
+ # Check if the target is already up to date
+ rc, out, err = run_command(base_command + ['--question'], module, check_rc=False)
+ if module.check_mode:
+ # If we've been asked to do a dry run, we only need
+ # to report whether or not the target is up to date
+ changed = (rc != 0)
+ else:
+ if rc == 0:
+ # The target is up to date, so we don't have to
+ # do anything
+ changed = False
+ else:
+ # The target isn't up to date, so we need to run it
+ rc, out, err = run_command(base_command, module)
+ changed = True
+
+ # We don't report the return code, as if this module failed
+ # we would be calling fail_json from run_command, so even if
+ # we had a non-zero return code, we did not fail. However, if
+ # we report a non-zero return code here, we will be marked as
+ # failed regardless of what we signal using the failed= kwarg.
+ module.exit_json(
+ changed=changed,
+ failed=False,
+ stdout=out,
+ stderr=err,
+ target=module.params['target'],
+ params=module.params['params'],
+ chdir=module.params['chdir']
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/system/modprobe.py b/system/modprobe.py index 64e36c784a7..d84f0d3377d 100644 --- a/system/modprobe.py +++ b/system/modprobe.py @@ -19,6 +19,10 @@ # along with this software.
If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: modprobe @@ -52,11 +56,22 @@ EXAMPLES = ''' # Add the 802.1q module -- modprobe: name=8021q state=present +- modprobe: + name: 8021q + state: present + # Add the dummy module -- modprobe: name=dummy state=present params="numdummies=2" +- modprobe: + name: dummy + state: present + params: 'numdummies=2' ''' +from ansible.module_utils.basic import * +from ansible.module_utils.pycompat24 import get_exception +import shlex + + def main(): module = AnsibleModule( argument_spec={ @@ -84,7 +99,8 @@ def main(): present = True break modules.close() - except IOError, e: + except IOError: + e = get_exception() module.fail_json(msg=str(e), **args) # Check only; don't modify @@ -100,19 +116,20 @@ def main(): # Add/remove module as needed if args['state'] == 'present': if not present: - rc, _, err = module.run_command([module.get_bin_path('modprobe', True), args['name'], args['params']]) + command = [module.get_bin_path('modprobe', True), args['name']] + command.extend(shlex.split(args['params'])) + rc, _, err = module.run_command(command) if rc != 0: module.fail_json(msg=err, **args) args['changed'] = True elif args['state'] == 'absent': if present: - rc, _, err = module.run_command([module.get_bin_path('rmmod', True), args['name']]) + rc, _, err = module.run_command([module.get_bin_path('modprobe', True), '-r', args['name']]) if rc != 0: module.fail_json(msg=err, **args) args['changed'] = True module.exit_json(**args) -# import module snippets -from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/system/ohai.py b/system/ohai.py index 6f066ec5ad8..47926a34d12 100644 --- a/system/ohai.py +++ b/system/ohai.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ohai @@ -53,6 +57,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() - - +if __name__ == '__main__': + main() diff --git a/system/open_iscsi.py b/system/open_iscsi.py index 084303d7b52..2e3c0e838f8 100644 --- a/system/open_iscsi.py +++ b/system/open_iscsi.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
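One pattern repeats across nearly every file touched by this changeset: the bare module-level main() call is replaced with an import guard, so a file can be imported (for example by the documentation build or by unit tests) without actually executing the module. The shape of the change, in miniature:

def main():
    # real module logic would run here
    print("running module logic")

if __name__ == '__main__':
    # Executes only when run as a script, never on import.
    main()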
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = ''' --- module: open_iscsi @@ -84,23 +88,32 @@ description: - whether the list of nodes in the persistent iscsi database should be returned by the module
+'''
-examples:
- - description: perform a discovery on 10.1.2.3 and show available target
- nodes
- code: >
- open_iscsi: show_nodes=yes discover=yes portal=10.1.2.3
- - description: discover targets on portal and login to the one available
- (only works if exactly one target is exported to the initiator)
- code: >
- open_iscsi: portal={{iscsi_target}} login=yes discover=yes
- - description: connect to the named target, after updating the local
- persistent database (cache)
- code: >
- open_iscsi: login=yes target=iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
- - description: discconnect from the cached named target
- code: >
- open_iscsi: login=no target=iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d"
+EXAMPLES = '''
+# perform a discovery on 10.1.2.3 and show available target nodes
+- open_iscsi:
+ show_nodes: yes
+ discover: yes
+ portal: 10.1.2.3
+
+# discover targets on portal and login to the one available
+# (only works if exactly one target is exported to the initiator)
+- open_iscsi:
+ portal: '{{ iscsi_target }}'
+ login: yes
+ discover: yes
+
+# connect to the named target, after updating the local
+# persistent database (cache)
+- open_iscsi:
+ login: yes
+ target: 'iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d'
+
+# disconnect from the cached named target
+- open_iscsi:
+ login: no
+ target: 'iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d'
'''
import glob @@ -372,5 +385,5 @@ def main(): # import module snippets from ansible.module_utils.basic import *
-main()
-
+if __name__ == '__main__':
+ main()
diff --git a/system/openwrt_init.py b/system/openwrt_init.py new file mode 100644 index 00000000000..7b4f7f79d37 --- /dev/null +++ b/system/openwrt_init.py @@ -0,0 +1,213 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2016, Andrew Gaffney
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: openwrt_init
+author:
+ - "Andrew Gaffney (@agaffney)"
+version_added: "2.3"
+short_description: Manage services on OpenWrt.
+description:
+ - Controls OpenWrt services on remote hosts.
+options:
+ name:
+ required: true
+ description:
+ - Name of the service.
+ aliases: ['service']
+ state:
+ required: false
+ default: null
+ choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
+ C(restarted) will always bounce the service. C(reloaded) will always reload.
+ enabled: + required: false + choices: [ "yes", "no" ] + default: null + description: + - Whether the service should start on boot. B(At least one of state and enabled are required.) + pattern: + required: false + description: + - If the service does not respond to the 'running' command, name a + substring to look for as would be found in the output of the I(ps) + command as a stand-in for a 'running' result. If the string is found, + the service will be assumed to be running. +notes: + - One option other than name is required. +requirements: + - An OpenWrt system +''' + +EXAMPLES = ''' +# Example action to start service httpd, if not running +- openwrt_init: + state: started + name: httpd + +# Example action to stop service cron, if running +- openwrt_init: + name: cron + state: stopped + +# Example action to reload service httpd, in all cases +- openwrt_init: + name: httpd + state: reloaded + +# Example action to enable service httpd +- openwrt_init: + name: httpd + enabled: yes +''' + +RETURN = ''' +''' + +import os +import glob +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_bytes, to_native + +module = None +init_script = None + +# =============================== +# Check if service is enabled +def is_enabled(): + (rc, out, err) = module.run_command("%s enabled" % init_script) + if rc == 0: + return True + return False + +# =========================================== +# Main control flow + +def main(): + global module, init_script + # init + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, type='str', aliases=['service']), + state = dict(choices=['started', 'stopped', 'restarted', 'reloaded'], type='str'), + enabled = dict(type='bool'), + pattern = dict(required=False, default=None), + ), + supports_check_mode=True, + required_one_of=[['state', 'enabled']], + ) + + # initialize + service = module.params['name'] + init_script = '/etc/init.d/' + service + rc = 0 + out = err = '' + result = { + 'name': service, + 'changed': False, + } + + # check if service exists + if not os.path.exists(init_script): + module.fail_json(msg='service %s does not exist' % service) + + # Enable/disable service startup at boot if requested + if module.params['enabled'] is not None: + # do we need to enable the service? 
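+        # (is_enabled() above simply runs "/etc/init.d/<name> enabled" and maps an
+        # exit status of 0 to True; the shell equivalent would be something like
+        # "/etc/init.d/httpd enabled && echo yes", with httpd only an example name)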
+ enabled = is_enabled()
+
+ # default to current state
+ result['enabled'] = enabled
+
+ # Change enable/disable if needed
+ if enabled != module.params['enabled']:
+ result['changed'] = True
+ if module.params['enabled']:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s" % (init_script, action))
+ # openwrt init scripts can return a non-zero exit code on a successful 'enable'
+ # command if the init script doesn't contain a STOP value, so we ignore the exit
+ # code and explicitly check if the service is now in the desired state
+ if is_enabled() != module.params['enabled']:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
+
+ result['enabled'] = not enabled
+
+ if module.params['state'] is not None:
+ running = False
+
+ # check if service is currently running
+ if module.params['pattern']:
+ # Find ps binary
+ psbin = module.get_bin_path('ps', True)
+
+ # this should be busybox ps, so we only want/need the 'w' option
+ (rc, psout, pserr) = module.run_command('%s w' % psbin)
+ # If rc is 0, set running as appropriate
+ if rc == 0:
+ lines = psout.split("\n")
+ for line in lines:
+ if module.params['pattern'] in line and not "pattern=" in line:
+ # so as to not confuse ./hacking/test-module
+ running = True
+ break
+ else:
+ (rc, out, err) = module.run_command("%s running" % init_script)
+ if rc == 0:
+ running = True
+
+ # default to desired state
+ result['state'] = module.params['state']
+
+ # determine action, if any
+ action = None
+ if module.params['state'] == 'started':
+ if not running:
+ action = 'start'
+ result['changed'] = True
+ elif module.params['state'] == 'stopped':
+ if running:
+ action = 'stop'
+ result['changed'] = True
+ else:
+ action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
+ result['state'] = 'started'
+ result['changed'] = True
+
+ if action:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s" % (init_script, action))
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
+
+
+ module.exit_json(**result)
+
+if __name__ == '__main__':
+ main()
diff --git a/system/osx_defaults.py b/system/osx_defaults.py index e4dc5f8c750..757cc811d92 100644 --- a/system/osx_defaults.py +++ b/system/osx_defaults.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = ''' --- module: osx_defaults @@ -33,6 +37,13 @@ - The domain is a domain name of the form com.companyname.appname. required: false default: NSGlobalDomain
+ host:
+ description:
+ - The host on which the preference should apply. The special value "currentHost" corresponds to the
+ "-currentHost" switch of the defaults commandline tool.
+ required: false + default: null + version_added: "2.1" key: description: - The key of the user preference @@ -65,17 +76,48 @@ ''' EXAMPLES = ''' -- osx_defaults: domain=com.apple.Safari key=IncludeInternalDebugMenu type=bool value=true state=present -- osx_defaults: domain=NSGlobalDomain key=AppleMeasurementUnits type=string value=Centimeters state=present -- osx_defaults: key=AppleMeasurementUnits type=string value=Centimeters +- osx_defaults: + domain: com.apple.Safari + key: IncludeInternalDebugMenu + type: bool + value: true + state: present + +- osx_defaults: + domain: NSGlobalDomain + key: AppleMeasurementUnits + type: string + value: Centimeters + state: present + +- osx_defaults: + domain: com.apple.screensaver + host: currentHost + key: showClock + type: int + value: 1 + +- osx_defaults: + key: AppleMeasurementUnits + type: string + value: Centimeters + - osx_defaults: key: AppleLanguages type: array - value: ["en", "nl"] -- osx_defaults: domain=com.geekchimp.macable key=ExampleKeyToRemove state=absent + value: + - en + - nl + +- osx_defaults: + domain: com.geekchimp.macable + key: ExampleKeyToRemove + state: absent ''' -from datetime import datetime +import datetime +from ansible.module_utils.basic import * +from ansible.module_utils.pycompat24 import get_exception # exceptions --------------------------------------------------------------- {{{ class OSXDefaultsException(Exception): @@ -124,14 +166,16 @@ def _convert_type(self, type, value): if type == "string": return str(value) elif type in ["bool", "boolean"]: - if value.lower() in [True, 1, "true", "1", "yes"]: + if isinstance(value, basestring): + value = value.lower() + if value in [True, 1, "true", "1", "yes"]: return True - elif value.lower() in [False, 0, "false", "0", "no"]: + elif value in [False, 0, "false", "0", "no"]: return False raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value))) elif type == "date": try: - return datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S") + return datetime.datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S") except ValueError: raise OSXDefaultsException( "Invalid date value: {0}. 
Required format yyy-mm-dd hh:mm:ss.".format(repr(value)) @@ -153,6 +197,19 @@ def _convert_type(self, type, value): raise OSXDefaultsException('Type is not supported: {0}'.format(type)) + """ Returns a normalized list of commandline arguments based on the "host" attribute """ + def _host_args(self): + if self.host is None: + return [] + elif self.host == 'currentHost': + return ['-currentHost'] + else: + return ['-host', self.host] + + """ Returns a list containing the "defaults" executable and any common base arguments """ + def _base_command(self): + return [self.executable] + self._host_args() + """ Converts array output from defaults to an list """ @staticmethod def _convert_defaults_str_to_list(value): @@ -174,7 +231,7 @@ def _convert_defaults_str_to_list(value): """ Reads value of this domain & key from defaults """ def read(self): # First try to find out the type - rc, out, err = self.module.run_command([self.executable, "read-type", self.domain, self.key]) + rc, out, err = self.module.run_command(self._base_command() + ["read-type", self.domain, self.key]) # If RC is 1, the key does not exists if rc == 1: @@ -188,7 +245,7 @@ def read(self): type = out.strip().replace('Type is ', '') # Now get the current value - rc, out, err = self.module.run_command([self.executable, "read", self.domain, self.key]) + rc, out, err = self.module.run_command(self._base_command() + ["read", self.domain, self.key]) # Strip output out = out.strip() @@ -208,16 +265,16 @@ def read(self): def write(self): # We need to convert some values so the defaults commandline understands it - if type(self.value) is bool: + if isinstance(self.value, bool): if self.value: value = "TRUE" else: value = "FALSE" - elif type(self.value) is int or type(self.value) is float: + elif isinstance(self.value, (int, float)): value = str(self.value) elif self.array_add and self.current_value is not None: value = list(set(self.value) - set(self.current_value)) - elif isinstance(self.value, datetime): + elif isinstance(self.value, datetime.datetime): value = self.value.strftime('%Y-%m-%d %H:%M:%S') else: value = self.value @@ -230,14 +287,14 @@ def write(self): if not isinstance(value, list): value = [value] - rc, out, err = self.module.run_command([self.executable, 'write', self.domain, self.key, '-' + self.type] + value) + rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value) if rc != 0: raise OSXDefaultsException('An error occurred while writing value to defaults: ' + out) """ Deletes defaults key from domain """ def delete(self): - rc, out, err = self.module.run_command([self.executable, 'delete', self.domain, self.key]) + rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key]) if rc != 0: raise OSXDefaultsException("An error occurred while deleting key from defaults: " + out) @@ -252,14 +309,16 @@ def run(self): # Handle absent state if self.state == "absent": - print "Absent state detected!" if self.current_value is None: return False + if self.module.check_mode: + return True self.delete() return True # There is a type mismatch! Given type does not match the type in defaults - if self.current_value is not None and type(self.current_value) is not type(self.value): + value_type = type(self.value) + if self.current_value is not None and not isinstance(self.current_value, value_type): raise OSXDefaultsException("Type mismatch. Type in defaults: " + type(self.current_value).__name__) # Current value matches the given value. 
Nothing need to be done. Arrays need extra care @@ -272,6 +331,9 @@ elif self.current_value == self.value: return False
+ if self.module.check_mode:
+ return True
+
# Change/Create/Set given key/value for domain in defaults self.write() return True @@ -289,6 +351,10 @@ def main(): default="NSGlobalDomain", required=False, ),
+ host=dict(
+ default=None,
+ required=False,
+ ),
key=dict( default=None, ), @@ -309,7 +375,7 @@ array_add=dict( default=False, required=False,
- choices=BOOLEANS,
+ type='bool',
), value=dict( default=None, @@ -331,6 +397,7 @@ ) domain = module.params['domain']
+ host = module.params['host']
key = module.params['key'] type = module.params['type'] array_add = module.params['array_add'] @@ -339,14 +406,15 @@ path = module.params['path'] try:
- defaults = OSXDefaults(module=module, domain=domain, key=key, type=type,
+ defaults = OSXDefaults(module=module, domain=domain, host=host, key=key, type=type,
array_add=array_add, value=value, state=state, path=path) changed = defaults.run() module.exit_json(changed=changed)
- except OSXDefaultsException, e:
+ except OSXDefaultsException:
+ e = get_exception()
module.fail_json(msg=e.message) # /main ------------------------------------------------------------------- }}}
-from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
diff --git a/system/pam_limits.py b/system/pam_limits.py index 080b938dd01..f47fbf06bbf 100644 --- a/system/pam_limits.py +++ b/system/pam_limits.py @@ -23,10 +23,16 @@ import shutil import re
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = ''' --- module: pam_limits version_added: "2.0"
+author:
+ - "Sebastien Rohaut (@usawa)"
short_description: Modify Linux PAM limits description: - The M(pam_limits) module modify PAM limits, default in /etc/security/limits.conf. @@ -40,7 +46,7 @@ description: - Limit type, see C(man limits) for an explanation required: true
- choices: [ "hard", "soft" ]
+ choices: [ "hard", "soft", "-" ]
limit_item: description: - The limit to be set @@ -78,14 +84,36 @@ - Modify the limits.conf path. required: false default: "/etc/security/limits.conf"
+ comment:
+ description:
+ - Comment associated with the limit.
+ required: false
+ default: ''
'''
EXAMPLES = '''
-# Add or modify limits for the user joe
-- pam_limits: domain=joe limit_type=soft limit_item=nofile value=64000
-
-# Add or modify limits for the user joe. Keep or set the maximal value
-- pam_limits: domain=joe limit_type=soft limit_item=nofile value=1000000
+# Add or modify nofile soft limit for the user joe
+- pam_limits:
+ domain: joe
+ limit_type: soft
+ limit_item: nofile
+ value: 64000
+
+# Add or modify fsize hard limit for the user smith. Keep or set the maximal value.
+- pam_limits:
+ domain: smith
+ limit_type: hard
+ limit_item: fsize
+ value: 1000000
+ use_max: yes
+
+# Add or modify memlock, both soft and hard, limit for the user james with a comment.
+- pam_limits: + domain: james + limit_type: - + limit_item: memlock + value: unlimited + comment: unlimited memory lock for james ''' def main(): @@ -102,7 +130,7 @@ def main(): domain = dict(required=True, type='str'), limit_type = dict(required=True, type='str', choices=pam_types), limit_item = dict(required=True, type='str', choices=pam_items), - value = dict(required=True, type='int'), + value = dict(required=True, type='str'), use_max = dict(default=False, type='bool'), use_min = dict(default=False, type='bool'), backup = dict(default=False, type='bool'), @@ -132,6 +160,9 @@ def main(): if use_max and use_min: module.fail_json(msg="Cannot use use_min and use_max at the same time." ) + if not (value in ['unlimited', 'infinity', '-1'] or value.isdigit()): + module.fail_json(msg="Argument 'value' can be one of 'unlimited', 'infinity', '-1' or positive number. Refer to manual pages for more details.") + # Backup if backup: backup_file = module.backup_local(limits_conf) @@ -141,7 +172,7 @@ def main(): message = '' f = open (limits_conf, 'r') # Tempfile - nf = tempfile.NamedTemporaryFile(delete = False) + nf = tempfile.NamedTemporaryFile() found = False new_value = value @@ -181,7 +212,10 @@ def main(): line_domain = line_fields[0] line_type = line_fields[1] line_item = line_fields[2] - actual_value = int(line_fields[3]) + actual_value = line_fields[3] + + if not (actual_value in ['unlimited', 'infinity', '-1'] or actual_value.isdigit()): + module.fail_json(msg="Invalid configuration of '%s'. Current value of %s is unsupported." % (limits_conf, line_item)) # Found the line if line_domain == domain and line_type == limit_type and line_item == limit_item: @@ -191,16 +225,29 @@ def main(): nf.write(line) continue + actual_value_unlimited = actual_value in ['unlimited', 'infinity', '-1'] + value_unlimited = value in ['unlimited', 'infinity', '-1'] + if use_max: - new_value = max(value, actual_value) + if value.isdigit() and actual_value.isdigit(): + new_value = str(max(int(value), int(actual_value))) + elif actual_value_unlimited: + new_value = actual_value + else: + new_value = value if use_min: - new_value = min(value,actual_value) + if value.isdigit() and actual_value.isdigit(): + new_value = str(min(int(value), int(actual_value))) + elif value_unlimited: + new_value = actual_value + else: + new_value = value # Change line only if value has changed if new_value != actual_value: changed = True - new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + str(new_value) + new_comment + "\n" + new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n" message = new_limit nf.write(new_limit) else: @@ -211,16 +258,21 @@ def main(): if not found: changed = True - new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + str(new_value) + new_comment + "\n" + new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n" message = new_limit nf.write(new_limit) f.close() - nf.close() + nf.flush() # Copy tempfile to newfile module.atomic_move(nf.name, f.name) + try: + nf.close() + except: + pass + res_args = dict( changed = changed, msg = message ) @@ -233,4 +285,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/system/puppet.py b/system/puppet.py index 48a497c37ce..15acb97d262 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -15,11 +15,24 @@ # You should have received a copy of the GNU General Public License 
# along with this software. If not, see .
-import json
import os
import pipes
import stat
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ # Let snippet from module_utils/basic.py return a proper error in this case
+ pass
+
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
DOCUMENTATION = ''' --- module: puppet @@ -39,16 +52,10 @@ required: false default: None manifest:
- desciption:
+ description:
- Path to the manifest file to run puppet apply on. required: false default: None
- show_diff:
- description:
- - Should puppet return diffs of changes applied. Defaults to off to avoid leaking secret changes by default.
- required: false
- default: no
- choices: [ "yes", "no" ]
facts: description: - A dict of values to pass in as persistent external facter facts required: false @@ -64,6 +71,32 @@ - Puppet environment to be used. required: false default: None
+ logdest:
+ description:
+ - Where the puppet logs should go, if puppet apply is being used
+ required: false
+ default: stdout
+ choices: [ 'stdout', 'syslog' ]
+ version_added: "2.1"
+ certname:
+ description:
+ - The name to use when handling certificates.
+ required: false
+ default: None
+ version_added: "2.1"
+ tags:
+ description:
+ - A comma-separated list of puppet tags to be used.
+ required: false
+ default: None
+ version_added: "2.1"
+ execute:
+ description:
+ - Execute a specific piece of Puppet code. It has no effect with
+ a puppetmaster.
+ required: false
+ default: None
+ version_added: "2.1"
requirements: [ puppet ] author: "Monty Taylor (@emonty)" '''
@@ -73,10 +106,25 @@ - puppet
# Run puppet and timeout in 5 minutes
-- puppet: timeout=5m
+- puppet:
+ timeout: 5m
# Run puppet using a different environment
-- puppet: environment=testing
+- puppet:
+ environment: testing
+
+# Run puppet using a specific certname
+- puppet:
+ certname: agent01.example.com
+
+# Run puppet using a specific piece of Puppet code. Has no effect with a
+# puppetmaster.
+- puppet:
+ execute: 'include ::mymodule'
+
+# Run puppet using specific tags
+- puppet:
+ tags: update,nginx
'''
@@ -108,26 +156,37 @@ def main(): timeout=dict(default="30m"), puppetmaster=dict(required=False, default=None), manifest=dict(required=False, default=None),
+ logdest=dict(
+ required=False, default='stdout',
+ choices=['stdout', 'syslog']),
show_diff=dict(
+ # internal code to work with --diff, do not use
default=False, aliases=['show-diff'], type='bool'),
facts=dict(default=None), facter_basename=dict(default='ansible'), environment=dict(required=False, default=None),
+ certname=dict(required=False, default=None),
+ tags=dict(required=False, default=None, type='list'),
+ execute=dict(required=False, default=None),
), supports_check_mode=True, mutually_exclusive=[ ('puppetmaster', 'manifest'),
+ ('puppetmaster', 'manifest', 'execute'),
], ) p = module.params global PUPPET_CMD
- PUPPET_CMD = module.get_bin_path("puppet", False)
+ PUPPET_CMD = module.get_bin_path("puppet", False, ['/opt/puppetlabs/bin'])
if not PUPPET_CMD: module.fail_json( msg="Could not find puppet.
Please ensure it is installed.") + global TIMEOUT_CMD + TIMEOUT_CMD = module.get_bin_path("timeout", False) + if p['manifest']: if not os.path.exists(p['manifest']): module.fail_json( @@ -140,7 +199,8 @@ def main(): PUPPET_CMD + " config print agent_disabled_lockfile") if os.path.exists(stdout.strip()): module.fail_json( - msg="Puppet agent is administratively disabled.", disabled=True) + msg="Puppet agent is administratively disabled.", + disabled=True) elif rc != 0: module.fail_json( msg="Puppet agent state could not be determined.") @@ -151,13 +211,18 @@ def main(): module.params['facter_basename'], module.params['facts']) - base_cmd = "timeout -s 9 %(timeout)s %(puppet_cmd)s" % dict( - timeout=pipes.quote(p['timeout']), puppet_cmd=PUPPET_CMD) + if TIMEOUT_CMD: + base_cmd = "%(timeout_cmd)s -s 9 %(timeout)s %(puppet_cmd)s" % dict( + timeout_cmd=TIMEOUT_CMD, + timeout=pipes.quote(p['timeout']), + puppet_cmd=PUPPET_CMD) + else: + base_cmd = PUPPET_CMD if not p['manifest']: cmd = ("%(base_cmd)s agent --onetime" " --ignorecache --no-daemonize --no-usecacheonfailure --no-splay" - " --detailed-exitcodes --verbose") % dict( + " --detailed-exitcodes --verbose --color 0") % dict( base_cmd=base_cmd, ) if p['puppetmaster']: @@ -166,14 +231,26 @@ def main(): cmd += " --show_diff" if p['environment']: cmd += " --environment '%s'" % p['environment'] + if p['tags']: + cmd += " --tags '%s'" % ','.join(p['tags']) + if p['certname']: + cmd += " --certname='%s'" % p['certname'] if module.check_mode: cmd += " --noop" else: cmd += " --no-noop" else: cmd = "%s apply --detailed-exitcodes " % base_cmd + if p['logdest'] == 'syslog': + cmd += "--logdest syslog " if p['environment']: cmd += "--environment '%s' " % p['environment'] + if p['certname']: + cmd += " --certname='%s'" % p['certname'] + if p['execute']: + cmd += " --execute '%s'" % p['execute'] + if p['tags']: + cmd += " --tags '%s'" % ','.join(p['tags']) if module.check_mode: cmd += "--noop " else: @@ -183,7 +260,7 @@ def main(): if rc == 0: # success - module.exit_json(rc=rc, changed=False, stdout=stdout) + module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr) elif rc == 1: # rc==1 could be because it's disabled # rc==1 could also mean there was a compilation failure @@ -197,7 +274,7 @@ def main(): error=True, stdout=stdout, stderr=stderr) elif rc == 2: # success with changes - module.exit_json(rc=0, changed=True) + module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr) elif rc == 124: # timeout module.exit_json( @@ -211,4 +288,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/system/sefcontext.py b/system/sefcontext.py new file mode 100644 index 00000000000..f1000b34cc1 --- /dev/null +++ b/system/sefcontext.py @@ -0,0 +1,265 @@ +#!/usr/bin/python + +# (c) 2016, Dag Wieers +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
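Stepping back to the puppet module above: its result handling follows puppet's --detailed-exitcodes convention, where 0 means a clean run with no changes and 2 means changes were applied, plus exit code 124 coming from the timeout wrapper. A condensed sketch of that mapping (the function name is hypothetical):

def interpret_puppet_rc(rc):
    # 0: succeeded, nothing changed; 2: succeeded and applied changes
    # (both per --detailed-exitcodes); 124: killed by the timeout command.
    if rc == 0:
        return dict(changed=False)
    if rc == 2:
        return dict(changed=True)
    if rc == 124:
        return dict(failed=True, msg='puppet run timed out')
    return dict(failed=True, msg='puppet run failed with rc %d' % rc)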
+ +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: sefcontext +short_description: Manages SELinux file context mapping definitions +description: + - Manages SELinux file context mapping definitions + - Similar to the C(semanage fcontext) command +version_added: "2.2" +options: + target: + description: + - Target path (expression). + required: true + default: null + aliases: ['path'] + ftype: + description: + - File type. + required: false + default: a + setype: + description: + - SELinux type for the specified target. + required: true + default: null + seuser: + description: + - SELinux user for the specified target. + required: false + default: null + selevel: + description: + - SELinux range for the specified target. + required: false + default: null + aliases: ['serange'] + state: + description: + - Desired boolean value. + required: false + default: present + choices: [ 'present', 'absent' ] + reload: + description: + - Reload SELinux policy after commit. + required: false + default: yes +notes: + - The changes are persistent across reboots +requirements: [ 'libselinux-python', 'policycoreutils-python' ] +author: Dag Wieers +''' + +EXAMPLES = ''' +# Allow apache to modify files in /srv/git_repos +- sefcontext: + target: '/srv/git_repos(/.*)?' + setype: httpd_git_rw_content_t + state: present +''' + +RETURN = ''' +# Default return values +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils._text import to_native + +try: + import selinux + HAVE_SELINUX=True +except ImportError: + HAVE_SELINUX=False + +try: + import seobject + HAVE_SEOBJECT=True +except ImportError: + HAVE_SEOBJECT=False + +### Add missing entries (backward compatible) +seobject.file_types.update(dict( + a = seobject.SEMANAGE_FCONTEXT_ALL, + b = seobject.SEMANAGE_FCONTEXT_BLOCK, + c = seobject.SEMANAGE_FCONTEXT_CHAR, + d = seobject.SEMANAGE_FCONTEXT_DIR, + f = seobject.SEMANAGE_FCONTEXT_REG, + l = seobject.SEMANAGE_FCONTEXT_LINK, + p = seobject.SEMANAGE_FCONTEXT_PIPE, + s = seobject.SEMANAGE_FCONTEXT_SOCK, +)) + +### Make backward compatible +option_to_file_type_str = dict( + a = 'all files', + b = 'block device', + c = 'character device', + d = 'directory', + f = 'regular file', + l = 'symbolic link', + p = 'named pipe', + s = 'socket file', +) + +def semanage_fcontext_exists(sefcontext, target, ftype): + ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. ''' + + # Beware that records comprise of a string representation of the file_type + record = (target, option_to_file_type_str[ftype]) + records = sefcontext.get_all() + try: + return records[record] + except KeyError: + return None + +def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''): + ''' Add or modify SELinux file context mapping definition to the policy. 
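+ If a record for (target, ftype) already exists it is modified in place,
+ keeping the original seuser/serange unless new values are given; otherwise
+ a record is added, with seuser defaulting to system_u and serange to s0.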
''' + + changed = False + prepared_diff = '' + + try: + sefcontext = seobject.fcontextRecords(sestore) + sefcontext.set_reload(do_reload) + exists = semanage_fcontext_exists(sefcontext, target, ftype) + if exists: + # Modify existing entry + orig_seuser, orig_serole, orig_setype, orig_serange = exists + + if seuser is None: + seuser = orig_seuser + if serange is None: + serange = orig_serange + + if setype != orig_setype or seuser != orig_seuser or serange != orig_serange: + if not module.check_mode: + sefcontext.modify(target, setype, ftype, serange, seuser) + changed = True + + if module._diff: + prepared_diff += '# Change to semanage file context mappings\n' + prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange) + prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange) + else: + # Add missing entry + if seuser is None: + seuser = 'system_u' + if serange is None: + serange = 's0' + + if not module.check_mode: + sefcontext.add(target, setype, ftype, serange, seuser) + changed = True + + if module._diff: + prepared_diff += '# Addition to semanage file context mappings\n' + prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange) + + except Exception: + e = get_exception() + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e))) + + if module._diff and prepared_diff: + result['diff'] = dict(prepared=prepared_diff) + + module.exit_json(changed=changed, seuser=seuser, serange=serange, **result) + +def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''): + ''' Delete SELinux file context mapping definition from the policy. ''' + + changed = False + prepared_diff = '' + + try: + sefcontext = seobject.fcontextRecords(sestore) + sefcontext.set_reload(do_reload) + exists = semanage_fcontext_exists(sefcontext, target, ftype) + if exists: + # Remove existing entry + orig_seuser, orig_serole, orig_setype, orig_serange = exists + + if not module.check_mode: + sefcontext.delete(target, ftype) + changed = True + + if module._diff: + prepared_diff += '# Deletion to semanage file context mappings\n' + prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3]) + + except Exception: + e = get_exception() + module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e))) + + if module._diff and prepared_diff: + result['diff'] = dict(prepared=prepared_diff) + + module.exit_json(changed=changed, **result) + + +def main(): + module = AnsibleModule( + argument_spec = dict( + target = dict(required=True, aliases=['path']), + ftype = dict(required=False, choices=option_to_file_type_str.keys(), default='a'), + setype = dict(required=True), + seuser = dict(required=False, default=None), + selevel = dict(required=False, default=None, aliases=['serange']), + state = dict(required=False, choices=['present', 'absent'], default='present'), + reload = dict(required=False, type='bool', default='yes'), + ), + supports_check_mode = True, + ) + if not HAVE_SELINUX: + module.fail_json(msg="This module requires libselinux-python") + + if not HAVE_SEOBJECT: + module.fail_json(msg="This module requires policycoreutils-python") + + if not selinux.is_selinux_enabled(): + module.fail_json(msg="SELinux is disabled on this host.") + + target = module.params['target'] + ftype = module.params['ftype'] + setype = module.params['setype'] + seuser = module.params['seuser'] + serange = module.params['selevel'] + 
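+    # (for orientation: the records managed here are the ones listed by
+    # "semanage fcontext -l", and the state == 'present' branch below is
+    # roughly equivalent to "semanage fcontext -a -t httpd_git_rw_content_t
+    # '/srv/git_repos(/.*)?'", reusing the target and type from EXAMPLES above)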
state = module.params['state'] + do_reload = module.params['reload'] + + result = dict(target=target, ftype=ftype, setype=setype, state=state) + + if state == 'present': + semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser) + elif state == 'absent': + semanage_fcontext_delete(module, result, target, ftype, do_reload) + else: + module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) + + +if __name__ == '__main__': + main() diff --git a/system/selinux_permissive.py b/system/selinux_permissive.py index 1e2a5c6c996..fed5db2bcf2 100644 --- a/system/selinux_permissive.py +++ b/system/selinux_permissive.py @@ -19,6 +19,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: selinux_permissive @@ -56,7 +60,9 @@ ''' EXAMPLES = ''' -- selinux_permissive: name=httpd_t permissive=true +- selinux_permissive: + name: httpd_t + permissive: true ''' HAVE_SEOBJECT = False @@ -65,6 +71,8 @@ HAVE_SEOBJECT = True except ImportError: pass +from ansible.module_utils.basic import * +from ansible.module_utils.pycompat24 import get_exception def main(): @@ -90,7 +98,8 @@ def main(): try: permissive_domains = seobject.permissiveRecords(store) - except ValueError, e: + except ValueError: + e = get_exception() module.fail_json(domain=domain, msg=str(e)) # not supported on EL 6 @@ -99,7 +108,8 @@ def main(): try: all_domains = permissive_domains.get_all() - except ValueError, e: + except ValueError: + e = get_exception() module.fail_json(domain=domain, msg=str(e)) if permissive: @@ -107,7 +117,8 @@ def main(): if not module.check_mode: try: permissive_domains.add(domain) - except ValueError, e: + except ValueError: + e = get_exception() module.fail_json(domain=domain, msg=str(e)) changed = True else: @@ -115,7 +126,8 @@ def main(): if not module.check_mode: try: permissive_domains.delete(domain) - except ValueError, e: + except ValueError: + e = get_exception() module.fail_json(domain=domain, msg=str(e)) changed = True @@ -123,8 +135,5 @@ def main(): permissive=permissive, domain=domain) -################################################# -# import module snippets -from ansible.module_utils.basic import * - -main() +if __name__ == '__main__': + main() diff --git a/system/seport.py b/system/seport.py index fb1cef661a2..bbd049c030c 100644 --- a/system/seport.py +++ b/system/seport.py @@ -17,13 +17,17 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: seport short_description: Manages SELinux network port type definitions description: - Manages SELinux network port type definitions. 
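One more recurring fix, visible in the two selinux modules above: Python 2-only except clauses of the form "except ValueError, e:" are rewritten so the files also parse on Python 3, retrieving the exception through a compatibility helper instead. In miniature (the failing int() call stands in for the seobject calls; running it requires Ansible's module_utils on the path):

from ansible.module_utils.pycompat24 import get_exception

try:
    int('not-a-number')    # stand-in for a failing seobject call
except ValueError:
    e = get_exception()    # valid syntax on both Python 2 and 3
    print(str(e))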
-version_added: "1.7.1" +version_added: "2.0" options: ports: description: @@ -61,11 +65,25 @@ EXAMPLES = ''' # Allow Apache to listen on tcp port 8888 -- seport: ports=8888 proto=tcp setype=http_port_t state=present +- seport: + ports: 8888 + proto: tcp + setype: http_port_t + state: present + # Allow sshd to listen on tcp port 8991 -- seport: ports=8991 proto=tcp setype=ssh_port_t state=present +- seport: + ports: 8991 + proto: tcp + setype: ssh_port_t + state: present + # Allow memcached to listen on tcp ports 10000-10100 and 10112 -- seport: ports=10000-10100,10112 proto=tcp setype=memcache_port_t state=present +- seport: + ports: 10000-10100,10112 + proto: tcp + setype: memcache_port_t + state: present ''' try: @@ -80,10 +98,33 @@ except ImportError: HAVE_SEOBJECT=False +from ansible.module_utils.basic import * +from ansible.module_utils.pycompat24 import get_exception + + +def semanage_port_get_ports(seport, setype, proto): + """ Get the list of ports that have the specified type definition. + + :param seport: Instance of seobject.portRecords + + :type setype: str + :param setype: SELinux type. + + :type proto: str + :param proto: Protocol ('tcp' or 'udp') + + :rtype: list + :return: List of ports that have the specified SELinux type. + """ + records = seport.get_all_by_type() + if (setype, proto) in records: + return records[(setype, proto)] + else: + return [] + -def semanage_port_exists(seport, port, proto): - """ Get the SELinux port type definition from policy. Return None if it does - not exist. +def semanage_port_get_type(seport, port, proto): + """ Get the SELinux type of the specified port. :param seport: Instance of seobject.portRecords @@ -93,15 +134,19 @@ def semanage_port_exists(seport, port, proto): :type proto: str :param proto: Protocol ('tcp' or 'udp') - :rtype: bool - :return: True if the SELinux port type definition exists, False otherwise + :rtype: tuple + :return: Tuple containing the SELinux type and MLS/MCS level, or None if not found. 
""" ports = port.split('-', 1) if len(ports) == 1: ports.extend(ports) - ports = map(int, ports) - record = (ports[0], ports[1], proto) - return record in seport.get_all() + key = (int(ports[0]), int(ports[1]), proto) + + records = seport.get_all() + if key in records: + return records[key] + else: + return None def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore=''): @@ -135,27 +180,36 @@ def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', ses seport = seobject.portRecords(sestore) seport.set_reload(do_reload) change = False + ports_by_type = semanage_port_get_ports(seport, setype, proto) for port in ports: - exists = semanage_port_exists(seport, port, proto) - if not exists and not module.check_mode: - seport.add(port, proto, serange, setype) - change = change or not exists - - except ValueError, e: + if port not in ports_by_type: + change = True + port_type = semanage_port_get_type(seport, port, proto) + if port_type is None and not module.check_mode: + seport.add(port, proto, serange, setype) + elif port_type is not None and not module.check_mode: + seport.modify(port, proto, serange, setype) + + except ValueError: + e = get_exception() module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) - except IOError, e: + except IOError: + e = get_exception() module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) - except KeyError, e: + except KeyError: + e = get_exception() module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) - except OSError, e: + except OSError: + e = get_exception() module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) - except RuntimeError, e: + except RuntimeError: + e = get_exception() module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) return change -def semanage_port_del(module, ports, proto, do_reload, sestore=''): +def semanage_port_del(module, ports, proto, setype, do_reload, sestore=''): """ Delete SELinux port type definition from the policy. :type module: AnsibleModule @@ -167,6 +221,9 @@ def semanage_port_del(module, ports, proto, do_reload, sestore=''): :type proto: str :param proto: Protocol ('tcp' or 'udp') + :type setype: str + :param setype: SELinux type. 
+ :type do_reload: bool :param do_reload: Whether to reload SELinux policy after commit @@ -180,21 +237,27 @@ def semanage_port_del(module, ports, proto, do_reload, sestore=''): seport = seobject.portRecords(sestore) seport.set_reload(do_reload) change = False + ports_by_type = semanage_port_get_ports(seport, setype, proto) for port in ports: - exists = semanage_port_exists(seport, port, proto) - if not exists and not module.check_mode: - seport.delete(port, proto) - change = change or not exists + if port in ports_by_type: + change = True + if not module.check_mode: + seport.delete(port, proto) - except ValueError, e: + except ValueError: + e = get_exception() module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) - except IOError,e: + except IOError: + e = get_exception() module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) - except KeyError, e: + except KeyError: + e = get_exception() module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) - except OSError, e: + except OSError: + e = get_exception() module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) - except RuntimeError, e: + except RuntimeError: + e = get_exception() module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e))) return change @@ -234,7 +297,7 @@ def main(): if not selinux.is_selinux_enabled(): module.fail_json(msg="SELinux is disabled on this host.") - ports = [x.strip() for x in module.params['ports'].split(',')] + ports = [x.strip() for x in str(module.params['ports']).split(',')] proto = module.params['proto'] setype = module.params['setype'] state = module.params['state'] @@ -250,12 +313,12 @@ def main(): if state == 'present': result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload) elif state == 'absent': - result['changed'] = semanage_port_del(module, ports, proto, do_reload) + result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload) else: module.fail_json(msg='Invalid value of argument "state": {0}'.format(state)) module.exit_json(**result) -from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/system/solaris_zone.py b/system/solaris_zone.py index 375196cb1e7..85e0f41a1ca 100644 --- a/system/solaris_zone.py +++ b/system/solaris_zone.py @@ -22,6 +22,10 @@ import platform import tempfile +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: solaris_zone @@ -104,31 +108,55 @@ EXAMPLES = ''' # Create and install a zone, but don't boot it -solaris_zone: name=zone1 state=present path=/zones/zone1 sparse=true root_password="Be9oX7OSwWoU." - config='set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end' +- solaris_zone: + name: zone1 + state: present + path: /zones/zone1 + sparse: true + root_password: Be9oX7OSwWoU. + config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end' # Create and install a zone and boot it -solaris_zone: name=zone1 state=running path=/zones/zone1 root_password="Be9oX7OSwWoU." - config='set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end' +- solaris_zone: + name: zone1 + state: running + path: /zones/zone1 + root_password: Be9oX7OSwWoU. 
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end' # Boot an already installed zone -solaris_zone: name=zone1 state=running +- solaris_zone: + name: zone1 + state: running # Stop a zone -solaris_zone: name=zone1 state=stopped +- solaris_zone: + name: zone1 + state: stopped # Destroy a zone -solaris_zone: name=zone1 state=absent +- solaris_zone: + name: zone1 + state: absent # Detach a zone -solaris_zone: name=zone1 state=detached +- solaris_zone: + name: zone1 + state: detached # Configure a zone, ready to be attached -solaris_zone: name=zone1 state=configured path=/zones/zone1 root_password="Be9oX7OSwWoU." - config='set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end' +- solaris_zone: + name: zone1 + state: configured + path: /zones/zone1 + root_password: Be9oX7OSwWoU. + config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end' # Attach a zone -solaris_zone: name=zone1 state=attached attach_options='-u' +- solaris_zone: + name: zone1 + state: attached + attach_options: '-u' ''' class Zone(object): @@ -219,7 +247,7 @@ def configure_sysid(self): node = open('%s/root/etc/nodename' % self.path, 'w') node.write(self.name) - node.close + node.close() id = open('%s/root/etc/.sysIDtool.state' % self.path, 'w') id.write('1 # System previously configured?\n') @@ -417,9 +445,9 @@ def main(): argument_spec = dict( name = dict(required=True), state = dict(default='present', choices=['running', 'started', 'present', 'installed', 'stopped', 'absent', 'configured', 'detached', 'attached']), - path = dict(defalt=None), + path = dict(default=None), sparse = dict(default=False, type='bool'), - root_password = dict(default=None), + root_password = dict(default=None, no_log=True), timeout = dict(default=600, type='int'), config = dict(default=''), create_options = dict(default=''), @@ -453,4 +481,6 @@ def main(): module.exit_json(changed=zone.changed, msg=', '.join(zone.msg)) from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/system/svc.py b/system/svc.py old mode 100644 new mode 100755 index 9831ce42ea7..378d647bee9 --- a/system/svc.py +++ b/system/svc.py @@ -18,11 +18,15 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see +ANSIBLE_METADATA = {'status': ['stableinterface'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: svc author: "Brian Coca (@bcoca)" -version_added: +version_added: "1.9" short_description: Manage daemontools services. description: - Controls daemontools services on remote hosts using the svc utility. @@ -38,7 +42,7 @@ - C(Started)/C(stopped) are idempotent actions that will not run commands unless necessary. C(restarted) will always bounce the svc (svc -t) and C(killed) will always bounce the svc (svc -k). - C(reloaded) will send a sigusr1 (svc -u). + C(reloaded) will send a sigusr1 (svc -1). C(once) will run a normally downed svc once (svc -o), not really an idempotent operation.
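The documentation above maps each state to a daemontools `svc` flag; collected here as a quick reference (flag semantics per the daemontools svc man page):

    # Which svc flag each module state ends up sending (illustrative summary).
    SVC_FLAGS = {
        'started':   '-u',  # up; supervise restarts it if it dies
        'stopped':   '-d',  # down; sends TERM, then CONT
        'restarted': '-t',  # sends TERM and lets supervise restart it
        'killed':    '-k',  # sends KILL
        'reloaded':  '-1',  # sends USR1
        'once':      '-o',  # run once; not restarted when it stops
    }
    print('svc %s /service/dnscache' % SVC_FLAGS['reloaded'])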
downed: description: @@ -67,26 +71,41 @@ EXAMPLES = ''' # Example action to start svc dnscache, if not running - - svc: name=dnscache state=started + - svc: + name: dnscache + state: started # Example action to stop svc dnscache, if running - - svc: name=dnscache state=stopped + - svc: + name: dnscache + state: stopped # Example action to kill svc dnscache, in all cases - - svc : name=dnscache state=killed + - svc: + name: dnscache + state: killed # Example action to restart svc dnscache, in all cases - - svc : name=dnscache state=restarted + - svc: + name: dnscache + state: restarted # Example action to reload svc dnscache, in all cases - - svc: name=dnscache state=reloaded + - svc: + name: dnscache + state: reloaded # Example using alt svc directory location - - svc: name=dnscache state=reloaded service_dir=/var/service + - svc: + name: dnscache + state: reloaded + service_dir: /var/service ''' import platform import shlex +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.basic import * def _load_dist_subclass(cls, *args, **kwargs): ''' @@ -152,7 +171,8 @@ def enable(self): if os.path.exists(self.src_full): try: os.symlink(self.src_full, self.svc_full) - except OSError, e: + except OSError: + e = get_exception() self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % str(e)) else: self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full) @@ -160,7 +180,8 @@ def disable(self): try: os.unlink(self.svc_full) - except OSError, e: + except OSError: + e = get_exception() self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % str(e)) self.execute_command([self.svc_cmd,'-dx',self.src_full]) @@ -221,7 +242,8 @@ def kill(self): def execute_command(self, cmd): try: (rc, out, err) = self.module.run_command(' '.join(cmd)) - except Exception, e: + except Exception: + e = get_exception() self.module.fail_json(msg="failed to execute: %s" % str(e)) return (rc, out, err) @@ -240,8 +262,8 @@ def main(): argument_spec = dict( name = dict(required=True), state = dict(choices=['started', 'stopped', 'restarted', 'killed', 'reloaded', 'once']), - enabled = dict(required=False, type='bool', choices=BOOLEANS), - downed = dict(required=False, type='bool', choices=BOOLEANS), + enabled = dict(required=False, type='bool'), + downed = dict(required=False, type='bool'), dist = dict(required=False, default='daemontools'), service_dir = dict(required=False, default='/service'), service_src = dict(required=False, default='/etc/service'), @@ -249,6 +271,8 @@ def main(): supports_check_mode=True, ) + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + state = module.params['state'] enabled = module.params['enabled'] downed = module.params['downed'] @@ -265,7 +289,8 @@ def main(): svc.enable() else: svc.disable() - except (OSError, IOError), e: - module.fail_json(msg="Could change service link: %s" % str(e)) + except (OSError, IOError): + e = get_exception() + module.fail_json(msg="Could not change service link: %s" % str(e)) if state is not None and state != svc.state: @@ -282,13 +307,14 @@ def main(): open(d_file, "a").close() else: os.unlink(d_file) - except (OSError, IOError), e: - module.fail_json(msg="Could change downed file: %s " % (str(e))) + except (OSError, IOError): + e = get_exception() + module.fail_json(msg="Could not change downed file: %s" % str(e)) module.exit_json(changed=changed, svc=svc.report()) - -# this is magic, not normal python include -from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/system/timezone.py b/system/timezone.py new
file mode 100644 index 00000000000..7d8d9aef76c --- /dev/null +++ b/system/timezone.py @@ -0,0 +1,467 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Shinichi TAMURA (@tmshn) +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import os +import re +from ansible.module_utils.basic import AnsibleModule, get_platform +from ansible.module_utils.six import iteritems + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: timezone +short_description: Configure timezone setting +description: + - This module configures the timezone setting, both of the system clock + and of the hardware clock. I(Currently only the Linux platform is supported.) + It is recommended to restart C(crond) after changing the timezone, + otherwise cron jobs may run at the wrong time. + It uses the C(timedatectl) command if available. Otherwise, it edits + C(/etc/sysconfig/clock) or C(/etc/timezone) for the system clock, + and uses the C(hwclock) command for the hardware clock. + If you want to set up NTP, use the M(service) module. +version_added: "2.2" +options: + name: + description: + - Name of the timezone for the system clock. + Default is to keep current setting. + required: false + hwclock: + description: + - Whether the hardware clock is in UTC or in local timezone. + Default is to keep current setting. + Note that changing this option is not recommended and may fail + to apply, especially in virtual environments such as AWS. + required: false + aliases: ['rtc'] +author: "Shinichi TAMURA (@tmshn)" +''' + +RETURN = ''' +diff: + description: The differences for the given arguments. + returned: success + type: dictionary + contains: + before: + description: The values before the change + type: dict + after: + description: The values after the change + type: dict +''' + +EXAMPLES = ''' +- name: set timezone to Asia/Tokyo + timezone: + name: Asia/Tokyo +''' + + +class Timezone(object): + """This is a generic Timezone manipulation class that is subclassed based on platform. + + A subclass may wish to override the following action methods: + - get(key, phase) ... get the value from the system at `phase` + - set(key, value) ... set the value to the current system + """ + + def __new__(cls, module): + """Return the platform-specific subclass. + + It does not use load_platform_subclass() because it needs to decide based + on whether the `timedatectl` command exists. + + Args: + module: The AnsibleModule. + """ + if get_platform() == 'Linux': + if module.get_bin_path('timedatectl') is not None: + return super(Timezone, SystemdTimezone).__new__(SystemdTimezone) + else: + return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone) + else: + # Not supported yet + return super(Timezone, Timezone).__new__(Timezone) + + def __init__(self, module): + """Initialize the class. + + Args: + module: The AnsibleModule.
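The `__new__`-based dispatch above picks the platform-specific subclass at construction time. A stripped-down sketch of the same trick (class names here are illustrative, not from the module):

    class Clock(object):
        def __new__(cls, has_timedatectl):
            # Returning a subclass instance from __new__ lets Clock(...)
            # yield the right implementation transparently to the caller.
            if has_timedatectl:
                return super(Clock, cls).__new__(SystemdClock)
            return super(Clock, cls).__new__(LegacyClock)

    class SystemdClock(Clock):
        pass

    class LegacyClock(Clock):
        pass

    print(type(Clock(True)).__name__)   # -> SystemdClock
    print(type(Clock(False)).__name__)  # -> LegacyClock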
+ """ + super(Timezone, self).__init__() + self.msg = [] + # `self.value` holds the values for each params on each phases. + # Initially there's only info of "planned" phase, but the + # `self.check()` function will fill out it. + self.value = dict() + for key in module.argument_spec: + value = module.params[key] + if value is not None: + self.value[key] = dict(planned=value) + self.module = module + + def abort(self, msg): + """Abort the process with error message. + + This is just the wrapper of module.fail_json(). + + Args: + msg: The error message. + """ + error_msg = ['Error message:', msg] + if len(self.msg) > 0: + error_msg.append('Other message(s):') + error_msg.extend(self.msg) + self.module.fail_json(msg='\n'.join(error_msg)) + + def execute(self, *commands, **kwargs): + """Execute the shell command. + + This is just the wrapper of module.run_command(). + + Args: + *commands: The command to execute. + It will be concatenated with single space. + **kwargs: Only 'log' key is checked. + If kwargs['log'] is true, record the command to self.msg. + + Returns: + stdout: Standard output of the command. + """ + command = ' '.join(commands) + (rc, stdout, stderr) = self.module.run_command(command, check_rc=True) + if kwargs.get('log', False): + self.msg.append('executed `%s`' % command) + return stdout + + def diff(self, phase1='before', phase2='after'): + """Calculate the difference between given 2 phases. + + Args: + phase1, phase2: The names of phase to compare. + + Returns: + diff: The difference of value between phase1 and phase2. + This is in the format which can be used with the + `--diff` option of ansible-playbook. + """ + diff = {phase1: {}, phase2: {}} + for key, value in iteritems(self.value): + diff[phase1][key] = value[phase1] + diff[phase2][key] = value[phase2] + return diff + + def check(self, phase): + """Check the state in given phase and set it to `self.value`. + + Args: + phase: The name of the phase to check. + + Returns: + NO RETURN VALUE + """ + if phase == 'planned': + return + for key, value in iteritems(self.value): + value[phase] = self.get(key, phase) + + def change(self): + """Make the changes effect based on `self.value`.""" + for key, value in iteritems(self.value): + if value['before'] != value['planned']: + self.set(key, value['planned']) + + # =========================================== + # Platform specific methods (must be replaced by subclass). + + def get(self, key, phase): + """Get the value for the key at the given phase. + + Called from self.check(). + + Args: + key: The key to get the value + phase: The phase to get the value + + Return: + value: The value for the key at the given phase. + """ + self.abort('get(key, phase) is not implemented on target platform') + + def set(self, key, value): + """Set the value for the key (of course, for the phase 'after'). + + Called from self.change(). + + Args: + key: Key to set the value + value: Value to set + """ + self.abort('set(key, value) is not implemented on target platform') + + def _verify_timezone(self): + tz = self.value['name']['planned'] + tzfile = '/usr/share/zoneinfo/%s' % tz + if not os.path.isfile(tzfile): + self.abort('given timezone "%s" is not available' % tz) + + +class SystemdTimezone(Timezone): + """This is a Timezone manipulation class systemd-powered Linux. + + It uses the `timedatectl` command to check/set all arguments. 
+ """ + + regexps = dict( + hwclock=re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE), + name =re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE) + ) + + subcmds = dict( + hwclock='set-local-rtc', + name ='set-timezone' + ) + + def __init__(self, module): + super(SystemdTimezone, self).__init__(module) + self.timedatectl = module.get_bin_path('timedatectl', required=True) + self.status = dict() + # Validate given timezone + if 'name' in self.value: + self._verify_timezone() + + def _get_status(self, phase): + if phase not in self.status: + self.status[phase] = self.execute(self.timedatectl, 'status') + return self.status[phase] + + def get(self, key, phase): + status = self._get_status(phase) + value = self.regexps[key].search(status).group(1) + if key == 'hwclock': + # For key='hwclock'; convert yes/no -> local/UTC + if self.module.boolean(value): + value = 'local' + else: + value = 'UTC' + return value + + def set(self, key, value): + # For key='hwclock'; convert UTC/local -> yes/no + if key == 'hwclock': + if value == 'local': + value = 'yes' + else: + value = 'no' + self.execute(self.timedatectl, self.subcmds[key], value, log=True) + + +class NosystemdTimezone(Timezone): + """This is a Timezone manipulation class for non systemd-powered Linux. + + For timezone setting, it edits the following file and reflect changes: + - /etc/sysconfig/clock ... RHEL/CentOS + - /etc/timezone ... Debian/Ubuntu + For hwclock setting, it executes `hwclock --systohc` command with the + '--utc' or '--localtime' option. + """ + + conf_files = dict( + name =None, # To be set in __init__ + hwclock=None, # To be set in __init__ + adjtime='/etc/adjtime' + ) + + regexps = dict( + name =None, # To be set in __init__ + hwclock=re.compile(r'^UTC\s*=\s*([^\s]+)', re.MULTILINE), + adjtime=re.compile(r'^(UTC|LOCAL)$', re.MULTILINE) + ) + + def __init__(self, module): + super(NosystemdTimezone, self).__init__(module) + # Validate given timezone + if 'name' in self.value: + self._verify_timezone() + self.update_timezone = self.module.get_bin_path('cp', required=True) + self.update_timezone += ' %s /etc/localtime' % tzfile + self.update_hwclock = self.module.get_bin_path('hwclock', required=True) + # Distribution-specific configurations + if self.module.get_bin_path('dpkg-reconfigure') is not None: + # Debian/Ubuntu + self.update_timezone = self.module.get_bin_path('dpkg-reconfigure', required=True) + self.update_timezone += ' --frontend noninteractive tzdata' + self.conf_files['name'] = '/etc/timezone' + self.conf_files['hwclock'] = '/etc/default/rcS' + self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE) + self.tzline_format = '%s\n' + else: + # RHEL/CentOS + if self.module.get_bin_path('tzdata-update') is not None: + self.update_timezone = self.module.get_bin_path('tzdata-update', required=True) + # else: + # self.update_timezone = 'cp ...' <- configured above + self.conf_files['name'] = '/etc/sysconfig/clock' + self.conf_files['hwclock'] = '/etc/sysconfig/clock' + self.regexps['name'] = re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE) + self.tzline_format = 'ZONE="%s"\n' + self.update_hwclock = self.module.get_bin_path('hwclock', required=True) + + def _edit_file(self, filename, regexp, value): + """Replace the first matched line with given `value`. + + If `regexp` matched more than once, other than the first line will be deleted. + + Args: + filename: The name of the file to edit. + regexp: The regular expression to search with. + value: The line which will be inserted. 
+ """ + # Read the file + try: + file = open(filename, 'r') + except IOError: + self.abort('cannot read "%s"' % filename) + else: + lines = file.readlines() + file.close() + # Find the all matched lines + matched_indices = [] + for i, line in enumerate(lines): + if regexp.search(line): + matched_indices.append(i) + if len(matched_indices) > 0: + insert_line = matched_indices[0] + else: + insert_line = 0 + # Remove all matched lines + for i in matched_indices[::-1]: + del lines[i] + # ...and insert the value + lines.insert(insert_line, value) + # Write the changes + try: + file = open(filename, 'w') + except IOError: + self.abort('cannot write to "%s"' % filename) + else: + file.writelines(lines) + file.close() + self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename)) + + def get(self, key, phase): + if key == 'hwclock' and os.path.isfile('/etc/adjtime'): + # If /etc/adjtime exists, use that file. + key = 'adjtime' + + filename = self.conf_files[key] + + try: + file = open(filename, mode='r') + except IOError: + self.abort('cannot read configuration file "%s" for %s' % (filename, key)) + else: + status = file.read() + file.close() + try: + value = self.regexps[key].search(status).group(1) + except AttributeError: + self.abort('cannot find the valid value from configuration file "%s" for %s' % (filename, key)) + else: + if key == 'hwclock': + # For key='hwclock'; convert yes/no -> UTC/local + if self.module.boolean(value): + value = 'UTC' + else: + value = 'local' + elif key == 'adjtime': + # For key='adjtime'; convert LOCAL -> local + if value != 'UTC': + value = value.lower() + return value + + def set_timezone(self, value): + self._edit_file(filename=self.conf_files['name'], + regexp=self.regexps['name'], + value=self.tzline_format % value) + self.execute(self.update_timezone) + + def set_hwclock(self, value): + if value == 'local': + option = '--localtime' + else: + option = '--utc' + self.execute(self.update_hwclock, '--systohc', option, log=True) + + def set(self, key, value): + if key == 'name': + self.set_timezone(value) + elif key == 'hwclock': + self.set_hwclock(value) + else: + self.abort('unknown parameter "%s"' % key) + + +def main(): + # Construct 'module' and 'tz' + arg_spec = dict( + hwclock=dict(choices=['UTC', 'local'], aliases=['rtc']), + name =dict(), + ) + module = AnsibleModule( + argument_spec=arg_spec, + required_one_of=[arg_spec.keys()], + supports_check_mode=True + ) + tz = Timezone(module) + + # Check the current state + tz.check(phase='before') + if module.check_mode: + diff = tz.diff('before', 'planned') + # In check mode, 'planned' state is treated as 'after' state + diff['after'] = diff.pop('planned') + else: + # Make change + tz.change() + # Check the current state + tz.check(phase='after') + # Examine if the current state matches planned state + (after, planned) = tz.diff('after', 'planned').values() + if after != planned: + tz.abort('still not desired state, though changes have made') + diff = tz.diff('before', 'after') + + changed = (diff['before'] != diff['after']) + if len(tz.msg) > 0: + module.exit_json(changed=changed, diff=diff, msg='\n'.join(tz.msg)) + else: + module.exit_json(changed=changed, diff=diff) + + +if __name__ == '__main__': + main() diff --git a/system/ufw.py b/system/ufw.py index cd148edf2ef..6d381785bc5 100644 --- a/system/ufw.py +++ b/system/ufw.py @@ -21,6 +21,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ufw @@ -125,55 +129,103 @@ EXAMPLES = ''' # Allow everything and enable UFW -ufw: state=enabled policy=allow +- ufw: + state: enabled + policy: allow # Set logging -ufw: logging=on +- ufw: + logging: on # Sometimes it is desirable to let the sender know when traffic is # being denied, rather than simply ignoring it. In these cases, use # reject instead of deny. In addition, log rejected connections: -ufw: rule=reject port=auth log=yes +- ufw: + rule: reject + port: auth + log: yes # ufw supports connection rate limiting, which is useful for protecting # against brute-force login attacks. ufw will deny connections if an IP # address has attempted to initiate 6 or more connections in the last # 30 seconds. See http://www.debian-administration.org/articles/187 # for details. Typical usage is: -ufw: rule=limit port=ssh proto=tcp - -# Allow OpenSSH -ufw: rule=allow name=OpenSSH +- ufw: + rule: limit + port: ssh + proto: tcp + +# Allow OpenSSH. (Note that as ufw manages its own state, simply removing +# a rule=allow task can leave those ports exposed. Either use delete=yes +# or a separate state=reset task) +- ufw: + rule: allow + name: OpenSSH # Delete OpenSSH rule -ufw: rule=allow name=OpenSSH delete=yes +- ufw: + rule: allow + name: OpenSSH + delete: yes # Deny all access to port 53: -ufw: rule=deny port=53 +- ufw: + rule: deny + port: 53 + +# Allow port range 60000-61000 +- ufw: + rule: allow + port: '60000:61000' # Allow all access to tcp port 80: -ufw: rule=allow port=80 proto=tcp +- ufw: + rule: allow + port: 80 + proto: tcp # Allow all access from RFC1918 networks to this host: -ufw: rule=allow src={{ item }} -with_items: -- 10.0.0.0/8 -- 172.16.0.0/12 -- 192.168.0.0/16 +- ufw: + rule: allow + src: '{{ item }}' + with_items: + - 10.0.0.0/8 + - 172.16.0.0/12 + - 192.168.0.0/16 # Deny access to udp port 514 from host 1.2.3.4: -ufw: rule=deny proto=udp src=1.2.3.4 port=514 +- ufw: + rule: deny + proto: udp + src: 1.2.3.4 + port: 514 # Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469 -ufw: rule=allow interface=eth0 direction=in proto=udp src=1.2.3.5 from_port=5469 dest=1.2.3.4 to_port=5469 +- ufw: + rule: allow + interface: eth0 + direction: in + proto: udp + src: 1.2.3.5 + from_port: 5469 + dest: 1.2.3.4 + to_port: 5469 # Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host. # Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work. -ufw: rule=deny proto=tcp src=2001:db8::/32 port=25 +- ufw: + rule: deny + proto: tcp + src: '2001:db8::/32' + port: 25 # Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24. 
# Can be used to further restrict a global FORWARD policy set to allow -ufw: rule=deny route=yes src=1.2.3.0/24 dest=4.5.6.0/24 +- ufw: + rule: deny + route: yes + src: 1.2.3.0/24 + dest: 4.5.6.0/24 ''' from operator import itemgetter @@ -223,7 +275,7 @@ def execute(cmd): if len(commands) < 1: module.fail_json(msg="Not any of the command arguments %s given" % commands) - if('interface' in params and 'direction' not in params): + if(params['interface'] is not None and params['direction'] is None): module.fail_json(msg="Direction must be specified when creating a rule on an interface") # Ensure ufw is available @@ -258,10 +310,11 @@ def execute(cmd): cmd.append([module.boolean(params['route']), 'route']) cmd.append([params['insert'], "insert %s" % params['insert']]) cmd.append([value]) + cmd.append([params['direction'], "%s" % params['direction']]) + cmd.append([params['interface'], "on %s" % params['interface']]) cmd.append([module.boolean(params['log']), 'log']) - for (key, template) in [('direction', "%s" ), ('interface', "on %s" ), - ('from_ip', "from %s" ), ('from_port', "port %s" ), + for (key, template) in [('from_ip', "from %s" ), ('from_port', "port %s" ), ('to_ip', "to %s" ), ('to_port', "port %s" ), ('proto', "proto %s"), ('app', "app '%s'")]: @@ -280,4 +333,5 @@ def execute(cmd): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/system/zfs.py b/system/zfs.py index 51b9db63692..d95971455ed 100644 --- a/system/zfs.py +++ b/system/zfs.py @@ -19,12 +19,16 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: zfs short_description: Manage zfs description: - - Manages ZFS file systems on Solaris and FreeBSD. Can manage file systems, volumes and snapshots. See zfs(1M) for more information about the properties. + - Manages ZFS file systems, volumes, clones and snapshots. version_added: "1.1" options: name: @@ -33,224 +37,104 @@ required: true state: description: - - Whether to create (C(present)), or remove (C(absent)) a file system, snapshot or volume. + - Whether to create (C(present)), or remove (C(absent)) a + file system, snapshot or volume. All parents/children + will be created/destroyed as needed to reach the desired state. + choices: ['present', 'absent'] required: true - choices: [present, absent] - aclinherit: - description: - - The aclinherit property. - required: False - choices: [discard,noallow,restricted,passthrough,passthrough-x] - aclmode: - description: - - The aclmode property. - required: False - choices: [discard,groupmask,passthrough] - atime: - description: - - The atime property. - required: False - choices: ['on','off'] - canmount: - description: - - The canmount property. - required: False - choices: ['on','off','noauto'] - casesensitivity: - description: - - The casesensitivity property. - required: False - choices: [sensitive,insensitive,mixed] - checksum: - description: - - The checksum property. - required: False - choices: ['on','off',fletcher2,fletcher4,sha256] - compression: - description: - - The compression property. - required: False - choices: ['on','off',lzjb,gzip,gzip-1,gzip-2,gzip-3,gzip-4,gzip-5,gzip-6,gzip-7,gzip-8,gzip-9,lz4,zle] - copies: - description: - - The copies property. - required: False - choices: [1,2,3] - dedup: - description: - - The dedup property. - required: False - choices: ['on','off'] - devices: - description: - - The devices property. 
- required: False - choices: ['on','off'] - exec: - description: - - The exec property. - required: False - choices: ['on','off'] - jailed: - description: - - The jailed property. - required: False - choices: ['on','off'] - logbias: - description: - - The logbias property. - required: False - choices: [latency,throughput] - mountpoint: - description: - - The mountpoint property. - required: False - nbmand: - description: - - The nbmand property. - required: False - choices: ['on','off'] - normalization: - description: - - The normalization property. - required: False - choices: [none,formC,formD,formKC,formKD] origin: description: - - Name of the snapshot to clone - required: False - version_added: "2.0" - primarycache: - description: - - The primarycache property. - required: False - choices: [all,none,metadata] - quota: - description: - - The quota property. - required: False - readonly: - description: - - The readonly property. - required: False - choices: ['on','off'] - recordsize: - description: - - The recordsize property. - required: False - refquota: - description: - - The refquota property. - required: False - refreservation: - description: - - The refreservation property. - required: False - reservation: - description: - - The reservation property. - required: False - secondarycache: - description: - - The secondarycache property. - required: False - choices: [all,none,metadata] - setuid: - description: - - The setuid property. - required: False - choices: ['on','off'] - shareiscsi: - description: - - The shareiscsi property. - required: False - choices: ['on','off'] - sharenfs: + - Snapshot from which to create a clone + default: null + required: false + key_value: description: - - The sharenfs property. - required: False - sharesmb: - description: - - The sharesmb property. - required: False - snapdir: - description: - - The snapdir property. - required: False - choices: [hidden,visible] - sync: - description: - - The sync property. - required: False - choices: ['standard','always','disabled'] - utf8only: - description: - - The utf8only property. - required: False - choices: ['on','off'] - volsize: - description: - - The volsize property. - required: False - volblocksize: - description: - - The volblocksize property. - required: False - vscan: - description: - - The vscan property. - required: False - choices: ['on','off'] - xattr: - description: - - The xattr property. - required: False - choices: ['on','off'] - zoned: - description: - - The zoned property. - required: False - choices: ['on','off'] + - The C(zfs) module takes key=value pairs for zfs properties to be set. See the zfs(8) man page for more information. + default: null + required: false + author: "Johan Wiren (@johanwiren)" ''' EXAMPLES = ''' -# Create a new file system called myfs in pool rpool -- zfs: name=rpool/myfs state=present - -# Create a new volume called myvol in pool rpool. -- zfs: name=rpool/myvol state=present volsize=10M +# Create a new file system called myfs in pool rpool with the setuid property turned off +- zfs: + name: rpool/myfs + state: present + setuid: off + +# Create a new volume called myvol in pool rpool. +- zfs: + name: rpool/myvol + state: present + volsize: 10M # Create a snapshot of rpool/myfs file system. 
-- zfs: name=rpool/myfs@mysnapshot state=present +- zfs: + name: rpool/myfs@mysnapshot + state: present # Create a new file system called myfs2 with snapdir enabled -- zfs: name=rpool/myfs2 state=present snapdir=enabled +- zfs: + name: rpool/myfs2 + state: present + snapdir: enabled # Create a new file system by cloning a snapshot -- zfs: name=rpool/cloned_fs state=present origin=rpool/myfs@mysnapshot +- zfs: + name: rpool/cloned_fs + state: present + origin: rpool/myfs@mysnapshot # Destroy a filesystem -- zfs: name=rpool/myfs state=absent +- zfs: + name: rpool/myfs + state: absent ''' import os + class Zfs(object): + def __init__(self, module, name, properties): self.module = module self.name = name self.properties = properties self.changed = False - - self.immutable_properties = [ 'casesensitivity', 'normalization', 'utf8only' ] + self.zfs_cmd = module.get_bin_path('zfs', True) + self.zpool_cmd = module.get_bin_path('zpool', True) + self.pool = name.split('/')[0] + self.is_solaris = os.uname()[0] == 'SunOS' + self.is_openzfs = self.check_openzfs() + self.enhanced_sharing = self.check_enhanced_sharing() + + def check_openzfs(self): + cmd = [self.zpool_cmd] + cmd.extend(['get', 'version']) + cmd.append(self.pool) + (rc, out, err) = self.module.run_command(cmd, check_rc=True) + version = out.splitlines()[-1].split()[2] + if version == '-': + return True + if int(version) == 5000: + return True + return False + + def check_enhanced_sharing(self): + if self.is_solaris and not self.is_openzfs: + cmd = [self.zpool_cmd] + cmd.extend(['get', 'version']) + cmd.append(self.pool) + (rc, out, err) = self.module.run_command(cmd, check_rc=True) + version = out.splitlines()[-1].split()[2] + if int(version) >= 34: + return True + return False def exists(self): - cmd = [self.module.get_bin_path('zfs', True)] - cmd.append('list') - cmd.append('-t all') - cmd.append(self.name) + cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name] (rc, out, err) = self.module.run_command(' '.join(cmd)) if rc == 0: return True @@ -265,6 +149,8 @@ def create(self): volsize = properties.pop('volsize', None) volblocksize = properties.pop('volblocksize', None) origin = properties.pop('origin', None) + cmd = [self.zfs_cmd] + if "@" in self.name: action = 'snapshot' elif origin: @@ -272,135 +158,83 @@ else: action = 'create' - cmd = [self.module.get_bin_path('zfs', True)] cmd.append(action) - if createparent: - cmd.append('-p') + if action in ['create', 'clone']: + cmd += ['-p'] + if volsize: + cmd += ['-V', volsize] if volblocksize: - cmd.append('-b %s' % volblocksize) + cmd += ['-b', volblocksize] if properties: for prop, value in properties.iteritems(): - cmd.append('-o %s="%s"' % (prop, value)) - if volsize: - cmd.append('-V') - cmd.append(volsize) + cmd += ['-o', '%s="%s"' % (prop, value)] if origin: cmd.append(origin) cmd.append(self.name) - (rc, err, out) = self.module.run_command(' '.join(cmd)) + (rc, out, err) = self.module.run_command(' '.join(cmd)) if rc == 0: self.changed = True else: - self.module.fail_json(msg=out) + self.module.fail_json(msg=err) def destroy(self): if self.module.check_mode: self.changed = True return - cmd = [self.module.get_bin_path('zfs', True)] - cmd.append('destroy') - cmd.append(self.name) - (rc, err, out) = self.module.run_command(' '.join(cmd)) + cmd = [self.zfs_cmd, 'destroy', '-R', self.name] + (rc, out, err) = self.module.run_command(' '.join(cmd)) if rc == 0: self.changed = True else: - self.module.fail_json(msg=out) + self.module.fail_json(msg=err) def
set_property(self, prop, value): if self.module.check_mode: self.changed = True return - cmd = self.module.get_bin_path('zfs', True) - args = [cmd, 'set', prop + '=' + value, self.name] - (rc, err, out) = self.module.run_command(args) + cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name] + (rc, out, err) = self.module.run_command(cmd) if rc == 0: self.changed = True else: - self.module.fail_json(msg=out) + self.module.fail_json(msg=err) def set_properties_if_changed(self): current_properties = self.get_current_properties() for prop, value in self.properties.iteritems(): - if current_properties[prop] != value: - if prop in self.immutable_properties: - self.module.fail_json(msg='Cannot change property %s after creation.' % prop) - else: - self.set_property(prop, value) + if current_properties.get(prop, None) != value: + self.set_property(prop, value) def get_current_properties(self): - def get_properties_by_name(propname): - cmd = [self.module.get_bin_path('zfs', True)] - cmd += ['get', '-H', propname, self.name] - rc, out, err = self.module.run_command(cmd) - return [l.split('\t')[1:3] for l in out.splitlines()] - properties = dict(get_properties_by_name('all')) - if 'share.*' in properties: - # Some ZFS pools list the sharenfs and sharesmb properties - # hierarchically as share.nfs and share.smb respectively. - del properties['share.*'] - for p, v in get_properties_by_name('share.all'): - alias = p.replace('.', '') # share.nfs -> sharenfs (etc) - properties[alias] = v + cmd = [self.zfs_cmd, 'get', '-H'] + if self.enhanced_sharing: + cmd += ['-e'] + cmd += ['all', self.name] + rc, out, err = self.module.run_command(" ".join(cmd)) + properties = dict() + for prop, value, source in [l.split('\t')[1:4] for l in out.splitlines()]: + if source == 'local': + properties[prop] = value + # Add alias for enhanced sharing properties + if self.enhanced_sharing: + properties['sharenfs'] = properties.get('share.nfs', None) + properties['sharesmb'] = properties.get('share.smb', None) return properties - def run_command(self, cmd): - progname = cmd[0] - cmd[0] = module.get_bin_path(progname, True) - return module.run_command(cmd) def main(): - # FIXME: should use dict() constructor like other modules, required=False is default module = AnsibleModule( - argument_spec = { - 'name': {'required': True}, - 'state': {'required': True, 'choices':['present', 'absent']}, - 'aclinherit': {'required': False, 'choices':['discard', 'noallow', 'restricted', 'passthrough', 'passthrough-x']}, - 'aclmode': {'required': False, 'choices':['discard', 'groupmask', 'passthrough']}, - 'atime': {'required': False, 'choices':['on', 'off']}, - 'canmount': {'required': False, 'choices':['on', 'off', 'noauto']}, - 'casesensitivity': {'required': False, 'choices':['sensitive', 'insensitive', 'mixed']}, - 'checksum': {'required': False, 'choices':['on', 'off', 'fletcher2', 'fletcher4', 'sha256']}, - 'compression': {'required': False, 'choices':['on', 'off', 'lzjb', 'gzip', 'gzip-1', 'gzip-2', 'gzip-3', 'gzip-4', 'gzip-5', 'gzip-6', 'gzip-7', 'gzip-8', 'gzip-9', 'lz4', 'zle']}, - 'copies': {'required': False, 'choices':['1', '2', '3']}, - 'createparent': {'required': False, 'choices':['on', 'off']}, - 'dedup': {'required': False, 'choices':['on', 'off']}, - 'devices': {'required': False, 'choices':['on', 'off']}, - 'exec': {'required': False, 'choices':['on', 'off']}, - # Not supported - #'groupquota': {'required': False}, - 'jailed': {'required': False, 'choices':['on', 'off']}, - 'logbias': {'required': False, 
'choices':['latency', 'throughput']}, - 'mountpoint': {'required': False}, - 'nbmand': {'required': False, 'choices':['on', 'off']}, - 'normalization': {'required': False, 'choices':['none', 'formC', 'formD', 'formKC', 'formKD']}, - 'origin': {'required': False}, - 'primarycache': {'required': False, 'choices':['all', 'none', 'metadata']}, - 'quota': {'required': False}, - 'readonly': {'required': False, 'choices':['on', 'off']}, - 'recordsize': {'required': False}, - 'refquota': {'required': False}, - 'refreservation': {'required': False}, - 'reservation': {'required': False}, - 'secondarycache': {'required': False, 'choices':['all', 'none', 'metadata']}, - 'setuid': {'required': False, 'choices':['on', 'off']}, - 'shareiscsi': {'required': False, 'choices':['on', 'off']}, - 'sharenfs': {'required': False}, - 'sharesmb': {'required': False}, - 'snapdir': {'required': False, 'choices':['hidden', 'visible']}, - 'sync': {'required': False, 'choices':['standard', 'always', 'disabled']}, - # Not supported - #'userquota': {'required': False}, - 'utf8only': {'required': False, 'choices':['on', 'off']}, - 'volsize': {'required': False}, - 'volblocksize': {'required': False}, - 'vscan': {'required': False, 'choices':['on', 'off']}, - 'xattr': {'required': False, 'choices':['on', 'off']}, - 'zoned': {'required': False, 'choices':['on', 'off']}, - }, - supports_check_mode=True + argument_spec = dict( + name = dict(type='str', required=True), + state = dict(type='str', required=True, choices=['present', 'absent']), + # No longer used. Kept here to not interfere with zfs properties + createparent = dict(type='bool', required=False) + ), + supports_check_mode=True, + check_invalid_arguments=False ) state = module.params.pop('state') @@ -409,10 +243,16 @@ def main(): # Get all valid zfs-properties properties = dict() for prop, value in module.params.iteritems(): - if prop in ['CHECKMODE']: - continue - if value: - properties[prop] = value + # All freestyle params are zfs properties + if prop not in module.argument_spec: + # Reverse the boolification of freestyle zfs properties + if isinstance(value, bool): + if value is True: + properties[prop] = 'on' + else: + properties[prop] = 'off' + else: + properties[prop] = value result = {} result['name'] = name @@ -436,4 +276,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/test-docs.sh b/test-docs.sh deleted file mode 100755 index 76297fbada6..00000000000 --- a/test-docs.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/sh -set -x - -CHECKOUT_DIR=".ansible-checkout" -MOD_REPO="$1" - -# Hidden file to avoid the module_formatter recursing into the checkout -git clone https://github.com/ansible/ansible "$CHECKOUT_DIR" -cd "$CHECKOUT_DIR" -git submodule update --init -rm -rf "lib/ansible/modules/$MOD_REPO" -ln -s "$TRAVIS_BUILD_DIR/" "lib/ansible/modules/$MOD_REPO" - -pip install -U Jinja2 PyYAML setuptools six pycrypto sphinx - -. ./hacking/env-setup -PAGER=/bin/cat bin/ansible-doc -l -if [ $? -ne 0 ] ; then - exit $? 
-fi -make -C docsite diff --git a/test/utils/shippable/ci.sh b/test/utils/shippable/ci.sh new file mode 100755 index 00000000000..5c0f847e661 --- /dev/null +++ b/test/utils/shippable/ci.sh @@ -0,0 +1,7 @@ +#!/bin/bash -eux + +set -o pipefail + +source_root=$(python -c "from os import path; print(path.abspath(path.join(path.dirname('$0'), '../../..')))") + +"${source_root}/test/utils/shippable/${TEST}.sh" 2>&1 | gawk '{ print strftime("%Y-%m-%d %H:%M:%S"), $0; fflush(); }' diff --git a/test/utils/shippable/docs-requirements.txt b/test/utils/shippable/docs-requirements.txt new file mode 100644 index 00000000000..4e859bb8c71 --- /dev/null +++ b/test/utils/shippable/docs-requirements.txt @@ -0,0 +1,2 @@ +jinja2 +pyyaml diff --git a/test/utils/shippable/docs.sh b/test/utils/shippable/docs.sh new file mode 100755 index 00000000000..2858f87c997 --- /dev/null +++ b/test/utils/shippable/docs.sh @@ -0,0 +1,62 @@ +#!/bin/bash -eux + +set -o pipefail + +ansible_repo_url="https://github.com/ansible/ansible.git" + +build_dir="${SHIPPABLE_BUILD_DIR}" +repo="${REPO_NAME}" + +case "${repo}" in + "ansible-modules-core") + this_module_group="core" + other_module_group="extras" + ;; + "ansible-modules-extras") + this_module_group="extras" + other_module_group="core" + ;; + *) + echo "Unsupported repo name: ${repo}" + exit 1 + ;; +esac + +modules_tmp_dir="${build_dir}.tmp" +this_modules_dir="${build_dir}/lib/ansible/modules/${this_module_group}" +other_modules_dir="${build_dir}/lib/ansible/modules/${other_module_group}" + +cd / +mv "${build_dir}" "${modules_tmp_dir}" +git clone "${ansible_repo_url}" "${build_dir}" +cd "${build_dir}" +rmdir "${this_modules_dir}" +mv "${modules_tmp_dir}" "${this_modules_dir}" +mv "${this_modules_dir}/shippable" "${build_dir}" +git submodule init "${other_modules_dir}" +git submodule sync "${other_modules_dir}" +git submodule update "${other_modules_dir}" + +pip install -r lib/ansible/modules/${this_module_group}/test/utils/shippable/docs-requirements.txt --upgrade +pip list + +source hacking/env-setup + +docs_status=0 + +PAGER=/bin/cat \ + ANSIBLE_DEPRECATION_WARNINGS=false \ + bin/ansible-doc -l \ + 2>/tmp/ansible-doc.err || docs_status=$? + +if [ -s /tmp/ansible-doc.err ]; then + # report warnings as errors + echo "Output from 'ansible-doc -l' on stderr is considered an error:" + cat /tmp/ansible-doc.err + exit 1 +fi + +if [ "${docs_status}" -ne 0 ]; then + echo "Running 'ansible-doc -l' failed with no output on stderr and exit code: ${docs_status}" + exit 1 +fi diff --git a/test/utils/shippable/integration.sh b/test/utils/shippable/integration.sh new file mode 100755 index 00000000000..cf10e681bfb --- /dev/null +++ b/test/utils/shippable/integration.sh @@ -0,0 +1,55 @@ +#!/bin/bash -eux + +set -o pipefail + +ansible_repo_url="https://github.com/ansible/ansible.git" + +is_pr="${IS_PULL_REQUEST}" +build_dir="${SHIPPABLE_BUILD_DIR}" +repo="${REPO_NAME}" + +if [ "${is_pr}" != "true" ]; then + echo "Module integration tests are only supported on pull requests." 
+ exit 0 +fi + +case "${repo}" in + "ansible-modules-core") + this_module_group="core" + other_module_group="extras" + ;; + "ansible-modules-extras") + this_module_group="extras" + other_module_group="core" + ;; + *) + echo "Unsupported repo name: ${repo}" + exit 1 + ;; +esac + +modules_tmp_dir="${build_dir}.tmp" +this_modules_dir="${build_dir}/lib/ansible/modules/${this_module_group}" +other_modules_dir="${build_dir}/lib/ansible/modules/${other_module_group}" + +cd / +mv "${build_dir}" "${modules_tmp_dir}" +git clone "${ansible_repo_url}" "${build_dir}" +cd "${build_dir}" +rmdir "${this_modules_dir}" +mv "${modules_tmp_dir}" "${this_modules_dir}" +mv "${this_modules_dir}/shippable" "${build_dir}" +git submodule init "${other_modules_dir}" +git submodule sync "${other_modules_dir}" +git submodule update "${other_modules_dir}" + +pip install -r test/utils/shippable/modules/generate-tests-requirements.txt --upgrade +pip list + +source hacking/env-setup + +test/utils/shippable/modules/generate-tests "${this_module_group}" --verbose --output /tmp/integration.sh >/dev/null + +if [ -f /tmp/integration.sh ]; then + /bin/bash -eux /tmp/integration.sh +fi diff --git a/test/utils/shippable/sanity-skip-python24.txt b/test/utils/shippable/sanity-skip-python24.txt new file mode 100644 index 00000000000..cf392501c6f --- /dev/null +++ b/test/utils/shippable/sanity-skip-python24.txt @@ -0,0 +1,16 @@ +/cloud/ +/clustering/consul.*.py +/clustering/znode.py +/database/influxdb/ +/database/mssql/ +/monitoring/zabbix.*.py +/network/f5/ +/notification/pushbullet.py +/packaging/language/maven_artifact.py +/packaging/os/dnf.py +/packaging/os/layman.py +/remote_management/ipmi/ +/univention/ +/web_infrastructure/letsencrypt.py +/infrastructure/foreman/ +/network/nmcli.py diff --git a/test/utils/shippable/sanity.sh b/test/utils/shippable/sanity.sh new file mode 100755 index 00000000000..8c1453022e7 --- /dev/null +++ b/test/utils/shippable/sanity.sh @@ -0,0 +1,41 @@ +#!/bin/bash -eux + +source_root=$(python -c "from os import path; print(path.abspath(path.join(path.dirname('$0'), '../../..')))") + +install_deps="${INSTALL_DEPS:-}" + +cd "${source_root}" + +# FIXME REPOMERGE: No need to checkout ansible +build_dir=$(mktemp -d) +trap 'rm -rf "${build_dir}"' EXIT + +git clone "https://github.com/ansible/ansible.git" "${build_dir}" --recursive +source "${build_dir}/hacking/env-setup" +# REPOMERGE: END + +if [ "${install_deps}" != "" ]; then + add-apt-repository ppa:fkrull/deadsnakes + apt-add-repository 'deb http://archive.ubuntu.com/ubuntu trusty-backports universe' + apt-get update -qq + + apt-get install -qq shellcheck python2.4 + + # Install dependencies for ansible and validate_modules + pip install -r "${build_dir}/test/utils/shippable/sanity-requirements.txt" --upgrade + pip list + +fi + +validate_modules="${build_dir}/test/sanity/validate-modules/validate-modules" + +python2.4 -m compileall -fq -x "($(printf %s "$(< "test/utils/shippable/sanity-skip-python24.txt"))" | tr '\n' '|')" . +python2.6 -m compileall -fq . +python2.7 -m compileall -fq . +python3.5 -m compileall -fq . + +ANSIBLE_DEPRECATION_WARNINGS=false \ + "${validate_modules}" --exclude '/utilities/|/shippable(/|$)' . 
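The compileall invocation above folds `sanity-skip-python24.txt` into a single alternation for the `-x` exclude option; the same transformation sketched in Python (only a few skip entries shown, not the full file):

    skips = [
        '/cloud/',
        '/clustering/consul.*.py',
        '/packaging/os/dnf.py',
    ]
    exclude = '(%s)' % '|'.join(skips)  # what the tr '\n' '|' pipeline builds
    print(exclude)  # -> (/cloud/|/clustering/consul.*.py|/packaging/os/dnf.py)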
+ + shellcheck \ + test/utils/shippable/*.sh diff --git a/univention/__init__.py b/univention/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/univention/udm_dns_record.py b/univention/udm_dns_record.py new file mode 100644 index 00000000000..92cea504948 --- /dev/null +++ b/univention/udm_dns_record.py @@ -0,0 +1,188 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +# Copyright (c) 2016, Adfinis SyGroup AG +# Tobias Rueetschi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.univention_umc import ( + umc_module_for_add, + umc_module_for_edit, + ldap_search, + base_dn, + config, + uldap, +) +from univention.admin.handlers.dns import ( + forward_zone, + reverse_zone, +) + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: udm_dns_record +version_added: "2.2" +author: "Tobias Rueetschi (@2-B)" +short_description: Manage DNS entries on a Univention Corporate Server +description: + - "This module allows you to manage DNS records on a Univention Corporate Server (UCS). + It uses the Python API of the UCS to create a new object or edit an existing one." +requirements: + - Python >= 2.6 +options: + state: + required: false + default: "present" + choices: [ present, absent ] + description: + - Whether the DNS record is present or not. + name: + required: true + description: + - "Name of the record; this is also the DNS record. E.g. C(www) for + www.example.com." + zone: + required: true + description: + - Corresponding DNS zone for this record, e.g. example.com. + type: + required: true + choices: [ host_record, alias, ptr_record, srv_record, txt_record ] + description: + - "Define the record type. C(host_record) is an A or AAAA record, + C(alias) is a CNAME, C(ptr_record) is a PTR record, C(srv_record) + is a SRV record and C(txt_record) is a TXT record." + data: + required: false + default: [] + description: + - "Additional data for this record, e.g. {'a': '192.0.2.1'}. + Required if C(state=present)."
+''' + + +EXAMPLES = ''' +# Create a DNS record on a UCS +- udm_dns_record: + name: www + zone: example.com + type: host_record + data: + a: 192.0.2.1 +''' + + +RETURN = '''# ''' + + +def main(): + module = AnsibleModule( + argument_spec = dict( + type = dict(required=True, + type='str'), + zone = dict(required=True, + type='str'), + name = dict(required=True, + type='str'), + data = dict(default=[], + type='dict'), + state = dict(default='present', + choices=['present', 'absent'], + type='str') + ), + supports_check_mode=True, + required_if = ([ + ('state', 'present', ['data']) + ]) + ) + type = module.params['type'] + zone = module.params['zone'] + name = module.params['name'] + data = module.params['data'] + state = module.params['state'] + changed = False + diff = None + + obj = list(ldap_search( + '(&(objectClass=dNSZone)(zoneName={})(relativeDomainName={}))'.format(zone, name), + attr=['dNSZone'] + )) + + exists = bool(len(obj)) + container = 'zoneName={},cn=dns,{}'.format(zone, base_dn()) + dn = 'relativeDomainName={},{}'.format(name, container) + + if state == 'present': + try: + if not exists: + so = forward_zone.lookup( + config(), + uldap(), + '(zone={})'.format(zone), + scope='domain', + ) or reverse_zone.lookup( + config(), + uldap(), + '(zone={})'.format(zone), + scope='domain', + ) + obj = umc_module_for_add('dns/{}'.format(type), container, superordinate=so[0]) + else: + obj = umc_module_for_edit('dns/{}'.format(type), dn) + obj['name'] = name + for k, v in data.items(): + obj[k] = v + diff = obj.diff() + changed = diff != [] + if not module.check_mode: + if not exists: + obj.create() + else: + obj.modify() + except BaseException as e: + module.fail_json( + msg='Creating/editing dns entry {} in {} failed: {}'.format(name, container, e) + ) + + if state == 'absent' and exists: + try: + obj = umc_module_for_edit('dns/{}'.format(type), dn) + if not module.check_mode: + obj.remove() + changed = True + except BaseException as e: + module.fail_json( + msg='Removing dns entry {} in {} failed: {}'.format(name, container, e) + ) + + module.exit_json( + changed=changed, + name=name, + diff=diff, + container=container + ) + + +if __name__ == '__main__': + main() diff --git a/univention/udm_dns_zone.py b/univention/udm_dns_zone.py new file mode 100644 index 00000000000..2d7bbd09070 --- /dev/null +++ b/univention/udm_dns_zone.py @@ -0,0 +1,247 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +# Copyright (c) 2016, Adfinis SyGroup AG +# Tobias Rueetschi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see .
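How udm_dns_record assembles the DN it edits can be seen standalone; the base DN below is a stand-in, since on a real UCS host it comes from the `base_dn()` helper:

    zone, name = 'example.com', 'www'
    base = 'dc=example,dc=com'  # illustrative; normally supplied by base_dn()
    container = 'zoneName={},cn=dns,{}'.format(zone, base)
    dn = 'relativeDomainName={},{}'.format(name, container)
    print(dn)  # relativeDomainName=www,zoneName=example.com,cn=dns,dc=example,dc=com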
+#
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.univention_umc import (
+    umc_module_for_add,
+    umc_module_for_edit,
+    ldap_search,
+    base_dn,
+)
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: udm_dns_zone
+version_added: "2.2"
+author: "Tobias Rueetschi (@2-B)"
+short_description: Manage DNS zones on a Univention Corporate Server
+description:
+    - "This module allows you to manage DNS zones on a Univention Corporate
+       Server (UCS). It uses the Python API of the UCS to create a new object
+       or edit an existing one."
+requirements:
+    - Python >= 2.6
+options:
+    state:
+        required: false
+        default: "present"
+        choices: [ present, absent ]
+        description:
+            - Whether the DNS zone is present or not.
+    type:
+        required: true
+        choices: [ forward_zone, reverse_zone ]
+        description:
+            - Define whether the zone is a forward or reverse DNS zone.
+    zone:
+        required: true
+        description:
+            - DNS zone name, e.g. C(example.com).
+    nameserver:
+        required: false
+        description:
+            - List of appropriate name servers. Required if C(state=present).
+    interfaces:
+        required: false
+        description:
+            - "List of interface IP addresses on which the server should
+               respond to requests for this zone. Required if C(state=present)."
+    refresh:
+        required: false
+        default: 3600
+        description:
+            - Interval (in seconds) before the zone should be refreshed.
+    retry:
+        required: false
+        default: 1800
+        description:
+            - Interval (in seconds) that should elapse before a failed refresh is retried.
+    expire:
+        required: false
+        default: 604800
+        description:
+            - Upper limit (in seconds) on the time interval that can elapse before the zone is no longer authoritative.
+    ttl:
+        required: false
+        default: 600
+        description:
+            - Minimum TTL field that should be exported with any RR from this zone.
+    contact:
+        required: false
+        default: ''
+        description:
+            - Contact person in the SOA record.
+    mx:
+        required: false
+        default: []
+        description:
+            - List of MX servers. (Must be declared as A or AAAA records.)
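+notes:
+    - "The interval options C(refresh), C(retry), C(expire) and C(ttl) are
+       given in seconds; the module converts each value into the largest
+       whole unit (days, hours, minutes or seconds) before writing it to
+       the zone object (see the convert_time() helper below)."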
+'''
+
+
+EXAMPLES = '''
+# Create a DNS zone on a UCS
+- udm_dns_zone:
+    zone: example.com
+    type: forward_zone
+    nameserver:
+      - ucs.example.com
+    interfaces:
+      - 192.0.2.1
+'''
+
+
+RETURN = '''# '''
+
+
+def convert_time(time):
+    """Convert a time in seconds into the biggest unit"""
+    units = [
+        (24 * 60 * 60, 'days'),
+        (60 * 60, 'hours'),
+        (60, 'minutes'),
+        (1, 'seconds'),
+    ]
+
+    if time == 0:
+        return ('0', 'seconds')
+    for unit in units:
+        if time >= unit[0]:
+            return ('{}'.format(time // unit[0]), unit[1])
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec = dict(
+            type = dict(required=True, type='str'),
+            zone = dict(required=True, aliases=['name'], type='str'),
+            nameserver = dict(default=[], type='list'),
+            interfaces = dict(default=[], type='list'),
+            refresh = dict(default=3600, type='int'),
+            retry = dict(default=1800, type='int'),
+            expire = dict(default=604800, type='int'),
+            ttl = dict(default=600, type='int'),
+            contact = dict(default='', type='str'),
+            mx = dict(default=[], type='list'),
+            state = dict(default='present', choices=['present', 'absent'], type='str')
+        ),
+        supports_check_mode=True,
+        required_if = ([
+            ('state', 'present', ['nameserver', 'interfaces'])
+        ])
+    )
+    type = module.params['type']
+    zone = module.params['zone']
+    nameserver = module.params['nameserver']
+    interfaces = module.params['interfaces']
+    refresh = module.params['refresh']
+    retry = module.params['retry']
+    expire = module.params['expire']
+    ttl = module.params['ttl']
+    contact = module.params['contact']
+    mx = module.params['mx']
+    state = module.params['state']
+    changed = False
+    diff = []
+
+    obj = list(ldap_search(
+        '(&(objectClass=dNSZone)(zoneName={}))'.format(zone),
+        attr=['dNSZone']
+    ))
+
+    exists = bool(len(obj))
+    container = 'cn=dns,{}'.format(base_dn())
+    dn = 'zoneName={},{}'.format(zone, container)
+    if contact == '':
+        contact = 'root@{}.'.format(zone)
+
+    if state == 'present':
+        try:
+            if not exists:
+                obj = umc_module_for_add('dns/{}'.format(type), container)
+            else:
+                obj = umc_module_for_edit('dns/{}'.format(type), dn)
+            obj['zone'] = zone
+            obj['nameserver'] = nameserver
+            obj['a'] = interfaces
+            obj['refresh'] = convert_time(refresh)
+            obj['retry'] = convert_time(retry)
+            obj['expire'] = convert_time(expire)
+            obj['ttl'] = convert_time(ttl)
+            obj['contact'] = contact
+            obj['mx'] = mx
+            diff = obj.diff()
+            if exists:
+                for k in obj.keys():
+                    if obj.hasChanged(k):
+                        changed = True
+            else:
+                changed = True
+            if not module.check_mode:
+                if not exists:
+                    obj.create()
+                elif changed:
+                    obj.modify()
+        except Exception as e:
+            module.fail_json(
+                msg='Creating/editing dns zone {} failed: {}'.format(zone, e)
+            )
+
+    if state == 'absent' and exists:
+        try:
+            obj = umc_module_for_edit('dns/{}'.format(type), dn)
+            if not module.check_mode:
+                obj.remove()
+            changed = True
+        except Exception as e:
+            module.fail_json(
+                msg='Removing dns zone {} failed: {}'.format(zone, e)
+            )
+
+    module.exit_json(
+        changed=changed,
+        diff=diff,
+        zone=zone
+    )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/univention/udm_group.py b/univention/udm_group.py
new file mode 100644
index 00000000000..82ef43faef5
--- /dev/null
+++ b/univention/udm_group.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.univention_umc import (
+    umc_module_for_add,
+    umc_module_for_edit,
+    ldap_search,
+    base_dn,
+)
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: udm_group
+version_added: "2.2"
+author: "Tobias Rueetschi (@2-B)"
+short_description: Manage POSIX groups on a Univention Corporate Server
+description:
+    - "This module allows you to manage POSIX user groups on a Univention
+       Corporate Server (UCS). It uses the Python API of the UCS to create
+       a new object or edit an existing one."
+requirements:
+    - Python >= 2.6
+options:
+    state:
+        required: false
+        default: "present"
+        choices: [ present, absent ]
+        description:
+            - Whether the group is present or not.
+    name:
+        required: true
+        description:
+            - Name of the POSIX group.
+    description:
+        required: false
+        description:
+            - Group description.
+    position:
+        required: false
+        description:
+            - "Define the whole LDAP position of the group, e.g.
+               C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com)."
+    ou:
+        required: false
+        description:
+            - "LDAP OU, e.g. C(school) for the LDAP OU
+               C(ou=school,dc=example,dc=com)."
+    subpath:
+        required: false
+        description:
+            - Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups).
+'''
+
+
+EXAMPLES = '''
+# Create a POSIX group
+- udm_group:
+    name: g123m-1A
+
+# Create a POSIX group with the exact DN
+# C(cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com)
+- udm_group:
+    name: g123m-1A
+    subpath: 'cn=classes,cn=students,cn=groups'
+    ou: school
+# or
+- udm_group:
+    name: g123m-1A
+    position: 'cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com'
+'''
+
+
+RETURN = '''# '''
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec = dict(
+            name = dict(required=True, type='str'),
+            description = dict(default=None, type='str'),
+            position = dict(default='', type='str'),
+            ou = dict(default='', type='str'),
+            subpath = dict(default='cn=groups', type='str'),
+            state = dict(default='present', choices=['present', 'absent'], type='str')
+        ),
+        supports_check_mode=True
+    )
+    name = module.params['name']
+    description = module.params['description']
+    position = module.params['position']
+    ou = module.params['ou']
+    subpath = module.params['subpath']
+    state = module.params['state']
+    changed = False
+    diff = []
+
+    groups = list(ldap_search(
+        '(&(objectClass=posixGroup)(cn={}))'.format(name),
+        attr=['cn']
+    ))
+    if position != '':
+        container = position
+    else:
+        if ou != '':
+            ou = 'ou={},'.format(ou)
+        if subpath != '':
+            subpath = '{},'.format(subpath)
+        container = '{}{}{}'.format(subpath, ou, base_dn())
+    group_dn = 'cn={},{}'.format(name, container)
+
+    exists = bool(len(groups))
+
+    if state == 'present':
+        try:
+            if not exists:
+                grp = umc_module_for_add('groups/group', container)
+            else:
+                grp = umc_module_for_edit('groups/group', group_dn)
+            grp['name'] = name
+            grp['description'] = description
+            diff = grp.diff()
+            changed = diff != []
+            if not
module.check_mode: + if not exists: + grp.create() + else: + grp.modify() + except: + module.fail_json( + msg="Creating/editing group {} in {} failed".format(name, container) + ) + + if state == 'absent' and exists: + try: + grp = umc_module_for_edit('groups/group', group_dn) + if not module.check_mode: + grp.remove() + changed = True + except: + module.fail_json( + msg="Removing group {} failed".format(name) + ) + + module.exit_json( + changed=changed, + name=name, + diff=diff, + container=container + ) + + +if __name__ == '__main__': + main() diff --git a/univention/udm_share.py b/univention/udm_share.py new file mode 100644 index 00000000000..7cb472c3141 --- /dev/null +++ b/univention/udm_share.py @@ -0,0 +1,622 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +# Copyright (c) 2016, Adfinis SyGroup AG +# Tobias Rueetschi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.univention_umc import ( + umc_module_for_add, + umc_module_for_edit, + ldap_search, + base_dn, +) + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: udm_share +version_added: "2.2" +author: "Tobias Rueetschi (@2-B)" +short_description: Manage samba shares on a univention corporate server +description: + - "This module allows to manage samba shares on a univention corporate + server (UCS). + It uses the python API of the UCS to create a new object or edit it." +requirements: + - Python >= 2.6 +options: + state: + required: false + default: "present" + choices: [ present, absent ] + description: + - Whether the share is present or not. + name: + required: true + description: + - Name + host: + required: false + default: None + description: + - Host FQDN (server which provides the share), e.g. C({{ + ansible_fqdn }}). Required if C(state=present). + path: + required: false + default: None + description: + - Directory on the providing server, e.g. C(/home). Required if C(state=present). + samba_name: + required: false + default: None + description: + - Windows name. Required if C(state=present). + aliases: [ sambaName ] + ou: + required: true + description: + - Organisational unit, inside the LDAP Base DN. + owner: + required: false + default: 0 + description: + - Directory owner of the share's root directory. + group: + required: false + default: '0' + description: + - Directory owner group of the share's root directory. + directorymode: + required: false + default: '00755' + description: + - Permissions for the share's root directory. + root_squash: + required: false + default: '1' + choices: [ '0', '1' ] + description: + - Modify user ID for root user (root squashing). + subtree_checking: + required: false + default: '1' + choices: [ '0', '1' ] + description: + - Subtree checking. + sync: + required: false + default: 'sync' + description: + - NFS synchronisation. 
+ writeable: + required: false + default: '1' + choices: [ '0', '1' ] + description: + - NFS write access. + samba_block_size: + required: false + default: None + description: + - Blocking size. + aliases: [ sambaBlockSize ] + samba_blocking_locks: + required: false + default: '1' + choices: [ '0', '1' ] + description: + - Blocking locks. + aliases: [ sambaBlockingLocks ] + samba_browseable: + required: false + default: '1' + choices: [ '0', '1' ] + description: + - Show in Windows network environment. + aliases: [ sambaBrowseable ] + samba_create_mode: + required: false + default: '0744' + description: + - File mode. + aliases: [ sambaCreateMode ] + samba_csc_policy: + required: false + default: 'manual' + description: + - Client-side caching policy. + aliases: [ sambaCscPolicy ] + samba_custom_settings: + required: false + default: [] + description: + - Option name in smb.conf and its value. + aliases: [ sambaCustomSettings ] + samba_directory_mode: + required: false + default: '0755' + description: + - Directory mode. + aliases: [ sambaDirectoryMode ] + samba_directory_security_mode: + required: false + default: '0777' + description: + - Directory security mode. + aliases: [ sambaDirectorySecurityMode ] + samba_dos_filemode: + required: false + default: '0' + choices: [ '0', '1' ] + description: + - Users with write access may modify permissions. + aliases: [ sambaDosFilemode ] + samba_fake_oplocks: + required: false + default: '0' + choices: [ '0', '1' ] + description: + - Fake oplocks. + aliases: [ sambaFakeOplocks ] + samba_force_create_mode: + required: false + default: '0' + choices: [ '0', '1' ] + description: + - Force file mode. + aliases: [ sambaForceCreateMode ] + samba_force_directory_mode: + required: false + default: '0' + choices: [ '0', '1' ] + description: + - Force directory mode. + aliases: [ sambaForceDirectoryMode ] + samba_force_directory_security_mode: + required: false + default: '0' + choices: [ '0', '1' ] + description: + - Force directory security mode. + aliases: [ sambaForceDirectorySecurityMode ] + samba_force_group: + required: false + default: None + description: + - Force group. + aliases: [ sambaForceGroup ] + samba_force_security_mode: + required: false + default: '0' + choices: [ '0', '1' ] + description: + - Force security mode. + aliases: [ sambaForceSecurityMode ] + samba_force_user: + required: false + default: None + description: + - Force user. + aliases: [ sambaForceUser ] + samba_hide_files: + required: false + default: None + description: + - Hide files. + aliases: [ sambaHideFiles ] + samba_hide_unreadable: + required: false + default: '0' + choices: [ '0', '1' ] + description: + - Hide unreadable files/directories. + aliases: [ sambaHideUnreadable ] + samba_hosts_allow: + required: false + default: [] + description: + - Allowed host/network. + aliases: [ sambaHostsAllow ] + samba_hosts_deny: + required: false + default: [] + description: + - Denied host/network. + aliases: [ sambaHostsDeny ] + samba_inherit_acls: + required: false + default: '1' + choices: [ '0', '1' ] + description: + - Inherit ACLs. + aliases: [ sambaInheritAcls ] + samba_inherit_owner: + required: false + default: '0' + choices: [ '0', '1' ] + description: + - Create files/directories with the owner of the parent directory. + aliases: [ sambaInheritOwner ] + samba_inherit_permissions: + required: false + default: '0' + choices: [ '0', '1' ] + description: + - Create files/directories with permissions of the parent directory. 
+ aliases: [ sambaInheritPermissions ] + samba_invalid_users: + required: false + default: None + description: + - Invalid users or groups. + aliases: [ sambaInvalidUsers ] + samba_level_2_oplocks: + required: false + default: '1' + choices: [ '0', '1' ] + description: + - Level 2 oplocks. + aliases: [ sambaLevel2Oplocks ] + samba_locking: + required: false + default: '1' + choices: [ '0', '1' ] + description: + - Locking. + aliases: [ sambaLocking ] + samba_msdfs_root: + required: false + default: '0' + choices: [ '0', '1' ] + description: + - MSDFS root. + aliases: [ sambaMSDFSRoot ] + samba_nt_acl_support: + required: false + default: '1' + choices: [ '0', '1' ] + description: + - NT ACL support. + aliases: [ sambaNtAclSupport ] + samba_oplocks: + required: false + default: '1' + choices: [ '0', '1' ] + description: + - Oplocks. + aliases: [ sambaOplocks ] + samba_postexec: + required: false + default: None + description: + - Postexec script. + aliases: [ sambaPostexec ] + samba_preexec: + required: false + default: None + description: + - Preexec script. + aliases: [ sambaPreexec ] + samba_public: + required: false + default: '0' + choices: [ '0', '1' ] + description: + - Allow anonymous read-only access with a guest user. + aliases: [ sambaPublic ] + samba_security_mode: + required: false + default: '0777' + description: + - Security mode. + aliases: [ sambaSecurityMode ] + samba_strict_locking: + required: false + default: 'Auto' + description: + - Strict locking. + aliases: [ sambaStrictLocking ] + samba_vfs_objects: + required: false + default: None + description: + - VFS objects. + aliases: [ sambaVFSObjects ] + samba_valid_users: + required: false + default: None + description: + - Valid users or groups. + aliases: [ sambaValidUsers ] + samba_write_list: + required: false + default: None + description: + - Restrict write access to these users/groups. + aliases: [ sambaWriteList ] + samba_writeable: + required: false + default: '1' + choices: [ '0', '1' ] + description: + - Samba write access. + aliases: [ sambaWriteable ] + nfs_hosts: + required: false + default: [] + description: + - Only allow access for this host, IP address or network. + nfs_custom_settings: + required: false + default: [] + description: + - Option name in exports file. + aliases: [ nfsCustomSettings ] +''' + + +EXAMPLES = ''' +# Create a share named home on the server ucs.example.com with the path /home. 
+- udm_share: + name: home + path: /home + host: ucs.example.com + sambaName: Home +''' + + +RETURN = '''# ''' + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, + type='str'), + ou = dict(required=True, + type='str'), + owner = dict(type='str', + default='0'), + group = dict(type='str', + default='0'), + path = dict(type='path', + default=None), + directorymode = dict(type='str', + default='00755'), + host = dict(type='str', + default=None), + root_squash = dict(type='bool', + default=True), + subtree_checking = dict(type='bool', + default=True), + sync = dict(type='str', + default='sync'), + writeable = dict(type='bool', + default=True), + sambaBlockSize = dict(type='str', + aliases=['samba_block_size'], + default=None), + sambaBlockingLocks = dict(type='bool', + aliases=['samba_blocking_locks'], + default=True), + sambaBrowseable = dict(type='bool', + aliases=['samba_browsable'], + default=True), + sambaCreateMode = dict(type='str', + aliases=['samba_create_mode'], + default='0744'), + sambaCscPolicy = dict(type='str', + aliases=['samba_csc_policy'], + default='manual'), + sambaCustomSettings = dict(type='list', + aliases=['samba_custom_settings'], + default=[]), + sambaDirectoryMode = dict(type='str', + aliases=['samba_directory_mode'], + default='0755'), + sambaDirectorySecurityMode = dict(type='str', + aliases=['samba_directory_security_mode'], + default='0777'), + sambaDosFilemode = dict(type='bool', + aliases=['samba_dos_filemode'], + default=False), + sambaFakeOplocks = dict(type='bool', + aliases=['samba_fake_oplocks'], + default=False), + sambaForceCreateMode = dict(type='bool', + aliases=['samba_force_create_mode'], + default=False), + sambaForceDirectoryMode = dict(type='bool', + aliases=['samba_force_directory_mode'], + default=False), + sambaForceDirectorySecurityMode = dict(type='bool', + aliases=['samba_force_directory_security_mode'], + default=False), + sambaForceGroup = dict(type='str', + aliases=['samba_force_group'], + default=None), + sambaForceSecurityMode = dict(type='bool', + aliases=['samba_force_security_mode'], + default=False), + sambaForceUser = dict(type='str', + aliases=['samba_force_user'], + default=None), + sambaHideFiles = dict(type='str', + aliases=['samba_hide_files'], + default=None), + sambaHideUnreadable = dict(type='bool', + aliases=['samba_hide_unreadable'], + default=False), + sambaHostsAllow = dict(type='list', + aliases=['samba_hosts_allow'], + default=[]), + sambaHostsDeny = dict(type='list', + aliases=['samba_hosts_deny'], + default=[]), + sambaInheritAcls = dict(type='bool', + aliases=['samba_inherit_acls'], + default=True), + sambaInheritOwner = dict(type='bool', + aliases=['samba_inherit_owner'], + default=False), + sambaInheritPermissions = dict(type='bool', + aliases=['samba_inherit_permissions'], + default=False), + sambaInvalidUsers = dict(type='str', + aliases=['samba_invalid_users'], + default=None), + sambaLevel2Oplocks = dict(type='bool', + aliases=['samba_level_2_oplocks'], + default=True), + sambaLocking = dict(type='bool', + aliases=['samba_locking'], + default=True), + sambaMSDFSRoot = dict(type='bool', + aliases=['samba_msdfs_root'], + default=False), + sambaName = dict(type='str', + aliases=['samba_name'], + default=None), + sambaNtAclSupport = dict(type='bool', + aliases=['samba_nt_acl_support'], + default=True), + sambaOplocks = dict(type='bool', + aliases=['samba_oplocks'], + default=True), + sambaPostexec = dict(type='str', + aliases=['samba_postexec'], + default=None), + 
sambaPreexec = dict(type='str', + aliases=['samba_preexec'], + default=None), + sambaPublic = dict(type='bool', + aliases=['samba_public'], + default=False), + sambaSecurityMode = dict(type='str', + aliases=['samba_security_mode'], + default='0777'), + sambaStrictLocking = dict(type='str', + aliases=['samba_strict_locking'], + default='Auto'), + sambaVFSObjects = dict(type='str', + aliases=['samba_vfs_objects'], + default=None), + sambaValidUsers = dict(type='str', + aliases=['samba_valid_users'], + default=None), + sambaWriteList = dict(type='str', + aliases=['samba_write_list'], + default=None), + sambaWriteable = dict(type='bool', + aliases=['samba_writeable'], + default=True), + nfs_hosts = dict(type='list', + default=[]), + nfsCustomSettings = dict(type='list', + aliases=['nfs_custom_settings'], + default=[]), + state = dict(default='present', + choices=['present', 'absent'], + type='str') + ), + supports_check_mode=True, + required_if = ([ + ('state', 'present', ['path', 'host', 'sambaName']) + ]) + ) + name = module.params['name'] + state = module.params['state'] + changed = False + + obj = list(ldap_search( + '(&(objectClass=univentionShare)(cn={}))'.format(name), + attr=['cn'] + )) + + exists = bool(len(obj)) + container = 'cn=shares,ou={},{}'.format(module.params['ou'], base_dn()) + dn = 'cn={},{}'.format(name, container) + + if state == 'present': + try: + if not exists: + obj = umc_module_for_add('shares/share', container) + else: + obj = umc_module_for_edit('shares/share', dn) + + module.params['printablename'] = '{} ({})'.format(name, module.params['host']) + for k in obj.keys(): + if module.params[k] is True: + module.params[k] = '1' + elif module.params[k] is False: + module.params[k] = '0' + obj[k] = module.params[k] + + diff = obj.diff() + if exists: + for k in obj.keys(): + if obj.hasChanged(k): + changed = True + else: + changed = True + if not module.check_mode: + if not exists: + obj.create() + elif changed: + obj.modify() + except BaseException as err: + module.fail_json( + msg='Creating/editing share {} in {} failed: {}'.format( + name, + container, + err, + ) + ) + + if state == 'absent' and exists: + try: + obj = umc_module_for_edit('shares/share', dn) + if not module.check_mode: + obj.remove() + changed = True + except BaseException as err: + module.fail_json( + msg='Removing share {} in {} failed: {}'.format( + name, + container, + err, + ) + ) + + module.exit_json( + changed=changed, + name=name, + diff=diff, + container=container + ) + + +if __name__ == '__main__': + main() diff --git a/univention/udm_user.py b/univention/udm_user.py new file mode 100644 index 00000000000..ac2d8acb11e --- /dev/null +++ b/univention/udm_user.py @@ -0,0 +1,598 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +# Copyright (c) 2016, Adfinis SyGroup AG +# Tobias Rueetschi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + + +from datetime import date +import crypt +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.univention_umc import ( + umc_module_for_add, + umc_module_for_edit, + ldap_search, + base_dn, +) +from dateutil.relativedelta import relativedelta + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: udm_user +version_added: "2.2" +author: "Tobias Rueetschi (@2-B)" +short_description: Manage posix users on a univention corporate server +description: + - "This module allows to manage posix users on a univention corporate + server (UCS). + It uses the python API of the UCS to create a new object or edit it." +requirements: + - Python >= 2.6 +options: + state: + required: false + default: "present" + choices: [ present, absent ] + description: + - Whether the user is present or not. + username: + required: true + description: + - User name + aliases: ['name'] + firstname: + required: false + description: + - First name. Required if C(state=present). + lastname: + required: false + description: + - Last name. Required if C(state=present). + password: + required: false + default: None + description: + - Password. Required if C(state=present). + birthday: + required: false + default: None + description: + - Birthday + city: + required: false + default: None + description: + - City of users business address. + country: + required: false + default: None + description: + - Country of users business address. + department_number: + required: false + default: None + description: + - Department number of users business address. + aliases: [ departmentNumber ] + description: + required: false + default: None + description: + - Description (not gecos) + display_name: + required: false + default: None + description: + - Display name (not gecos) + aliases: [ displayName ] + email: + required: false + default: [''] + description: + - A list of e-mail addresses. + employee_number: + required: false + default: None + description: + - Employee number + aliases: [ employeeNumber ] + employee_type: + required: false + default: None + description: + - Employee type + aliases: [ employeeType ] + gecos: + required: false + default: None + description: + - GECOS + groups: + required: false + default: [] + description: + - "POSIX groups, the LDAP DNs of the groups will be found with the + LDAP filter for each group as $GROUP: + C((&(objectClass=posixGroup)(cn=$GROUP)))." + home_share: + required: false + default: None + description: + - "Home NFS share. Must be a LDAP DN, e.g. + C(cn=home,cn=shares,ou=school,dc=example,dc=com)." + aliases: [ homeShare ] + home_share_path: + required: false + default: None + description: + - Path to home NFS share, inside the homeShare. + aliases: [ homeSharePath ] + home_telephone_number: + required: false + default: [] + description: + - List of private telephone numbers. + aliases: [ homeTelephoneNumber ] + homedrive: + required: false + default: None + description: + - Windows home drive, e.g. C("H:"). + mail_alternative_address: + required: false + default: [] + description: + - List of alternative e-mail addresses. 
+ aliases: [ mailAlternativeAddress ] + mail_home_server: + required: false + default: None + description: + - FQDN of mail server + aliases: [ mailHomeServer ] + mail_primary_address: + required: false + default: None + description: + - Primary e-mail address + aliases: [ mailPrimaryAddress ] + mobile_telephone_number: + required: false + default: [] + description: + - Mobile phone number + aliases: [ mobileTelephoneNumber ] + organisation: + required: false + default: None + description: + - Organisation + override_pw_history: + required: false + default: False + description: + - Override password history + aliases: [ overridePWHistory ] + override_pw_length: + required: false + default: False + description: + - Override password check + aliases: [ overridePWLength ] + pager_telephonenumber: + required: false + default: [] + description: + - List of pager telephone numbers. + aliases: [ pagerTelephonenumber ] + phone: + required: false + default: [] + description: + - List of telephone numbers. + postcode: + required: false + default: None + description: + - Postal code of users business address. + primary_group: + required: false + default: cn=Domain Users,cn=groups,$LDAP_BASE_DN + description: + - Primary group. This must be the group LDAP DN. + aliases: [ primaryGroup ] + profilepath: + required: false + default: None + description: + - Windows profile directory + pwd_change_next_login: + required: false + default: None + choices: [ '0', '1' ] + description: + - Change password on next login. + aliases: [ pwdChangeNextLogin ] + room_number: + required: false + default: None + description: + - Room number of users business address. + aliases: [ roomNumber ] + samba_privileges: + required: false + default: [] + description: + - "Samba privilege, like allow printer administration, do domain + join." + aliases: [ sambaPrivileges ] + samba_user_workstations: + required: false + default: [] + description: + - Allow the authentication only on this Microsoft Windows host. + aliases: [ sambaUserWorkstations ] + sambahome: + required: false + default: None + description: + - Windows home path, e.g. C('\\\\$FQDN\\$USERNAME'). + scriptpath: + required: false + default: None + description: + - Windows logon script. + secretary: + required: false + default: [] + description: + - A list of superiors as LDAP DNs. + serviceprovider: + required: false + default: [''] + description: + - Enable user for the following service providers. + shell: + required: false + default: '/bin/bash' + description: + - Login shell + street: + required: false + default: None + description: + - Street of users business address. + title: + required: false + default: None + description: + - Title, e.g. C(Prof.). + unixhome: + required: false + default: '/home/$USERNAME' + description: + - Unix home directory + userexpiry: + required: false + default: Today + 1 year + description: + - Account expiry date, e.g. C(1999-12-31). + position: + required: false + default: '' + description: + - "Define the whole position of users object inside the LDAP tree, + e.g. C(cn=employee,cn=users,ou=school,dc=example,dc=com)." + ou: + required: false + default: '' + description: + - "Organizational Unit inside the LDAP Base DN, e.g. C(school) for + LDAP OU C(ou=school,dc=example,dc=com)." + subpath: + required: false + default: 'cn=users' + description: + - "LDAP subpath inside the organizational unit, e.g. + C(cn=teachers,cn=users) for LDAP container + C(cn=teachers,cn=users,dc=example,dc=com)." 
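+notes:
+    - "Two defaults are derived in main() below: an unset C(display_name)
+       becomes 'firstname lastname' and an unset C(unixhome) becomes
+       C(/home/$USERNAME)."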
+''' + + +EXAMPLES = ''' +# Create a user on a UCS +- udm_user: + name: FooBar + password: secure_password + firstname: Foo + lastname: Bar + +# Create a user with the DN +# C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com) +- udm_user: + name: foo + password: secure_password + firstname: Foo + lastname: Bar + ou: school + subpath: 'cn=teachers,cn=users' +# or define the position +- udm_user: + name: foo + password: secure_password + firstname: Foo + lastname: Bar + position: 'cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com' +''' + + +RETURN = '''# ''' + + +def main(): + expiry = date.strftime(date.today() + relativedelta(years=1), "%Y-%m-%d") + module = AnsibleModule( + argument_spec = dict( + birthday = dict(default=None, + type='str'), + city = dict(default=None, + type='str'), + country = dict(default=None, + type='str'), + department_number = dict(default=None, + type='str', + aliases=['departmentNumber']), + description = dict(default=None, + type='str'), + display_name = dict(default=None, + type='str', + aliases=['displayName']), + email = dict(default=[''], + type='list'), + employee_number = dict(default=None, + type='str', + aliases=['employeeNumber']), + employee_type = dict(default=None, + type='str', + aliases=['employeeType']), + firstname = dict(default=None, + type='str'), + gecos = dict(default=None, + type='str'), + groups = dict(default=[], + type='list'), + home_share = dict(default=None, + type='str', + aliases=['homeShare']), + home_share_path = dict(default=None, + type='str', + aliases=['homeSharePath']), + home_telephone_number = dict(default=[], + type='list', + aliases=['homeTelephoneNumber']), + homedrive = dict(default=None, + type='str'), + lastname = dict(default=None, + type='str'), + mail_alternative_address= dict(default=[], + type='list', + aliases=['mailAlternativeAddress']), + mail_home_server = dict(default=None, + type='str', + aliases=['mailHomeServer']), + mail_primary_address = dict(default=None, + type='str', + aliases=['mailPrimaryAddress']), + mobile_telephone_number = dict(default=[], + type='list', + aliases=['mobileTelephoneNumber']), + organisation = dict(default=None, + type='str'), + overridePWHistory = dict(default=False, + type='bool', + aliases=['override_pw_history']), + overridePWLength = dict(default=False, + type='bool', + aliases=['override_pw_length']), + pager_telephonenumber = dict(default=[], + type='list', + aliases=['pagerTelephonenumber']), + password = dict(default=None, + type='str', + no_log=True), + phone = dict(default=[], + type='list'), + postcode = dict(default=None, + type='str'), + primary_group = dict(default=None, + type='str', + aliases=['primaryGroup']), + profilepath = dict(default=None, + type='str'), + pwd_change_next_login = dict(default=None, + type='str', + choices=['0', '1'], + aliases=['pwdChangeNextLogin']), + room_number = dict(default=None, + type='str', + aliases=['roomNumber']), + samba_privileges = dict(default=[], + type='list', + aliases=['sambaPrivileges']), + samba_user_workstations = dict(default=[], + type='list', + aliases=['sambaUserWorkstations']), + sambahome = dict(default=None, + type='str'), + scriptpath = dict(default=None, + type='str'), + secretary = dict(default=[], + type='list'), + serviceprovider = dict(default=[''], + type='list'), + shell = dict(default='/bin/bash', + type='str'), + street = dict(default=None, + type='str'), + title = dict(default=None, + type='str'), + unixhome = dict(default=None, + type='str'), + userexpiry = 
dict(default=expiry, + type='str'), + username = dict(required=True, + aliases=['name'], + type='str'), + position = dict(default='', + type='str'), + ou = dict(default='', + type='str'), + subpath = dict(default='cn=users', + type='str'), + state = dict(default='present', + choices=['present', 'absent'], + type='str') + ), + supports_check_mode=True, + required_if = ([ + ('state', 'present', ['firstname', 'lastname', 'password']) + ]) + ) + username = module.params['username'] + position = module.params['position'] + ou = module.params['ou'] + subpath = module.params['subpath'] + state = module.params['state'] + changed = False + + users = list(ldap_search( + '(&(objectClass=posixAccount)(uid={}))'.format(username), + attr=['uid'] + )) + if position != '': + container = position + else: + if ou != '': + ou = 'ou={},'.format(ou) + if subpath != '': + subpath = '{},'.format(subpath) + container = '{}{}{}'.format(subpath, ou, base_dn()) + user_dn = 'uid={},{}'.format(username, container) + + exists = bool(len(users)) + + if state == 'present': + try: + if not exists: + obj = umc_module_for_add('users/user', container) + else: + obj = umc_module_for_edit('users/user', user_dn) + + if module.params['displayName'] is None: + module.params['displayName'] = '{} {}'.format( + module.params['firstname'], + module.params['lastname'] + ) + if module.params['unixhome'] is None: + module.params['unixhome'] = '/home/{}'.format( + module.params['username'] + ) + for k in obj.keys(): + if (k != 'password' and + k != 'groups' and + k != 'overridePWHistory' and + k in module.params and + module.params[k] is not None): + obj[k] = module.params[k] + # handle some special values + obj['e-mail'] = module.params['email'] + password = module.params['password'] + if obj['password'] is None: + obj['password'] = password + else: + old_password = obj['password'].split('}', 2)[1] + if crypt.crypt(password, old_password) != old_password: + obj['overridePWHistory'] = module.params['overridePWHistory'] + obj['overridePWLength'] = module.params['overridePWLength'] + obj['password'] = password + + diff = obj.diff() + if exists: + for k in obj.keys(): + if obj.hasChanged(k): + changed = True + else: + changed = True + if not module.check_mode: + if not exists: + obj.create() + elif changed: + obj.modify() + except: + module.fail_json( + msg="Creating/editing user {} in {} failed".format( + username, + container + ) + ) + try: + groups = module.params['groups'] + if groups: + filter = '(&(objectClass=posixGroup)(|(cn={})))'.format( + ')(cn='.join(groups) + ) + group_dns = list(ldap_search(filter, attr=['dn'])) + for dn in group_dns: + grp = umc_module_for_edit('groups/group', dn[0]) + if user_dn not in grp['users']: + grp['users'].append(user_dn) + if not module.check_mode: + grp.modify() + changed = True + except: + module.fail_json( + msg="Adding groups to user {} failed".format(username) + ) + + if state == 'absent' and exists: + try: + obj = umc_module_for_edit('users/user', user_dn) + if not module.check_mode: + obj.remove() + changed = True + except: + module.fail_json( + msg="Removing user {} failed".format(username) + ) + + module.exit_json( + changed=changed, + username=username, + diff=diff, + container=container + ) + + +if __name__ == '__main__': + main() diff --git a/web_infrastructure/apache2_mod_proxy.py b/web_infrastructure/apache2_mod_proxy.py new file mode 100644 index 00000000000..4d2f2c39a8f --- /dev/null +++ b/web_infrastructure/apache2_mod_proxy.py @@ -0,0 +1,453 @@ +#!/usr/bin/python +# -*- coding: 
utf-8 -*- + +# (c) 2016, Olivier Boukili +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: apache2_mod_proxy +version_added: "2.2" +short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool +description: + - Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer + pool, using HTTP POST and GET requests. The httpd mod_proxy balancer-member + status page has to be enabled and accessible, as this module relies on parsing + this page. This module supports ansible check_mode, and requires BeautifulSoup + python module. +options: + balancer_url_suffix: + default: /balancer-manager/ + description: + - Suffix of the balancer pool url required to access the balancer pool + status page (e.g. balancer_vhost[:port]/balancer_url_suffix). + required: false + balancer_vhost: + default: None + description: + - (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool. + required: true + member_host: + default: None + description: + - (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to. + Port number is autodetected and should not be specified here. + If undefined, apache2_mod_proxy module will return a members list of + dictionaries of all the current balancer pool members' attributes. + required: false + state: + default: None + description: + - Desired state of the member host. + (absent|disabled),drained,hot_standby,ignore_errors can be + simultaneously invoked by separating them with a comma (e.g. state=drained,ignore_errors). + required: false + choices: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"] + tls: + default: false + description: + - Use https to access balancer management page. + choices: ["true", "false"] + validate_certs: + default: true + description: + - Validate ssl/tls certificates. 
+ choices: ["true", "false"] +''' + +EXAMPLES = ''' +# Get all current balancer pool members' attributes: +- apache2_mod_proxy: + balancer_vhost: 10.0.0.2 + +# Get a specific member's attributes: +- apache2_mod_proxy: + balancer_vhost: myws.mydomain.org + balancer_suffix: /lb/ + member_host: node1.myws.mydomain.org + +# Enable all balancer pool members: +- apache2_mod_proxy: + balancer_vhost: '{{ myloadbalancer_host }}' + register: result +- apache2_mod_proxy: + balancer_vhost: '{{ myloadbalancer_host }}' + member_host: '{{ item.host }}' + state: present + with_items: '{{ result.members }}' + +# Gracefully disable a member from a loadbalancer node: +- apache2_mod_proxy: + balancer_vhost: '{{ vhost_host }}' + member_host: '{{ member.host }}' + state: drained + delegate_to: myloadbalancernode +- wait_for: + host: '{{ member.host }}' + port: '{{ member.port }}' + state: drained + delegate_to: myloadbalancernode +- apache2_mod_proxy: + balancer_vhost: '{{ vhost_host }}' + member_host: '{{ member.host }}' + state: absent + delegate_to: myloadbalancernode +''' + +RETURN = ''' +member: + description: specific balancer member information dictionary, returned when apache2_mod_proxy module is invoked with member_host parameter. + type: dict + returned: success + sample: + {"attributes": + {"Busy": "0", + "Elected": "42", + "Factor": "1", + "From": "136K", + "Load": "0", + "Route": null, + "RouteRedir": null, + "Set": "0", + "Status": "Init Ok ", + "To": " 47K", + "Worker URL": null + }, + "balancer_url": "http://10.10.0.2/balancer-manager/", + "host": "10.10.0.20", + "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", + "path": "/ws", + "port": 8080, + "protocol": "http", + "status": { + "disabled": false, + "drained": false, + "hot_standby": false, + "ignore_errors": false + } + } +members: + description: list of member (defined above) dictionaries, returned when apache2_mod_proxy is invoked with no member_host and state args. 
+    returned: success
+    type: list
+    sample:
+      [{"attributes": {
+            "Busy": "0",
+            "Elected": "42",
+            "Factor": "1",
+            "From": "136K",
+            "Load": "0",
+            "Route": null,
+            "RouteRedir": null,
+            "Set": "0",
+            "Status": "Init Ok ",
+            "To": " 47K",
+            "Worker URL": null
+        },
+        "balancer_url": "http://10.10.0.2/balancer-manager/",
+        "host": "10.10.0.20",
+        "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+        "path": "/ws",
+        "port": 8080,
+        "protocol": "http",
+        "status": {
+            "disabled": false,
+            "drained": false,
+            "hot_standby": false,
+            "ignore_errors": false
+        }
+       },
+       {"attributes": {
+            "Busy": "0",
+            "Elected": "42",
+            "Factor": "1",
+            "From": "136K",
+            "Load": "0",
+            "Route": null,
+            "RouteRedir": null,
+            "Set": "0",
+            "Status": "Init Ok ",
+            "To": " 47K",
+            "Worker URL": null
+        },
+        "balancer_url": "http://10.10.0.2/balancer-manager/",
+        "host": "10.10.0.21",
+        "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+        "path": "/ws",
+        "port": 8080,
+        "protocol": "http",
+        "status": {
+            "disabled": false,
+            "drained": false,
+            "hot_standby": false,
+            "ignore_errors": false}
+       }
+      ]
+'''
+
+import re
+
+try:
+    from BeautifulSoup import BeautifulSoup
+except ImportError:
+    HAS_BEAUTIFULSOUP = False
+else:
+    HAS_BEAUTIFULSOUP = True
+
+# balancer member attributes extraction regexp:
+EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)"
+# Apache2 server version extraction regexp:
+APACHE_VERSION_EXPRESSION = r"Server Version: Apache/([\d.]+) \(([\w]+)\)"
+
+
+def regexp_extraction(string, _regexp, groups=1):
+    """ Returns the capture group (default=1) specified in the regexp, applied to the string """
+    regexp_search = re.search(string=str(string), pattern=str(_regexp))
+    if regexp_search:
+        if regexp_search.group(groups) != '':
+            return str(regexp_search.group(groups))
+    return None
+
+
+class BalancerMember(object):
+    """ Apache 2.4 mod_proxy LB balancer member.
+    attributes:
+        read-only:
+            host -> member host (string),
+            management_url -> member management url (string),
+            protocol -> member protocol (string)
+            port -> member port (string),
+            path -> member location (string),
+            balancer_url -> url of this member's parent balancer (string),
+            attributes -> whole member attributes (dictionary)
+            module -> ansible module instance (AnsibleModule object).
+        writable:
+            status -> status of the member (dictionary)
+    """
+
+    def __init__(self, management_url, balancer_url, module):
+        self.host = regexp_extraction(management_url, str(EXPRESSION), 4)
+        self.management_url = str(management_url)
+        self.protocol = regexp_extraction(management_url, EXPRESSION, 3)
+        self.port = regexp_extraction(management_url, EXPRESSION, 5)
+        self.path = regexp_extraction(management_url, EXPRESSION, 6)
+        self.balancer_url = str(balancer_url)
+        self.module = module
+
+    def get_member_attributes(self):
+        """ Returns a dictionary of a balancer member's attributes."""
+
+        balancer_member_page = fetch_url(self.module, self.management_url)
+
+        try:
+            assert balancer_member_page[1]['status'] == 200
+        except AssertionError:
+            self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! " + str(balancer_member_page[1]))
+        else:
+            try:
+                soup = BeautifulSoup(balancer_member_page[0])
+            except TypeError:
+                self.module.fail_json(msg="Cannot parse balancer_member_page HTML! " + str(balancer_member_page[0]))
+            else:
+                subsoup = soup.findAll('table')[1].findAll('tr')
+                keys = subsoup[0].findAll('th')
+                for valuesset in subsoup[1::1]:
+                    if re.search(pattern=self.host, string=str(valuesset)):
+                        values = valuesset.findAll('td')
+                        return dict((keys[x].string, values[x].string) for x in range(0, len(keys)))
+
+    def get_member_status(self):
+        """ Returns a dictionary of a balancer member's status attributes."""
+        status_mapping = {'disabled': 'Dis',
+                          'drained': 'Drn',
+                          'hot_standby': 'Stby',
+                          'ignore_errors': 'Ign'}
+        status = {}
+        actual_status = str(self.attributes['Status'])
+        for mode in status_mapping.keys():
+            if re.search(pattern=status_mapping[mode], string=actual_status):
+                status[mode] = True
+            else:
+                status[mode] = False
+        return status
+
+    def set_member_status(self, values):
+        """ Sets a balancer member's status attributes amongst pre-mapped values."""
+        values_mapping = {'disabled': '&w_status_D',
+                          'drained': '&w_status_N',
+                          'hot_standby': '&w_status_H',
+                          'ignore_errors': '&w_status_I'}
+
+        request_body = regexp_extraction(self.management_url, EXPRESSION, 1)
+        for k in values_mapping.keys():
+            if values[str(k)]:
+                request_body = request_body + str(values_mapping[k]) + '=1'
+            else:
+                request_body = request_body + str(values_mapping[k]) + '=0'
+
+        response = fetch_url(self.module, self.management_url, data=str(request_body))
+        try:
+            assert response[1]['status'] == 200
+        except AssertionError:
+            self.module.fail_json(msg="Could not set the member status! " + self.host + " " + str(response[1]['status']))
+
+    attributes = property(get_member_attributes)
+    status = property(get_member_status, set_member_status)
+
+
+class Balancer(object):
+    """ Apache httpd 2.4 mod_proxy balancer object"""
+
+    def __init__(self, host, suffix, module, members=None, tls=False):
+        if tls:
+            self.base_url = str(str('https://') + str(host))
+            self.url = str(str('https://') + str(host) + str(suffix))
+        else:
+            self.base_url = str(str('http://') + str(host))
+            self.url = str(str('http://') + str(host) + str(suffix))
+        self.module = module
+        self.page = self.fetch_balancer_page()
+        if members is None:
+            self._members = []
+
+    def fetch_balancer_page(self):
+        """ Returns the balancer management html page as a string for later parsing."""
+        page = fetch_url(self.module, str(self.url))
+        try:
+            assert page[1]['status'] == 200
+        except AssertionError:
+            self.module.fail_json(msg="Could not get balancer page! HTTP status response: " + str(page[1]['status']))
+        else:
+            content = page[0].read()
+            apache_version = regexp_extraction(content, APACHE_VERSION_EXPRESSION, 1)
+            if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version):
+                self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version))
+            return content
+
+    def get_balancer_members(self):
+        """ Returns members of the balancer as a generator object for later iteration."""
+        try:
+            soup = BeautifulSoup(self.page)
+        except TypeError:
+            self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page))
+        else:
+            for element in soup.findAll('a')[1::1]:
+                balancer_member_suffix = str(element.get('href'))
+                try:
+                    assert balancer_member_suffix != ''
+                except AssertionError:
+                    self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!")
+                else:
+                    yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module)
+
+    members = property(get_balancer_members)
+
+
+def main():
+    """ Initiates module."""
+    module = AnsibleModule(
+        argument_spec=dict(
+            balancer_vhost=dict(required=True, type='str'),
+            balancer_url_suffix=dict(default="/balancer-manager/", type='str'),
+            member_host=dict(type='str'),
+            state=dict(type='str'),
+            tls=dict(default=False, type='bool'),
+            validate_certs=dict(default=True, type='bool')
+        ),
+        supports_check_mode=True
+    )
+
+    if not HAS_BEAUTIFULSOUP:
+        module.fail_json(msg="python module 'BeautifulSoup' is required!")
+
+    if module.params['state'] is not None:
+        states = module.params['state'].split(',')
+        if (len(states) > 1) and (("present" in states) or ("enabled" in states)):
+            module.fail_json(msg="state present/enabled is mutually exclusive with other states!")
+        else:
+            for _state in states:
+                if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']:
+                    module.fail_json(msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'.")
+    else:
+        states = ['None']
+
+    mybalancer = Balancer(module.params['balancer_vhost'],
+                          module.params['balancer_url_suffix'],
+                          module=module,
+                          tls=module.params['tls'])
+
+    if module.params['member_host'] is None:
+        json_output_list = []
+        for member in mybalancer.members:
+            json_output_list.append({
+                "host": member.host,
+                "status": member.status,
+                "protocol": member.protocol,
+                "port": member.port,
+                "path": member.path,
+                "attributes": member.attributes,
+                "management_url": member.management_url,
+                "balancer_url": member.balancer_url
+            })
+        module.exit_json(
+            changed=False,
+            members=json_output_list
+        )
+    else:
+        changed = False
+        member_exists = False
+        member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False}
+        for mode in member_status.keys():
+            for state in states:
+                if mode == state:
+                    member_status[mode] = True
+                elif mode == 'disabled' and state == 'absent':
+                    member_status[mode] = True
+
+        for member in mybalancer.members:
+            if str(member.host) == str(module.params['member_host']):
+                member_exists = True
+                if module.params['state'] is not None:
+                    member_status_before = member.status
+                    if not module.check_mode:
+                        member_status_after = member.status = member_status
+                    else:
+                        member_status_after = member_status
+                    if member_status_before != member_status_after:
+                        changed = True
+                json_output = {
+                    "host": member.host,
+                    "status": member.status,
+                    "protocol": member.protocol,
+                    "port": member.port,
+                    "path": member.path,
+                    "attributes": member.attributes,
+                    "management_url": member.management_url,
+                    "balancer_url": member.balancer_url
+                }
+        if member_exists:
+            module.exit_json(
+                changed=changed,
+                member=json_output
+            )
+        else:
+            module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!')
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+if __name__ == '__main__':
+    main()
diff --git a/web_infrastructure/deploy_helper.py b/web_infrastructure/deploy_helper.py new
file mode 100644 index 00000000000..a40abda2427 --- /dev/null +++ b/web_infrastructure/deploy_helper.py @@ -0,0 +1,534 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Jasper N. Brouwer +# (c) 2014, Ramon de la Fuente +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: deploy_helper +version_added: "2.0" +author: "Ramon de la Fuente (@ramondelafuente)" +short_description: Manages some of the steps common in deploying projects. +description: + - The Deploy Helper manages some of the steps common in deploying software. + It creates a folder structure, manages a symlink for the current release + and cleans up old releases. + - "Running it with the C(state=query) or C(state=present) will return the C(deploy_helper) fact. + C(project_path), whatever you set in the path parameter, + C(current_path), the path to the symlink that points to the active release, + C(releases_path), the path to the folder to keep releases in, + C(shared_path), the path to the folder to keep shared resources in, + C(unfinished_filename), the file to check for to recognize unfinished builds, + C(previous_release), the release the 'current' symlink is pointing to, + C(previous_release_path), the full path to the 'current' symlink target, + C(new_release), either the 'release' parameter or a generated timestamp, + C(new_release_path), the path to the new release folder (not created by the module)." + +options: + path: + required: True + aliases: ['dest'] + description: + - the root path of the project. Alias I(dest). + Returned in the C(deploy_helper.project_path) fact. + + state: + required: False + choices: [ present, finalize, absent, clean, query ] + default: present + description: + - the state of the project. + C(query) will only gather facts, + C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders, + C(finalize) will remove the unfinished_filename file, create a symlink to the newly + deployed release and optionally clean old releases, + C(clean) will remove failed & old releases, + C(absent) will remove the project folder (synonymous to the M(file) module with C(state=absent)) + + release: + required: False + default: None + description: + - the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (i.e. '20141119223359'). + This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize). + You can use the generated fact C(release={{ deploy_helper.new_release }}). + + releases_path: + required: False + default: releases + description: + - the name of the folder that will hold the releases. This can be relative to C(path) or absolute. + Returned in the C(deploy_helper.releases_path) fact. 
+ + shared_path: + required: False + default: shared + description: + - the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute. + If this is set to an empty string, no shared folder will be created. + Returned in the C(deploy_helper.shared_path) fact. + + current_path: + required: False + default: current + description: + - the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean). + Returned in the C(deploy_helper.current_path) fact. + + unfinished_filename: + required: False + default: DEPLOY_UNFINISHED + description: + - the name of the file that indicates a deploy has not finished. All folders in the releases_path that + contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is + automatically deleted from the I(new_release_path) during C(state=finalize). + + clean: + required: False + default: True + description: + - Whether to run the clean procedure in case of C(state=finalize). + + keep_releases: + required: False + default: 5 + description: + - the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds + will be deleted first, so only correct releases will count. The current version will not count. + +notes: + - Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden + parameters to both calls, otherwise the second call will overwrite the facts of the first one. + - When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a + new naming strategy without problems. + - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent + unless you pass your own release name with C(release). Due to the nature of deploying software, this should not + be much of a problem. +''' + +EXAMPLES = ''' + +# General explanation, starting with an example folder structure for a project: + +root: + releases: + - 20140415234508 + - 20140415235146 + - 20140416082818 + + shared: + - sessions + - uploads + + current: -> releases/20140416082818 + + +The 'releases' folder holds all the available releases. A release is a complete build of the application being +deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem. +Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like +git tags or commit hashes. + +During a deploy, a new folder should be created in the releases folder and any build steps required should be +performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink +with a link to this build. + +The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server +session files, or files uploaded by users of your application. It's quite common to have symlinks from a release +folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps. + +The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress. +The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new +release is reduced to the time it takes to switch the link. 
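
The switch can be made effectively atomic by first creating the new symlink under a temporary name and then renaming it over the old one, which is what this module's create_link does internally. A minimal sketch of that idea in plain Python (the paths and function name are illustrative, not part of the module):

import os

def switch_current(release_path, current_link):
    # Build the replacement link under a temporary name first.
    tmp_link = current_link + '.new'
    if os.path.islink(tmp_link):
        os.unlink(tmp_link)
    os.symlink(release_path, tmp_link)
    # os.rename() over an existing entry is atomic on POSIX filesystems,
    # so readers always see either the old target or the new one.
    os.rename(tmp_link, current_link)

switch_current('releases/20140416082818', 'current')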
+ +To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release +that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated +procedure to remove it during cleanup. + + +# Typical usage: +- name: Initialize the deploy root and gather facts + deploy_helper: + path: /path/to/root +- name: Clone the project to the new release folder + git: + repo: 'git://foosball.example.org/path/to/repo.git' + dest: '{{ deploy_helper.new_release_path }}' + version: 'v1.1.1' +- name: Add an unfinished file, to allow cleanup on successful finalize + file: + path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}' + state: touch +- name: Perform some build steps, like running your dependency manager for example + composer: + command: install + working_dir: '{{ deploy_helper.new_release_path }}' +- name: Create some folders in the shared folder + file: + path: '{{ deploy_helper.shared_path }}/{{ item }}' + state: directory + with_items: + - sessions + - uploads +- name: Add symlinks from the new release to the shared folder + file: + path: '{{ deploy_helper.new_release_path }}/{{ item.path }}' + src: '{{ deploy_helper.shared_path }}/{{ item.src }}' + state: link + with_items: + - path: app/sessions + src: sessions + - path: web/uploads + src: uploads +- name: Finalize the deploy, removing the unfinished file and switching the symlink + deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + +# Retrieving facts before running a deploy +- name: Run 'state=query' to gather facts without changing anything + deploy_helper: + path: /path/to/root + state: query +# Remember to set the 'release' parameter when you actually call 'state=present' later +- name: Initialize the deploy root + deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: present + +# all paths can be absolute or relative (to the 'path' parameter) +- deploy_helper: + path: /path/to/root + releases_path: /var/www/project/releases + shared_path: /var/www/shared + current_path: /var/www/active + +# Using your own naming strategy for releases (a version tag in this case): +- deploy_helper: + path: /path/to/root + release: 'v1.1.1' + state: present +- deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + +# Using a different unfinished_filename: +- deploy_helper: + path: /path/to/root + unfinished_filename: README.md + release: '{{ deploy_helper.new_release }}' + state: finalize + +# Postponing the cleanup of older builds: +- deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + clean: False +- deploy_helper: + path: /path/to/root + state: clean +# Or running the cleanup ahead of the new deploy +- deploy_helper: + path: /path/to/root + state: clean +- deploy_helper: + path: /path/to/root + state: present + +# Keeping more old releases: +- deploy_helper: + path: /path/to/root + release: '{{ deploy_helper.new_release }}' + state: finalize + keep_releases: 10 +# Or, if you use 'clean=false' on finalize: +- deploy_helper: + path: /path/to/root + state: clean + keep_releases: 10 + +# Removing the entire project root folder +- deploy_helper: + path: /path/to/root + state: absent + +# Debugging the facts returned by the module +- deploy_helper: + path: /path/to/root +- debug: + var: deploy_helper +''' + +# import module snippets +from ansible.module_utils.basic 
import * +from ansible.module_utils.pycompat24 import get_exception + +class DeployHelper(object): + + def __init__(self, module): + module.params['path'] = os.path.expanduser(module.params['path']) + + self.module = module + self.file_args = module.load_file_common_arguments(module.params) + + self.clean = module.params['clean'] + self.current_path = module.params['current_path'] + self.keep_releases = module.params['keep_releases'] + self.path = module.params['path'] + self.release = module.params['release'] + self.releases_path = module.params['releases_path'] + self.shared_path = module.params['shared_path'] + self.state = module.params['state'] + self.unfinished_filename = module.params['unfinished_filename'] + + def gather_facts(self): + current_path = os.path.join(self.path, self.current_path) + releases_path = os.path.join(self.path, self.releases_path) + if self.shared_path: + shared_path = os.path.join(self.path, self.shared_path) + else: + shared_path = None + + previous_release, previous_release_path = self._get_last_release(current_path) + + if not self.release and (self.state == 'query' or self.state == 'present'): + self.release = time.strftime("%Y%m%d%H%M%S") + + new_release_path = os.path.join(releases_path, self.release) + + return { + 'project_path': self.path, + 'current_path': current_path, + 'releases_path': releases_path, + 'shared_path': shared_path, + 'previous_release': previous_release, + 'previous_release_path': previous_release_path, + 'new_release': self.release, + 'new_release_path': new_release_path, + 'unfinished_filename': self.unfinished_filename + } + + def delete_path(self, path): + if not os.path.lexists(path): + return False + + if not os.path.isdir(path): + self.module.fail_json(msg="%s exists but is not a directory" % path) + + if not self.module.check_mode: + try: + shutil.rmtree(path, ignore_errors=False) + except Exception: + e = get_exception() + self.module.fail_json(msg="rmtree failed: %s" % str(e)) + + return True + + def create_path(self, path): + changed = False + + if not os.path.lexists(path): + changed = True + if not self.module.check_mode: + os.makedirs(path) + + elif not os.path.isdir(path): + self.module.fail_json(msg="%s exists but is not a directory" % path) + + changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed) + + return changed + + def check_link(self, path): + if os.path.lexists(path): + if not os.path.islink(path): + self.module.fail_json(msg="%s exists but is not a symbolic link" % path) + + def create_link(self, source, link_name): + changed = False + + if os.path.islink(link_name): + norm_link = os.path.normpath(os.path.realpath(link_name)) + norm_source = os.path.normpath(os.path.realpath(source)) + if norm_link == norm_source: + changed = False + else: + changed = True + if not self.module.check_mode: + if not os.path.lexists(source): + self.module.fail_json(msg="the symlink target %s doesn't exists" % source) + tmp_link_name = link_name + '.' 
+ self.unfinished_filename + if os.path.islink(tmp_link_name): + os.unlink(tmp_link_name) + os.symlink(source, tmp_link_name) + os.rename(tmp_link_name, link_name) + else: + changed = True + if not self.module.check_mode: + os.symlink(source, link_name) + + return changed + + def remove_unfinished_file(self, new_release_path): + changed = False + unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename) + if os.path.lexists(unfinished_file_path): + changed = True + if not self.module.check_mode: + os.remove(unfinished_file_path) + + return changed + + def remove_unfinished_builds(self, releases_path): + changes = 0 + + for release in os.listdir(releases_path): + if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)): + if self.module.check_mode: + changes += 1 + else: + changes += self.delete_path(os.path.join(releases_path, release)) + + return changes + + def remove_unfinished_link(self, path): + changed = False + + tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename) + if not self.module.check_mode and os.path.exists(tmp_link_name): + changed = True + os.remove(tmp_link_name) + + return changed + + def cleanup(self, releases_path, reserve_version): + changes = 0 + + if os.path.lexists(releases_path): + releases = [ f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path,f)) ] + try: + releases.remove(reserve_version) + except ValueError: + pass + + if not self.module.check_mode: + releases.sort( key=lambda x: os.path.getctime(os.path.join(releases_path,x)), reverse=True) + for release in releases[self.keep_releases:]: + changes += self.delete_path(os.path.join(releases_path, release)) + elif len(releases) > self.keep_releases: + changes += (len(releases) - self.keep_releases) + + return changes + + def _get_file_args(self, path): + file_args = self.file_args.copy() + file_args['path'] = path + return file_args + + def _get_last_release(self, current_path): + previous_release = None + previous_release_path = None + + if os.path.lexists(current_path): + previous_release_path = os.path.realpath(current_path) + previous_release = os.path.basename(previous_release_path) + + return previous_release, previous_release_path + +def main(): + + module = AnsibleModule( + argument_spec = dict( + path = dict(aliases=['dest'], required=True, type='str'), + release = dict(required=False, type='str', default=None), + releases_path = dict(required=False, type='str', default='releases'), + shared_path = dict(required=False, type='str', default='shared'), + current_path = dict(required=False, type='str', default='current'), + keep_releases = dict(required=False, type='int', default=5), + clean = dict(required=False, type='bool', default=True), + unfinished_filename = dict(required=False, type='str', default='DEPLOY_UNFINISHED'), + state = dict(required=False, choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present') + ), + add_file_common_args = True, + supports_check_mode = True + ) + + deploy_helper = DeployHelper(module) + facts = deploy_helper.gather_facts() + + result = { + 'state': deploy_helper.state + } + + changes = 0 + + if deploy_helper.state == 'query': + result['ansible_facts'] = { 'deploy_helper': facts } + + elif deploy_helper.state == 'present': + deploy_helper.check_link(facts['current_path']) + changes += deploy_helper.create_path(facts['project_path']) + changes += deploy_helper.create_path(facts['releases_path']) + if deploy_helper.shared_path: + changes += 
deploy_helper.create_path(facts['shared_path']) + + result['ansible_facts'] = { 'deploy_helper': facts } + + elif deploy_helper.state == 'finalize': + if not deploy_helper.release: + module.fail_json(msg="'release' is a required parameter for state=finalize (try the 'deploy_helper.new_release' fact)") + if deploy_helper.keep_releases <= 0: + module.fail_json(msg="'keep_releases' should be at least 1") + + changes += deploy_helper.remove_unfinished_file(facts['new_release_path']) + changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path']) + if deploy_helper.clean: + changes += deploy_helper.remove_unfinished_link(facts['project_path']) + changes += deploy_helper.remove_unfinished_builds(facts['releases_path']) + changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release']) + + elif deploy_helper.state == 'clean': + changes += deploy_helper.remove_unfinished_link(facts['project_path']) + changes += deploy_helper.remove_unfinished_builds(facts['releases_path']) + changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release']) + + elif deploy_helper.state == 'absent': + # destroy the facts + result['ansible_facts'] = { 'deploy_helper': [] } + changes += deploy_helper.delete_path(facts['project_path']) + + if changes > 0: + result['changed'] = True + else: + result['changed'] = False + + module.exit_json(**result) + + + + +if __name__ == '__main__': + main() diff --git a/web_infrastructure/ejabberd_user.py b/web_infrastructure/ejabberd_user.py index bf86806ad52..84a8dadbf63 100644 --- a/web_infrastructure/ejabberd_user.py +++ b/web_infrastructure/ejabberd_user.py @@ -16,6 +16,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: ejabberd_user @@ -59,15 +63,22 @@ EXAMPLES = ''' Example playbook entries using the ejabberd_user module to manage users state. 
- tasks: - - - name: create a user if it does not exists - action: ejabberd_user username=test host=server password=password - - - name: delete a user if it exists - action: ejabberd_user username=test host=server state=absent +- name: create a user if it does not exist + ejabberd_user: + username: test + host: server + password: password + +- name: delete a user if it exists + ejabberd_user: + username: test + host: server + state: absent ''' + import syslog +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.basic import * class EjabberdUserException(Exception): """ Base exception for EjabberdUser class object """ @@ -98,7 +109,8 @@ def changed(self): try: options = [self.user, self.host, self.pwd] (rc, out, err) = self.run_command('check_password', options) - except EjabberdUserException, e: + except EjabberdUserException: + e = get_exception() (rc, out, err) = (1, None, "required attribute(s) missing") return rc @@ -111,14 +123,15 @@ def exists(self): try: options = [self.user, self.host] (rc, out, err) = self.run_command('check_account', options) - except EjabberdUserException, e: + except EjabberdUserException: + e = get_exception() (rc, out, err) = (1, None, "required attribute(s) missing") return not bool(int(rc)) def log(self, entry): """ This method will log information to the local syslog facility """ if self.logging: - syslog.openlog('ansible-%s' % os.path.basename(__file__)) + syslog.openlog('ansible-%s' % self.module._name) syslog.syslog(syslog.LOG_NOTICE, entry) def run_command(self, cmd, options): @@ -139,7 +152,8 @@ def update(self): try: options = [self.user, self.host, self.pwd] (rc, out, err) = self.run_command('change_password', options) - except EjabberdUserException, e: + except EjabberdUserException: + e = get_exception() (rc, out, err) = (1, None, "required attribute(s) missing") return (rc, out, err) @@ -150,7 +164,8 @@ def create(self): try: options = [self.user, self.host, self.pwd] (rc, out, err) = self.run_command('register', options) - except EjabberdUserException, e: + except EjabberdUserException: + e = get_exception() (rc, out, err) = (1, None, "required attribute(s) missing") return (rc, out, err) @@ -160,7 +175,8 @@ def delete(self): try: options = [self.user, self.host] (rc, out, err) = self.run_command('unregister', options) - except EjabberdUserException, e: + except EjabberdUserException: + e = get_exception() (rc, out, err) = (1, None, "required attribute(s) missing") return (rc, out, err) @@ -209,6 +225,5 @@ def main(): module.exit_json(**result) -# import module snippets -from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/web_infrastructure/jboss.py b/web_infrastructure/jboss.py index 9ec67b7c7b1..738b536782d 100644 --- a/web_infrastructure/jboss.py +++ b/web_infrastructure/jboss.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see .
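
A note on the recurring exception-handling rewrite in the hunks above: Python 3 dropped the old "except ExceptionType, e:" syntax, while the "except ... as e:" form only appeared in Python 2.6, so code that must parse under every interpreter these modules supported catches the exception bare and then retrieves it with get_exception() from module_utils, which returns sys.exc_info()[1]. A minimal sketch of the pattern (risky_call and handle are hypothetical stand-ins):

from ansible.module_utils.pycompat24 import get_exception

try:
    risky_call()            # hypothetical operation that may raise
except Exception:
    e = get_exception()     # portable from Python 2.4 through 3.x
    handle(e)               # hypothetical error handling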
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ module: jboss version_added: "1.4" @@ -52,11 +56,21 @@ EXAMPLES = """ # Deploy a hello world application -- jboss: src=/tmp/hello-1.0-SNAPSHOT.war deployment=hello.war state=present +- jboss: + src: /tmp/hello-1.0-SNAPSHOT.war + deployment: hello.war + state: present + # Update the hello world application -- jboss: src=/tmp/hello-1.1-SNAPSHOT.war deployment=hello.war state=present +- jboss: + src: /tmp/hello-1.1-SNAPSHOT.war + deployment: hello.war + state: present + # Undeploy the hello world application -- jboss: deployment=hello.war state=absent +- jboss: + deployment: hello.war + state: absent """ import os @@ -137,4 +151,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() + +if __name__ == '__main__': + main() diff --git a/web_infrastructure/jenkins_job.py b/web_infrastructure/jenkins_job.py new file mode 100644 index 00000000000..0c91c8b876e --- /dev/null +++ b/web_infrastructure/jenkins_job.py @@ -0,0 +1,362 @@ +#!/usr/bin/python +# +# This is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This Ansible library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this library. If not, see . + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: jenkins_job +short_description: Manage Jenkins jobs +description: + - Manage Jenkins jobs by using the Jenkins REST API. +requirements: + - "python-jenkins >= 0.4.12" + - "lxml >= 3.3.3" +version_added: "2.2" +author: "Sergio Millan Rodriguez (@sermilrod)" +options: + config: + description: + - Config in XML format. + - Required if job does not yet exist. + - Mutually exclusive with C(enabled). + - Considered if C(state=present). + required: false + enabled: + description: + - Whether the job should be enabled or disabled. + - Mutually exclusive with C(config). + - Considered if C(state=present). + required: false + name: + description: + - Name of the Jenkins job. + required: true + password: + description: + - Password to authenticate with the Jenkins server. + required: false + state: + description: + - Attribute that specifies if the job has to be created or deleted. + required: false + default: present + choices: ['present', 'absent'] + token: + description: + - API token used to authenticate as an alternative to the password. + required: false + url: + description: + - URL where the Jenkins server is accessible. + required: false + default: http://localhost:8080 + user: + description: + - User to authenticate with the Jenkins server.
+ required: false +''' + +EXAMPLES = ''' +# Create a jenkins job using basic authentication +- jenkins_job: + config: "{{ lookup('file', 'templates/test.xml') }}" + name: test + password: admin + url: "http://localhost:8080" + user: admin + +# Create a jenkins job using the token +- jenkins_job: + config: "{{ lookup('template', 'templates/test.xml.j2') }}" + name: test + token: asdfasfasfasdfasdfadfasfasdfasdfc + url: "http://localhost:8080" + user: admin + +# Delete a jenkins job using basic authentication +- jenkins_job: + name: test + password: admin + state: absent + url: "http://localhost:8080" + user: admin + +# Delete a jenkins job using the token +- jenkins_job: + name: test + token: asdfasfasfasdfasdfadfasfasdfasdfc + state: absent + url: "http://localhost:8080" + user: admin + +# Disable a jenkins job using basic authentication +- jenkins_job: + name: test + password: admin + enabled: false + url: "http://localhost:8080" + user: admin + +# Disable a jenkins job using the token +- jenkins_job: + name: test + token: asdfasfasfasdfasdfadfasfasdfasdfc + enabled: false + url: "http://localhost:8080" + user: admin +''' + +RETURN = ''' +--- +name: + description: Name of the jenkins job. + returned: success + type: string + sample: test-job +state: + description: State of the jenkins job. + returned: success + type: string + sample: present +enabled: + description: Whether the jenkins job is enabled or not. + returned: success + type: bool + sample: true +user: + description: User used for authentication. + returned: success + type: string + sample: admin +url: + description: Url to connect to the Jenkins server. + returned: success + type: string + sample: https://jenkins.mydomain.com +''' + +try: + import jenkins + python_jenkins_installed = True +except ImportError: + python_jenkins_installed = False + +try: + from lxml import etree as ET + python_lxml_installed = True +except ImportError: + python_lxml_installed = False + +class JenkinsJob: + def __init__(self, module): + self.module = module + + self.config = module.params.get('config') + self.name = module.params.get('name') + self.password = module.params.get('password') + self.state = module.params.get('state') + self.enabled = module.params.get('enabled') + self.token = module.params.get('token') + self.user = module.params.get('user') + self.jenkins_url = module.params.get('url') + self.server = self.get_jenkins_connection() + + self.result = { + 'changed': False, + 'url': self.jenkins_url, + 'name': self.name, + 'user': self.user, + 'state': self.state, + 'diff': { + 'before': "", + 'after': "" + } + } + + def get_jenkins_connection(self): + try: + if (self.user and self.password): + return jenkins.Jenkins(self.jenkins_url, self.user, self.password) + elif (self.user and self.token): + return jenkins.Jenkins(self.jenkins_url, self.user, self.token) + elif (self.user and not (self.password or self.token)): + return jenkins.Jenkins(self.jenkins_url, self.user) + else: + return jenkins.Jenkins(self.jenkins_url) + except Exception: + e = get_exception() + self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % str(e)) + + def get_job_status(self): + try: + return self.server.get_job_info(self.name)['color'].encode('utf-8') + except Exception: + e = get_exception() + self.module.fail_json(msg='Unable to fetch job information, %s' % str(e)) + + def job_exists(self): + try: + return bool(self.server.job_exists(self.name)) + except Exception: + e = get_exception() + self.module.fail_json(msg='Unable to validate if job 
exists, %s for %s' % (str(e), self.jenkins_url)) + + def get_config(self): + return job_config_to_string(self.config) + + def get_current_config(self): + return job_config_to_string(self.server.get_job_config(self.name).encode('utf-8')) + + def has_config_changed(self): + # config is optional, if not provided we keep the current config as is + if self.config is None: + return False + + config_file = self.get_config() + machine_file = self.get_current_config() + + self.result['diff']['after'] = config_file + self.result['diff']['before'] = machine_file + + if machine_file != config_file: + return True + return False + + def present_job(self): + if self.config is None and self.enabled is None: + self.module.fail_json(msg='one of the following params is required on state=present: config,enabled') + + if not self.job_exists(): + self.create_job() + else: + self.update_job() + + def has_state_changed(self, status): + # Keep in current state if enabled arg_spec is not given + if self.enabled is None: + return False + + if ( (self.enabled == False and status != "disabled") or (self.enabled == True and status == "disabled") ): + return True + return False + + def switch_state(self): + if self.enabled == False: + self.server.disable_job(self.name) + else: + self.server.enable_job(self.name) + + def update_job(self): + try: + status = self.get_job_status() + + # Handle job config + if self.has_config_changed(): + self.result['changed'] = True + if not self.module.check_mode: + self.server.reconfig_job(self.name, self.get_config()) + + # Handle job disable/enable + elif self.has_state_changed(status): + self.result['changed'] = True + if not self.module.check_mode: + self.switch_state() + + except Exception: + e = get_exception() + self.module.fail_json(msg='Unable to reconfigure job, %s for %s' % (str(e), self.jenkins_url)) + + def create_job(self): + if self.config is None: + self.module.fail_json(msg='missing required param: config') + + self.result['changed'] = True + try: + config_file = self.get_config() + self.result['diff']['after'] = config_file + if not self.module.check_mode: + self.server.create_job(self.name, config_file) + except Exception: + e = get_exception() + self.module.fail_json(msg='Unable to create job, %s for %s' % (str(e), self.jenkins_url)) + + def absent_job(self): + if self.job_exists(): + self.result['changed'] = True + self.result['diff']['before'] = self.get_current_config() + if not self.module.check_mode: + try: + self.server.delete_job(self.name) + except Exception: + e = get_exception() + self.module.fail_json(msg='Unable to delete job, %s for %s' % (str(e), self.jenkins_url)) + + def get_result(self): + result = self.result + if self.job_exists(): + result['enabled'] = self.get_job_status() != "disabled" + else: + result['enabled'] = None + return result + +def test_dependencies(module): + if not python_jenkins_installed: + module.fail_json(msg="python-jenkins required for this module. "\ + "see http://python-jenkins.readthedocs.io/en/latest/install.html") + + if not python_lxml_installed: + module.fail_json(msg="lxml required for this module. 
"\ + "see http://lxml.de/installation.html") + +def job_config_to_string(xml_str): + return ET.tostring(ET.fromstring(xml_str)) + +def main(): + module = AnsibleModule( + argument_spec = dict( + config = dict(required=False), + name = dict(required=True), + password = dict(required=False, no_log=True), + state = dict(required=False, choices=['present', 'absent'], default="present"), + enabled = dict(required=False, type='bool'), + token = dict(required=False, no_log=True), + url = dict(required=False, default="http://localhost:8080"), + user = dict(required=False) + ), + mutually_exclusive = [ + ['password', 'token'], + ['config', 'enabled'], + ], + supports_check_mode=True, + ) + + test_dependencies(module) + jenkins_job = JenkinsJob(module) + + if module.params.get('state') == "present": + jenkins_job.present_job() + else: + jenkins_job.absent_job() + + result = jenkins_job.get_result() + module.exit_json(**result) + + +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/web_infrastructure/jenkins_plugin.py b/web_infrastructure/jenkins_plugin.py new file mode 100644 index 00000000000..56067c38a60 --- /dev/null +++ b/web_infrastructure/jenkins_plugin.py @@ -0,0 +1,833 @@ +#!/usr/bin/python +# encoding: utf-8 + +# (c) 2016, Jiri Tyr +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.pycompat24 import get_exception +from ansible.module_utils.urls import fetch_url +from ansible.module_utils.urls import url_argument_spec +import base64 +import hashlib +import json +import os +import tempfile +import time +import urllib + + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: jenkins_plugin +author: Jiri Tyr (@jtyr) +version_added: '2.2' +short_description: Add or remove Jenkins plugin +description: + - Ansible module which helps to manage Jenkins plugins. + +options: + group: + required: false + default: jenkins + description: + - Name of the Jenkins group on the OS. + jenkins_home: + required: false + default: /var/lib/jenkins + description: + - Home directory of the Jenkins user. + mode: + required: false + default: '0664' + description: + - File mode applied on versioned plugins. + name: + required: true + description: + - Plugin name. + owner: + required: false + default: jenkins + description: + - Name of the Jenkins user on the OS. + params: + required: false + default: null + description: + - Option used to allow the user to overwrite any of the other options. To + remove an option, set the value of the option to C(null). + state: + required: false + choices: [absent, present, pinned, unpinned, enabled, disabled, latest] + default: present + description: + - Desired plugin state. + - If the C(latest) is set, the check for new version will be performed + every time. This is suitable to keep the plugin up-to-date. 
+ timeout: + required: false + default: 30 + description: + - Server connection timeout in seconds. + updates_expiration: + required: false + default: 86400 + description: + - Number of seconds after which a new copy of the I(update-center.json) + file is downloaded. This is used to avoid the need to download the + plugin to calculate its checksum when C(latest) is specified. + - Set it to C(0) if no cache file should be used. In that case, the + plugin file will always be downloaded to calculate its checksum when + C(latest) is specified. + updates_url: + required: false + default: https://updates.jenkins-ci.org + description: + - URL of the Update Centre. + - Used as the base URL to download the plugins and the + I(update-center.json) JSON file. + url: + required: false + default: http://localhost:8080 + description: + - URL of the Jenkins server. + version: + required: false + default: null + description: + - Plugin version number. + - If this option is specified, all plugin dependencies must be installed + manually. + - It might take longer to verify that the correct version is installed. + This is especially true if a specific version number is specified. + with_dependencies: + required: false + choices: ['yes', 'no'] + default: 'yes' + description: + - Defines whether to install plugin dependencies. + +notes: + - Plugin installation should be run as root or as the same user that owns + the plugin files on the disk. Only if the plugin is not yet installed and + no version is specified is the installation performed through the API, + which requires only the Web UI credentials. + - It's necessary to notify the handler or call the I(service) module to + restart the Jenkins service after a new plugin has been installed. + - Pinning works only if the plugin is installed and the Jenkins service was + successfully restarted after the plugin installation. + - It is not possible to run the module remotely by changing the I(url) + parameter to point to the Jenkins server. The module must be used on the + host where Jenkins runs as it needs direct access to the plugin files.
+''' + +EXAMPLES = ''' +- name: Install plugin + jenkins_plugin: + name: build-pipeline-plugin + +- name: Install plugin without its dependencies + jenkins_plugin: + name: build-pipeline-plugin + with_dependencies: no + +- name: Make sure the plugin is always up-to-date + jenkins_plugin: + name: token-macro + state: latest + +- name: Install specific version of the plugin + jenkins_plugin: + name: token-macro + version: 1.15 + +- name: Pin the plugin + jenkins_plugin: + name: token-macro + state: pinned + +- name: Unpin the plugin + jenkins_plugin: + name: token-macro + state: unpinned + +- name: Enable the plugin + jenkins_plugin: + name: token-macro + state: enabled + +- name: Disable the plugin + jenkins_plugin: + name: token-macro + state: disabled + +- name: Uninstall plugin + jenkins_plugin: + name: build-pipeline-plugin + state: absent + +# +# Example of how to use the params +# +# Define a variable and specify all default parameters you want to use across +# all jenkins_plugin calls: +# +# my_jenkins_params: +# url_username: admin +# url_password: p4ssw0rd +# url: http://localhost:8888 +# +- name: Install plugin + jenkins_plugin: + name: build-pipeline-plugin + params: "{{ my_jenkins_params }}" + +# +# Example of a Play which handles Jenkins restarts during the state changes +# +- name: Jenkins Master play + hosts: jenkins-master + vars: + my_jenkins_plugins: + token-macro: + enabled: yes + build-pipeline-plugin: + version: 1.4.9 + pinned: no + enabled: yes + tasks: + - name: Install plugins without a specific version + jenkins_plugin: + name: "{{ item.key }}" + register: my_jenkins_plugin_unversioned + when: > + 'version' not in item.value + with_dict: "{{ my_jenkins_plugins }}" + + - name: Install plugins with a specific version + jenkins_plugin: + name: "{{ item.key }}" + version: "{{ item.value['version'] }}" + register: my_jenkins_plugin_versioned + when: > + 'version' in item.value + with_dict: "{{ my_jenkins_plugins }}" + + - name: Initiate the fact + set_fact: + jenkins_restart_required: no + + - name: Check if restart is required by any of the versioned plugins + set_fact: + jenkins_restart_required: yes + when: item.changed + with_items: "{{ my_jenkins_plugin_versioned.results }}" + + - name: Check if restart is required by any of the unversioned plugins + set_fact: + jenkins_restart_required: yes + when: item.changed + with_items: "{{ my_jenkins_plugin_unversioned.results }}" + + - name: Restart Jenkins if required + service: + name: jenkins + state: restarted + when: jenkins_restart_required + + - name: Wait for Jenkins to start up + uri: + url: http://localhost:8080 + status_code: 200 + timeout: 5 + register: jenkins_service_status + # Keep trying for 5 mins in 5 sec intervals + retries: 60 + delay: 5 + until: > + 'status' in jenkins_service_status and + jenkins_service_status['status'] == 200 + when: jenkins_restart_required + + - name: Reset the fact + set_fact: + jenkins_restart_required: no + when: jenkins_restart_required + + - name: Plugin pinning + jenkins_plugin: + name: "{{ item.key }}" + state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}" + when: > + 'pinned' in item.value + with_dict: "{{ my_jenkins_plugins }}" + + - name: Plugin enabling + jenkins_plugin: + name: "{{ item.key }}" + state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}" + when: > + 'enabled' in item.value + with_dict: "{{ my_jenkins_plugins }}" +''' + +RETURN = ''' +plugin: + description: plugin name + returned: success + type: string + sample: build-pipeline-plugin 
+state: + description: state of the target, after execution + returned: success + type: string + sample: "present" +''' + + +class JenkinsPlugin(object): + def __init__(self, module): + # To be able to call fail_json + self.module = module + + # Shortcuts for the params + self.params = self.module.params + self.url = self.params['url'] + self.timeout = self.params['timeout'] + + # Crumb + self.crumb = {} + + if self._csrf_enabled(): + self.crumb = self._get_crumb() + + # Get list of installed plugins + self._get_installed_plugins() + + def _csrf_enabled(self): + csrf_data = self._get_json_data( + "%s/%s" % (self.url, "api/json"), 'CSRF') + + return csrf_data["useCrumbs"] + + def _get_json_data(self, url, what, **kwargs): + # Get the JSON data + r = self._get_url_data(url, what, **kwargs) + + # Parse the JSON data + try: + json_data = json.load(r) + except Exception: + e = get_exception() + self.module.fail_json( + msg="Cannot parse %s JSON data." % what, + details=e.message) + + return json_data + + def _get_url_data( + self, url, what=None, msg_status=None, msg_exception=None, + **kwargs): + # Compose default messages + if msg_status is None: + msg_status = "Cannot get %s" % what + + if msg_exception is None: + msg_exception = "Retrieval of %s failed." % what + + # Get the URL data + try: + response, info = fetch_url( + self.module, url, timeout=self.timeout, **kwargs) + + if info['status'] != 200: + self.module.fail_json(msg=msg_status, details=info['msg']) + except Exception: + e = get_exception() + self.module.fail_json(msg=msg_exception, details=e.message) + + return response + + def _get_crumb(self): + crumb_data = self._get_json_data( + "%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb') + + if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data: + ret = { + crumb_data['crumbRequestField']: crumb_data['crumb'] + } + else: + self.module.fail_json( + msg="Required fields not found in the Crum response.", + details=crumb_data) + + return ret + + def _get_installed_plugins(self): + plugins_data = self._get_json_data( + "%s/%s" % (self.url, "pluginManager/api/json?depth=1"), + 'list of plugins') + + # Check if we got valid data + if 'plugins' not in plugins_data: + self.module.fail_json(msg="No valid plugin data found.") + + # Create final list of installed/pined plugins + self.is_installed = False + self.is_pinned = False + self.is_enabled = False + + for p in plugins_data['plugins']: + if p['shortName'] == self.params['name']: + self.is_installed = True + + if p['pinned']: + self.is_pinned = True + + if p['enabled']: + self.is_enabled = True + + break + + def install(self): + changed = False + plugin_file = ( + '%s/plugins/%s.jpi' % ( + self.params['jenkins_home'], + self.params['name'])) + + if not self.is_installed and self.params['version'] is None: + if not self.module.check_mode: + # Install the plugin (with dependencies) + install_script = ( + 'd = Jenkins.instance.updateCenter.getPlugin("%s")' + '.deploy(); d.get();' % self.params['name']) + + if self.params['with_dependencies']: + install_script = ( + 'Jenkins.instance.updateCenter.getPlugin("%s")' + '.getNeededDependencies().each{it.deploy()}; %s' % ( + self.params['name'], install_script)) + + script_data = { + 'script': install_script + } + script_data.update(self.crumb) + data = urllib.urlencode(script_data) + + # Send the installation request + r = self._get_url_data( + "%s/scriptText" % self.url, + msg_status="Cannot install plugin.", + msg_exception="Plugin installation has failed.", + data=data) + + changed = 
True + else: + # Check if the plugin directory exists + if not os.path.isdir(self.params['jenkins_home']): + self.module.fail_json( + msg="Jenkins home directory doesn't exist.") + + md5sum_old = None + if os.path.isfile(plugin_file): + # Make the checksum of the currently installed plugin + md5sum_old = hashlib.md5( + open(plugin_file, 'rb').read()).hexdigest() + + if self.params['version'] in [None, 'latest']: + # Take latest version + plugin_url = ( + "%s/latest/%s.hpi" % ( + self.params['updates_url'], + self.params['name'])) + else: + # Take specific version + plugin_url = ( + "{0}/download/plugins/" + "{1}/{2}/{1}.hpi".format( + self.params['updates_url'], + self.params['name'], + self.params['version'])) + + if ( + self.params['updates_expiration'] == 0 or + self.params['version'] not in [None, 'latest'] or + md5sum_old is None): + + # Download the plugin file directly + r = self._download_plugin(plugin_url) + + # Write downloaded plugin into file if checksums don't match + if md5sum_old is None: + # No previously installed plugin + if not self.module.check_mode: + self._write_file(plugin_file, r) + + changed = True + else: + # Get data for the MD5 + data = r.read() + + # Make new checksum + md5sum_new = hashlib.md5(data).hexdigest() + + # If the checksum is different from the currently installed + # plugin, store the new plugin + if md5sum_old != md5sum_new: + if not self.module.check_mode: + self._write_file(plugin_file, data) + + changed = True + else: + # Check for update from the updates JSON file + plugin_data = self._download_updates() + + try: + sha1_old = hashlib.sha1(open(plugin_file, 'rb').read()) + except Exception: + e = get_exception() + self.module.fail_json( + msg="Cannot calculate SHA1 of the old plugin.", + details=e.message) + + sha1sum_old = base64.b64encode(sha1_old.digest()) + + # If the latest version changed, download it + if sha1sum_old != plugin_data['sha1']: + if not self.module.check_mode: + r = self._download_plugin(plugin_url) + self._write_file(plugin_file, r) + + changed = True + + # Change file attributes if needed + if os.path.isfile(plugin_file): + params = { + 'dest': plugin_file + } + params.update(self.params) + file_args = self.module.load_file_common_arguments(params) + + if not self.module.check_mode: + # Not sure how to run this in the check mode + changed = self.module.set_fs_attributes_if_different( + file_args, changed) + else: + # See the comment above + changed = True + + return changed + + def _download_updates(self): + updates_filename = 'jenkins-plugin-cache.json' + updates_dir = os.path.expanduser('~/.ansible/tmp') + updates_file = "%s/%s" % (updates_dir, updates_filename) + download_updates = True + + # Check if we need to download new updates file + if os.path.isfile(updates_file): + # Get timestamp when the file was changed last time + ts_file = os.stat(updates_file).st_mtime + ts_now = time.time() + + if ts_now - ts_file < self.params['updates_expiration']: + download_updates = False + + updates_file_orig = updates_file + + # Download the updates file if needed + if download_updates: + url = "%s/update-center.json" % self.params['updates_url'] + + # Get the data + r = self._get_url_data( + url, + msg_status="Remote updates not found.", + msg_exception="Updates download failed.") + + # Write the updates file + updates_file = tempfile.mkstemp() + + try: + fd = open(updates_file, 'wb') + except IOError: + e = get_exception() + self.module.fail_json( + msg="Cannot open the tmp updates file %s." 
% updates_file, + details=str(e)) + + fd.write(r.read()) + + try: + fd.close() + except IOError: + e = get_exception() + self.module.fail_json( + msg="Cannot close the tmp updates file %s." % updates_file, + detail=str(e)) + + # Open the updates file + try: + f = open(updates_file) + except IOError: + e = get_exception() + self.module.fail_json( + msg="Cannot open temporal updates file.", + details=str(e)) + + i = 0 + for line in f: + # Read only the second line + if i == 1: + try: + data = json.loads(line) + except Exception: + e = get_exception() + self.module.fail_json( + msg="Cannot load JSON data from the tmp updates file.", + details=e.message) + + break + + i += 1 + + # Move the updates file to the right place if we could read it + if download_updates: + # Make sure the destination directory exists + if not os.path.isdir(updates_dir): + try: + os.makedirs(updates_dir, int('0700', 8)) + except OSError: + e = get_exception() + self.module.fail_json( + msg="Cannot create temporal directory.", + details=e.message) + + self.module.atomic_move(updates_file, updates_file_orig) + + # Check if we have the plugin data available + if 'plugins' not in data or self.params['name'] not in data['plugins']: + self.module.fail_json( + msg="Cannot find plugin data in the updates file.") + + return data['plugins'][self.params['name']] + + def _download_plugin(self, plugin_url): + # Download the plugin + r = self._get_url_data( + plugin_url, + msg_status="Plugin not found.", + msg_exception="Plugin download failed.") + + return r + + def _write_file(self, f, data): + # Store the plugin into a temp file and then move it + tmp_f = tempfile.mkstemp() + + try: + fd = open(tmp_f, 'wb') + except IOError: + e = get_exception() + self.module.fail_json( + msg='Cannot open the temporal plugin file %s.' % tmp_f, + details=str(e)) + + if isinstance(data, str): + d = data + else: + d = data.read() + + fd.write(d) + + try: + fd.close() + except IOError: + e = get_exception() + self.module.fail_json( + msg='Cannot close the temporal plugin file %s.' 
% tmp_f, + details=str(e)) + + # Move the file onto the right place + self.module.atomic_move(tmp_f, f) + + def uninstall(self): + changed = False + + # Perform the action + if self.is_installed: + if not self.module.check_mode: + self._pm_query('doUninstall', 'Uninstallation') + + changed = True + + return changed + + def pin(self): + return self._pinning('pin') + + def unpin(self): + return self._pinning('unpin') + + def _pinning(self, action): + changed = False + + # Check if the plugin is pinned/unpinned + if ( + action == 'pin' and not self.is_pinned or + action == 'unpin' and self.is_pinned): + + # Perform the action + if not self.module.check_mode: + self._pm_query(action, "%sning" % action.capitalize()) + + changed = True + + return changed + + def enable(self): + return self._enabling('enable') + + def disable(self): + return self._enabling('disable') + + def _enabling(self, action): + changed = False + + # Check if the plugin is pinned/unpinned + if ( + action == 'enable' and not self.is_enabled or + action == 'disable' and self.is_enabled): + + # Perform the action + if not self.module.check_mode: + self._pm_query( + "make%sd" % action.capitalize(), + "%sing" % action[:-1].capitalize()) + + changed = True + + return changed + + def _pm_query(self, action, msg): + url = "%s/pluginManager/plugin/%s/%s" % ( + self.params['url'], self.params['name'], action) + data = urllib.urlencode(self.crumb) + + # Send the request + self._get_url_data( + url, + msg_status="Plugin not found. %s" % url, + msg_exception="%s has failed." % msg, + data=data) + + +def main(): + # Module arguments + argument_spec = url_argument_spec() + argument_spec.update( + group=dict(default='jenkins'), + jenkins_home=dict(default='/var/lib/jenkins'), + mode=dict(default='0644', type='raw'), + name=dict(required=True), + owner=dict(default='jenkins'), + params=dict(type='dict'), + state=dict( + choices=[ + 'present', + 'absent', + 'pinned', + 'unpinned', + 'enabled', + 'disabled', + 'latest'], + default='present'), + timeout=dict(default=30, type="int"), + updates_expiration=dict(default=86400, type="int"), + updates_url=dict(default='https://updates.jenkins-ci.org'), + url=dict(default='http://localhost:8080'), + url_password=dict(no_log=True), + version=dict(), + with_dependencies=dict(default=True, type='bool'), + ) + # Module settings + module = AnsibleModule( + argument_spec=argument_spec, + add_file_common_args=True, + supports_check_mode=True, + ) + + # Update module parameters by user's parameters if defined + if 'params' in module.params and isinstance(module.params['params'], dict): + module.params.update(module.params['params']) + # Remove the params + module.params.pop('params', None) + + # Force basic authentication + module.params['force_basic_auth'] = True + + # Convert timeout to float + try: + module.params['timeout'] = float(module.params['timeout']) + except ValueError: + e = get_exception() + module.fail_json( + msg='Cannot convert %s to float.' 
% module.params['timeout'], + details=str(e)) + + # Set version to latest if state is latest + if module.params['state'] == 'latest': + module.params['state'] = 'present' + module.params['version'] = 'latest' + + # Create some shortcuts + name = module.params['name'] + state = module.params['state'] + + # Initial change state of the task + changed = False + + # Instantiate the JenkinsPlugin object + jp = JenkinsPlugin(module) + + # Perform action depending on the requested state + if state == 'present': + changed = jp.install() + elif state == 'absent': + changed = jp.uninstall() + elif state == 'pinned': + changed = jp.pin() + elif state == 'unpinned': + changed = jp.unpin() + elif state == 'enabled': + changed = jp.enable() + elif state == 'disabled': + changed = jp.disable() + + # Print status of the change + module.exit_json(changed=changed, plugin=name, state=state) + + +if __name__ == '__main__': + main() diff --git a/web_infrastructure/jira.py b/web_infrastructure/jira.py old mode 100644 new mode 100755 index 79cfb72d4a7..aca751801c4 --- a/web_infrastructure/jira.py +++ b/web_infrastructure/jira.py @@ -20,6 +20,10 @@ # along with Ansible. If not, see . # +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = """ module: jira version_added: "1.6" @@ -91,6 +95,24 @@ description: - Sets the assignee on create or transition operations. Note not all transitions will allow this. + linktype: + required: false + version_added: 2.3 + description: + - Set type of link, when action 'link' selected + + inwardissue: + required: false + version_added: 2.3 + description: + - set issue from which link will be created + + outwardissue: + required: false + version_added: 2.3 + description: + - set issue to which link will be created + fields: required: false description: @@ -105,64 +127,110 @@ EXAMPLES = """ # Create a new issue and add a comment to it: - name: Create an issue - jira: uri={{server}} username={{user}} password={{pass}} - project=ANS operation=create - summary="Example Issue" description="Created using Ansible" issuetype=Task + jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + project: ANS + operation: create + summary: Example Issue + description: Created using Ansible + issuetype: Task register: issue - name: Comment on issue - jira: uri={{server}} username={{user}} password={{pass}} - issue={{issue.meta.key}} operation=comment - comment="A comment added by Ansible" + jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: comment + comment: A comment added by Ansible # Assign an existing issue using edit - name: Assign an issue using free-form fields - jira: uri={{server}} username={{user}} password={{pass}} - issue={{issue.meta.key}} operation=edit - assignee=ssmith + jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key}}' + operation: edit + assignee: ssmith # Create an issue with an existing assignee - name: Create an assigned issue - jira: uri={{server}} username={{user}} password={{pass}} - project=ANS operation=create - summary="Assigned issue" description="Created and assigned using Ansible" - issuetype=Task assignee=ssmith - -# Edit an issue using free-form fields + jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + project: ANS + operation: create + summary: Assigned issue + description: Created and assigned using Ansible + issuetype: Task + 
assignee: ssmith + +# Edit an issue - name: Set the labels on an issue using free-form fields - jira: uri={{server}} username={{user}} password={{pass}} - issue={{issue.meta.key}} operation=edit - args: { fields: {labels: ["autocreated", "ansible"]}} - -- name: Set the labels on an issue, YAML version - jira: uri={{server}} username={{user}} password={{pass}} - issue={{issue.meta.key}} operation=edit - args: - fields: - labels: - - "autocreated" - - "ansible" - - "yaml" + jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: edit + args: + fields: + labels: + - autocreated + - ansible # Retrieve metadata for an issue and use it to create an account - name: Get an issue - jira: uri={{server}} username={{user}} password={{pass}} - project=ANS operation=fetch issue="ANS-63" + jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + project: ANS + operation: fetch + issue: ANS-63 register: issue - name: Create a unix account for the reporter - sudo: true - user: name="{{issue.meta.fields.creator.name}}" comment="{{issue.meta.fields.creator.displayName}}" + become: true + user: + name: '{{ issue.meta.fields.creator.name }}' + comment: '{{issue.meta.fields.creator.displayName }}' + +- name: Create link from HSP-1 to MKY-1 + jira: uri={{server}} username={{user}} password={{pass}} operation=link + linktype=Relate inwardissue=HSP-1 outwardissue=MKY-1 # Transition an issue by target status - name: Close the issue - jira: uri={{server}} username={{user}} password={{pass}} - issue={{issue.meta.key}} operation=transition status="Done" + jira: + uri: '{{ server }}' + username: '{{ user }}' + password: '{{ pass }}' + issue: '{{ issue.meta.key }}' + operation: transition + status: Done """ -import json +try: + import json +except ImportError: + try: + import simplejson as json + except ImportError: + # Let snippet from module_utils/basic.py return a proper error in this case + pass + import base64 +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +from ansible.module_utils.pycompat24 import get_exception + def request(url, user, passwd, data=None, method=None): if data: data = json.dumps(data) @@ -179,7 +247,7 @@ def request(url, user, passwd, data=None, method=None): headers={'Content-Type':'application/json', 'Authorization':"Basic %s" % auth}) - if info['status'] not in (200, 204): + if info['status'] not in (200, 201, 204): module.fail_json(msg=info['msg']) body = response.read() @@ -273,13 +341,26 @@ def transition(restbase, user, passwd, params): return ret +def link(restbase, user, passwd, params): + data = { + 'type': { 'name': params['linktype'] }, + 'inwardIssue': { 'key': params['inwardissue'] }, + 'outwardIssue': { 'key': params['outwardissue'] }, + } + + url = restbase + '/issueLink/' + + ret = post(url, user, passwd, data) + + return ret # Some parameters are required depending on the operation: OP_REQUIRED = dict(create=['project', 'issuetype', 'summary', 'description'], comment=['issue', 'comment'], edit=[], fetch=['issue'], - transition=['status']) + transition=['status'], + link=['linktype', 'inwardissue', 'outwardissue']) def main(): @@ -287,7 +368,7 @@ def main(): module = AnsibleModule( argument_spec=dict( uri=dict(required=True), - operation=dict(choices=['create', 'comment', 'edit', 'fetch', 'transition'], + operation=dict(choices=['create', 'comment', 'edit', 'fetch', 'transition', 'link'], aliases=['command'], required=True), username=dict(required=True), 
password=dict(required=True), @@ -299,7 +380,10 @@ comment=dict(), status=dict(), assignee=dict(), - fields=dict(default={}) + fields=dict(default={}, type='dict'), + linktype=dict(), + inwardissue=dict(), + outwardissue=dict(), ), supports_check_mode=False ) @@ -335,13 +419,13 @@ ret = method(restbase, user, passwd, module.params) - except Exception, e: + except Exception: + e = get_exception() return module.fail_json(msg=e.message) module.exit_json(changed=True, meta=ret) -from ansible.module_utils.basic import * -from ansible.module_utils.urls import * -main() +if __name__ == '__main__': + main() diff --git a/web_infrastructure/letsencrypt.py b/web_infrastructure/letsencrypt.py new file mode 100644 index 00000000000..a8541a6d77a --- /dev/null +++ b/web_infrastructure/letsencrypt.py @@ -0,0 +1,805 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016 Michael Gruener +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import binascii +import copy +import locale +import textwrap +from datetime import datetime + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: letsencrypt +author: "Michael Gruener (@mgruener)" +version_added: "2.2" +short_description: Create SSL certificates with Let's Encrypt +description: + - "Create and renew SSL certificates with Let's Encrypt. Let’s Encrypt is a + free, automated, and open certificate authority (CA), run for the + public’s benefit. For details see U(https://letsencrypt.org). The current + implementation supports the http-01, tls-sni-02 and dns-01 challenges." + - "To use this module, it has to be executed at least twice, either as two + different tasks in the same run or during multiple runs." + - "Between these two tasks you have to fulfill the required steps for the + chosen challenge by whatever means necessary. For http-01 that means + creating the necessary challenge file on the destination webserver. For + dns-01 the necessary DNS record has to be created. tls-sni-02 requires + you to create an SSL certificate with the appropriate subjectAlternativeNames. + It is I(not) the responsibility of this module to perform these steps." + - "For details on how to fulfill these challenges, you might have to read through + U(https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7)" + - "Although the defaults are chosen so that the module can be used with + the Let's Encrypt CA, the module can be used with any service using the ACME + protocol." +requirements: + - "python >= 2.6" + - openssl +options: + account_key: + description: + - "File containing the Let's Encrypt account RSA key." + - "Can be created with C(openssl genrsa ...)." + required: true + account_email: + description: + - "The email address associated with this account." + - "It will be used for certificate expiration warnings."
+ required: false + default: null + acme_directory: + description: + - "The ACME directory to use. This is the entry point URL to access + the CA server API." + - "For safety reasons the default is set to the Let's Encrypt staging server. + This will create technically correct, but untrusted certificates." + required: false + default: https://acme-staging.api.letsencrypt.org/directory + agreement: + description: + - "URI to a terms of service document you agree to when using the + ACME service at C(acme_directory)." + required: false + default: 'https://letsencrypt.org/documents/LE-SA-v1.1.1-August-1-2016.pdf' + challenge: + description: The challenge to be performed. + required: false + choices: ['http-01', 'dns-01', 'tls-sni-02'] + default: 'http-01' + csr: + description: + - "File containing the CSR for the new certificate." + - "Can be created with C(openssl req ...)." + - "The CSR may contain multiple Subject Alternative Names, but each one + will lead to an individual challenge that must be fulfilled for the + CSR to be signed." + required: true + aliases: ['src'] + data: + description: + - "The data to validate ongoing challenges." + - "The value that must be used here will be provided by a previous use + of this module." + required: false + default: null + dest: + description: The destination file for the certificate. + required: true + aliases: ['cert'] + remaining_days: + description: + - "The number of days the certificate must have left being valid. + If C(cert_days < remaining_days), then it will be renewed. + If the certificate is not renewed, module return values will not + include C(challenge_data)." + required: false + default: 10 +''' + +EXAMPLES = ''' +- letsencrypt: + account_key: /etc/pki/cert/private/account.key + csr: /etc/pki/cert/csr/sample.com.csr + dest: /etc/httpd/ssl/sample.com.crt + register: sample_com_challenge + +# perform the necessary steps to fulfill the challenge +# for example: +# +# - copy: +# dest: /var/www/html/{{ sample_com_challenge['challenge_data']['sample.com']['http-01']['resource'] }} +# content: "{{ sample_com_challenge['challenge_data']['sample.com']['http-01']['resource_value'] }}" +# when: sample_com_challenge|changed + +- letsencrypt: + account_key: /etc/pki/cert/private/account.key + csr: /etc/pki/cert/csr/sample.com.csr + dest: /etc/httpd/ssl/sample.com.crt + data: "{{ sample_com_challenge }}" +''' + +RETURN = ''' +cert_days: + description: the number of days the certificate remains valid. + returned: success +challenge_data: + description: per domain / challenge type challenge data + returned: changed + type: dictionary + contains: + resource: + description: the challenge resource that must be created for validation + returned: changed + type: string + sample: .well-known/acme-challenge/evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA + resource_value: + description: the value the resource has to produce for the validation + returned: changed + type: string + sample: IlirfxKKXA...17Dt3juxGJ-PCt92wr-oA +authorizations: + description: ACME authorization data. + returned: changed + type: list + contains: + authorization: + description: ACME authorization object.
See https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.1.2 + returned: success + type: dict +''' + +def nopad_b64(data): + return base64.urlsafe_b64encode(data).decode('utf8').replace("=", "") + +def simple_get(module,url): + resp, info = fetch_url(module, url, method='GET') + + result = None + content = None + try: + content = resp.read() + except AttributeError: + if info['body']: + content = info['body'] + + if content: + if info['content-type'].startswith('application/json'): + try: + result = module.from_json(content.decode('utf8')) + except ValueError: + module.fail_json(msg="Failed to parse the ACME response: {0} {1}".format(url,content)) + else: + result = content + + if info['status'] >= 400: + module.fail_json(msg="ACME request failed: CODE: {0} RESULT:{1}".format(info['status'],result)) + return result + +def get_cert_days(module,cert_file): + ''' + Return the days the certificate in cert_file remains valid and -1 + if the file was not found. + ''' + if not os.path.exists(cert_file): + return -1 + + openssl_bin = module.get_bin_path('openssl', True) + openssl_cert_cmd = [openssl_bin, "x509", "-in", cert_file, "-noout", "-text"] + _, out, _ = module.run_command(openssl_cert_cmd,check_rc=True) + try: + not_after_str = re.search(r"\s+Not After\s*:\s+(.*)",out.decode('utf8')).group(1) + not_after = datetime.datetime.fromtimestamp(time.mktime(time.strptime(not_after_str,'%b %d %H:%M:%S %Y %Z'))) + except AttributeError: + module.fail_json(msg="No 'Not after' date found in {0}".format(cert_file)) + except ValueError: + module.fail_json(msg="Failed to parse 'Not after' date of {0}".format(cert_file)) + now = datetime.datetime.utcnow() + return (not_after - now).days + +# function source: network/basics/uri.py +def write_file(module, dest, content): + ''' + Write content to destination file dest, only if the content + has changed. + ''' + changed = False + # create a tempfile holding the new content + _, tmpsrc = tempfile.mkstemp() + f = open(tmpsrc, 'wb') + try: + f.write(content) + except Exception as err: + os.remove(tmpsrc) + module.fail_json(msg="failed to create temporary content file: %s" % str(err)) + f.close() + checksum_src = None + checksum_dest = None + # raise an error if there is no tmpsrc file + if not os.path.exists(tmpsrc): + os.remove(tmpsrc) + module.fail_json(msg="Source %s does not exist" % (tmpsrc)) + if not os.access(tmpsrc, os.R_OK): + os.remove(tmpsrc) + module.fail_json( msg="Source %s not readable" % (tmpsrc)) + checksum_src = module.sha1(tmpsrc) + # check if there is no dest file + if os.path.exists(dest): + # raise an error if copy has no permission on dest + if not os.access(dest, os.W_OK): + os.remove(tmpsrc) + module.fail_json(msg="Destination %s not writable" % (dest)) + if not os.access(dest, os.R_OK): + os.remove(tmpsrc) + module.fail_json(msg="Destination %s not readable" % (dest)) + checksum_dest = module.sha1(dest) + else: + if not os.access(os.path.dirname(dest), os.W_OK): + os.remove(tmpsrc) + module.fail_json(msg="Destination dir %s not writable" % (os.path.dirname(dest))) + if checksum_src != checksum_dest: + try: + shutil.copyfile(tmpsrc, dest) + changed = True + except Exception as err: + os.remove(tmpsrc) + module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err))) + os.remove(tmpsrc) + return changed + +class ACMEDirectory(object): + ''' + The ACME server directory. Gives access to the available resources + and the Replay-Nonce for a given URI.
This only works for + URIs that permit GET requests (so normally not the ones that + require authentication). + https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.2 + ''' + def __init__(self, module): + self.module = module + self.directory_root = module.params['acme_directory'] + + self.directory = simple_get(self.module,self.directory_root) + + def __getitem__(self, key): return self.directory[key] + + def get_nonce(self,resource=None): + url = self.directory_root + if resource is not None: + url = resource + _, info = fetch_url(self.module, url, method='HEAD') + if info['status'] != 200: + self.module.fail_json(msg="Failed to get replay-nonce, got status {0}".format(info['status'])) + return info['replay-nonce'] + +class ACMEAccount(object): + ''' + ACME account object. Handles the authorized communication with the + ACME server. Provides access to account-bound information like + the currently active authorizations and valid certificates. + ''' + def __init__(self,module): + self.module = module + self.agreement = module.params['agreement'] + self.key = module.params['account_key'] + self.email = module.params['account_email'] + self.data = module.params['data'] + self.directory = ACMEDirectory(module) + self.uri = None + self.changed = False + + self._authz_list_uri = None + self._certs_list_uri = None + + if not os.path.exists(self.key): + module.fail_json(msg="Account key %s not found" % (self.key)) + + self._openssl_bin = module.get_bin_path('openssl', True) + + pub_hex, pub_exp = self._parse_account_key(self.key) + self.jws_header = { + "alg": "RS256", + "jwk": { + "e": nopad_b64(binascii.unhexlify(pub_exp.encode("utf-8"))), + "kty": "RSA", + "n": nopad_b64(binascii.unhexlify(re.sub(r"(\s|:)", "", pub_hex).encode("utf-8"))), + }, + } + self.init_account() + + def get_keyauthorization(self,token): + ''' + Returns the key authorization for the given token + https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.1 + ''' + accountkey_json = json.dumps(self.jws_header['jwk'], sort_keys=True, separators=(',', ':')) + thumbprint = nopad_b64(hashlib.sha256(accountkey_json.encode('utf8')).digest()) + return "{0}.{1}".format(token, thumbprint) + + def _parse_account_key(self,key): + ''' + Parses an RSA key file in PEM format and returns the modulus + and public exponent of the key + ''' + openssl_keydump_cmd = [self._openssl_bin, "rsa", "-in", key, "-noout", "-text"] + _, out, _ = self.module.run_command(openssl_keydump_cmd,check_rc=True) + + pub_hex, pub_exp = re.search( + r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)", + out.decode('utf8'), re.MULTILINE|re.DOTALL).groups() + pub_exp = "{0:x}".format(int(pub_exp)) + if len(pub_exp) % 2: + pub_exp = "0{0}".format(pub_exp) + + return pub_hex, pub_exp + + def send_signed_request(self, url, payload): + ''' + Sends a JWS signed HTTP POST request to the ACME server and returns + the response as dictionary + https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-5.2 + ''' + protected = copy.deepcopy(self.jws_header) + protected["nonce"] = self.directory.get_nonce() + + try: + payload64 = nopad_b64(self.module.jsonify(payload).encode('utf8')) + protected64 = nopad_b64(self.module.jsonify(protected).encode('utf8')) + except Exception as e: + self.module.fail_json(msg="Failed to encode payload / headers as JSON: {0}".format(e)) + + openssl_sign_cmd = [self._openssl_bin, "dgst", "-sha256", "-sign", self.key] + sign_payload = "{0}.{1}".format(protected64, payload64).encode('utf8') + _, out, _ =
self.module.run_command(openssl_sign_cmd,data=sign_payload,check_rc=True, binary_data=True) + + data = self.module.jsonify({ + "header": self.jws_header, + "protected": protected64, + "payload": payload64, + "signature": nopad_b64(out), + }) + + resp, info = fetch_url(self.module, url, data=data, method='POST') + result = None + content = None + try: + content = resp.read() + except AttributeError: + if info['body']: + content = info['body'] + + if content: + if info['content-type'].startswith('application/json'): + try: + result = self.module.from_json(content.decode('utf8')) + except ValueError: + self.module.fail_json(msg="Failed to parse the ACME response: {0} {1}".format(url,content)) + else: + result = content + + return result,info + + def _new_reg(self,contact=[]): + ''' + Registers a new ACME account. Returns True if the account was + created and False if it already existed (i.e. it was not newly + created) + https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.3 + ''' + if self.uri is not None: + return True + + new_reg = { + 'resource': 'new-reg', + 'agreement': self.agreement, + 'contact': contact + } + + result, info = self.send_signed_request(self.directory['new-reg'], new_reg) + if 'location' in info: + self.uri = info['location'] + + if info['status'] in [200,201]: + # Account did not exist + self.changed = True + return True + elif info['status'] == 409: + # Account did exist + return False + else: + self.module.fail_json(msg="Error registering: {0} {1}".format(info['status'], result)) + + def init_account(self): + ''' + Create or update an account on the ACME server. As the only way + (without knowing an account URI) to test if an account exists + is to try and create one with the provided account key, this + method will always result in an account being present (except + on error situations). If the account already exists, it will + update the contact information. + https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.3 + ''' + + contact = [] + if self.email: + contact.append('mailto:' + self.email) + + # if this is not a new registration (i.e. an existing account) + if not self._new_reg(contact): + # pre-existing account, get account data...
+ result, _ = self.send_signed_request(self.uri, {'resource':'reg'}) + + # XXX: letsencrypt/boulder#1435 + if 'authorizations' in result: + self._authz_list_uri = result['authorizations'] + if 'certificates' in result: + self._certs_list_uri = result['certificates'] + + # ...and check if update is necessary + do_update = False + if 'contact' in result: + if cmp(contact,result['contact']) != 0: + do_update = True + elif len(contact) > 0: + do_update = True + + if do_update: + upd_reg = result + upd_reg['contact'] = contact + result, _ = self.send_signed_request(self.uri, upd_reg) + self.changed = True + + def get_authorizations(self): + ''' + Return a list of currently active authorizations + https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.4 + ''' + authz_list = {'authorizations': []} + if self._authz_list_uri is None: + # XXX: letsencrypt/boulder#1435 + # Workaround, retrieve the known authorization urls + # from the data attribute + # It is also a way to limit the queried authorizations, which + # might become relevant at some point + if (self.data is not None) and ('authorizations' in self.data): + for auth in self.data['authorizations']: + authz_list['authorizations'].append(auth['uri']) + else: + return [] + else: + # TODO: need to handle pagination + authz_list = simple_get(self.module, self._authz_list_uri) + + authz = [] + for auth_uri in authz_list['authorizations']: + auth = simple_get(self.module,auth_uri) + auth['uri'] = auth_uri + authz.append(auth) + + return authz + +class ACMEClient(object): + ''' + ACME client class. Uses an ACME account object and a CSR to + start and validate ACME challenges and download the respective + certificates. + ''' + def __init__(self,module): + self.module = module + self.challenge = module.params['challenge'] + self.csr = module.params['csr'] + self.dest = module.params['dest'] + self.account = ACMEAccount(module) + self.directory = self.account.directory + self.authorizations = self.account.get_authorizations() + self.cert_days = -1 + self.changed = self.account.changed + + if not os.path.exists(self.csr): + module.fail_json(msg="CSR %s not found" % (self.csr)) + + self._openssl_bin = module.get_bin_path('openssl', True) + self.domains = self._get_csr_domains() + + def _get_csr_domains(self): + ''' + Parse the CSR and return the list of requested domains + ''' + openssl_csr_cmd = [self._openssl_bin, "req", "-in", self.csr, "-noout", "-text"] + _, out, _ = self.module.run_command(openssl_csr_cmd,check_rc=True) + + domains = set([]) + common_name = re.search(r"Subject:.*? CN=([^\s,;/]+)", out.decode('utf8')) + if common_name is not None: + domains.add(common_name.group(1)) + subject_alt_names = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE|re.DOTALL) + if subject_alt_names is not None: + for san in subject_alt_names.group(1).split(", "): + if san.startswith("DNS:"): + domains.add(san[4:]) + return domains + + + def _get_domain_auth(self,domain): + ''' + Return the first authorization object for the given domain. + Return None if no authorization for the given domain was found. + ''' + if self.authorizations is None: + return None + + for auth in self.authorizations: + if (auth['identifier']['type'] == 'dns') and (auth['identifier']['value'] == domain): + return auth + return None + + def _add_or_update_auth(self,auth): + ''' + Add or update the given authorization in the global authorizations list.
+ Return True if the auth was updated/added and False if no change was + necessary. + ''' + for index,cur_auth in enumerate(self.authorizations): + if (cur_auth['uri'] == auth['uri']): + # does the auth parameter contain updated data? + if cmp(cur_auth,auth) != 0: + # yes, update our current authorization list + self.authorizations[index] = auth + return True + else: + return False + # this is a new authorization, add it to the list of current + # authorizations + self.authorizations.append(auth) + return True + + def _new_authz(self,domain): + ''' + Create a new authorization for the given domain. + Return the authorization object of the new authorization + https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.4 + ''' + if self.account.uri is None: + return + + new_authz = { + "resource": "new-authz", + "identifier": {"type": "dns", "value": domain}, + } + + result, info = self.account.send_signed_request(self.directory['new-authz'], new_authz) + if info['status'] not in [200,201]: + self.module.fail_json(msg="Error requesting challenges: CODE: {0} RESULT: {1}".format(info['status'], result)) + else: + result['uri'] = info['location'] + return result + + def _get_challenge_data(self,auth): + ''' + Returns a dict with the data for all proposed (and supported) challenges + of the given authorization. + ''' + + data = {} + # no need to choose a specific challenge here as this module + # is not responsible for fulfilling the challenges. Calculate + # and return the required information for each challenge. + for challenge in auth['challenges']: + type = challenge['type'] + token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token']) + keyauthorization = self.account.get_keyauthorization(token) + + # NOTE: tls-sni-01 is not supported by choice; it is + # too complex to be useful and tls-sni-02 is an alternative + # as soon as it is implemented server side + if type == 'http-01': + # https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.2 + resource = '.well-known/acme-challenge/' + token + value = keyauthorization + elif type == 'tls-sni-02': + # https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.3 + token_digest = hashlib.sha256(token.encode('utf8')).hexdigest() + ka_digest = hashlib.sha256(keyauthorization.encode('utf8')).hexdigest() + len_token_digest = len(token_digest) + len_ka_digest = len(ka_digest) + resource = 'subjectAlternativeNames' + value = [ + "{0}.{1}.token.acme.invalid".format(token_digest[:len_token_digest/2],token_digest[len_token_digest/2:]), + "{0}.{1}.ka.acme.invalid".format(ka_digest[:len_ka_digest/2],ka_digest[len_ka_digest/2:]), + ] + elif type == 'dns-01': + # https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-7.4 + resource = '_acme-challenge' + value = nopad_b64(hashlib.sha256(keyauthorization).digest()).encode('utf8') + else: + continue + + data[type] = { 'resource': resource, 'resource_value': value } + return data + + def _validate_challenges(self,auth): + ''' + Validate the authorization provided in the auth dict. Returns True + when the validation was successful and False when it was not.
+ ''' + for challenge in auth['challenges']: + if self.challenge != challenge['type']: + continue + + uri = challenge['uri'] + token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token']) + keyauthorization = self.account.get_keyauthorization(token) + + challenge_response = { + "resource": "challenge", + "keyAuthorization": keyauthorization, + } + result, info = self.account.send_signed_request(uri, challenge_response) + if info['status'] not in [200,202]: + self.module.fail_json(msg="Error validating challenge: CODE: {0} RESULT: {1}".format(info['status'], result)) + + status = '' + + while status not in ['valid','invalid','revoked']: + result = simple_get(self.module,auth['uri']) + result['uri'] = auth['uri'] + if self._add_or_update_auth(result): + self.changed = True + # draft-ietf-acme-acme-02 + # "status (required, string): ... + # If this field is missing, then the default value is "pending"." + if 'status' not in result: + status = 'pending' + else: + status = result['status'] + time.sleep(2) + + if status == 'invalid': + error_details = '' + # multiple challenges could have failed at this point, gather error + # details for all of them before failing + for challenge in result['challenges']: + if challenge['status'] == 'invalid': + error_details += ' CHALLENGE: {0}'.format(challenge['type']) + if 'error' in challenge: + error_details += ' DETAILS: {0};'.format(challenge['error']['detail']) + else: + error_details += ';' + self.module.fail_json(msg="Authorization for {0} returned invalid: {1}".format(result['identifier']['value'],error_details)) + + return status == 'valid' + + def _new_cert(self): + ''' + Create a new certificate based on the csr. + Return the certificate object as dict + https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.5 + ''' + openssl_csr_cmd = [self._openssl_bin, "req", "-in", self.csr, "-outform", "DER"] + _, out, _ = self.module.run_command(openssl_csr_cmd,check_rc=True) + + new_cert = { + "resource": "new-cert", + "csr": nopad_b64(out), + } + result, info = self.account.send_signed_request(self.directory['new-cert'], new_cert) + if info['status'] not in [200,201]: + self.module.fail_json(msg="Error new cert: CODE: {0} RESULT: {1}".format(info['status'], result)) + else: + return {'cert': result, 'uri': info['location']} + + def _der_to_pem(self,der_cert): + ''' + Convert the DER format certificate in der_cert to a PEM format + certificate and return it. + ''' + return """-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n""".format( + "\n".join(textwrap.wrap(base64.b64encode(der_cert).decode('utf8'), 64))) + + def do_challenges(self): + ''' + Create new authorizations for all domains of the CSR and return + the challenge details for the chosen challenge type. + ''' + data = {} + for domain in self.domains: + auth = self._get_domain_auth(domain) + if auth is None: + new_auth = self._new_authz(domain) + self._add_or_update_auth(new_auth) + data[domain] = self._get_challenge_data(new_auth) + self.changed = True + elif (auth['status'] == 'pending') or ('status' not in auth): + # draft-ietf-acme-acme-02 + # "status (required, string): ... + # If this field is missing, then the default value is "pending"."
+ self._validate_challenges(auth) + # _validate_challenges updates the global authorization dict, + # so get the current version of the authorization we are working + # on to retrieve the challenge data + data[domain] = self._get_challenge_data(self._get_domain_auth(domain)) + + return data + + def get_certificate(self): + ''' + Request a new certificate and write it to the destination file. + Only do this if a destination file was provided and if all authorizations + for the domains of the csr are valid. No return value. + ''' + if self.dest is None: + return + + for domain in self.domains: + auth = self._get_domain_auth(domain) + if auth is None or ('status' not in auth) or (auth['status'] != 'valid'): + return + + cert = self._new_cert() + if cert['cert'] is not None: + pem_cert = self._der_to_pem(cert['cert']) + if write_file(self.module,self.dest,pem_cert): + self.cert_days = get_cert_days(self.module,self.dest) + self.changed = True + +def main(): + module = AnsibleModule( + argument_spec = dict( + account_key = dict(required=True, type='path'), + account_email = dict(required=False, default=None, type='str'), + acme_directory = dict(required=False, default='https://acme-staging.api.letsencrypt.org/directory', type='str'), + agreement = dict(required=False, default='https://letsencrypt.org/documents/LE-SA-v1.1.1-August-1-2016.pdf', type='str'), + challenge = dict(required=False, default='http-01', choices=['http-01', 'dns-01', 'tls-sni-02'], type='str'), + csr = dict(required=True, aliases=['src'], type='path'), + data = dict(required=False, no_log=True, default=None, type='dict'), + dest = dict(required=True, aliases=['cert'], type='path'), + remaining_days = dict(required=False, default=10, type='int'), + ), + supports_check_mode = True, + ) + + # AnsibleModule() changes the locale, so change it back to C because we rely on time.strptime() when parsing certificate dates. + locale.setlocale(locale.LC_ALL, "C") + + cert_days = get_cert_days(module,module.params['dest']) + if cert_days < module.params['remaining_days']: + # If checkmode is active, base the changed state solely on the status + # of the certificate file as all other actions (accessing an account, checking + # the authorization status...) would lead to potential changes of the current + # state + if module.check_mode: + module.exit_json(changed=True,authorizations={}, + challenge_data={},cert_days=cert_days) + else: + client = ACMEClient(module) + client.cert_days = cert_days + data = client.do_challenges() + client.get_certificate() + module.exit_json(changed=client.changed,authorizations=client.authorizations, + challenge_data=data,cert_days=client.cert_days) + else: + module.exit_json(changed=False,cert_days=cert_days) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +if __name__ == '__main__': + main() diff --git a/web_infrastructure/nginx_status_facts.py b/web_infrastructure/nginx_status_facts.py new file mode 100644 index 00000000000..dd2fbd5ee17 --- /dev/null +++ b/web_infrastructure/nginx_status_facts.py @@ -0,0 +1,164 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version.
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: nginx_status_facts +short_description: Retrieve nginx status facts. +description: + - Gathers facts from nginx from a URL having C(stub_status) enabled. +version_added: "2.3" +author: "René Moser (@resmo)" +options: + url: + description: + - URL of the nginx status. + required: true + timeout: + description: + - HTTP connection timeout in seconds. + required: false + default: 10 + +notes: + - See http://nginx.org/en/docs/http/ngx_http_stub_status_module.html for more information. +''' + +EXAMPLES = ''' +# Gather status facts from nginx on localhost +- name: get current http stats + nginx_status_facts: + url: http://localhost/nginx_status + +# Gather status facts from nginx on localhost with a custom timeout of 20 seconds +- name: get current http stats + nginx_status_facts: + url: http://localhost/nginx_status + timeout: 20 +''' + +RETURN = ''' +--- +nginx_status_facts.active_connections: + description: Active connections. + returned: success + type: int + sample: 2340 +nginx_status_facts.accepts: + description: The total number of accepted client connections. + returned: success + type: int + sample: 81769947 +nginx_status_facts.handled: + description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached. + returned: success + type: int + sample: 81769947 +nginx_status_facts.requests: + description: The total number of client requests. + returned: success + type: int + sample: 144332345 +nginx_status_facts.reading: + description: The current number of connections where nginx is reading the request header. + returned: success + type: int + sample: 0 +nginx_status_facts.writing: + description: The current number of connections where nginx is writing the response back to the client. + returned: success + type: int + sample: 241 +nginx_status_facts.waiting: + description: The current number of idle client connections waiting for a request. + returned: success + type: int + sample: 2092 +nginx_status_facts.data: + description: HTTP response as is.
+ returned: success + type: string + sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n" +''' + +import re +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import fetch_url + + +class NginxStatusFacts(object): + + def __init__(self): + self.url = module.params.get('url') + self.timeout = module.params.get('timeout') + + def run(self): + result = { + 'nginx_status_facts': { + 'active_connections': None, + 'accepts': None, + 'handled': None, + 'requests': None, + 'reading': None, + 'writing': None, + 'waiting': None, + 'data': None, + } + } + (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout) + if not response: + module.fail_json(msg="No valid response from url %s within %s seconds (timeout)" % (self.url, self.timeout)) + + data = response.read() + if not data: + return result + + result['nginx_status_facts']['data'] = data + match = re.match(r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \nReading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)', data, re.S) + if match: + result['nginx_status_facts']['active_connections'] = int(match.group(1)) + result['nginx_status_facts']['accepts'] = int(match.group(2)) + result['nginx_status_facts']['handled'] = int(match.group(3)) + result['nginx_status_facts']['requests'] = int(match.group(4)) + result['nginx_status_facts']['reading'] = int(match.group(5)) + result['nginx_status_facts']['writing'] = int(match.group(6)) + result['nginx_status_facts']['waiting'] = int(match.group(7)) + return result + +def main(): + global module + module = AnsibleModule( + argument_spec=dict( + url=dict(required=True), + timeout=dict(type='int', default=10), + ), + supports_check_mode=True, + ) + + nginx_status_facts = NginxStatusFacts().run() + result = dict(changed=False, ansible_facts=nginx_status_facts) + module.exit_json(**result) + +if __name__ == '__main__': + main() diff --git a/web_infrastructure/taiga_issue.py b/web_infrastructure/taiga_issue.py new file mode 100644 index 00000000000..03be0952862 --- /dev/null +++ b/web_infrastructure/taiga_issue.py @@ -0,0 +1,317 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Alejandro Guirao +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: taiga_issue +short_description: Creates/deletes an issue in a Taiga Project Management Platform +description: + - Creates/deletes an issue in a Taiga Project Management Platform (U(https://taiga.io)). + - An issue is identified by the combination of project, issue subject and issue type. + - This module implements the creation or deletion of issues (not the update).
+version_added: "2.0" +options: + taiga_host: + description: + - The hostname of the Taiga instance. + required: False + default: https://api.taiga.io + project: + description: + - Name of the project containing the issue. Must exist previously. + required: True + subject: + description: + - The issue subject. + required: True + issue_type: + description: + - The issue type. Must exist previously. + required: True + priority: + description: + - The issue priority. Must exist previously. + required: False + default: Normal + status: + description: + - The issue status. Must exist previously. + required: False + default: New + severity: + description: + - The issue severity. Must exist previously. + required: False + default: Normal + description: + description: + - The issue description. + required: False + default: "" + attachment: + description: + - Path to a file to be attached to the issue. + required: False + default: None + attachment_description: + description: + - A string describing the file to be attached to the issue. + required: False + default: "" + tags: + description: + - A list of tags to be assigned to the issue. + required: False + default: [] + state: + description: + - Whether the issue should be present or not. + required: False + choices: ["present", "absent"] + default: present +author: Alejandro Guirao (@lekum) +requirements: [python-taiga] +notes: +- The authentication is achieved either by the environment variable TAIGA_TOKEN or by the pair of environment variables TAIGA_USERNAME and TAIGA_PASSWORD +''' + +EXAMPLES = ''' +# Create an issue in my hosted Taiga environment and attach an error log +- taiga_issue: + taiga_host: https://mytaigahost.example.com + project: myproject + subject: An error has been found + issue_type: Bug + priority: High + status: New + severity: Important + description: An error has been found. Please check the attached error log for details.
+ attachment: /path/to/error.log + attachment_description: Error log file + tags: + - Error + - Needs manual check + state: present + +# Deletes the previously created issue +- taiga_issue: + taiga_host: https://mytaigahost.example.com + project: myproject + subject: An error has been found + issue_type: Bug + state: absent +''' + +RETURN = '''# ''' +from os import getenv +from os.path import isfile + +try: + from taiga import TaigaAPI + from taiga.exceptions import TaigaException + TAIGA_MODULE_IMPORTED=True +except ImportError: + TAIGA_MODULE_IMPORTED=False + +def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority, + issue_status, issue_type, issue_severity, issue_description, + issue_attachment, issue_attachment_description, + issue_tags, state, check_mode=False): + """ + Method that creates/deletes issues depending on whether they exist and the state desired + + The credentials should be passed via environment variables: + - TAIGA_TOKEN + - TAIGA_USERNAME and TAIGA_PASSWORD + + Returns a tuple with these elements: + - A boolean representing the success of the operation + - A descriptive message + - A dict with the issue attributes, in case of issue creation, otherwise empty dict + """ + + changed = False + + try: + token = getenv('TAIGA_TOKEN') + if token: + api = TaigaAPI(host=taiga_host, token=token) + else: + api = TaigaAPI(host=taiga_host) + username = getenv('TAIGA_USERNAME') + password = getenv('TAIGA_PASSWORD') + if not all([username, password]): + return (False, changed, "Missing credentials", {}) + api.auth(username=username, password=password) + + user_id = api.me().id + project_list = filter(lambda x: x.name == project_name, api.projects.list(member=user_id)) + if len(project_list) != 1: + return (False, changed, "Unable to find project %s" % project_name, {}) + project = project_list[0] + project_id = project.id + + priority_list = filter(lambda x: x.name == issue_priority, api.priorities.list(project=project_id)) + if len(priority_list) != 1: + return (False, changed, "Unable to find issue priority %s for project %s" % (issue_priority, project_name), {}) + priority_id = priority_list[0].id + + status_list = filter(lambda x: x.name == issue_status, api.issue_statuses.list(project=project_id)) + if len(status_list) != 1: + return (False, changed, "Unable to find issue status %s for project %s" % (issue_status, project_name), {}) + status_id = status_list[0].id + + type_list = filter(lambda x: x.name == issue_type, project.list_issue_types()) + if len(type_list) != 1: + return (False, changed, "Unable to find issue type %s for project %s" % (issue_type, project_name), {}) + type_id = type_list[0].id + + severity_list = filter(lambda x: x.name == issue_severity, project.list_severities()) + if len(severity_list) != 1: + return (False, changed, "Unable to find severity %s for project %s" % (issue_severity, project_name), {}) + severity_id = severity_list[0].id + + issue = { + "project": project_name, + "subject": issue_subject, + "priority": issue_priority, + "status": issue_status, + "type": issue_type, + "severity": issue_severity, + "description": issue_description, + "tags": issue_tags, + } + + # An issue is identified by the project_name, the issue_subject and the issue_type + matching_issue_list = filter(lambda x: x.subject == issue_subject and x.type == type_id, project.list_issues()) + matching_issue_list_len = len(matching_issue_list) + + if matching_issue_list_len == 0: + # The issue does not exist in the project + if state == "present": + #
This implies a change + changed = True + if not check_mode: + # Create the issue + new_issue = project.add_issue(issue_subject, priority_id, status_id, type_id, severity_id, tags=issue_tags, description=issue_description) + if issue_attachment: + new_issue.attach(issue_attachment, description=issue_attachment_description) + issue["attachment"] = issue_attachment + issue["attachment_description"] = issue_attachment_description + return (True, changed, "Issue created", issue) + + else: + # If it does not exist, do nothing + return (True, changed, "Issue does not exist", {}) + + elif matching_issue_list_len == 1: + # The issue exists in the project + if state == "absent": + # This implies a change + changed = True + if not check_mode: + # Delete the issue + matching_issue_list[0].delete() + return (True, changed, "Issue deleted", {}) + + else: + # Do nothing + return (True, changed, "Issue already exists", {}) + + else: + # More than 1 matching issue + return (False, changed, "More than one issue with subject %s in project %s" % (issue_subject, project_name), {}) + + except TaigaException: + msg = "An exception happened: %s" % sys.exc_info()[1] + return (False, changed, msg, {}) + +def main(): + module = AnsibleModule( + argument_spec=dict( + taiga_host=dict(required=False, default="https://api.taiga.io"), + project=dict(required=True), + subject=dict(required=True), + issue_type=dict(required=True), + priority=dict(required=False, default="Normal"), + status=dict(required=False, default="New"), + severity=dict(required=False, default="Normal"), + description=dict(required=False, default=""), + attachment=dict(required=False, default=None), + attachment_description=dict(required=False, default=""), + tags=dict(required=False, default=[], type='list'), + state=dict(required=False, choices=['present','absent'], default='present'), + ), + supports_check_mode=True + ) + + if not TAIGA_MODULE_IMPORTED: + msg = "This module requires the python-taiga module" + module.fail_json(msg=msg) + + taiga_host = module.params['taiga_host'] + project_name = module.params['project'] + issue_subject = module.params['subject'] + issue_priority = module.params['priority'] + issue_status = module.params['status'] + issue_type = module.params['issue_type'] + issue_severity = module.params['severity'] + issue_description = module.params['description'] + issue_attachment = module.params['attachment'] + issue_attachment_description = module.params['attachment_description'] + if issue_attachment: + if not isfile(issue_attachment): + msg = "%s is not a file" % issue_attachment + module.fail_json(msg=msg) + issue_tags = module.params['tags'] + state = module.params['state'] + + return_status, changed, msg, issue_attr_dict = manage_issue( + module, + taiga_host, + project_name, + issue_subject, + issue_priority, + issue_status, + issue_type, + issue_severity, + issue_description, + issue_attachment, + issue_attachment_description, + issue_tags, + state, + check_mode=module.check_mode + ) + if return_status: + if len(issue_attr_dict) > 0: + module.exit_json(changed=changed, msg=msg, issue=issue_attr_dict) + else: + module.exit_json(changed=changed, msg=msg) + else: + module.fail_json(msg=msg) + + +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/windows/win_acl.ps1 b/windows/win_acl.ps1 new file mode 100644 index 00000000000..068130a203f --- /dev/null +++ b/windows/win_acl.ps1 @@ -0,0 +1,206 @@ +#!powershell +# This file is part of Ansible +# +# Copyright 2015, Phil Schwartz +# Copyright
2015, Trond Hindenes +# Copyright 2015, Hans-Joachim Kliemeck +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# WANT_JSON +# POWERSHELL_COMMON + +# win_acl module (File/Resources Permission Additions/Removal) + + +#Functions +Function UserSearch +{ + Param ([string]$accountName) + #Check if there's a realm specified + + $searchDomain = $false + $searchDomainUPN = $false + $SearchAppPools = $false + if ($accountName.Split("\").count -gt 1) + { + if ($accountName.Split("\")[0] -eq $env:COMPUTERNAME) + { + + } + elseif ($accountName.Split("\")[0] -eq "IIS APPPOOL") + { + $SearchAppPools = $true + $accountName = $accountName.split("\")[1] + } + else + { + $searchDomain = $true + $accountName = $accountName.split("\")[1] + } + } + Elseif ($accountName.contains("@")) + { + $searchDomain = $true + $searchDomainUPN = $true + } + Else + { + #Default to local user account + $accountName = $env:COMPUTERNAME + "\" + $accountName + } + + if (($searchDomain -eq $false) -and ($SearchAppPools -eq $false)) + { + # do not use Win32_UserAccount, because e.g. SYSTEM (BUILTIN\SYSTEM or COMPUTERNAME\SYSTEM) will not be listed. on Win32_Account groups will be listed too + $localaccount = get-wmiobject -class "Win32_Account" -namespace "root\CIMV2" -filter "(LocalAccount = True)" | where {$_.Caption -eq $accountName} + if ($localaccount) + { + return $localaccount.SID + } + } + Elseif ($SearchAppPools -eq $true) + { + Import-Module WebAdministration + $testiispath = Test-path "IIS:" + if ($testiispath -eq $false) + { + return $null + } + else + { + $apppoolobj = Get-ItemProperty IIS:\AppPools\$accountName + return $apppoolobj.applicationPoolSid + } + } + Elseif ($searchDomain -eq $true) + { + #Search by samaccountname + $Searcher = [adsisearcher]"" + + If ($searchDomainUPN -eq $false) { + $Searcher.Filter = "sAMAccountName=$($accountName)" + } + Else { + $Searcher.Filter = "userPrincipalName=$($accountName)" + } + + $result = $Searcher.FindOne() + if ($result) + { + $user = $result.GetDirectoryEntry() + + # get binary SID from AD account + $binarySID = $user.ObjectSid.Value + + # convert to string SID + return (New-Object System.Security.Principal.SecurityIdentifier($binarySID,0)).Value + } + } +} + +$params = Parse-Args $args; + +$result = New-Object PSObject; +Set-Attr $result "changed" $false; + +$path = Get-Attr $params "path" -failifempty $true +$user = Get-Attr $params "user" -failifempty $true +$rights = Get-Attr $params "rights" -failifempty $true + +$type = Get-Attr $params "type" -failifempty $true -validateSet "allow","deny" -resultobj $result +$state = Get-Attr $params "state" "present" -validateSet "present","absent" -resultobj $result + +$inherit = Get-Attr $params "inherit" "" +$propagation = Get-Attr $params "propagation" "None" -validateSet "None","NoPropagateInherit","InheritOnly" -resultobj $result + +If (-Not (Test-Path -Path $path)) { + Fail-Json $result "$path file or directory does not exist on the host" +} + +# Test that the user/group is resolvable on the local machine +$sid
= UserSearch -AccountName ($user) +if (!$sid) +{ + Fail-Json $result "$user is not a valid user or group on the host machine or domain" +} + +If (Test-Path -Path $path -PathType Leaf) { + $inherit = "None" +} +ElseIf ($inherit -eq "") { + $inherit = "ContainerInherit, ObjectInherit" +} + +Try { + $colRights = [System.Security.AccessControl.FileSystemRights]$rights + $InheritanceFlag = [System.Security.AccessControl.InheritanceFlags]$inherit + $PropagationFlag = [System.Security.AccessControl.PropagationFlags]$propagation + + If ($type -eq "allow") { + $objType =[System.Security.AccessControl.AccessControlType]::Allow + } + Else { + $objType =[System.Security.AccessControl.AccessControlType]::Deny + } + + $objUser = New-Object System.Security.Principal.SecurityIdentifier($sid) + $objACE = New-Object System.Security.AccessControl.FileSystemAccessRule ($objUser, $colRights, $InheritanceFlag, $PropagationFlag, $objType) + $objACL = Get-ACL $path + + # Check if the ACE exists already in the objects ACL list + $match = $false + ForEach($rule in $objACL.Access){ + $ruleIdentity = $rule.IdentityReference.Translate([System.Security.Principal.SecurityIdentifier]) + If (($rule.FileSystemRights -eq $objACE.FileSystemRights) -And ($rule.AccessControlType -eq $objACE.AccessControlType) -And ($ruleIdentity -eq $objACE.IdentityReference) -And ($rule.IsInherited -eq $objACE.IsInherited) -And ($rule.InheritanceFlags -eq $objACE.InheritanceFlags) -And ($rule.PropagationFlags -eq $objACE.PropagationFlags)) { + $match = $true + Break + } + } + + If ($state -eq "present" -And $match -eq $false) { + Try { + $objACL.AddAccessRule($objACE) + Set-ACL $path $objACL + Set-Attr $result "changed" $true; + } + Catch { + Fail-Json $result "an exception occurred when adding the specified rule" + } + } + ElseIf ($state -eq "absent" -And $match -eq $true) { + Try { + $objACL.RemoveAccessRule($objACE) + Set-ACL $path $objACL + Set-Attr $result "changed" $true; + } + Catch { + Fail-Json $result "an exception occurred when removing the specified rule" + } + } + Else { + # A rule was attempting to be added but already exists + If ($match -eq $true) { + Exit-Json $result "the specified rule already exists" + } + # A rule didn't exist that was trying to be removed + Else { + Exit-Json $result "the specified rule does not exist" + } + } +} +Catch { + Fail-Json $result "an error occurred when attempting to $state $rights permission(s) on $path for $user" +} + +Exit-Json $result diff --git a/windows/win_acl.py b/windows/win_acl.py new file mode 100644 index 00000000000..4e6e9cb7ad6 --- /dev/null +++ b/windows/win_acl.py @@ -0,0 +1,146 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2015, Phil Schwartz +# Copyright 2015, Trond Hindenes +# Copyright 2015, Hans-Joachim Kliemeck +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# this is a windows documentation stub.
actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: win_acl +version_added: "2.0" +short_description: Set file/directory permissions for a system user or group. +description: + - Add or remove rights/permissions for a given user or group for the specified src file or folder. + - If adding ACL's for AppPool identities (available since 2.3), the Windows feature "Web-Scripting-Tools" must be enabled. +options: + path: + description: + - File or Directory + required: yes + user: + description: + - User or Group to add specified rights to act on src file/folder + required: yes + default: none + state: + description: + - Specify whether to add C(present) or remove C(absent) the specified access rule + required: no + choices: + - present + - absent + default: present + type: + description: + - Specify whether to allow or deny the rights specified + required: yes + choices: + - allow + - deny + default: none + rights: + description: + - The rights/permissions that are to be allowed/denied for the specified user or group for the given src file or directory. Can be entered as a comma separated list (Ex. "Modify, Delete, ExecuteFile"). For more information on the choices see MSDN FileSystemRights Enumeration. + required: yes + choices: + - AppendData + - ChangePermissions + - Delete + - DeleteSubdirectoriesAndFiles + - ExecuteFile + - FullControl + - ListDirectory + - Modify + - Read + - ReadAndExecute + - ReadAttributes + - ReadData + - ReadExtendedAttributes + - ReadPermissions + - Synchronize + - TakeOwnership + - Traverse + - Write + - WriteAttributes + - WriteData + - WriteExtendedAttributes + default: none + inherit: + description: + - Inherit flags on the ACL rules. Can be specified as a comma separated list (Ex. "ContainerInherit, ObjectInherit"). For more information on the choices see MSDN InheritanceFlags Enumeration. + required: no + choices: + - ContainerInherit + - ObjectInherit + - None + default: For Leaf File, None; For Directory, ContainerInherit, ObjectInherit; + propagation: + description: + - Propagation flag on the ACL rules. For more information on the choices see MSDN PropagationFlags Enumeration.
+ required: no + choices: + - None + - NoPropagateInherit + - InheritOnly + default: "None" +author: Phil Schwartz (@schwartzmx), Trond Hindenes (@trondhindenes), Hans-Joachim Kliemeck (@h0nIg) +''' + +EXAMPLES = ''' +# Restrict write,execute access to User Fed-Phil +$ ansible -i hosts -m win_acl -a "user=Fed-Phil path=C:\Important\Executable.exe type=deny rights='ExecuteFile,Write'" all + +# Playbook example +# Add access rule to allow IIS_IUSRS FullControl to MySite +--- +- name: Add IIS_IUSRS allow rights + win_acl: + path: 'C:\inetpub\wwwroot\MySite' + user: 'IIS_IUSRS' + rights: 'FullControl' + type: 'allow' + state: 'present' + inherit: 'ContainerInherit, ObjectInherit' + propagation: 'None' + +# Remove previously added rule for IIS_IUSRS +- name: Remove FullControl AccessRule for IIS_IUSRS + win_acl: + path: 'C:\inetpub\wwwroot\MySite' + user: 'IIS_IUSRS' + rights: 'FullControl' + type: 'allow' + state: 'absent' + inherit: 'ContainerInherit, ObjectInherit' + propagation: 'None' + +# Deny Intern +- name: Deny Intern + win_acl: + path: 'C:\Administrator\Documents' + user: 'Intern' + rights: 'Read,Write,Modify,FullControl,Delete' + type: 'deny' + state: 'present' +''' diff --git a/windows/win_acl_inheritance.ps1 b/windows/win_acl_inheritance.ps1 new file mode 100644 index 00000000000..1933a3a5dd4 --- /dev/null +++ b/windows/win_acl_inheritance.ps1 @@ -0,0 +1,86 @@ +#!powershell +# This file is part of Ansible +# +# Copyright 2015, Hans-Joachim Kliemeck +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
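Editor's note: the script that follows (win_acl_inheritance.ps1) hinges on a single .NET call, ObjectSecurity.SetAccessRuleProtection($isProtected, $preserveInheritance). Below is a minimal, hedged sketch of how that call maps onto the module's state/reorganize options; it is not part of the patch, and the path C:\apache is purely illustrative.

```powershell
# Editorial sketch, not part of the patch. 'C:\apache' is an illustrative path.
$objACL = Get-ACL 'C:\apache'

# state=absent, reorganize=yes: protect the ACL and keep copies of the
# previously inherited ACEs as explicit entries
$objACL.SetAccessRuleProtection($True, $True)

# state=absent, reorganize=no would drop the inherited ACEs instead:
#   $objACL.SetAccessRuleProtection($True, $False)

# state=present re-enables inheritance; the second argument is ignored
# whenever the first one is $False:
#   $objACL.SetAccessRuleProtection($False, $False)

Set-ACL 'C:\apache' $objACL
```

As the module's own comment notes, a Set-ACL save is required before the re-inherited rules become visible for the deduplication pass.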
+ +# WANT_JSON +# POWERSHELL_COMMON + + +$params = Parse-Args $args; + +$result = New-Object PSObject; +Set-Attr $result "changed" $false; + +$path = Get-Attr $params "path" -failifempty $true +$state = Get-Attr $params "state" "absent" -validateSet "present","absent" -resultobj $result +$reorganize = Get-Attr $params "reorganize" "no" -validateSet "no","yes" -resultobj $result +$reorganize = $reorganize | ConvertTo-Bool + +If (-Not (Test-Path -Path $path)) { + Fail-Json $result "$path file or directory does not exist on the host" +} + +Try { + $objACL = Get-ACL $path + $inheritanceEnabled = !$objACL.AreAccessRulesProtected + + If (($state -eq "present") -And !$inheritanceEnabled) { + # second parameter is ignored if first=$False + $objACL.SetAccessRuleProtection($False, $False) + + If ($reorganize) { + # it won't work without an intermediate save, the state would be the same + Set-ACL $path $objACL + $objACL = Get-ACL $path + + # convert explicit ACE to inherited ACE + ForEach($inheritedRule in $objACL.Access) { + If (!$inheritedRule.IsInherited) { + Continue + } + + ForEach($explicitRule in $objACL.Access) { + If ($explicitRule.IsInherited) { + Continue + } + + If (($inheritedRule.FileSystemRights -eq $explicitRule.FileSystemRights) -And ($inheritedRule.AccessControlType -eq $explicitRule.AccessControlType) -And ($inheritedRule.IdentityReference -eq $explicitRule.IdentityReference) -And ($inheritedRule.InheritanceFlags -eq $explicitRule.InheritanceFlags) -And ($inheritedRule.PropagationFlags -eq $explicitRule.PropagationFlags)) { + $objACL.RemoveAccessRule($explicitRule) + } + } + } + } + + Set-ACL $path $objACL + Set-Attr $result "changed" $true; + } + Elseif (($state -eq "absent") -And $inheritanceEnabled) { + If ($reorganize) { + $objACL.SetAccessRuleProtection($True, $True) + } Else { + $objACL.SetAccessRuleProtection($True, $False) + } + + Set-ACL $path $objACL + Set-Attr $result "changed" $true; + } +} +Catch { + Fail-Json $result "an error occurred when attempting to change inheritance" +} + +Exit-Json $result diff --git a/windows/win_acl_inheritance.py b/windows/win_acl_inheritance.py new file mode 100644 index 00000000000..549ce629335 --- /dev/null +++ b/windows/win_acl_inheritance.py @@ -0,0 +1,83 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2015, Hans-Joachim Kliemeck +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: win_acl_inheritance +version_added: "2.1" +short_description: Change ACL inheritance +description: + - Change ACL (Access Control List) inheritance and optionally copy inherited ACE's (Access Control Entry) to dedicated ACE's or vice versa.
+options: + path: + description: + - Path to be used for changing inheritance + required: true + state: + description: + - Specify whether to enable I(present) or disable I(absent) ACL inheritance + required: false + choices: + - present + - absent + default: absent + reorganize: + description: + - For C(state) = I(absent), indicates if the inherited ACE's should be copied from the parent directory. This is necessary (in combination with removal) for a simple ACL instead of using multiple ACE deny entries. + - For C(state) = I(present), indicates if the inherited ACE's should be deduplicated compared to the parent directory. This reduces the complexity of the ACL structure. + required: false + choices: + - no + - yes + default: no +author: Hans-Joachim Kliemeck (@h0nIg) +''' + +EXAMPLES = ''' +# Playbook example +--- +- name: Disable inherited ACE's + win_acl_inheritance: + path: 'C:\\apache\\' + state: absent + +- name: Disable and copy inherited ACE's + win_acl_inheritance: + path: 'C:\\apache\\' + state: absent + reorganize: yes + +- name: Enable and remove dedicated ACE's + win_acl_inheritance: + path: 'C:\\apache\\' + state: present + reorganize: yes +''' + +RETURN = ''' + +''' \ No newline at end of file diff --git a/windows/win_chocolatey.ps1 b/windows/win_chocolatey.ps1 index 4a033d23157..3bb6a1f0dc0 100644 --- a/windows/win_chocolatey.ps1 +++ b/windows/win_chocolatey.ps1 @@ -16,7 +16,6 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. -$ErrorActionPreference = "Stop" # WANT_JSON # POWERSHELL_COMMON @@ -25,72 +24,32 @@ $params = Parse-Args $args; $result = New-Object PSObject; Set-Attr $result "changed" $false; -If ($params.name) -{ - $package = $params.name -} -Else -{ - Fail-Json $result "missing required argument: name" -} +$package = Get-Attr -obj $params -name name -failifempty $true -emptyattributefailmessage "missing required argument: name" +$force = Get-Attr -obj $params -name force -default "false" | ConvertTo-Bool +$upgrade = Get-Attr -obj $params -name upgrade -default "false" | ConvertTo-Bool +$version = Get-Attr -obj $params -name version -default $null -If ($params.force) -{ - $force = $params.force | ConvertTo-Bool -} -Else -{ - $force = $false -} +$source = Get-Attr -obj $params -name source -default $null +if ($source) {$source = $source.Tolower()} -If ($params.upgrade) -{ - $upgrade = $params.upgrade | ConvertTo-Bool -} -Else -{ - $upgrade = $false -} +$showlog = Get-Attr -obj $params -name showlog -default "false" | ConvertTo-Bool +$state = Get-Attr -obj $params -name state -default "present" -If ($params.version) -{ - $version = $params.version -} -Else -{ - $version = $null -} +$installargs = Get-Attr -obj $params -name install_args -default $null +$packageparams = Get-Attr -obj $params -name params -default $null +$allowemptychecksums = Get-Attr -obj $params -name allow_empty_checksums -default "false" | ConvertTo-Bool +$ignorechecksums = Get-Attr -obj $params -name ignore_checksums -default "false" | ConvertTo-Bool +$ignoredependencies = Get-Attr -obj $params -name ignore_dependencies -default "false" | ConvertTo-Bool -If ($params.source) -{ - $source = $params.source.ToString().ToLower() -} -Else -{ - $source = $null -} +# as of chocolatey 0.9.10, nonzero success exit codes can be returned +# see https://github.com/chocolatey/choco/issues/512#issuecomment-214284461 +$successexitcodes = (0,1605,1614,1641,3010) -If ($params.showlog) -{ - $showlog = $params.showlog | ConvertTo-Bool -} -Else +if
("present","absent" -notcontains $state) { - $showlog = $null + Fail-Json $result "state is $state; must be present or absent" } -If ($params.state) -{ - $state = $params.state.ToString().ToLower() - If (($state -ne "present") -and ($state -ne "absent")) - { - Fail-Json $result "state is $state; must be present or absent" - } -} -Else -{ - $state = "present" -} Function Chocolatey-Install-Upgrade { @@ -102,7 +61,12 @@ Function Chocolatey-Install-Upgrade if ($ChocoAlreadyInstalled -eq $null) { #We need to install chocolatey - iex ((new-object net.webclient).DownloadString("https://chocolatey.org/install.ps1")) + $install_output = (new-object net.webclient).DownloadString("https://chocolatey.org/install.ps1") | powershell - + if ($LASTEXITCODE -ne 0) + { + Set-Attr $result "choco_bootstrap_output" $install_output + Fail-Json $result "Chocolatey bootstrap installation failed." + } $result.changed = $true $script:executable = "C:\ProgramData\chocolatey\bin\choco.exe" } @@ -110,7 +74,7 @@ Function Chocolatey-Install-Upgrade { $script:executable = "choco.exe" - if ((choco --version) -lt '0.9.9') + if ([Version](choco --version) -lt [Version]'0.9.9') { Choco-Upgrade chocolatey } @@ -138,7 +102,7 @@ Function Choco-IsInstalled Throw "Error checking installation status for $package" } - If ("$results" -match " $package .* (\d+) packages installed.") + If ("$results" -match "$package .* (\d+) packages installed.") { return $matches[1] -gt 0 } @@ -158,7 +122,17 @@ Function Choco-Upgrade [Parameter(Mandatory=$false, Position=3)] [string]$source, [Parameter(Mandatory=$false, Position=4)] - [bool]$force + [bool]$force, + [Parameter(Mandatory=$false, Position=5)] + [string]$installargs, + [Parameter(Mandatory=$false, Position=6)] + [string]$packageparams, + [Parameter(Mandatory=$false, Position=7)] + [bool]$allowemptychecksums, + [Parameter(Mandatory=$false, Position=8)] + [bool]$ignorechecksums, + [Parameter(Mandatory=$false, Position=9)] + [bool]$ignoredependencies ) if (-not (Choco-IsInstalled $package)) @@ -183,9 +157,34 @@ Function Choco-Upgrade $cmd += " -force" } + if ($installargs) + { + $cmd += " -installargs '$installargs'" + } + + if ($packageparams) + { + $cmd += " -params '$packageparams'" + } + + if ($allowemptychecksums) + { + $cmd += " --allow-empty-checksums" + } + + if ($ignorechecksums) + { + $cmd += " --ignore-checksums" + } + + if ($ignoredependencies) + { + $cmd += " -ignoredependencies" + } + $results = invoke-expression $cmd - if ($LastExitCode -ne 0) + if ($LastExitCode -notin $successexitcodes) { Set-Attr $result "choco_error_cmd" $cmd Set-Attr $result "choco_error_log" "$results" @@ -215,17 +214,35 @@ Function Choco-Install [Parameter(Mandatory=$false, Position=4)] [bool]$force, [Parameter(Mandatory=$false, Position=5)] - [bool]$upgrade + [bool]$upgrade, + [Parameter(Mandatory=$false, Position=6)] + [string]$installargs, + [Parameter(Mandatory=$false, Position=7)] + [string]$packageparams, + [Parameter(Mandatory=$false, Position=8)] + [bool]$allowemptychecksums, + [Parameter(Mandatory=$false, Position=9)] + [bool]$ignorechecksums, + [Parameter(Mandatory=$false, Position=10)] + [bool]$ignoredependencies ) if (Choco-IsInstalled $package) { if ($upgrade) { - Choco-Upgrade -package $package -version $version -source $source -force $force + Choco-Upgrade -package $package -version $version -source $source -force $force ` + -installargs $installargs -packageparams $packageparams ` + -allowemptychecksums $allowemptychecksums -ignorechecksums $ignorechecksums ` + -ignoredependencies 
$ignoredependencies + + return } - return + if (-not $force) + { + return + } } $cmd = "$executable install -dv -y $package" @@ -245,9 +262,34 @@ Function Choco-Install $cmd += " -force" } + if ($installargs) + { + $cmd += " -installargs '$installargs'" + } + + if ($packageparams) + { + $cmd += " -params '$packageparams'" + } + + if ($allowemptychecksums) + { + $cmd += " --allow-empty-checksums" + } + + if ($ignorechecksums) + { + $cmd += " --ignore-checksums" + } + + if ($ignoredependencies) + { + $cmd += " -ignoredependencies" + } + $results = invoke-expression $cmd - if ($LastExitCode -ne 0) + if ($LastExitCode -notin $successexitcodes) { Set-Attr $result "choco_error_cmd" $cmd Set-Attr $result "choco_error_log" "$results" @@ -287,9 +329,14 @@ Function Choco-Uninstall $cmd += " -force" } + if ($packageparams) + { + $cmd += " -params '$packageparams'" + } + $results = invoke-expression $cmd - if ($LastExitCode -ne 0) + if ($LastExitCode -notin $successexitcodes) { Set-Attr $result "choco_error_cmd" $cmd Set-Attr $result "choco_error_log" "$results" @@ -305,7 +352,9 @@ Try if ($state -eq "present") { Choco-Install -package $package -version $version -source $source ` - -force $force -upgrade $upgrade + -force $force -upgrade $upgrade -installargs $installargs ` + -packageparams $packageparams -allowemptychecksums $allowemptychecksums ` + -ignorechecksums $ignorechecksums -ignoredependencies $ignoredependencies } else { @@ -319,3 +368,4 @@ Catch Fail-Json $result $_.Exception.Message } + diff --git a/windows/win_chocolatey.py b/windows/win_chocolatey.py index 7f399dbd22f..89e6d73af0e 100644 --- a/windows/win_chocolatey.py +++ b/windows/win_chocolatey.py @@ -21,6 +21,10 @@ # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'committer', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_chocolatey @@ -33,48 +37,59 @@ description: - Name of the package to be installed required: true - default: null - aliases: [] state: description: - State of the package on the system - required: false choices: - present - absent default: present - aliases: [] force: description: - Forces install of the package (even if it already exists). 
Using force will cause Ansible to always report that a change was made
-    required: false
     choices:
       - yes
       - no
     default: no
-    aliases: []
   upgrade:
     description:
       - If the package is already installed, try to upgrade it to the latest version or to the specified version
-    required: false
     choices:
       - yes
       - no
     default: no
-    aliases: []
   version:
     description:
       - Specific version of the package to be installed
       - Ignored when state == 'absent'
-    required: false
-    default: null
-    aliases: []
   source:
     description:
       - Specify source rather than using default chocolatey repository
+  install_args:
+    description:
+      - Arguments to pass to the native installer
+    version_added: '2.1'
+  params:
+    description:
+      - Parameters to pass to the package
+    version_added: '2.1'
+  allow_empty_checksums:
+    description:
+      - Allow empty checksums to be used
     required: false
-    default: null
-    aliases: []
+    default: false
+    version_added: '2.2'
+  ignore_checksums:
+    description:
+      - Ignore checksums
     required: false
+    default: false
+    version_added: '2.2'
+  ignore_dependencies:
+    description:
+      - Ignore dependencies, only install/upgrade the package itself
+    default: false
+    version_added: '2.1'
 author: "Trond Hindenes (@trondhindenes), Peter Mounce (@petemounce), Pepe Barbe (@elventear), Adam Keech (@smadam813)"
 '''
@@ -91,7 +106,7 @@
 # Install notepadplusplus version 6.6
 win_chocolatey:
   name: notepadplusplus.install
-  version: 6.6
+  version: '6.6'
 # Uninstall git
 win_chocolatey:
diff --git a/windows/win_dotnet_ngen.py b/windows/win_dotnet_ngen.py
index 75ce9cc138b..9fb7e44e016 100644
--- a/windows/win_dotnet_ngen.py
+++ b/windows/win_dotnet_ngen.py
@@ -21,6 +21,10 @@
 # this is a windows documentation stub. actual code lives in the .ps1
 # file of the same name
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: win_dotnet_ngen
diff --git a/windows/win_environment.ps1 b/windows/win_environment.ps1
index 1398524cfbb..f1acfe19356 100644
--- a/windows/win_environment.ps1
+++ b/windows/win_environment.ps1
@@ -20,36 +20,18 @@
 # POWERSHELL_COMMON
 $params = Parse-Args $args;
-$result = New-Object PSObject;
-Set-Attr $result "changed" $false;
+$state = Get-AnsibleParam -obj $params -name "state" -default "present" -validateSet "present","absent"
+$name = Get-AnsibleParam -obj $params -name "name" -failifempty $true
+$level = Get-AnsibleParam -obj $params -name "level" -validateSet "machine","process","user" -failifempty $true
+$value = Get-AnsibleParam -obj $params -name "value"
-If ($params.state) {
-    $state = $params.state.ToString().ToLower()
-    If (($state -ne 'present') -and ($state -ne 'absent') ) {
-        Fail-Json $result "state is '$state'; must be 'present', or 'absent'"
-    }
-} else {
-    $state = 'present'
-}
-
-If ($params.name)
-{
-    $name = $params.name
-} else {
-    Fail-Json $result "missing required argument: name"
-}
-
-$value = $params.value
-
-If ($params.level) {
-    $level = $params.level.ToString().ToLower()
-    If (( $level -ne 'machine') -and ( $level -ne 'user' ) -and ( $level -ne 'process')) {
-        Fail-Json $result "level is '$level'; must be 'machine', 'user', or 'process'"
-    }
+If ($level) {
+    $level = $level.ToString().ToLower()
 }
 $before_value = [Environment]::GetEnvironmentVariable($name, $level)
+$state = $state.ToString().ToLower()
 if ($state -eq "present" ) {
     [Environment]::SetEnvironmentVariable($name, $value, $level)
 } Elseif ($state -eq "absent") {
@@ -58,6 +40,8 @@ if ($state -eq "present" ) {
 $after_value = [Environment]::GetEnvironmentVariable($name, $level)
+$result = New-Object PSObject;
+Set-Attr $result "changed" $false;
 Set-Attr $result "name" $name;
 Set-Attr $result "before_value" $before_value;
 Set-Attr $result "value" $after_value;
diff --git a/windows/win_environment.py b/windows/win_environment.py
index 8d4a1701695..f66771a758d 100644
--- a/windows/win_environment.py
+++ b/windows/win_environment.py
@@ -21,15 +21,18 @@
 # this is a windows documentation stub. actual code lives in the .ps1
 # file of the same name
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: win_environment
 version_added: "2.0"
-short_description: Modifies environment variables on windows guests
+short_description: Modifies environment variables on Windows hosts
 description:
-    - Uses .net Environment to set or remove environment variables.
-    - Can set at User, Machine or Process level.
-    - Note that usual rules apply, so existing environments will not change until new processes are started.
+    - Uses .net Environment to set or remove environment variables and can set at User, Machine or Process level.
+    - User level environment variables will be set, but not available until the user has logged off and on again.
 options:
   state:
     description:
@@ -62,6 +65,13 @@
       - process
       - user
 author: "Jon Hawkesworth (@jhawkesworth)"
+notes:
+   - This module does not broadcast change events.
+     This means that the minority of windows applications which can have
+     their environment changed without restarting will not be notified and
+     therefore will need restarting to pick up new environment settings.
+     User level environment variables will require the user to log out
+     and in again before they become available.
 '''
 EXAMPLES = '''
diff --git a/windows/win_file_version.ps1 b/windows/win_file_version.ps1
new file mode 100644
index 00000000000..2e2f341c461
--- /dev/null
+++ b/windows/win_file_version.ps1
@@ -0,0 +1,78 @@
+#!powershell
+
+#this file is part of Ansible
+#Copyright © 2015 Sam Liu
+
+#This program is free software: you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation, either version 3 of the License, or
+#(at your option) any later version.
+
+#This program is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#GNU General Public License for more details.
+
+#You should have received a copy of the GNU General Public License
+#along with this program. If not, see .
+
+# WANT_JSON
+# POWERSHELL_COMMON
+
+$params = Parse-Args $args;
+
+$result = New-Object psobject @{
+    win_file_version = New-Object psobject
+    changed = $false
+}
+
+$path = Get-AnsibleParam $params "path" -failifempty $true -resultobj $result
+
+If (-Not (Test-Path -Path $path -PathType Leaf)){
+    Fail-Json $result "Specified path $path does not exist or is not a file."
+}
+$ext = [System.IO.Path]::GetExtension($path)
+If ( $ext -notin '.exe', '.dll'){
+    Fail-Json $result "Specified path $path is not a valid file type; must be DLL or EXE."
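+    # Aside (illustrative only, not part of this module): a minimal sketch of the
+    # .NET call the Try block below relies on. The function name and sample path
+    # are hypothetical. FileVersionInfo reads the Win32 version resource; fields
+    # such as FileVersion are $null for unversioned binaries, which is why the
+    # module defaults every field to '' before returning it.
+    Function Get-ExampleFileVersion {
+        param([string]$ExamplePath = 'C:\Windows\System32\cmd.exe')
+        $info = [System.Diagnostics.FileVersionInfo]::GetVersionInfo($ExamplePath)
+        "{0}: FileVersion={1} ProductVersion={2}" -f $ExamplePath, $info.FileVersion, $info.ProductVersion
+    }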
+}
+
+Try {
+    $_version_fields = [System.Diagnostics.FileVersionInfo]::GetVersionInfo($path)
+    $file_version = $_version_fields.FileVersion
+    If ($file_version -eq $null){
+        $file_version = ''
+    }
+    $product_version = $_version_fields.ProductVersion
+    If ($product_version -eq $null){
+        $product_version = ''
+    }
+    $file_major_part = $_version_fields.FileMajorPart
+    If ($file_major_part -eq $null){
+        $file_major_part = ''
+    }
+    $file_minor_part = $_version_fields.FileMinorPart
+    If ($file_minor_part -eq $null){
+        $file_minor_part = ''
+    }
+    $file_build_part = $_version_fields.FileBuildPart
+    If ($file_build_part -eq $null){
+        $file_build_part = ''
+    }
+    $file_private_part = $_version_fields.FilePrivatePart
+    If ($file_private_part -eq $null){
+        $file_private_part = ''
+    }
+}
+Catch{
+    Fail-Json $result "Error: $($_.Exception.Message)"
+}
+
+Set-Attr $result.win_file_version "path" $path.toString()
+Set-Attr $result.win_file_version "file_version" $file_version.toString()
+Set-Attr $result.win_file_version "product_version" $product_version.toString()
+Set-Attr $result.win_file_version "file_major_part" $file_major_part.toString()
+Set-Attr $result.win_file_version "file_minor_part" $file_minor_part.toString()
+Set-Attr $result.win_file_version "file_build_part" $file_build_part.toString()
+Set-Attr $result.win_file_version "file_private_part" $file_private_part.toString()
+Exit-Json $result;
+
diff --git a/windows/win_file_version.py b/windows/win_file_version.py
new file mode 100644
index 00000000000..f882a4439de
--- /dev/null
+++ b/windows/win_file_version.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Get DLL or EXE build version
+# Copyright © 2015 Sam Liu
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: win_file_version
+version_added: "2.1"
+short_description: Get DLL or EXE file build version
+description:
+  - Get DLL or EXE file build version
+  - The module's C(changed) status will always be false
+options:
+  path:
+    description:
+      - File to get the version of (provide an absolute path)
+    required: true
+    aliases: []
+author: Sam Liu
+'''
+
+EXAMPLES = '''
+# get C:\Windows\System32\cmd.exe version in playbook
+---
+- name: Get acm instance version
+  win_file_version:
+    path: 'C:\Windows\System32\cmd.exe'
+  register: exe_file_version
+
+- debug:
+    msg: '{{ exe_file_version }}'
+
+'''
+
+RETURN = """
+win_file_version.path:
+  description: file path
+  returned: always
+  type: string
+
+win_file_version.file_version:
+  description: file version number.
+  returned: no error
+  type: string
+
+win_file_version.product_version:
+  description: the version of the product this file is distributed with.
+  returned: no error
+  type: string
+
+win_file_version.file_major_part:
+  description: the major part of the version number.
+ returned: no error + type: string + +win_file_version.file_minor_part: + description: the minor part of the version number of the file. + returned: no error + type: string + +win_file_version.file_build_part: + description: build number of the file. + returned: no error + type: string + +win_file_version.file_private_part: + description: file private part number. + returned: no error + type: string + +""" diff --git a/windows/win_firewall_rule.ps1 b/windows/win_firewall_rule.ps1 new file mode 100644 index 00000000000..a63cedec0c1 --- /dev/null +++ b/windows/win_firewall_rule.ps1 @@ -0,0 +1,362 @@ +#!powershell +# +# (c) 2014, Timothy Vandenbrande +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# WANT_JSON +# POWERSHELL_COMMON + +function getFirewallRule ($fwsettings) { + try { + + #$output = Get-NetFirewallRule -name $($fwsettings.'Rule Name'); + $rawoutput=@(netsh advfirewall firewall show rule name="$($fwsettings.'Rule Name')" verbose) + if (!($rawoutput -eq 'No rules match the specified criteria.')){ + $rawoutput | Where {$_ -match '^([^:]+):\s*(\S.*)$'} | Foreach -Begin { + $FirstRun = $true; + $HashProps = @{}; + } -Process { + if (($Matches[1] -eq 'Rule Name') -and (!($FirstRun))) { + #$output=New-Object -TypeName PSCustomObject -Property $HashProps; + $output=$HashProps; + $HashProps = @{}; + }; + $HashProps.$($Matches[1]) = $Matches[2]; + $FirstRun = $false; + } -End { + #$output=New-Object -TypeName PSCustomObject -Property $HashProps; + $output=$HashProps; + } + } + $exists=$false; + $correct=$true; + $diff=$false; + $multi=$false; + $correct=$false; + $difference=@(); + $msg=@(); + if ($($output|measure).count -gt 0) { + $exists=$true; + $msg += @("The rule '" + $fwsettings.'Rule Name' + "' exists."); + if ($($output|measure).count -gt 1) { + $multi=$true + $msg += @("The rule '" + $fwsettings.'Rule Name' + "' has multiple entries."); + ForEach($rule in $output.GetEnumerator()) { + ForEach($fwsetting in $fwsettings.GetEnumerator()) { + if ( $rule.$fwsetting -ne $fwsettings.$fwsetting) { + $diff=$true; + #$difference+=@($fwsettings.$($fwsetting.Key)); + $difference+=@("output:$rule.$fwsetting,fwsetting:$fwsettings.$fwsetting"); + }; + }; + if ($diff -eq $false) { + $correct=$true + }; + }; + } else { + ForEach($fwsetting in $fwsettings.GetEnumerator()) { + if ( $output.$($fwsetting.Key) -ne $fwsettings.$($fwsetting.Key)) { + + if (($fwsetting.Key -eq 'RemoteIP') -and ($output.$($fwsetting.Key) -eq ($fwsettings.$($fwsetting.Key)+'-'+$fwsettings.$($fwsetting.Key)))) { + $donothing=$false + } elseif (($fwsetting.Key -eq 'DisplayName') -and ($output."Rule Name" -eq $fwsettings.$($fwsetting.Key))) { + $donothing=$false + } else { + $diff=$true; + $difference+=@($fwsettings.$($fwsetting.Key)); + }; + }; + }; + if ($diff -eq $false) { + $correct=$true + }; + }; + if ($correct) { + $msg += @("An identical rule exists"); + } else { + $msg += @("The rule exists but has different values"); + } + } else { + 
$msg += @("No rule could be found");
+    };
+    $result = @{
+        failed = $false
+        exists = $exists
+        identical = $correct
+        multiple = $multi
+        difference = $difference
+        msg = $msg
+    }
+    } catch [Exception]{
+        $result = @{
+            failed = $true
+            error = $_.Exception.Message
+            msg = $msg
+        }
+    };
+    return $result
+};
+
+function createFireWallRule ($fwsettings) {
+    $msg=@()
+    $execString="netsh advfirewall firewall add rule"
+
+    ForEach ($fwsetting in $fwsettings.GetEnumerator()) {
+        if ($fwsetting.key -eq 'Direction') {
+            $key='dir'
+        } elseif ($fwsetting.key -eq 'Rule Name') {
+            $key='name'
+        } elseif ($fwsetting.key -eq 'Enabled') {
+            $key='enable'
+        } elseif ($fwsetting.key -eq 'Profiles') {
+            $key='profile'
+        } else {
+            $key=$($fwsetting.key).ToLower()
+        };
+        $execString+=" ";
+        $execString+=$key;
+        $execString+="=";
+        $execString+='"';
+        $execString+=$fwsetting.value;
+        $execString+='"';
+    };
+    try {
+        #$msg+=@($execString);
+        $output=$(Invoke-Expression $execString| ? {$_});
+        $msg+=@("Created firewall rule $name");
+
+        $result=@{
+            failed = $false
+            output=$output
+            changed=$true
+            msg=$msg
+        };
+
+    } catch [Exception]{
+        $msg=@("Failed to create the rule")
+        $result=@{
+            output=$output
+            failed=$true
+            error=$_.Exception.Message
+            msg=$msg
+        };
+    };
+    return $result
+};
+
+function removeFireWallRule ($fwsettings) {
+    $msg=@()
+    try {
+        $rawoutput=@(netsh advfirewall firewall delete rule name="$($fwsettings.'Rule Name')")
+        $rawoutput | Where {$_ -match '^([^:]+):\s*(\S.*)$'} | Foreach -Begin {
+            $FirstRun = $true;
+            $HashProps = @{};
+        } -Process {
+            if (($Matches[1] -eq 'Rule Name') -and (!($FirstRun))) {
+                $output=$HashProps;
+                $HashProps = @{};
+            };
+            $HashProps.$($Matches[1]) = $Matches[2];
+            $FirstRun = $false;
+        } -End {
+            $output=$HashProps;
+        };
+        $msg+=@("Removed the rule")
+        $result=@{
+            failed=$false
+            changed=$true
+            msg=$msg
+            output=$output
+        };
+    } catch [Exception]{
+        $msg+=@("Failed to remove the rule")
+        $result=@{
+            failed=$true
+            error=$_.Exception.Message
+            msg=$msg
+        }
+    };
+    return $result
+}
+
+# Initialise result state
+$change=$false;
+$fail=$false;
+$msg=@();
+$fwsettings=@{}
+
+# Parse the arguments
+$params=Parse-Args $args;
+
+$name = Get-AnsibleParam -obj $params -name "name" -failifempty $true
+$direction = Get-AnsibleParam -obj $params -name "direction" -failifempty $true -validateSet "in","out"
+$action = Get-AnsibleParam -obj $params -name "action" -failifempty $true -validateSet "allow","block","bypass"
+$program = Get-AnsibleParam -obj $params -name "program"
+$service = Get-AnsibleParam -obj $params -name "service" -default "any"
+$description = Get-AnsibleParam -obj $params -name "description"
+$enable = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "enable" -default "true")
+$winprofile = Get-AnsibleParam -obj $params -name "profile" -default "any"
+$localip = Get-AnsibleParam -obj $params -name "localip" -default "any"
+$remoteip = Get-AnsibleParam -obj $params -name "remoteip" -default "any"
+$localport = Get-AnsibleParam -obj $params -name "localport" -default "any"
+$remoteport = Get-AnsibleParam -obj $params -name "remoteport" -default "any"
+$protocol = Get-AnsibleParam -obj $params -name "protocol" -default "any"
+
+$state = Get-AnsibleParam -obj $params -name "state" -failifempty $true -validateSet "present","absent"
+$force = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "force" -default "false")
+
+# Check the arguments
+If ($enable -eq $true) {
+    $fwsettings.Add("Enabled", "yes");
+} Else {
+    $fwsettings.Add("Enabled", "no");
+};
+
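+# Aside (illustrative only): a sketch of the key translation createFireWallRule
+# above applies when it builds the netsh command line. The function name and
+# sample settings are hypothetical, not part of this module.
+Function Show-ExampleNetshCommand {
+    $exampleSettings = @{'Rule Name'='smtp'; 'Direction'='in'; 'Action'='allow'; 'LocalPort'='25'}
+    $execString = "netsh advfirewall firewall add rule"
+    ForEach ($s in $exampleSettings.GetEnumerator()) {
+        $key = switch ($s.Key) {
+            'Rule Name' { 'name' }
+            'Direction' { 'dir' }
+            'Enabled'   { 'enable' }
+            'Profiles'  { 'profile' }
+            default     { $s.Key.ToLower() }
+        }
+        $execString += " $key=""$($s.Value)"""
+    }
+    # e.g. netsh advfirewall firewall add rule name="smtp" dir="in" action="allow" localport="25"
+    # (hashtable enumeration order is not guaranteed, so argument order may vary)
+    return $execString
+}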
+$fwsettings.Add("Rule Name", $name) +#$fwsettings.Add("displayname", $name) + +$state = $state.ToString().ToLower() +If ($state -eq "present"){ + $fwsettings.Add("Direction", $direction) + $fwsettings.Add("Action", $action) +}; + +If ($description) { + $fwsettings.Add("Description", $description); +} + +If ($program) { + $fwsettings.Add("Program", $program); +} + +$fwsettings.Add("LocalIP", $localip); +$fwsettings.Add("RemoteIP", $remoteip); +$fwsettings.Add("LocalPort", $localport); +$fwsettings.Add("RemotePort", $remoteport); +$fwsettings.Add("Service", $service); +$fwsettings.Add("Protocol", $protocol); +$fwsettings.Add("Profiles", $winprofile) + +$output=@() +$capture=getFirewallRule ($fwsettings); +if ($capture.failed -eq $true) { + $msg+=$capture.msg; + $result=New-Object psobject @{ + changed=$false + failed=$true + error=$capture.error + msg=$msg + }; + Exit-Json $result; +} else { + $diff=$capture.difference + $msg+=$capture.msg; + $identical=$capture.identical; + $multiple=$capture.multiple; +} + + +switch ($state){ + "present" { + if ($capture.exists -eq $false) { + $capture=createFireWallRule($fwsettings); + $msg+=$capture.msg; + $change=$true; + if ($capture.failed -eq $true){ + $result=New-Object psobject @{ + failed=$capture.failed + error=$capture.error + output=$capture.output + changed=$change + msg=$msg + difference=$diff + fwsettings=$fwsettings + }; + Exit-Json $result; + } + } elseif ($capture.identical -eq $false) { + if ($force -eq $true) { + $capture=removeFirewallRule($fwsettings); + $msg+=$capture.msg; + $change=$true; + if ($capture.failed -eq $true){ + $result=New-Object psobject @{ + failed=$capture.failed + error=$capture.error + changed=$change + msg=$msg + output=$capture.output + fwsettings=$fwsettings + }; + Exit-Json $result; + } + $capture=createFireWallRule($fwsettings); + $msg+=$capture.msg; + $change=$true; + if ($capture.failed -eq $true){ + $result=New-Object psobject @{ + failed=$capture.failed + error=$capture.error + changed=$change + msg=$msg + difference=$diff + fwsettings=$fwsettings + }; + Exit-Json $result; + } + + } else { + $fail=$true + $msg+=@("There was already a rule $name with different values, use force=True to overwrite it"); + } + } elseif ($capture.identical -eq $true) { + $msg+=@("Firewall rule $name was already created"); + }; + } + "absent" { + if ($capture.exists -eq $true) { + $capture=removeFirewallRule($fwsettings); + $msg+=$capture.msg; + $change=$true; + if ($capture.failed -eq $true){ + $result=New-Object psobject @{ + failed=$capture.failed + error=$capture.error + changed=$change + msg=$msg + output=$capture.output + fwsettings=$fwsettings + }; + Exit-Json $result; + } + } else { + $msg+=@("Firewall rule $name did not exist"); + }; + } +}; + + +$result=New-Object psobject @{ + failed=$fail + changed=$change + msg=$msg + difference=$diff + fwsettings=$fwsettings +}; + + +Exit-Json $result; diff --git a/windows/win_firewall_rule.py b/windows/win_firewall_rule.py new file mode 100644 index 00000000000..1a5c699f795 --- /dev/null +++ b/windows/win_firewall_rule.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python + +# (c) 2014, Timothy Vandenbrande +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: win_firewall_rule
+version_added: "2.0"
+author: Timothy Vandenbrande
+short_description: Windows firewall automation
+description:
+    - Allows you to create/remove/update firewall rules.
+options:
+    enable:
+        description:
+            - is this firewall rule enabled or disabled
+        default: true
+        required: false
+    state:
+        description:
+            - should this rule be added or removed
+        default: "present"
+        required: true
+        choices: ['present', 'absent']
+    name:
+        description:
+            - the rule's name
+        default: null
+        required: true
+    direction:
+        description:
+            - is this rule for inbound or outbound traffic
+        default: null
+        required: true
+        choices: ['in', 'out']
+    action:
+        description:
+            - what to do with the items this rule is for
+        default: null
+        required: true
+        choices: ['allow', 'block', 'bypass']
+    description:
+        description:
+            - description for the firewall rule
+        default: null
+        required: false
+    localip:
+        description:
+            - the local ip address this rule applies to
+        default: 'any'
+        required: false
+    remoteip:
+        description:
+            - the remote ip address/range this rule applies to
+        default: 'any'
+        required: false
+    localport:
+        description:
+            - the local port this rule applies to
+        default: 'any'
+        required: false
+    remoteport:
+        description:
+            - the remote port this rule applies to
+        default: 'any'
+        required: false
+    program:
+        description:
+            - the program this rule applies to
+        default: null
+        required: false
+    service:
+        description:
+            - the service this rule applies to
+        default: 'any'
+        required: false
+    protocol:
+        description:
+            - the protocol this rule applies to
+        default: 'any'
+        required: false
+    profile:
+        description:
+            - the profile this rule applies to, e.g. Domain,Private,Public
+        default: 'any'
+        required: false
+    force:
+        description:
+            - Enforces the change if a rule with different values exists
+        default: false
+        required: false
+
+
+'''
+
+EXAMPLES = '''
+- name: Firewall rule to allow smtp on TCP port 25
+  action: win_firewall_rule
+  args:
+      name: smtp
+      enable: yes
+      state: present
+      localport: 25
+      action: allow
+      direction: In
+      protocol: TCP
+
+'''
diff --git a/windows/win_iis_virtualdirectory.ps1 b/windows/win_iis_virtualdirectory.ps1
index 3f2ab692b42..44854ff09b4 100644
--- a/windows/win_iis_virtualdirectory.ps1
+++ b/windows/win_iis_virtualdirectory.ps1
@@ -66,7 +66,11 @@ $directory_path = if($application) {
 }
 # Directory info
-$directory = Get-WebVirtualDirectory -Site $site -Name $name
+$directory = if($application) {
+    Get-WebVirtualDirectory -Site $site -Name $name -Application $application
+} else {
+    Get-WebVirtualDirectory -Site $site -Name $name
+}
 try {
     # Add directory
diff --git a/windows/win_iis_virtualdirectory.py b/windows/win_iis_virtualdirectory.py
index 1ccb34a65d3..9388cb9d6be 100644
--- a/windows/win_iis_virtualdirectory.py
+++ b/windows/win_iis_virtualdirectory.py
@@ -18,13 +18,17 @@
 # You should have received a copy of the GNU General Public License
 # along with Ansible. If not, see .
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_iis_virtualdirectory version_added: "2.0" -short_description: Configures a IIS virtual directories. +short_description: Configures a virtual directory in IIS. description: - - Creates, Removes and configures a IIS Web site + - Creates, Removes and configures a virtual directory in IIS. options: name: description: @@ -37,12 +41,11 @@ - absent - present required: false - default: null + default: present site: description: - The site name under which the virtual directory is created or exists. - required: false - default: null + required: true application: description: - The application under which the virtual directory is created or exists. @@ -55,3 +58,14 @@ default: null author: Henrik Wallström ''' + +EXAMPLES = ''' +# This creates a virtual directory if it doesn't exist. +$ ansible -i hosts -m win_iis_virtualdirectory -a "name='somedirectory' site=somesite state=present physical_path=c:\\virtualdirectory\\some" host + +# This removes a virtual directory if it exists. +$ ansible -i hosts -m win_iis_virtualdirectory -a "name='somedirectory' site=somesite state=absent" host + +# This creates a virtual directory on an application if it doesn't exist. +$ ansible -i hosts -m win_iis_virtualdirectory -a "name='somedirectory' site=somesite application=someapp state=present physical_path=c:\\virtualdirectory\\some" host +''' diff --git a/windows/win_iis_webapplication.py b/windows/win_iis_webapplication.py index b8ebd085162..26177eb90b2 100644 --- a/windows/win_iis_webapplication.py +++ b/windows/win_iis_webapplication.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_iis_webapplication diff --git a/windows/win_iis_webapppool.ps1 b/windows/win_iis_webapppool.ps1 index 2ed369e4a3f..4172dc2f336 100644 --- a/windows/win_iis_webapppool.ps1 +++ b/windows/win_iis_webapppool.ps1 @@ -39,7 +39,7 @@ If (($state -Ne $FALSE) -And ($state -NotIn $valid_states)) { # Attributes parameter - Pipe separated list of attributes where # keys and values are separated by comma (paramA:valyeA|paramB:valueB) $attributes = @{}; -If ($params.attributes) { +If (Get-Member -InputObject $params -Name attributes) { $params.attributes -split '\|' | foreach { $key, $value = $_ -split "\:"; $attributes.Add($key, $value); @@ -90,10 +90,18 @@ try { Stop-WebAppPool -Name $name -ErrorAction Stop $result.changed = $TRUE } - if ((($state -eq 'started') -and ($pool.State -eq 'Stopped')) -or ($state -eq 'restarted')) { + if ((($state -eq 'started') -and ($pool.State -eq 'Stopped'))) { Start-WebAppPool -Name $name -ErrorAction Stop $result.changed = $TRUE } + if ($state -eq 'restarted') { + switch ($pool.State) + { + 'Stopped' { Start-WebAppPool -Name $name -ErrorAction Stop } + default { Restart-WebAppPool -Name $name -ErrorAction Stop } + } + $result.changed = $TRUE + } } } catch { Fail-Json $result $_.Exception.Message @@ -101,12 +109,15 @@ try { # Result $pool = Get-Item IIS:\AppPools\$name -$result.info = @{ - name = $pool.Name - state = $pool.State - attributes = New-Object psobject @{} -}; - -$pool.Attributes | ForEach { $result.info.attributes.Add($_.Name, $_.Value)}; +if ($pool) +{ + $result.info = @{ + name = $pool.Name + state = $pool.State + attributes = New-Object psobject @{} + }; + + $pool.Attributes | ForEach { $result.info.attributes.Add($_.Name, $_.Value)}; +} -Exit-Json $result +Exit-Json $result \ No newline at end of file diff --git a/windows/win_iis_webapppool.py b/windows/win_iis_webapppool.py index c77c3b04cb7..e2cb8778b5f 100644 --- a/windows/win_iis_webapppool.py +++ b/windows/win_iis_webapppool.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . 
+ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_iis_webapppool diff --git a/windows/win_iis_webbinding.ps1 b/windows/win_iis_webbinding.ps1 index bdff43fc63c..dfd9cdb958b 100644 --- a/windows/win_iis_webbinding.ps1 +++ b/windows/win_iis_webbinding.ps1 @@ -23,42 +23,35 @@ $params = Parse-Args $args; -# Name parameter -$name = Get-Attr $params "name" $FALSE; -If ($name -eq $FALSE) { - Fail-Json (New-Object psobject) "missing required argument: name"; -} - -# State parameter -$state = Get-Attr $params "state" $FALSE; -$valid_states = ($FALSE, 'present', 'absent'); -If ($state -NotIn $valid_states) { - Fail-Json $result "state is '$state'; must be $($valid_states)" -} +$name = Get-AnsibleParam $params -name "name" -failifempty $true +$state = Get-AnsibleParam $params "state" -default "present" -validateSet "present","absent" +$host_header = Get-AnsibleParam $params -name "host_header" +$protocol = Get-AnsibleParam $params -name "protocol" +$port = Get-AnsibleParam $params -name "port" +$ip = Get-AnsibleParam $params -name "ip" +$certificatehash = Get-AnsibleParam $params -name "certificate_hash" -default $false +$certificateStoreName = Get-AnsibleParam $params -name "certificate_store_name" -default "MY" $binding_parameters = New-Object psobject @{ Name = $name }; -If ($params.host_header) { - $binding_parameters.HostHeader = $params.host_header +If ($host_header) { + $binding_parameters.HostHeader = $host_header } -If ($params.protocol) { - $binding_parameters.Protocol = $params.protocol +If ($protocol) { + $binding_parameters.Protocol = $protocol } -If ($params.port) { - $binding_parameters.Port = $params.port +If ($port) { + $binding_parameters.Port = $port } -If ($params.ip) { - $binding_parameters.IPAddress = $params.ip +If ($ip) { + $binding_parameters.IPAddress = $ip } -$certificateHash = Get-Attr $params "certificate_hash" $FALSE; -$certificateStoreName = Get-Attr $params "certificate_store_name" "MY"; - # Ensure WebAdministration module is loaded if ((Get-Module "WebAdministration" -ErrorAction SilentlyContinue) -eq $null){ Import-Module WebAdministration @@ -98,12 +91,12 @@ try { # Select certificat if($certificateHash -ne $FALSE) { - $ip = $binding_parameters.IPAddress + $ip = $binding_parameters["IPAddress"] if((!$ip) -or ($ip -eq "*")) { $ip = "0.0.0.0" } - $port = $binding_parameters.Port + $port = $binding_parameters["Port"] if(!$port) { $port = 443 } diff --git a/windows/win_iis_webbinding.py b/windows/win_iis_webbinding.py index 061bed73723..c7a08628f48 100644 --- a/windows/win_iis_webbinding.py +++ b/windows/win_iis_webbinding.py @@ -19,6 +19,10 @@ # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_iis_webbinding @@ -66,12 +70,6 @@ required: false default: null aliases: [] - protocol: - description: - - The protocol to be used for the Web binding (usually HTTP, HTTPS, or FTP). - required: false - default: null - aliases: [] certificate_hash: description: - Certificate hash for the SSL binding. The certificate hash is the unique identifier for the certificate. 
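For context on the certificate selection above: once the binding's IP and port have been resolved (with the module falling back to 0.0.0.0 for an empty or * IP and to 443 when no port is given), the usual WebAdministration pattern for attaching a certificate by hash looks roughly like the sketch below. This is a hedged illustration, not the module's exact code; the thumbprint is a placeholder.

Import-Module WebAdministration
$certificateHash = '<thumbprint>'   # placeholder thumbprint of a cert already in the store
$certificateStoreName = 'MY'
$ip = '0.0.0.0'
$port = 443
# bind the certificate from the machine store to the resolved IP/port pair
Get-Item "Cert:\LocalMachine\$certificateStoreName\$certificateHash" |
    New-Item "IIS:\SslBindings\$ip!$port"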
diff --git a/windows/win_iis_website.ps1 b/windows/win_iis_website.ps1 index 26a8df12730..74fc3df3026 100644 --- a/windows/win_iis_website.ps1 +++ b/windows/win_iis_website.ps1 @@ -37,6 +37,7 @@ If (($state -ne $FALSE) -and ($state -ne 'started') -and ($state -ne 'stopped') # Path parameter $physical_path = Get-Attr $params "physical_path" $FALSE; +$site_id = Get-Attr $params "site_id" $FALSE; # Application Pool Parameter $application_pool = Get-Attr $params "application_pool" $FALSE; @@ -91,6 +92,10 @@ Try { $site_parameters.ApplicationPool = $application_pool } + If ($site_id) { + $site_parameters.ID = $site_id + } + If ($bind_port) { $site_parameters.Port = $bind_port } @@ -103,6 +108,12 @@ Try { $site_parameters.HostHeader = $bind_hostname } + # Fix for error "New-Item : Index was outside the bounds of the array." + # This is a bug in the New-WebSite commandlet. Apparently there must be at least one site configured in IIS otherwise New-WebSite crashes. + # For more details, see http://stackoverflow.com/questions/3573889/ps-c-new-website-blah-throws-index-was-outside-the-bounds-of-the-array + $sites_list = get-childitem -Path IIS:\sites + if ($sites_list -eq $null) { $site_parameters.ID = 1 } + $site = New-Website @site_parameters -Force $result.changed = $true } @@ -165,15 +176,21 @@ Catch Fail-Json (New-Object psobject) $_.Exception.Message } -$site = Get-Website | Where { $_.Name -eq $name } -$result.site = New-Object psobject @{ - Name = $site.Name - ID = $site.ID - State = $site.State - PhysicalPath = $site.PhysicalPath - ApplicationPool = $site.applicationPool - Bindings = @($site.Bindings.Collection | ForEach-Object { $_.BindingInformation }) +if ($state -ne 'absent') +{ + $site = Get-Website | Where { $_.Name -eq $name } } +if ($site) +{ + $result.site = New-Object psobject @{ + Name = $site.Name + ID = $site.ID + State = $site.State + PhysicalPath = $site.PhysicalPath + ApplicationPool = $site.applicationPool + Bindings = @($site.Bindings.Collection | ForEach-Object { $_.BindingInformation }) + } +} Exit-Json $result diff --git a/windows/win_iis_website.py b/windows/win_iis_website.py index 8921afe5970..9c65c067c95 100644 --- a/windows/win_iis_website.py +++ b/windows/win_iis_website.py @@ -18,6 +18,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_iis_website @@ -32,6 +36,12 @@ required: true default: null aliases: [] + site_id: + description: + - Explicitly set the IIS numeric ID for a site. Note that this value cannot be changed after the website has been created. + required: false + version_added: "2.1" + default: null state: description: - State of the web site diff --git a/windows/win_nssm.ps1 b/windows/win_nssm.ps1 new file mode 100644 index 00000000000..da3d01a7161 --- /dev/null +++ b/windows/win_nssm.ps1 @@ -0,0 +1,685 @@ +#!powershell +# This file is part of Ansible +# +# Copyright 2015, George Frank +# Copyright 2015, Adam Keech +# Copyright 2015, Hans-Joachim Kliemeck +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +$ErrorActionPreference = "Stop" + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +$result = New-Object PSObject; +Set-Attr $result "changed" $false; + +$name = Get-Attr $params "name" -failifempty $true +$state = Get-Attr $params "state" -default "present" -validateSet "present", "absent", "started", "stopped", "restarted" -resultobj $result + +$application = Get-Attr $params "application" -default $null +$appParameters = Get-Attr $params "app_parameters" -default $null +$startMode = Get-Attr $params "start_mode" -default "auto" -validateSet "auto", "manual", "disabled" -resultobj $result + +$stdoutFile = Get-Attr $params "stdout_file" -default $null +$stderrFile = Get-Attr $params "stderr_file" -default $null +$dependencies = Get-Attr $params "dependencies" -default $null + +$user = Get-Attr $params "user" -default $null +$password = Get-Attr $params "password" -default $null + + +#abstract the calling of nssm because some PowerShell environments +#mishandle its stdout(which is Unicode) as UTF8 +Function Nssm-Invoke +{ + [CmdletBinding()] + param( + [Parameter(Mandatory=$true)] + [string]$cmd + ) + Try { + $encodingWas = [System.Console]::OutputEncoding + [System.Console]::OutputEncoding = [System.Text.Encoding]::Unicode + + $nssmOutput = invoke-expression "nssm $cmd" + return $nssmOutput + } + Catch { + $ErrorMessage = $_.Exception.Message + Fail-Json $result "an exception occurred when invoking NSSM: $ErrorMessage" + } + Finally { + # Set the console encoding back to what it was + [System.Console]::OutputEncoding = $encodingWas + } +} + +Function Service-Exists +{ + [CmdletBinding()] + param( + [Parameter(Mandatory=$true)] + [string]$name + ) + + return [bool](Get-Service "$name" -ErrorAction SilentlyContinue) +} + +Function Nssm-Remove +{ + [CmdletBinding()] + param( + [Parameter(Mandatory=$true)] + [string]$name + ) + + if (Service-Exists -name $name) + { + $cmd = "stop ""$name""" + $results = Nssm-Invoke $cmd + + $cmd = "remove ""$name"" confirm" + $results = Nssm-Invoke $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "nssm_error_cmd" $cmd + Set-Attr $result "nssm_error_log" "$results" + Throw "Error removing service ""$name""" + } + + Set-Attr $result "changed_by" "remove_service" + $result.changed = $true + } +} + +Function Nssm-Install +{ + [CmdletBinding()] + param( + [Parameter(Mandatory=$true)] + [string]$name, + [Parameter(Mandatory=$true)] + [AllowEmptyString()] + [string]$application + ) + + if (!$application) + { + Throw "Error installing service ""$name"". No application was supplied." 
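+        # Aside (illustrative only): throughout this module, service names are
+        # wrapped in doubled quotes inside a double-quoted string, which
+        # PowerShell emits as literal quotes around the expanded value. The
+        # function below is a hypothetical sketch of that quoting behaviour,
+        # not part of the module.
+        Function Show-ExampleNssmQuoting {
+            param([string]$exampleName = 'my service')
+            $exampleCmd = "install ""$exampleName"" C:\app.exe"
+            return $exampleCmd   # -> install "my service" C:\app.exe
+        }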
+ } + If (-Not (Test-Path -Path $application -PathType Leaf)) { + Throw "$application does not exist on the host" + } + + if (!(Service-Exists -name $name)) + { + $results = Nssm-Invoke "install ""$name"" $application" + + if ($LastExitCode -ne 0) + { + Set-Attr $result "nssm_error_cmd" $cmd + Set-Attr $result "nssm_error_log" "$results" + Throw "Error installing service ""$name""" + } + + Set-Attr $result "changed_by" "install_service" + $result.changed = $true + + } else { + $results = Nssm-Invoke "get ""$name"" Application" + + if ($LastExitCode -ne 0) + { + Set-Attr $result "nssm_error_cmd" $cmd + Set-Attr $result "nssm_error_log" "$results" + Throw "Error installing service ""$name""" + } + + if ($results -cnotlike $application) + { + $cmd = "set ""$name"" Application $application" + + $results = Nssm-Invoke $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "nssm_error_cmd" $cmd + Set-Attr $result "nssm_error_log" "$results" + Throw "Error installing service ""$name""" + } + Set-Attr $result "application" "$application" + + Set-Attr $result "changed_by" "reinstall_service" + $result.changed = $true + } + } + + if ($result.changed) + { + $applicationPath = (Get-Item $application).DirectoryName + $cmd = "nssm set ""$name"" AppDirectory $applicationPath" + + $results = invoke-expression $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "nssm_error_cmd" $cmd + Set-Attr $result "nssm_error_log" "$results" + Throw "Error installing service ""$name""" + } + } +} + +Function ParseAppParameters() +{ + [CmdletBinding()] + param( + [Parameter(Mandatory=$true)] + [AllowEmptyString()] + [string]$appParameters + ) + + $escapedAppParameters = $appParameters.TrimStart("@").TrimStart("{").TrimEnd("}").Replace("; ","`n").Replace("\","\\") + + return ConvertFrom-StringData -StringData $escapedAppParameters +} + + +Function Nssm-Update-AppParameters +{ + [CmdletBinding()] + param( + [Parameter(Mandatory=$true)] + [string]$name, + [Parameter(Mandatory=$true)] + [AllowEmptyString()] + [string]$appParameters + ) + + $cmd = "get ""$name"" AppParameters" + $results = Nssm-Invoke $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "nssm_error_cmd" $cmd + Set-Attr $result "nssm_error_log" "$results" + Throw "Error updating AppParameters for service ""$name""" + } + + $appParamKeys = @() + $appParamVals = @() + $singleLineParams = "" + + if ($appParameters) + { + $appParametersHash = ParseAppParameters -appParameters $appParameters + $appParametersHash.GetEnumerator() | + % { + $key = $($_.Name) + $val = $($_.Value) + + $appParamKeys += $key + $appParamVals += $val + + if ($key -eq "_") { + $singleLineParams = "$val " + $singleLineParams + } else { + $singleLineParams = $singleLineParams + "$key ""$val""" + } + } + + Set-Attr $result "nssm_app_parameters_parsed" $appParametersHash + Set-Attr $result "nssm_app_parameters_keys" $appParamKeys + Set-Attr $result "nssm_app_parameters_vals" $appParamVals + } + + Set-Attr $result "nssm_app_parameters" $appParameters + Set-Attr $result "nssm_single_line_app_parameters" $singleLineParams + + if ($results -ne $singleLineParams) + { + if ($appParameters) + { + $cmd = "set ""$name"" AppParameters $singleLineParams" + } else { + $cmd = "set ""$name"" AppParameters '""""'" + } + $results = Nssm-Invoke $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "nssm_error_cmd" $cmd + Set-Attr $result "nssm_error_log" "$results" + Throw "Error updating AppParameters for service ""$name""" + } + + Set-Attr $result "changed_by" "update_app_parameters" + 
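+        # Aside (illustrative only): a sketch of what ParseAppParameters above and
+        # the enumeration loop before it do with app_parameters. The input string
+        # format here is an assumption based on the module's Trim/Replace calls,
+        # and the function name is hypothetical.
+        Function Show-ExampleAppParameters {
+            $raw = '@{_=bar; -file=output.bat}'
+            $data = $raw.TrimStart("@").TrimStart("{").TrimEnd("}").Replace("; ","`n").Replace("\","\\")
+            $parsed = ConvertFrom-StringData -StringData $data
+            $line = ""
+            $parsed.GetEnumerator() | ForEach-Object {
+                if ($_.Name -eq "_") { $line = "$($_.Value) " + $line }   # bare positional argument goes first
+                else { $line = $line + "$($_.Name) ""$($_.Value)""" }
+            }
+            return $line   # -> bar -file "output.bat"
+        }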
$result.changed = $true
+    }
+}
+
+Function Nssm-Set-Output-Files
+{
+    [CmdletBinding()]
+    param(
+        [Parameter(Mandatory=$true)]
+        [string]$name,
+        [string]$stdout,
+        [string]$stderr
+    )
+
+    $cmd = "get ""$name"" AppStdout"
+    $results = Nssm-Invoke $cmd
+
+    if ($LastExitCode -ne 0)
+    {
+        Set-Attr $result "nssm_error_cmd" $cmd
+        Set-Attr $result "nssm_error_log" "$results"
+        Throw "Error retrieving existing stdout file for service ""$name"""
+    }
+
+    if ($results -cnotlike $stdout)
+    {
+        if (!$stdout)
+        {
+            $cmd = "reset ""$name"" AppStdout"
+        } else {
+            $cmd = "set ""$name"" AppStdout $stdout"
+        }
+
+        $results = Nssm-Invoke $cmd
+
+        if ($LastExitCode -ne 0)
+        {
+            Set-Attr $result "nssm_error_cmd" $cmd
+            Set-Attr $result "nssm_error_log" "$results"
+            Throw "Error setting stdout file for service ""$name"""
+        }
+
+        Set-Attr $result "changed_by" "set_stdout"
+        $result.changed = $true
+    }
+
+    $cmd = "get ""$name"" AppStderr"
+    $results = Nssm-Invoke $cmd
+
+    if ($LastExitCode -ne 0)
+    {
+        Set-Attr $result "nssm_error_cmd" $cmd
+        Set-Attr $result "nssm_error_log" "$results"
+        Throw "Error retrieving existing stderr file for service ""$name"""
+    }
+
+    if ($results -cnotlike $stderr)
+    {
+        if (!$stderr)
+        {
+            $cmd = "reset ""$name"" AppStderr"
+            $results = Nssm-Invoke $cmd
+
+            if ($LastExitCode -ne 0)
+            {
+                Set-Attr $result "nssm_error_cmd" $cmd
+                Set-Attr $result "nssm_error_log" "$results"
+                Throw "Error clearing stderr file setting for service ""$name"""
+            }
+        } else {
+            $cmd = "set ""$name"" AppStderr $stderr"
+            $results = Nssm-Invoke $cmd
+
+            if ($LastExitCode -ne 0)
+            {
+                Set-Attr $result "nssm_error_cmd" $cmd
+                Set-Attr $result "nssm_error_log" "$results"
+                Throw "Error setting stderr file for service ""$name"""
+            }
+        }
+
+        Set-Attr $result "changed_by" "set_stderr"
+        $result.changed = $true
+    }
+
+    ###
+    # Setup file rotation so we don't accidentally consume too much disk
+    ###
+
+    #set files to overwrite
+    $cmd = "set ""$name"" AppStdoutCreationDisposition 2"
+    $results = Nssm-Invoke $cmd
+
+    $cmd = "set ""$name"" AppStderrCreationDisposition 2"
+    $results = Nssm-Invoke $cmd
+
+    #enable file rotation
+    $cmd = "set ""$name"" AppRotateFiles 1"
+    $results = Nssm-Invoke $cmd
+
+    #don't rotate until the service restarts
+    $cmd = "set ""$name"" AppRotateOnline 0"
+    $results = Nssm-Invoke $cmd
+
+    #both of the below conditions must be met before rotation will happen
+    #minimum age before rotating
+    $cmd = "set ""$name"" AppRotateSeconds 86400"
+    $results = Nssm-Invoke $cmd
+
+    #minimum size before rotating
+    $cmd = "set ""$name"" AppRotateBytes 104858"
+    $results = Nssm-Invoke $cmd
+}
+
+Function Nssm-Update-Credentials
+{
+    [CmdletBinding()]
+    param(
+        [Parameter(Mandatory=$true)]
+        [string]$name,
+        [Parameter(Mandatory=$false)]
+        [string]$user,
+        [Parameter(Mandatory=$false)]
+        [string]$password
+    )
+
+    $cmd = "get ""$name"" ObjectName"
+    $results = Nssm-Invoke $cmd
+
+    if ($LastExitCode -ne 0)
+    {
+        Set-Attr $result "nssm_error_cmd" $cmd
+        Set-Attr $result "nssm_error_log" "$results"
+        Throw "Error updating credentials for service ""$name"""
+    }
+
+    if ($user) {
+        if (!$password) {
+            Throw "A user was specified without a password for service ""$name"""
+        }
+        else {
+            $fullUser = $user
+            If (-Not($user.contains("@")) -And ($user.Split("\").count -eq 1)) {
+                $fullUser = ".\" + $user
+            }
+
+            If ($results -ne $fullUser) {
+                $cmd = "set ""$name"" ObjectName $fullUser $password"
+                $results = Nssm-Invoke $cmd
+
+                if ($LastExitCode -ne 0)
+                {
+                    Set-Attr $result "nssm_error_cmd" $cmd
+                    Set-Attr $result
"nssm_error_log" "$results" + Throw "Error updating credentials for service ""$name""" + } + + Set-Attr $result "changed_by" "update_credentials" + $result.changed = $true + } + } + } +} + +Function Nssm-Update-Dependencies +{ + [CmdletBinding()] + param( + [Parameter(Mandatory=$true)] + [string]$name, + [Parameter(Mandatory=$false)] + [string]$dependencies + ) + + $cmd = "get ""$name"" DependOnService" + $results = Nssm-Invoke $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "nssm_error_cmd" $cmd + Set-Attr $result "nssm_error_log" "$results" + Throw "Error updating dependencies for service ""$name""" + } + + If (($dependencies) -and ($results.Tolower() -ne $dependencies.Tolower())) { + $cmd = "set ""$name"" DependOnService $dependencies" + $results = Nssm-Invoke $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "nssm_error_cmd" $cmd + Set-Attr $result "nssm_error_log" "$results" + Throw "Error updating dependencies for service ""$name""" + } + + Set-Attr $result "changed_by" "update-dependencies" + $result.changed = $true + } +} + +Function Nssm-Update-StartMode +{ + [CmdletBinding()] + param( + [Parameter(Mandatory=$true)] + [string]$name, + [Parameter(Mandatory=$true)] + [string]$mode + ) + + $cmd = "get ""$name"" Start" + $results = Nssm-Invoke $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "nssm_error_cmd" $cmd + Set-Attr $result "nssm_error_log" "$results" + Throw "Error updating start mode for service ""$name""" + } + + $modes=@{"auto" = "SERVICE_AUTO_START"; "manual" = "SERVICE_DEMAND_START"; "disabled" = "SERVICE_DISABLED"} + $mappedMode = $modes.$mode + if ($results -cnotlike $mappedMode) { + $cmd = "set ""$name"" Start $mappedMode" + $results = Nssm-Invoke $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "nssm_error_cmd" $cmd + Set-Attr $result "nssm_error_log" "$results" + Throw "Error updating start mode for service ""$name""" + } + + Set-Attr $result "changed_by" "start_mode" + $result.changed = $true + } +} + +Function Nssm-Get-Status +{ + [CmdletBinding()] + param( + [Parameter(Mandatory=$true)] + [string]$name + ) + + $cmd = "status ""$name""" + $results = Nssm-Invoke $cmd + + return ,$results +} + +Function Nssm-Start +{ + [CmdletBinding()] + param( + [Parameter(Mandatory=$true)] + [string]$name + ) + + $currentStatus = Nssm-Get-Status -name $name + + if ($LastExitCode -ne 0) + { + Set-Attr $result "nssm_error_cmd" $cmd + Set-Attr $result "nssm_error_log" "$results" + Throw "Error starting service ""$name""" + } + + switch ($currentStatus) + { + "SERVICE_RUNNING" { <# Nothing to do #> } + "SERVICE_STOPPED" { Nssm-Start-Service-Command -name $name } + + "SERVICE_CONTINUE_PENDING" { Nssm-Stop-Service-Command -name $name; Nssm-Start-Service-Command -name $name } + "SERVICE_PAUSE_PENDING" { Nssm-Stop-Service-Command -name $name; Nssm-Start-Service-Command -name $name } + "SERVICE_PAUSED" { Nssm-Stop-Service-Command -name $name; Nssm-Start-Service-Command -name $name } + "SERVICE_START_PENDING" { Nssm-Stop-Service-Command -name $name; Nssm-Start-Service-Command -name $name } + "SERVICE_STOP_PENDING" { Nssm-Stop-Service-Command -name $name; Nssm-Start-Service-Command -name $name } + } +} + +Function Nssm-Start-Service-Command +{ + [CmdletBinding()] + param( + [Parameter(Mandatory=$true)] + [string]$name + ) + + $cmd = "start ""$name""" + + $results = Nssm-Invoke $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "nssm_error_cmd" $cmd + Set-Attr $result "nssm_error_log" "$results" + Throw "Error starting service ""$name""" + } + + Set-Attr 
$result "changed_by" "start_service" + $result.changed = $true +} + +Function Nssm-Stop-Service-Command +{ + [CmdletBinding()] + param( + [Parameter(Mandatory=$true)] + [string]$name + ) + + $cmd = "stop ""$name""" + + $results = Nssm-Invoke $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "nssm_error_cmd" $cmd + Set-Attr $result "nssm_error_log" "$results" + Throw "Error stopping service ""$name""" + } + + Set-Attr $result "changed_by" "stop_service_command" + $result.changed = $true +} + +Function Nssm-Stop +{ + [CmdletBinding()] + param( + [Parameter(Mandatory=$true)] + [string]$name + ) + + $currentStatus = Nssm-Get-Status -name $name + + if ($LastExitCode -ne 0) + { + Set-Attr $result "nssm_error_cmd" $cmd + Set-Attr $result "nssm_error_log" "$results" + Throw "Error stopping service ""$name""" + } + + if ($currentStatus -ne "SERVICE_STOPPED") + { + $cmd = "stop ""$name""" + + $results = Nssm-Invoke $cmd + + if ($LastExitCode -ne 0) + { + Set-Attr $result "nssm_error_cmd" $cmd + Set-Attr $result "nssm_error_log" "$results" + Throw "Error stopping service ""$name""" + } + + Set-Attr $result "changed_by" "stop_service" + $result.changed = $true + } +} + +Function Nssm-Restart +{ + [CmdletBinding()] + param( + [Parameter(Mandatory=$true)] + [string]$name + ) + + Nssm-Stop-Service-Command -name $name + Nssm-Start-Service-Command -name $name +} + +Function NssmProcedure +{ + Nssm-Install -name $name -application $application + Nssm-Update-AppParameters -name $name -appParameters $appParameters + Nssm-Set-Output-Files -name $name -stdout $stdoutFile -stderr $stderrFile + Nssm-Update-Dependencies -name $name -dependencies $dependencies + Nssm-Update-Credentials -name $name -user $user -password $password + Nssm-Update-StartMode -name $name -mode $startMode +} + +Try +{ + switch ($state) + { + "absent" { Nssm-Remove -name $name } + "present" { + NssmProcedure + } + "started" { + NssmProcedure + Nssm-Start -name $name + } + "stopped" { + NssmProcedure + Nssm-Stop -name $name + } + "restarted" { + NssmProcedure + Nssm-Restart -name $name + } + } + + Exit-Json $result; +} +Catch +{ + Fail-Json $result $_.Exception.Message +} diff --git a/windows/win_nssm.py b/windows/win_nssm.py new file mode 100644 index 00000000000..57d9dfa3cb5 --- /dev/null +++ b/windows/win_nssm.py @@ -0,0 +1,178 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Heyo +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: win_nssm +version_added: "2.0" +short_description: NSSM - the Non-Sucking Service Manager +description: + - nssm is a service helper which doesn't suck. See https://nssm.cc/ for more information. 
+requirements: + - "nssm >= 2.24.0 (can be installed via the win_chocolatey module: win_chocolatey: name=nssm)" +options: + name: + description: + - Name of the service to operate on + required: true + state: + description: + - State of the service on the system + - Note that NSSM actions like "pause", "continue", "rotate" do not fit the declarative style of Ansible, so these should be implemented via the Ansible command module + required: false + choices: + - present + - started + - stopped + - restarted + - absent + default: started + application: + description: + - The application binary to run as a service + - "Specify this whenever the service may need to be installed (state: present, started, stopped, restarted)" + - "Note that the application name must look like the following, if the directory includes spaces:" + - 'nssm install service "c:\\Program Files\\app.exe\\" "C:\\Path with spaces\\"' + - "See commit 0b386fc1984ab74ee59b7bed14b7e8f57212c22b in the nssm.git project for more info (https://git.nssm.cc/?p=nssm.git;a=commit;h=0b386fc1984ab74ee59b7bed14b7e8f57212c22b)" + required: false + default: null + stdout_file: + description: + - Path to receive output + required: false + default: null + stderr_file: + description: + - Path to receive error output + required: false + default: null + app_parameters: + description: + - Parameters to be passed to the application when it starts + required: false + default: null + dependencies: + description: + - Service dependencies that have to be started to trigger startup, separated by commas. + required: false + default: null + user: + description: + - User account under which the service will run + required: false + default: null + password: + description: + - Password of the user account used for service startup + required: false + default: null + start_mode: + description: + - If C(auto) is selected, the service will start at bootup. C(manual) means that the service will start only when another service needs it. C(disabled) means that the service will stay off, regardless of whether it is needed or not.
+ required: false + default: auto + choices: + - auto + - manual + - disabled +author: + - "Adam Keech (@smadam813)" + - "George Frank (@georgefrank)" + - "Hans-Joachim Kliemeck (@h0nIg)" +''' + +EXAMPLES = ''' +# Install and start the foo service +- win_nssm: + name: foo + application: C:\windows\\foo.exe + +# Install and start the foo service with a key-value pair argument +# This will yield the following command: C:\windows\\foo.exe bar "true" +- win_nssm: + name: foo + application: C:\windows\\foo.exe + app_parameters: + bar: true + +# Install and start the foo service with a key-value pair argument, where the argument needs to start with a dash +# This will yield the following command: C:\windows\\foo.exe -bar "true" +- win_nssm: + name: foo + application: C:\windows\\foo.exe + app_parameters: + "-bar": true + +# Install and start the foo service with a single parameter +# This will yield the following command: C:\windows\\foo.exe bar +- win_nssm: + name: foo + application: C:\windows\\foo.exe + app_parameters: + _: bar + +# Install and start the foo service with a mix of single params, and key value pairs +# This will yield the following command: C:\windows\\foo.exe bar -file output.bat +- win_nssm: + name: foo + application: C:\windows\\foo.exe + app_parameters: + _: bar + "-file": "output.bat" + +# Install and start the foo service, redirecting stdout and stderr to the same file +- win_nssm: + name: foo + application: C:\windows\\foo.exe + stdout_file: C:\windows\\foo.log + stderr_file: C:\windows\\foo.log + +# Install and start the foo service, but wait for dependencies tcpip and adf +- win_nssm: + name: foo + application: C:\windows\\foo.exe + dependencies: 'adf,tcpip' + +# Install and start the foo service with dedicated user +- win_nssm: + name: foo + application: C:\windows\\foo.exe + user: foouser + password: secret + +# Install the foo service but do not start it automatically +- win_nssm: + name: foo + application: C:\windows\\foo.exe + state: present + start_mode: manual + +# Remove the foo service +- win_nssm: + name: foo + state: absent
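+ +# (Illustrative example added in editing, not part of the original patch: +# it simply combines the start_mode and state options documented above.) +# Install the foo service, have it start at boot, and make sure it is running now +- win_nssm: + name: foo + application: C:\windows\\foo.exe + start_mode: auto + state: started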
+''' diff --git a/windows/win_owner.ps1 b/windows/win_owner.ps1 new file mode 100644 index 00000000000..076ab846052 --- /dev/null +++ b/windows/win_owner.ps1 @@ -0,0 +1,136 @@ +#!powershell +# This file is part of Ansible +# +# Copyright 2015, Hans-Joachim Kliemeck +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# WANT_JSON +# POWERSHELL_COMMON + +#Functions +Function UserSearch +{ + Param ([string]$accountName) + #Check if there's a realm specified + + $searchDomain = $false + $searchDomainUPN = $false + if ($accountName.Split("\").count -gt 1) + { + if ($accountName.Split("\")[0] -ne $env:COMPUTERNAME) + { + $searchDomain = $true + $accountName = $accountName.split("\")[1] + } + } + Elseif ($accountName.contains("@")) + { + $searchDomain = $true + $searchDomainUPN = $true + } + Else + { + #Default to local user account + $accountName = $env:COMPUTERNAME + "\" + $accountName + } + + if ($searchDomain -eq $false) + { + # do not use Win32_UserAccount, because e.g. SYSTEM (BUILTIN\SYSTEM or COMPUTERNAME\SYSTEM) will not be listed. on Win32_Account groups will be listed too + $localaccount = get-wmiobject -class "Win32_Account" -namespace "root\CIMV2" -filter "(LocalAccount = True)" | where {$_.Caption -eq $accountName} + if ($localaccount) + { + return $localaccount.SID + } + } + Else + { + #Search by samaccountname + $Searcher = [adsisearcher]"" + + If ($searchDomainUPN -eq $false) { + $Searcher.Filter = "sAMAccountName=$($accountName)" + } + Else { + $Searcher.Filter = "userPrincipalName=$($accountName)" + } + + $result = $Searcher.FindOne() + if ($result) + { + $user = $result.GetDirectoryEntry() + + # get binary SID from AD account + $binarySID = $user.ObjectSid.Value + + # convert to string SID + return (New-Object System.Security.Principal.SecurityIdentifier($binarySID,0)).Value + } + } +} + +$params = Parse-Args $args; + +$result = New-Object PSObject; +Set-Attr $result "changed" $false; + +$path = Get-Attr $params "path" -failifempty $true +$user = Get-Attr $params "user" -failifempty $true +$recurse = Get-Attr $params "recurse" "no" -validateSet "no","yes" -resultobj $result +$recurse = $recurse | ConvertTo-Bool + +If (-Not (Test-Path -Path $path)) { + Fail-Json $result "$path file or directory does not exist on the host" +} + +# Test that the user/group is resolvable on the local machine +$sid = UserSearch -AccountName ($user) +if (!$sid) +{ + Fail-Json $result "$user is not a valid user or group on the host machine or domain" +} + +Try { + $objUser = New-Object System.Security.Principal.SecurityIdentifier($sid) + + $file = Get-Item -Path $path + $acl = Get-Acl $file.FullName + + If ($acl.getOwner([System.Security.Principal.SecurityIdentifier]) -ne $objUser) { + $acl.setOwner($objUser) + Set-Acl $file.FullName $acl + + Set-Attr $result "changed" $true; + } + + If ($recurse) { + $files = Get-ChildItem -Path $path -Force -Recurse + ForEach($file in $files){ + $acl = Get-Acl $file.FullName + + If ($acl.getOwner([System.Security.Principal.SecurityIdentifier]) -ne $objUser) { + $acl.setOwner($objUser) + Set-Acl $file.FullName $acl + + Set-Attr $result "changed" $true; + } + } + } +} +Catch { + Fail-Json $result "an error occurred when attempting to change owner on $path for $user" +} + +Exit-Json $result
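+ +# The ownership idiom used above, reduced to a sketch (illustrative only, not +# part of the original module; "BUILTIN\Administrators" and C:\some\path are +# example placeholders): +# $sid = (New-Object System.Security.Principal.NTAccount("BUILTIN\Administrators")).Translate([System.Security.Principal.SecurityIdentifier]) +# $acl = Get-Acl C:\some\path +# if ($acl.GetOwner([System.Security.Principal.SecurityIdentifier]) -ne $sid) { +#     $acl.SetOwner($sid) +#     Set-Acl C:\some\path $acl +# }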
diff --git a/windows/win_owner.py b/windows/win_owner.py new file mode 100644 index 00000000000..b3ad35b40a6 --- /dev/null +++ b/windows/win_owner.py @@ -0,0 +1,73 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2015, Hans-Joachim Kliemeck +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: win_owner +version_added: "2.1" +short_description: Set owner +description: + - Set owner of files or directories +options: + path: + description: + - Path to the file or directory whose owner should be changed + required: true + user: + description: + - Name of the user or group that should become the owner + required: true + recurse: + description: + - Indicates if the owner should be changed recursively + required: false + choices: + - no + - yes + default: no +author: Hans-Joachim Kliemeck (@h0nIg) +''' + +EXAMPLES = ''' +# Playbook example +--- +- name: Change owner of Path + win_owner: + path: 'C:\\apache\\' + user: apache + recurse: yes + +- name: Set the owner of root directory + win_owner: + path: 'C:\\apache\\' + user: SYSTEM + recurse: no +''' + +RETURN = ''' + +''' \ No newline at end of file diff --git a/windows/win_package.ps1 b/windows/win_package.ps1 new file mode 100644 index 00000000000..544c3660866 --- /dev/null +++ b/windows/win_package.ps1 @@ -0,0 +1,1326 @@ +#!powershell +# (c) 2014, Trond Hindenes, and others +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# WANT_JSON +# POWERSHELL_COMMON + +#region DSC + +data LocalizedData +{ + # culture="en-US" + # TODO: Support WhatIf + ConvertFrom-StringData @' +InvalidIdentifyingNumber=The specified IdentifyingNumber ({0}) is not a valid Guid +InvalidPath=The specified Path ({0}) is not in a valid format. Valid formats are local paths, UNC, and HTTP +InvalidNameOrId=The specified Name ({0}) and IdentifyingNumber ({1}) do not match Name ({2}) and IdentifyingNumber ({3}) in the MSI file +NeedsMoreInfo=Either Name or ProductId is required +InvalidBinaryType=The specified Path ({0}) does not appear to specify an EXE or MSI file and as such is not supported +CouldNotOpenLog=The specified LogPath ({0}) could not be opened +CouldNotStartProcess=The process {0} could not be started +UnexpectedReturnCode=The return code {0} was not expected.
Configuration is likely not correct +PathDoesNotExist=The given Path ({0}) could not be found +CouldNotOpenDestFile=Could not open the file {0} for writing +CouldNotGetHttpStream=Could not get the {0} stream for file {1} +ErrorCopyingDataToFile=Encountered error while writing the contents of {0} to {1} +PackageConfigurationComplete=Package configuration finished +PackageConfigurationStarting=Package configuration starting +InstalledPackage=Installed package +UninstalledPackage=Uninstalled package +NoChangeRequired=Package found in desired state, no action required +RemoveExistingLogFile=Remove existing log file +CreateLogFile=Create log file +MountSharePath=Mount share to get media +DownloadHTTPFile=Download the media over HTTP or HTTPS +StartingProcessMessage=Starting process {0} with arguments {1} +RemoveDownloadedFile=Remove the downloaded file +PackageInstalled=Package has been installed +PackageUninstalled=Package has been uninstalled +MachineRequiresReboot=The machine requires a reboot +PackageDoesNotAppearInstalled=The package {0} is not installed +PackageAppearsInstalled=The package {0} is already installed +PostValidationError=Package from {0} was installed, but the specified ProductId and/or Name does not match package details +'@ +} + +$Debug = $true +Function Trace-Message +{ + param([string] $Message) + if($Debug) + { + Write-Verbose $Message + } +} + +$CacheLocation = "$env:ProgramData\Microsoft\Windows\PowerShell\Configuration\BuiltinProvCache\MSFT_PackageResource" + +Function Throw-InvalidArgumentException +{ + param( + [string] $Message, + [string] $ParamName + ) + + $exception = new-object System.ArgumentException $Message,$ParamName + $errorRecord = New-Object System.Management.Automation.ErrorRecord $exception,$ParamName,"InvalidArgument",$null + throw $errorRecord +} + +Function Throw-InvalidNameOrIdException +{ + param( + [string] $Message + ) + + $exception = new-object System.ArgumentException $Message + $errorRecord = New-Object System.Management.Automation.ErrorRecord $exception,"NameOrIdNotInMSI","InvalidArgument",$null + throw $errorRecord +} + +Function Throw-TerminatingError +{ + param( + [string] $Message, + [System.Management.Automation.ErrorRecord] $ErrorRecord + ) + + if ($errorRecord) + { + $exception = new-object "System.InvalidOperationException" $Message,$ErrorRecord.Exception + } + Else + { + $exception = new-object "System.InvalidOperationException" $Message + } + + $errorRecord = New-Object System.Management.Automation.ErrorRecord $exception,"MachineStateIncorrect","InvalidOperation",$null + throw $errorRecord +} + +Function Get-RegistryValueIgnoreError +{ + param + ( + [parameter(Mandatory = $true)] + [Microsoft.Win32.RegistryHive] + $RegistryHive, + + [parameter(Mandatory = $true)] + [System.String] + $Key, + + [parameter(Mandatory = $true)] + [System.String] + $Value, + + [parameter(Mandatory = $true)] + [Microsoft.Win32.RegistryView] + $RegistryView + ) + + try + { + $baseKey = [Microsoft.Win32.RegistryKey]::OpenBaseKey($RegistryHive, $RegistryView) + $subKey = $baseKey.OpenSubKey($Key) + if($subKey -ne $null) + { + return $subKey.GetValue($Value) + } + } + catch + { + $exceptionText = ($_ | Out-String).Trim() + Write-Verbose "Exception occurred in Get-RegistryValueIgnoreError: $exceptionText" + } + return $null +}
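+ +# Usage sketch (illustrative, not from the original patch; $key and $valueName +# are placeholders): callers such as Get-ProductEntry below try the 64-bit +# registry view first and fall back to the 32-bit view: +# $v = Get-RegistryValueIgnoreError LocalMachine $key $valueName Registry64 +# if ($v -eq $null) { $v = Get-RegistryValueIgnoreError LocalMachine $key $valueName Registry32 }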
+ +Function Validate-StandardArguments +{ + param( + $Path, + $ProductId, + $Name + ) + + Trace-Message "Validate-StandardArguments, Path was $Path" + $uri = $null + try + { + $uri = [uri] $Path + } + catch + { + Throw-InvalidArgumentException ($LocalizedData.InvalidPath -f $Path) "Path" + } + + if(@("file", "http", "https") -notcontains $uri.Scheme) + { + Trace-Message "The uri scheme was $($uri.Scheme)" + Throw-InvalidArgumentException ($LocalizedData.InvalidPath -f $Path) "Path" + } + + $pathExt = [System.IO.Path]::GetExtension($Path) + Trace-Message "The path extension was $pathExt" + if(@(".msi",".exe") -notcontains $pathExt.ToLower()) + { + Throw-InvalidArgumentException ($LocalizedData.InvalidBinaryType -f $Path) "Path" + } + + $identifyingNumber = $null + if(-not $Name -and -not $ProductId) + { + #It's a tossup here which argument to blame, so just pick ProductId to encourage customers to use the most efficient version + Throw-InvalidArgumentException ($LocalizedData.NeedsMoreInfo -f $Path) "ProductId" + } + elseif($ProductId) + { + try + { + Trace-Message "Parsing $ProductId as an identifyingNumber" + $TestGuid = [system.guid]::NewGuid() + #Check to see if the productid is a guid + if ([guid]::TryParse($ProductId, [ref]$TestGuid)) + { + $identifyingNumber = "{{{0}}}" -f [Guid]::Parse($ProductId).ToString().ToUpper() + Trace-Message "Parsed $ProductId as $identifyingNumber (is guid)" + } + Else + { + $identifyingNumber = $ProductId + Trace-Message "Parsed $ProductId as $identifyingNumber (is not guid)" + } + + Trace-Message "Parsed $ProductId as $identifyingNumber" + } + catch + { + Throw-InvalidArgumentException ($LocalizedData.InvalidIdentifyingNumber -f $ProductId) $ProductId + } + } + + return $uri, $identifyingNumber +} + +Function Get-ProductEntry +{ + param + ( + [string] $Name, + [string] $IdentifyingNumber, + [string] $InstalledCheckRegKey, + [string] $InstalledCheckRegValueName, + [string] $InstalledCheckRegValueData + ) + + $uninstallKey = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall" + $uninstallKeyWow64 = "HKLM:\SOFTWARE\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall" + + if($IdentifyingNumber) + { + $keyLocation = "$uninstallKey\$identifyingNumber" + $item = Get-Item $keyLocation -EA SilentlyContinue + if(-not $item) + { + $keyLocation = "$uninstallKeyWow64\$identifyingNumber" + $item = Get-Item $keyLocation -EA SilentlyContinue + } + + return $item + } + + foreach($item in (Get-ChildItem -EA Ignore $uninstallKey, $uninstallKeyWow64)) + { + if($Name -eq (Get-LocalizableRegKeyValue $item "DisplayName")) + { + return $item + } + } + + if ($InstalledCheckRegKey -and $InstalledCheckRegValueName -and $InstalledCheckRegValueData) + { + $installValue = $null + + #if 64bit OS, check 64bit registry view first + if ((Get-WmiObject -Class Win32_OperatingSystem -ComputerName "localhost" -ea 0).OSArchitecture -eq '64-bit') + { + $installValue = Get-RegistryValueIgnoreError LocalMachine "$InstalledCheckRegKey" "$InstalledCheckRegValueName" Registry64 + } + + if($installValue -eq $null) + { + $installValue = Get-RegistryValueIgnoreError LocalMachine "$InstalledCheckRegKey" "$InstalledCheckRegValueName" Registry32 + } + + if($installValue) + { + if($InstalledCheckRegValueData -and $installValue -eq $InstalledCheckRegValueData) + { + return @{ + Installed = $true + } + } + } + } + + return $null +} + +function Test-TargetResource +{ + param + ( + [ValidateSet("Present", "Absent")] + [string] $Ensure = "Present", + + [parameter(Mandatory = $true)] + [AllowEmptyString()] + [string] $Name, + + [parameter(Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [string] $Path, + + [parameter(Mandatory = $true)] + [AllowEmptyString()] + [string] $ProductId, + + [string] $Arguments,
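+ # ($Credential below is used to access the package source, for example a UNC + # share, and is mapped from the module's user_name/user_password options in + # the wrapper at the bottom of this file; $RunAsCredential runs the installer + # under another account and is not exposed by the wrapper.)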
+ + [pscredential] $Credential, + + [int[]] $ReturnCode, + + [string] $LogPath, + + [pscredential] $RunAsCredential, + + [string] $InstalledCheckRegKey, + + [string] $InstalledCheckRegValueName, + + [string] $InstalledCheckRegValueData + ) + + $uri, $identifyingNumber = Validate-StandardArguments $Path $ProductId $Name + $product = Get-ProductEntry $Name $identifyingNumber $InstalledCheckRegKey $InstalledCheckRegValueName $InstalledCheckRegValueData + Trace-Message "Ensure is $Ensure" + if($product) + { + Trace-Message "product found" + } + else + { + Trace-Message "product installation cannot be determined" + } + Trace-Message ("product as boolean is {0}" -f [boolean]$product) + $res = ($product -ne $null -and $Ensure -eq "Present") -or ($product -eq $null -and $Ensure -eq "Absent") + + # install registry test overrides the product id test and there is no true product information + # when doing a lookup via registry key + if ($product -and $InstalledCheckRegKey -and $InstalledCheckRegValueName -and $InstalledCheckRegValueData) + { + Write-Verbose ($LocalizedData.PackageAppearsInstalled -f $Name) + } + else + { + if ($product -ne $null) + { + $name = Get-LocalizableRegKeyValue $product "DisplayName" + Write-Verbose ($LocalizedData.PackageAppearsInstalled -f $name) + } + else + { + $displayName = $null + if($Name) + { + $displayName = $Name + } + else + { + $displayName = $ProductId + } + + Write-Verbose ($LocalizedData.PackageDoesNotAppearInstalled -f $displayName) + } + + } + + return $res +} + +function Get-LocalizableRegKeyValue +{ + param( + [object] $RegKey, + [string] $ValueName + ) + + $res = $RegKey.GetValue("{0}_Localized" -f $ValueName) + if(-not $res) + { + $res = $RegKey.GetValue($ValueName) + } + + return $res +} + +function Get-TargetResource +{ + param + ( + [parameter(Mandatory = $true)] + [AllowEmptyString()] + [string] $Name, + + [parameter(Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [string] $Path, + + [parameter(Mandatory = $true)] + [AllowEmptyString()] + [string] $ProductId, + + [string] $InstalledCheckRegKey, + + [string] $InstalledCheckRegValueName, + + [string] $InstalledCheckRegValueData + ) + + #If the user gave the ProductId then we derive $identifyingNumber + $uri, $identifyingNumber = Validate-StandardArguments $Path $ProductId $Name + + $localMsi = $uri.IsFile -and -not $uri.IsUnc + + $product = Get-ProductEntry $Name $identifyingNumber $InstalledCheckRegKey $InstalledCheckRegValueName $InstalledCheckRegValueData + + if(-not $product) + { + return @{ + Ensure = "Absent" + Name = $Name + ProductId = $identifyingNumber + Installed = $false + InstalledCheckRegKey = $InstalledCheckRegKey + InstalledCheckRegValueName = $InstalledCheckRegValueName + InstalledCheckRegValueData = $InstalledCheckRegValueData + } + } + + if ($InstalledCheckRegKey -and $InstalledCheckRegValueName -and $InstalledCheckRegValueData) + { + return @{ + Ensure = "Present" + Name = $Name + ProductId = $identifyingNumber + Installed = $true + InstalledCheckRegKey = $InstalledCheckRegKey + InstalledCheckRegValueName = $InstalledCheckRegValueName + InstalledCheckRegValueData = $InstalledCheckRegValueData + } + } + + #$identifyingNumber can still be null here (e.g. 
remote MSI with Name specified, local EXE) + #If the user gave a ProductId just pass it through, otherwise fill it from the product + if(-not $identifyingNumber) + { + $identifyingNumber = Split-Path -Leaf $product.Name + } + + $date = $product.GetValue("InstallDate") + if($date) + { + try + { + $date = "{0:d}" -f [DateTime]::ParseExact($date, "yyyyMMdd",[System.Globalization.CultureInfo]::CurrentCulture).Date + } + catch + { + $date = $null + } + } + + $publisher = Get-LocalizableRegKeyValue $product "Publisher" + $size = $product.GetValue("EstimatedSize") + if($size) + { + $size = $size/1024 + } + + $version = $product.GetValue("DisplayVersion") + $description = $product.GetValue("Comments") + $name = Get-LocalizableRegKeyValue $product "DisplayName" + return @{ + Ensure = "Present" + Name = $name + Path = $Path + InstalledOn = $date + ProductId = $identifyingNumber + Size = $size + Installed = $true + Version = $version + PackageDescription = $description + Publisher = $publisher + } +} + +Function Get-MsiTools +{ + if($script:MsiTools) + { + return $script:MsiTools + } + + $sig = @' + [DllImport("msi.dll", CharSet = CharSet.Unicode, PreserveSig = true, SetLastError = true, ExactSpelling = true)] + private static extern UInt32 MsiOpenPackageW(string szPackagePath, out IntPtr hProduct); + + [DllImport("msi.dll", CharSet = CharSet.Unicode, PreserveSig = true, SetLastError = true, ExactSpelling = true)] + private static extern uint MsiCloseHandle(IntPtr hAny); + + [DllImport("msi.dll", CharSet = CharSet.Unicode, PreserveSig = true, SetLastError = true, ExactSpelling = true)] + private static extern uint MsiGetPropertyW(IntPtr hAny, string name, StringBuilder buffer, ref int bufferLength); + + private static string GetPackageProperty(string msi, string property) + { + IntPtr MsiHandle = IntPtr.Zero; + try + { + var res = MsiOpenPackageW(msi, out MsiHandle); + if (res != 0) + { + return null; + } + + int length = 256; + var buffer = new StringBuilder(length); + res = MsiGetPropertyW(MsiHandle, property, buffer, ref length); + return buffer.ToString(); + } + finally + { + if (MsiHandle != IntPtr.Zero) + { + MsiCloseHandle(MsiHandle); + } + } + } + public static string GetProductCode(string msi) + { + return GetPackageProperty(msi, "ProductCode"); + } + + public static string GetProductName(string msi) + { + return GetPackageProperty(msi, "ProductName"); + } +'@ + $script:MsiTools = Add-Type -PassThru -Namespace Microsoft.Windows.DesiredStateConfiguration.PackageResource ` + -Name MsiTools -Using System.Text -MemberDefinition $sig + return $script:MsiTools +} + + +Function Get-MsiProductEntry +{ + param + ( + [string] $Path + ) + + if(-not (Test-Path -PathType Leaf $Path) -and ($fileExtension -ne ".msi")) + { + Throw-TerminatingError ($LocalizedData.PathDoesNotExist -f $Path) + } + + $tools = Get-MsiTools + + $pn = $tools::GetProductName($Path) + + $pc = $tools::GetProductCode($Path) + + return $pn,$pc +} + + +function Set-TargetResource +{ + [CmdletBinding(SupportsShouldProcess=$true)] + param + ( + [ValidateSet("Present", "Absent")] + [string] $Ensure = "Present", + + [parameter(Mandatory = $true)] + [AllowEmptyString()] + [string] $Name, + + [parameter(Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [string] $Path, + + [parameter(Mandatory = $true)] + [AllowEmptyString()] + [string] $ProductId, + + [string] $Arguments, + + [pscredential] $Credential, + + [int[]] $ReturnCode, + + [string] $LogPath, + + [pscredential] $RunAsCredential, + + [string] $InstalledCheckRegKey, + + [string] 
$InstalledCheckRegValueName, + + [string] $InstalledCheckRegValueData + ) + + $ErrorActionPreference = "Stop" + + if((Test-TargetResource -Ensure $Ensure -Name $Name -Path $Path -ProductId $ProductId ` + -InstalledCheckRegKey $InstalledCheckRegKey -InstalledCheckRegValueName $InstalledCheckRegValueName ` + -InstalledCheckRegValueData $InstalledCheckRegValueData)) + { + return + } + + $uri, $identifyingNumber = Validate-StandardArguments $Path $ProductId $Name + + #Path gets overwritten in the download code path. Retain the user's original Path in case the install succeeded + #but the named package wasn't present on the system afterward so we can give a better message + $OrigPath = $Path + + Write-Verbose $LocalizedData.PackageConfigurationStarting + if(-not $ReturnCode) + { + $ReturnCode = @(0) + } + + $logStream = $null + $psdrive = $null + $downloadedFileName = $null + try + { + $fileExtension = [System.IO.Path]::GetExtension($Path).ToLower() + if($LogPath) + { + try + { + if($fileExtension -eq ".msi") + { + #We want to pre-verify the path exists and is writable ahead of time + #even in the MSI case, as detecting WHY the MSI log doesn't exist would + #be rather problematic for the user + if((Test-Path $LogPath) -and $PSCmdlet.ShouldProcess($LocalizedData.RemoveExistingLogFile,$null,$null)) + { + rm $LogPath + } + + if($PSCmdlet.ShouldProcess($LocalizedData.CreateLogFile, $null, $null)) + { + New-Item -Type File $LogPath | Out-Null + } + } + elseif($PSCmdlet.ShouldProcess($LocalizedData.CreateLogFile, $null, $null)) + { + $logStream = new-object "System.IO.StreamWriter" $LogPath,$false + } + } + catch + { + Throw-TerminatingError ($LocalizedData.CouldNotOpenLog -f $LogPath) $_ + } + } + + #Download or mount file as necessary + if(-not ($fileExtension -eq ".msi" -and $Ensure -eq "Absent")) + { + if($uri.IsUnc -and $PSCmdlet.ShouldProcess($LocalizedData.MountSharePath, $null, $null)) + { + $psdriveArgs = @{Name=([guid]::NewGuid());PSProvider="FileSystem";Root=(Split-Path $uri.LocalPath)} + if($Credential) + { + #We need to optionally include these and then splat the hash otherwise + #we pass a null for Credential which causes the cmdlet to pop a dialog up + $psdriveArgs["Credential"] = $Credential + } + + $psdrive = New-PSDrive @psdriveArgs + $Path = Join-Path $psdrive.Root (Split-Path -Leaf $uri.LocalPath) #Necessary? 
+ } + elseif(@("http", "https") -contains $uri.Scheme -and $Ensure -eq "Present" -and $PSCmdlet.ShouldProcess($LocalizedData.DownloadHTTPFile, $null, $null)) + { + $scheme = $uri.Scheme + $outStream = $null + $responseStream = $null + + try + { + Trace-Message "Creating cache location" + + if(-not (Test-Path -PathType Container $CacheLocation)) + { + mkdir $CacheLocation | Out-Null + } + + $destName = Join-Path $CacheLocation (Split-Path -Leaf $uri.LocalPath) + + Trace-Message "Need to download file from $scheme, destination will be $destName" + + try + { + Trace-Message "Creating the destination cache file" + $outStream = New-Object System.IO.FileStream $destName, "Create" + } + catch + { + #Should never happen since we own the cache directory + Throw-TerminatingError ($LocalizedData.CouldNotOpenDestFile -f $destName) $_ + } + + try + { + Trace-Message "Creating the $scheme stream" + $request = [System.Net.WebRequest]::Create($uri) + Trace-Message "Setting default credential" + $request.Credentials = [System.Net.CredentialCache]::DefaultCredentials + if ($scheme -eq "http") + { + Trace-Message "Setting authentication level" + # default value is MutualAuthRequested, which applies to https scheme + $request.AuthenticationLevel = [System.Net.Security.AuthenticationLevel]::None + } + if ($scheme -eq "https") + { + Trace-Message "Ignoring bad certificates" + $request.ServerCertificateValidationCallBack = {$true} + } + Trace-Message "Getting the $scheme response stream" + $responseStream = (([System.Net.HttpWebRequest]$request).GetResponse()).GetResponseStream() + } + catch + { + Trace-Message ("Error: " + ($_ | Out-String)) + Throw-TerminatingError ($LocalizedData.CouldNotGetHttpStream -f $scheme, $Path) $_ + } + + try + { + Trace-Message "Copying the $scheme stream bytes to the disk cache" + $responseStream.CopyTo($outStream) + $responseStream.Flush() + $outStream.Flush() + } + catch + { + Throw-TerminatingError ($LocalizedData.ErrorCopyingDataToFile -f $Path,$destName) $_ + } + } + finally + { + if($outStream) + { + $outStream.Close() + } + + if($responseStream) + { + $responseStream.Close() + } + } + Trace-Message "Redirecting package path to cache file location" + $Path = $downloadedFileName = $destName + } + } + + #At this point the Path ought to be valid unless it's an MSI uninstall case + if(-not (Test-Path -PathType Leaf $Path) -and -not ($Ensure -eq "Absent" -and $fileExtension -eq ".msi")) + { + Throw-TerminatingError ($LocalizedData.PathDoesNotExist -f $Path) + } + + $startInfo = New-Object System.Diagnostics.ProcessStartInfo + $startInfo.UseShellExecute = $false #Necessary for I/O redirection and just generally a good idea + $process = New-Object System.Diagnostics.Process + $process.StartInfo = $startInfo + $errLogPath = $LogPath + ".err" #Concept only, will never touch disk + if($fileExtension -eq ".msi") + { + $startInfo.FileName = "$env:windir\system32\msiexec.exe" + if($Ensure -eq "Present") + { + # check if Msi package contains the ProductName and Code specified + <# + $pName,$pCode = Get-MsiProductEntry -Path $Path + + if ( + ( (-not [String]::IsNullOrEmpty($Name)) -and ($pName -ne $Name)) ` + -or ( (-not [String]::IsNullOrEmpty($identifyingNumber)) -and ($identifyingNumber -ne $pCode)) + ) + { + Throw-InvalidNameOrIdException ($LocalizedData.InvalidNameOrId -f $Name,$identifyingNumber,$pName,$pCode) + } + #> + + $startInfo.Arguments = '/i "{0}"' -f $Path + } + else + { + $product = Get-ProductEntry $Name $identifyingNumber + $id = Split-Path -Leaf $product.Name #We may have 
used the Name earlier, now we need the actual ID + $startInfo.Arguments = ("/x{0}" -f $id) + } + + if($LogPath) + { + $startInfo.Arguments += ' /log "{0}"' -f $LogPath + } + + $startInfo.Arguments += " /quiet" + + if($Arguments) + { + $startInfo.Arguments += " " + $Arguments + } + } + else #EXE + { + Trace-Message "The binary is an EXE" + $startInfo.FileName = $Path + $startInfo.Arguments = $Arguments + if($LogPath) + { + Trace-Message "User has requested logging, need to attach event handlers to the process" + $startInfo.RedirectStandardError = $true + $startInfo.RedirectStandardOutput = $true + Register-ObjectEvent -InputObject $process -EventName "OutputDataReceived" -SourceIdentifier $LogPath + Register-ObjectEvent -InputObject $process -EventName "ErrorDataReceived" -SourceIdentifier $errLogPath + } + } + + Trace-Message ("Starting {0} with {1}" -f $startInfo.FileName, $startInfo.Arguments) + + if($PSCmdlet.ShouldProcess(($LocalizedData.StartingProcessMessage -f $startInfo.FileName, $startInfo.Arguments), $null, $null)) + { + try + { + $exitCode = 0 + + if($PSBoundParameters.ContainsKey("RunAsCredential")) + { + CallPInvoke + [Source.NativeMethods]::CreateProcessAsUser("""" + $startInfo.FileName + """ " + $startInfo.Arguments, ` + $RunAsCredential.GetNetworkCredential().Domain, $RunAsCredential.GetNetworkCredential().UserName, ` + $RunAsCredential.GetNetworkCredential().Password, [ref] $exitCode) + } + else + { + $process.Start() | Out-Null + + if($logStream) #Identical to $fileExtension -eq ".exe" -and $logPath + { + $process.BeginOutputReadLine(); + $process.BeginErrorReadLine(); + } + + $process.WaitForExit() + + if($process) + { + $exitCode = $process.ExitCode + } + } + } + catch + { + Throw-TerminatingError ($LocalizedData.CouldNotStartProcess -f $Path) $_ + } + + + if($logStream) + { + #We have to re-mux these since they appear to us as different streams + #The underlying Win32 APIs prevent this problem, as would constructing a script + #on the fly and executing it, but the former is highly problematic from PowerShell + #and the latter doesn't let us get the return code for UI-based EXEs + $outputEvents = Get-Event -SourceIdentifier $LogPath + $errorEvents = Get-Event -SourceIdentifier $errLogPath + $masterEvents = @() + $outputEvents + $errorEvents + $masterEvents = $masterEvents | Sort-Object -Property TimeGenerated + + foreach($event in $masterEvents) + { + $logStream.Write($event.SourceEventArgs.Data); + } + + Remove-Event -SourceIdentifier $LogPath + Remove-Event -SourceIdentifier $errLogPath + } + + if(-not ($ReturnCode -contains $exitCode)) + { + Throw-TerminatingError ($LocalizedData.UnexpectedReturnCode -f $exitCode.ToString()) + } + } + } + finally + { + if($psdrive) + { + Remove-PSDrive -Force $psdrive + } + + if($logStream) + { + $logStream.Dispose() + } + } + + if($downloadedFileName -and $PSCmdlet.ShouldProcess($LocalizedData.RemoveDownloadedFile, $null, $null)) + { + #This is deliberately not in the Finally block. We want to leave the downloaded file on disk + #in the error case as a debugging aid for the user + rm $downloadedFileName + } + + $operationString = $LocalizedData.PackageUninstalled + if($Ensure -eq "Present") + { + $operationString = $LocalizedData.PackageInstalled + } + + # Check if reboot is required, if so notify CA. 
The MSFT_ServerManagerTasks provider is missing on client SKUs + $featureData = invoke-wmimethod -EA Ignore -Name GetServerFeature -namespace root\microsoft\windows\servermanager -Class MSFT_ServerManagerTasks + $regData = Get-ItemProperty "HKLM:\SYSTEM\CurrentControlSet\Control\Session Manager" "PendingFileRenameOperations" -EA Ignore + if(($featureData -and $featureData.RequiresReboot) -or $regData) + { + Write-Verbose $LocalizedData.MachineRequiresReboot + $global:DSCMachineStatus = 1 + } + + if($Ensure -eq "Present") + { + $productEntry = Get-ProductEntry $Name $identifyingNumber $InstalledCheckRegKey $InstalledCheckRegValueName $InstalledCheckRegValueData + if(-not $productEntry) + { + Throw-TerminatingError ($LocalizedData.PostValidationError -f $OrigPath) + } + } + + Write-Verbose $operationString + Write-Verbose $LocalizedData.PackageConfigurationComplete +} + +function CallPInvoke +{ +$script:ProgramSource = @" +using System; +using System.Collections.Generic; +using System.Text; +using System.Security; +using System.Runtime.InteropServices; +using System.Diagnostics; +using System.Security.Principal; +using System.ComponentModel; +using System.IO; + +namespace Source +{ + [SuppressUnmanagedCodeSecurity] + public static class NativeMethods + { + //The following structs and enums are used by the various Win32 API's that are used in the code below + + [StructLayout(LayoutKind.Sequential)] + public struct STARTUPINFO + { + public Int32 cb; + public string lpReserved; + public string lpDesktop; + public string lpTitle; + public Int32 dwX; + public Int32 dwY; + public Int32 dwXSize; + public Int32 dwXCountChars; + public Int32 dwYCountChars; + public Int32 dwFillAttribute; + public Int32 dwFlags; + public Int16 wShowWindow; + public Int16 cbReserved2; + public IntPtr lpReserved2; + public IntPtr hStdInput; + public IntPtr hStdOutput; + public IntPtr hStdError; + } + + [StructLayout(LayoutKind.Sequential)] + public struct PROCESS_INFORMATION + { + public IntPtr hProcess; + public IntPtr hThread; + public Int32 dwProcessID; + public Int32 dwThreadID; + } + + [Flags] + public enum LogonType + { + LOGON32_LOGON_INTERACTIVE = 2, + LOGON32_LOGON_NETWORK = 3, + LOGON32_LOGON_BATCH = 4, + LOGON32_LOGON_SERVICE = 5, + LOGON32_LOGON_UNLOCK = 7, + LOGON32_LOGON_NETWORK_CLEARTEXT = 8, + LOGON32_LOGON_NEW_CREDENTIALS = 9 + } + + [Flags] + public enum LogonProvider + { + LOGON32_PROVIDER_DEFAULT = 0, + LOGON32_PROVIDER_WINNT35, + LOGON32_PROVIDER_WINNT40, + LOGON32_PROVIDER_WINNT50 + } + [StructLayout(LayoutKind.Sequential)] + public struct SECURITY_ATTRIBUTES + { + public Int32 Length; + public IntPtr lpSecurityDescriptor; + public bool bInheritHandle; + } + + public enum SECURITY_IMPERSONATION_LEVEL + { + SecurityAnonymous, + SecurityIdentification, + SecurityImpersonation, + SecurityDelegation + } + + public enum TOKEN_TYPE + { + TokenPrimary = 1, + TokenImpersonation + } + + [StructLayout(LayoutKind.Sequential, Pack = 1)] + internal struct TokPriv1Luid + { + public int Count; + public long Luid; + public int Attr; + } + + public const int GENERIC_ALL_ACCESS = 0x10000000; + public const int CREATE_NO_WINDOW = 0x08000000; + internal const int SE_PRIVILEGE_ENABLED = 0x00000002; + internal const int TOKEN_QUERY = 0x00000008; + internal const int TOKEN_ADJUST_PRIVILEGES = 0x00000020; + internal const string SE_INCRASE_QUOTA = "SeIncreaseQuotaPrivilege"; + + [DllImport("kernel32.dll", + EntryPoint = "CloseHandle", SetLastError = true, + CharSet = CharSet.Auto, CallingConvention = 
CallingConvention.StdCall)] + public static extern bool CloseHandle(IntPtr handle); + + [DllImport("advapi32.dll", + EntryPoint = "CreateProcessAsUser", SetLastError = true, + CharSet = CharSet.Ansi, CallingConvention = CallingConvention.StdCall)] + public static extern bool CreateProcessAsUser( + IntPtr hToken, + string lpApplicationName, + string lpCommandLine, + ref SECURITY_ATTRIBUTES lpProcessAttributes, + ref SECURITY_ATTRIBUTES lpThreadAttributes, + bool bInheritHandle, + Int32 dwCreationFlags, + IntPtr lpEnvrionment, + string lpCurrentDirectory, + ref STARTUPINFO lpStartupInfo, + ref PROCESS_INFORMATION lpProcessInformation + ); + + [DllImport("advapi32.dll", EntryPoint = "DuplicateTokenEx")] + public static extern bool DuplicateTokenEx( + IntPtr hExistingToken, + Int32 dwDesiredAccess, + ref SECURITY_ATTRIBUTES lpThreadAttributes, + Int32 ImpersonationLevel, + Int32 dwTokenType, + ref IntPtr phNewToken + ); + + [DllImport("advapi32.dll", CharSet = CharSet.Unicode, SetLastError = true)] + public static extern Boolean LogonUser( + String lpszUserName, + String lpszDomain, + String lpszPassword, + LogonType dwLogonType, + LogonProvider dwLogonProvider, + out IntPtr phToken + ); + + [DllImport("advapi32.dll", ExactSpelling = true, SetLastError = true)] + internal static extern bool AdjustTokenPrivileges( + IntPtr htok, + bool disall, + ref TokPriv1Luid newst, + int len, + IntPtr prev, + IntPtr relen + ); + + [DllImport("kernel32.dll", ExactSpelling = true)] + internal static extern IntPtr GetCurrentProcess(); + + [DllImport("advapi32.dll", ExactSpelling = true, SetLastError = true)] + internal static extern bool OpenProcessToken( + IntPtr h, + int acc, + ref IntPtr phtok + ); + + [DllImport("kernel32.dll", ExactSpelling = true)] + internal static extern int WaitForSingleObject( + IntPtr h, + int milliseconds + ); + + [DllImport("kernel32.dll", ExactSpelling = true)] + internal static extern bool GetExitCodeProcess( + IntPtr h, + out int exitcode + ); + + [DllImport("advapi32.dll", SetLastError = true)] + internal static extern bool LookupPrivilegeValue( + string host, + string name, + ref long pluid + ); + + public static void CreateProcessAsUser(string strCommand, string strDomain, string strName, string strPassword, ref int ExitCode ) + { + var hToken = IntPtr.Zero; + var hDupedToken = IntPtr.Zero; + TokPriv1Luid tp; + var pi = new PROCESS_INFORMATION(); + var sa = new SECURITY_ATTRIBUTES(); + sa.Length = Marshal.SizeOf(sa); + Boolean bResult = false; + try + { + bResult = LogonUser( + strName, + strDomain, + strPassword, + LogonType.LOGON32_LOGON_BATCH, + LogonProvider.LOGON32_PROVIDER_DEFAULT, + out hToken + ); + if (!bResult) + { + throw new Win32Exception("Logon error #" + Marshal.GetLastWin32Error().ToString()); + } + IntPtr hproc = GetCurrentProcess(); + IntPtr htok = IntPtr.Zero; + bResult = OpenProcessToken( + hproc, + TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, + ref htok + ); + if(!bResult) + { + throw new Win32Exception("Open process token error #" + Marshal.GetLastWin32Error().ToString()); + } + tp.Count = 1; + tp.Luid = 0; + tp.Attr = SE_PRIVILEGE_ENABLED; + bResult = LookupPrivilegeValue( + null, + SE_INCRASE_QUOTA, + ref tp.Luid + ); + if(!bResult) + { + throw new Win32Exception("Lookup privilege error #" + Marshal.GetLastWin32Error().ToString()); + } + bResult = AdjustTokenPrivileges( + htok, + false, + ref tp, + 0, + IntPtr.Zero, + IntPtr.Zero + ); + if(!bResult) + { + throw new Win32Exception("Token elevation error #" + Marshal.GetLastWin32Error().ToString()); + } + + 
bResult = DuplicateTokenEx( + hToken, + GENERIC_ALL_ACCESS, + ref sa, + (int)SECURITY_IMPERSONATION_LEVEL.SecurityIdentification, + (int)TOKEN_TYPE.TokenPrimary, + ref hDupedToken + ); + if(!bResult) + { + throw new Win32Exception("Duplicate Token error #" + Marshal.GetLastWin32Error().ToString()); + } + var si = new STARTUPINFO(); + si.cb = Marshal.SizeOf(si); + si.lpDesktop = ""; + bResult = CreateProcessAsUser( + hDupedToken, + null, + strCommand, + ref sa, + ref sa, + false, + 0, + IntPtr.Zero, + null, + ref si, + ref pi + ); + if(!bResult) + { + throw new Win32Exception("Create process as user error #" + Marshal.GetLastWin32Error().ToString()); + } + + int status = WaitForSingleObject(pi.hProcess, -1); + if(status == -1) + { + throw new Win32Exception("Wait during create process failed user error #" + Marshal.GetLastWin32Error().ToString()); + } + + bResult = GetExitCodeProcess(pi.hProcess, out ExitCode); + if(!bResult) + { + throw new Win32Exception("Retrieving status error #" + Marshal.GetLastWin32Error().ToString()); + } + } + finally + { + if (pi.hThread != IntPtr.Zero) + { + CloseHandle(pi.hThread); + } + if (pi.hProcess != IntPtr.Zero) + { + CloseHandle(pi.hProcess); + } + if (hDupedToken != IntPtr.Zero) + { + CloseHandle(hDupedToken); + } + } + } + } +} + +"@ + Add-Type -TypeDefinition $ProgramSource -ReferencedAssemblies "System.ServiceProcess" +} + +#endregion + + +$params = Parse-Args $args; +$result = New-Object psobject; +Set-Attr $result "changed" $false; + +$path = Get-Attr -obj $params -name path -failifempty $true -resultobj $result +$name = Get-Attr -obj $params -name name -default $path +$productid = Get-Attr -obj $params -name productid +if ($productid -eq $null) +{ + #Alias added for backwards compat. + $productid = Get-Attr -obj $params -name product_id -failifempty $true -resultobj $result +} +$arguments = Get-Attr -obj $params -name arguments +$ensure = Get-Attr -obj $params -name state -default "present" +if ($ensure -eq $null) +{ + $ensure = Get-Attr -obj $params -name ensure -default "present" +} +$username = Get-Attr -obj $params -name user_name +$password = Get-Attr -obj $params -name user_password +$return_code = Get-Attr -obj $params -name expected_return_code -default 0 + +#Construct the DSC param hashtable +$dscparams = @{ + name=$name + path=$path + productid = $productid + arguments = $arguments + ensure = $ensure + returncode = $return_code +} + +if (($username -ne $null) -and ($password -ne $null)) +{ + #Add network credential to the list + $secpassword = $password | ConvertTo-SecureString -AsPlainText -Force + $credential = New-Object pscredential -ArgumentList $username, $secpassword + $dscparams.add("Credential",$credential) +} + +#Always return the name +set-attr -obj $result -name "name" -value $name + +$testdscresult = Test-TargetResource @dscparams +if ($testdscresult -eq $true) +{ + Exit-Json -obj $result +} +Else +{ + try + { + set-TargetResource @dscparams + } + catch + { + $errormsg = $_ + Fail-Json -obj $result -message $errormsg.ToString() + } + + #Check if DSC thinks the computer needs a reboot: + if ((get-variable DSCMachinestatus -Scope Global -ea 0) -and ($global:DSCMachineStatus -eq 1)) + { + Set-Attr $result "restart_required" $true + } + + #Set-TargetResource did its job. 
We can assume a change has happened + Set-Attr $result "changed" $true + Exit-Json -obj $result + +} + diff --git a/windows/win_package.py b/windows/win_package.py new file mode 100644 index 00000000000..9c358fcd845 --- /dev/null +++ b/windows/win_package.py @@ -0,0 +1,99 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Trond Hindenes, and others +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: win_package +version_added: "1.7" +author: Trond Hindenes +short_description: Installs/Uninstalls an installable package, either from local file system or URL +description: + - Installs or uninstalls a package. + - 'Optionally uses a product_id to check if the package needs installing. You can find product ids for installed programs in the windows registry either in C(HKLM:Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall) or for 32 bit programs C(HKLM:Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall)' +options: + path: + description: + - Location of the package to be installed (either on file system, network share or URL) + required: true + name: + description: + - Name of the package. If name isn't specified the path will be used for log messages + required: false + default: null + product_id: + description: + - product id of the installed package (used for checking if already installed) + - 'You can find product ids for installed programs in the windows registry either in C(HKLM:Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall) or for 32 bit programs C(HKLM:Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall)' + required: true + aliases: [productid] + arguments: + description: + - Any arguments the installer needs + default: null + required: false + state: + description: + - Install or Uninstall + choices: + - present + - absent + default: present + required: false + aliases: [ensure] + user_name: + description: + - Username of an account with access to the package if it's located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_password for this to function properly. + default: null + required: false + user_password: + description: + - Password of an account with access to the package if it's located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_name for this to function properly.
+ default: null + required: false +''' + +EXAMPLES = ''' +# Playbook example +- name: Install the vc thingy + win_package: + name="Microsoft Visual C thingy" + path="http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe" + product_id="{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}" + arguments="/install /passive /norestart" + +# Install/uninstall an msi-based package +- name: Install msi-based package (Remote Desktop Connection Manager) + win_package: + path: "https://download.microsoft.com/download/A/F/0/AF0071F3-B198-4A35-AA90-C68D103BDCCF/rdcman.msi" + product_id: "{0240359E-6A4C-4884-9E94-B397A02D893C}" +- name: Uninstall msi-based package + win_package: + path: "https://download.microsoft.com/download/A/F/0/AF0071F3-B198-4A35-AA90-C68D103BDCCF/rdcman.msi" + product_id: "{0240359E-6A4C-4884-9E94-B397A02D893C}" + state: absent +''' + diff --git a/windows/win_regedit.ps1 b/windows/win_regedit.ps1 index 1a257413466..723a6c7b239 100644 --- a/windows/win_regedit.ps1 +++ b/windows/win_regedit.ps1 @@ -21,64 +21,34 @@ $ErrorActionPreference = "Stop" # WANT_JSON # POWERSHELL_COMMON +New-PSDrive -PSProvider registry -Root HKEY_CLASSES_ROOT -Name HKCR -ErrorAction SilentlyContinue +New-PSDrive -PSProvider registry -Root HKEY_USERS -Name HKU -ErrorAction SilentlyContinue +New-PSDrive -PSProvider registry -Root HKEY_CURRENT_CONFIG -Name HCCC -ErrorAction SilentlyContinue + $params = Parse-Args $args; $result = New-Object PSObject; Set-Attr $result "changed" $false; +Set-Attr $result "data_changed" $false; +Set-Attr $result "data_type_changed" $false; -If ($params.key) -{ - $registryKey = $params.key -} -Else -{ - Fail-Json $result "missing required argument: key" -} - -If ($params.value) -{ - $registryValue = $params.value -} -Else -{ - $registryValue = $null -} - -If ($params.state) -{ - $state = $params.state.ToString().ToLower() - If (($state -ne "present") -and ($state -ne "absent")) - { - Fail-Json $result "state is $state; must be present or absent" - } -} -Else -{ - $state = "present" -} +$registryKey = Get-Attr -obj $params -name "key" -failifempty $true +$registryValue = Get-Attr -obj $params -name "value" -default $null +$state = Get-Attr -obj $params -name "state" -validateSet "present","absent" -default "present" +$registryData = Get-Attr -obj $params -name "data" -default $null +$registryDataType = Get-Attr -obj $params -name "datatype" -validateSet "binary","dword","expandstring","multistring","string","qword" -default "string" -If ($params.data) -{ - $registryData = $params.data -} -ElseIf ($state -eq "present" -and $registryValue -ne $null) +If ($state -eq "present" -and $registryData -eq $null -and $registryValue -ne $null) { Fail-Json $result "missing required argument: data" } -If ($params.datatype) +# check the registry key is in powershell ps-drive format: HKLM, HKCU, HKU, HKCR, HCCC +If (-not ($registryKey -match "^H[KC][CLU][MURC]{0,1}:\\")) { - $registryDataType = $params.datatype.ToString().ToLower() - $validRegistryDataTypes = "binary", "dword", "expandstring", "multistring", "string", "qword" - If ($validRegistryDataTypes -notcontains $registryDataType) - { - Fail-Json $result "type is $registryDataType; must be binary, dword, expandstring, multistring, string, or qword" - } -} -Else -{ - $registryDataType = "string" + Fail-Json $result "key: $registryKey is not a valid powershell path, see module documentation for examples."
} + Function Test-RegistryValueData { Param ( [parameter(Mandatory=$true)] @@ -95,19 +65,95 @@ Function Test-RegistryValueData { } } +# Returns true if registry data matches. +# Handles binary, integer(dword) and string registry data +Function Compare-RegistryData { + Param ( + [parameter(Mandatory=$true)] + [AllowEmptyString()]$ReferenceData, + [parameter(Mandatory=$true)] + [AllowEmptyString()]$DifferenceData + ) + + if ($ReferenceData -is [String] -or $ReferenceData -is [int]) { + if ($ReferenceData -eq $DifferenceData) { + return $true + } else { + return $false + } + } elseif ($ReferenceData -is [Object[]]) { + if (@(Compare-Object $ReferenceData $DifferenceData -SyncWindow 0).Length -eq 0) { + return $true + } else { + return $false + } + } +} + +# Simplified version of Convert-HexStringToByteArray from +# https://cyber-defense.sans.org/blog/2010/02/11/powershell-byte-array-hex-convert +# Expects a hex in the format you get when you run reg.exe export, +# and converts to a byte array so powershell can modify binary registry entries +function Convert-RegExportHexStringToByteArray +{ + Param ( + [parameter(Mandatory=$true)] [String] $String + ) + +# remove 'hex:' from the front of the string if present +$String = $String.ToLower() -replace '^hex\:', '' + +#remove whitespace and any other non-hex crud. +$String = $String.ToLower() -replace '[^a-f0-9\\,x\-\:]','' + +# turn commas into colons +$String = $String -replace ',',':' + +#Maybe there's nothing left over to convert... +if ($String.Length -eq 0) { ,@() ; return } + +#Split string with or without colon delimiters. +if ($String.Length -eq 1) +{ ,@([System.Convert]::ToByte($String,16)) } +elseif (($String.Length % 2 -eq 0) -and ($String.IndexOf(":") -eq -1)) +{ ,@($String -split '([a-f0-9]{2})' | foreach-object { if ($_) {[System.Convert]::ToByte($_,16)}}) } +elseif ($String.IndexOf(":") -ne -1) +{ ,@($String -split ':+' | foreach-object {[System.Convert]::ToByte($_,16)}) } +else +{ ,@() } + +} + +if($registryDataType -eq "binary" -and $registryData -ne $null -and $registryData -is [String]) { + $registryData = Convert-RegExportHexStringToByteArray($registryData) +} + if($state -eq "present") { if ((Test-Path $registryKey) -and $registryValue -ne $null) { if (Test-RegistryValueData -Path $registryKey -Value $registryValue) { + # handle binary data + $currentRegistryData =(Get-ItemProperty -Path $registryKey | Select-Object -ExpandProperty $registryValue) + + if ($registryValue.ToLower() -eq "(default)") { + # Special case handling for the key's default property. 
Because .GetValueKind() doesn't work for the (default) key property + $oldRegistryDataType = "String" + } + else { + $oldRegistryDataType = (Get-Item $registryKey).GetValueKind($registryValue) + } + # Changes Data and DataType - if ((Get-Item $registryKey).GetValueKind($registryValue) -ne $registryDataType) + if ($registryDataType -ne $oldRegistryDataType) { Try { Remove-ItemProperty -Path $registryKey -Name $registryValue New-ItemProperty -Path $registryKey -Name $registryValue -Value $registryData -PropertyType $registryDataType $result.changed = $true + $result.data_changed = $true + $result.data_type_changed = $true } Catch { @@ -115,11 +161,12 @@ } } # Changes Only Data - elseif ((Get-ItemProperty -Path $registryKey | Select-Object -ExpandProperty $registryValue) -ne $registryData) + elseif (-Not (Compare-RegistryData -ReferenceData $currentRegistryData -DifferenceData $registryData)) { Try { Set-ItemProperty -Path $registryKey -Name $registryValue -Value $registryData $result.changed = $true + $result.data_changed = $true } Catch { @@ -142,7 +189,7 @@ } elseif(-not (Test-Path $registryKey)) { - Try + Try { $newRegistryKey = New-Item $registryKey -Force $result.changed = $true diff --git a/windows/win_regedit.py b/windows/win_regedit.py index 5087a5eaa8f..693b4c2f370 100644 --- a/windows/win_regedit.py +++ b/windows/win_regedit.py @@ -21,6 +21,10 @@ # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_regedit @@ -43,7 +47,7 @@ aliases: [] data: description: - - Registry Value Data + - Registry Value Data. Binary data should be expressed as a yaml byte array or as comma separated hex values. An easy way to generate this is to run C(regedit.exe) and use the I(Export) option to save the registry values to a file. In the exported file binary values will look like C(hex:be,ef,be,ef). The C(hex:) prefix is optional. required: false default: null aliases: [] @@ -94,6 +98,26 @@ data: 1337 datatype: dword + # Creates Registry Key called MyCompany, + # a value within MyCompany Key called "hello", and + # binary data for the value "hello" as type "binary" + # data expressed as comma separated list + win_regedit: + key: HKCU:\Software\MyCompany + value: hello + data: hex:be,ef,be,ef,be,ef,be,ef,be,ef + datatype: binary + + # Creates Registry Key called MyCompany, + # a value within MyCompany Key called "hello", and + # binary data for the value "hello" as type "binary" + # data expressed as yaml array of bytes + win_regedit: + key: HKCU:\Software\MyCompany + value: hello + data: [0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef] + datatype: binary + # Delete Registry Key MyCompany # NOTE: Not specifying a value will delete the root key which means # all values will be deleted @@ -106,4 +130,22 @@ key: HKCU:\Software\MyCompany value: hello state: absent
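+ + # (Illustrative example added in editing, not part of the original patch: + # the "multistring" datatype documented above takes a list of strings; + # the values shown are placeholders.) + win_regedit: + key: HKCU:\Software\MyCompany + value: hello + data: ['first example string','second example string'] + datatype: multistring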
+ win_regedit: + key: 'HKCU:\Software\My Company' + +''' +RETURN = ''' +data_changed: + description: whether this invocation changed the data in the registry value + returned: success + type: boolean + sample: False +data_type_changed: + description: whether this invocation changed the datatype of the registry value + returned: success + type: boolean + sample: True ''' diff --git a/windows/win_regmerge.ps1 b/windows/win_regmerge.ps1 new file mode 100644 index 00000000000..87e73a69773 --- /dev/null +++ b/windows/win_regmerge.ps1 @@ -0,0 +1,100 @@ +#!powershell +# This file is part of Ansible +# +# Copyright 2015, Jon Hawkesworth (@jhawkesworth) +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# WANT_JSON +# POWERSHELL_COMMON + +Function Convert-RegistryPath { + Param ( + [parameter(Mandatory=$True)] + [ValidateNotNullOrEmpty()]$Path + ) + + $output = $Path -replace "HKLM:", "HKLM" + $output = $output -replace "HKCU:", "HKCU" + + Return $output +} + +$params = Parse-Args $args +$result = New-Object PSObject +Set-Attr $result "changed" $False + +$path = Get-Attr -obj $params -name path -failifempty $True -resultobj $result +$compare_to = Get-Attr -obj $params -name compare_to -failifempty $False -resultobj $result + +# check it looks like a reg key, warn if key not present - will happen first time +# only accepting PS-Drive style key names (starting with HKLM etc, not HKEY_LOCAL_MACHINE etc) + +$do_comparison = $False + +If ($compare_to) { + $compare_to_key = $params.compare_to.ToString() + If (Test-Path $compare_to_key -pathType container ) { + $do_comparison = $True + } Else { + Set-Attr $result "compare_to_key_found" $False + } +} + +If ( $do_comparison -eq $True ) { + $guid = [guid]::NewGuid() + $exported_path = $env:TEMP + "\" + $guid.ToString() + 'ansible_win_regmerge.reg' + + $expanded_compare_key = Convert-RegistryPath ($compare_to_key) + + # export from the reg key location to a file + $reg_args = @("EXPORT", "$expanded_compare_key", $exported_path) + & reg.exe $reg_args + + # compare the two files + $comparison_result = Compare-Object -ReferenceObject $(Get-Content $path) -DifferenceObject $(Get-Content $exported_path) + + If (Get-Member -InputObject $comparison_result -Name "count" -MemberType Properties ) + { + # Something is different, actually do reg merge + $reg_import_args = @("IMPORT", "$path") + $ret = & reg.exe $reg_import_args 2>&1 + If ($LASTEXITCODE -eq 0) { + Set-Attr $result "changed" $True + Set-Attr $result "difference_count" $comparison_result.count + } Else { + Set-Attr $result "rc" $LASTEXITCODE + Fail-Json $result "$ret" + } + } Else { + Set-Attr $result "difference_count" 0 + } + + Remove-Item $exported_path + Set-Attr $result "compared" $True + +} Else { + # not comparing, merge and report changed + $reg_import_args = @("IMPORT", "$path") + $ret = & reg.exe $reg_import_args 2>&1 + If ( $LASTEXITCODE -eq 0 ) { + Set-Attr $result "changed" $True + Set-Attr $result "compared" $False + } Else { + Set-Attr $result "rc" 
$LASTEXITCODE + Fail-Json $result "$ret" + } +} + +Exit-Json $result diff --git a/windows/win_regmerge.py b/windows/win_regmerge.py new file mode 100644 index 00000000000..cefc98029a4 --- /dev/null +++ b/windows/win_regmerge.py @@ -0,0 +1,91 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Jon Hawkesworth (@jhawkesworth) +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: win_regmerge +version_added: "2.1" +short_description: Merges the contents of a registry file into the windows registry +description: + - Wraps the reg.exe command to import the contents of a registry file. + - Suitable for use with registry files created using M(win_template). + - Windows registry files have a specific format and must be constructed correctly with carriage return and line feed line endings otherwise they will not be merged. + - Exported registry files often start with a Byte Order Mark which must be removed if the file is to be templated using M(win_template). + - Registry file format is described at U(https://support.microsoft.com/en-us/kb/310516) + - See also M(win_template), M(win_regedit) options: path: description: - The full path including file name to the registry file on the remote machine to be merged required: true default: no default compare_to: description: - The parent key to use when comparing the contents of the registry to the contents of the file. Needs to be in the HKLM or HKCU part of the registry. Use a PS-Drive style path, for example HKLM:\SOFTWARE not HKEY_LOCAL_MACHINE\SOFTWARE. If not supplied, or the registry key is not found, no comparison will be made, and the module will report changed. required: false default: no default author: "Jon Hawkesworth (@jhawkesworth)" notes: - Organise your registry files so that they contain a single root registry key if you want to use the compare_to functionality. This module does not force registry settings to be in the state described in the file. If registry settings have been modified externally the module will merge the contents of the file but continue to report differences on subsequent runs. To force registry change, use M(win_regedit) with state=absent before using M(win_regmerge). 
+''' + +EXAMPLES = ''' + # Merge in a registry file without comparing to current registry + # Note that paths using / to separate are preferred as they require less special handling than \ + win_regmerge: + path: C:/autodeploy/myCompany-settings.reg + # Compare and merge registry file + win_regmerge: + path: C:/autodeploy/myCompany-settings.reg + compare_to: HKLM:\SOFTWARE\myCompany +''' + +RETURN = ''' +compare_to_key_found: + description: whether the parent registry key has been found for comparison + returned: when comparison key not found in registry + type: boolean + sample: false +difference_count: + description: number of differences between the registry and the file + returned: changed + type: integer + sample: 1 +compared: + description: whether a comparison has taken place between the registry and the file + returned: when a comparison key has been supplied and comparison has been attempted + type: boolean + sample: true +''' diff --git a/windows/win_robocopy.ps1 b/windows/win_robocopy.ps1 new file mode 100644 index 00000000000..69cf9ee3e3a --- /dev/null +++ b/windows/win_robocopy.ps1 @@ -0,0 +1,147 @@ +#!powershell +# This file is part of Ansible +# +# Copyright 2015, Corwin Brown +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +$result = New-Object psobject @{ + win_robocopy = New-Object psobject @{ + recurse = $false + purge = $false + } + changed = $false +} + +$src = Get-AnsibleParam -obj $params -name "src" -failifempty $true +$dest = Get-AnsibleParam -obj $params -name "dest" -failifempty $true +$purge = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "purge" -default $false) +$recurse = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "recurse" -default $false) +$flags = Get-AnsibleParam -obj $params -name "flags" -default $null +$_ansible_check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -default $false + +# Search for an Error Message +# Robocopy seems to display an error after 3 '-----' separator lines +Function SearchForError($cmd_output, $default_msg) { + $separator_count = 0 + $error_msg = $default_msg + ForEach ($line in $cmd_output) { + if (-Not $line) { + continue + } + + if ($separator_count -ne 3) { + if (Select-String -InputObject $line -pattern "^(\s+)?(\-+)(\s+)?$") { + $separator_count += 1 + } + } + Else { + If (Select-String -InputObject $line -pattern "error") { + $error_msg = $line + break + } + } + } + + return $error_msg +} + +# Build Arguments +$robocopy_opts = @() + +if (-Not (Test-Path $src)) { + Fail-Json $result "$src does not exist!" 
+} + +$robocopy_opts += $src +Set-Attr $result.win_robocopy "src" $src + +$robocopy_opts += $dest +Set-Attr $result.win_robocopy "dest" $dest + +if ($flags -eq $null) { + if ($purge) { + $robocopy_opts += "/purge" + } + + if ($recurse) { + $robocopy_opts += "/e" + } +} +Else { + $robocopy_opts += $flags +} + +Set-Attr $result.win_robocopy "purge" $purge +Set-Attr $result.win_robocopy "recurse" $recurse +Set-Attr $result.win_robocopy "flags" $flags + +$robocopy_output = "" +$rc = 0 +$changed = $false # default to unchanged; robocopy rc 0 means nothing needed copying +If ($_ansible_check_mode -eq $true) { + $robocopy_output = "Would have copied the contents of $src to $dest" + $rc = 0 +} +Else { + Try { + &robocopy $robocopy_opts | Tee-Object -Variable robocopy_output | Out-Null + $rc = $LASTEXITCODE + } + Catch { + $ErrorMessage = $_.Exception.Message + Fail-Json $result "Error synchronizing $src to $dest! Msg: $ErrorMessage" + } +} + +Set-Attr $result.win_robocopy "return_code" $rc +Set-Attr $result.win_robocopy "output" $robocopy_output + +$cmd_msg = "Success" +If ($rc -eq 0) { + $cmd_msg = "No files copied." +} +ElseIf ($rc -eq 1) { + $cmd_msg = "Files copied successfully!" + $changed = $true +} +ElseIf ($rc -eq 2) { + $cmd_msg = "Extra files or directories were detected!" + $changed = $true +} +ElseIf ($rc -eq 4) { + $cmd_msg = "Some mismatched files or directories were detected!" + $changed = $true +} +ElseIf ($rc -eq 8) { + $error_msg = SearchForError $robocopy_output "Some files or directories could not be copied!" + Fail-Json $result $error_msg +} +ElseIf ($rc -eq 10) { + $error_msg = SearchForError $robocopy_output "Serious Error! No files were copied! Do you have permissions to access $src and $dest?" + Fail-Json $result $error_msg +} +ElseIf ($rc -eq 16) { + $error_msg = SearchForError $robocopy_output "Fatal Error!" + Fail-Json $result $error_msg +} + +Set-Attr $result.win_robocopy "msg" $cmd_msg +Set-Attr $result.win_robocopy "changed" $changed +$result.changed = $changed + +Exit-Json $result diff --git a/windows/win_robocopy.py b/windows/win_robocopy.py new file mode 100644 index 00000000000..c29c07604bb --- /dev/null +++ b/windows/win_robocopy.py @@ -0,0 +1,147 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Corwin Brown +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = """ +--- +module: win_robocopy +version_added: "2.2" +short_description: Synchronizes the contents of two directories using Robocopy. +description: + - Synchronizes the contents of two directories on the remote machine. Under the hood this just calls out to RoboCopy, since that should be available on most modern Windows Systems. +options: + src: + description: + - Source file/directory to sync. + required: true + dest: + description: + - Destination file/directory to sync (Will receive contents of src). 
+ required: true + recurse: + description: + - Includes all subdirectories (Toggles the `/e` flag to RoboCopy). If "flags" is set, this will be ignored. + choices: + - true + - false + default: false + required: false + purge: + description: + - Deletes any files/directories found in the destination that do not exist in the source (Toggles the `/purge` flag to RoboCopy). If "flags" is set, this will be ignored. + choices: + - true + - false + default: false + required: false + flags: + description: + - Directly supply Robocopy flags. If set, purge and recurse will be ignored. + default: None + required: false +author: Corwin Brown (@blakfeld) +notes: + - This is not a complete port of the "synchronize" module. Unlike the "synchronize" module this only performs the sync/copy on the remote machine, not from the master to the remote machine. + - This module does not currently support all Robocopy flags. + - Works on Windows 7, Windows 8, Windows Server 2k8, and Windows Server 2k12 +""" + +EXAMPLES = """ +# Syncs the contents of one directory to another. +$ ansible -i hosts all -m win_robocopy -a "src=C:\\DirectoryOne dest=C:\\DirectoryTwo" + +# Sync the contents of one directory to another, including subdirectories. +$ ansible -i hosts all -m win_robocopy -a "src=C:\\DirectoryOne dest=C:\\DirectoryTwo recurse=true" + +# Sync the contents of one directory to another, and remove any files/directories found in destination that do not exist in the source. +$ ansible -i hosts all -m win_robocopy -a "src=C:\\DirectoryOne dest=C:\\DirectoryTwo purge=true" + +# Sample sync +--- +- name: Sync Two Directories + win_robocopy: + src: "C:\\DirectoryOne" + dest: "C:\\DirectoryTwo" + recurse: true + purge: true + +--- +- name: Sync Two Directories + win_robocopy: + src: "C:\\DirectoryOne" + dest: "C:\\DirectoryTwo" + recurse: true + purge: true + flags: '/XD SOME_DIR /XF SOME_FILE /MT:32' +""" + +RETURN = ''' +src: + description: The Source file/directory of the sync. + returned: always + type: string + sample: "c:/Some/Path" +dest: + description: The Destination file/directory of the sync. + returned: always + type: string + sample: "c:/Some/Path" +recurse: + description: Whether or not the recurse flag was toggled. + returned: always + type: bool + sample: False +purge: + description: Whether or not the purge flag was toggled. + returned: always + type: bool + sample: False +flags: + description: Any flags passed in by the user. + returned: always + type: string + sample: "/e /purge" +return_code: + description: The return code returned by robocopy. + returned: success + type: int + sample: 1 +output: + description: The output of running the robocopy command. + returned: success + type: string + sample: "-------------------------------------------------------------------------------\n ROBOCOPY :: Robust File Copy for Windows \n-------------------------------------------------------------------------------\n" +msg: + description: Output interpreted into a concise message. + returned: always + type: string + sample: No files copied! +changed: + description: Whether or not any changes were made. 
+ returned: always + type: bool + sample: False +''' diff --git a/windows/win_say.ps1 b/windows/win_say.ps1 new file mode 100644 index 00000000000..2a1a0c18aa5 --- /dev/null +++ b/windows/win_say.ps1 @@ -0,0 +1,106 @@ +#!powershell +# This file is part of Ansible +# +# Copyright 2016, Jon Hawkesworth (@jhawkesworth) +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; +$result = New-Object PSObject; +$msg = Get-AnsibleParam -obj $params -name "msg" +$msg_file = Get-AnsibleParam -obj $params -name "msg_file" +$start_sound_path = Get-AnsibleParam -obj $params -name "start_sound_path" +$end_sound_path = Get-AnsibleParam -obj $params -name "end_sound_path" +$voice = Get-AnsibleParam -obj $params -name "voice" +$speech_speed = Get-AnsibleParam -obj $params -name "speech_speed" +$speed = 0 +$words = $null + +if ($speech_speed -ne $null) { + try { + $speed = [convert]::ToInt32($speech_speed, 10) + } catch { + Fail-Json $result "speech_speed needs to be an integer in the range -10 to 10. The value $speech_speed could not be converted to an integer." + + } + if ($speed -lt -10 -or $speed -gt 10) { + Fail-Json $result "speech_speed needs to be an integer in the range -10 to 10. The value $speech_speed is outside this range." + } +} + + +if ($msg_file -ne $null -and $msg -ne $null ) { + Fail-Json $result "Please specify either the msg_file or the msg parameter, not both" +} + +if ($msg_file -eq $null -and $msg -eq $null -and $start_sound_path -eq $null -and $end_sound_path -eq $null) { + Fail-Json $result "No msg_file, msg, start_sound_path, or end_sound_path parameters have been specified. Please specify at least one so the module has something to do" + +} + + +if ($msg_file -ne $null) { + if (Test-Path $msg_file) { + $words = Get-Content $msg_file | Out-String + } else { + Fail-Json $result "Message file $msg_file could not be found or opened. Ensure you have specified the full path to the file, and the ansible windows user has permission to read the file." + } +} + +if ($start_sound_path -ne $null) { + if (Test-Path $start_sound_path) { + (new-object Media.SoundPlayer $start_sound_path).playSync(); + } else { + Fail-Json $result "Start sound file $start_sound_path could not be found or opened. Ensure you have specified the full path to the file, and the ansible windows user has permission to read the file." + } +} + +if ($msg -ne $null) { + $words = $msg +} + +if ($words -ne $null) { + Add-Type -AssemblyName System.speech + $tts = New-Object System.Speech.Synthesis.SpeechSynthesizer + if ($voice -ne $null) { + try { + $tts.SelectVoice($voice) + } catch [System.Management.Automation.MethodInvocationException] { + Set-Attr $result "voice_info" "Could not load voice $voice, using system default voice." 
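+ # SelectVoice() surfaces a MethodInvocationException when the requested voice is not installed; record the fallback in voice_info and carry on with the synthesizer's default voice rather than failing the module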
+ } + } + + Set-Attr $result "voice" $tts.Voice.Name + if ($speed -ne 0) { + $tts.Rate = $speed + } + $tts.Speak($words) + $tts.Dispose() +} + +if ($end_sound_path -ne $null) { + if (Test-Path $end_sound_path) { + (new-object Media.SoundPlayer $end_sound_path).playSync(); + } else { + Fail-Json $result "End sound file $end_sound_path could not be found or opened. Ensure you have specified the full path to the file, and the ansible windows user has permission to read the file." + } +} + +Set-Attr $result "changed" $false; +Set-Attr $result "message_text" $words; + +Exit-Json $result; diff --git a/windows/win_say.py b/windows/win_say.py new file mode 100644 index 00000000000..61fa74b9c87 --- /dev/null +++ b/windows/win_say.py @@ -0,0 +1,114 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, Jon Hawkesworth (@jhawkesworth) +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: win_say +version_added: "2.3" +short_description: Text to speech module for Windows to speak messages and optionally play sounds +description: + - Uses .NET libraries to convert text to speech and optionally play .wav sounds. Audio Service needs to be running and some kind of speakers or headphones need to be attached to the windows target(s) for the speech to be audible. +options: + msg: + description: + - The text to be spoken. Use either msg or msg_file. Optional so that you can use this module just to play sounds. + required: false + default: none + msg_file: + description: + - Full path to a windows format text file containing the text to be spoken. Use either msg or msg_file. Optional so that you can use this module just to play sounds. + required: false + default: none + voice: + description: + - Which voice to use. See notes for how to discover installed voices. If the requested voice is not available the default voice will be used. Example voice names from Windows 10 are 'Microsoft Zira Desktop' and 'Microsoft Hazel Desktop'. + required: false + default: system default voice + speech_speed: + description: + - How fast or slow to speak the text. Must be an integer value in the range -10 to 10. -10 is slowest, 10 is fastest. + required: false + default: 0 + start_sound_path: + description: + - Full path to a C(.wav) file containing a sound to play before the text is spoken. Useful on conference calls to alert other speakers that ansible has something to say. + required: false + default: null + end_sound_path: + description: + - Full path to a C(.wav) file containing a sound to play after the text has been spoken. Useful on conference calls to alert other speakers that ansible has finished speaking. 
+ required: false + default: null +author: "Jon Hawkesworth (@jhawkesworth)" +notes: + - Needs speakers or headphones to do anything useful. + - To find which voices are installed, run the following powershell + Add-Type -AssemblyName System.Speech + $speech = New-Object -TypeName System.Speech.Synthesis.SpeechSynthesizer + $speech.GetInstalledVoices() | ForEach-Object { $_.VoiceInfo } + $speech.Dispose() + - Speech can be surprisingly slow, so it's best to keep message text short. +''' + +EXAMPLES = ''' + # Warn of impending deployment +- win_say: + msg: Warning, deployment commencing in 5 minutes, please log out. + # Using a different voice and a start sound +- win_say: + start_sound_path: 'C:\Windows\Media\ding.wav' + msg: Warning, deployment commencing in 5 minutes, please log out. + voice: Microsoft Hazel Desktop + # example with start and end sound +- win_say: + start_sound_path: 'C:\Windows\Media\Windows Balloon.wav' + msg: "New software installed" + end_sound_path: 'C:\Windows\Media\chimes.wav' + # text from file example +- win_say: + start_sound_path: 'C:\Windows\Media\Windows Balloon.wav' + msg_file: AppData\Local\Temp\morning_report.txt + end_sound_path: 'C:\Windows\Media\chimes.wav' +''' +RETURN = ''' +message_text: + description: the text that the module attempted to speak + returned: success + type: string + sample: "Warning, deployment commencing in 5 minutes." +voice: + description: the voice used to speak the text. + returned: success + type: string + sample: Microsoft Hazel Desktop +voice_info: + description: warning emitted when the requested voice could not be loaded and the default voice was used instead. + returned: when requested voice could not be loaded + type: string + sample: Could not load voice TestVoice, using system default voice +''' + diff --git a/windows/win_scheduled_task.ps1 b/windows/win_scheduled_task.ps1 index 2f802f59cd0..70ba45e29d3 100644 --- a/windows/win_scheduled_task.ps1 +++ b/windows/win_scheduled_task.ps1 @@ -2,6 +2,7 @@ # This file is part of Ansible # # Copyright 2015, Peter Mounce +# Michael Perzel # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -22,53 +23,142 @@ $ErrorActionPreference = "Stop" # POWERSHELL_COMMON $params = Parse-Args $args; + +$days_of_week = Get-AnsibleParam $params -name "days_of_week" +$enabled = Get-AnsibleParam $params -name "enabled" -default $true +$enabled = $enabled | ConvertTo-Bool +$description = Get-AnsibleParam $params -name "description" -default " " +$path = Get-AnsibleParam $params -name "path" +$argument = Get-AnsibleParam $params -name "argument" + $result = New-Object PSObject; Set-Attr $result "changed" $false; -if ($params.name) -{ - $name = $params.name -} -else +#Required vars +$name = Get-AnsibleParam -obj $params -name name -failifempty $true -resultobj $result +$state = Get-AnsibleParam -obj $params -name state -failifempty $true -resultobj $result -validateSet "present","absent" + +#Vars conditionally required +$present_args_required = $state -eq "present" +$execute = Get-AnsibleParam -obj $params -name execute -failifempty $present_args_required -resultobj $result +$frequency = Get-AnsibleParam -obj $params -name frequency -failifempty $present_args_required -resultobj $result +$time = Get-AnsibleParam -obj $params -name time -failifempty $present_args_required -resultobj $result +$user = Get-AnsibleParam -obj $params -name user -failifempty $present_args_required -resultobj $result + + +# Mandatory Vars if ($frequency -eq "weekly") { - Fail-Json $result "missing 
required argument: name" + if (!($days_of_week)) + { + Fail-Json $result "missing required argument: days_of_week" + } } -if ($params.enabled) + +if ($path) { - $enabled = $params.enabled | ConvertTo-Bool + $path = "\{0}\" -f $path } else { - $enabled = $true + $path = "\" #default } -$target_state = @{$true = "Enabled"; $false="Disabled"}[$enabled] -try -{ - $tasks = Get-ScheduledTask -TaskPath $name - $tasks_needing_changing = $tasks |? { $_.State -ne $target_state } - if (-not($tasks_needing_changing -eq $null)) - { - if ($enabled) - { - $tasks_needing_changing | Enable-ScheduledTask +try { + $task = Get-ScheduledTask -TaskPath "$path" | Where-Object {$_.TaskName -eq "$name"} + + # Correlate task state to enable variable, used to calculate if state needs to be changed + $taskState = if ($task) { $task.State } else { $null } + if ($taskState -eq "Ready"){ + $taskState = $true + } + elseif($taskState -eq "Disabled"){ + $taskState = $false } else { - $tasks_needing_changing | Disable-ScheduledTask - } - Set-Attr $result "tasks_changed" ($tasks_needing_changing | foreach { $_.TaskPath + $_.TaskName }) - $result.changed = $true - } - else - { - Set-Attr $result "tasks_changed" @() - $result.changed = $false - } - - Exit-Json $result; + $taskState = $null + } + + $measure = $task | measure + if ($measure.count -eq 1 ) { + $exists = $true + } + elseif ( ($measure.count -eq 0) -and ($state -eq "absent") ){ + Set-Attr $result "msg" "Task does not exist" + Exit-Json $result + } + elseif ($measure.count -eq 0){ + $exists = $false + } + else { + # This should never occur + Fail-Json $result "$($measure.count) scheduled tasks found" + } + + Set-Attr $result "exists" "$exists" + + if ($frequency){ + if ($frequency -eq "daily") { + $trigger = New-ScheduledTaskTrigger -Daily -At $time + } + elseif ($frequency -eq "weekly"){ + $trigger = New-ScheduledTaskTrigger -Weekly -At $time -DaysOfWeek $days_of_week + } + else { + Fail-Json $result "frequency must be daily or weekly" + } + } + + if ( ($state -eq "absent") -and ($exists -eq $true) ) { + Unregister-ScheduledTask -TaskName $name -Confirm:$false + $result.changed = $true + Set-Attr $result "msg" "Deleted task $name" + Exit-Json $result + } + elseif ( ($state -eq "absent") -and ($exists -eq $false) ) { + Set-Attr $result "msg" "Task $name does not exist" + Exit-Json $result + } + + $principal = New-ScheduledTaskPrincipal -UserId "$user" -LogonType ServiceAccount + + if ($enabled -eq $false){ + $settings = New-ScheduledTaskSettingsSet -Disable + } + else { + $settings = New-ScheduledTaskSettingsSet + } + + if ($argument) { + $action = New-ScheduledTaskAction -Execute $execute -Argument $argument + } + else { + $action = New-ScheduledTaskAction -Execute $execute + } + + if ( ($state -eq "present") -and ($exists -eq $false) ){ + Register-ScheduledTask -Action $action -Trigger $trigger -TaskName $name -Description $description -TaskPath $path -Settings $settings -Principal $principal + $task = Get-ScheduledTask -TaskName $name + Set-Attr $result "msg" "Added new task $name" + $result.changed = $true + } + elseif( ($state -eq "present") -and ($exists -eq $true) ) { + if ($task.Description -eq $description -and $task.TaskName -eq $name -and $task.TaskPath -eq $path -and $task.Actions.Execute -eq $execute -and $taskState -eq $enabled -and $task.Principal.UserId -eq $user) { + #No change in the task + Set-Attr $result "msg" "No change in task $name" + } + else { + Unregister-ScheduledTask -TaskName $name -Confirm:$false + Register-ScheduledTask -Action $action 
-Trigger $trigger -TaskName $name -Description $description -TaskPath $path -Settings $settings -Principal $principal + Set-Attr $result "msg" "Updated task $name" + $result.changed = $true + } + } + + Exit-Json $result; } catch { Fail-Json $result $_.Exception.Message -} +} \ No newline at end of file diff --git a/windows/win_scheduled_task.py b/windows/win_scheduled_task.py index 2c5867402c5..96a9b48f951 100644 --- a/windows/win_scheduled_task.py +++ b/windows/win_scheduled_task.py @@ -1,8 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- - -# (c) 2015, Peter Mounce -# # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify @@ -21,6 +18,10 @@ # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_scheduled_task @@ -28,24 +29,74 @@ short_description: Manage scheduled tasks description: - Manage scheduled tasks +notes: + - This module requires Windows Server 2012 or later. options: name: description: - Name of the scheduled task - - Supports * as wildcard required: true - enabled: + description: description: - - State that the task should become + - The description for the scheduled task required: false + enabled: + description: + - Enable/disable the task choices: - yes - no default: yes -author: Peter Mounce + state: + description: + - State that the task should become + required: true + choices: + - present + - absent + user: + description: + - User to run scheduled task as + required: false + execute: + description: + - Command the scheduled task should execute + required: false + argument: + description: + - Arguments to provide scheduled task action + required: false + frequency: + description: + - The frequency of the command, not idempotent + required: false + choices: + - daily + - weekly + time: + description: + - Time to execute scheduled task, not idempotent + required: false + days_of_week: + description: + - Days of the week to run a weekly task, not idempotent + required: false + path: + description: + - Task folder in which this task will be stored + default: '\' ''' EXAMPLES = ''' - # Disable the scheduled tasks with "WindowsUpdate" in their name - win_scheduled_task: name="*WindowsUpdate*" enabled=no +# Create a scheduled task to open a command prompt +- win_scheduled_task: + name: TaskName + execute: cmd + frequency: daily + time: 9am + description: open command prompt + path: example + enabled: yes + state: present + user: SYSTEM ''' diff --git a/windows/win_share.ps1 b/windows/win_share.ps1 new file mode 100644 index 00000000000..59e4e8ab810 --- /dev/null +++ b/windows/win_share.ps1 @@ -0,0 +1,251 @@ +#!powershell +# This file is part of Ansible + +# Copyright 2015, Hans-Joachim Kliemeck +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
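+# Implementation note: share management below relies on the built-in SmbShare cmdlets +# (New-SmbShare, Set-SmbShare, Grant-/Revoke-SmbShareAccess, Block-/Unblock-SmbShareAccess), +# which is why this module needs Windows 8.1 / Server 2012 or newer.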
+ +# WANT_JSON +# POWERSHELL_COMMON + +#Functions +Function UserSearch +{ + Param ([string]$accountName) + #Check if there's a realm specified + + $searchDomain = $false + $searchDomainUPN = $false + if ($accountName.Split("\").count -gt 1) + { + if ($accountName.Split("\")[0] -ne $env:COMPUTERNAME) + { + $searchDomain = $true + $accountName = $accountName.split("\")[1] + } + } + Elseif ($accountName.contains("@")) + { + $searchDomain = $true + $searchDomainUPN = $true + } + Else + { + #Default to local user account + $accountName = $env:COMPUTERNAME + "\" + $accountName + } + + if ($searchDomain -eq $false) + { + # do not use Win32_UserAccount, because e.g. SYSTEM (BUILTIN\SYSTEM or COMPUTERNAME\SYSTEM) will not be listed. with Win32_Account, groups will be listed too + $localaccount = get-wmiobject -class "Win32_Account" -namespace "root\CIMV2" -filter "(LocalAccount = True)" | where {$_.Caption -eq $accountName} + if ($localaccount) + { + return $localaccount.SID + } + } + Else + { + #Search by samaccountname + $Searcher = [adsisearcher]"" + + If ($searchDomainUPN -eq $false) { + $Searcher.Filter = "sAMAccountName=$($accountName)" + } + Else { + $Searcher.Filter = "userPrincipalName=$($accountName)" + } + + $result = $Searcher.FindOne() + if ($result) + { + $user = $result.GetDirectoryEntry() + + # get binary SID from AD account + $binarySID = $user.ObjectSid.Value + + # convert to string SID + return (New-Object System.Security.Principal.SecurityIdentifier($binarySID,0)).Value + } + } +} +Function NormalizeAccounts +{ + param( + [parameter(valuefrompipeline=$true)] + $users + ) + + $users = $users.Trim() + If ($users -eq "") { + $splittedUsers = [Collections.Generic.List[String]] @() + } + Else { + $splittedUsers = [Collections.Generic.List[String]] $users.Split(",") + } + + $normalizedUsers = [Collections.Generic.List[String]] @() + ForEach($splittedUser in $splittedUsers) { + $sid = UserSearch $splittedUser + If (!$sid) { + Fail-Json $result "$splittedUser is not a valid user or group on the host machine or domain" + } + + $normalizedUser = (New-Object System.Security.Principal.SecurityIdentifier($sid)).Translate([System.Security.Principal.NTAccount]) + $normalizedUsers.Add($normalizedUser) + } + + return ,$normalizedUsers +} + +$params = Parse-Args $args; + +$result = New-Object PSObject; +Set-Attr $result "changed" $false; + +$name = Get-Attr $params "name" -failifempty $true +$state = Get-Attr $params "state" "present" -validateSet "present","absent" -resultobj $result + +Try { + $share = Get-SmbShare $name -ErrorAction SilentlyContinue + If ($state -eq "absent") { + If ($share) { + Remove-SmbShare -Force -Name $name + Set-Attr $result "changed" $true; + } + } + Else { + $path = Get-Attr $params "path" -failifempty $true + $description = Get-Attr $params "description" "" + + $permissionList = Get-Attr $params "list" "no" -validateSet "no","yes" -resultobj $result | ConvertTo-Bool + $folderEnum = if ($permissionList) { "Unrestricted" } else { "AccessBased" } + + $permissionRead = Get-Attr $params "read" "" | NormalizeAccounts + $permissionChange = Get-Attr $params "change" "" | NormalizeAccounts + $permissionFull = Get-Attr $params "full" "" | NormalizeAccounts + $permissionDeny = Get-Attr $params "deny" "" | NormalizeAccounts + + If (-Not (Test-Path -Path $path)) { + Fail-Json $result "$path directory does not exist on the host" + } + + # normalize path and remove slash at the end + $path = (Get-Item $path).FullName -replace "\\$" + + # need to (re-)create share + If (!$share) { 
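+ # the share does not exist yet - create it here, then fall through so the description, enumeration-mode and permission reconciliation below runs against the fresh share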
New-SmbShare -Name $name -Path $path + $share = Get-SmbShare $name -ErrorAction SilentlyContinue + + Set-Attr $result "changed" $true; + } + If ($share.Path -ne $path) { + Remove-SmbShare -Force -Name $name + + New-SmbShare -Name $name -Path $path + $share = Get-SmbShare $name -ErrorAction SilentlyContinue + + Set-Attr $result "changed" $true; + } + + # updates + If ($share.Description -ne $description) { + Set-SmbShare -Force -Name $name -Description $description + Set-Attr $result "changed" $true; + } + If ($share.FolderEnumerationMode -ne $folderEnum) { + Set-SmbShare -Force -Name $name -FolderEnumerationMode $folderEnum + Set-Attr $result "changed" $true; + } + + # clean permissions that imply others + ForEach ($user in $permissionFull) { + $permissionChange.remove($user) + $permissionRead.remove($user) + } + ForEach ($user in $permissionChange) { + $permissionRead.remove($user) + } + + # remove permissions + $permissions = Get-SmbShareAccess -Name $name + ForEach ($permission in $permissions) { + If ($permission.AccessControlType -eq "Deny") { + If (!$permissionDeny.Contains($permission.AccountName)) { + Unblock-SmbShareAccess -Force -Name $name -AccountName $permission.AccountName + Set-Attr $result "changed" $true; + } + } + ElseIf ($permission.AccessControlType -eq "Allow") { + If ($permission.AccessRight -eq "Full") { + If (!$permissionFull.Contains($permission.AccountName)) { + Revoke-SmbShareAccess -Force -Name $name -AccountName $permission.AccountName + Set-Attr $result "changed" $true; + + Continue + } + + # user got requested permissions + $permissionFull.remove($permission.AccountName) + } + ElseIf ($permission.AccessRight -eq "Change") { + If (!$permissionChange.Contains($permission.AccountName)) { + Revoke-SmbShareAccess -Force -Name $name -AccountName $permission.AccountName + Set-Attr $result "changed" $true; + + Continue + } + + # user got requested permissions + $permissionChange.remove($permission.AccountName) + } + ElseIf ($permission.AccessRight -eq "Read") { + If (!$permissionRead.Contains($permission.AccountName)) { + Revoke-SmbShareAccess -Force -Name $name -AccountName $permission.AccountName + Set-Attr $result "changed" $true; + + Continue + } + + # user got requested permissions + $permissionRead.Remove($permission.AccountName) + } + } + } + + # add missing permissions + ForEach ($user in $permissionRead) { + Grant-SmbShareAccess -Force -Name $name -AccountName $user -AccessRight "Read" + Set-Attr $result "changed" $true; + } + ForEach ($user in $permissionChange) { + Grant-SmbShareAccess -Force -Name $name -AccountName $user -AccessRight "Change" + Set-Attr $result "changed" $true; + } + ForEach ($user in $permissionFull) { + Grant-SmbShareAccess -Force -Name $name -AccountName $user -AccessRight "Full" + Set-Attr $result "changed" $true; + } + ForEach ($user in $permissionDeny) { + Block-SmbShareAccess -Force -Name $name -AccountName $user + Set-Attr $result "changed" $true; + } + } +} +Catch { + Fail-Json $result "an error occurred when attempting to create share $name" +} + +Exit-Json $result \ No newline at end of file diff --git a/windows/win_share.py b/windows/win_share.py new file mode 100644 index 00000000000..bca7646cf3f --- /dev/null +++ b/windows/win_share.py @@ -0,0 +1,121 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2015, Hans-Joachim Kliemeck +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free 
Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'core', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: win_share +version_added: "2.1" +short_description: Manage Windows shares +description: + - Add, modify or remove Windows share and set share permissions. +requirements: + - Windows 8.1 / Windows 2012 or newer +options: + name: + description: + - Share name + required: yes + path: + description: + - Share directory + required: yes + state: + description: + - Specify whether to add C(present) or remove C(absent) the specified share + required: no + choices: + - present + - absent + default: present + description: + description: + - Share description + required: no + default: none + list: + description: + - Specify whether to allow or deny file listing, in case the user has no permission on the share + required: no + choices: + - yes + - no + default: none + read: + description: + - Specify user list that should get read access on share, separated by comma. + required: no + default: none + change: + description: + - Specify user list that should get read and write access on share, separated by comma. + required: no + default: none + full: + description: + - Specify user list that should get full access on share, separated by comma. + required: no + default: none + deny: + description: + - Specify user list that should get no access, regardless of implied access on share, separated by comma. + required: no + default: none +author: Hans-Joachim Kliemeck (@h0nIg) +''' + +EXAMPLES = ''' +# Playbook example +# Add share and set permissions +--- +- name: Add secret share + win_share: + name: internal + description: top secret share + path: C:/shares/internal + list: 'no' + full: Administrators,CEO + read: HR-Global + deny: HR-External + +- name: Add public company share + win_share: + name: company + description: public company share + path: C:/shares/company + list: 'yes' + full: Administrators,CEO + read: Global + +- name: Remove previously added share + win_share: + name: internal + state: absent +''' + +RETURN = ''' + +''' \ No newline at end of file diff --git a/windows/win_timezone.ps1 b/windows/win_timezone.ps1 new file mode 100644 index 00000000000..03a6935052d --- /dev/null +++ b/windows/win_timezone.ps1 @@ -0,0 +1,71 @@ +#!powershell +# This file is part of Ansible +# +# Copyright 2015, Phil Schwartz +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
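+# Implementation note: this module wraps tzutil.exe - /g prints the current timezone, +# /l lists the timezone IDs the machine supports, and /s sets one. The same sequence can +# be tried by hand on the target, e.g.: tzutil.exe /g; tzutil.exe /l; tzutil.exe /s "Central Standard Time"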
+ +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +$result = New-Object psobject @{ + win_timezone = New-Object psobject + changed = $false +} + +$timezone = Get-Attr -obj $params -name timezone -failifempty $true -resultobj $result + +Try { + # Get the current timezone set + $currentTZ = $(tzutil.exe /g) + If ($LASTEXITCODE -ne 0) { Throw "An error occurred when getting the current machine's timezone setting." } + + If ( $currentTZ -eq $timezone ) { + Exit-Json $result "$timezone is already set on this machine" + } + Else { + $tzExists = $false + #Check that timezone can even be set (if it is listed from tzutil as an available timezone to the machine) + $tzList = $(tzutil.exe /l) + If ($LASTEXITCODE -ne 0) { Throw "An error occurred when listing the available timezones." } + ForEach ($tz in $tzList) { + If ( $tz -eq $timezone ) { + $tzExists = $true + break + } + } + + If ( $tzExists ) { + tzutil.exe /s "$timezone" + If ($LASTEXITCODE -ne 0) { Throw "An error occurred when setting the specified timezone with tzutil." } + $newTZ = $(tzutil.exe /g) + If ($LASTEXITCODE -ne 0) { Throw "An error occurred when getting the current machine's timezone setting." } + + If ( $timezone -eq $newTZ ) { + $result.changed = $true + } + } + Else { + Fail-Json $result "The specified timezone: $timezone isn't supported on the machine." + } + } +} +Catch { + Fail-Json $result "Error setting timezone to: $timezone." +} + + +Exit-Json $result; \ No newline at end of file diff --git a/windows/win_timezone.py b/windows/win_timezone.py new file mode 100644 index 00000000000..02b9bb9c457 --- /dev/null +++ b/windows/win_timezone.py @@ -0,0 +1,53 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Phil Schwartz +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +# this is a windows documentation stub. actual code lives in the .ps1 +# file of the same name + +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: win_timezone +version_added: "2.1" +short_description: Sets Windows machine timezone +description: + - Sets the machine's timezone to the specified timezone; the module will check if the provided timezone is supported on the machine. +options: + timezone: + description: + - Timezone to set to. 
Example Central Standard Time + required: true + default: null + aliases: [] + +author: Phil Schwartz +''' + + +EXAMPLES = ''' + # Set machine's timezone to Central Standard Time + win_timezone: + timezone: "Central Standard Time" +''' + +RETURN = '''# ''' diff --git a/windows/win_unzip.ps1 b/windows/win_unzip.ps1 index a62f246f5c8..59fbd33166c 100644 --- a/windows/win_unzip.ps1 +++ b/windows/win_unzip.ps1 @@ -19,6 +19,7 @@ # WANT_JSON # POWERSHELL_COMMON + $params = Parse-Args $args; $result = New-Object psobject @{ @@ -26,64 +27,47 @@ $result = New-Object psobject @{ changed = $false } -If ($params.creates) { +$creates = Get-AnsibleParam -obj $params -name "creates" +If ($creates -ne $null) { If (Test-Path $params.creates) { Exit-Json $result "The 'creates' file or directory already exists." } - } -If ($params.src) { - $src = $params.src.toString() - - If (-Not (Test-Path -path $src)){ - Fail-Json $result "src file: $src does not exist." - } - - $ext = [System.IO.Path]::GetExtension($src) -} -Else { - Fail-Json $result "missing required argument: src" +$src = Get-AnsibleParam -obj $params -name "src" -failifempty $true +If (-Not (Test-Path -path $src)){ + Fail-Json $result "src file: $src does not exist." } -If (-Not($params.dest -eq $null)) { - $dest = $params.dest.toString() +$ext = [System.IO.Path]::GetExtension($src) - If (-Not (Test-Path $dest -PathType Container)){ - Try{ - New-Item -itemtype directory -path $dest - } - Catch { - Fail-Json $result "Error creating $dest directory" - } - } -} -Else { - Fail-Json $result "missing required argument: dest" -} -If ($params.recurse) { - $recurse = ConvertTo-Bool ($params.recurse) -} -Else { - $recurse = $false +$dest = Get-AnsibleParam -obj $params -name "dest" -failifempty $true +If (-Not (Test-Path $dest -PathType Container)){ + Try{ + New-Item -itemtype directory -path $dest + } + Catch { + $err_msg = $_.Exception.Message + Fail-Json $result "Error creating $dest directory! Msg: $err_msg" + } } -If ($params.rm) { - $rm = ConvertTo-Bool ($params.rm) -} -Else { - $rm = $false -} +$recurse = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "recurse" -default "false") +$rm = ConvertTo-Bool (Get-AnsibleParam -obj $params -name "rm" -default "false") If ($ext -eq ".zip" -And $recurse -eq $false) { Try { $shell = New-Object -ComObject Shell.Application - $shell.NameSpace($dest).copyhere(($shell.NameSpace($src)).items(), 20) + $zipPkg = $shell.NameSpace([IO.Path]::GetFullPath($src)) + $destPath = $shell.NameSpace([IO.Path]::GetFullPath($dest)) + # 20 means do not display any dialog (4) and overwrite any file (16) + $destPath.CopyHere($zipPkg.Items(), 20) $result.changed = $true } Catch { - Fail-Json $result "Error unzipping $src to $dest" + $err_msg = $_.Exception.Message + Fail-Json $result "Error unzipping $src to $dest! Msg: $err_msg" } } # Requires PSCX @@ -127,11 +111,12 @@ Else { } } Catch { + $err_msg = $_.Exception.Message If ($recurse) { - Fail-Json $result "Error recursively expanding $src to $dest" + Fail-Json $result "Error recursively expanding $src to $dest! Msg: $err_msg" } Else { - Fail-Json $result "Error expanding $src to $dest" + Fail-Json $result "Error expanding $src to $dest! 
Msg: $err_msg" } } } @@ -154,4 +139,4 @@ Set-Attr $result.win_unzip "src" $src.toString() Set-Attr $result.win_unzip "dest" $dest.toString() Set-Attr $result.win_unzip "recurse" $recurse.toString() -Exit-Json $result; \ No newline at end of file +Exit-Json $result; diff --git a/windows/win_unzip.py b/windows/win_unzip.py index aa0180baf74..708a909820b 100644 --- a/windows/win_unzip.py +++ b/windows/win_unzip.py @@ -21,6 +21,10 @@ # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name +ANSIBLE_METADATA = {'status': ['preview'], + 'supported_by': 'community', + 'version': '1.0'} + DOCUMENTATION = ''' --- module: win_unzip @@ -65,9 +69,9 @@ author: Phil Schwartz ''' -EXAMPLES = ''' +EXAMPLES = r''' # This unzips a library that was downloaded with win_get_url, and removes the file after extraction -$ ansible -i hosts -m win_unzip -a "src=C:\\LibraryToUnzip.zip dest=C:\\Lib rm=true" all +$ ansible -i hosts -m win_unzip -a "src=C:\LibraryToUnzip.zip dest=C:\Lib rm=true" all # Playbook example # Simple unzip @@ -95,12 +99,12 @@ - name: Grab PSCX msi win_get_url: url: 'http://download-codeplex.sec.s-msft.com/Download/Release?ProjectName=pscx&DownloadId=923562&FileTime=130585918034470000&Build=20959' - dest: 'C:\\pscx.msi' + dest: 'C:\pscx.msi' - name: Install PSCX win_msi: - path: 'C:\\pscx.msi' + path: 'C:\pscx.msi' - name: Unzip gz log win_unzip: - src: "C:\\Logs\\application-error-logs.gz" - dest: "C:\\ExtractedLogs\\application-error-logs" + src: "C:\Logs\application-error-logs.gz" + dest: "C:\ExtractedLogs\application-error-logs" ''' diff --git a/windows/win_updates.ps1 b/windows/win_updates.ps1 index 92c1b93e1f8..a74e68f3663 100644 --- a/windows/win_updates.ps1 +++ b/windows/win_updates.ps1 @@ -1,7 +1,7 @@ #!powershell # This file is part of Ansible # -# Copyright 2014, Trond Hindenes +# Copyright 2015, Matt Davis # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -19,68 +19,406 @@ # WANT_JSON # POWERSHELL_COMMON -function Write-Log -{ - param - ( - [parameter(mandatory=$false)] - [System.String] - $message - ) +$ErrorActionPreference = "Stop" +$FormatEnumerationLimit = -1 # prevent out-string et al from truncating collection dumps - $date = get-date -format 'yyyy-MM-dd hh:mm:ss.zz' +<# Most of the Windows Update Agent API will not run under a remote token, +which a remote WinRM session always has. win_updates uses the Task Scheduler +to run the bulk of the update functionality under a local token. Powershell's +Scheduled-Job capability provides a decent abstraction over the Task Scheduler +and handles marshaling Powershell args in and output/errors/etc back. The +module schedules a single job that executes all interactions with the Update +Agent API, then waits for completion. A significant amount of hassle is +involved to ensure that only one of these jobs is running at a time, and to +clean up the various error conditions that can occur. 
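The rough shape of that pattern, as an illustrative sketch only (the names here are placeholders; the real implementation, including a PS3 fallback trigger, is RunAsScheduledJob further down): $sj = Register-ScheduledJob -Name $job_name -ScriptBlock $job_body -ScheduledJobOption @{ RunElevated = $True }; $sj.RunAsTask(); $job = Wait-Job -Name $sj.Name; then read $job.Output. 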
#> - Write-Host "$date $message" +# define the ScriptBlock that will be passed to Register-ScheduledJob +$job_body = { + Param( + [hashtable]$boundparms=@{}, + [Object[]]$unboundargs=$() + ) - Out-File -InputObject "$date $message" -FilePath $global:LoggingFile -Append + Set-StrictMode -Version 2 + + $ErrorActionPreference = "Stop" + $DebugPreference = "Continue" + $FormatEnumerationLimit = -1 # prevent out-string et al from truncating collection dumps + + # set this as a global for the Write-DebugLog function + $log_path = $boundparms['log_path'] + + Write-DebugLog "Scheduled job started with boundparms $($boundparms | out-string) and unboundargs $($unboundargs | out-string)" + + # FUTURE: elevate this to module arg validation once we have it + Function MapCategoryNameToGuid { + Param([string] $category_name) + + $category_guid = switch -exact ($category_name) { + # as documented by TechNet @ https://technet.microsoft.com/en-us/library/ff730937.aspx + "Application" {"5C9376AB-8CE6-464A-B136-22113DD69801"} + "Connectors" {"434DE588-ED14-48F5-8EED-A15E09A991F6"} + "CriticalUpdates" {"E6CF1350-C01B-414D-A61F-263D14D133B4"} + "DefinitionUpdates" {"E0789628-CE08-4437-BE74-2495B842F43B"} + "DeveloperKits" {"E140075D-8433-45C3-AD87-E72345B36078"} + "FeaturePacks" {"B54E7D24-7ADD-428F-8B75-90A396FA584F"} + "Guidance" {"9511D615-35B2-47BB-927F-F73D8E9260BB"} + "SecurityUpdates" {"0FA1201D-4330-4FA8-8AE9-B877473B6441"} + "ServicePacks" {"68C5B0A3-D1A6-4553-AE49-01D3A7827828"} + "Tools" {"B4832BD8-E735-4761-8DAF-37F882276DAB"} + "UpdateRollups" {"28BC880E-0592-4CBF-8F95-C79B17911D5F"} + "Updates" {"CD5FFD1E-E932-4E3A-BF74-18BF0B1BBD83"} + default { throw "Unknown category_name $category_name, must be one of (Application,Connectors,CriticalUpdates,DefinitionUpdates,DeveloperKits,FeaturePacks,Guidance,SecurityUpdates,ServicePacks,Tools,UpdateRollups,Updates)" } + } + + return $category_guid + } + + Function DoWindowsUpdate { + Param( + [string[]]$category_names=@("CriticalUpdates","SecurityUpdates","UpdateRollups"), + [ValidateSet("installed", "searched")] + [string]$state="installed", + [bool]$_ansible_check_mode=$false + ) + + $is_check_mode = $($state -eq "searched") -or $_ansible_check_mode + + $category_guids = $category_names | % { MapCategoryNameToGUID $_ } + + $update_status = @{ changed = $false } + + Write-DebugLog "Creating Windows Update session..." + $session = New-Object -ComObject Microsoft.Update.Session + + Write-DebugLog "Create Windows Update searcher..." + $searcher = $session.CreateUpdateSearcher() + + # OR is only allowed at the top-level, so we have to repeat base criteria inside + # FUTURE: change this to client-side filtered? + $criteriabase = "IsInstalled = 0" + $criteria_list = $category_guids | % { "($criteriabase AND CategoryIDs contains '$_')" } + + $criteria = [string]::Join(" OR ", $criteria_list) + + Write-DebugLog "Search criteria: $criteria" + + Write-DebugLog "Searching for updates to install in category IDs $category_guids..." + $searchresult = $searcher.Search($criteria) + + Write-DebugLog "Creating update collection..." 
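+ # Microsoft.Update.UpdateColl is the Update Agent's COM collection (IUpdateCollection); updates that pass the filters below are batched into it so download and install happen in a single pass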
+ + $updates_to_install = New-Object -ComObject Microsoft.Update.UpdateColl + + Write-DebugLog "Found $($searchresult.Updates.Count) updates" + + $update_status.updates = @{ } + + # FUTURE: add further filtering options + foreach($update in $searchresult.Updates) { + if(-Not $update.EulaAccepted) { + Write-DebugLog "Accepting EULA for $($update.Identity.UpdateID)" + $update.AcceptEula() + } + + if($update.IsHidden) { + Write-DebugLog "Skipping hidden update $($update.Title)" + continue + } + + Write-DebugLog "Adding update $($update.Identity.UpdateID) - $($update.Title)" + $res = $updates_to_install.Add($update) + + $update_status.updates[$update.Identity.UpdateID] = @{ + title = $update.Title + # TODO: pluck the first KB out (since most have just one)? + kb = $update.KBArticleIDs + id = $update.Identity.UpdateID + installed = $false + } + } + + Write-DebugLog "Calculating pre-install reboot requirement..." + + # calculate this early for check mode, and to see if we should allow updates to continue + $sysinfo = New-Object -ComObject Microsoft.Update.SystemInfo + $update_status.reboot_required = $sysinfo.RebootRequired + $update_status.found_update_count = $updates_to_install.Count + $update_status.installed_update_count = 0 + + # bail out here for check mode + if($is_check_mode -eq $true) { + Write-DebugLog "Check mode; exiting..." + Write-DebugLog "Return value: $($update_status | out-string)" + + if($updates_to_install.Count -gt 0) { $update_status.changed = $true } + return $update_status + } + + if($updates_to_install.Count -gt 0) { + if($update_status.reboot_required) { + throw "A reboot is required before more updates can be installed." + } + else { + Write-DebugLog "No reboot is pending..." + } + Write-DebugLog "Downloading updates..." + } + + foreach($update in $updates_to_install) { + if($update.IsDownloaded) { + Write-DebugLog "Update $($update.Identity.UpdateID) already downloaded, skipping..." + continue + } + Write-DebugLog "Creating downloader object..." + $dl = $session.CreateUpdateDownloader() + Write-DebugLog "Creating download collection..." + $dl.Updates = New-Object -ComObject Microsoft.Update.UpdateColl + Write-DebugLog "Adding update $($update.Identity.UpdateID)" + $res = $dl.Updates.Add($update) + Write-DebugLog "Downloading update $($update.Identity.UpdateID)..." + $download_result = $dl.Download() + # FUTURE: configurable download retry + if($download_result.ResultCode -ne 2) { # OperationResultCode orcSucceeded + throw "Failed to download update $($update.Identity.UpdateID)" + } + } + + if($updates_to_install.Count -lt 1 ) { return $update_status } + + Write-DebugLog "Installing updates..." + + # install as a batch so the reboot manager will suppress intermediate reboots + Write-DebugLog "Creating installer object..." + $inst = $session.CreateUpdateInstaller() + Write-DebugLog "Creating install collection..." + $inst.Updates = New-Object -ComObject Microsoft.Update.UpdateColl + + foreach($update in $updates_to_install) { + Write-DebugLog "Adding update $($update.Identity.UpdateID)" + $res = $inst.Updates.Add($update) + } + + # FUTURE: use BeginInstall w/ progress reporting so we can at least log intermediate install results + Write-DebugLog "Installing updates..." 
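+ # Install() blocks until the whole batch completes and returns an installation result object; per-update outcomes are only available positionally via GetUpdateResult($index), hence the $update_index bookkeeping below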
+        $install_result = $inst.Install()
+
+        $update_success_count = 0
+        $update_fail_count = 0
+
+        # WU result API requires us to index in to get the install results
+        $update_index = 0
+
+        foreach($update in $updates_to_install) {
+            $update_result = $install_result.GetUpdateResult($update_index)
+            $update_resultcode = $update_result.ResultCode
+            $update_hresult = $update_result.HResult
+
+            $update_index++
+
+            $update_dict = $update_status.updates[$update.Identity.UpdateID]
+
+            if($update_resultcode -eq 2) { # OperationResultCode orcSucceeded
+                $update_success_count++
+                $update_dict.installed = $true
+                Write-DebugLog "Update $($update.Identity.UpdateID) succeeded"
+            }
+            else {
+                $update_fail_count++
+                $update_dict.installed = $false
+                $update_dict.failed = $true
+                $update_dict.failure_hresult_code = $update_hresult
+                Write-DebugLog "Update $($update.Identity.UpdateID) failed, resultcode $update_resultcode, hresult $update_hresult"
+            }
+        }
+
+        if($update_fail_count -gt 0) {
+            $update_status.failed = $true
+            $update_status.msg = "Failed to install one or more updates"
+        }
+        else { $update_status.changed = $true }
+
+        Write-DebugLog "Performing post-install reboot requirement check..."
+
+        # recalculate reboot status after installs
+        $sysinfo = New-Object -ComObject Microsoft.Update.SystemInfo
+        $update_status.reboot_required = $sysinfo.RebootRequired
+        $update_status.installed_update_count = $update_success_count
+        $update_status.failed_update_count = $update_fail_count
+
+        Write-DebugLog "Return value: $($update_status | out-string)"
+
+        return $update_status
+    }
+
+    Try {
+        # job system adds a bunch of cruft to top-level dict, so we have to send a sub-dict
+        return @{ job_output = DoWindowsUpdate @boundparms }
+    }
+    Catch {
+        $excep = $_
+        Write-DebugLog "Fatal exception: $($excep.Exception.Message) at $($excep.ScriptStackTrace)"
+        return @{ job_output = @{ failed=$true;error=$excep.Exception.Message;location=$excep.ScriptStackTrace } }
+    }
 }

-$params = Parse-Args $args;
-$result = New-Object PSObject;
-Set-Attr $result "changed" $false;

+Function DestroyScheduledJob {
+    Param([string] $job_name)
+
+    # find a scheduled job with the same name (should normally fail)
+    $schedjob = Get-ScheduledJob -Name $job_name -ErrorAction SilentlyContinue
+
+    # nuke it if it's there
+    If($schedjob -ne $null) {
+        Write-DebugLog "ScheduledJob $job_name exists, ensuring it's not running..."
+        # can't manage jobs across sessions, so we have to resort to the Task Scheduler script object to kill running jobs
+        $schedserv = New-Object -ComObject Schedule.Service
+        Write-DebugLog "Connecting to scheduler service..."
+        $schedserv.Connect()
+        Write-DebugLog "Getting running tasks named $job_name"
+        $running_tasks = @($schedserv.GetRunningTasks(0) | Where-Object { $_.Name -eq $job_name })
+
+        Foreach($task_to_stop in $running_tasks) {
+            Write-DebugLog "Stopping running task $($task_to_stop.InstanceGuid)..."
+            $task_to_stop.Stop()
+        }
+
+        <# FUTURE: add a global waithandle for this to release any other waiters. Wait-Job
+           and/or polling will block forever, since the killed job object in the parent
+           session doesn't know it's been killed :( #>
+
+        Unregister-ScheduledJob -Name $job_name
+    }
-if(($params.logPath).Length -gt 0) {
-    $global:LoggingFile = $params.logPath
-} else {
-    $global:LoggingFile = "c:\ansible-playbook.log"
 }
-if ($params.category) {
-    $category = $params.category
-} else {
-    $category = "critical"
+
+Function RunAsScheduledJob {
+    Param([scriptblock] $job_body, [string] $job_name, [scriptblock] $job_init, [Object[]] $job_arg_list=@())
+
+    DestroyScheduledJob -job_name $job_name
+
+    $rsj_args = @{
+        ScriptBlock = $job_body
+        Name = $job_name
+        ArgumentList = $job_arg_list
+        ErrorAction = "Stop"
+        ScheduledJobOption = @{ RunElevated=$True }
+    }
+
+    if($job_init) { $rsj_args.InitializationScript = $job_init }
+
+    Write-DebugLog "Registering scheduled job with args $($rsj_args | Out-String -Width 300)"
+    $schedjob = Register-ScheduledJob @rsj_args
+
+    # RunAsTask isn't available in PS3- fall back to a 2s future trigger
+    if($schedjob | Get-Member -Name RunAsTask) {
+        Write-DebugLog "Starting scheduled job (PS4 method)"
+        $schedjob.RunAsTask()
+    }
+    else {
+        Write-DebugLog "Starting scheduled job (PS3 method)"
+        Add-JobTrigger -inputobject $schedjob -trigger $(New-JobTrigger -once -at $(Get-Date).AddSeconds(2))
+    }
+
+    $sw = [System.Diagnostics.Stopwatch]::StartNew()
+
+    $job = $null
+
+    Write-DebugLog "Waiting for job completion..."
+
+    # Wait-Job can fail for a few seconds until the scheduled task starts- poll for it...
+    while ($job -eq $null) {
+        start-sleep -Milliseconds 100
+        if($sw.ElapsedMilliseconds -ge 30000) { # tasks scheduled right after boot on 2008R2 can take awhile to start...
+            Throw "Timed out waiting for scheduled task to start"
+        }
+
+        # FUTURE: configurable timeout so we don't block forever?
+        # FUTURE: add a global WaitHandle in case another instance kills our job, so we don't block forever
+        $job = Wait-Job -Name $schedjob.Name -ErrorAction SilentlyContinue
+    }
+
+    $sw = [System.Diagnostics.Stopwatch]::StartNew()
+
+    # NB: output from scheduled jobs is delayed after completion (including the sub-objects after the primary Output object is available)
+    While (($job.Output -eq $null -or -not ($job.Output | Get-Member -Name Keys -ErrorAction Ignore) -or -not $job.Output.Keys.Contains('job_output')) -and $sw.ElapsedMilliseconds -lt 15000) {
+        Write-DebugLog "Waiting for job output to populate..."
+        Start-Sleep -Milliseconds 500
+    }
+
+    # NB: fallthru on both timeout and success
+
+    $ret = @{
+        ErrorOutput = $job.Error
+        WarningOutput = $job.Warning
+        VerboseOutput = $job.Verbose
+        DebugOutput = $job.Debug
+    }
+
+    If ($job.Output -eq $null -or -not $job.Output.Keys.Contains('job_output')) {
+        $ret.Output = @{failed = $true; msg = "job output was lost"}
+    }
+    Else {
+        $ret.Output = $job.Output.job_output # sub-object returned, can only be accessed as a property for some reason
+    }
+
+    Try { # this shouldn't be fatal, but can fail with both Powershell errors and COM Exceptions, hence the dual error-handling...
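+        # NB: -Force is expected to remove the job definition even if an
+        # instance is still running; a failure here is logged by the Catch
+        # below rather than failing the whole module run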
+        Unregister-ScheduledJob -Name $job_name -Force -ErrorAction Continue
+    }
+    Catch {
+        Write-DebugLog "Error unregistering job after execution: $($_.Exception.ToString()) $($_.ScriptStackTrace)"
+    }
+
+    return $ret
 }

-$installed_prior = get-wulist -isinstalled | foreach { $_.KBArticleIDs }
-set-attr $result "updates_already_present" $installed_prior
-
-write-log "Looking for updates in '$category'"
-set-attr $result "updates_category" $category
-$to_install = get-wulist -category $category
-$installed = @()
-foreach ($u in $to_install) {
-    $kb = $u.KBArticleIDs
-    write-log "Installing $kb - $($u.Title)"
-    $install_result = get-wuinstall -KBArticleID $u.KBArticleIDs -acceptall -ignorereboot
-    Set-Attr $result "updates_installed_KB$kb" $u.Title
-    $installed += $kb
+Function Log-Forensics {
+    Write-DebugLog "Arguments: $($job_args | out-string)"
+    Write-DebugLog "OS Version: $([environment]::OSVersion.Version | out-string)"
+    Write-DebugLog "Running as user: $([System.Security.Principal.WindowsIdentity]::GetCurrent().Name)"
+    Write-DebugLog "Powershell version: $($PSVersionTable | out-string)"
+    # FUTURE: log auth method (kerb, password, etc)
 }
-write-log "Installed: $($installed.count)"
-set-attr $result "updates_installed" $installed
-set-attr $result "updates_installed_count" $installed.count
-$result.changed = $installed.count -gt 0
-
-$installed_afterwards = get-wulist -isinstalled | foreach { $_.KBArticleIDs }
-set-attr $result "updates_installed_afterwards" $installed_afterwards
-
-$reboot_needed = Get-WURebootStatus
-write-log $reboot_needed
-if ($reboot_needed -match "not") {
-    write-log "Reboot not required"
-} else {
-    write-log "Reboot required"
-    Set-Attr $result "updates_reboot_needed" $true
-    $result.changed = $true
+
+# code shared between the scheduled job and the host script
+$common_inject = {
+    # FUTURE: capture all to a list, dump on error
+    Function Write-DebugLog {
+        Param(
+            [string]$msg
+        )
+
+        $DebugPreference = "Continue"
+        $ErrorActionPreference = "Continue"
+        $date_str = Get-Date -Format u
+        $msg = "$date_str $msg"
+
+        Write-Debug $msg
+
+        if($log_path -ne $null) {
+            Add-Content $log_path $msg
+        }
+    }
 }
-Set-Attr $result "updates_success" "true"
-Exit-Json $result;
+
+# source the common code into the current scope so we can call it
+. $common_inject
+
+$parsed_args = Parse-Args $args $true
+# grr, why use PSCustomObject for args instead of just native hashtable?
+$parsed_args.psobject.properties | foreach -begin {$job_args=@{}} -process {$job_args."$($_.Name)" = $_.Value} -end {$job_args}
+
+# set the log_path for the global log function we injected earlier
+$log_path = $job_args['log_path']
+
+Log-Forensics
+
+Write-DebugLog "Starting scheduled job with args: $($job_args | Out-String -Width 300)"
+
+# pass the common code as job_init so it'll be injected into the scheduled job script
+$sjo = RunAsScheduledJob -job_init $common_inject -job_body $job_body -job_name ansible-win-updates -job_arg_list $job_args
+
+Write-DebugLog "Scheduled job completed with output: $($sjo.Output | Out-String -Width 300)"
+
+Exit-Json $sjo.Output
\ No newline at end of file
diff --git a/windows/win_updates.py b/windows/win_updates.py
index 13c57f2b6d1..3fa5d0e3278 100644
--- a/windows/win_updates.py
+++ b/windows/win_updates.py
@@ -1,7 +1,7 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
-# (c) 2014, Peter Mounce
+# (c) 2015, Matt Davis
 #
 # This file is part of Ansible
 #
@@ -21,37 +21,128 @@
 # this is a windows documentation stub. actual code lives in the .ps1
 # file of the same name

+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'core',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: win_updates
-version_added: "1.9"
-short_description: Lists / Installs windows updates
+version_added: "2.0"
+short_description: Download and install Windows updates
 description:
-    - Installs windows updates using PSWindowsUpdate (http://gallery.technet.microsoft.com/scriptcenter/2d191bcd-3308-4edd-9de2-88dff796b0bc).
-    - PSWindowsUpdate needs to be installed first - use win_chocolatey.
+    - Searches, downloads, and installs Windows updates synchronously by automating the Windows Update client.
 options:
-    category:
-        description:
-            - Which category to install updates from
-        required: false
-        default: critical
-        choices:
-            - critical
-            - security
-            - (anything that is a valid update category)
-        default: critical
-        aliases: []
-    logPath:
-        description:
-            - Where to log command output to
-        required: false
-        default: c:\\ansible-playbook.log
-        aliases: []
-author: "Peter Mounce (@petemounce)"
+    category_names:
+        description:
+            - A scalar or list of categories to install updates from
+        required: false
+        default: ["CriticalUpdates","SecurityUpdates","UpdateRollups"]
+        choices:
+            - Application
+            - Connectors
+            - CriticalUpdates
+            - DefinitionUpdates
+            - DeveloperKits
+            - FeaturePacks
+            - Guidance
+            - SecurityUpdates
+            - ServicePacks
+            - Tools
+            - UpdateRollups
+            - Updates
+    state:
+        description:
+            - Controls whether found updates are returned as a list or actually installed.
+            - This module also supports Ansible check mode, which has the same effect as setting state=searched.
+        required: false
+        default: installed
+        choices:
+            - installed
+            - searched
+    log_path:
+        description:
+            - If set, win_updates will append update progress to the specified file. The directory must already exist.
+        required: false
+author: "Matt Davis (@mattdavispdx)"
+notes:
+- win_updates must be run by a user with membership in the local Administrators group.
+- win_updates will use the default update service configured for the machine (Windows Update, Microsoft Update, WSUS, etc).
+- win_updates does not manage reboots, but will signal when a reboot is required with the reboot_required return value.
+- win_updates can take a significant amount of time to complete (hours, in some cases). Performance depends on many factors, including OS version, number of updates, system load, and update server load.
 '''

 EXAMPLES = '''
-  # Install updates from security category
-  win_updates:
-    category: security
+# Install all security, critical, and rollup updates
+- win_updates:
+    category_names:
+      - SecurityUpdates
+      - CriticalUpdates
+      - UpdateRollups
+
+# Install only security updates
+- win_updates:
+    category_names: SecurityUpdates
+
+# Search-only, return list of found updates (if any), log to c:\ansible_wu.txt
+- win_updates:
+    category_names: SecurityUpdates
+    state: searched
+    log_path: c:\ansible_wu.txt
 '''
+
+RETURN = '''
+reboot_required:
+    description: True when the target server requires a reboot to complete updates (no further updates can be installed until after a reboot)
+    returned: success
+    type: boolean
+    sample: True
+
+updates:
+    description: List of updates that were found/installed
+    returned: success
+    type: dictionary
+    contains:
+        title:
+            description: Display name
+            returned: always
+            type: string
+            sample: "Security Update for Windows Server 2012 R2 (KB3004365)"
+        kb:
+            description: A list of KB article IDs that apply to the update
+            returned: always
+            type: list of strings
+            sample: [ '3004365' ]
+        id:
+            description: Internal Windows Update GUID
+            returned: always
+            type: string (guid)
+            sample: "fb95c1c8-de23-4089-ae29-fd3351d55421"
+        installed:
+            description: Was the update successfully installed
+            returned: always
+            type: boolean
+            sample: True
+        failure_hresult_code:
+            description: The HRESULT code from a failed update
+            returned: on install failure
+            type: int
+            sample: 2147942402
+
+found_update_count:
+    description: The number of updates found needing to be applied
+    returned: success
+    type: int
+    sample: 3
+installed_update_count:
+    description: The number of updates successfully installed
+    returned: success
+    type: int
+    sample: 2
+failed_update_count:
+    description: The number of updates that failed to install
+    returned: always
+    type: int
+    sample: 0
+'''
diff --git a/windows/win_uri.ps1 b/windows/win_uri.ps1
new file mode 100644
index 00000000000..d701ef56b92
--- /dev/null
+++ b/windows/win_uri.ps1
@@ -0,0 +1,86 @@
+#!powershell
+# This file is part of Ansible
+#
+# Copyright 2015, Corwin Brown
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

+# WANT_JSON
+# POWERSHELL_COMMON

+$params = Parse-Args $args;
+
+$result = New-Object psobject @{
+    win_uri = New-Object psobject
+}
+
+# Functions ###############################################
+
+Function ConvertTo-SnakeCase($input_string) {
+    $snake_case = $input_string -csplit "(?<!^)(?=[A-Z])" -join "_"
+    return $snake_case.ToLower()
+}
diff --git a/windows/win_uri.py b/windows/win_uri.py
new file mode 100644
--- /dev/null
+++ b/windows/win_uri.py
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Corwin Brown
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: win_uri
+version_added: "2.1"
+short_description: Interacts with web services
+description:
+    - Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE HTTP authentication mechanisms.
+options:
+    url:
+        description:
+            - HTTP or HTTPS URL in the form of (http|https)://host.domain:port/path
+    method:
+        description:
+            - The HTTP Method of the request or response.
+        default: GET
+        choices:
+            - GET
+            - POST
+            - PUT
+            - HEAD
+            - DELETE
+            - OPTIONS
+            - PATCH
+            - TRACE
+            - CONNECT
+            - REFRESH
+    content_type:
+        description:
+            - Sets the "Content-Type" header.
+    body:
+        description:
+            - The body of the HTTP request/response to the web service.
+    headers:
+        description:
+            - 'Key Value pairs for headers. Example "Host: www.somesite.com"'
+    use_basic_parsing:
+        description:
+            - This module relies upon 'Invoke-WebRequest', which by default uses the Internet Explorer engine to parse a webpage. There's an edge case where, if a user hasn't run IE before, this will fail. The only advantage of the Internet Explorer parser is that it allows traversing the DOM in a PowerShell script; that isn't useful for Ansible, so by default we toggle 'UseBasicParsing'. However, you can toggle that off here.
+        choices:
+            - True
+            - False
+        default: True
+author: Corwin Brown (@blakfeld)
+"""
+
+EXAMPLES = """
+# Send a GET request and store the output:
+---
+- name: Perform a GET and Store Output
+  win_uri:
+    url: http://www.somesite.com/myendpoint
+  register: http_output
+
+# Set a HOST header to hit an internal webserver:
+---
+- name: Hit a Specific Host on the Server
+  win_uri:
+    url: http://my.internal.server.com
+    method: GET
+    headers:
+      host: "www.somesite.com"
+
+# Do a HEAD request on an endpoint
+---
+- name: Perform a HEAD on an Endpoint
+  win_uri:
+    url: http://www.somesite.com
+    method: HEAD
+
+# Post a body to an endpoint
+---
+- name: POST a Body to an Endpoint
+  win_uri:
+    url: http://www.somesite.com
+    method: POST
+    body: "{ 'some': 'json' }"
+"""
+
+RETURN = """
+url:
+    description: The target URL.
+    returned: always
+    type: string
+    sample: "https://www.ansible.com"
+method:
+    description: The HTTP method used.
+    returned: always
+    type: string
+    sample: "GET"
+content_type:
+    description: The "content-type" header used.
+    returned: always
+    type: string
+    sample: "application/json"
+use_basic_parsing:
+    description: The state of the "use_basic_parsing" flag.
+    returned: always
+    type: bool
+    sample: True
+body:
+    description: The content of the body used.
+    returned: when body is specified
+    type: string
+    sample: '{"id":1}'
+    version_added: "2.3"
+status_code:
+    description: The HTTP Status Code of the response.
+    returned: success
+    type: int
+    sample: 200
+status_description:
+    description: A summary of the status.
+    returned: success
+    type: string
+    sample: "OK"
+raw_content:
+    description: The raw content of the HTTP response.
+    returned: success
+    type: string
+    sample: 'HTTP/1.1 200 OK\nX-XSS-Protection: 1; mode=block\nX-Frame-Options: SAMEORIGIN\nAlternate-Protocol: 443:quic,p=1\nAlt-Svc: quic="www.google.com:443"; ma=2592000; v="30,29,28,27,26,25",quic=":443"; ma=2...'
+headers:
+    description: The Headers of the response.
+    returned: success
+    type: dict
+    sample: {"Content-Type": "application/json"}
+raw_content_length:
+    description: The byte size of the response.
+    returned: success
+    type: int
+    sample: 54447
+"""
diff --git a/windows/win_webpicmd.ps1 b/windows/win_webpicmd.ps1
index 377edcdc3c8..a8624739d7c 100644
--- a/windows/win_webpicmd.ps1
+++ b/windows/win_webpicmd.ps1
@@ -25,14 +25,7 @@
 $params = Parse-Args $args;
 $result = New-Object PSObject;
 Set-Attr $result "changed" $false;

-If ($params.name)
-{
-    $package = $params.name
-}
-Else
-{
-    Fail-Json $result "missing required argument: name"
-}
+$package = Get-AnsibleParam $params -name "name" -failifempty $true

 Function Find-Command
 {
@@ -42,9 +35,9 @@ Function Find-Command
     )
     $installed = get-command $command -erroraction Ignore
     write-verbose "$installed"
-    if ($installed.length -gt 0)
+    if ($installed)
     {
-        return $installed[0]
+        return $installed
     }
     return $null
 }
@@ -87,8 +80,12 @@ Function Test-IsInstalledFromWebPI
     }
     Write-Verbose "$results"

-    $matches = $results | select-string -pattern "^$package\s+"
-    return $matches.length -gt 0
+    if ($results -match "^$package\s+")
+    {
+        return $true
+    }
+
+    return $false
 }

 Function Install-WithWebPICmd
@@ -112,8 +109,8 @@ Function Install-WithWebPICmd
     }

     write-verbose "$results"
-    $success = $results | select-string -pattern "Install of Products: SUCCESS"
-    if ($success.length -gt 0)
+
+    if ($results -match "Install of Products: SUCCESS")
     {
         $result.changed = $true
     }
diff --git a/windows/win_webpicmd.py b/windows/win_webpicmd.py
index 215123cef8c..3fc9d7d4335 100644
--- a/windows/win_webpicmd.py
+++ b/windows/win_webpicmd.py
@@ -21,6 +21,10 @@
 # this is a windows documentation stub. actual code lives in the .ps1
 # file of the same name

+ANSIBLE_METADATA = {'status': ['preview'],
+                    'supported_by': 'community',
+                    'version': '1.0'}
+
 DOCUMENTATION = '''
 ---
 module: win_webpicmd