Init
This commit is contained in:
commit
d754a8f02a
3
.vscode/settings.json
vendored
Normal file
3
.vscode/settings.json
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
{
|
||||
"ansible.python.interpreterPath": "/root/.virtualenvs/ansible/bin/python"
|
||||
}
|
3
README.md
Normal file
3
README.md
Normal file
@ -0,0 +1,3 @@
|
||||
# Ansible Collection - sebclem.haproxy
|
||||
|
||||
Documentation for the collection.
|
69
galaxy.yml
Normal file
69
galaxy.yml
Normal file
@ -0,0 +1,69 @@
|
||||
### REQUIRED
|
||||
# The namespace of the collection. This can be a company/brand/organization or product namespace under which all
|
||||
# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with
|
||||
# underscores or numbers and cannot contain consecutive underscores
|
||||
namespace: sebclem
|
||||
|
||||
# The name of the collection. Has the same character restrictions as 'namespace'
|
||||
name: haproxy
|
||||
|
||||
# The version of the collection. Must be compatible with semantic versioning
|
||||
version: 1.0.0
|
||||
|
||||
# The path to the Markdown (.md) readme file. This path is relative to the root of the collection
|
||||
readme: README.md
|
||||
|
||||
# A list of the collection's content authors. Can be just the name or in the format 'Full Name <email> (url)
|
||||
# @nicks:irc/im.site#channel'
|
||||
authors:
|
||||
- your name <example@domain.com>
|
||||
|
||||
|
||||
### OPTIONAL but strongly recommended
|
||||
# A short summary description of the collection
|
||||
description: your collection description
|
||||
|
||||
# Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only
|
||||
# accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file'
|
||||
license:
|
||||
- GPL-2.0-or-later
|
||||
|
||||
# The path to the license file for the collection. This path is relative to the root of the collection. This key is
|
||||
# mutually exclusive with 'license'
|
||||
license_file: ''
|
||||
|
||||
# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character
|
||||
# requirements as 'namespace' and 'name'
|
||||
tags: []
|
||||
|
||||
# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
|
||||
# collection label 'namespace.name'. The value is a version range
|
||||
# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
|
||||
# range specifiers can be set and are separated by ','
|
||||
dependencies: {}
|
||||
|
||||
# The URL of the originating SCM repository
|
||||
repository: http://example.com/repository
|
||||
|
||||
# The URL to any online docs
|
||||
documentation: http://docs.example.com
|
||||
|
||||
# The URL to the homepage of the collection/project
|
||||
homepage: http://example.com
|
||||
|
||||
# The URL to the collection issue tracker
|
||||
issues: http://example.com/issue/tracker
|
||||
|
||||
# A list of file glob-like patterns used to filter any files or directories that should not be included in the build
|
||||
# artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This
|
||||
# uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry',
|
||||
# and '.git' are always filtered. Mutually exclusive with 'manifest'
|
||||
build_ignore: []
|
||||
|
||||
# A dict controlling use of manifest directives used in building the collection artifact. The key 'directives' is a
|
||||
# list of MANIFEST.in style
|
||||
# L(directives,https://packaging.python.org/en/latest/guides/using-manifest-in/#manifest-in-commands). The key
|
||||
# 'omit_default_directives' is a boolean that controls whether the default directives are used. Mutually exclusive
|
||||
# with 'build_ignore'
|
||||
# manifest: null
|
||||
|
52
meta/runtime.yml
Normal file
52
meta/runtime.yml
Normal file
@ -0,0 +1,52 @@
|
||||
---
|
||||
# Collections must specify a minimum required ansible version to upload
|
||||
# to galaxy
|
||||
# requires_ansible: '>=2.9.10'
|
||||
|
||||
# Content that Ansible needs to load from another location or that has
|
||||
# been deprecated/removed
|
||||
# plugin_routing:
|
||||
# action:
|
||||
# redirected_plugin_name:
|
||||
# redirect: ns.col.new_location
|
||||
# deprecated_plugin_name:
|
||||
# deprecation:
|
||||
# removal_version: "4.0.0"
|
||||
# warning_text: |
|
||||
# See the porting guide on how to update your playbook to
|
||||
# use ns.col.another_plugin instead.
|
||||
# removed_plugin_name:
|
||||
# tombstone:
|
||||
# removal_version: "2.0.0"
|
||||
# warning_text: |
|
||||
# See the porting guide on how to update your playbook to
|
||||
# use ns.col.another_plugin instead.
|
||||
# become:
|
||||
# cache:
|
||||
# callback:
|
||||
# cliconf:
|
||||
# connection:
|
||||
# doc_fragments:
|
||||
# filter:
|
||||
# httpapi:
|
||||
# inventory:
|
||||
# lookup:
|
||||
# module_utils:
|
||||
# modules:
|
||||
# netconf:
|
||||
# shell:
|
||||
# strategy:
|
||||
# terminal:
|
||||
# test:
|
||||
# vars:
|
||||
|
||||
# Python import statements that Ansible needs to load from another location
|
||||
# import_redirection:
|
||||
# ansible_collections.ns.col.plugins.module_utils.old_location:
|
||||
# redirect: ansible_collections.ns.col.plugins.module_utils.new_location
|
||||
|
||||
# Groups of actions/modules that take a common set of options
|
||||
# action_groups:
|
||||
# group_name:
|
||||
# - module1
|
||||
# - module2
|
31
plugins/README.md
Normal file
31
plugins/README.md
Normal file
@ -0,0 +1,31 @@
|
||||
# Collections Plugins Directory
|
||||
|
||||
This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that
|
||||
is named after the type of plugin it is in. It can also include the `module_utils` and `modules` directory that
|
||||
would contain module utils and modules respectively.
|
||||
|
||||
Here is an example directory of the majority of plugins currently supported by Ansible:
|
||||
|
||||
```
|
||||
└── plugins
|
||||
├── action
|
||||
├── become
|
||||
├── cache
|
||||
├── callback
|
||||
├── cliconf
|
||||
├── connection
|
||||
├── filter
|
||||
├── httpapi
|
||||
├── inventory
|
||||
├── lookup
|
||||
├── module_utils
|
||||
├── modules
|
||||
├── netconf
|
||||
├── shell
|
||||
├── strategy
|
||||
├── terminal
|
||||
├── test
|
||||
└── vars
|
||||
```
|
||||
|
||||
A full list of plugin types can be found at [Working With Plugins](https://docs.ansible.com/ansible-core/2.15/plugins/plugins.html).
|
BIN
plugins/action/__pycache__/load_haproxy_config.cpython-311.pyc
Normal file
BIN
plugins/action/__pycache__/load_haproxy_config.cpython-311.pyc
Normal file
Binary file not shown.
249
plugins/action/load_haproxy_config.py
Normal file
249
plugins/action/load_haproxy_config.py
Normal file
@ -0,0 +1,249 @@
|
||||
# Copyright: (c) 2016, Allen Sanabria <asanabria@linuxdynasty.org>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
from os import path, walk
|
||||
import re
|
||||
|
||||
import ansible.constants as C
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.six import string_types
|
||||
from ansible.module_utils.common.text.converters import to_native, to_text
|
||||
from ansible.plugins.action import ActionBase
|
||||
from ansible.utils.vars import combine_vars
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
class ActionModule(ActionBase):
    """Action plugin backing ``sebclem.haproxy.load_haproxy_config``.

    Recursively loads per-service YAML files from a directory on the
    controller and merges them into a single ``haproxy_config`` fact
    (domain mappings, DNS records, protected domains, backend configs).
    """

    # Nothing is copied to the managed host; all work happens locally.
    TRANSFERS_FILES = False
    # No remote connection is required to run this action.
    _requires_connection = False
|
||||
|
||||
def run(self, tmp=None, task_vars=None):
    """Load yml files recursively from a directory and build haproxy facts.

    Args:
        tmp: Unused; kept only for the ActionBase interface.
        task_vars (dict): Task variables supplied by the executor.

    Returns:
        dict: Standard action result; on success ``ansible_facts`` carries
        the aggregated configuration under the ``haproxy_config`` key and
        ``ansible_included_var_files`` lists every file that was loaded.

    Raises:
        AnsibleError: If any of the mandatory task options is missing.
    """
    del tmp  # tmp no longer has any effect

    if task_vars is None:
        task_vars = dict()
    self.show_content = True
    self.included_files = []

    module_args = self._task.args.copy()

    # Validate arguments: all four options are mandatory; fail early with
    # a clear message naming the missing one.
    for required in ('dir', 'default_domain', 'default_dns_provider', 'default_dns_target'):
        if required not in module_args:
            raise AnsibleError("'{0}' option is mandatory in load_haproxy_config".format(required))

    self.source_dir = module_args.get('dir')
    self.default_domain = module_args.get('default_domain')
    self.default_dns_provider = module_args.get('default_dns_provider')
    self.default_dns_target = module_args.get('default_dns_target')
    self.dir = module_args.get('dir')
    self.depth = module_args.get('depth', 0)

    results = {
        "domain_maping": [],
        "dns_hostnames": dict(),  # { provider: [ { hostname:"", domain:"", state: "", target: "" } ] }
        "protected_domain": [],
        "backend_config": []
    }
    failed = False
    err_msg = ''

    self._set_root_dir()
    if not path.exists(self.source_dir):
        failed = True
        err_msg = ('{0} directory does not exist'.format(to_native(self.source_dir)))
    elif not path.isdir(self.source_dir):
        failed = True
        err_msg = ('{0} is not a directory'.format(to_native(self.source_dir)))
    else:
        for root_dir, filenames in self._traverse_dir_depth():
            failed, err_msg, updated_results = (self._load_files_in_dir(root_dir, filenames))
            if failed:
                break
            results['domain_maping'] = results['domain_maping'] + updated_results['domain_maping']
            results['protected_domain'] = results['protected_domain'] + updated_results['protected_domain']
            results['backend_config'] = results['backend_config'] + updated_results['backend_config']
            # dns_hostnames is keyed by provider; merge each provider's list.
            for key, value in updated_results['dns_hostnames'].items():
                results['dns_hostnames'][key] = results['dns_hostnames'].get(key, []) + value

    result = super(ActionModule, self).run(task_vars=task_vars)

    if failed:
        result['failed'] = failed
        result['message'] = err_msg
    # Nest everything under a single 'haproxy_config' fact.
    scope = dict()
    scope['haproxy_config'] = results
    results = scope
    result['ansible_included_var_files'] = self.included_files
    result['ansible_facts'] = results
    result['_ansible_no_log'] = not self.show_content

    return result
|
||||
|
||||
def _set_root_dir(self):
    # Resolve self.source_dir following include_vars-like lookup rules:
    # - inside a role, a path already starting with 'vars/' is resolved
    #   against the role path (only if it exists); any other relative
    #   path is resolved under the role's vars/ directory
    # - outside a role, the path is resolved relative to the directory
    #   of the file that defined the task
    if self._task._role:
        if self.source_dir.split('/')[0] == 'vars':
            path_to_use = (
                path.join(self._task._role._role_path, self.source_dir)
            )
            if path.exists(path_to_use):
                self.source_dir = path_to_use
        else:
            path_to_use = (
                path.join(
                    self._task._role._role_path, 'vars', self.source_dir
                )
            )
            self.source_dir = path_to_use
    else:
        # NOTE(review): _ds._data_source is a private task attribute;
        # presumably the path of the playbook/tasks file — confirm
        # against the ansible-core version in use.
        if hasattr(self._task._ds, '_data_source'):
            current_dir = (
                "/".join(self._task._ds._data_source.split('/')[:-1])
            )
            self.source_dir = path.join(current_dir, self.source_dir)
|
||||
|
||||
def _log_walk(self, error):
    # Surface (at -vvv verbosity) any OSError raised while walking the tree.
    filename = to_native(error.filename)
    details = to_native(error)
    self._display.vvv('Issue with walking through "%s": %s' % (filename, details))
|
||||
|
||||
def _traverse_dir_depth(self):
    """Walk self.source_dir, yielding (directory, sorted file names) pairs.

    Directories are visited in alphabetical order of their path. At most
    self.depth directories are yielded; a depth of 0 means unlimited.
    Walk errors are reported via self._log_walk; symlinks are followed.
    """
    entries = sorted(
        walk(self.source_dir, onerror=self._log_walk, followlinks=True),
        key=lambda entry: entry[0],
    )
    visited = 0
    for dir_path, _subdirs, file_names in entries:
        visited += 1
        if self.depth != 0 and visited > self.depth:
            break
        file_names.sort()
        yield (dir_path, file_names)
|
||||
|
||||
def _load_files(self, filename):
    """ Loads a file and converts the output into a valid Python dict.
    Args:
        filename (str): The source file.

    Returns:
        Tuple (bool, str, dict): (failed, error message, parsed content).
        On success the filename is also appended to self.included_files.
    """
    results = dict()
    failed = False
    err_msg = ''
    # Read through the loader so vaulted files are handled; show_content
    # tells us whether the content may safely appear in logs.
    b_data, show_content = self._loader._get_file_contents(filename)
    data = to_text(b_data, errors='surrogate_or_strict')

    self.show_content = show_content
    data = self._loader.load(data, file_name=filename, show_content=show_content)
    # An empty file parses to None; normalize to an empty dict.
    if not data:
        data = dict()
    if not isinstance(data, dict):
        failed = True
        err_msg = ('{0} must be stored as a dictionary/hash'.format(to_native(filename)))
    else:
        self.included_files.append(filename)
        results.update(data)

    return failed, err_msg, results
|
||||
|
||||
def _load_files_in_dir(self, root_dir, var_files):
    """ Load the found yml files and build haproxy config fragments.

    Args:
        root_dir (str): The base directory of the list of files that is being passed.
        var_files: (list): List of files to iterate over and load into a dictionary.

    Returns:
        Tuple (bool, str, dict): (failed, error message, partial results)

    Each YAML file describes one service: the file stem becomes the main
    hostname, and the file may define 'dns', 'protected', 'state',
    'additionnal_hostname' and a mandatory 'backend' entry.
    NOTE: the result key 'domain_maping' and the input key
    'additionnal_hostname' are misspelled on purpose — they are part of
    the existing external contract and must not be renamed here.
    """
    results = {
        "domain_maping": [],
        "dns_hostnames": dict(),  # { provider: [ { hostname:"", domain:"", state: "", target: "" } ] }
        "protected_domain": [],
        "backend_config": []
    }
    failed = False
    err_msg = ''
    for filename in var_files:
        stop_iter = False
        # Never include main.yml from a role, as that is the default included by the role
        if self._task._role:
            if path.join(self._task._role._role_path, filename) == path.join(root_dir, 'vars', 'main.yml'):
                stop_iter = True
                continue

        filepath = path.join(root_dir, filename)
        if not stop_iter and not failed:
            if path.exists(filepath):
                failed, err_msg, loaded_data = self._load_files(filepath)
                if not failed:
                    # The file name (without extension) is the primary hostname.
                    main_hostname = Path(filepath).stem
                    dns = loaded_data.get("dns", dict())
                    domain = dns.get("domain", self.default_domain)
                    dns_provider = dns.get("provider", self.default_dns_provider)
                    # BUGFIX: the fallback here used to be
                    # self.default_dns_provider, silently pointing DNS
                    # records at the provider name instead of the target.
                    dns_target = dns.get("target", self.default_dns_target)
                    protected = loaded_data.get("protected", False)
                    additionnal_hostname = loaded_data.get('additionnal_hostname', [])
                    state = loaded_data.get("state", "present")
                    if "backend" not in loaded_data:
                        failed = True
                        err_msg = ('Could not find "backend" in {0}'.format(to_native(filename)))
                        continue
                    backend = loaded_data.get("backend")

                    if state == "present":
                        results['domain_maping'].append('{0}.{1} {2}'.format(main_hostname, domain, backend.get("name")))
                        results['backend_config'].append(backend)
                        if protected:
                            results['protected_domain'].append('{0}.{1}'.format(main_hostname, domain))

                    if not dns.get("skip", False):
                        if dns_provider not in results['dns_hostnames']:
                            results['dns_hostnames'].update({dns_provider: []})

                        results['dns_hostnames'][dns_provider].append({
                            "hostname": main_hostname,
                            "domain": domain,
                            "target": dns_target,
                            "state": state,
                        })

                    # Extra hostnames may individually override dns settings,
                    # protection and state, falling back to the file-level values.
                    for host in additionnal_hostname:
                        this_dns = host.get("dns", dns)
                        this_domain = this_dns.get("domain", domain)
                        this_dns_provider = this_dns.get("provider", dns_provider)
                        this_dns_target = this_dns.get("target", dns_target)
                        this_protected = host.get('protected', protected)
                        this_state = host.get('state', state)

                        if this_state == "present":
                            results['domain_maping'].append('{0}.{1} {2}'.format(host.get("hostname"), this_domain, backend.get("name")))
                            if this_protected:
                                results['protected_domain'].append('{0}.{1}'.format(host.get("hostname"), this_domain))

                        if not this_dns.get("skip", dns.get("skip", False)):
                            if this_dns_provider not in results['dns_hostnames']:
                                results['dns_hostnames'].update({this_dns_provider: []})

                            results['dns_hostnames'][this_dns_provider].append({
                                "hostname": host.get("hostname"),
                                "domain": this_domain,
                                "target": this_dns_target,
                                "state": this_state
                            })
    return failed, err_msg, results
|
98
plugins/modules/load_haproxy_config.py
Normal file
98
plugins/modules/load_haproxy_config.py
Normal file
@ -0,0 +1,98 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
# Module documentation: 'description' was an empty list ([]), which renders
# no description in ansible-doc; filled in from the plugin's actual behavior.
DOCUMENTATION = r'''
---
author: Sebastien Clement (@sebclem)
module: load_haproxy_config
short_description: Load variables from files for haproxy config
description:
  - Recursively loads per-service YAML files from a directory and exposes the
    combined haproxy configuration (domain mappings, DNS records, protected
    domains and backend definitions) as the C(haproxy_config) fact.
version_added: "2.7"
options:
  dir:
    description:
      - The directory name from which the variables should be loaded.
      - If the path is relative and the task is inside a role, it will look inside the role's vars/ subdirectory.
      - If the path is relative and not inside a role, it will be parsed relative to the playbook.
    type: path
    version_added: "2.1"
    required: true
  depth:
    description:
      - This module will, by default, recursively go through each sub directory and load up the
        variables. By explicitly setting the depth, this module will only go as deep as the depth.
    type: int
    default: 0
    version_added: "2.2"
  default_domain:
    description:
      - Default root domain
    type: str
    version_added: "2.2"
    required: true
  default_dns_provider:
    description:
      - Default dns provider
    type: str
    version_added: "2.2"
    required: true
  default_dns_target:
    description:
      - Default dns target
    type: str
    version_added: "2.2"
    required: true

attributes:
  action:
    details: While the action plugin does do some of the work it relies on the core engine to actually create the variables, that part cannot be overridden
    support: partial
  bypass_host_loop:
    support: none
  bypass_task_loop:
    support: none
  check_mode:
    support: full
  delegation:
    details:
      - while variable assignment can be delegated to a different host the execution context is always the current inventory_hostname
      - connection variables, if set at all, would reflect the host it would target, even if we are not connecting at all in this case
    support: partial
  diff_mode:
    support: none
  core:
    details: While parts of this action are implemented in core, other parts are still available as normal plugins and can be partially overridden
    support: partial
seealso:
  - module: ansible.builtin.set_fact
  - ref: playbooks_delegation
    description: More information related to task delegation.
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Test
|
||||
sebclem.haproxy.load_haproxy_config:
|
||||
dir: vars/
|
||||
default_domain: default_domain
|
||||
default_dns_provider: default_dns_provider
|
||||
default_dns_target: default_dns_target
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
ansible_facts:
|
||||
description: Variables that were included and their values
|
||||
returned: success
|
||||
type: dict
|
||||
sample: {'variable': 'value'}
|
||||
ansible_included_var_files:
|
||||
description: A list of files that were successfully included
|
||||
returned: success
|
||||
type: list
|
||||
sample: [ /path/to/file.json, /path/to/file.yaml ]
|
||||
version_added: '2.4'
|
||||
'''
|
Loading…
Reference in New Issue
Block a user