added Ansible and fixed minor issues
parent bb2ce4a830
commit 52bd06c871
@@ -3,7 +3,7 @@ import configparser
import sys
import os
from pyexecplugins.pyexecplugins import *
from pyexecplugins.GatewaysToRestTask import *
from pyexecplugins.Ansible import Ansible

def pyexec(task):
    try:

@@ -0,0 +1,117 @@
from pyexecplugins.pyexecplugins import PyExecPlugin
import json
import shutil
import ansible.constants as C
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.module_utils.common.collections import ImmutableDict
from ansible.inventory.manager import InventoryManager
from ansible.parsing.dataloader import DataLoader
from ansible.playbook.play import Play
from ansible.plugins.callback import CallbackBase
from ansible.vars.manager import VariableManager
from ansible import context


# Create a callback plugin so we can capture the output
class ResultsCollectorJSONCallback(CallbackBase):
    """A sample callback plugin used for performing an action as results come in.

    If you want to collect all results into a single object for processing at
    the end of the execution, look into utilizing the ``json`` callback plugin
    or writing your own custom callback plugin.
    """

    def __init__(self, *args, **kwargs):
        super(ResultsCollectorJSONCallback, self).__init__(*args, **kwargs)
        self.host_ok = []
        self.host_unreachable = []
        self.host_failed = []

    def v2_runner_on_unreachable(self, result):
        host = result._host
        self.host_unreachable.append({"host": host.get_name(), "result": result._result})

    def v2_runner_on_ok(self, result, *args, **kwargs):
        host = result._host
        self.host_ok.append({"host": host.get_name(), "result": result._result})

    def v2_runner_on_failed(self, result, *args, **kwargs):
        host = result._host
        self.host_failed.append({"host": host.get_name(), "result": result._result})


class Ansible(PyExecPlugin):
    name = "Ansible"

    def __init__(self, data=None):
        super().__init__(data)
        self.playbook = self.data.get("playbook")
        self.hosts = self.data.get("hosts", ["localhost"])
        self.connection = self.data.get("connection", "local")
        self.verbosity = self.data.get("verbosity", 0)
        self.extra_vars = [self.data.get("extra_vars", {})]  # default to an empty dict so the .get() lookups in execute() do not fail when extra_vars is omitted
        self.gather_facts = self.data.get("gather_facts", False)

    def execute(self):
        # since the API is constructed for CLI it expects certain options to always be set in the context object
        context.CLIARGS = ImmutableDict(connection=self.connection, forks=10, become=False,
                                        become_method="sudo", become_user=None, check=False,
                                        diff=False, verbosity=self.verbosity, extra_vars=self.extra_vars)

        # required for
        # https://github.com/ansible/ansible/blob/devel/lib/ansible/inventory/manager.py#L204
        if self.extra_vars[0].get("ansible_host"):
            sources = self.extra_vars[0].get("ansible_host")
        else:
            sources = ','.join(self.hosts)

        if len(self.hosts) == 1:
            sources += ','

        # initialize needed objects
        loader = DataLoader()  # Takes care of finding and reading yaml, json and ini files
        passwords = dict()

        # Instantiate our ResultsCollectorJSONCallback for handling results as they come in.
        # Ansible expects this to be one of its main display outlets.
        results_callback = ResultsCollectorJSONCallback()

        # create inventory, use path to host config file as source or hosts in a comma separated string
        inventory = InventoryManager(loader=loader, sources=sources)

        # variable manager takes care of merging all the different sources to give you a unified
        # view of variables available in each context
        variable_manager = VariableManager(loader=loader, inventory=inventory)

        # instantiate task queue manager, which takes care of forking and setting up all objects
        # to iterate over host list and tasks
        # IMPORTANT: This also adds library dirs paths to the module loader
        # IMPORTANT: and so it must be initialized before calling `Play.load()`.
        tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            passwords=passwords,
            stdout_callback=results_callback,  # Use our custom callback instead of the ``default`` callback plugin, which prints to stdout
        )

        # create data structure that represents our play
        play_sources = loader.load(self.playbook)
        for play_source in play_sources:
            print(">>> Executing playsource:", play_source)
            play_source["gather_facts"] = self.gather_facts
            # Create play object, playbook objects use .load instead of init or new methods,
            # this will also automatically create the task objects from the info provided in play_source
            play = Play().load(play_source, variable_manager=variable_manager, loader=loader)

            # Actually run it
            result = tqm.run(play)  # most interesting data for a play is actually sent to the callback's methods

        # we always need to cleanup child procs and the structures we use to communicate with them
        tqm.cleanup()
        if loader:
            loader.cleanup_all_tmp_files()

        # Remove ansible tmpdir
        shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)

        return {
            "ok": results_callback.host_ok,
            "failed": results_callback.host_failed,
            "unreachable": results_callback.host_unreachable
        }
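
For context, a minimal usage sketch of the plugin above. It is not part of the commit: the playbook YAML, host name, and data values are invented, and it assumes the PyExecPlugin base class stores the constructor payload as self.data, as the .get() calls above suggest.

# Hypothetical driver for the Ansible plugin above (illustration only).
from pyexecplugins.Ansible import Ansible

# execute() hands self.playbook to DataLoader.load(), so a YAML string
# containing a list of plays is one valid input.
PLAYBOOK = """
- name: ping the target
  hosts: all
  tasks:
    - name: ping
      ping:
"""

plugin = Ansible(data={
    "playbook": PLAYBOOK,
    "hosts": ["localhost"],   # default when omitted
    "connection": "local",    # default when omitted
    "verbosity": 0,
    "extra_vars": {},         # may also carry "ansible_host" to override the inventory sources
    "gather_facts": False,
})

result = plugin.execute()
# result has the shape {"ok": [...], "failed": [...], "unreachable": [...]},
# where each entry is {"host": <host name>, "result": <raw Ansible result dict>}.
print(result["failed"] or result["unreachable"] or result["ok"])
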
pyrest.py
@@ -4,16 +4,19 @@ import configparser
 import sys
 import os

+def log(task, message):
+    print("[{}:{}][{}:{}] - {}".format(task["workflowType"], task["workflowInstanceId"], task["taskDefName"], task["workflowTask"]["taskReferenceName"], message))
 def pyrest(task):
-    print("Executing task {} of type {}[{}] from wkf {}[{}]".format(task["workflowTask"]["taskReferenceName"], task["taskDefName"], task["taskId"], task["workflowType"], task["workflowInstanceId"]))
-    print(task["inputData"].get("method"), task["inputData"]["url"], task["inputData"].get("body"))
+    log(task, "Starting execution ...")
+    log(task, "Data is {} {} {}".format(task["inputData"].get("method"), task["inputData"]["url"], task["inputData"].get("body")))
     http = Http(task["inputData"])
     try:
-        return http.execute()
+        ret = http.execute()
+        return ret
     except Exception as exc:
         exc_type, exc_obj, exc_tb = sys.exc_info()
         fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
         print(exc_type, exc_obj, exc_tb, fname)
         return {
             "status" : "FAILED",
             "output" : { "message" : "Internal error: {}".format(exc)}, "logs" : ["one","two"]
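
For reference, a rough sketch of the task payload shape that pyrest() and the new log() helper index into. Every value below is invented; only the key names come from the code above.

# Invented example payload; only the key names are taken from pyrest()/log().
task = {
    "workflowType": "deploy_service",
    "workflowInstanceId": "wf-123",
    "taskDefName": "pyrest",
    "taskId": "task-456",
    "workflowTask": {"taskReferenceName": "call_backend"},
    "inputData": {"method": "GET", "url": "http://example.com/health", "body": None},
}

# log(task, "Starting execution ...") would then print:
# [deploy_service:wf-123][pyrest:call_backend] - Starting execution ...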