Initiële commit

This commit is contained in:
2022-05-26 23:12:09 +02:00
commit cef6b5dab8
5 changed files with 436 additions and 0 deletions

160
.gitignore vendored Normal file
View File

@ -0,0 +1,160 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

3
README.md Normal file
View File

@ -0,0 +1,3 @@
# Ansible Collection - kembit.topdesk
Documentation for the collection.

62
galaxy.yml Normal file
View File

@ -0,0 +1,62 @@
### REQUIRED
# The namespace of the collection. This can be a company/brand/organization or product namespace under which all
# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with
# underscores or numbers and cannot contain consecutive underscores
namespace: kembit
# The name of the collection. Has the same character restrictions as 'namespace'
name: topdesk
# The version of the collection. Must be compatible with semantic versioning
version: 1.0.0
# The path to the Markdown (.md) readme file. This path is relative to the root of the collection
readme: README.md
# A list of the collection's content authors. Can be just the name or in the format 'Full Name <email> (url)
# @nicks:irc/im.site#channel'
authors:
- Martijn Remmen <mremmen@kembit.nl>
### OPTIONAL but strongly recommended
# A short summary description of the collection
description: Een Ansible collectie voor het integreren met TOPDesk
# Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only
# accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file'
license:
- GPL-2.0-or-later
# The path to the license file for the collection. This path is relative to the root of the collection. This key is
# mutually exclusive with 'license'
license_file: ''
# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character
# requirements as 'namespace' and 'name'
tags: []
# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
# collection label 'namespace.name'. The value is a version range
# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
# range specifiers can be set and are separated by ','
dependencies: {}
# The URL of the originating SCM repository
repository: http://example.com/repository
# The URL to any online docs
documentation: http://docs.example.com
# The URL to the homepage of the collection/project
homepage: http://example.com
# The URL to the collection issue tracker
issues: http://example.com/issue/tracker
# A list of file glob-like patterns used to filter any files or directories that should not be included in the build
# artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This
# uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry',
# and '.git' are always filtered
build_ignore: []

View File

@ -0,0 +1,210 @@
from __future__ import (absolute_import, division, print_function)
from binascii import Incomplete
from email.policy import default
__metaclass__ = type
DOCUMENTATION = r'''
---
name: topdesk_am
short_description: Topdesk Asset Management Inventory
author: Martijn Remmen <mremmen@kembit.nl>
description:
- Constructs an inventory from TOPdesk Asset Management.
options:
url:
type: str
required: true
desciprion:
- The TOPDesk url
username:
type: str
required: true
description:
- A TOPDesk username for authenticating with the TOPDesk API
application_key:
type: str
required: true
description:
- A TOPDesk application key associated with the given username.
- For instructions on creating a key: https://developers.topdesk.com/tutorial.html
fields:
type: list
required: true
description:
- A list of fields from Asset Management that you want to be included
- with the hosts.
names:
type: list
required: true
description:
- A list containing the unique names from all assets you want to include
- in the inventory
ansible_host:
type: str
required: true
description:
- The fieldname of the value to be used as ansible_host.
- This should be the field with a reachable hostname or IP address
groupby:
type: list
description:
- Group hosts based on field(s)
'''
EXAMPLES = '''
'''
from ansible.plugins.inventory import BaseInventoryPlugin
from ansible.utils.display import Display
import requests
display = Display()
class InventoryModule(BaseInventoryPlugin):
    """Ansible inventory plugin that builds hosts from TOPdesk Asset Management."""

    NAME = 'kembit.topdesk.topdesk_am'

    def __init__(self):
        # Bug fix: super() must receive the *current* class. Passing
        # BaseInventoryPlugin made the MRO skip BaseInventoryPlugin's own
        # __init__ and run object.__init__ instead.
        super(InventoryModule, self).__init__()

    def verify_file(self, path):
        # NOTE(review): accepts every path unconditionally; consider limiting
        # to a recognisable filename pattern — TODO confirm desired behaviour.
        return True

    def parse(self, inventory, loader, path, cache=True):
        """Populate the inventory from TOPdesk Asset Management.

        Reads the plugin options, fetches the requested assets, replaces
        reference-field IDs with readable text, and registers each asset as
        a host with its selected fields set as host variables.
        """
        super(InventoryModule, self).parse(inventory, loader, path)
        self._read_config_data(path)

        url = self.get_option('url')
        username = self.get_option('username')
        application_key = self.get_option('application_key')
        fields = self.get_option('fields')
        device_names = self.get_option('names')
        ansible_host_field = self.get_option('ansible_host')
        # Read for future grouping support; currently unused by the plugin.
        groupby = self.get_option('groupby')

        td = Topdesk(url, username, application_key)
        columns, devices = devices_lookup(td, device_names, fields)
        column_lookup_table = create_id_lookup_table(td, columns)
        replace_ids(devices, column_lookup_table)

        for device in devices:
            name = device['name']
            self.inventory.add_host(name)
            self.inventory.set_variable(name, 'ansible_host', device[ansible_host_field])
            # Only expose the fields the user asked for; 'name' is already
            # the inventory hostname.
            for field, value in device.items():
                if field != 'name' and field in fields:
                    self.inventory.set_variable(name, field, value)
        display.display(f"Added {len(devices)} hosts")
class Topdesk:
    """Minimal TOPdesk REST client bound to one base URL and one credential pair."""

    def __init__(self, url: str, username: str, application_key: str) -> None:
        self.url = url
        self.api_url = url + '/tas/api'
        self._session = requests.Session()
        self._session.auth = (username, application_key)
        self._headers = {'accept': 'application/json'}
        # Bug fix: the accept header was stored but never attached to any
        # request; merge it into the session so every call asks for JSON.
        self._session.headers.update(self._headers)

    def get(self, endpoint: str, **kwargs) -> requests.Response:
        """Perform an authenticated GET on ``endpoint`` (relative to the API root)."""
        # Bug fix: return annotation corrected — Session.get returns a
        # Response, not a Request.
        return self._session.get(self.api_url + endpoint, **kwargs)
def get_asset(td: Topdesk, parameters: dict):
    """Query the Asset Management assets endpoint with the given parameters."""
    endpoint = '/assetmgmt/assets'
    return td.get(endpoint, params=parameters)
def create_parameters(
    fields: list[str],
    name: str,
    includefunctionalities: bool = False,
    includesettings: bool = False,
    includetemplates: bool = False
):
    """Build the query-parameter dict for looking up one asset by name.

    The boolean flags are rendered as the strings 'True'/'False', matching
    what the original caller sends to the API.
    """
    params = {'fields': ','.join(fields)}
    params['$filter'] = f'name eq {name}'
    params['includeFunctionalities'] = str(includefunctionalities)
    params['includeSettings'] = str(includesettings)
    params['includeTemplates'] = str(includetemplates)
    return params
def create_id_lookup_table(td: Topdesk, columns: dict) -> dict:
    """Build, per reference column, a mapping of dataset entry IDs to text.

    Columns whose metadata carries no ``properties``/``url`` are skipped;
    for the rest the column's dataset is fetched and flattened into an
    ``{id: text}`` dict keyed by the column name.
    """
    lookup = {}
    for name, meta in columns.items():
        props = meta.get('properties')
        if not props:
            continue
        url = props.get('url')
        if not url:
            continue
        dataset = td.get("/assetmgmt/" + url).json()['dataSet']
        lookup[name] = {entry['id']: entry['text'] for entry in dataset}
    return lookup
def devices_lookup(td: Topdesk, device_names: list[str], fields: list[str]):
    """Fetch the requested assets one by one.

    Returns a tuple ``(columns, devices)``: column metadata keyed by field
    name and the accumulated list of asset records.
    """
    column_info = {}
    assets = []
    for device_name in device_names:
        response = get_asset(td, create_parameters(fields, device_name)).json()
        for column in response['columns']:
            column_info[column['fieldName']] = column
        assets.extend(response['dataSet'])
    return column_info, assets
def replace_ids(devices, column_lookup_table):
    """Resolve reference-field IDs to their display text, in place.

    For every device, each field that appears in ``column_lookup_table``
    has its ID value swapped for the corresponding human-readable text.
    """
    for device in devices:
        for field in column_lookup_table:
            value = device.get(field)
            if value:  # sometimes the value is None
                device[field] = column_lookup_table[field][value]
def main():
    """No-op entry point; the plugin is driven by Ansible, not the CLI."""


if __name__ == '__main__':
    main()

1
requirements.txt Normal file
View File

@ -0,0 +1 @@
requests>=2.27.1