From 0269406b7a6a5629c2f0a059761dfe770a671b60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergio=20Gim=C3=A9nez?= <sergio.gimenez@i2cat.net> Date: Fri, 8 Sep 2023 13:44:27 +0200 Subject: [PATCH] Add hotfix for filtering via slugs --- .dockerignore | 88 +++++++ .env.example | 6 + .github/dependabot.yml | 14 + .github/workflows/docker.yml | 60 +++++ .github/workflows/stale.yml | 24 ++ .gitignore | 135 ++++++++++ Dockerfile | 16 ++ LICENSE | 21 ++ README.md | 103 ++++++++ log_handler.py | 44 ++++ nb-dt-import.py | 54 ++++ netbox_api.py | 485 +++++++++++++++++++++++++++++++++++ repo.py | 109 ++++++++ requirements.txt | 4 + settings.py | 52 ++++ 15 files changed, 1215 insertions(+) create mode 100644 .dockerignore create mode 100644 .env.example create mode 100644 .github/dependabot.yml create mode 100644 .github/workflows/docker.yml create mode 100644 .github/workflows/stale.yml create mode 100644 .gitignore create mode 100644 Dockerfile create mode 100644 LICENSE create mode 100644 log_handler.py create mode 100755 nb-dt-import.py create mode 100644 netbox_api.py create mode 100644 repo.py create mode 100644 requirements.txt create mode 100644 settings.py diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..20d4ddd --- /dev/null +++ b/.dockerignore @@ -0,0 +1,88 @@ +# Git +.git +.gitignore +.gitattributes + +# CI +.codeclimate.yml +.travis.yml +.taskcluster.yml + +# Docker +docker-compose.yml +Dockerfile +.docker +.dockerignore + +# Byte-compiled / optimized / DLL files +**/__pycache__/ +**/*.py[cod] + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.cache +nosetests.xml +coverage.xml + +# Translations +*.mo +*.pot + +# Django stuff: +*.log + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Virtual environment +.env +.venv/ +venv/ + +# PyCharm +.idea + +# Python mode for VIM +.ropeproject +**/.ropeproject + +# Vim swap files +**/*.swp + +# VS Code +.vscode/ \ No newline at end of file diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..5025ea1 --- /dev/null +++ b/.env.example @@ -0,0 +1,6 @@ +NETBOX_URL= +NETBOX_TOKEN= +REPO_URL=https://github.com/netbox-community/devicetype-library.git +REPO_BRANCH=master +IGNORE_SSL_ERRORS=False +#SLUGS=c9300-48u isr4431 isr4331 diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..f422d7c --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,14 @@ +--- +version: 2 +updates: + - package-ecosystem: pip + directory: / + schedule: + interval: monthly + time: '02:00' + timezone: America/New_York + labels: + - dependencies + target-branch: master + assignees: + - "danner26" \ No newline at end of file diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 0000000..2278447 --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,60 @@ +--- +name: ci + +on: + push: + branches: + - 'master' + - 'main' + pull_request: + branches: + - 'master' + - 'main' + workflow_dispatch: + release: + types: [published, edited] + +jobs: + build-and-push-images: + runs-on: ubuntu-latest + steps: + - + name: Checkout + uses: actions/checkout@v2 + - + name: Docker meta + id: meta + uses: docker/metadata-action@v3 + with: + images: | + ghcr.io/minitriga/Netbox-Device-Type-Library-Import + tags: | + type=raw,value=latest,enable=${{ endsWith(github.ref, github.event.repository.default_branch) }} + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}} + type=semver,pattern={{major}}.{{minor}} + - + name: Set up QEMU + uses: docker/setup-qemu-action@v1 + - + name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - + name: Login to GitHub Container Registry + if: github.event_name != 'pull_request' + uses: docker/login-action@v1 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + - + name: Build and push + uses: docker/build-push-action@v2 + with: + context: . + push: ${{ github.event_name != 'pull_request' }} + platforms: linux/amd64 + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} \ No newline at end of file diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 0000000..4a0237e --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,24 @@ +--- +#close-stale-issues (https://github.com/marketplace/actions/close-stale-issues) +name: Close stale PRs +on: # yamllint disable-line rule:truthy + schedule: + - cron: 0 4 * * * + +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v5 + with: + close-pr-message: > + This PR has been automatically closed due to lack of activity. + days-before-stale: 30 + days-before-close: 7 + operations-per-run: 100 + remove-stale-when-updated: false + stale-pr-label: stale + stale-pr-message: > + This PR has been automatically marked as stale because it has not + had recent activity. 
It will be closed automatically if no further + progress is made. \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..825e176 --- /dev/null +++ b/.gitignore @@ -0,0 +1,135 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env* +!.env.example +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# Editor +.vscode + +repo diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..fb6a2fb --- /dev/null +++ b/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.9-alpine + +ENV REPO_URL=https://github.com/netbox-community/devicetype-library.git +WORKDIR /app +COPY requirements.txt . + +# Install dependencies +RUN apk add --no-cache git ca-certificates && \ + python3 -m pip install --upgrade pip && \ + pip3 install -r requirements.txt + +# Copy over src code +COPY *.py ./ + +# -u to avoid stdout buffering +CMD ["python3","-u","nb-dt-import.py"] diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..4db3295 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Alexander Gittings + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
index e69de29..d3b41bb 100644
--- a/README.md
+++ b/README.md
@@ -0,0 +1,103 @@
+# Netbox Device Type Import
+
+This library is intended to be your friend and help you import all the device types defined within the [NetBox Device Type Library Repository](https://github.com/netbox-community/devicetype-library).
+
+> Tested working with NetBox 2.9.4 and 2.10.4
+
+## 🪄 Description
+
+This script clones a copy of the `netbox-community/devicetype-library` repository to your machine so that it can import the device types you would like without copying and pasting them into the NetBox UI.
+
+## 🚀 Getting Started
+
+1. This script is written in Python, so let's set up a virtual environment.
+
+```
+git clone https://github.com/netbox-community/Device-Type-Library-Import.git
+cd Device-Type-Library-Import
+python3 -m venv venv
+source venv/bin/activate
+```
+
+2. Now that we have the basics set up, we'll need to install the requirements.
+
+```
+pip install -r requirements.txt
+```
+
+3. Two variables are required for this script to import device types into your NetBox installation: (1) your NetBox instance URL and (2) an API token with **write** permissions.
+
+Copy the existing `.env.example` to your own `.env` file, and fill in the variables.
+
+```
+cp .env.example .env
+vim .env
+```
+
+Finally, we are ready to execute the script and import some device types!
+
+## 🔌 Usage
+
+To use the script, simply execute it as follows. Make sure you're still in the activated virtual environment we created before.
+
+```
+./nb-dt-import.py
+```
+
+This will clone the latest master branch of the `netbox-community/devicetype-library` repository from GitHub into the `repo` subdirectory. If this directory already exists, it will perform a `git pull` to update the repository instead.
+
+Next, it loops over every manufacturer and each of its device types, checks whether your NetBox installation already has them, and creates them if not. It skips preexisting manufacturers, devices, interfaces, etc. so you don't end up with duplicate entries in your NetBox instance.
+
+### 🧰 Arguments
+
+This script currently accepts a list of vendors as an argument, so that you can selectively import devices.
+
+To import only devices by APC, for example:
+
+```
+./nb-dt-import.py --vendors apc
+```
+
+`--vendors` can also accept a comma-separated list of vendors if you want to import multiple.
+
+```
+./nb-dt-import.py --vendors apc,juniper
+```
+
+Similarly, `--slugs` limits the import to specific device-type slugs. It accepts a space- or comma-separated list and matches slugs case-insensitively as substrings, e.g. `./nb-dt-import.py --slugs ws-c3850-24t-l`. The same filter can be set through the `SLUGS` environment variable.
+
+## Docker build
+
+It's possible to use this project as a Docker container.
+
+To build:
+
+```
+docker build -t netbox-devicetype-import-library .
+```
+
+Alternatively, you can pull a pre-built image from the GitHub Container Registry (ghcr.io):
+
+```
+docker pull ghcr.io/minitriga/netbox-device-type-library-import
+```
+
+The container supports the following environment variables as configuration:
+
+- `REPO_URL`, the repository to look for device types in (defaults to _https://github.com/netbox-community/devicetype-library.git_)
+- `REPO_BRANCH`, the branch to check out (defaults to `master`)
+- `NETBOX_URL`, the URL of the NetBox instance to import into
+- `NETBOX_TOKEN`, the API token used to access NetBox
+- `IGNORE_SSL_ERRORS`, set to `True` to disable SSL certificate verification (defaults to `False`)
+- `VENDORS`, a comma-separated list of vendors to import (defaults to importing all vendors)
+- `SLUGS`, a space-separated list of device-type slugs to import (defaults to importing all device types)
+
+To run:
+
+```
+docker run -e "NETBOX_URL=http://netbox:8080/" -e "NETBOX_TOKEN=98765434567890" ghcr.io/minitriga/netbox-device-type-library-import
+```
+
+## 🧑‍💻 Contributing
+
+We're happy about any pull requests!
+
+## 📜 License
+
+MIT
diff --git a/log_handler.py b/log_handler.py
new file mode 100644
index 0000000..46cdf0b
--- /dev/null
+++ b/log_handler.py
@@ -0,0 +1,44 @@
+from sys import exit as system_exit
+
+
+class LogHandler:
+    def __new__(cls, *args, **kwargs):
+        return super().__new__(cls)
+
+    def __init__(self, args):
+        self.args = args
+
+    def exception(self, exception_type, exception, stack_trace=None):
+        exception_dict = {
+            "EnvironmentError": f'Environment variable "{exception}" is not set.',
+            "SSLError": f'SSL verification failed. IGNORE_SSL_ERRORS is {exception}. Set IGNORE_SSL_ERRORS to True if you want to ignore this error. EXITING.',
+            "GitCommandError": f'The repo "{exception}" is not a valid git repo.',
+            "GitInvalidRepositoryError": f'The repo "{exception}" is not a valid git repo.',
+            "Exception": f'An unknown error occurred: "{exception}"'
+        }
+
+        if self.args.verbose and stack_trace:
+            print(stack_trace)
+        print(exception_dict[exception_type])
+        system_exit(1)
+
+    def verbose_log(self, message):
+        if self.args.verbose:
+            print(message)
+
+    def log(self, message):
+        print(message)
+
+    def log_device_ports_created(self, created_ports: list = [], port_type: str = "port"):
+        for port in created_ports:
+            self.verbose_log(f'{port_type} Template Created: {port.name} - '
+                             + f'{port.type if hasattr(port, "type") else ""} - {port.device_type.id} - '
+                             + f'{port.id}')
+        return len(created_ports)
+
+    def log_module_ports_created(self, created_ports: list = [], port_type: str = "port"):
+        for port in created_ports:
+            self.verbose_log(f'{port_type} Template Created: {port.name} - '
+                             + f'{port.type if hasattr(port, "type") else ""} - {port.module_type.id} - '
+                             + f'{port.id}')
+        return len(created_ports)
diff --git a/nb-dt-import.py b/nb-dt-import.py
new file mode 100755
index 0000000..8255528
--- /dev/null
+++ b/nb-dt-import.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python3
+from collections import Counter
+from datetime import datetime
+import yaml
+import pynetbox
+from glob import glob
+import os
+
+import settings
+from netbox_api import NetBox
+
+
+def main():
+    startTime = datetime.now()
+    args = settings.args
+
+    netbox = NetBox(settings)
+    files, vendors = settings.dtl_repo.get_devices(
+        f'{settings.dtl_repo.repo_path}/device-types/', args.vendors)
+
+    settings.handle.log(f'{len(vendors)} Vendors Found')
+    device_types = settings.dtl_repo.parse_files(files, slugs=args.slugs)
+    settings.handle.log(f'{len(device_types)} Device-Types Found')
+    netbox.create_manufacturers(vendors)
+    netbox.create_device_types(device_types)
+
+    if netbox.modules:
+        settings.handle.log("Modules Enabled. Creating Modules...")
+        files, vendors = settings.dtl_repo.get_devices(
+            f'{settings.dtl_repo.repo_path}/module-types/', args.vendors)
+        settings.handle.log(f'{len(vendors)} Module Vendors Found')
+        module_types = settings.dtl_repo.parse_files(files, slugs=args.slugs)
+        settings.handle.log(f'{len(module_types)} Module-Types Found')
+        netbox.create_manufacturers(vendors)
+        netbox.create_module_types(module_types)
+
+    settings.handle.log('---')
+    settings.handle.verbose_log(
+        f'Script took {(datetime.now() - startTime)} to run')
+    settings.handle.log(f'{netbox.counter["added"]} devices created')
+    settings.handle.log(f'{netbox.counter["images"]} images uploaded')
+    settings.handle.log(
+        f'{netbox.counter["updated"]} interfaces/ports updated')
+    settings.handle.log(
+        f'{netbox.counter["manufacturer"]} manufacturers created')
+    if netbox.modules:
+        settings.handle.log(
+            f'{netbox.counter["module_added"]} modules created')
+        settings.handle.log(
+            f'{netbox.counter["module_port_added"]} module interface / ports created')
+
+
+if __name__ == "__main__":
+    main()
diff --git a/netbox_api.py b/netbox_api.py
new file mode 100644
index 0000000..f848d49
--- /dev/null
+++ b/netbox_api.py
@@ -0,0 +1,485 @@
+from collections import Counter
+import pynetbox
+import requests
+import os
+import glob
+# from pynetbox import RequestError as APIRequestError
+
+class NetBox:
+    def __new__(cls, *args, **kwargs):
+        return super().__new__(cls)
+
+    def __init__(self, settings):
+        self.counter = Counter(
+            added=0,
+            updated=0,
+            manufacturer=0,
+            module_added=0,
+            module_port_added=0,
+            images=0,
+        )
+        self.url = settings.NETBOX_URL
+        self.token = settings.NETBOX_TOKEN
+        self.handle = settings.handle
+        self.netbox = None
+        self.ignore_ssl = settings.IGNORE_SSL_ERRORS
+        self.modules = False
+        self.connect_api()
+        self.verify_compatibility()
+        self.existing_manufacturers = self.get_manufacturers()
+        self.device_types = DeviceTypes(self.netbox, self.handle, self.counter, self.ignore_ssl)
+
+    def connect_api(self):
+        try:
+            self.netbox = pynetbox.api(self.url, token=self.token)
+            if self.ignore_ssl:
+                self.handle.verbose_log("IGNORE_SSL_ERRORS is True, catching exception and disabling SSL verification.")
+                #requests.packages.urllib3.disable_warnings()
+                self.netbox.http_session.verify = False
+        except Exception as e:
+            self.handle.exception("Exception", 'NetBox API Error', e)
+
+    def get_api(self):
+        return self.netbox
+
+    def get_counter(self):
+        return self.counter
+
+    def verify_compatibility(self):
+        # nb.version should be the version in the form '3.2'
+        version_split = [int(x) for x in self.netbox.version.split('.')]
+
+        # Later than 3.2
+        # Might want to check for the module-types entry as well?
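+        # NetBox added module types in release 3.2, which is why module
+        # handling is only enabled from 3.2 onwards.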
+ if version_split[0] > 3 or (version_split[0] == 3 and version_split[1] >= 2): + self.modules = True + + def get_manufacturers(self): + return {str(item): item for item in self.netbox.dcim.manufacturers.all()} + + def create_manufacturers(self, vendors): + to_create = [] + self.existing_manufacturers = self.get_manufacturers() + for vendor in vendors: + try: + manGet = self.existing_manufacturers[vendor["name"]] + self.handle.verbose_log(f'Manufacturer Exists: {manGet.name} - {manGet.id}') + except KeyError: + to_create.append(vendor) + self.handle.verbose_log(f"Manufacturer queued for addition: {vendor['name']}") + + if to_create: + try: + created_manufacturers = self.netbox.dcim.manufacturers.create(to_create) + for manufacturer in created_manufacturers: + self.handle.verbose_log(f'Manufacturer Created: {manufacturer.name} - ' + + f'{manufacturer.id}') + self.counter.update({'manufacturer': 1}) + except pynetbox.RequestError as request_error: + self.handle.log("Error creating manufacturers") + self.handle.verbose_log(f"Error during manufacturer creation. - {request_error.error}") + + def create_device_types(self, device_types_to_add): + for device_type in device_types_to_add: + + # Remove file base path + src_file = device_type["src"] + del device_type["src"] + + # Pre-process front/rear_image flag, remove it if present + saved_images = {} + image_base = os.path.dirname(src_file).replace("device-types","elevation-images") + for i in ["front_image","rear_image"]: + if i in device_type: + if device_type[i]: + image_glob = f"{image_base}/{device_type['slug']}.{i.split('_')[0]}.*" + images = glob.glob(image_glob, recursive=False) + if images: + saved_images[i] = images[0] + else: + self.handle.log(f"Error locating image file using '{image_glob}'") + del device_type[i] + + try: + dt = self.device_types.existing_device_types[device_type["model"]] + self.handle.verbose_log(f'Device Type Exists: {dt.manufacturer.name} - ' + + f'{dt.model} - {dt.id}') + except KeyError: + try: + dt = self.netbox.dcim.device_types.create(device_type) + self.counter.update({'added': 1}) + self.handle.verbose_log(f'Device Type Created: {dt.manufacturer.name} - ' + + f'{dt.model} - {dt.id}') + except pynetbox.RequestError as e: + self.handle.log(f'Error {e.error} creating device type:' + f' {device_type["manufacturer"]["name"]} {device_type["model"]}') + continue + + if "interfaces" in device_type: + self.device_types.create_interfaces(device_type["interfaces"], dt.id) + if "power-ports" in device_type: + self.device_types.create_power_ports(device_type["power-ports"], dt.id) + if "power-port" in device_type: + self.device_types.create_power_ports(device_type["power-port"], dt.id) + if "console-ports" in device_type: + self.device_types.create_console_ports(device_type["console-ports"], dt.id) + if "power-outlets" in device_type: + self.device_types.create_power_outlets(device_type["power-outlets"], dt.id) + if "console-server-ports" in device_type: + self.device_types.create_console_server_ports(device_type["console-server-ports"], dt.id) + if "rear-ports" in device_type: + self.device_types.create_rear_ports(device_type["rear-ports"], dt.id) + if "front-ports" in device_type: + self.device_types.create_front_ports(device_type["front-ports"], dt.id) + if "device-bays" in device_type: + self.device_types.create_device_bays(device_type["device-bays"], dt.id) + if self.modules and 'module-bays' in device_type: + self.device_types.create_module_bays(device_type['module-bays'], dt.id) + + # Finally, update images if any 
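+            # Images are not part of the device-type create payload; they are
+            # uploaded afterwards with a multipart PATCH against the device-type
+            # endpoint (see DeviceTypes.upload_images below).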
+ if saved_images: + self.device_types.upload_images(self.url, self.token, saved_images, dt.id) + + def create_module_types(self, module_types): + all_module_types = {} + for curr_nb_mt in self.netbox.dcim.module_types.all(): + if curr_nb_mt.manufacturer.slug not in all_module_types: + all_module_types[curr_nb_mt.manufacturer.slug] = {} + + all_module_types[curr_nb_mt.manufacturer.slug][curr_nb_mt.model] = curr_nb_mt + + + for curr_mt in module_types: + try: + module_type_res = all_module_types[curr_mt['manufacturer']['slug']][curr_mt["model"]] + self.handle.verbose_log(f'Module Type Exists: {module_type_res.manufacturer.name} - ' + + f'{module_type_res.model} - {module_type_res.id}') + except KeyError: + try: + module_type_res = self.netbox.dcim.module_types.create(curr_mt) + self.counter.update({'module_added': 1}) + self.handle.verbose_log(f'Module Type Created: {module_type_res.manufacturer.name} - ' + + f'{module_type_res.model} - {module_type_res.id}') + except pynetbox.RequestError as exce: + self.handle.log(f"Error '{exce.error}' creating module type: " + + f"{curr_mt}") + + if "interfaces" in curr_mt: + self.device_types.create_module_interfaces(curr_mt["interfaces"], module_type_res.id) + if "power-ports" in curr_mt: + self.device_types.create_module_power_ports(curr_mt["power-ports"], module_type_res.id) + if "console-ports" in curr_mt: + self.device_types.create_module_console_ports(curr_mt["console-ports"], module_type_res.id) + if "power-outlets" in curr_mt: + self.device_types.create_module_power_outlets(curr_mt["power-outlets"], module_type_res.id) + if "console-server-ports" in curr_mt: + self.device_types.create_module_console_server_ports(curr_mt["console-server-ports"], module_type_res.id) + if "rear-ports" in curr_mt: + self.device_types.create_module_rear_ports(curr_mt["rear-ports"], module_type_res.id) + if "front-ports" in curr_mt: + self.device_types.create_module_front_ports(curr_mt["front-ports"], module_type_res.id) + +class DeviceTypes: + def __new__(cls, *args, **kwargs): + return super().__new__(cls) + + def __init__(self, netbox, handle, counter, ignore_ssl): + self.netbox = netbox + self.handle = handle + self.counter = counter + self.existing_device_types = self.get_device_types() + self.ignore_ssl = ignore_ssl + + def get_device_types(self): + return {str(item): item for item in self.netbox.dcim.device_types.all()} + + def get_power_ports(self, device_type): + return {str(item): item for item in self.netbox.dcim.power_port_templates.filter(devicetype_id=device_type)} + + def get_rear_ports(self, device_type): + return {str(item): item for item in self.netbox.dcim.rear_port_templates.filter(devicetype_id=device_type)} + + def get_module_power_ports(self, module_type): + return {str(item): item for item in self.netbox.dcim.power_port_templates.filter(moduletype_id=module_type)} + + def get_module_rear_ports(self, module_type): + return {str(item): item for item in self.netbox.dcim.rear_port_templates.filter(moduletype_id=module_type)} + + def get_device_type_ports_to_create(self, dcim_ports, device_type, existing_ports): + to_create = [port for port in dcim_ports if port['name'] not in existing_ports] + for port in to_create: + port['device_type'] = device_type + + return to_create + + def get_module_type_ports_to_create(self, module_ports, module_type, existing_ports): + to_create = [port for port in module_ports if port['name'] not in existing_ports] + for port in to_create: + port['module_type'] = module_type + + return to_create + + def 
create_interfaces(self, interfaces, device_type): + existing_interfaces = {str(item): item for item in self.netbox.dcim.interface_templates.filter( + devicetype_id=device_type)} + to_create = self.get_device_type_ports_to_create( + interfaces, device_type, existing_interfaces) + + if to_create: + try: + self.counter.update({'updated': + self.handle.log_device_ports_created( + self.netbox.dcim.interface_templates.create(to_create), "Interface") + }) + except pynetbox.RequestError as excep: + self.handle.log(f"Error '{excep.error}' creating Interface") + + def create_power_ports(self, power_ports, device_type): + existing_power_ports = self.get_power_ports(device_type) + to_create = self.get_device_type_ports_to_create(power_ports, device_type, existing_power_ports) + + if to_create: + try: + self.counter.update({'updated': + self.handle.log_device_ports_created( + self.netbox.dcim.power_port_templates.create(to_create), "Power Port") + }) + except pynetbox.RequestError as excep: + self.handle.log(f"Error '{excep.error}' creating Power Port") + + def create_console_ports(self, console_ports, device_type): + existing_console_ports = {str(item): item for item in self.netbox.dcim.console_port_templates.filter(devicetype_id=device_type)} + to_create = self.get_device_type_ports_to_create(console_ports, device_type, existing_console_ports) + + if to_create: + try: + self.counter.update({'updated': + self.handle.log_device_ports_created( + self.netbox.dcim.console_port_templates.create(to_create), "Console Port") + }) + except pynetbox.RequestError as excep: + self.handle.log(f"Error '{excep.error}' creating Console Port") + + def create_power_outlets(self, power_outlets, device_type): + existing_power_outlets = {str(item): item for item in self.netbox.dcim.power_outlet_templates.filter(devicetype_id=device_type)} + to_create = self.get_device_type_ports_to_create(power_outlets, device_type, existing_power_outlets) + + if to_create: + existing_power_ports = self.get_power_ports(device_type) + for outlet in to_create: + try: + power_port = existing_power_ports[outlet["power_port"]] + outlet['power_port'] = power_port.id + except KeyError: + pass + + try: + self.counter.update({'updated': + self.handle.log_device_ports_created( + self.netbox.dcim.power_outlet_templates.create(to_create), "Power Outlet") + }) + except pynetbox.RequestError as excep: + self.handle.log(f"Error '{excep.error}' creating Power Outlet") + + def create_console_server_ports(self, console_server_ports, device_type): + existing_console_server_ports = {str(item): item for item in self.netbox.dcim.console_server_port_templates.filter(devicetype_id=device_type)} + to_create = self.get_device_type_ports_to_create(console_server_ports, device_type, existing_console_server_ports) + + if to_create: + try: + self.counter.update({'updated': + self.handle.log_device_ports_created( + self.netbox.dcim.console_server_port_templates.create(to_create), "Console Server Port") + }) + except pynetbox.RequestError as excep: + self.handle.log(f"Error '{excep.error}' creating Console Server Port") + + def create_rear_ports(self, rear_ports, device_type): + existing_rear_ports = self.get_rear_ports(device_type) + to_create = self.get_device_type_ports_to_create(rear_ports, device_type, existing_rear_ports) + + if to_create: + try: + self.counter.update({'updated': + self.handle.log_device_ports_created( + self.netbox.dcim.rear_port_templates.create(to_create), "Rear Port") + }) + except pynetbox.RequestError as excep: + self.handle.log(f"Error 
'{excep.error}' creating Rear Port") + + def create_front_ports(self, front_ports, device_type): + existing_front_ports = {str(item): item for item in self.netbox.dcim.front_port_templates.filter(devicetype_id=device_type)} + to_create = self.get_device_type_ports_to_create(front_ports, device_type, existing_front_ports) + + if to_create: + all_rearports = self.get_rear_ports(device_type) + for port in to_create: + try: + rear_port = all_rearports[port["rear_port"]] + port['rear_port'] = rear_port.id + except KeyError: + self.handle.log(f'Could not find Rear Port for Front Port: {port["name"]} - ' + + f'{port["type"]} - {device_type}') + + try: + self.counter.update({'updated': + self.handle.log_device_ports_created( + self.netbox.dcim.front_port_templates.create(to_create), "Front Port") + }) + except pynetbox.RequestError as excep: + self.handle.log(f"Error '{excep.error}' creating Front Port") + + def create_device_bays(self, device_bays, device_type): + existing_device_bays = {str(item): item for item in self.netbox.dcim.device_bay_templates.filter(devicetype_id=device_type)} + to_create = self.get_device_type_ports_to_create(device_bays, device_type, existing_device_bays) + + if to_create: + try: + self.counter.update({'updated': + self.handle.log_device_ports_created( + self.netbox.dcim.device_bay_templates.create(to_create), "Device Bay") + }) + except pynetbox.RequestError as excep: + self.handle.log(f"Error '{excep.error}' creating Device Bay") + + def create_module_bays(self, module_bays, device_type): + existing_module_bays = {str(item): item for item in self.netbox.dcim.module_bay_templates.filter(devicetype_id=device_type)} + to_create = self.get_device_type_ports_to_create(module_bays, device_type, existing_module_bays) + + if to_create: + try: + self.counter.update({'updated': + self.handle.log_device_ports_created( + self.netbox.dcim.module_bay_templates.create(to_create), "Module Bay") + }) + except pynetbox.RequestError as excep: + self.handle.log(f"Error '{excep.error}' creating Module Bay") + + def create_module_interfaces(self, module_interfaces, module_type): + existing_interfaces = {str(item): item for item in self.netbox.dcim.interface_templates.filter(moduletype_id=module_type)} + to_create = self.get_module_type_ports_to_create(module_interfaces, module_type, existing_interfaces) + + if to_create: + try: + self.counter.update({'updated': + self.handle.log_module_ports_created( + self.netbox.dcim.interface_templates.create(to_create), "Module Interface") + }) + except pynetbox.RequestError as excep: + self.handle.log(f"Error '{excep.error}' creating Module Interface") + + def create_module_power_ports(self, power_ports, module_type): + existing_power_ports = self.get_module_power_ports(module_type) + to_create = self.get_module_type_ports_to_create(power_ports, module_type, existing_power_ports) + + if to_create: + try: + self.counter.update({'updated': + self.handle.log_module_ports_created( + self.netbox.dcim.power_port_templates.create(to_create), "Module Power Port") + }) + except pynetbox.RequestError as excep: + self.handle.log(f"Error '{excep.error}' creating Module Power Port") + + def create_module_console_ports(self, console_ports, module_type): + existing_console_ports = {str(item): item for item in self.netbox.dcim.console_port_templates.filter(moduletype_id=module_type)} + to_create = self.get_module_type_ports_to_create(console_ports, module_type, existing_console_ports) + + if to_create: + try: + self.counter.update({'updated': + 
self.handle.log_module_ports_created( + self.netbox.dcim.console_port_templates.create(to_create), "Module Console Port") + }) + except pynetbox.RequestError as excep: + self.handle.log(f"Error '{excep.error}' creating Module Console Port") + + def create_module_power_outlets(self, power_outlets, module_type): + existing_power_outlets = {str(item): item for item in self.netbox.dcim.power_outlet_templates.filter(moduletype_id=module_type)} + to_create = self.get_module_type_ports_to_create(power_outlets, module_type, existing_power_outlets) + + if to_create: + existing_power_ports = self.get_module_power_ports(module_type) + for outlet in to_create: + try: + power_port = existing_power_ports[outlet["power_port"]] + outlet['power_port'] = power_port.id + except KeyError: + pass + + try: + self.counter.update({'updated': + self.handle.log_module_ports_created( + self.netbox.dcim.power_outlet_templates.create(to_create), "Module Power Outlet") + }) + except pynetbox.RequestError as excep: + self.handle.log(f"Error '{excep.error}' creating Module Power Outlet") + + def create_module_console_server_ports(self, console_server_ports, module_type): + existing_console_server_ports = {str(item): item for item in self.netbox.dcim.console_server_port_templates.filter(moduletype_id=module_type)} + to_create = self.get_module_type_ports_to_create(console_server_ports, module_type, existing_console_server_ports) + + if to_create: + try: + self.counter.update({'updated': + self.handle.log_module_ports_created( + self.netbox.dcim.console_server_port_templates.create(to_create), "Module Console Server Port") + }) + except pynetbox.RequestError as excep: + self.handle.log(f"Error '{excep.error}' creating Module Console Server Port") + + def create_module_rear_ports(self, rear_ports, module_type): + existing_rear_ports = self.get_module_rear_ports(module_type) + to_create = self.get_module_type_ports_to_create(rear_ports, module_type, existing_rear_ports) + + if to_create: + try: + self.counter.update({'updated': + self.handle.log_module_ports_created( + self.netbox.dcim.rear_port_templates.create(to_create), "Module Rear Port") + }) + except pynetbox.RequestError as excep: + self.handle.log(f"Error '{excep.error}' creating Module Rear Port") + + def create_module_front_ports(self, front_ports, module_type): + existing_front_ports = {str(item): item for item in self.netbox.dcim.front_port_templates.filter(moduletype_id=module_type)} + to_create = self.get_module_type_ports_to_create(front_ports, module_type, existing_front_ports) + + if to_create: + existing_rear_ports = self.get_module_rear_ports(module_type) + for port in to_create: + try: + rear_port = existing_rear_ports[port["rear_port"]] + port['rear_port'] = rear_port.id + except KeyError: + self.handle.log(f'Could not find Rear Port for Front Port: {port["name"]} - ' + + f'{port["type"]} - {module_type}') + + try: + self.counter.update({'updated': + self.handle.log_module_ports_created( + self.netbox.dcim.front_port_templates.create(to_create), "Module Front Port") + }) + except pynetbox.RequestError as excep: + self.handle.log(f"Error '{excep.error}' creating Module Front Port") + + def upload_images(self,baseurl,token,images,device_type): + '''Upload front_image and/or rear_image for the given device type + + Args: + baseurl: URL for Netbox instance + token: Token to access Netbox instance + images: map of front_image and/or rear_image filename + device_type: id for the device-type to update + + Returns: + None + ''' + url = 
f"{baseurl}/api/dcim/device-types/{device_type}/" + headers = { "Authorization": f"Token {token}" } + + files = { i: (os.path.basename(f), open(f,"rb") ) for i,f in images.items() } + response = requests.patch(url, headers=headers, files=files, verify=(not self.ignore_ssl)) + + self.handle.log( f'Images {images} updated at {url}: {response}' ) + self.counter["images"] += len(images) diff --git a/repo.py b/repo.py new file mode 100644 index 0000000..789df52 --- /dev/null +++ b/repo.py @@ -0,0 +1,109 @@ +import os +from glob import glob +from re import sub as re_sub +from git import Repo, exc +import yaml + + +class DTLRepo: + def __new__(cls, *args, **kwargs): + return super().__new__(cls) + + def __init__(self, args, repo_path, exception_handler): + self.handle = exception_handler + self.yaml_extensions = ['yaml', 'yml'] + self.url = args.url + self.repo_path = repo_path + self.branch = args.branch + self.repo = None + self.cwd = os.getcwd() + + if os.path.isdir(self.repo_path): + self.pull_repo() + else: + self.clone_repo() + + def get_relative_path(self): + return self.repo_path + + def get_absolute_path(self): + return os.path.join(self.cwd, self.repo_path) + + def get_devices_path(self): + return os.path.join(self.get_absolute_path(), 'device-types') + + def get_modules_path(self): + return os.path.join(self.get_absolute_path(), 'module-types') + + def slug_format(self, name): + return re_sub('\W+', '-', name.lower()) + + def pull_repo(self): + try: + self.handle.log("Package devicetype-library is already installed, " + + f"updating {self.get_absolute_path()}") + self.repo = Repo(self.repo_path) + if not self.repo.remotes.origin.url.endswith('.git'): + self.handle.exception("GitInvalidRepositoryError", self.repo.remotes.origin.url, + f"Origin URL {self.repo.remotes.origin.url} does not end with .git") + self.repo.remotes.origin.pull() + self.repo.git.checkout(self.branch) + self.handle.verbose_log( + f"Pulled Repo {self.repo.remotes.origin.url}") + except exc.GitCommandError as git_error: + self.handle.exception( + "GitCommandError", self.repo.remotes.origin.url, git_error) + except Exception as git_error: + self.handle.exception( + "Exception", 'Git Repository Error', git_error) + + def clone_repo(self): + try: + self.repo = Repo.clone_from( + self.url, self.get_absolute_path(), branch=self.branch) + self.handle.log( + f"Package Installed {self.repo.remotes.origin.url}") + except exc.GitCommandError as git_error: + self.handle.exception("GitCommandError", self.url, git_error) + except Exception as git_error: + self.handle.exception( + "Exception", 'Git Repository Error', git_error) + + def get_devices(self, base_path, vendors: list = None): + files = [] + discovered_vendors = [] + vendor_dirs = os.listdir(base_path) + + for folder in [vendor for vendor in vendor_dirs if not vendors or vendor.casefold() in vendors]: + if folder.casefold() != "testing": + discovered_vendors.append({'name': folder, + 'slug': self.slug_format(folder)}) + for extension in self.yaml_extensions: + files.extend(glob(base_path + folder + f'/*.{extension}')) + return files, discovered_vendors + + def parse_files(self, files: list, slugs: list = None): + deviceTypes = [] + for file in files: + with open(file, 'r') as stream: + try: + data = yaml.safe_load(stream) + except yaml.YAMLError as excep: + self.handle.verbose_log(excep) + continue + manufacturer = data['manufacturer'] + data['manufacturer'] = { + 'name': manufacturer, 'slug': self.slug_format(manufacturer)} + + # Save file location to resolve any 
relative paths for images + data['src'] = file + + if data.get('slug') is None: + continue + + if slugs and True not in [True if s.casefold() in data['slug'].casefold() else False for s in slugs]: + self.handle.verbose_log(f"Skipping {data['model']}") + continue + + deviceTypes.append(data) + return deviceTypes diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..30ac661 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,4 @@ +GitPython==3.1.32 +pynetbox==7.0.1 +python-dotenv==1.0.0 +PyYAML==6.0.1 \ No newline at end of file diff --git a/settings.py b/settings.py new file mode 100644 index 0000000..797f77f --- /dev/null +++ b/settings.py @@ -0,0 +1,52 @@ +from argparse import ArgumentParser +import os +from log_handler import LogHandler +from repo import DTLRepo +from dotenv import load_dotenv +load_dotenv() + +REPO_URL = os.getenv("REPO_URL", + default="https://github.com/netbox-community/devicetype-library.git") +REPO_BRANCH = os.getenv("REPO_BRANCH", default="master") +NETBOX_URL = os.getenv("NETBOX_URL") +NETBOX_TOKEN = os.getenv("NETBOX_TOKEN") +IGNORE_SSL_ERRORS = (os.getenv("IGNORE_SSL_ERRORS", default="False") == "True") +REPO_PATH = f"{os.path.dirname(os.path.realpath(__file__))}/repo" + +# optionally load vendors through a comma separated list as env var +VENDORS = list(filter(None, os.getenv("VENDORS", "").split(","))) + +# optionally load device types through a space separated list as env var +SLUGS = os.getenv("SLUGS", "").split() + +NETBOX_FEATURES = { + 'modules': False, +} + +parser = ArgumentParser(description='Import Netbox Device Types') +parser.add_argument('--vendors', nargs='+', default=VENDORS, + help="List of vendors to import eg. apc cisco") +parser.add_argument('--url', '--git', default=REPO_URL, + help="Git URL with valid Device Type YAML files") +parser.add_argument('--slugs', nargs='+', default=SLUGS, + help="List of device-type slugs to import eg. ap4431 ws-c3850-24t-l") +parser.add_argument('--branch', default=REPO_BRANCH, + help="Git branch to use from repo") +parser.add_argument('--verbose', action='store_true', default=False, + help="Print verbose output") + +args = parser.parse_args() + +args.vendors = [v.casefold() + for vendor in args.vendors for v in vendor.split(",") if v.strip()] +args.slugs = [s for slug in args.slugs for s in slug.split(",") if s.strip()] + +handle = LogHandler(args) +# Evaluate environment variables and exit if one of the mandatory ones are not set +MANDATORY_ENV_VARS = ["REPO_URL", "NETBOX_URL", "NETBOX_TOKEN"] +for var in MANDATORY_ENV_VARS: + if var not in os.environ: + handle.exception("EnvironmentError", var, + f'Environment variable "{var}" is not set.\n\nMANDATORY_ENV_VARS: {str(MANDATORY_ENV_VARS)}.\n\nCURRENT_ENV_VARS: {str(os.environ)}') + +dtl_repo = DTLRepo(args, REPO_PATH, handle) -- GitLab
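A minimal usage sketch of the slug filtering this patch adds. The slug values and token below are placeholders reused from `.env.example` and the README examples, not required values:

```
# Filter by device-type slug on the command line; slugs may be space- or
# comma-separated and are matched as case-insensitive substrings.
./nb-dt-import.py --vendors cisco --slugs c9300-48u,isr4331

# The same filter via the SLUGS environment variable, e.g. for the container:
docker run -e "NETBOX_URL=http://netbox:8080/" -e "NETBOX_TOKEN=98765434567890" \
    -e "SLUGS=c9300-48u isr4431" ghcr.io/minitriga/netbox-device-type-library-import
```

Device types whose slug does not match any of the given values are skipped in `DTLRepo.parse_files`, so the rest of the import pipeline is unchanged.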