From ecedbc38b6f4b35ef321dce88972aad241e95e71 Mon Sep 17 00:00:00 2001 From: Shannon Byrnes Date: Wed, 28 Feb 2024 16:12:31 -0700 Subject: [PATCH] Initial commit --- .gitignore | 1 + 1-reading-network-configuration/.gitkeep | 0 1-reading-network-configuration/README.md | 0 .../1_netmiko_show_interfaces_raw.py | 26 + .../2_netmiko_show_interfaces_textfsm.py | 30 + .../1_create_network_structured_data.py | 401 ++++++++++++++ .../2_add_customers_to_interfaces.py | 102 ++++ .../answers/exercise2/answer.json | 177 ++++++ 1-reading-network-configuration/blah.py | 153 ++++++ .../customer_ports.json | 17 + .../1_create_network_structured_data.py | 410 ++++++++++++++ .../2_add_customers_to_interfaces.py | 102 ++++ .../exercise2/README.md | 205 +++++++ .../exercise2/customer_interfaces.csv | 12 + .../exercise2/hosts.yaml | 12 + .../exercise2/log/.gitkeep | 0 1-reading-network-configuration/my_hosts.yaml | 13 + .../1_netmiko_show_interfaces_raw.py | 26 + .../2_netmiko_show_interfaces_textfsm.py | 31 ++ LICENSE.txt | 201 +++++++ README.md | 18 + internal-lab-setup-assets/Containerfile | 43 ++ internal-lab-setup-assets/Makefile | 37 ++ internal-lab-setup-assets/README.md | 17 + .../startup-config/cisco1.conf | 91 +++ .../startup-config/cisco2.conf | 42 ++ .../startup-config/juniper1.conf | 83 +++ internal-lab-setup-assets/workshop-init.sh | 14 + .../workshop.clab.yml.j2 | 54 ++ poetry.lock | 520 ++++++++++++++++++ pyproject.toml | 18 + 31 files changed, 2856 insertions(+) create mode 100644 .gitignore create mode 100644 1-reading-network-configuration/.gitkeep create mode 100644 1-reading-network-configuration/README.md create mode 100644 1-reading-network-configuration/answers/exercise1/1_netmiko_show_interfaces_raw.py create mode 100644 1-reading-network-configuration/answers/exercise1/2_netmiko_show_interfaces_textfsm.py create mode 100644 1-reading-network-configuration/answers/exercise2/1_create_network_structured_data.py create mode 100644 1-reading-network-configuration/answers/exercise2/2_add_customers_to_interfaces.py create mode 100644 1-reading-network-configuration/answers/exercise2/answer.json create mode 100644 1-reading-network-configuration/blah.py create mode 100644 1-reading-network-configuration/customer_ports.json create mode 100644 1-reading-network-configuration/exercise2/1_create_network_structured_data.py create mode 100644 1-reading-network-configuration/exercise2/2_add_customers_to_interfaces.py create mode 100644 1-reading-network-configuration/exercise2/README.md create mode 100644 1-reading-network-configuration/exercise2/customer_interfaces.csv create mode 100644 1-reading-network-configuration/exercise2/hosts.yaml create mode 100644 1-reading-network-configuration/exercise2/log/.gitkeep create mode 100644 1-reading-network-configuration/my_hosts.yaml create mode 100644 1-reading-network-configuration/workshop_exercise/1_netmiko_show_interfaces_raw.py create mode 100644 1-reading-network-configuration/workshop_exercise/2_netmiko_show_interfaces_textfsm.py create mode 100644 LICENSE.txt create mode 100644 README.md create mode 100644 internal-lab-setup-assets/Containerfile create mode 100644 internal-lab-setup-assets/Makefile create mode 100644 internal-lab-setup-assets/README.md create mode 100644 internal-lab-setup-assets/startup-config/cisco1.conf create mode 100644 internal-lab-setup-assets/startup-config/cisco2.conf create mode 100644 internal-lab-setup-assets/startup-config/juniper1.conf create mode 100755 internal-lab-setup-assets/workshop-init.sh create mode 100644 
internal-lab-setup-assets/workshop.clab.yml.j2 create mode 100644 poetry.lock create mode 100644 pyproject.toml diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e43b0f9 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +.DS_Store diff --git a/1-reading-network-configuration/.gitkeep b/1-reading-network-configuration/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/1-reading-network-configuration/README.md b/1-reading-network-configuration/README.md new file mode 100644 index 0000000..e69de29 diff --git a/1-reading-network-configuration/answers/exercise1/1_netmiko_show_interfaces_raw.py b/1-reading-network-configuration/answers/exercise1/1_netmiko_show_interfaces_raw.py new file mode 100644 index 0000000..45a1cb4 --- /dev/null +++ b/1-reading-network-configuration/answers/exercise1/1_netmiko_show_interfaces_raw.py @@ -0,0 +1,26 @@ +# pip install --user netmiko +from netmiko import Netmiko + +username = "clab" +password = "clab@123" +device_type = "cisco_xr" +hosts = ["172.16.30.2", "172.16.30.3"] +command_to_run = "show int brief" + +for host in hosts: + # Create a variable that represents an SSH connection to our router. + connection = Netmiko( + username=username, password=password, device_type=device_type, ip=host + ) + + # Send a command to the router, and get back the raw output + raw_output = connection.send_command(command_to_run) + + # The "really raw" output has '\n' characters appear instead of a real carriage return. + # Converting them into carriage returns will make it a little more readable for this demo. + raw_output = raw_output.replace("\\n", "\n") + + print( + f"### This is the raw output from {host}, without any parsing: ###\n", + raw_output + "\n", + ) diff --git a/1-reading-network-configuration/answers/exercise1/2_netmiko_show_interfaces_textfsm.py b/1-reading-network-configuration/answers/exercise1/2_netmiko_show_interfaces_textfsm.py new file mode 100644 index 0000000..4d3c919 --- /dev/null +++ b/1-reading-network-configuration/answers/exercise1/2_netmiko_show_interfaces_textfsm.py @@ -0,0 +1,30 @@ +# pip install --user textfsm +# pip install --user netmiko +import json +from netmiko import Netmiko + +username = "clab" +password = "clab@123" +device_type = "cisco_xr" +hosts = ["172.16.30.2", "172.16.30.3"] +command_to_run = "show int brief" + +for host in hosts: + # Create a variable that represents an SSH connection to our router. + connection = Netmiko( + username=username, + password=password, + device_type=device_type, + ip=host, + ) + + # Send a command to the router, and get back the output "dictionaried" by textfsm. 
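+    # With use_textfsm=True, Netmiko feeds the raw output through the community
+    # ntc-templates TextFSM templates. If a template matches this device_type and
+    # command, we get structured data back (typically a list of dictionaries, one
+    # per interface); if no template matches, the raw string is returned unchanged.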
+ textfsm_output = connection.send_command(command_to_run, use_textfsm=True) + + print(f"### This is the TextFSM output from {host}: ###") + print(textfsm_output) + print("\n") # Add extra space between our outputs for each host + + print(f"### This is the TextFSM output from {host}, but JSON-formatted: ###") + print(json.dumps(textfsm_output, indent=4)) # indent for readability + print("\n") # Add extra space between our outputs for each host diff --git a/1-reading-network-configuration/answers/exercise2/1_create_network_structured_data.py b/1-reading-network-configuration/answers/exercise2/1_create_network_structured_data.py new file mode 100644 index 0000000..8df9d71 --- /dev/null +++ b/1-reading-network-configuration/answers/exercise2/1_create_network_structured_data.py @@ -0,0 +1,401 @@ +# pip install --user textfsm +# pip install --user netmiko +import json +import yaml +import csv +import os +import netmiko +import ipaddress +from copy import deepcopy +from ciscoconfparse import CiscoConfParse + +# Configure logging so it goes to a .log file next to this script. +import logging + +this_script_dir = os.path.dirname(os.path.realpath(__file__)) +log_file = f"{this_script_dir}/log/exercise2.log" +logging.basicConfig( + filename=log_file, encoding="utf-8", level=logging.DEBUG, filemode="w" +) + +# Configure a global variables to store things like +# our known BGP key. +# (Don't try this at home) +BGP_MD5_KEY = "foobar" +INPUT_FILENAME = "hosts.yaml" +OUTPUT_FILENAME = "devices.json" + + +# The real script +def main(): + with open("hosts.yaml") as f: + # This creates a list of dictionaries from a YAML file. + # + # This will take the YAML file that looks similar to the following: + # - host: "10.0.0.1" + # device_type: "cisco_xr" + # username: "root" + # password: "password" + # - host: "10.0.0.2 + # {...} + + # And create a dictionary, looking like the following: + # [{"host": "10.0.0.1", "device_type": "cisco_xr", "username": "root", "password": "password"}, {...}] + # + # We can access the IP address of the first host like so: + # first_host_ip = mylist[0]["host"] + + # YAML is convenient, and only a single line of code is required. + hosts = yaml.safe_load(f) + + # Now we are in the meat of it. Let's look at each host. + parsed_data = [] + for host in hosts: + # Create the connection with our host. + connection = netmiko.ConnectHandler(**host) + + try: + # We need the name, ip, platform, version, BGP peers, and interfaces of our host. + # The functions that are called to obtain these are different based on if we are + # on a Cisco or Juniper device. We know ahead of time which hosts are going to + # which OS, so we will look and then collect data based on it. + if host["device_type"] == "cisco_xr": + # Call each helper function from below to collect the data. + data = { + "name": get_cisco_hostname(connection), + "ip": host["host"], + "platform": host["device_type"], + "version": get_cisco_version(connection), + "peers": get_cisco_bgp_peers(connection, md5_key=BGP_MD5_KEY), + "interfaces": get_cisco_interfaces(connection), + } + + elif host["device_type"] == "juniper_junos": + # Call each helper function from below to collect the data. + data = { + "name": get_junos_hostname(connection), + "ip": host["host"], + "platform": host["device_type"], + "version": get_junos_version(connection), + "peers": get_junos_bgp_peers(connection, md5_key=BGP_MD5_KEY), + "interfaces": get_junos_interfaces(connection), + } + + # If the host is neither our Cisco and Juniper OSs, then cause an error. 
+ else: + raise Exception(f"Device type {host['device_type']} not recognized.") + + # We are done with this host, let's add it to the data. + parsed_data.append(data) + print(json.dumps(data, indent=4)) + + # If anything wrong happens that causes an Exception, the script will move to this line. + # It will print out the host that we errored on, and then containue raising the exception. + except Exception: + print(f"Errored on host: {host}!") + raise + + # "finally" means that we will run this line, no matter what. + # We want to make sure we close our SSH connections whether things work, or not, + # otherwise, they will become stale and take up TTYs on the routers. + finally: + connection.disconnect() + + # Put our data into a file as a JSON. + with open(OUTPUT_FILENAME, "w") as f: + json.dump(parsed_data, f, indent=4) + + """ Done! """ + + +##### +# Helper functions +##### + + +def get_junos_hostname(connection: netmiko.ConnectHandler): + # Extract the hostname from running "show version | display json" + # Juniper makes this easy since it will already be in JSON format. + + # Run the command, and convert the JSON output into a Python dictionary. + output = json.loads(connection.send_command("show version | display json")) + # Read the dictionary to pull out the hostname value. + hostname = output["software-information"][0]["host-name"][0]["data"] + return hostname + + +def get_junos_version(connection: netmiko.ConnectHandler): + # Extract the version number from "show version | display json" + # Juniper makes this easy since it will already be in JSON format. + + # Run the command, and convert the JSON output into a Python dictionary. + output = json.loads(connection.send_command("show version | display json")) + # Read the dictionary to pull out the version number value. + version = output["software-information"][0]["junos-version"][0]["data"] + return version + + +def get_junos_bgp_peers(connection: netmiko.ConnectHandler, md5_key=""): + # Extract the BGP peers from running "show bgp neighbor | display json" + # Juniper makes this easy since it will already be in JSON format. + + # Create an empty list to store our data in. + result = [] + + # Run the command, and convert the JSON output into a Python dictionary. + bgp_data = json.loads(connection.send_command("show bgp neighbor | display json")) + + # Read the dictionary to pull out the list of BGP peers. + list_of_peers = bgp_data["bgp-information"][0]["bgp-peer"] + + # Iterate over each peer + for peer in list_of_peers: + # Extract the IP and port number for each BGP peer. + # It will look like "10.10.10.1+12345" + peer_ip_and_port = peer["peer-address"][0]["data"] + + # Manipulate the string and get just the IP. + ip = peer_ip_and_port.split("+")[0] + + # Add it to our "result" list that we will return at the end. + result.append({"remote_address": ip, "md5_key": md5_key}) + + return result + + +def get_junos_interfaces(connection: netmiko.ConnectHandler): + # Create a detailed dictionary of all interfaces and their configuration + # using "show configuration interfaces | display json" + # Juniper makes this easy since it will already be in JSON format. + + # Create an empty list to store our data in. + result = [] + + # Run the command, and convert the JSON output into a Python dictionary. + intf_data = json.loads( + connection.send_command("show configuration interfaces | display json") + ) + + # Drill down into the dictionary to where the interfaces really are. 
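+    # The parsed JSON is nested roughly like:
+    #   {"configuration": {"interfaces": {"interface": [{"name": "eth1", ...}, ...]}}}
+    # so the list of per-interface dictionaries sits three levels down.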
+ interfaces = intf_data["configuration"]["interfaces"]["interface"] + for intf in interfaces: + # Now we will look at each interface. The data we are looking at right now looks simiar to: + # + # eth1 { + # description foobar; + # unit 0 { + # family inet { + # address 172.17.1.17/31; + # } + # } + # unit 100 { + # description foo; + # vlan-id 100; + # family inet { + # address 198.51.100.2/24; + # } + # } + # unit 200 { + # description foo; + # vlan-id 200; + # family inet { + # address 192.0.2.2/24; + # } + # } + # + # For each interface, add the name and description (if it exists) to our own result. + # Also add an empty list for subinterfaces, which we will populate next. + data = { + "name": intf["name"], + "description": intf["description"] if "description" in intf.keys() else "", + "sub_ints": [], + } + # Drill down more into the interface and look at its subinterfaces. + for sub_int in intf["unit"]: + # Create the "full name" based off the unit number that we see. + # Ex. "100" becomes "eth1.100" + name = f"{intf['name']}.{sub_int['name']}" + + # Add the description to our subinterface data, if it exists. + description = ( + sub_int["description"] if "description" in sub_int.keys() else "" + ) + # Add the vlan id to our subinterface data, if it exists. + vlan_id = sub_int["vlan-id"] if "vlan-id" in sub_int.keys() else "" + + # Now, extract the IP address + # We will assume there is only a single IPv4 address configured. + addr = sub_int["family"]["inet"]["address"][0]["name"] + + # Use Python's ipaddress module to read our string into a sophisticated + # IPv4_Interface object. This lets us do cool things. + addr = ipaddress.ip_interface(addr) + + # The cool thing we do: It automatically converts our /24 to 255.255.255.0. + # (We won't code the conversions ourself, that's what this is for) + ip, mask = addr.with_netmask.split("/") + + # If the unit is 0, add our collected data to the top-level interface (ex. eth1). + # We do this instead of adding it as "eth1.0" to the subinterfaces. + # This keeps our behavior consistent among different vendors. + if str(name) == "0": + data.update({"ip_address": ip, "subnet_mask": mask, "vlan": vlan_id}) + + # If it isn't unit 0, then add a subinterface to our list. + else: + data["sub_ints"].append( + { + "name": name, + "description": description, + "vlan": vlan_id, + "ip_address": ip, + "subnet_mask": mask, + } + ) + # Add all data about this interface into our result to send later. + # Then, move to the next interface. + result.append(data) + + return result + + +def get_cisco_hostname(connection: netmiko.ConnectHandler): + # Run "show run hostname" and collect the output. + output = connection.send_command("show run hostname") + + # Ex. Turn "hostname cisco1" into "cisco1" and return. + return output.split()[-1] + + +def get_cisco_version(connection: netmiko.ConnectHandler): + # Run "show version | i ^ Version" and collect the output. + output = connection.send_command("show version | i ^ Version") + + # Ex. Turn the outputted " Version : 7.9.1" into "7.9.1" and return. + return output.split()[-1] + + +def get_cisco_bgp_peers(connection: netmiko.ConnectHandler, md5_key=""): + # Run "show ip bgp summary" and get the IPs of all peers. + command = "show ip bgp summary" + + # Create an empty list to store our data. + result = [] + + # Send our command and get the output. + # Our output will be pre-formatted because are turning TextFSM on. 
+ # TextFSM understands what the output on th router will look like, since + # we are running a command it supports. + bgp_neighbors = connection.send_command(command, use_textfsm=True) + + for peer in bgp_neighbors: + # Add the IP addresses to our data that we're collecting. + try: + result.append({"remote_address": peer["bgp_neigh"], "md5_key": md5_key}) + + # If BGP is not running, the router will print something like, + # "% BGP instance 'default' not active" + # Netmiko sees "% " and knows an error happened. + # Catch this error so we can choose just to log and ignore this device. + except TypeError: + # This 'replace' turns the carriage returns in the raw output into a single-lined string. + # We don't want that in our logs. + flattened_output = bgp_neighbors.replace("\n", "\\n") + logging.info( + f'Cannot format output for "{command}". BGP may not be running? Raw output:{flattened_output}' + ) + return [] + return result + + +def get_cisco_interfaces(connection: netmiko.ConnectHandler): + # For interface configuration on Cisco devices, we can use the "ciscoconfparse" module, since + # TextFSM doesn't support our command. + # + # We can search and extract blocks of configuration like this, getting only the interfaces + # we care about by using the right CiscoConfParse functions. + # + # interface GigabitEthernet0/0/0/1.100 + # description bar to foo + # ipv4 address 198.51.100.1 255.255.255.0 + # encapsulation dot1q 100 + # ! + # + # After cleaning up the output (like removing extra spaces), we can format like so: + # + # { + # "name": "Gi0/0/0/1", + # "description": "Some customer connects here!", + # "vlan": "100", + # "ip_address": "10.0.0.1", + # "subnet_mask": "255.255.255.0" + # } + + # Create an empty dictionary to store our interfaces as we discover them. + interfaces = {} + + # Create an empty list to store subinterfaces as we discover them, and we'll + # nest them in the appropriate parent interfaces later. + sub_interfaces = [] + + # Get the output for "show run". This will be raw and unformatted. + cisco_config = connection.send_command("show run") + + # Turn this giant singular string of output into a list of lines. + parser = CiscoConfParse(cisco_config.split("\n")) + + # parser.find_objects('^interface .*') will automatically make a list of all + # lines that start with "interface " that we can iterate over. + # It's also nice because it stores that interface's configuration with it. + for intf in parser.find_objects("^interface .*"): + # Get the name by converting "interface GigabitEthernet0/1" to "GigabitEthernet0/1". + intf_name = intf.text.split()[-1] + + # Find the "description" line, and extract. Ex. Turn "description hello!" into "hello!" + intf_description = intf.re_search_children("^ description ") + if intf_description: + tmp = intf_description[0].text.strip() + intf_description = " ".join(tmp.split()[1:]) + else: + # If description doesn't exist, just use an empty string. + intf_description = "" + + # Extract the vlan id. Ex. turn "encapsulation dot1q 100" into "100". + intf_vlan = intf.re_search_children("^ encapsulation dot1q ") + intf_vlan = intf_vlan[0].text.split()[-1] if intf_vlan else "" + + # Extract the IP address and mask. + # Ex. Turn "ipv4 address 10.10.10.1 255.255.255.0" into two separate stringsm + # one in our 'ip' variable and the other in our 'mask' variable. 
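+        # (As written, only the IP ends up populated for Cisco interfaces; 'mask'
+        # stays an empty string, which is why the Cisco entries in answer.json have
+        # a blank subnet_mask.)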
+ raw_ipmask = intf.re_search_children("^ ipv4 address ") + ip, mask = raw_ipmask[0].text.split()[-2] if raw_ipmask else "", "" + + # Take all the interface config we collected and put it into a nicely-formatted dictionary. + data = { + "name": intf_name, + "description": intf_description, + "vlan": intf_vlan, + "ip_address": ip, + "subnet_mask": mask, + } + + # If it's a subinterface, put in the 'sub_interfaces' list to store later. + if "." in intf_name: + sub_interfaces.append(data) + # Otherwise, put it in our top-most 'interfaces' dictionary. + else: + data["sub_ints"] = [] + interfaces[intf_name] = data + + # Finally we are done going through our interfaces. + # Lets go back and sort all our subinterfaces into their parents. + for i in sub_interfaces: + parent_intf = i["name"].split(".")[0] + interfaces[parent_intf]["sub_ints"].append(i) + + # Return our interfaces. + return list(interfaces.values()) + + +if __name__ == "__main__": + main() diff --git a/1-reading-network-configuration/answers/exercise2/2_add_customers_to_interfaces.py b/1-reading-network-configuration/answers/exercise2/2_add_customers_to_interfaces.py new file mode 100644 index 0000000..f219199 --- /dev/null +++ b/1-reading-network-configuration/answers/exercise2/2_add_customers_to_interfaces.py @@ -0,0 +1,102 @@ +import csv +import json + +# Configure a global variables to store certain things. +# (Don't try this at home) +DEVICES_FILENAME = "devices.json" +CUSTOMERS_FILENAME = "customer_interfaces.csv" +OUTPUT_FILENAME = "answer.json" + + +def main(): + with open(DEVICES_FILENAME) as f: + # First, let's read in our previous data from JSON to a Python Dictionary. + devices = json.load(f) + if not devices: + raise ValueError(f"File {DEVICES_FILENAME} is empty!") + + with open(CUSTOMERS_FILENAME) as f: + # Next, we'll read in our customer data. + # + # This creates a list containing dictionaries. + # This will take a line like the following: + # cisco1,Gi0/0/0/1.100,Acme Co. + # + # And add it to the a list, which will look like: + # [{"device": "cisco1", "interface": "Gi0/0/0/1.100", "customer": "Acme Co."}, {...}] + # + # We can access the customer of the first interface like so: + # some_customer = mylist[0]["customer"] + + # Create an empty list where we can store the rows we read. + customer_interfaces = [] + + # The csv.DictReader reader will automatically associate each row + # with the CSV headers, like "customer" or "device". + csv_reader = csv.DictReader(f) + for row in csv_reader: + customer_interfaces.append(row) + + # If we didn't see anything in the customers file, raise an exception. + if not customer_interfaces: + raise ValueError(f"File {CUSTOMERS_FILENAME} is empty!") + + # Now that we have both our device data and customer data, + # let's blend the data together. + for row in customer_interfaces: + # First, find the device. + device_dict = find_device(devices, row["device"]) + + # Then, find the interface within that device. + interface_dict = find_interface(device_dict, row["interface"]) + + # Finally, at the customer into the interface we found. + interface_dict["customer"] = row["customer"] + + # We've now added customer names to each of our interfaces. + # Let's print our new data for good measure, and then store it + # in our new file. + print(json.dumps(devices, indent=4)) + with open(OUTPUT_FILENAME, "w") as f: + json.dump(devices, f, indent=4) + + """ Done! 
""" + + +##### +# Helper functions +##### + + +def find_device(devices, device_name): + for device in devices: + # If the name matches, the device was found. Return. + if device["name"] == device_name: + return device + + raise ValueError(f"Could not find device {device_name}!") + + +def find_interface(device, interface_name): + is_sub_int = False + if "." in interface_name: + is_sub_int = True + + for intf in device["interfaces"]: + # If we know it is a sub interface, then we know + # we need to go deeper. + if is_sub_int: + for sub_int in intf["sub_ints"]: + if interface_name == sub_int["name"]: + return sub_int + + # If it isn't a sub interface, stay at the top. + else: + if interface_name == intf["name"]: + return intf + + raise ValueError(f"Could not find interface {interface_name} on device {device}!") + + +if __name__ == "__main__": + main() diff --git a/1-reading-network-configuration/answers/exercise2/answer.json b/1-reading-network-configuration/answers/exercise2/answer.json new file mode 100644 index 0000000..6b77b0b --- /dev/null +++ b/1-reading-network-configuration/answers/exercise2/answer.json @@ -0,0 +1,177 @@ +[ + { + "name": "cisco1", + "ip": "172.16.1.2", + "platform": "cisco_xr", + "version": "7.9.1", + "peers": [ + { + "remote_address": "198.51.100.2", + "md5_key": "foobar" + } + ], + "interfaces": [ + { + "name": "Loopback1", + "description": "PEER_A_NETWORK", + "vlan": "", + "ip_address": "10.0.1.1", + "subnet_mask": "", + "sub_ints": [], + "customer": "Acme Corporation" + }, + { + "name": "MgmtEth0/RP0/CPU0/0", + "description": "", + "vlan": "", + "ip_address": "172.16.1.2", + "subnet_mask": "", + "sub_ints": [], + "customer": "Beta Industries" + }, + { + "name": "GigabitEthernet0/0/0/0", + "description": "NOT_IN_USE", + "vlan": "", + "ip_address": "", + "subnet_mask": "", + "sub_ints": [], + "customer": "Gamma Enterprises" + }, + { + "name": "GigabitEthernet0/0/0/1", + "description": "foobar", + "vlan": "", + "ip_address": "172.17.1.16", + "subnet_mask": "", + "sub_ints": [ + { + "name": "GigabitEthernet0/0/0/1.100", + "description": "bar to foo", + "vlan": "100", + "ip_address": "198.51.100.1", + "subnet_mask": "", + "customer": "Epsilon Electronics" + }, + { + "name": "GigabitEthernet0/0/0/1.200", + "description": "foo to biz", + "vlan": "200", + "ip_address": "192.0.2.1", + "subnet_mask": "", + "customer": "Zeta Zoological" + } + ], + "customer": "Delta Dynamics" + }, + { + "name": "GigabitEthernet0/0/0/2", + "description": "NOT_IN_USE", + "vlan": "", + "ip_address": "", + "subnet_mask": "", + "sub_ints": [], + "customer": "Eta Enterprises" + } + ] + }, + { + "name": "cisco2", + "ip": "172.16.1.3", + "platform": "cisco_xr", + "version": "7.9.1", + "peers": [], + "interfaces": [ + { + "name": "MgmtEth0/RP0/CPU0/0", + "description": "", + "vlan": "", + "ip_address": "172.16.1.3", + "subnet_mask": "", + "sub_ints": [] + }, + { + "name": "GigabitEthernet0/0/0/0", + "description": "", + "vlan": "", + "ip_address": "", + "subnet_mask": "", + "sub_ints": [] + }, + { + "name": "GigabitEthernet0/0/0/1", + "description": "", + "vlan": "", + "ip_address": "", + "subnet_mask": "", + "sub_ints": [] + }, + { + "name": "GigabitEthernet0/0/0/2", + "description": "", + "vlan": "", + "ip_address": "", + "subnet_mask": "", + "sub_ints": [] + } + ] + }, + { + "name": "juniper1", + "ip": "172.16.1.4", + "platform": "juniper_junos", + "version": "23.1R1.8", + "peers": [ + { + "remote_address": "198.51.100.1", + "md5_key": "foobar" + } + ], + "interfaces": [ + { + "name": "eth1", + 
"description": "foobar", + "sub_ints": [ + { + "name": "eth1.0", + "description": "", + "vlan": "", + "ip_address": "172.17.1.17", + "subnet_mask": "255.255.255.254", + "customer": "Theta Technologies" + }, + { + "name": "eth1.100", + "description": "foo", + "vlan": 100, + "ip_address": "198.51.100.2", + "subnet_mask": "255.255.255.0", + "customer": "Iota Innovations" + }, + { + "name": "eth1.200", + "description": "foo", + "vlan": 200, + "ip_address": "192.0.2.2", + "subnet_mask": "255.255.255.0", + "customer": "Kappa Kinetics" + } + ] + }, + { + "name": "eth2", + "description": "", + "sub_ints": [ + { + "name": "eth2.0", + "description": "EXAMPLE_NETWORK", + "vlan": "", + "ip_address": "10.0.2.1", + "subnet_mask": "255.255.255.0", + "customer": "Lambda Labs" + } + ] + } + ] + } +] \ No newline at end of file diff --git a/1-reading-network-configuration/blah.py b/1-reading-network-configuration/blah.py new file mode 100644 index 0000000..7ae77fb --- /dev/null +++ b/1-reading-network-configuration/blah.py @@ -0,0 +1,153 @@ +# pip install --user textfsm +# pip install --user netmiko +import json +import netmiko +import ipaddress + +bgp_md5_key = "foobar" +hosts = [ + { + "host": "172.16.3.2", + "device_type": "cisco_xr", + "username": "clab", + "password": "clab@123", + }, + { + "host": "172.16.3.3", + "device_type": "cisco_xr", + "username": "clab", + "password": "clab@123", + }, + { + "host": "172.16.3.4", + "device_type": "juniper_junos", + "username": "clab", + "password": "clab123", + }, +] + + +def main(): + result = {"hosts": []} + + for host in hosts: + parsed_data = {} + + connection = netmiko.ConnectHandler(**host) + + try: + if host["device_type"] == "cisco_xr": + print(get_cisco_hostname(connection)) + print(get_cisco_version(connection)) + print(get_cisco_bgp_peers(connection, md5_key=bgp_md5_key)) + print(get_cisco_interfaces(connection)) + + elif host["device_type"] == "juniper_junos": + print(get_junos_hostname(connection)) + print(get_junos_version(connection)) + print(get_junos_bgp_peers(connection, md5_key=bgp_md5_key)) + print(get_junos_interfaces(connection)) + + else: + raise Exception(f"Device type {host['device_type']} not recognized.") + + except Exception: + print(f"Errored on host: {host}!") + raise + finally: + connection.disconnect() + + +def get_junos_hostname(connection: netmiko.ConnectHandler): + output = json.loads(connection.send_command("show version | display json")) + hostname = output["software-information"][0]["host-name"][0]["data"] + return hostname + + +def get_junos_version(connection: netmiko.ConnectHandler): + output = json.loads(connection.send_command("show version | display json")) + version = output["software-information"][0]["junos-version"][0]["data"] + return version + + +def get_junos_bgp_peers(connection: netmiko.ConnectHandler, md5_key=""): + result = [] + bgp_data = json.loads(connection.send_command("show bgp neighbor | display json")) + + list_of_peers = bgp_data["bgp-information"][0]["bgp-peer"] + + for peer in list_of_peers: + peer_ip_and_port = peer["peer-address"][0]["data"] + ip = peer_ip_and_port.split("+")[0] + result.append({"remote_address": ip, "md5_key": md5_key}) + + return result + + +def get_junos_interfaces(connection: netmiko.ConnectHandler): + result = [] + intf_data = json.loads( + connection.send_command("show configuration interfaces | display json") + ) + interfaces = intf_data["configuration"]["interfaces"]["interface"] + for intf in interfaces: + data = { + "name": intf["name"], + "description": 
intf["description"] if "description" in intf.keys() else "", + "sub_ints": [], + } + for sub_int in intf["unit"]: + name = sub_int["name"] + description = ( + sub_int["description"] if "description" in sub_int.keys() else "" + ) + vlan_id = sub_int["vlan-id"] if "vlan-id" in sub_int.keys() else "" + + # We will assume there is only a single IPv4 address configured. + addr = sub_int["family"]["inet"]["address"][0]["name"] + addr = ipaddress.ip_interface(addr) + ip, mask = addr.with_netmask.split("/") + + if str(name) == "0": + data.update({"ip_address": ip, "subnet_mask": mask, "vlan": vlan_id}) + else: + data["sub_ints"].append( + { + "name": name, + "description": description, + "vlan": vlan_id, + "ip_address": ip, + "subnet_mask": mask, + } + ) + result.append(data) + + return result + + +def get_cisco_hostname(connection: netmiko.ConnectHandler): + output = connection.send_command("show run hostname").split()[-1] + return output + + +def get_cisco_version(connection: netmiko.ConnectHandler): + output = connection.send_command("show version | i ^ Version") + return output.split()[-1] + + +def get_cisco_bgp_peers(connection: netmiko.ConnectHandler, md5_key=""): + result = [] + bgp_neighbors = connection.send_command("show ip bgp summary", use_textfsm=True) + for peer in bgp_neighbors: + result.append({"remote_address": peer["bgp_neigh"], "md5_key": md5_key}) + return result + + +def get_cisco_interfaces(connection: netmiko.ConnectHandler): + # TODO + intf_data = connection.send_command("show interfaces", use_textfsm=True) + return intf_data + + +if __name__ == "__main__": + main() diff --git a/1-reading-network-configuration/customer_ports.json b/1-reading-network-configuration/customer_ports.json new file mode 100644 index 0000000..e05aa3c --- /dev/null +++ b/1-reading-network-configuration/customer_ports.json @@ -0,0 +1,17 @@ +{ + "cisco1": { + "Loopback1": "Acme Co.", + "MgmtEth0/RP0/CPU0/0": "Globex Corp.", + "GigabitEthernet0/0/0/0": "Initech", + "GigabitEthernet0/0/0/1": "Hooli", + "GigabitEthernet0/0/0/1.100": "Wonka Industries", + "GigabitEthernet0/0/0/1.200": "Stark Industries", + "GigabitEthernet0/0/0/2": "Acme Co." + }, + "juniper1": { + "eth1.0": "Monsters, Inc.", + "eth1.100": "Umbrella Corporation", + "eth1.200": "Wayne Enterprises", + "eth2.0": "Dunder Mifflin" + } +} \ No newline at end of file diff --git a/1-reading-network-configuration/exercise2/1_create_network_structured_data.py b/1-reading-network-configuration/exercise2/1_create_network_structured_data.py new file mode 100644 index 0000000..1db8731 --- /dev/null +++ b/1-reading-network-configuration/exercise2/1_create_network_structured_data.py @@ -0,0 +1,410 @@ +# pip install --user pyyaml # TODO +# pip install --user textfsm # TODO +# pip install --user netmiko # TODO +# pip install --user ciscoconfparse # TODO +import json +import yaml +import csv +import os +import netmiko +import ipaddress +from copy import deepcopy +from ciscoconfparse import CiscoConfParse + +# Configure logging so it goes to a .log file next to this script. +import logging + +this_script_dir = os.path.dirname(os.path.realpath(__file__)) +log_file = f"{this_script_dir}/log/exercise2.log" +logging.basicConfig( + filename=log_file, encoding="utf-8", level=logging.DEBUG, filemode="w" +) + +# Configure a global variables to store things like +# our known BGP key. 
+# (Don't try this at home) +BGP_MD5_KEY = "foobar" +INPUT_FILENAME = "hosts.yaml" +OUTPUT_FILENAME = "devices.json" + + +# The real script +def main(): + with open(INPUT_FILENAME) as f: + # This creates a list of dictionaries from a YAML file. + # + # This will take the YAML file that looks similar to the following: + # - host: "10.0.0.1" + # device_type: "cisco_xr" + # username: "root" + # password: "password" + # - host: "10.0.0.2 + # {...} + + # And create a dictionary, looking like the following: + # [{"host": "10.0.0.1", "device_type": "cisco_xr", "username": "root", "password": "password"}, {...}] + # + # We can access the IP address of the first host like so: + # first_host_ip = mylist[0]["host"] + + # YAML is convenient, and only a single line of code is required. + hosts = yaml.safe_load(f) + + # Now we are in the meat of it. Let's look at each host. + parsed_data = [] + for host in hosts: + # Create the Netmiko connection with our host. + # Under the hood, "**host" looks like this: + # { + # host: "172.16.1.2" + # device_type: "cisco_xr" + # username: "clab" + # password: "clab@123" + # } + connection = netmiko.ConnectHandler(**host) + + try: + # We need the name, ip, platform, version, BGP peers, and interfaces of our host. + # The functions that are called to obtain these are different based on if we are + # on a Cisco or Juniper device. We know ahead of time which hosts are going to + # which OS, so we will look and then collect data based on it. + if host["device_type"] == "fill me in!": # TODO + # Call each helper function from below to collect the data. + data = { + "name": get_cisco_hostname(connection), + "ip": host["host"], + "platform": host["device_type"], + "version": get_cisco_version(connection), + "peers": get_cisco_bgp_peers(connection, md5_key=BGP_MD5_KEY), + "interfaces": get_cisco_interfaces(connection), + } + + elif host["device_type"] == "fill me in!": # TODO + # Call each helper function from below to collect the data. + data = { + "name": get_junos_hostname(connection), + "ip": host["host"], + "platform": host["device_type"], + "version": get_junos_version(connection), + "peers": get_junos_bgp_peers(connection, md5_key=BGP_MD5_KEY), + "interfaces": get_junos_interfaces(connection), + } + + # If the host is neither our Cisco nor Juniper OSs, then cause an error. + else: + raise Exception(f"Device type {host['device_type']} not recognized.") + + # We are done with this host, let's add it to the data. + parsed_data.append(data) + print(json.dumps(data, indent=4)) + + # If anything wrong happens that causes an Exception, the script will move to this line. + # It will print out the host that we errored on, and then containue raising the exception. + except Exception: + print(f"Errored on host: {host}!") + raise + + # "finally" means that we will run this line, no matter what. + # We want to make sure we close our SSH connections whether things work, or not, + # otherwise, they will become stale and take up TTYs on the routers. + finally: + connection.disconnect() + + # Put our data into a file as a JSON. + with open(OUTPUT_FILENAME, "w") as f: + json.dump(parsed_data, f, indent=4) + + """ Done! """ + + +##### +# Helper functions +##### + + +def get_junos_hostname(connection: netmiko.ConnectHandler): + # Extract the hostname from running "show version | display json" + # Juniper makes this easy since it will already be in JSON format. + + # Run the command, and convert the JSON output into a Python dictionary. 
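+    # The part of the reply we care about is nested roughly like:
+    #   {"software-information": [{"host-name": [{"data": "juniper1"}], ...}]}
+    # (truncated example; only the keys indexed below matter here).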
+ output = json.loads(connection.send_command("show version | display json")) + # Read the dictionary to pull out the hostname value. + hostname = output["software-information"][0]["host-name"][0]["data"] + return hostname + + +def get_junos_version(connection: netmiko.ConnectHandler): + # Extract the version number from "show version | display json" + # Juniper makes this easy since it will already be in JSON format. + + # Run the command, and convert the JSON output into a Python dictionary. + output = json.loads(connection.send_command("show version | display json")) + # Read the dictionary to pull out the version number value. + version = output["software-information"][0]["junos-version"][0]["data"] + return version + + +def get_junos_bgp_peers(connection: netmiko.ConnectHandler, md5_key=""): + # Extract the BGP peers from running "show bgp neighbor | display json" + # Juniper makes this easy since it will already be in JSON format. + + # Create an empty list to store our data in. + result = [] + + # Run the command, and convert the JSON output into a Python dictionary. + bgp_data = json.loads(connection.send_command("show bgp neighbor | display json")) + + # Read the dictionary to pull out the list of BGP peers. + list_of_peers = bgp_data["bgp-information"][0]["bgp-peer"] + + # Iterate over each peer + for peer in list_of_peers: + # Extract the IP and port number for each BGP peer. + # It will look like "10.10.10.1+12345" + peer_ip_and_port = peer["peer-address"][0]["data"] + + # Manipulate the string and get just the IP. + ip = peer_ip_and_port.split("+")[0] + + # Add it to our "result" list that we will return at the end. + result.append({"remote_address": ip, "md5_key": md5_key}) + + return result + + +def get_junos_interfaces(connection: netmiko.ConnectHandler): + # Create a detailed dictionary of all interfaces and their configuration + # using "show configuration interfaces | display json" + # Juniper makes this easy since it will already be in JSON format. + + # Create an empty list to store our data in. + result = [] + + # Run the command, and convert the JSON output into a Python dictionary. + intf_data = json.loads( + connection.send_command("show configuration interfaces | display json") + ) + + # Drill down into the dictionary to where the interfaces really are. + interfaces = intf_data["configuration"]["interfaces"]["interface"] + for intf in interfaces: + # Now we will look at each interface. The data we are looking at right now looks simiar to: + # + # eth1 { + # description foobar; + # unit 0 { + # family inet { + # address 172.17.1.17/31; + # } + # } + # unit 100 { + # description foo; + # vlan-id 100; + # family inet { + # address 198.51.100.2/24; + # } + # } + # unit 200 { + # description foo; + # vlan-id 200; + # family inet { + # address 192.0.2.2/24; + # } + # } + # + # For each interface, add the name and description (if it exists) to our own result. + # Also add an empty list for subinterfaces, which we will populate next. + data = { + "name": intf["name"], + "description": intf["description"] if "description" in intf.keys() else "", + "sub_ints": [], + } + # Drill down more into the interface and look at its subinterfaces. + for sub_int in intf["unit"]: + # Create the "full name" based off the unit number that we see. + # Ex. "100" becomes "eth1.100" + name = f"{intf['name']}.{sub_int['name']}" + + # Add the description to our subinterface data, if it exists. 
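+            # (sub_int.get("description", "") would be an equivalent, more compact
+            # way to express this membership check.)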
+ description = ( + sub_int["description"] if "description" in sub_int.keys() else "" + ) + # Add the vlan id to our subinterface data, if it exists. + vlan_id = sub_int["vlan-id"] if "vlan-id" in sub_int.keys() else "" + + # Now, extract the IP address + # We will assume there is only a single IPv4 address configured. + addr = sub_int["family"]["inet"]["address"][0]["name"] + + # Use Python's ipaddress module to read our string into a sophisticated + # IPv4_Interface object. This lets us do cool things. + addr = ipaddress.ip_interface(addr) + + # The cool thing we do: It automatically converts our /24 to 255.255.255.0. + # (We won't code the conversions ourself, that's what this is for) + ip, mask = addr.with_netmask.split("/") + + # If the unit is 0, add our collected data to the top-level interface (ex. eth1). + # We do this instead of adding it as "eth1.0" to the subinterfaces. + # This keeps our behavior consistent among different vendors. + if str(name) == "0": + data.update({"ip_address": ip, "subnet_mask": mask, "vlan": vlan_id}) + + # If it isn't unit 0, then add a subinterface to our list. + else: + data["sub_ints"].append( + { + "name": name, + "description": description, + "vlan": vlan_id, + "ip_address": ip, + "subnet_mask": mask, + } + ) + # Add all data about this interface into our result to send later. + # Then, move to the next interface. + result.append(data) + + return result + + +def get_cisco_hostname(connection: netmiko.ConnectHandler): + # Run "show run hostname" and collect the output. + output = connection.send_command("show run hostname") + + # Ex. Turn "hostname cisco1" into "cisco1" and return. + return output.split()[-1] + + +def get_cisco_version(connection: netmiko.ConnectHandler): + # Run "show version | i ^ Version" and collect the output. + output = connection.send_command("show version | i ^ Version") + + # Ex. Turn the outputted " Version : 7.9.1" into "7.9.1" and return. + return output.split()[-1] + + +def get_cisco_bgp_peers(connection: netmiko.ConnectHandler, md5_key=""): + # Run "show ip bgp summary" and get the IPs of all peers. + command = "show ip bgp summary" + + # Create an empty list to store our data. + result = [] + + # Send our command and get the output. + # Our output will be pre-formatted because are turning TextFSM on. + # TextFSM understands what the output on th router will look like, since + # we are running a command it supports. + bgp_neighbors = connection.send_command(command, use_textfsm=True) + + for peer in bgp_neighbors: + # Add the IP addresses to our data that we're collecting. + try: + result.append({"remote_address": peer["bgp_neigh"], "md5_key": md5_key}) + + # If BGP is not running, the router will print something like, + # "% BGP instance 'default' not active" + # Netmiko sees "% " and knows an error happened. + # Catch this error so we can choose just to log and ignore this device. + except TypeError: + # This 'replace' turns the carriage returns in the raw output into a single-lined string. + # We don't want that in our logs. + flattened_output = bgp_neighbors.replace("\n", "\\n") + logging.info( + f'Cannot format output for "{command}". BGP may not be running? Raw output:{flattened_output}' + ) + return [] + return result + + +def get_cisco_interfaces(connection: netmiko.ConnectHandler): + # For interface configuration on Cisco devices, we can use the "ciscoconfparse" module, since + # TextFSM doesn't support our command. 
+ # + # We can search and extract blocks of configuration like this, getting only the interfaces + # we care about by using the right CiscoConfParse functions. + # + # interface GigabitEthernet0/0/0/1.100 + # description bar to foo + # ipv4 address 198.51.100.1 255.255.255.0 + # encapsulation dot1q 100 + # ! + # + # After cleaning up the output (like removing extra spaces), we can format like so: + # + # { + # "name": "Gi0/0/0/1", + # "description": "Some customer connects here!", + # "vlan": "100", + # "ip_address": "10.0.0.1", + # "subnet_mask": "255.255.255.0" + # } + + # Create an empty dictionary to store our interfaces as we discover them. + interfaces = {} + + # Create an empty list to store subinterfaces as we discover them, and we'll + # nest them in the appropriate parent interfaces later. + sub_interfaces = [] + + # Get the output for "show run". This will be raw and unformatted. + cisco_config = connection.send_command("show run") + + # Turn this giant singular string of output into a list of lines. + parser = CiscoConfParse(cisco_config.split("\n")) + + # parser.find_objects('^interface .*') will automatically make a list of all + # lines that start with "interface " that we can iterate over. + # It's also nice because it stores that interface's configuration with it. + for intf in parser.find_objects("^interface .*"): + # Get the name by converting "interface GigabitEthernet0/1" to "GigabitEthernet0/1". + intf_name = intf.text.split()[-1] + + # Find the "description" line, and extract. Ex. Turn "description hello!" into "hello!" + intf_description = intf.re_search_children("^ description ") + if intf_description: + tmp = intf_description[0].text.strip() + intf_description = " ".join(tmp.split()[1:]) + else: + # If description doesn't exist, just use an empty string. + intf_description = "" + + # Extract the vlan id. Ex. turn "encapsulation dot1q 100" into "100". + intf_vlan = intf.re_search_children("^ encapsulation dot1q ") + intf_vlan = intf_vlan[0].text.split()[-1] if intf_vlan else "" + + # Extract the IP address and mask. + # Ex. Turn "ipv4 address 10.10.10.1 255.255.255.0" into two separate stringsm + # one in our 'ip' variable and the other in our 'mask' variable. + raw_ipmask = intf.re_search_children("^ ipv4 address ") + ip, mask = raw_ipmask[0].text.split()[-2] if raw_ipmask else "", "" + + # Take all the interface config we collected and put it into a nicely-formatted dictionary. + data = { + "name": intf_name, + "description": intf_description, + "vlan": intf_vlan, + "ip_address": ip, + "subnet_mask": mask, + } + + # If it's a subinterface, put in the 'sub_interfaces' list to store later. + if "." in intf_name: + sub_interfaces.append(data) + # Otherwise, put it in our top-most 'interfaces' dictionary. + else: + data["sub_ints"] = [] + interfaces[intf_name] = data + + # Finally we are done going through our interfaces. + # Lets go back and sort all our subinterfaces into their parents. + for i in sub_interfaces: + parent_intf = i["name"].split(".")[0] + interfaces[parent_intf]["sub_ints"].append(i) + + # Return our interfaces. 
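+    # 'interfaces' is keyed by interface name, so returning just its values gives a
+    # plain list of interface dictionaries, the same shape the Juniper helper
+    # produces.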
+ return list(interfaces.values()) + + +if __name__ == "__main__": + main() diff --git a/1-reading-network-configuration/exercise2/2_add_customers_to_interfaces.py b/1-reading-network-configuration/exercise2/2_add_customers_to_interfaces.py new file mode 100644 index 0000000..f219199 --- /dev/null +++ b/1-reading-network-configuration/exercise2/2_add_customers_to_interfaces.py @@ -0,0 +1,102 @@ +import csv +import json + +# Configure a global variables to store certain things. +# (Don't try this at home) +DEVICES_FILENAME = "devices.json" +CUSTOMERS_FILENAME = "customer_interfaces.csv" +OUTPUT_FILENAME = "answer.json" + + +def main(): + with open(DEVICES_FILENAME) as f: + # First, let's read in our previous data from JSON to a Python Dictionary. + devices = json.load(f) + if not devices: + raise ValueError(f"File {DEVICES_FILENAME} is empty!") + + with open(CUSTOMERS_FILENAME) as f: + # Next, we'll read in our customer data. + # + # This creates a list containing dictionaries. + # This will take a line like the following: + # cisco1,Gi0/0/0/1.100,Acme Co. + # + # And add it to the a list, which will look like: + # [{"device": "cisco1", "interface": "Gi0/0/0/1.100", "customer": "Acme Co."}, {...}] + # + # We can access the customer of the first interface like so: + # some_customer = mylist[0]["customer"] + + # Create an empty list where we can store the rows we read. + customer_interfaces = [] + + # The csv.DictReader reader will automatically associate each row + # with the CSV headers, like "customer" or "device". + csv_reader = csv.DictReader(f) + for row in csv_reader: + customer_interfaces.append(row) + + # If we didn't see anything in the customers file, raise an exception. + if not customer_interfaces: + raise ValueError(f"File {CUSTOMERS_FILENAME} is empty!") + + # Now that we have both our device data and customer data, + # let's blend the data together. + for row in customer_interfaces: + # First, find the device. + device_dict = find_device(devices, row["device"]) + + # Then, find the interface within that device. + interface_dict = find_interface(device_dict, row["interface"]) + + # Finally, at the customer into the interface we found. + interface_dict["customer"] = row["customer"] + + # We've now added customer names to each of our interfaces. + # Let's print our new data for good measure, and then store it + # in our new file. + print(json.dumps(devices, indent=4)) + with open(OUTPUT_FILENAME, "w") as f: + json.dump(devices, f, indent=4) + + """ Done! """ + + +##### +# Helper functions +##### + + +def find_device(devices, device_name): + for device in devices: + # If the name matches, the device was found. Return. + if device["name"] == device_name: + return device + + raise ValueError(f"Could not find device {device_name}!") + + +def find_interface(device, interface_name): + is_sub_int = False + if "." in interface_name: + is_sub_int = True + + for intf in device["interfaces"]: + # If we know it is a sub interface, then we know + # we need to go deeper. + if is_sub_int: + for sub_int in intf["sub_ints"]: + if interface_name == sub_int["name"]: + return sub_int + + # If it isn't a sub interface, stay at the top. 
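+        # (Top-level interface names never contain a ".", so comparing against the
+        # full name is enough here.)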
+ else: + if interface_name == intf["name"]: + return intf + + raise ValueError(f"Could not find interface {interface_name} on device {device}!") + + +if __name__ == "__main__": + main() diff --git a/1-reading-network-configuration/exercise2/README.md b/1-reading-network-configuration/exercise2/README.md new file mode 100644 index 0000000..ea958e6 --- /dev/null +++ b/1-reading-network-configuration/exercise2/README.md @@ -0,0 +1,205 @@ +# Exercise 2: Structured Data and Scraping the Network + + +### How to Complete + +To complete this exercise, all TODOs must be completed in both scripts and one input YAML: +- 1_create_network_structured_data.py +- 2_add_customers_to_interfaces.py +- hosts.yaml + +If done successfully, both scripts can be run in sequential order. They will output results as they run. + +``` +From the 2023-workshop-automation folder: + +cd 1-reading-network-configuration/exercise2 +python 1_create_network_structured_data.py +python 2_add_customers_to_interfaces.py +``` + + +### Answer + +The newly outputted `answers.json` will match the following, where {{x}} is your lab number: + +``` +[ + { + "name": "cisco1", + "ip": "172.16.{{x}}.2", + "platform": "cisco_xr", + "version": "7.9.1", + "peers": [ + { + "remote_address": "198.51.100.2", + "md5_key": "foobar" + } + ], + "interfaces": [ + { + "name": "Loopback1", + "description": "PEER_A_NETWORK", + "vlan": "", + "ip_address": "10.0.1.1", + "subnet_mask": "", + "sub_ints": [], + "customer": "Acme Corporation" + }, + { + "name": "MgmtEth0/RP0/CPU0/0", + "description": "", + "vlan": "", + "ip_address": "172.16.{{x}}.2", + "subnet_mask": "", + "sub_ints": [], + "customer": "Beta Industries" + }, + { + "name": "GigabitEthernet0/0/0/0", + "description": "NOT_IN_USE", + "vlan": "", + "ip_address": "", + "subnet_mask": "", + "sub_ints": [], + "customer": "Gamma Enterprises" + }, + { + "name": "GigabitEthernet0/0/0/1", + "description": "foobar", + "vlan": "", + "ip_address": "172.17.1.16", + "subnet_mask": "", + "sub_ints": [ + { + "name": "GigabitEthernet0/0/0/1.100", + "description": "bar to foo", + "vlan": "100", + "ip_address": "198.51.100.1", + "subnet_mask": "", + "customer": "Epsilon Electronics" + }, + { + "name": "GigabitEthernet0/0/0/1.200", + "description": "foo to biz", + "vlan": "200", + "ip_address": "192.0.2.1", + "subnet_mask": "", + "customer": "Zeta Zoological" + } + ], + "customer": "Delta Dynamics" + }, + { + "name": "GigabitEthernet0/0/0/2", + "description": "NOT_IN_USE", + "vlan": "", + "ip_address": "", + "subnet_mask": "", + "sub_ints": [], + "customer": "Eta Enterprises" + } + ] + }, + { + "name": "cisco2", + "ip": "172.16.{{x}}.3", + "platform": "cisco_xr", + "version": "7.9.1", + "peers": [], + "interfaces": [ + { + "name": "MgmtEth0/RP0/CPU0/0", + "description": "", + "vlan": "", + "ip_address": "172.16.{{x}}.3", + "subnet_mask": "", + "sub_ints": [] + }, + { + "name": "GigabitEthernet0/0/0/0", + "description": "", + "vlan": "", + "ip_address": "", + "subnet_mask": "", + "sub_ints": [] + }, + { + "name": "GigabitEthernet0/0/0/1", + "description": "", + "vlan": "", + "ip_address": "", + "subnet_mask": "", + "sub_ints": [] + }, + { + "name": "GigabitEthernet0/0/0/2", + "description": "", + "vlan": "", + "ip_address": "", + "subnet_mask": "", + "sub_ints": [] + } + ] + }, + { + "name": "juniper1", + "ip": "172.16.{{x}}.4", + "platform": "juniper_junos", + "version": "23.1R1.8", + "peers": [ + { + "remote_address": "198.51.100.1", + "md5_key": "foobar" + } + ], + "interfaces": [ + { + "name": "eth1", + 
"description": "foobar", + "sub_ints": [ + { + "name": "eth1.0", + "description": "", + "vlan": "", + "ip_address": "172.17.1.17", + "subnet_mask": "255.255.255.254", + "customer": "Theta Technologies" + }, + { + "name": "eth1.100", + "description": "foo", + "vlan": 100, + "ip_address": "198.51.100.2", + "subnet_mask": "255.255.255.0", + "customer": "Iota Innovations" + }, + { + "name": "eth1.200", + "description": "foo", + "vlan": 200, + "ip_address": "192.0.2.2", + "subnet_mask": "255.255.255.0", + "customer": "Kappa Kinetics" + } + ] + }, + { + "name": "eth2", + "description": "", + "sub_ints": [ + { + "name": "eth2.0", + "description": "EXAMPLE_NETWORK", + "vlan": "", + "ip_address": "10.0.2.1", + "subnet_mask": "255.255.255.0", + "customer": "Lambda Labs" + } + ] + } + ] + } +] +``` + diff --git a/1-reading-network-configuration/exercise2/customer_interfaces.csv b/1-reading-network-configuration/exercise2/customer_interfaces.csv new file mode 100644 index 0000000..6dfea76 --- /dev/null +++ b/1-reading-network-configuration/exercise2/customer_interfaces.csv @@ -0,0 +1,12 @@ +device,interface,customer +cisco1,Loopback1,Acme Corporation +cisco1,MgmtEth0/RP0/CPU0/0,Beta Industries +cisco1,GigabitEthernet0/0/0/0,Gamma Enterprises +cisco1,GigabitEthernet0/0/0/1,Delta Dynamics +cisco1,GigabitEthernet0/0/0/1.100,Epsilon Electronics +cisco1,GigabitEthernet0/0/0/1.200,Zeta Zoological +cisco1,GigabitEthernet0/0/0/2,Eta Enterprises +juniper1,eth1.0,Theta Technologies +juniper1,eth1.100,Iota Innovations +juniper1,eth1.200,Kappa Kinetics +juniper1,eth2.0,Lambda Labs diff --git a/1-reading-network-configuration/exercise2/hosts.yaml b/1-reading-network-configuration/exercise2/hosts.yaml new file mode 100644 index 0000000..cf69b15 --- /dev/null +++ b/1-reading-network-configuration/exercise2/hosts.yaml @@ -0,0 +1,12 @@ +- host: "" + device_type: "cisco_xr" + username: "clab" + password: "clab@123" +- host: "" + device_type: "cisco_xr" + username: "clab" + password: "clab@123" +- host: "" + device_type: "juniper_junos" + username: "clab" + password: "clab123" diff --git a/1-reading-network-configuration/exercise2/log/.gitkeep b/1-reading-network-configuration/exercise2/log/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/1-reading-network-configuration/my_hosts.yaml b/1-reading-network-configuration/my_hosts.yaml new file mode 100644 index 0000000..be43f58 --- /dev/null +++ b/1-reading-network-configuration/my_hosts.yaml @@ -0,0 +1,13 @@ +hosts: + - host: "172.16.3.2" + device_type: "cisco_xr" + username: "clab" + password: "clab@123" + - host": 172.16.3.3" + device_type: "cisco_xr" + username": "clab" + password: "clab@123" + - host: "172.16.3.4" + device_type": "juniper_junos" + username: "clab" + password: "clab123" diff --git a/1-reading-network-configuration/workshop_exercise/1_netmiko_show_interfaces_raw.py b/1-reading-network-configuration/workshop_exercise/1_netmiko_show_interfaces_raw.py new file mode 100644 index 0000000..cc43e79 --- /dev/null +++ b/1-reading-network-configuration/workshop_exercise/1_netmiko_show_interfaces_raw.py @@ -0,0 +1,26 @@ +# pip install --user netmiko +from netmiko import Netmiko + +username = "fill me in!" +password = "fill me in!" +device_type = "fill me in!" +hosts = ["x.x.x.x", "y.y.y.y"] +command_to_run = "show int brief" + +for host in hosts: + # Create a variable that represents an SSH connection to our router. 
+ connection = Netmiko( + username=username, password=password, device_type=device_type, ip=host + ) + + # Send a command to the router, and get back the raw output + raw_output = connection.send_command(command_to_run) + + # The "really raw" output has '\n' characters appear instead of a real carriage return. + # Converting them into carriage returns will make it a little more readable for this demo. + raw_output = raw_output.replace("\\n", "\n") + + print( + f"### This is the raw output from {host}, without any parsing: ###\n", + raw_output + "\n", + ) diff --git a/1-reading-network-configuration/workshop_exercise/2_netmiko_show_interfaces_textfsm.py b/1-reading-network-configuration/workshop_exercise/2_netmiko_show_interfaces_textfsm.py new file mode 100644 index 0000000..0d746bb --- /dev/null +++ b/1-reading-network-configuration/workshop_exercise/2_netmiko_show_interfaces_textfsm.py @@ -0,0 +1,31 @@ +# pip install --user textfsm +# pip install --user netmiko +import json +from netmiko import Netmiko +from pprint import pprint + +username = "fill me in!" +password = "fill me in!" +device_type = "fill me in!" +hosts = ["x.x.x.x", "y.y.y.y"] +command_to_run = "show int brief" + +for host in hosts: + # Create a variable that represents an SSH connection to our router. + connection = Netmiko( + username=username, + password=password, + device_type=device_type, + ip=host, + ) + + # Send a command to the router, and get back the output "dictionaried" by textfsm. + textfsm_output = connection.send_command(command_to_run, use_textfsm=True) + + print(f"### This is the TextFSM output from {host}: ###") + print(textfsm_output) + print("\n") # Add extra space between our outputs for each host + + print(f"### This is the TextFSM output from {host}, but JSON-formatted: ###") + print(json.dumps(textfsm_output, indent=4)) # indent for readability + print("\n") # Add extra space between our outputs for each host diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000..87fb382 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023 Shannon Byrnes, Maria Isabel Gandia, AJ Ragusa + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 0000000..105e5bd --- /dev/null +++ b/README.md @@ -0,0 +1,18 @@ +# Get Started With Network Automation! +### LONI Annual Summit, March 2024 + +This git repository includes all code and assets to be used as part of the workshop. + +In addition, please see the following link to view the slides. Following along with the slides is highly encouraged. + +[Google Slides](GetStartedWithNetworkAutomation_slides.pdf) + +### Workshop Abstract + +This is an introductory, hands-on workshop where participants will learn to automate network devices within a lab environment. Participants will learn about: +- Various network automation concepts. +- Communicating with network devices using Python. +- Popular, open source network automation tooling, such as Netmiko and NAPALM. +- Strategies on how to get started at their home institution. + +Previous experience with a Unix shell is recommended. 
Familiarity with Python is helpful, but not required. Bringing a laptop is required for hands-on activities. diff --git a/internal-lab-setup-assets/Containerfile b/internal-lab-setup-assets/Containerfile new file mode 100644 index 0000000..735bf48 --- /dev/null +++ b/internal-lab-setup-assets/Containerfile @@ -0,0 +1,43 @@ +# Slim Ubuntu container with python 3.9 and pip pre-installed +FROM python:3.9.16-slim-bullseye + +# Install additional packages +RUN apt-get update && apt-get install -y --no-install-recommends \ + sudo \ + locales-all \ + openssh-server \ + iproute2 \ + net-tools \ + iputils-ping \ + traceroute \ + pip \ + make \ + vim nano emacs + +# Create user account "lab" for workshop participant +RUN adduser --gecos "" clab && usermod -aG sudo clab + +# Extend PATH so all our new packages run easier +RUN export PATH=$PATH:/sbin:/usr/sbin + +# Set the user's password via environment variable. Passwords must be set +# to allow login. +# Note: These variable was pre-defined by the lab orchestrator +ENV CX23_LAB_PASSWORD= + +# Create an RSA key, required for SSH server. +RUN ssh-keygen -A + +# Pre-provision filepath required for SSH server. +RUN mkdir -p /run/sshd + +# Open port 22 for SSH +EXPOSE 22 + +# Prepare post-init script. +COPY ./workshop-init.sh /workshop-init.sh +COPY ./lab-makefile-for-bug /Makefile +RUN chmod +x /workshop-init.sh + +# Configure sshd as root process. If sshd terminates for some reason, the container will too. +ENTRYPOINT ["/workshop-init.sh"] diff --git a/internal-lab-setup-assets/Makefile b/internal-lab-setup-assets/Makefile new file mode 100644 index 0000000..ddeec01 --- /dev/null +++ b/internal-lab-setup-assets/Makefile @@ -0,0 +1,37 @@ +# .phony: gen + +FILES=$(wildcard workshop[0-9]*.clab.yml) + +clab-install: + dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo + dnf config-manager --add-repo https://yum.fury.io/netdevops/ + dnf update + dnf install docker-ce docker-ce-cli containerd.io -y + dnf install containerlab -y --nogpgcheck + +gen: clean + /srv/clab/venv/bin/python gen-topo.py + +deploy: gen + for name in workshop[0-9]*.clab.yml; do\ + containerlab deploy --reconfigure -t $${name};\ + # sleep 20;\ + done + +destroy: + for name in workshop[0-9]*.clab.yml; do\ + containerlab destroy --cleanup -t $${name};\ + # sleep 10;\ + done + rm -rf $(wildcard clab-automation-workshop-[0-9]*) + +inspect: + for name in workshop[0-9]*.clab.yml; do\ + containerlab inspect -t $${name};\ + done + +container: + docker build -t internet2/getting_started -f Containerfile . + +clean: + rm -rf $(FILES) diff --git a/internal-lab-setup-assets/README.md b/internal-lab-setup-assets/README.md new file mode 100644 index 0000000..603677e --- /dev/null +++ b/internal-lab-setup-assets/README.md @@ -0,0 +1,17 @@ +NOTE: This folder contains assets that were used to deploy the Containerlab environment that hosts 30 labs used in this workshop. It is included in this repo for any curious minds. + +Tune host settings https://hmntsharma.github.io/cisco-xrd/base_setup/#clone-the-xrd-tools-repository + +Set mgmt address in the config (instead of automatically assigning) as XRd has a bug and a container keeps its old address if it's been assigned a new address. containerlab is non-deterministic when assigning mgmt IPs. + +Review the `Makefile` in `Getting_Started` + +`make gen` to generate lab topologies based on `workshop.clab.yml.j2`. This currently creates 30 labs, set via `gen-topo.py`. +`make deploy` to start up all 30 labs. 
This will take some time but should complete without error. +`make destroy` to tear down all labs. +`make inspect` to output lab info (this show local IPs, not port bindings) +`make container` to build new lab container based on `Containerfile` + +You can also run `containerlab` commands directly but they need to be run as root or via `sudo`. + +You can probably use the output of `sudo containerlab inspect -t workshop1.clab.yml -f json` to create something similar to the `containerlab inspect` table output that displays the port bindings. i.e., use this to generate workshop instructions. diff --git a/internal-lab-setup-assets/startup-config/cisco1.conf b/internal-lab-setup-assets/startup-config/cisco1.conf new file mode 100644 index 0000000..9a11d06 --- /dev/null +++ b/internal-lab-setup-assets/startup-config/cisco1.conf @@ -0,0 +1,91 @@ +hostname {{ .ShortName }} +username clab + group root-lr + group cisco-support + secret 10 $6$7/293.lG/gI3....$qhqRPSKeGBilG47Ii/xlYF9xJVR1IK7bnw.7HHiVj4Aj8cb58bxiLAim8Xz.beUfJ6TQTP3vHUty3UO.4KmaL. +! +grpc + no-tls + address-family dual +! +line default + transport input ssh +! +call-home + service active + contact smart-licensing + profile CiscoTAC-1 + active + destination transport-method email disable + destination transport-method http + ! +! +netconf-yang agent + ssh +! +cdp +lldp +! +interface Loopback1 + description PEER_A_NETWORK + ipv4 address 10.0.1.1 255.255.255.0 +! +interface GigabitEthernet0/0/0/0 + description NOT_IN_USE + shutdown +! +interface GigabitEthernet0/0/0/1 + description foobar + ip address 172.17.1.16 255.255.255.254 +! +interface GigabitEthernet0/0/0/1.100 + description bar to foo + encapsulation dot1Q 100 + ip address 198.51.100.1 255.255.255.0 +! +interface GigabitEthernet0/0/0/1.200 + description foo to biz + encapsulation dot1Q 200 + ip address 192.0.2.1 255.255.255.0 +! +interface GigabitEthernet0/0/0/2 + description NOT_IN_USE + shutdown +! +route-policy PERMIT_ALL + pass +end-policy +! +router static + address-family ipv4 unicast + 0.0.0.0/0 MgmtEth0/RP0/CPU0/0 {{ .MgmtIPv4Gateway }} + ! + address-family ipv6 unicast + ::/0 MgmtEth0/RP0/CPU0/0 {{ .MgmtIPv6Gateway }} + ! +! +router bgp 64500 + bgp router-id 198.51.100.1 + address-family ipv4 unicast + redistribute connected + ! + neighbor-group PEER_B + remote-as 64501 + ebgp-multihop 2 + password encrypted 15140403062B39 + update-source GigabitEthernet0/0/0/1.100 + address-family ipv4 unicast + route-policy PERMIT_ALL in + route-policy PERMIT_ALL out + ! + ! + neighbor 198.51.100.2 + use neighbor-group PEER_B + ! +! +xml agent tty + iteration off +! +ssh server v2 +ssh server netconf vrf default +end diff --git a/internal-lab-setup-assets/startup-config/cisco2.conf b/internal-lab-setup-assets/startup-config/cisco2.conf new file mode 100644 index 0000000..eed41b9 --- /dev/null +++ b/internal-lab-setup-assets/startup-config/cisco2.conf @@ -0,0 +1,42 @@ +hostname {{ .ShortName }} +username clab + group root-lr + group cisco-support + secret 10 $6$7/293.lG/gI3....$qhqRPSKeGBilG47Ii/xlYF9xJVR1IK7bnw.7HHiVj4Aj8cb58bxiLAim8Xz.beUfJ6TQTP3vHUty3UO.4KmaL. +! +grpc + no-tls + address-family dual +! +line default + transport input ssh +! +call-home + service active + contact smart-licensing + profile CiscoTAC-1 + active + destination transport-method email disable + destination transport-method http + ! +! +netconf-yang agent + ssh +! +cdp +lldp +! +router static + address-family ipv4 unicast + 0.0.0.0/0 MgmtEth0/RP0/CPU0/0 {{ .MgmtIPv4Gateway }} + ! 
+ address-family ipv6 unicast + ::/0 MgmtEth0/RP0/CPU0/0 {{ .MgmtIPv6Gateway }} + ! +! +xml agent tty + iteration off +! +ssh server v2 +ssh server netconf vrf default +end diff --git a/internal-lab-setup-assets/startup-config/juniper1.conf b/internal-lab-setup-assets/startup-config/juniper1.conf new file mode 100644 index 0000000..807f2ba --- /dev/null +++ b/internal-lab-setup-assets/startup-config/juniper1.conf @@ -0,0 +1,83 @@ +system { + root-authentication { + encrypted-password "$6$lB5c6$Zeud8c6IhCTE6hnZxXBl3ZMZTC2hOx9pxxYUWTHKW1oC32SATWLMH2EXarxWS5k685qMggUfFur1lq.o4p4cg1"; ## SECRET-DATA + } + login { + user clab { + uid 2000; + class super-user; + authentication { + encrypted-password "$6$lCT4O$miC8pBTrsdg5AI8wzsIb.oQPYosEaP2b1waGyrMV7QgBBjmrhjG37doJ094t6.m/Xv.p3EUAuZT0Fh7dkqt7b/"; ## SECRET-DATA + } + } + } + services { + ssh { + root-login allow; + } + netconf { + ssh; + } + } +} +interfaces { + eth1 { + description foobar; + unit 0 { + family inet { + address 172.17.1.17/31; + } + } + unit 100 { + description foo; + vlan-id 100; + family inet { + address 198.51.100.2/24; + } + } + unit 200 { + description foo; + vlan-id 200; + family inet { + address 192.0.2.2/24; + } + } + } + eth2 { + unit 0 { + description EXAMPLE_NETWORK; + family inet { + address 10.0.2.1/24; + } + } + } +} +policy-options { + policy-statement PERMIT_ALL { + term pass { + then accept; + } + } +} +routing-options { + router-id 198.51.100.2; + autonomous-system 64501; + static { + route 0.0.0.0/0 next-hop {{ .MgmtIPv4Gateway }}; + } +} +protocols { + bgp { + group PEER_A { + type external; + multihop; + import PERMIT_ALL; + authentication-key "$9$RXPcyKY2aHqfLxNbY2UD"; ## SECRET-DATA + export PERMIT_ALL; + neighbor 198.51.100.1 { + export PERMIT_ALL; + peer-as 64500; + } + } + } +} diff --git a/internal-lab-setup-assets/workshop-init.sh b/internal-lab-setup-assets/workshop-init.sh new file mode 100755 index 0000000..8f349a2 --- /dev/null +++ b/internal-lab-setup-assets/workshop-init.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +echo "clab:$CX23_LAB_PASSWORD" | chpasswd + +echo "clab ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/clab +chmod 0440 /etc/sudoers.d/clab + +echo "syntax on" > /home/clab/.vimrc + +/sbin/sysctl -w net.ipv6.conf.all.disable_ipv6=1 +/sbin/sysctl -w net.ipv6.conf.default.disable_ipv6=1 +/sbin/sysctl -w net.ipv6.conf.lo.disable_ipv6=1 + +/usr/sbin/sshd -D diff --git a/internal-lab-setup-assets/workshop.clab.yml.j2 b/internal-lab-setup-assets/workshop.clab.yml.j2 new file mode 100644 index 0000000..e05a1b6 --- /dev/null +++ b/internal-lab-setup-assets/workshop.clab.yml.j2 @@ -0,0 +1,54 @@ +{%- set id = id|default(1) %} +{%- macro shared_node_settings(x) %} + mgmt_ipv4: 172.16.{{id}}.{{x}} + mgmt_ipv6: 2001:db8:16:{{id}}::{{x}} + ports: + - 2{{"%02d" % id}}{{x}}:22 +{%- endmacro -%} +name: automation-workshop-{{"%02d" % id}} + +mgmt: + network: automation-workshop-{{"%02d" % id}} + ipv4_subnet: 172.16.{{id}}.0/24 + ipv6_subnet: 2001:db8:16:{{id}}::/80 + +topology: + kinds: + cisco_xrd: + image: ios-xr/xrd-control-plane:7.9.1 + juniper_crpd: + image: crpd:23.1R1.8 + license: license.txt + startup-config: startup-config/juniper1.conf + exec: + - cli request system license add tmp/junos_sfnt_tmp.lic + + nodes: + {%- set x = 2 %} + cisco1: + kind: cisco_xrd + startup-config: startup-config/cisco1.conf + {{- shared_node_settings(x) }} + {%- set x = x+1 %} + cisco2: + kind: cisco_xrd + startup-config: startup-config/cisco2.conf + {{- shared_node_settings(x) }} + {%- set x = x+1 %} + juniper1: + kind: 
juniper_crpd + {{- shared_node_settings(x) }} + {%- set x = x+1 %} + ubuntu: + kind: linux + image: internet2/getting_started + {{- shared_node_settings(x) }} + env: + CX23_LAB_PASSWORD: Self-Nose-Reasonable-Dust-{{"%02d" % id}} + + links: + - endpoints: ["cisco1:Gi0-0-0-0", "cisco2:Gi0-0-0-0"] + - endpoints: ["cisco1:Gi0-0-0-1", "juniper1:eth1"] + - endpoints: ["cisco2:Gi0-0-0-1", "juniper1:eth2"] + - endpoints: ["ubuntu:eth1", "cisco1:Gi0-0-0-2"] + - endpoints: ["ubuntu:eth2", "cisco2:Gi0-0-0-2"] diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 0000000..b133616 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,520 @@ +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. + +[[package]] +name = "bcrypt" +version = "4.1.2" +description = "Modern password hashing for your software and your servers" +optional = false +python-versions = ">=3.7" +files = [ + {file = "bcrypt-4.1.2-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:ac621c093edb28200728a9cca214d7e838529e557027ef0581685909acd28b5e"}, + {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea505c97a5c465ab8c3ba75c0805a102ce526695cd6818c6de3b1a38f6f60da1"}, + {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57fa9442758da926ed33a91644649d3e340a71e2d0a5a8de064fb621fd5a3326"}, + {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:eb3bd3321517916696233b5e0c67fd7d6281f0ef48e66812db35fc963a422a1c"}, + {file = "bcrypt-4.1.2-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6cad43d8c63f34b26aef462b6f5e44fdcf9860b723d2453b5d391258c4c8e966"}, + {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:44290ccc827d3a24604f2c8bcd00d0da349e336e6503656cb8192133e27335e2"}, + {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:732b3920a08eacf12f93e6b04ea276c489f1c8fb49344f564cca2adb663b3e4c"}, + {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:1c28973decf4e0e69cee78c68e30a523be441972c826703bb93099868a8ff5b5"}, + {file = "bcrypt-4.1.2-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b8df79979c5bae07f1db22dcc49cc5bccf08a0380ca5c6f391cbb5790355c0b0"}, + {file = "bcrypt-4.1.2-cp37-abi3-win32.whl", hash = "sha256:fbe188b878313d01b7718390f31528be4010fed1faa798c5a1d0469c9c48c369"}, + {file = "bcrypt-4.1.2-cp37-abi3-win_amd64.whl", hash = "sha256:9800ae5bd5077b13725e2e3934aa3c9c37e49d3ea3d06318010aa40f54c63551"}, + {file = "bcrypt-4.1.2-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:71b8be82bc46cedd61a9f4ccb6c1a493211d031415a34adde3669ee1b0afbb63"}, + {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e3c6642077b0c8092580c819c1684161262b2e30c4f45deb000c38947bf483"}, + {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:387e7e1af9a4dd636b9505a465032f2f5cb8e61ba1120e79a0e1cd0b512f3dfc"}, + {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f70d9c61f9c4ca7d57f3bfe88a5ccf62546ffbadf3681bb1e268d9d2e41c91a7"}, + {file = "bcrypt-4.1.2-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2a298db2a8ab20056120b45e86c00a0a5eb50ec4075b6142db35f593b97cb3fb"}, + {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:ba55e40de38a24e2d78d34c2d36d6e864f93e0d79d0b6ce915e4335aa81d01b1"}, + {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_1_x86_64.whl", hash = 
"sha256:3566a88234e8de2ccae31968127b0ecccbb4cddb629da744165db72b58d88ca4"}, + {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:b90e216dc36864ae7132cb151ffe95155a37a14e0de3a8f64b49655dd959ff9c"}, + {file = "bcrypt-4.1.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:69057b9fc5093ea1ab00dd24ede891f3e5e65bee040395fb1e66ee196f9c9b4a"}, + {file = "bcrypt-4.1.2-cp39-abi3-win32.whl", hash = "sha256:02d9ef8915f72dd6daaef40e0baeef8a017ce624369f09754baf32bb32dba25f"}, + {file = "bcrypt-4.1.2-cp39-abi3-win_amd64.whl", hash = "sha256:be3ab1071662f6065899fe08428e45c16aa36e28bc42921c4901a191fda6ee42"}, + {file = "bcrypt-4.1.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d75fc8cd0ba23f97bae88a6ec04e9e5351ff3c6ad06f38fe32ba50cbd0d11946"}, + {file = "bcrypt-4.1.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:a97e07e83e3262599434816f631cc4c7ca2aa8e9c072c1b1a7fec2ae809a1d2d"}, + {file = "bcrypt-4.1.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e51c42750b7585cee7892c2614be0d14107fad9581d1738d954a262556dd1aab"}, + {file = "bcrypt-4.1.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba4e4cc26610581a6329b3937e02d319f5ad4b85b074846bf4fef8a8cf51e7bb"}, + {file = "bcrypt-4.1.2.tar.gz", hash = "sha256:33313a1200a3ae90b75587ceac502b048b840fc69e7f7a0905b5f87fac7a1258"}, +] + +[package.extras] +tests = ["pytest (>=3.2.1,!=3.3.0)"] +typecheck = ["mypy"] + +[[package]] +name = "black" +version = "24.2.0" +description = "The uncompromising code formatter." +optional = false +python-versions = ">=3.8" +files = [ + {file = "black-24.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6981eae48b3b33399c8757036c7f5d48a535b962a7c2310d19361edeef64ce29"}, + {file = "black-24.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d533d5e3259720fdbc1b37444491b024003e012c5173f7d06825a77508085430"}, + {file = "black-24.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61a0391772490ddfb8a693c067df1ef5227257e72b0e4108482b8d41b5aee13f"}, + {file = "black-24.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:992e451b04667116680cb88f63449267c13e1ad134f30087dec8527242e9862a"}, + {file = "black-24.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:163baf4ef40e6897a2a9b83890e59141cc8c2a98f2dda5080dc15c00ee1e62cd"}, + {file = "black-24.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e37c99f89929af50ffaf912454b3e3b47fd64109659026b678c091a4cd450fb2"}, + {file = "black-24.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9de21bafcba9683853f6c96c2d515e364aee631b178eaa5145fc1c61a3cc92"}, + {file = "black-24.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:9db528bccb9e8e20c08e716b3b09c6bdd64da0dd129b11e160bf082d4642ac23"}, + {file = "black-24.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d84f29eb3ee44859052073b7636533ec995bd0f64e2fb43aeceefc70090e752b"}, + {file = "black-24.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1e08fb9a15c914b81dd734ddd7fb10513016e5ce7e6704bdd5e1251ceee51ac9"}, + {file = "black-24.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:810d445ae6069ce64030c78ff6127cd9cd178a9ac3361435708b907d8a04c693"}, + {file = "black-24.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:ba15742a13de85e9b8f3239c8f807723991fbfae24bad92d34a2b12e81904982"}, + {file = "black-24.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7e53a8c630f71db01b28cd9602a1ada68c937cbf2c333e6ed041390d6968faf4"}, + {file = 
"black-24.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:93601c2deb321b4bad8f95df408e3fb3943d85012dddb6121336b8e24a0d1218"}, + {file = "black-24.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0057f800de6acc4407fe75bb147b0c2b5cbb7c3ed110d3e5999cd01184d53b0"}, + {file = "black-24.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:faf2ee02e6612577ba0181f4347bcbcf591eb122f7841ae5ba233d12c39dcb4d"}, + {file = "black-24.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:057c3dc602eaa6fdc451069bd027a1b2635028b575a6c3acfd63193ced20d9c8"}, + {file = "black-24.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:08654d0797e65f2423f850fc8e16a0ce50925f9337fb4a4a176a7aa4026e63f8"}, + {file = "black-24.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca610d29415ee1a30a3f30fab7a8f4144e9d34c89a235d81292a1edb2b55f540"}, + {file = "black-24.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:4dd76e9468d5536abd40ffbc7a247f83b2324f0c050556d9c371c2b9a9a95e31"}, + {file = "black-24.2.0-py3-none-any.whl", hash = "sha256:e8a6ae970537e67830776488bca52000eaa37fa63b9988e8c487458d9cd5ace6"}, + {file = "black-24.2.0.tar.gz", hash = "sha256:bce4f25c27c3435e4dace4815bcb2008b87e167e3bf4ee47ccdc5ce906eb4894"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +packaging = ">=22.0" +pathspec = ">=0.9.0" +platformdirs = ">=2" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + +[[package]] +name = "cffi" +version = "1.16.0" +description = "Foreign Function Interface for Python calling C code." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"}, + {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"}, + {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"}, + {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"}, + {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"}, + {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"}, + {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"}, + {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"}, + {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"}, + {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"}, + {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"}, + {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"}, + {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"}, + {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"}, + {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"}, + {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"}, + {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"}, + {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"}, + {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"}, + {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"}, + {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"}, + {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"}, + {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"}, + {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"}, + {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"}, + {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"}, + {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "click" +version = "8.1.7" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "cryptography" +version = "42.0.5" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "cryptography-42.0.5-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16"}, + {file = "cryptography-42.0.5-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec"}, + {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb"}, + {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4"}, + {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278"}, + {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7"}, + {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee"}, + {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1"}, + {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d"}, + {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da"}, + {file = "cryptography-42.0.5-cp37-abi3-win32.whl", hash = "sha256:b6cd2203306b63e41acdf39aa93b86fb566049aeb6dc489b70e34bcd07adca74"}, + {file = "cryptography-42.0.5-cp37-abi3-win_amd64.whl", hash = "sha256:98d8dc6d012b82287f2c3d26ce1d2dd130ec200c8679b6213b3c73c08b2b7940"}, + {file = "cryptography-42.0.5-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8"}, + {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1"}, + {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e"}, + {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc"}, + {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a"}, + {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7"}, + {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922"}, + {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc"}, + {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30"}, + {file = "cryptography-42.0.5-cp39-abi3-win32.whl", hash = "sha256:1f71c10d1e88467126f0efd484bd44bca5e14c664ec2ede64c32f20875c0d413"}, + {file = "cryptography-42.0.5-cp39-abi3-win_amd64.whl", hash = 
"sha256:a011a644f6d7d03736214d38832e030d8268bcff4a41f728e6030325fea3e400"}, + {file = "cryptography-42.0.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9481ffe3cf013b71b2428b905c4f7a9a4f76ec03065b05ff499bb5682a8d9ad8"}, + {file = "cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ba334e6e4b1d92442b75ddacc615c5476d4ad55cc29b15d590cc6b86efa487e2"}, + {file = "cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba3e4a42397c25b7ff88cdec6e2a16c2be18720f317506ee25210f6d31925f9c"}, + {file = "cryptography-42.0.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:111a0d8553afcf8eb02a4fea6ca4f59d48ddb34497aa8706a6cf536f1a5ec576"}, + {file = "cryptography-42.0.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cd65d75953847815962c84a4654a84850b2bb4aed3f26fadcc1c13892e1e29f6"}, + {file = "cryptography-42.0.5-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e807b3188f9eb0eaa7bbb579b462c5ace579f1cedb28107ce8b48a9f7ad3679e"}, + {file = "cryptography-42.0.5-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f12764b8fffc7a123f641d7d049d382b73f96a34117e0b637b80643169cec8ac"}, + {file = "cryptography-42.0.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:37dd623507659e08be98eec89323469e8c7b4c1407c85112634ae3dbdb926fdd"}, + {file = "cryptography-42.0.5.tar.gz", hash = "sha256:6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1"}, +] + +[package.dependencies] +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] +nox = ["nox"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] +sdist = ["build"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] + +[[package]] +name = "future" +version = "1.0.0" +description = "Clean single-source support for Python 3 and 2" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "future-1.0.0-py3-none-any.whl", hash = "sha256:929292d34f5872e70396626ef385ec22355a1fae8ad29e1a734c3e43f9fbc216"}, + {file = "future-1.0.0.tar.gz", hash = "sha256:bd2968309307861edae1458a4f8a4f3598c03be43b97521076aebf5d94c07b05"}, +] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "netmiko" +version = "4.3.0" +description = "Multi-vendor library to simplify legacy CLI connections to network devices" +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "netmiko-4.3.0-py3-none-any.whl", hash = "sha256:a873b186e0b61be4a2100eda51e996d917ceddce195b734346b686757e61d324"}, + {file = "netmiko-4.3.0.tar.gz", hash = "sha256:da90f6efdf33b4140eb6cd7f2272773c2ce144fa74ac34d5ecac1b4d4607f1fb"}, +] + +[package.dependencies] +ntc-templates = ">=2.0.0" +paramiko = ">=2.9.5" +pyserial = ">=3.3" +pyyaml = ">=5.3" +scp = ">=0.13.6" +textfsm = ">=1.1.3" + +[[package]] +name = "ntc-templates" +version = "4.3.0" +description = "TextFSM Templates for Network Devices, and Python wrapper for TextFSM's CliTable." +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "ntc_templates-4.3.0-py3-none-any.whl", hash = "sha256:f9b4805dfd9d1516a29ae9f505409c17c7f14c958d47f1c1f57c9486af6164db"}, + {file = "ntc_templates-4.3.0.tar.gz", hash = "sha256:b6902389e86b868d76b64ea55c8225a0aa7aafe910b3a02b2a33b7b18fb27ef1"}, +] + +[package.dependencies] +textfsm = ">=1.1.0,<2.0.0" + +[[package]] +name = "packaging" +version = "23.2" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, + {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, +] + +[[package]] +name = "paramiko" +version = "3.4.0" +description = "SSH2 protocol library" +optional = false +python-versions = ">=3.6" +files = [ + {file = "paramiko-3.4.0-py3-none-any.whl", hash = "sha256:43f0b51115a896f9c00f59618023484cb3a14b98bbceab43394a39c6739b7ee7"}, + {file = "paramiko-3.4.0.tar.gz", hash = "sha256:aac08f26a31dc4dffd92821527d1682d99d52f9ef6851968114a8728f3c274d3"}, +] + +[package.dependencies] +bcrypt = ">=3.2" +cryptography = ">=3.3" +pynacl = ">=1.5" + +[package.extras] +all = ["gssapi (>=1.4.1)", "invoke (>=2.0)", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8)"] +gssapi = ["gssapi (>=1.4.1)", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8)"] +invoke = ["invoke (>=2.0)"] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "platformdirs" +version = "4.2.0" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "platformdirs-4.2.0-py3-none-any.whl", hash = "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068"},
+    {file = "platformdirs-4.2.0.tar.gz", hash = "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"},
+]
+
+[package.extras]
+docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"]
+test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"]
+
+[[package]]
+name = "pycparser"
+version = "2.21"
+description = "C parser in Python"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+    {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
+    {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
+]
+
+[[package]]
+name = "pynacl"
+version = "1.5.0"
+description = "Python binding to the Networking and Cryptography (NaCl) library"
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"},
+    {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"},
+    {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394"},
+    {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d"},
+    {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858"},
+    {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b"},
+    {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff"},
+    {file = "PyNaCl-1.5.0-cp36-abi3-win32.whl", hash = "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543"},
+    {file = "PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93"},
+    {file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"},
+]
+
+[package.dependencies]
+cffi = ">=1.4.1"
+
+[package.extras]
+docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"]
+tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
+
+[[package]]
+name = "pyserial"
+version = "3.5"
+description = "Python Serial Port Extension"
+optional = false
+python-versions = "*"
+files = [
+    {file = "pyserial-3.5-py2.py3-none-any.whl", hash = "sha256:c4451db6ba391ca6ca299fb3ec7bae67a5c55dde170964c7a14ceefec02f2cf0"},
+    {file = "pyserial-3.5.tar.gz", hash = "sha256:3c77e014170dfffbd816e6ffc205e9842efb10be9f58ec16d3e8675b4925cddb"},
+]
+
+[package.extras]
+cp2110 = ["hidapi"]
+
+[[package]]
+name = "pyyaml"
+version = "6.0.1"
+description = "YAML parser and emitter for Python"
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"},
+    {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"},
+    {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
+    {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
+    {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
+    {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
+    {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
+    {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
+    {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
+    {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"},
+    {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
+    {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
+    {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
+    {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
+    {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
+    {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
+    {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
+    {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
+    {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
+    {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
+    {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
+    {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
+    {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
+    {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
+    {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
+    {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"},
+    {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"},
+    {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"},
+    {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"},
+    {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"},
+    {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"},
+    {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"},
+    {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"},
+    {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"},
+    {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"},
+    {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
+    {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
+    {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
+    {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
+    {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
+    {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
+    {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
+    {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"},
+    {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
+    {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
+    {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
+    {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
+    {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
+    {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
+    {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
+]
+
+[[package]]
+name = "scp"
+version = "0.14.5"
+description = "scp module for paramiko"
+optional = false
+python-versions = "*"
+files = [
+    {file = "scp-0.14.5-py2.py3-none-any.whl", hash = "sha256:d224535dd8ed00294f52b0e0e18fde7a6fb7a3d06b97ede9e3f750fa7bf75c09"},
+    {file = "scp-0.14.5.tar.gz", hash = "sha256:64f0015899b3d212cb8088e7d40ebaf0686889ff0e243d5c1242efe8b50f053e"},
+]
+
+[package.dependencies]
+paramiko = "*"
+
+[[package]]
+name = "six"
+version = "1.16.0"
+description = "Python 2 and 3 compatibility utilities"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+    {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
+    {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
+]
+
+[[package]]
+name = "textfsm"
+version = "1.1.3"
+description = "Python module for parsing semi-structured text into python tables."
+optional = false
+python-versions = "*"
+files = [
+    {file = "textfsm-1.1.3-py2.py3-none-any.whl", hash = "sha256:dcbeebc6a6137bed561c71a56344d752e6dbc04ae5ea309252cb70fb97ccc9cd"},
+    {file = "textfsm-1.1.3.tar.gz", hash = "sha256:577ef278a9237f5341ae9b682947cefa4a2c1b24dbe486f94f2c95addc6504b5"},
+]
+
+[package.dependencies]
+future = "*"
+six = "*"
+
+[[package]]
+name = "tomli"
+version = "2.0.1"
+description = "A lil' TOML parser"
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
+    {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
+]
+
+[[package]]
+name = "typing-extensions"
+version = "4.10.0"
+description = "Backported and Experimental Type Hints for Python 3.8+"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "typing_extensions-4.10.0-py3-none-any.whl", hash = "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"},
+    {file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"},
+]
+
+[metadata]
+lock-version = "2.0"
+python-versions = "^3.9"
+content-hash = "ae1a70284ae8d6067f7169baeef93c899931dd26994e4e721a7a33e8823eaefb"
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..b0588d5
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,18 @@
+[tool.poetry]
+name = "2024-lonisummit-workshop-automation"
+version = "0.1.0"
+description = ""
+authors = ["Shannon Byrnes "]
+readme = "README.md"
+packages = [{include = "2024_lonisummit_workshop_automation"}]
+
+[tool.poetry.dependencies]
+python = "^3.9"
+netmiko = "^4.3.0"
+
+[tool.poetry.group.dev.dependencies]
+black = "^24.2.0"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"