From 50e5c6163488cab87f217a3dc5713ce58ea61011 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan <47282725+renukamanavalan@users.noreply.github.com> Date: Wed, 7 Apr 2021 14:21:27 -0700 Subject: [PATCH 01/41] Fixed the possibility of using uninitialized variable in route_check.py (#1551) --- scripts/route_check.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/scripts/route_check.py b/scripts/route_check.py index 72ed3201bf..efc144c2d3 100755 --- a/scripts/route_check.py +++ b/scripts/route_check.py @@ -390,6 +390,8 @@ def check_routes(): rt_asic_miss = [] results = {} + adds = [] + deletes = [] selector, subs, rt_asic = get_route_entries() @@ -431,8 +433,8 @@ def check_routes(): if results: print_message(syslog.LOG_WARNING, "Failure results: {", json.dumps(results, indent=4), "}") print_message(syslog.LOG_WARNING, "Failed. Look at reported mismatches above") - print_message(syslog.LOG_WARNING, "add: {", json.dumps(adds, indent=4), "}") - print_message(syslog.LOG_WARNING, "del: {", json.dumps(deletes, indent=4), "}") + print_message(syslog.LOG_WARNING, "add: ", json.dumps(adds, indent=4)) + print_message(syslog.LOG_WARNING, "del: ", json.dumps(deletes, indent=4)) return -1, results else: print_message(syslog.LOG_INFO, "All good!") From 030293c12875fd21c034b06e24e6d9ad69d9a450 Mon Sep 17 00:00:00 2001 From: Joe LeVeque Date: Thu, 8 Apr 2021 10:24:31 -0700 Subject: [PATCH 02/41] Use 'importlib' module in lieu of deprecated 'imp' module (#1450) Migrate from using the `imp` module to using the `importlib` module. As of Python 3, the `imp` module has been deprecated in favor of the `importlib` module. Place logic in a new function, `load_module_from_source()` in a new file, `utilities_common/general.py` Also fix some formatting --- config/config_mgmt.py | 39 +++++----- pfcwd/main.py | 6 +- tests/aclshow_test.py | 99 ++++++++++++++++---------- tests/buffer_test.py | 1 - tests/config_dpb_test.py | 21 +++--- tests/config_mgmt_test.py | 114 +++++++++++++++++------------- tests/config_test.py | 10 +-- tests/crm_test.py | 4 +- tests/decode_syseeprom_test.py | 7 +- tests/feature_test.py | 12 ++-- tests/neighbor_advertiser_test.py | 15 ++-- tests/pfcstat_test.py | 1 - tests/pfcwd_test.py | 4 +- tests/port2alias_test.py | 9 ++- tests/psushow_test.py | 7 +- tests/watermarkstat_test.py | 1 - utilities_common/general.py | 17 +++++ utilities_common/util_base.py | 3 +- 18 files changed, 206 insertions(+), 164 deletions(-) create mode 100644 utilities_common/general.py diff --git a/config/config_mgmt.py b/config/config_mgmt.py index 194c8aefc9..cc64b35d97 100644 --- a/config/config_mgmt.py +++ b/config/config_mgmt.py @@ -2,28 +2,21 @@ config_mgmt.py provides classes for configuration validation and for Dynamic Port Breakout. 
''' -try: - import re - import syslog +import re +import syslog +from json import load +from sys import flags +from time import sleep as tsleep - from json import load - from time import sleep as tsleep - from imp import load_source - from jsondiff import diff - from sys import flags +import sonic_yang +from jsondiff import diff +from swsssdk import port_util +from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector +from utilities_common.general import load_module_from_source - # SONiC specific imports - import sonic_yang - from swsssdk import port_util - from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector - # Using load_source to 'import /usr/local/bin/sonic-cfggen as sonic_cfggen' - # since /usr/local/bin/sonic-cfggen does not have .py extension. - load_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') - from sonic_cfggen import deep_update, FormatConverter - -except ImportError as e: - raise ImportError("%s - required module not found" % str(e)) +# Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension. +sonic_cfggen = load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') # Globals YANG_DIR = "/usr/local/yang-models" @@ -193,8 +186,8 @@ def readConfigDB(self): data = dict() configdb = ConfigDBConnector() configdb.connect() - deep_update(data, FormatConverter.db_to_output(configdb.get_config())) - self.configdbJsonIn = FormatConverter.to_serialized(data) + sonic_cfggen.deep_update(data, sonic_cfggen.FormatConverter.db_to_output(configdb.get_config())) + self.configdbJsonIn = sonic_cfggen.FormatConverter.to_serialized(data) self.sysLog(syslog.LOG_DEBUG, 'Reading Input from ConfigDB {}'.\ format(self.configdbJsonIn)) @@ -214,9 +207,9 @@ def writeConfigDB(self, jDiff): data = dict() configdb = ConfigDBConnector() configdb.connect(False) - deep_update(data, FormatConverter.to_deserialized(jDiff)) + sonic_cfggen.deep_update(data, sonic_cfggen.FormatConverter.to_deserialized(jDiff)) self.sysLog(msg="Write in DB: {}".format(data)) - configdb.mod_config(FormatConverter.output_to_db(data)) + configdb.mod_config(sonic_cfggen.FormatConverter.output_to_db(data)) return diff --git a/pfcwd/main.py b/pfcwd/main.py index 9b038316ca..1f8ec2293e 100644 --- a/pfcwd/main.py +++ b/pfcwd/main.py @@ -1,11 +1,9 @@ +import importlib import os -import imp import sys import click - import utilities_common.cli as clicommon - from natsort import natsorted from sonic_py_common.multi_asic import get_external_ports from tabulate import tabulate @@ -27,7 +25,7 @@ import mock_tables.dbconnector if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": import mock_tables.mock_multi_asic - imp.reload(mock_tables.mock_multi_asic) + importlib.reload(mock_tables.mock_multi_asic) mock_tables.dbconnector.load_namespace_config() except KeyError: diff --git a/tests/aclshow_test.py b/tests/aclshow_test.py index b2371e9723..e41d56b9eb 100644 --- a/tests/aclshow_test.py +++ b/tests/aclshow_test.py @@ -1,24 +1,26 @@ -import sys +import json import os -from imp import load_source +import sys from io import StringIO from unittest import mock +from utilities_common.general import load_module_from_source test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) scripts_path = os.path.join(modules_path, "scripts") sys.path.insert(0, modules_path) -load_source('aclshow', scripts_path+'/aclshow') -from aclshow import * +# Load the file under test +aclshow_path = os.path.join(scripts_path, 'aclshow') 
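For reference, load_module_from_source() — the helper added in utilities_common/general.py later in this patch — is a thin importlib wrapper around the deprecated imp.load_source() one-liner it replaces. A minimal sketch of what the call below does (the real helper additionally registers the module in sys.modules):

    import importlib.machinery
    import importlib.util

    # Equivalent of: aclshow = load_module_from_source('aclshow', aclshow_path)
    loader = importlib.machinery.SourceFileLoader('aclshow', aclshow_path)
    spec = importlib.util.spec_from_loader(loader.name, loader)
    aclshow = importlib.util.module_from_spec(spec)
    loader.exec_module(aclshow)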
+aclshow = load_module_from_source('aclshow', aclshow_path) from .mock_tables import dbconnector # Expected output for aclshow -default_output = ''+ \ -"""RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT +default_output = """\ +RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT ------------ ------------ ------ --------------- ------------- RULE_1 DATAACL 9999 101 100 RULE_2 DATAACL 9998 201 200 @@ -32,8 +34,8 @@ """ # Expected output for aclshow -a -all_output = '' + \ -"""RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT +all_output = """\ +RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT ------------ ------------ ------ --------------- ------------- RULE_1 DATAACL 9999 101 100 RULE_2 DATAACL 9998 201 200 @@ -49,35 +51,35 @@ """ # Expected output for aclshow -r RULE_1 -t DATAACL -rule1_dataacl_output = '' + \ -"""RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT +rule1_dataacl_output = """\ +RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT ----------- ------------ ------ --------------- ------------- RULE_1 DATAACL 9999 101 100 """ # Expected output for aclshow -r RULE_1 -t DATAACL -rule10_dataacl_output = '' + \ -"""RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT +rule10_dataacl_output = """\ +RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT ----------- ------------ ------ --------------- ------------- RULE_10 DATAACL 9989 1001 1000 """ # Expected output for aclshow -a -r RULE_05 -rule05_all_output = ''+ \ -"""RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT +rule05_all_output = """\ +RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT ----------- ------------ ------ --------------- ------------- RULE_05 DATAACL 9995 0 0 """ # Expected output for aclshow -r RULE_0 -rule0_output = '' + \ -"""RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT +rule0_output = """\ +RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT ----------- ------------ ------ --------------- ------------- """ # Expected output for aclshow -r RULE_4,RULE_6 -vv -rule4_rule6_verbose_output = '' + \ -"""Reading ACL info... +rule4_rule6_verbose_output = """\ +Reading ACL info... Total number of ACL Tables: 8 Total number of ACL Rules: 11 @@ -88,15 +90,15 @@ """ # Expected output for aclshow -t EVERFLOW -everflow_output = '' + \ -"""RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT +everflow_output = """\ +RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT ----------- ------------ ------ --------------- ------------- RULE_6 EVERFLOW 9994 601 600 """ # Expected output for aclshow -t DATAACL -dataacl_output = '' + \ -"""RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT +dataacl_output = """\ +RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT ------------ ------------ ------ --------------- ------------- RULE_1 DATAACL 9999 101 100 RULE_2 DATAACL 9998 201 200 @@ -113,8 +115,8 @@ # Expected output for # aclshow -a -c ; aclshow -a -all_after_clear_output = '' + \ -"""RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT +all_after_clear_output = """\ +RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT ------------ ------------ ------ --------------- ------------- RULE_1 DATAACL 9999 0 0 RULE_2 DATAACL 9998 0 0 @@ -129,6 +131,7 @@ RULE_08 EVERFLOW 9992 0 0 """ + class Aclshow(): def __init__(self, *args, **kwargs): """ @@ -146,8 +149,8 @@ def nullify_counters(self): This method is used to empty dumped counters if exist in /tmp/.counters_acl.p (by default). 
""" - if os.path.isfile(COUNTER_POSITION): - with open(COUNTER_POSITION, 'w') as fp: + if os.path.isfile(aclshow.COUNTER_POSITION): + with open(aclshow.COUNTER_POSITION, 'w') as fp: json.dump([], fp) def runTest(self): @@ -155,10 +158,10 @@ def runTest(self): This method invokes main() from aclshow utility (parametrized by argparse) parametrized by mock argparse. """ - @mock.patch('argparse.ArgumentParser.parse_args', return_value = argparse.Namespace(**self.kwargs)) - def run(mock_args): - main() - run() + with mock.patch.object(aclshow.argparse.ArgumentParser, + 'parse_args', + return_value=aclshow.argparse.Namespace(**self.kwargs)): + aclshow.main() def setUp(self): if self.nullify_on_start: @@ -173,56 +176,78 @@ def tearDown(self): sys.stdout = self.old_stdout # aclshow + + def test_default(): - test = Aclshow(all = None, clear = None, rules = None, tables = None, verbose = None) + test = Aclshow(all=None, clear=None, rules=None, tables=None, verbose=None) assert test.result.getvalue() == default_output # aclshow -a + + def test_all(): - test = Aclshow(all = True, clear = None, rules = None, tables = None, verbose = None) + test = Aclshow(all=True, clear=None, rules=None, tables=None, verbose=None) assert test.result.getvalue() == all_output # aclshow -r RULE_1 -t DATAACL + + def test_rule1_dataacl(): - test = Aclshow(all = None, clear = None, rules = 'RULE_1', tables = 'DATAACL', verbose = None) + test = Aclshow(all=None, clear=None, rules='RULE_1', tables='DATAACL', verbose=None) assert test.result.getvalue() == rule1_dataacl_output # aclshow -a -r RULE_05 + + def test_rule05_all(): - test = Aclshow(all = True, clear = None, rules = 'RULE_05', tables = None, verbose = None) + test = Aclshow(all=True, clear=None, rules='RULE_05', tables=None, verbose=None) assert test.result.getvalue() == rule05_all_output # aclshow -r RULE_0 + + def test_rule0(): - test = Aclshow(all = None, clear = None, rules = 'RULE_0', tables = None, verbose = None) + test = Aclshow(all=None, clear=None, rules='RULE_0', tables=None, verbose=None) assert test.result.getvalue() == rule0_output # aclshow -r RULE_10 -t DATAACL + + def test_rule10_lowercase_priority(): - test = Aclshow(all = None, clear = None, rules = 'RULE_10', tables = 'DATAACL', verbose = None) + test = Aclshow(all=None, clear=None, rules='RULE_10', tables='DATAACL', verbose=None) assert test.result.getvalue() == rule10_dataacl_output # aclshow -r RULE_4,RULE_6 -vv + + def test_rule4_rule6_verbose(): - test = Aclshow(all = None, clear = None, rules = 'RULE_4,RULE_6', tables = None, verbose = True) + test = Aclshow(all=None, clear=None, rules='RULE_4,RULE_6', tables=None, verbose=True) assert test.result.getvalue() == rule4_rule6_verbose_output # aclshow -t EVERFLOW + + def test_everflow(): test = Aclshow(all=None, clear=None, rules=None, tables='EVERFLOW', verbose=None) assert test.result.getvalue() == everflow_output # aclshow -t DATAACL + + def test_dataacl(): test = Aclshow(all=None, clear=None, rules=None, tables='DATAACL', verbose=None) assert test.result.getvalue() == dataacl_output # aclshow -c + + def test_clear(): test = Aclshow(all=None, clear=True, rules=None, tables=None, verbose=None) assert test.result.getvalue() == clear_output # aclshow -a -c ; aclshow -a + + def test_all_after_clear(): nullify_on_start, nullify_on_exit = True, False test = Aclshow(nullify_on_start, nullify_on_exit, all=True, clear=True, rules=None, tables=None, verbose=None) diff --git a/tests/buffer_test.py b/tests/buffer_test.py index fbbf2ba3bd..30abfad8eb 
100644 --- a/tests/buffer_test.py +++ b/tests/buffer_test.py @@ -1,4 +1,3 @@ -import imp import os import sys from click.testing import CliRunner diff --git a/tests/config_dpb_test.py b/tests/config_dpb_test.py index e347538bcf..1d58e90c67 100644 --- a/tests/config_dpb_test.py +++ b/tests/config_dpb_test.py @@ -1,21 +1,22 @@ import json import os import re -from imp import load_source from unittest import mock import pytest from click.testing import CliRunner from utilities_common.db import Db +from utilities_common.general import load_module_from_source import config.main as config -load_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') -from sonic_cfggen import deep_update, FormatConverter +# Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension. +sonic_cfggen = load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') + +# Import config_mgmt.py +config_mgmt_py_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config_mgmt.py') +config_mgmt = load_module_from_source('config_mgmt', config_mgmt_py_path) -load_source('config_mgmt', \ - os.path.join(os.path.dirname(__file__), '..', 'config', 'config_mgmt.py')) -import config_mgmt # Sample platform.json for Test BRKOUT_CFG_FILE_JSON = { @@ -137,14 +138,14 @@ def mock_func(breakout_cfg_file, sonic_db): def write_config_db(cfgdb, config): data = dict() - deep_update(data, FormatConverter.to_deserialized(config)) - cfgdb.mod_config(FormatConverter.output_to_db(data)) + sonic_cfggen.deep_update(data, sonic_cfggen.FormatConverter.to_deserialized(config)) + cfgdb.mod_config(sonic_cfggen.FormatConverter.output_to_db(data)) return def read_config_db(cfgdb): data = dict() - deep_update(data, FormatConverter.db_to_output(cfgdb.get_config())) - return FormatConverter.to_serialized(data) + sonic_cfggen.deep_update(data, sonic_cfggen.FormatConverter.db_to_output(cfgdb.get_config())) + return sonic_cfggen.FormatConverter.to_serialized(data) def writeJson(d, file): with open(file, 'w') as f: diff --git a/tests/config_mgmt_test.py b/tests/config_mgmt_test.py index adeca71d4e..39e3870990 100644 --- a/tests/config_mgmt_test.py +++ b/tests/config_mgmt_test.py @@ -1,15 +1,15 @@ -import imp import os import sys +from json import dump +from copy import deepcopy from unittest import mock, TestCase -# import file under test i.e. 
config_mgmt.py -imp.load_source('config_mgmt', \ - os.path.join(os.path.dirname(__file__), '..', 'config', 'config_mgmt.py')) -import config_mgmt +from utilities_common.general import load_module_from_source + +# Import file under test i.e., config_mgmt.py +config_mgmt_py_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config_mgmt.py') +config_mgmt = load_module_from_source('config_mgmt', config_mgmt_py_path) -from json import dump -from copy import deepcopy class TestConfigMgmt(TestCase): ''' @@ -41,15 +41,15 @@ def test_search_keys(self): curConfig = deepcopy(configDbJson) self.writeJson(curConfig, config_mgmt.CONFIG_DB_JSON_FILE) cmdpb = config_mgmt.ConfigMgmtDPB(source=config_mgmt.CONFIG_DB_JSON_FILE) - out = cmdpb.configWithKeys(portBreakOutConfigDbJson, \ - ["Ethernet8","Ethernet9"]) + out = cmdpb.configWithKeys(portBreakOutConfigDbJson, + ["Ethernet8", "Ethernet9"]) assert "VLAN" not in out assert "INTERFACE" not in out for k in out['ACL_TABLE']: # only ports must be chosen len(out['ACL_TABLE'][k]) == 1 - out = cmdpb.configWithKeys(portBreakOutConfigDbJson, \ - ["Ethernet10","Ethernet11"]) + out = cmdpb.configWithKeys(portBreakOutConfigDbJson, + ["Ethernet10", "Ethernet11"]) assert "INTERFACE" in out for k in out['ACL_TABLE']: # only ports must be chosen @@ -58,13 +58,13 @@ def test_search_keys(self): def test_break_out(self): # prepare default config - self.writeJson(portBreakOutConfigDbJson, \ - config_mgmt.DEFAULT_CONFIG_DB_JSON_FILE) + self.writeJson(portBreakOutConfigDbJson, + config_mgmt.DEFAULT_CONFIG_DB_JSON_FILE) # prepare config dj json to start with curConfig = deepcopy(configDbJson) - #Ethernet8: start from 4x25G-->2x50G with -f -l + # Ethernet8: start from 4x25G-->2x50G with -f -l self.dpb_port8_4x25G_2x50G_f_l(curConfig) - #Ethernet8: move from 2x50G-->1x100G without force, list deps + # Ethernet8: move from 2x50G-->1x100G without force, list deps self.dpb_port8_2x50G_1x100G(curConfig) # Ethernet8: move from 2x50G-->1x100G with force, where deps exists self.dpb_port8_2x50G_1x100G_f(curConfig) @@ -136,28 +136,39 @@ def generate_args(self, portIdx, laneIdx, curMode, newMode): ''' # default params pre = "Ethernet" - laneMap = {"4x25G": [1,1,1,1], "2x50G": [2,2], "1x100G":[4], \ - "1x50G(2)+2x25G(2)":[2,1,1], "2x25G(2)+1x50G(2)":[1,1,2]} + laneMap = {"4x25G": [1, 1, 1, 1], "2x50G": [2, 2], "1x100G": [4], + "1x50G(2)+2x25G(2)": [2, 1, 1], "2x25G(2)+1x50G(2)": [1, 1, 2]} laneSpeed = 25000 # generate dPorts - l = list(laneMap[curMode]); l.insert(0, 0); id = portIdx; dPorts = list() + l = list(laneMap[curMode]) + l.insert(0, 0) + id = portIdx + dPorts = list() for i in l[:-1]: id = id + i portName = portName = "{}{}".format(pre, id) dPorts.append(portName) # generate aPorts - l = list(laneMap[newMode]); l.insert(0, 0); id = portIdx; aPorts = list() + l = list(laneMap[newMode]) + l.insert(0, 0) + id = portIdx + aPorts = list() for i in l[:-1]: id = id + i portName = portName = "{}{}".format(pre, id) aPorts.append(portName) # generate pJson - l = laneMap[newMode]; pJson = {"PORT": {}}; li = laneIdx; pi = 0 + l = laneMap[newMode] + pJson = {"PORT": {}} + li = laneIdx + pi = 0 for i in l: speed = laneSpeed*i - lanes = [str(li+j) for j in range(i)]; lanes = ','.join(lanes) + lanes = [str(li+j) for j in range(i)] + lanes = ','.join(lanes) pJson['PORT'][aPorts[pi]] = {"speed": str(speed), "lanes": str(lanes)} - li = li+i; pi = pi + 1 + li = li+i + pi = pi + 1 return dPorts, pJson def updateConfig(self, conf, uconf): @@ -256,10 +267,10 @@ def 
dpb_port8_1x100G_1x50G_2x25G_f_l(self, curConfig): ''' cmdpb = self.config_mgmt_dpb(curConfig) # create ARGS - dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \ - curMode='1x100G', newMode='1x50G(2)+2x25G(2)') + dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, + curMode='1x100G', newMode='1x50G(2)+2x25G(2)') deps, ret = cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, - force=True, loadDefConfig=True) + force=True, loadDefConfig=True) # Expected Result delConfig and addConfig is pushed in order delConfig = { 'PORT': { @@ -322,10 +333,10 @@ def dpb_port8_4x25G_1x100G_f(self, curConfig): ''' cmdpb = self.config_mgmt_dpb(curConfig) # create ARGS - dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \ - curMode='4x25G', newMode='1x100G') + dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, + curMode='4x25G', newMode='1x100G') deps, ret = cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, - force=False, loadDefConfig=False) + force=False, loadDefConfig=False) # Expected Result delConfig and addConfig is pushed in order delConfig = { 'PORT': { @@ -352,10 +363,10 @@ def dpb_port8_1x100G_4x25G(self, curConfig): assert for success and failure. ''' cmdpb = self.config_mgmt_dpb(curConfig) - dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \ - curMode='1x100G', newMode='4x25G') + dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, + curMode='1x100G', newMode='4x25G') deps, ret = cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, - force=False, loadDefConfig=False) + force=False, loadDefConfig=False) # Expected Result delConfig and addConfig is pushed in order delConfig = { 'PORT': { @@ -380,10 +391,10 @@ def dpb_port8_2x50G_1x100G_f(self, curConfig): ''' cmdpb = self.config_mgmt_dpb(curConfig) # create ARGS - dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \ - curMode='2x50G', newMode='1x100G') + dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, + curMode='2x50G', newMode='1x100G') deps, ret = cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, - force=True, loadDefConfig=False) + force=True, loadDefConfig=False) # Expected Result delConfig and addConfig is pushed in order delConfig = { 'ACL_TABLE': { @@ -416,10 +427,10 @@ def dpb_port8_2x50G_1x100G(self, curConfig): ''' cmdpb = self.config_mgmt_dpb(curConfig) # create ARGS - dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \ - curMode='2x50G', newMode='1x100G') + dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, + curMode='2x50G', newMode='1x100G') deps, ret = cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, - force=False, loadDefConfig=False) + force=False, loadDefConfig=False) # Expected Result assert ret == False and len(deps) == 3 assert cmdpb.writeConfigDB.call_count == 0 @@ -438,10 +449,10 @@ def dpb_port8_4x25G_2x50G_f_l(self, curConfig): ''' cmdpb = self.config_mgmt_dpb(curConfig) # create ARGS - dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \ - curMode='4x25G', newMode='2x50G') - cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, force=True, \ - loadDefConfig=True) + dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, + curMode='4x25G', newMode='2x50G') + cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, force=True, + loadDefConfig=True) # Expected Result delConfig and addConfig is pushed in order delConfig = { 'ACL_TABLE': { @@ -504,10 +515,10 @@ def dpb_port4_4x25G_2x50G_f_l(self, curConfig): ''' cmdpb = self.config_mgmt_dpb(curConfig) # create ARGS - dPorts, pJson = self.generate_args(portIdx=4, laneIdx=69, \ - curMode='4x25G', 
newMode='2x50G') - cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, force=True, \ - loadDefConfig=True) + dPorts, pJson = self.generate_args(portIdx=4, laneIdx=69, + curMode='4x25G', newMode='2x50G') + cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, force=True, + loadDefConfig=True) # Expected Result delConfig and addConfig is pushed in order delConfig = { 'ACL_TABLE': { @@ -543,8 +554,9 @@ def dpb_port4_4x25G_2x50G_f_l(self, curConfig): self.postUpdateConfig(curConfig, delConfig, addConfig) return + ###########GLOBAL Configs##################################### -configDbJson = { +configDbJson = { "ACL_TABLE": { "NO-NSW-PACL-TEST": { "policy_desc": "NO-NSW-PACL-TEST", @@ -553,7 +565,7 @@ def dpb_port4_4x25G_2x50G_f_l(self, curConfig): "ports": [ "Ethernet9", "Ethernet11", - ] + ] }, "NO-NSW-PACL-V4": { "policy_desc": "NO-NSW-PACL-V4", @@ -564,7 +576,7 @@ def dpb_port4_4x25G_2x50G_f_l(self, curConfig): "Ethernet4", "Ethernet8", "Ethernet10" - ] + ] } }, "VLAN": { @@ -691,7 +703,7 @@ def dpb_port4_4x25G_2x50G_f_l(self, curConfig): "ports": [ "Ethernet9", "Ethernet11", - ] + ] }, "NO-NSW-PACL-V4": { "policy_desc": "NO-NSW-PACL-V4", @@ -700,7 +712,7 @@ def dpb_port4_4x25G_2x50G_f_l(self, curConfig): "Ethernet4", "Ethernet8", "Ethernet10" - ] + ] } }, "VLAN": { @@ -718,7 +730,7 @@ def dpb_port4_4x25G_2x50G_f_l(self, curConfig): }, "Vlan100|Ethernet11": { "tagging_mode": "untagged" - } + } }, "INTERFACE": { "Ethernet11": {}, diff --git a/tests/config_test.py b/tests/config_test.py index 89d8313d53..381ca80304 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -1,5 +1,5 @@ import filecmp -import imp +import importlib import os import traceback import json @@ -37,7 +37,7 @@ def setup_class(cls): os.environ['UTILITIES_UNIT_TESTING'] = "1" print("SETUP") import config.main - imp.reload(config.main) + importlib.reload(config.main) def test_load_minigraph(self, get_cmd_module, setup_single_broadcom_asic): with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: @@ -63,7 +63,7 @@ def setup_class(cls): print("SETUP") os.environ['UTILITIES_UNIT_TESTING'] = "2" import config.main - imp.reload(config.main) + importlib.reload(config.main) def test_qos_reload_single( self, get_cmd_module, setup_qos_mock_apis, @@ -105,7 +105,7 @@ def setup_class(cls): os.environ['UTILITIES_UNIT_TESTING'] = "2" os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" import config.main - imp.reload(config.main) + importlib.reload(config.main) def test_qos_reload_masic( self, get_cmd_module, setup_qos_mock_apis, @@ -148,5 +148,5 @@ def teardown_class(cls): # change back to single asic config from .mock_tables import dbconnector from .mock_tables import mock_single_asic - imp.reload(mock_single_asic) + importlib.reload(mock_single_asic) dbconnector.load_namespace_config() diff --git a/tests/crm_test.py b/tests/crm_test.py index 0c42bf17fc..369d9a51ab 100644 --- a/tests/crm_test.py +++ b/tests/crm_test.py @@ -1,4 +1,4 @@ -import imp +import importlib import os import sys from importlib import reload @@ -1577,5 +1577,5 @@ def teardown_class(cls): os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" from .mock_tables import dbconnector from .mock_tables import mock_single_asic - imp.reload(mock_single_asic) + importlib.reload(mock_single_asic) dbconnector.load_namespace_config() diff --git a/tests/decode_syseeprom_test.py b/tests/decode_syseeprom_test.py index 4b7758d36c..ae3801d369 100644 --- a/tests/decode_syseeprom_test.py +++ 
b/tests/decode_syseeprom_test.py @@ -1,10 +1,10 @@ -import importlib import os import sys from unittest import mock import pytest from click.testing import CliRunner +from utilities_common.general import load_module_from_source from .mock_tables import dbconnector @@ -17,10 +17,7 @@ decode_syseeprom_path = os.path.join(scripts_path, 'decode-syseeprom') -loader = importlib.machinery.SourceFileLoader('decode-syseeprom', decode_syseeprom_path) -spec = importlib.util.spec_from_loader(loader.name, loader) -decode_syseeprom = importlib.util.module_from_spec(spec) -loader.exec_module(decode_syseeprom) +decode_syseeprom = load_module_from_source('decode-syseeprom', decode_syseeprom_path) # Replace swsscommon objects with mocked objects decode_syseeprom.SonicV2Connector = dbconnector.SonicV2Connector diff --git a/tests/feature_test.py b/tests/feature_test.py index 661dc9584d..be01eede12 100644 --- a/tests/feature_test.py +++ b/tests/feature_test.py @@ -1,4 +1,4 @@ -from importlib import reload +import importlib from click.testing import CliRunner @@ -395,7 +395,7 @@ def setup_class(cls): def test_config_bgp_feature_inconsistent_state(self, get_cmd_module): from .mock_tables import dbconnector from .mock_tables import mock_multi_asic_3_asics - reload(mock_multi_asic_3_asics) + importlib.reload(mock_multi_asic_3_asics) dbconnector.load_namespace_config() (config, show) = get_cmd_module db = Db() @@ -414,7 +414,7 @@ def test_config_bgp_feature_inconsistent_state(self, get_cmd_module): def test_config_bgp_feature_inconsistent_autorestart(self, get_cmd_module): from .mock_tables import dbconnector from .mock_tables import mock_multi_asic_3_asics - reload(mock_multi_asic_3_asics) + importlib.reload(mock_multi_asic_3_asics) dbconnector.load_namespace_config() (config, show) = get_cmd_module db = Db() @@ -433,7 +433,7 @@ def test_config_bgp_feature_inconsistent_autorestart(self, get_cmd_module): def test_config_bgp_feature_consistent_state(self, get_cmd_module): from .mock_tables import dbconnector from .mock_tables import mock_multi_asic - reload(mock_multi_asic) + importlib.reload(mock_multi_asic) dbconnector.load_namespace_config() (config, show) = get_cmd_module db = Db() @@ -457,7 +457,7 @@ def test_config_bgp_feature_consistent_state(self, get_cmd_module): def test_config_bgp_feature_consistent_autorestart(self, get_cmd_module): from .mock_tables import dbconnector from .mock_tables import mock_multi_asic - reload(mock_multi_asic) + importlib.reload(mock_multi_asic) dbconnector.load_namespace_config() (config, show) = get_cmd_module db = Db() @@ -484,4 +484,4 @@ def test_config_bgp_feature_consistent_autorestart(self, get_cmd_module): def teardown_class(cls): print("TEARDOWN") from .mock_tables import mock_single_asic - reload(mock_single_asic) + importlib.reload(mock_single_asic) diff --git a/tests/neighbor_advertiser_test.py b/tests/neighbor_advertiser_test.py index c6dee598af..4a7ab41863 100644 --- a/tests/neighbor_advertiser_test.py +++ b/tests/neighbor_advertiser_test.py @@ -1,18 +1,21 @@ -import sys import os -import pytest -from unittest import mock import subprocess +import sys +from unittest import mock + +import pytest from swsscommon.swsscommon import ConfigDBConnector +from utilities_common.general import load_module_from_source test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) scripts_path = os.path.join(modules_path, "scripts") sys.path.insert(0, modules_path) -from imp import load_source -load_source('neighbor_advertiser', 
scripts_path+'/neighbor_advertiser')
-import neighbor_advertiser
+# Load the file under test
+neighbor_advertiser_path = os.path.join(scripts_path, 'neighbor_advertiser')
+neighbor_advertiser = load_module_from_source('neighbor_advertiser', neighbor_advertiser_path)
+
 class TestNeighborAdvertiser(object):
     @pytest.fixture
diff --git a/tests/pfcstat_test.py b/tests/pfcstat_test.py
index 8a63539efb..6e0a76d364 100644
--- a/tests/pfcstat_test.py
+++ b/tests/pfcstat_test.py
@@ -1,4 +1,3 @@
-import imp
 import os
 import shutil
 import sys
diff --git a/tests/pfcwd_test.py b/tests/pfcwd_test.py
index 9a97ff7b33..be04a11f25 100644
--- a/tests/pfcwd_test.py
+++ b/tests/pfcwd_test.py
@@ -1,4 +1,4 @@
-import imp
+import importlib
 import os
 import sys
 from unittest.mock import patch
@@ -273,7 +273,7 @@ def setup_class(cls):
         os.environ["UTILITIES_UNIT_TESTING"] = "2"
         os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic"
         import pfcwd.main
-        imp.reload(pfcwd.main)
+        importlib.reload(pfcwd.main)

     def test_pfcwd_stats_all(self):
         import pfcwd.main as pfcwd
diff --git a/tests/port2alias_test.py b/tests/port2alias_test.py
index 842bc8ee5d..03b017f968 100644
--- a/tests/port2alias_test.py
+++ b/tests/port2alias_test.py
@@ -1,10 +1,13 @@
-import sys
 import os
+import sys
 from unittest import TestCase

-import imp
+from utilities_common.general import load_module_from_source
+
+# Load the file under test
+port2alias_path = os.path.join(os.path.dirname(__file__), '..', 'scripts', 'port2alias')
+port2alias = load_module_from_source('port2alias', port2alias_path)

-port2alias = imp.load_source('port2alias', os.path.join(os.path.dirname(__file__), '..', 'scripts', 'port2alias'))

 class TestPort2Alias(TestCase):
     def setUp(self):
diff --git a/tests/psushow_test.py b/tests/psushow_test.py
index c5038ba6c3..fd40a3beb8 100644
--- a/tests/psushow_test.py
+++ b/tests/psushow_test.py
@@ -1,10 +1,10 @@
-import importlib
 import os
 import sys
 from unittest import mock

 import pytest
 from click.testing import CliRunner
+from utilities_common.general import load_module_from_source

 from .mock_tables import dbconnector

@@ -17,10 +17,7 @@

 # Load the file under test
 psushow_path = os.path.join(scripts_path, 'psushow')
-loader = importlib.machinery.SourceFileLoader('psushow', psushow_path)
-spec = importlib.util.spec_from_loader(loader.name, loader)
-psushow = importlib.util.module_from_spec(spec)
-loader.exec_module(psushow)
+psushow = load_module_from_source('psushow', psushow_path)

 # Replace swsscommon objects with mocked objects
 psushow.SonicV2Connector = dbconnector.SonicV2Connector
diff --git a/tests/watermarkstat_test.py b/tests/watermarkstat_test.py
index 3b710896c3..cd4eae00a3 100644
--- a/tests/watermarkstat_test.py
+++ b/tests/watermarkstat_test.py
@@ -1,4 +1,3 @@
-import imp
 import os
 import sys

diff --git a/utilities_common/general.py b/utilities_common/general.py
new file mode 100644
index 0000000000..a1982793da
--- /dev/null
+++ b/utilities_common/general.py
@@ -0,0 +1,17 @@
+import importlib.machinery
+import importlib.util
+import sys
+
+def load_module_from_source(module_name, file_path):
+    """
+    This function will load the Python source file specified by <file_path>
+    as a module named <module_name> and return an instance of the module
+    """
+    loader = importlib.machinery.SourceFileLoader(module_name, file_path)
+    spec = importlib.util.spec_from_loader(loader.name, loader)
+    module = importlib.util.module_from_spec(spec)
+    loader.exec_module(module)
+
+    sys.modules[module_name] = module
+
+    return module
diff --git a/utilities_common/util_base.py b/utilities_common/util_base.py
index d32e2dbf70..ff5570735c 100644
--- a/utilities_common/util_base.py
+++ b/utilities_common/util_base.py
@@ -49,7 +49,7 @@ def register_plugin(self, plugin, root_command):
     # try get information from platform API and return a default value if caught NotImplementedError
     def try_get(self, callback, default=None):
         """
-        Handy function to invoke the callback and catch NotImplementedError
+        Handy function to invoke the callback, catch NotImplementedError and return a default value
         :param callback: Callback to be invoked
         :param default: Default return value if exception occur
         :return: Default return value if exception occur else return value of the callback
@@ -82,4 +82,3 @@ def check_pddf_mode(self):
             return True
         else:
             return False
-

From 053978934eee34aa4c20e18d5244c480f6c97f61 Mon Sep 17 00:00:00 2001
From: Sumukha Tumkur Vani
Date: Thu, 8 Apr 2021 23:54:48 -0700
Subject: [PATCH 03/41] [load_minigraph]: Avoid starting PFCWD for EPMS devicetype (#1552)

---
 config/main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/config/main.py b/config/main.py
index 18eda659d9..e5a3cf6d0f 100644
--- a/config/main.py
+++ b/config/main.py
@@ -1186,7 +1186,7 @@ def load_minigraph(db, no_service_restart):

     # get the device type
     device_type = _get_device_type()
-    if device_type != 'MgmtToRRouter':
+    if device_type != 'MgmtToRRouter' and device_type != 'EPMS':
         clicommon.run_command("pfcwd start_default", display_cmd=True)

     # Update SONiC environmnet file

From 02b263a6309484b6cdc43f416d84cd50a5e0005b Mon Sep 17 00:00:00 2001
From: vganesan-nokia <67648637+vganesan-nokia@users.noreply.github.com>
Date: Fri, 9 Apr 2021 18:14:07 -0400
Subject: [PATCH 04/41] [voq/inbandif] Voq inbandif port (#1363)

Inband ports can be made available in the PORT table, but regular port
handling is not applicable to them. This PR adds changes to avoid regular
port handling for inband ports in the route_check and sfpshow scripts.
---
 scripts/route_check.py               | 33 ++++++++++++++++++++++++
 scripts/sfpshow                      |  6 ++---
 tests/mock_tables/appl_db.json       | 12 +++++++++
 tests/mock_tables/asic0/appl_db.json | 12 +++++++++
 tests/mock_tables/asic1/appl_db.json | 12 +++++++++
 tests/mock_tables/asic2/appl_db.json | 12 +++++++++
 tests/route_check_test.py            | 38 ++++++++++++++++++++++++++++
 7 files changed, 122 insertions(+), 3 deletions(-)

diff --git a/scripts/route_check.py b/scripts/route_check.py
index efc144c2d3..fe870ab076 100755
--- a/scripts/route_check.py
+++ b/scripts/route_check.py
@@ -354,6 +354,36 @@ def filter_out_local_interfaces(keys):
     return rt


+def filter_out_voq_neigh_routes(keys):
+    """
+    helper to filter out voq neigh routes. These are the
+    routes statically added for the voq neighbors. We skip
+    writing route entries in the ASIC DB for these, so we filter
+    them out here instead of reporting errors on the host routes
+    written on inband interfaces prefixed with "Ethernet-IB"
+    :param keys: APPL-DB:ROUTE_TABLE Routes to check.
+    :return keys filtered out for voq neigh routes
+    """
+    rt = []
+    local_if_re = [r'Ethernet-IB\d+']
+
+    db = swsscommon.DBConnector(APPL_DB_NAME, 0)
+    tbl = swsscommon.Table(db, 'ROUTE_TABLE')
+
+    for k in keys:
+        prefix = k.split("/")
+        e = dict(tbl.get(k)[1])
+        if not e:
+            # Prefix might have been added. So try w/o it.
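            # (Illustration, using entries from the accompanying test data in
            # route_check_test.py: the voq-neighbor host routes this filter
            # skips look like
            #   "10.10.197.1"          -> {"ifname": "Ethernet-IB0", "nexthop": "0.0.0.0"}
            #   "2603:10b0:503:df5::1" -> {"ifname": "Ethernet-IB0", "nexthop": "::"}
            # i.e. /32 or /128 host routes on an inband port with a zero nexthop.)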
+ e = dict(tbl.get(prefix[0])[1]) + if not e or all([not (re.match(x, e['ifname']) and + ((prefix[1] == "32" and e['nexthop'] == "0.0.0.0") or + (prefix[1] == "128" and e['nexthop'] == "::"))) for x in local_if_re]): + rt.append(k) + + return rt + + def filter_out_default_routes(lst): """ helper to filter out default routes @@ -411,6 +441,9 @@ def check_routes(): if rt_appl_miss: rt_appl_miss = filter_out_local_interfaces(rt_appl_miss) + if rt_appl_miss: + rt_appl_miss = filter_out_voq_neigh_routes(rt_appl_miss) + if rt_appl_miss or rt_asic_miss: # Look for subscribe updates for a second adds, deletes = get_subscribe_updates(selector, subs) diff --git a/scripts/sfpshow b/scripts/sfpshow index 119e1252e9..3ee80ea2c0 100755 --- a/scripts/sfpshow +++ b/scripts/sfpshow @@ -12,7 +12,7 @@ import sys import click from natsort import natsorted -from sonic_py_common.interface import front_panel_prefix, backplane_prefix +from sonic_py_common.interface import front_panel_prefix, backplane_prefix, inband_prefix from sonic_py_common import multi_asic from tabulate import tabulate from utilities_common import multi_asic as multi_asic_util @@ -411,7 +411,7 @@ class SFPShow(object): sorted_table_keys = natsorted(port_table_keys) for i in sorted_table_keys: interface = re.split(':', i, maxsplit=1)[-1].strip() - if interface and interface.startswith(front_panel_prefix()) and not interface.startswith(backplane_prefix()): + if interface and interface.startswith(front_panel_prefix()) and not interface.startswith(backplane_prefix()) and not interface.startswith(inband_prefix()): presence = self.db.exists(self.db.STATE_DB, 'TRANSCEIVER_INFO|{}'.format(interface)) if presence: self.output += self.convert_interface_sfp_info_to_cli_output_string( @@ -435,7 +435,7 @@ class SFPShow(object): port_table_keys = self.db.keys(self.db.APPL_DB, "PORT_TABLE:*") for i in port_table_keys: key = re.split(':', i, maxsplit=1)[-1].strip() - if key and key.startswith(front_panel_prefix()) and not key.startswith(backplane_prefix()): + if key and key.startswith(front_panel_prefix()) and not key.startswith(backplane_prefix()) and not key.startswith(inband_prefix()): presence = self.db.exists(self.db.STATE_DB, 'TRANSCEIVER_INFO|{}'.format(key)) if presence: port_table.append((key, 'Present')) diff --git a/tests/mock_tables/appl_db.json b/tests/mock_tables/appl_db.json index 803473b9c4..a73fbb0e55 100644 --- a/tests/mock_tables/appl_db.json +++ b/tests/mock_tables/appl_db.json @@ -108,6 +108,18 @@ "pfc_asym": "off", "admin_status": "up" }, + "PORT_TABLE:Ethernet-IB0": { + "admin_status": "up", + "alias": "Ethernet-IB0", + "asic_port_name": "Rcy-ASIC0", + "description": "", + "index": "148", + "lanes": "109,0,0,0,0,0,0,0", + "mtu": "9100", + "oper_status": "up", + "role": "Int", + "speed": "100000" + }, "INTF_TABLE:Ethernet0.10": { "admin_status": "up" }, diff --git a/tests/mock_tables/asic0/appl_db.json b/tests/mock_tables/asic0/appl_db.json index cfe085962f..875a89fc8b 100644 --- a/tests/mock_tables/asic0/appl_db.json +++ b/tests/mock_tables/asic0/appl_db.json @@ -47,6 +47,18 @@ "speed": "40000", "asic_port_name": "Eth17-ASIC0" }, + "PORT_TABLE:Ethernet-IB0": { + "admin_status": "up", + "alias": "Ethernet-IB0", + "asic_port_name": "Rcy-ASIC0", + "description": "", + "index": "148", + "lanes": "109,0,0,0,0,0,0,0", + "mtu": "9100", + "oper_status": "up", + "role": "Int", + "speed": "100000" + }, "LAG_MEMBER_TABLE:PortChannel1002:Ethernet0": { "status": "disabled" }, diff --git a/tests/mock_tables/asic1/appl_db.json 
b/tests/mock_tables/asic1/appl_db.json index 3ac977cb02..ed5fbb2c3c 100644 --- a/tests/mock_tables/asic1/appl_db.json +++ b/tests/mock_tables/asic1/appl_db.json @@ -33,6 +33,18 @@ "speed": "40000", "asic_port_name": "Eth1-ASIC1" }, + "PORT_TABLE:Ethernet-IB1": { + "admin_status": "up", + "alias": "Ethernet-IB1", + "asic_port_name": "Rcy-ASIC1", + "description": "", + "index": "152", + "lanes": "109,0,0,0,0,0,0,0", + "mtu": "9100", + "oper_status": "up", + "role": "Int", + "speed": "100000" + }, "LAG_TABLE:PortChannel4009": { "admin_status": "up", "oper_status": "up", diff --git a/tests/mock_tables/asic2/appl_db.json b/tests/mock_tables/asic2/appl_db.json index a9c630228b..56eaf377bb 100644 --- a/tests/mock_tables/asic2/appl_db.json +++ b/tests/mock_tables/asic2/appl_db.json @@ -47,6 +47,18 @@ "speed": "40000", "asic_port_name": "Eth17-ASIC2" }, + "PORT_TABLE:Ethernet-IB2": { + "admin_status": "up", + "alias": "Ethernet-IB2", + "asic_port_name": "Rcy-ASIC2", + "description": "", + "index": "156", + "lanes": "109,0,0,0,0,0,0,0", + "mtu": "9100", + "oper_status": "up", + "role": "Int", + "speed": "100000" + }, "LAG_MEMBER_TABLE:PortChannel1015:Ethernet20": { "status": "enabled" }, diff --git a/tests/route_check_test.py b/tests/route_check_test.py index cf271f6669..f981275314 100644 --- a/tests/route_check_test.py +++ b/tests/route_check_test.py @@ -180,6 +180,44 @@ } } } + }, + "4": { + DESCR: "Good one with routes on voq inband interface", + ARGS: "route_check", + PRE: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo" }, + "10.10.197.1" : { "ifname": "Ethernet-IB0", "nexthop": "0.0.0.0"}, + "2603:10b0:503:df5::1" : { "ifname": "Ethernet-IB0", "nexthop": "::"}, + "100.0.0.2/32" : { "ifname": "Ethernet-IB0", "nexthop": "0.0.0.0" }, + "2064:100::2/128" : { "ifname": "Ethernet-IB0", "nexthop": "::" }, + "101.0.0.0/24" : { "ifname": "Ethernet-IB0", "nexthop": "100.0.0.2"} + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {}, + "Ethernet-IB0:10.10.197.1/24": {}, + "Ethernet-IB0:2603:10b0:503:df5::1/64": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.197.1/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df5::1/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "101.0.0.0/24" + RT_ENTRY_KEY_SUFFIX: {} + } + } + } } } From 38f9f605c384f06d6061db4adda7951fcb69b9be Mon Sep 17 00:00:00 2001 From: Samuel Angebault Date: Sat, 10 Apr 2021 09:18:16 -0700 Subject: [PATCH 05/41] sonic-installer: fix py3 issues in bootloader.aboot (#1553) These issues are only seen on secureboot enabled platform. 
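The failure is the usual Python 3 bytes-versus-str mismatch: subprocess output
and zipfile streams yield bytes, so str operations on them raise TypeError (or
compare unequal silently) until the data is decoded. A minimal sketch of the
pattern being fixed, with a made-up device path:

    import subprocess

    raw = subprocess.check_output(['losetup', '-f'])  # bytes under py3, e.g. b'/dev/loop0\n'
    loopdev = raw.decode('utf8').rstrip()             # str: '/dev/loop0'
    # likewise, lines read from the zipped sig file must be decoded before
    # str.split(':') can be applied to them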
---
 sonic_installer/bootloader/aboot.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/sonic_installer/bootloader/aboot.py b/sonic_installer/bootloader/aboot.py
index 9d12862948..3bf3e297e7 100644
--- a/sonic_installer/bootloader/aboot.py
+++ b/sonic_installer/bootloader/aboot.py
@@ -156,7 +156,7 @@ def getCert(cls, swiFile):
             return None
         with swi.open(sigInfo, 'r') as sigFile:
             for line in sigFile:
-                data = line.split(':')
+                data = line.decode('utf8').split(':')
                 if len(data) == 2:
                     if data[0] == ISSUERCERT:
                         try:
@@ -197,7 +197,7 @@ def get_rootfs_path(self, image_path):
         swipath = os.path.join(image_path, DEFAULT_SWI_IMAGE)
         offset = self._get_swi_file_offset(swipath, ROOTFS_NAME)

-        loopdev = subprocess.check_output(['losetup', '-f']).rstrip()
+        loopdev = subprocess.check_output(['losetup', '-f']).decode('utf8').rstrip()

         try:
             run_command_or_raise(['losetup', '-o', str(offset), loopdev, swipath])

From e57e7f7ba1bbca63c9bcbea58c5913d48c7e8d9a Mon Sep 17 00:00:00 2001
From: aystarik
Date: Sun, 11 Apr 2021 00:33:42 +0300
Subject: [PATCH 06/41] cache the bvid to vlan translations (#1523)

Add a lookup table for bvid-to-vlan translations. bvid_tlb stores previous
successful translations from the slow get_vlan_id_from_bvid().

This patch does not change the output of the command; it only speeds it up
for the case of 10k+ MAC tables.
---
 scripts/fdbshow | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/scripts/fdbshow b/scripts/fdbshow
index 2ae95cab79..97e51a784f 100755
--- a/scripts/fdbshow
+++ b/scripts/fdbshow
@@ -87,6 +87,7 @@ class FdbShow(object):
         if not fdb_str:
             return

+        bvid_tlb = {}
         oid_pfx = len("oid:0x")
         for s in fdb_str:
             fdb_entry = s
@@ -111,15 +112,20 @@ class FdbShow(object):
                 if 'bvid' not in fdb:
                     # no possibility to find the Vlan id. skip the FDB entry
                     continue
-                try:
-                    vlan_id = port_util.get_vlan_id_from_bvid(self.db, fdb["bvid"])
-                    if vlan_id is None:
-                        # the situation could be faced if the system has an FDB entries,
-                        # which are linked to default Vlan(caused by untagged trafic)
-                        continue
-                except Exception:
-                    vlan_id = fdb["bvid"]
-                    print("Failed to get Vlan id for bvid {}\n".format(fdb["bvid"]))
+                bvid = fdb["bvid"]
+                if bvid in bvid_tlb:
+                    vlan_id = bvid_tlb[bvid]
+                else:
+                    try:
+                        vlan_id = port_util.get_vlan_id_from_bvid(self.db, bvid)
+                        bvid_tlb[bvid] = vlan_id
+                        if vlan_id is None:
+                            # this can happen if the system has FDB entries
+                            # linked to the default Vlan (caused by untagged traffic)
+                            continue
+                    except Exception:
+                        vlan_id = bvid
+                        print("Failed to get Vlan id for bvid {}\n".format(bvid))

                 self.bridge_mac_list.append((int(vlan_id),) + (fdb["mac"],) + (if_name,) + (fdb_type,))

         self.bridge_mac_list.sort(key = lambda x: x[0])

From eba5c047bcd9ef476e23a1334e5529df12831b0a Mon Sep 17 00:00:00 2001
From: gechiang <62408185+gechiang@users.noreply.github.com>
Date: Wed, 14 Apr 2021 17:46:11 -0700
Subject: [PATCH 07/41] Fix Multi-ASIC show specific recursive route by using
 common parsing function (#1560)

* Fix Multi-ASIC show specific recursive route by using common parsing function

* Use True/False as parameter instead of 1/0
---
 show/bgp_common.py                            | 22 +++++------
 tests/conftest.py                             |  2 +
 tests/ip_show_routes_multi_asic_test.py       | 14 +++++++
 .../asic0/ip_special_recursive_route.json     | 39 +++++++++++++++++++
 .../asic1/ip_special_recursive_route.json     |  1 +
 .../asic2/ip_special_recursive_route.json     | 35 +++++++++++++++++
 tests/show_ip_route_common.py                 | 11 ++++++
 7 files changed, 113 insertions(+), 11 deletions(-)
 create mode 100644 tests/mock_tables/asic0/ip_special_recursive_route.json
 create mode 100644 tests/mock_tables/asic1/ip_special_recursive_route.json
 create mode 100644 tests/mock_tables/asic2/ip_special_recursive_route.json

diff --git a/show/bgp_common.py b/show/bgp_common.py
index f439c97e65..50df00dcbb 100644
--- a/show/bgp_common.py
+++ b/show/bgp_common.py
@@ -52,12 +52,18 @@ def get_mpls_label_strgs(label_list):
             label_str_2_return += "/" + label_string
     return label_str_2_return

-def get_nexthop_info_str(nxhp_info):
+def get_nexthop_info_str(nxhp_info, filterByIp):
     str_2_return = ""
     if "ip" in nxhp_info:
-        str_2_return = "  via {},".format(nxhp_info['ip'])
+        if filterByIp:
+            str_2_return = "  * {}".format(nxhp_info['ip'])
+        else:
+            str_2_return = "  via {},".format(nxhp_info['ip'])
         if "interfaceName" in nxhp_info:
-            str_2_return += " {},".format(nxhp_info['interfaceName'])
+            if filterByIp:
+                str_2_return += ", via {}".format(nxhp_info['interfaceName'])
+            else:
+                str_2_return += " {},".format(nxhp_info['interfaceName'])
     elif "directlyConnected" in nxhp_info:
         str_2_return = " is directly connected,"
         if "interfaceName" in nxhp_info:
@@ -152,13 +158,7 @@ def print_ip_routes(route_info, filter_by_ip):
                     if "directlyConnected" in info[i]['nexthops'][j]:
                         print("  * directly connected, {}\n".format(info[i]['nexthops'][j]['interfaceName']))
                     else:
-                        if "ip" in info[i]['nexthops'][j]:
-                            str_2_print = "  * {}".format(info[i]['nexthops'][j]['ip'])
-
-                        if "active" in info[i]['nexthops'][j]:
-                            str_2_print += ", via {}".format(info[i]['nexthops'][j]['interfaceName'])
-                        else:
-                            str_2_print += " inactive"
+                        str_2_print = get_nexthop_info_str(info[i]['nexthops'][j], True)
                         print(str_2_print)
             print("")
     else:
@@ -183,7 +183,7 @@ def print_ip_routes(route_info, filter_by_ip):
                 # For all subsequent nexthops
skip the spacing to not repeat the prefix section str_2_print += " "*(str_length - 3) # Get the nexhop info portion of the string - str_2_print += get_nexthop_info_str(info[i]['nexthops'][j]) + str_2_print += get_nexthop_info_str(info[i]['nexthops'][j], False) # add uptime at the end of the string str_2_print += " {}".format(info[i]['uptime']) # print out this string diff --git a/tests/conftest.py b/tests/conftest.py index 9a2f30438e..16c018bb64 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -170,6 +170,8 @@ def setup_multi_asic_bgp_instance(request): m_asic_json_file = 'ip_empty_route.json' elif request.param == 'ip_specific_route_on_1_asic': m_asic_json_file = 'ip_special_route_asic0_only.json' + elif request.param == 'ip_specific_recursive_route': + m_asic_json_file = 'ip_special_recursive_route.json' elif request.param == 'ip_route_summary': m_asic_json_file = 'ip_route_summary.txt' else: diff --git a/tests/ip_show_routes_multi_asic_test.py b/tests/ip_show_routes_multi_asic_test.py index 19e9137df0..271467dae5 100644 --- a/tests/ip_show_routes_multi_asic_test.py +++ b/tests/ip_show_routes_multi_asic_test.py @@ -77,6 +77,20 @@ def test_show_multi_asic_ip_route_specific_on_1_asic( assert result.exit_code == 0 assert result.output == show_ip_route_common.show_specific_ip_route_expected_output + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_specific_recursive_route'], indirect=['setup_multi_asic_bgp_instance']) + def test_show_multi_asic_ip_route_specific_recursive_route( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + show = setup_ip_route_commands + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["193.11.208.0/25"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.show_specific_recursive_route_expected_output + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', ['ipv6_specific_route'], indirect=['setup_multi_asic_bgp_instance']) def test_show_multi_asic_ipv6_route_specific( diff --git a/tests/mock_tables/asic0/ip_special_recursive_route.json b/tests/mock_tables/asic0/ip_special_recursive_route.json new file mode 100644 index 0000000000..0904495ceb --- /dev/null +++ b/tests/mock_tables/asic0/ip_special_recursive_route.json @@ -0,0 +1,39 @@ +{ + "193.11.208.0/25": [ + { + "destSelected": true, + "distance": 20, + "installed": true, + "internalFlags": 8, + "internalNextHopActiveNum": 2, + "internalNextHopNum": 2, + "internalStatus": 16, + "metric": 0, + "nexthops": [ + { + "active": true, + "afi": "ipv4", + "fib": true, + "flags": 3, + "interfaceIndex": 728, + "interfaceName": "PortChannel0005", + "ip": "10.0.0.5" + }, + { + "active": true, + "afi": "ipv4", + "fib": true, + "flags": 3, + "interfaceIndex": 727, + "interfaceName": "PortChannel0002", + "ip": "10.0.0.1" + } + ], + "prefix": "193.11.208.0/25", + "protocol": "bgp", + "selected": true, + "table": 254, + "uptime": "00:14:32" + } + ] +} diff --git a/tests/mock_tables/asic1/ip_special_recursive_route.json b/tests/mock_tables/asic1/ip_special_recursive_route.json new file mode 100644 index 0000000000..0967ef424b --- /dev/null +++ b/tests/mock_tables/asic1/ip_special_recursive_route.json @@ -0,0 +1 @@ +{} diff --git a/tests/mock_tables/asic2/ip_special_recursive_route.json b/tests/mock_tables/asic2/ip_special_recursive_route.json new file mode 100644 index 0000000000..76f6de0ed5 --- /dev/null +++ b/tests/mock_tables/asic2/ip_special_recursive_route.json @@ -0,0 +1,35 
@@ +{ + "193.11.208.0/25": [ + { + "destSelected": true, + "distance": 20, + "installed": true, + "internalFlags": 8, + "internalNextHopActiveNum": 0, + "internalNextHopNum": 0, + "internalStatus": 16, + "metric": 0, + "nexthops": [ + { + "active": true, + "afi": "ipv4", + "flags": 5, + "ip": "10.0.0.9", + "recursive": true + }, + { + "active": true, + "afi": "ipv4", + "flags": 5, + "ip": "10.0.0.1", + "recursive": true + } + ], + "prefix": "193.11.208.0/25", + "protocol": "bgp", + "selected": true, + "table": 254, + "uptime": "00:14:35" + } + ] +} diff --git a/tests/show_ip_route_common.py b/tests/show_ip_route_common.py index e18e8b0a5a..386d32e55e 100644 --- a/tests/show_ip_route_common.py +++ b/tests/show_ip_route_common.py @@ -93,6 +93,17 @@ """ +show_specific_recursive_route_expected_output = """\ +Routing entry for 193.11.208.0/25 + Known via "bgp", distance 20, metric 0, best + Last update 00:14:32 ago + * 10.0.0.1, via PortChannel0002 + * 10.0.0.5, via PortChannel0005 + * 10.0.0.9 (recursive) + * 10.0.0.1 (recursive) + +""" + show_special_ip_route_expected_output = """\ Codes: K - kernel route, C - connected, S - static, R - RIP, O - OSPF, I - IS-IS, B - BGP, E - EIGRP, N - NHRP, From 0e84418e91905cfc01a9f5e76f1083a44eff5ec7 Mon Sep 17 00:00:00 2001 From: Aravind Mani <53524901+aravindmani-1@users.noreply.github.com> Date: Thu, 15 Apr 2021 09:37:59 -0700 Subject: [PATCH 08/41] Stop PMON docker before cold and soft reboots (#1514) Prevent potential kernel oops if drivers are removed/devices are deinitialized while PMon daemons are still trying to access those devices. --- scripts/reboot | 13 +++++++++++++ scripts/soft-reboot | 12 ++++++++++++ 2 files changed, 25 insertions(+) diff --git a/scripts/reboot b/scripts/reboot index 24607fb095..546aa0fbff 100755 --- a/scripts/reboot +++ b/scripts/reboot @@ -50,17 +50,30 @@ function tag_images() fi } +function stop_pmon_service() +{ + CONTAINER_STOP_RC=0 + debug "Stopping pmon docker" + docker kill pmon &> /dev/null || CONTAINER_STOP_RC=$? + systemctl stop pmon || debug "Ignore stopping pmon error $?" + if [[ CONTAINER_STOP_RC -ne 0 ]]; then + debug "Failed killing container pmon RC $CONTAINER_STOP_RC ." + fi +} + function stop_sonic_services() { if [[ x"$SUBTYPE" == x"DualToR" ]]; then debug "DualToR detected, stopping mux container before reboot..." systemctl stop mux fi + if [[ x"$ASIC_TYPE" != x"mellanox" ]]; then debug "Stopping syncd process..." docker exec -i syncd /usr/bin/syncd_request_shutdown --cold > /dev/null sleep 3 fi + stop_pmon_service } function clear_warm_boot() diff --git a/scripts/soft-reboot b/scripts/soft-reboot index 52ccdd690b..504c58caff 100755 --- a/scripts/soft-reboot +++ b/scripts/soft-reboot @@ -59,6 +59,17 @@ function tag_images() fi } +function stop_pmon_service() +{ + CONTAINER_STOP_RC=0 + debug "Stopping pmon docker" + docker kill pmon &> /dev/null || CONTAINER_STOP_RC=$? + systemctl stop pmon || debug "Ignore stopping pmon error $?" + if [[ CONTAINER_STOP_RC -ne 0 ]]; then + debug "Failed killing container pmon RC $CONTAINER_STOP_RC ." 
+ fi +} + function stop_sonic_services() { if [[ x"$ASIC_TYPE" != x"mellanox" ]]; then @@ -66,6 +77,7 @@ function stop_sonic_services() docker exec -i syncd /usr/bin/syncd_request_shutdown --cold > /dev/null sleep 3 fi + stop_pmon_service } function clear_lingering_reboot_config() From 149ccbd934f11298ffae81ef5b3cbb42f19b767d Mon Sep 17 00:00:00 2001 From: Mahesh Maddikayala <10645050+smaheshm@users.noreply.github.com> Date: Thu, 15 Apr 2021 09:40:55 -0700 Subject: [PATCH 09/41] [techsupport] Update show ip interface command (#1562) + update 'show ip interface' command in tech support collection + fix unbound variable issue in case command times out --- scripts/generate_dump | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/scripts/generate_dump b/scripts/generate_dump index 92508edefa..21d4a4e0c8 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -242,17 +242,20 @@ copy_from_docker() { local dstpath=$3 local timeout_cmd="timeout --foreground ${TIMEOUT_MIN}m" + local touch_cmd="sudo docker exec -i ${docker} touch ${filename}" + local cp_cmd="sudo docker cp ${docker}:${filename} ${dstpath}" + if $NOOP; then - echo "${timeout_cmd} sudo docker exec -i ${docker} touch ${filename}" - echo "${timeout_cmd} sudo docker cp ${docker}:${filename} ${dstpath}" + echo "${timeout_cmd} ${touch_cmd}" + echo "${timeout_cmd} ${cp_cmd}" else - eval "${timeout_cmd} sudo docker exec -i ${docker} touch ${filename}" + eval "${timeout_cmd} ${touch_cmd}" if [ $? -ne 0 ]; then - echo "Command: $cmd timedout after ${TIMEOUT_MIN} minutes." + echo "Command: $touch_cmd timedout after ${TIMEOUT_MIN} minutes." fi - eval "${timeout_cmd} sudo docker cp ${docker}:${filename} ${dstpath}" + eval "${timeout_cmd} ${cp_cmd}" if [ $? -ne 0 ]; then - echo "Command: $cmd timedout after ${TIMEOUT_MIN} minutes." + echo "Command: $cp_cmd timedout after ${TIMEOUT_MIN} minutes." fi fi end_t=$(date +%s%3N) @@ -1084,7 +1087,7 @@ main() { save_cmd "show interface status -d all" "interface.status" save_cmd "show interface transceiver presence" "interface.xcvrs.presence" save_cmd "show interface transceiver eeprom --dom" "interface.xcvrs.eeprom" - save_cmd_all_ns "show ip interface" "ip.interface" + save_cmd "show ip interface -d all" "ip.interface" save_cmd "lldpctl" "lldpctl" if [[ ( "$NUM_ASICS" > 1 ) ]]; then From 176cc4ab2aa058597f1c3cb54d825d429dab45b9 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan <47282725+renukamanavalan@users.noreply.github.com> Date: Fri, 16 Apr 2021 07:40:27 -0700 Subject: [PATCH 10/41] 1) Loopback interfaces with valid nexthop IP are not ignored/treated as loopback. (#1565) 2) The vrf routes are *not* handled. --- scripts/route_check.py | 22 ++++++++++++++++++---- tests/route_check_test.py | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 4 deletions(-) diff --git a/scripts/route_check.py b/scripts/route_check.py index fe870ab076..1e03a9c6bd 100755 --- a/scripts/route_check.py +++ b/scripts/route_check.py @@ -264,6 +264,10 @@ def get_subscribe_updates(selector, subs): return (sorted(adds), sorted(deletes)) +def is_vrf(k): + return k.startswith("Vrf") + + def get_routes(): """ helper to read route table from APPL-DB. 
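Combined with the filter_out_local_interfaces() rework below, the checker now
skips VRF-keyed entries outright and treats lo/tun0/Loopback routes as local
only when they carry no real nexthop. A condensed sketch of the resulting
predicate (simplified: the diff matches Loopback\d+ by regex rather than by
prefix):

    import ipaddress

    def is_skippable(key, ifname, nexthop):
        if key.startswith('Vrf'):          # vrf routes are not handled
            return True
        if ifname in ('eth0', 'docker0'):  # always treated as local
            return True
        if ifname in ('lo', 'tun0') or ifname.startswith('Loopback'):
            # local only when the nexthop is absent or unspecified (0.0.0.0 / ::)
            return not nexthop or ipaddress.ip_address(nexthop).is_unspecified
        return False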
@@ -276,7 +280,7 @@ def get_routes(): valid_rt = [] for k in keys: - if not is_local(k): + if not is_vrf(k) and not is_local(k): valid_rt.append(add_prefix_ifnot(k.lower())) print_message(syslog.LOG_DEBUG, json.dumps({"ROUTE_TABLE": sorted(valid_rt)}, indent=4)) @@ -341,15 +345,25 @@ def filter_out_local_interfaces(keys): :return keys filtered out of local """ rt = [] - local_if_re = [r'eth0', r'lo', r'docker0', r'tun0', r'Loopback\d+'] + local_if_lst = {'eth0', 'docker0'} + local_if_lo = [r'tun0', r'lo', r'Loopback\d+'] db = swsscommon.DBConnector(APPL_DB_NAME, 0) tbl = swsscommon.Table(db, 'ROUTE_TABLE') for k in keys: e = dict(tbl.get(k)[1]) - if not e or all([not re.match(x, e['ifname']) for x in local_if_re]): - rt.append(k) + + ifname = e.get('ifname', '') + if ifname in local_if_lst: + continue + + if any([re.match(x, ifname) for x in local_if_lo]): + nh = e.get('nexthop') + if not nh or ipaddress.ip_address(nh).is_unspecified: + continue + + rt.append(k) return rt diff --git a/tests/route_check_test.py b/tests/route_check_test.py index f981275314..a39015c07f 100644 --- a/tests/route_check_test.py +++ b/tests/route_check_test.py @@ -218,6 +218,40 @@ } } } + }, + "5": { + DESCR: "local route with nexthop - fail", + ARGS: "route_check -m INFO -i 1000", + RET: -1, + PRE: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + "10.10.196.20/31" : { "ifname": "portchannel0" }, + "10.10.196.30/31" : { "ifname": "lo", "nexthop": "100.0.0.2" } + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.20/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } + } + }, + RESULT: { + "missed_ROUTE_TABLE_routes": [ + "10.10.196.30/31" + ] + } } } From d5f538200b3abfb108e385c91d6b7c93746611ca Mon Sep 17 00:00:00 2001 From: Mahesh Maddikayala <10645050+smaheshm@users.noreply.github.com> Date: Fri, 16 Apr 2021 10:41:33 -0700 Subject: [PATCH 11/41] [CLI][queue counters] add JSON output option for queue counters (#1505) + added tests for 'show queue counters' CLI --- scripts/queuestat | 125 +++- show/main.py | 6 +- tests/mock_tables/counters_db.json | 434 ++++++++++++- tests/queue_counter_test.py | 960 +++++++++++++++++++++++++++++ utilities_common/cli.py | 12 +- 5 files changed, 1508 insertions(+), 29 deletions(-) create mode 100644 tests/queue_counter_test.py diff --git a/scripts/queuestat b/scripts/queuestat index 24369c82a5..b7f4f1d382 100755 --- a/scripts/queuestat +++ b/scripts/queuestat @@ -2,7 +2,7 @@ ##################################################################### # -# queuestat is a tool for summarizing queue statistics of all ports. +# queuestat is a tool for summarizing queue statistics of all ports. 
# ##################################################################### @@ -10,13 +10,25 @@ import _pickle as pickle import argparse import datetime import os.path -from swsscommon.swsscommon import SonicV2Connector import sys from collections import namedtuple, OrderedDict from natsort import natsorted from tabulate import tabulate +# mock the redis for unit test purposes # +try: + if os.environ["UTILITIES_UNIT_TESTING"] == "2": + modules_path = os.path.join(os.path.dirname(__file__), "..") + tests_path = os.path.join(modules_path, "tests") + sys.path.insert(0, modules_path) + sys.path.insert(0, tests_path) + import mock_tables.dbconnector # lgtm [py/unused-import] + +except KeyError: + pass + +from swsscommon.swsscommon import SonicV2Connector QueueStats = namedtuple("QueueStats", "queueindex, queuetype, totalpacket, totalbytes, droppacket, dropbytes") header = ['Port', 'TxQ', 'Counter/pkts', 'Counter/bytes', 'Drop/pkts', 'Drop/bytes'] @@ -28,6 +40,7 @@ counter_bucket_dict = { 'SAI_QUEUE_STAT_DROPPED_BYTES': 5, } +from utilities_common.cli import json_dump from utilities_common.netstat import ns_diff, STATUS_NA QUEUE_TYPE_MC = 'MC' @@ -47,6 +60,24 @@ COUNTERS_QUEUE_PORT_MAP = "COUNTERS_QUEUE_PORT_MAP" cnstat_dir = 'N/A' cnstat_fqn_file = 'N/A' + +def build_json(port, cnstat): + def ports_stats(k): + p = {} + p[k[1]] = { + "totalpacket": k[2], + "totalbytes": k[3], + "droppacket": k[4], + "dropbytes": k[5] + } + return p + + out = {} + for k in cnstat: + out.update(ports_stats(k)) + return out + + class Queuestat(object): def __init__(self): self.db = SonicV2Connector(use_unix_socket_path=False) @@ -134,33 +165,45 @@ class Queuestat(object): if queue_map is None: return cnstat_dict for queue in natsorted(queue_map): - cnstat_dict[queue] = get_counters(queue_map[queue]) + cnstat_dict[queue] = get_counters(queue_map[queue]) return cnstat_dict - def cnstat_print(self, port, cnstat_dict): + def cnstat_print(self, port, cnstat_dict, json_opt): """ - Print the cnstat. + Print the cnstat. If JSON option is True, return data in + JSON format. """ table = [] + json_output = {port: {}} for key, data in cnstat_dict.items(): if key == 'time': + if json_opt: + json_output[port][key] = data continue table.append((port, data.queuetype + str(data.queueindex), data.totalpacket, data.totalbytes, data.droppacket, data.dropbytes)) - print(tabulate(table, header, tablefmt='simple', stralign='right')) - print() + if json_opt: + json_output[port].update(build_json(port, table)) + return json_output + else: + print(tabulate(table, header, tablefmt='simple', stralign='right')) + print() - def cnstat_diff_print(self, port, cnstat_new_dict, cnstat_old_dict): + def cnstat_diff_print(self, port, cnstat_new_dict, cnstat_old_dict, json_opt): """ - Print the difference between two cnstat results. + Print the difference between two cnstat results. If JSON + option is True, return data in JSON format. 
""" table = [] + json_output = {port: {}} for key, cntr in cnstat_new_dict.items(): if key == 'time': + if json_opt: + json_output[port][key] = cntr continue old_cntr = None if key in cnstat_old_dict: @@ -177,26 +220,50 @@ class Queuestat(object): cntr.totalpacket, cntr.totalbytes, cntr.droppacket, cntr.dropbytes)) - print(tabulate(table, header, tablefmt='simple', stralign='right')) - print() + if json_opt: + json_output[port].update(build_json(port, table)) + return json_output + else: + print(tabulate(table, header, tablefmt='simple', stralign='right')) + print() - def get_print_all_stat(self): - # Get stat for each port + def get_print_all_stat(self, json_opt): + """ + Get stat for each port + If JSON option is True, collect data for each port and + print data in JSON format for all ports + """ + json_output = {} for port in natsorted(self.counter_port_name_map): + json_output[port] = {} cnstat_dict = self.get_cnstat(self.port_queues_map[port]) cnstat_fqn_file_name = cnstat_fqn_file + port if os.path.isfile(cnstat_fqn_file_name): try: cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_name, 'rb')) - print(port + " Last cached time was " + str(cnstat_cached_dict.get('time'))) - self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict) + if json_opt: + json_output[port].update({"cached_time":cnstat_cached_dict.get('time')}) + json_output.update(self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict, json_opt)) + else: + print(port + " Last cached time was " + str(cnstat_cached_dict.get('time'))) + self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict, json_opt) except IOError as e: print(e.errno, e) else: - self.cnstat_print(port, cnstat_dict) + if json_opt: + json_output.update(self.cnstat_print(port, cnstat_dict, json_opt)) + else: + self.cnstat_print(port, cnstat_dict, json_opt) - def get_print_port_stat(self, port): + if json_opt: + print(json_dump(json_output)) + + def get_print_port_stat(self, port, json_opt): + """ + Get stat for the port + If JSON option is True print data in JSON format + """ if not port in self.port_queues_map: print("Port doesn't exist!", port) sys.exit(1) @@ -204,15 +271,27 @@ class Queuestat(object): # Get stat for the port queried cnstat_dict = self.get_cnstat(self.port_queues_map[port]) cnstat_fqn_file_name = cnstat_fqn_file + port + json_output = {} + json_output[port] = {} if os.path.isfile(cnstat_fqn_file_name): try: cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_name, 'rb')) - print("Last cached time was " + str(cnstat_cached_dict.get('time'))) - self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict) + if json_opt: + json_output[port].update({"cached_time":cnstat_cached_dict.get('time')}) + json_output.update(self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict, json_opt)) + else: + print("Last cached time was " + str(cnstat_cached_dict.get('time'))) + self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict, json_opt) except IOError as e: print(e.errno, e) else: - self.cnstat_print(port, cnstat_dict) + if json_opt: + json_output.update(self.cnstat_print(port, cnstat_dict, json_opt)) + else: + self.cnstat_print(port, cnstat_dict, json_opt) + + if json_opt: + print(json_dump(json_output)) def save_fresh_stats(self): if not os.path.exists(cnstat_dir): @@ -251,10 +330,12 @@ Examples: parser.add_argument('-c', '--clear', action='store_true', help='Clear previous stats and save new ones') parser.add_argument('-d', '--delete', action='store_true', help='Delete saved stats') parser.add_argument('-v', '--version', 
action='version', version='%(prog)s 1.0') + parser.add_argument('-j', '--json_opt', action='store_true', help='Print in JSON format') args = parser.parse_args() save_fresh_stats = args.clear delete_all_stats = args.delete + json_opt = args.json_opt port_to_show_stats = args.port @@ -282,9 +363,9 @@ Examples: sys.exit(0) if port_to_show_stats!=None: - queuestat.get_print_port_stat(port_to_show_stats) + queuestat.get_print_port_stat(port_to_show_stats, json_opt) else: - queuestat.get_print_all_stat() + queuestat.get_print_all_stat(json_opt) sys.exit(0) diff --git a/show/main.py b/show/main.py index 8dbe740e71..16f5d8cec7 100755 --- a/show/main.py +++ b/show/main.py @@ -520,7 +520,8 @@ def queue(): @queue.command() @click.argument('interfacename', required=False) @click.option('--verbose', is_flag=True, help="Enable verbose output") -def counters(interfacename, verbose): +@click.option('--json', is_flag=True, help="JSON output") +def counters(interfacename, verbose, json): """Show queue counters""" cmd = "queuestat" @@ -532,6 +533,9 @@ def counters(interfacename, verbose): if interfacename is not None: cmd += " -p {}".format(interfacename) + if json: + cmd += " -j" + run_command(cmd, display_cmd=verbose) # diff --git a/tests/mock_tables/counters_db.json b/tests/mock_tables/counters_db.json index 3d9164e1d8..4765556f45 100644 --- a/tests/mock_tables/counters_db.json +++ b/tests/mock_tables/counters_db.json @@ -1,4 +1,403 @@ { + "COUNTERS:oid:0x15000000000357": { + "SAI_QUEUE_STAT_BYTES": "30", + "SAI_QUEUE_STAT_DROPPED_BYTES": "74", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "56", + "SAI_QUEUE_STAT_PACKETS": "68", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "61" + }, + "COUNTERS:oid:0x15000000000358": { + "SAI_QUEUE_STAT_BYTES": "43", + "SAI_QUEUE_STAT_DROPPED_BYTES": "1", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "39", + "SAI_QUEUE_STAT_PACKETS": "60", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "88" + }, + "COUNTERS:oid:0x15000000000359": { + "SAI_QUEUE_STAT_BYTES": "7", + "SAI_QUEUE_STAT_DROPPED_BYTES": "21", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "39", + "SAI_QUEUE_STAT_PACKETS": "82", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "4" + }, + "COUNTERS:oid:0x1500000000035b": { + "SAI_QUEUE_STAT_BYTES": "59", + "SAI_QUEUE_STAT_DROPPED_BYTES": "94", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "12", + "SAI_QUEUE_STAT_PACKETS": "11", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "15" + }, + "COUNTERS:oid:0x1500000000035c": { + "SAI_QUEUE_STAT_BYTES": "62", + "SAI_QUEUE_STAT_DROPPED_BYTES": "40", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "35", + "SAI_QUEUE_STAT_PACKETS": "36", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "18" + }, + "COUNTERS:oid:0x1500000000035d": { + "SAI_QUEUE_STAT_BYTES": "91", + "SAI_QUEUE_STAT_DROPPED_BYTES": "88", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "2", + "SAI_QUEUE_STAT_PACKETS": "49", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "73" + }, + "COUNTERS:oid:0x1500000000035e": { + "SAI_QUEUE_STAT_BYTES": "17", + "SAI_QUEUE_STAT_DROPPED_BYTES": "74", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "94", + "SAI_QUEUE_STAT_PACKETS": "33", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "98" + }, + "COUNTERS:oid:0x1500000000035f": { + "SAI_QUEUE_STAT_BYTES": "71", + "SAI_QUEUE_STAT_DROPPED_BYTES": "33", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "95", + "SAI_QUEUE_STAT_PACKETS": "40", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "81" + }, + "COUNTERS:oid:0x15000000000360": { + "SAI_QUEUE_STAT_BYTES": "8", + "SAI_QUEUE_STAT_DROPPED_BYTES": "78", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "93", + "SAI_QUEUE_STAT_PACKETS": "54", + 
"SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "59" + }, + "COUNTERS:oid:0x15000000000363": { + "SAI_QUEUE_STAT_BYTES": "96", + "SAI_QUEUE_STAT_DROPPED_BYTES": "9", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "74", + "SAI_QUEUE_STAT_PACKETS": "83", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "12" + }, + "COUNTERS:oid:0x15000000000364": { + "SAI_QUEUE_STAT_BYTES": "60", + "SAI_QUEUE_STAT_DROPPED_BYTES": "31", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "61", + "SAI_QUEUE_STAT_PACKETS": "15", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "0" + }, + "COUNTERS:oid:0x15000000000365": { + "SAI_QUEUE_STAT_BYTES": "52", + "SAI_QUEUE_STAT_DROPPED_BYTES": "94", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "82", + "SAI_QUEUE_STAT_PACKETS": "45", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "68" + }, + "COUNTERS:oid:0x15000000000366": { + "SAI_QUEUE_STAT_BYTES": "88", + "SAI_QUEUE_STAT_DROPPED_BYTES": "52", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "89", + "SAI_QUEUE_STAT_PACKETS": "55", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "18" + }, + "COUNTERS:oid:0x15000000000367": { + "SAI_QUEUE_STAT_BYTES": "70", + "SAI_QUEUE_STAT_DROPPED_BYTES": "79", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "95", + "SAI_QUEUE_STAT_PACKETS": "14", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "21" + }, + "COUNTERS:oid:0x15000000000368": { + "SAI_QUEUE_STAT_BYTES": "60", + "SAI_QUEUE_STAT_DROPPED_BYTES": "81", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "66", + "SAI_QUEUE_STAT_PACKETS": "68", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "98" + }, + "COUNTERS:oid:0x15000000000369": { + "SAI_QUEUE_STAT_BYTES": "4", + "SAI_QUEUE_STAT_DROPPED_BYTES": "76", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "48", + "SAI_QUEUE_STAT_PACKETS": "63", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "93" + }, + "COUNTERS:oid:0x1500000000036a": { + "SAI_QUEUE_STAT_BYTES": "73", + "SAI_QUEUE_STAT_DROPPED_BYTES": "74", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "77", + "SAI_QUEUE_STAT_PACKETS": "41", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "50" + }, + "COUNTERS:oid:0x1500000000036b": { + "SAI_QUEUE_STAT_BYTES": "21", + "SAI_QUEUE_STAT_DROPPED_BYTES": "54", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "56", + "SAI_QUEUE_STAT_PACKETS": "60", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "30" + }, + "COUNTERS:oid:0x1500000000036c": { + "SAI_QUEUE_STAT_BYTES": "31", + "SAI_QUEUE_STAT_DROPPED_BYTES": "39", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "12", + "SAI_QUEUE_STAT_PACKETS": "57", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "20" + }, + "COUNTERS:oid:0x1500000000037f": { + "SAI_QUEUE_STAT_BYTES": "96", + "SAI_QUEUE_STAT_DROPPED_BYTES": "98", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "70", + "SAI_QUEUE_STAT_PACKETS": "41", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "46" + }, + "COUNTERS:oid:0x15000000000380": { + "SAI_QUEUE_STAT_BYTES": "49", + "SAI_QUEUE_STAT_DROPPED_BYTES": "36", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "63", + "SAI_QUEUE_STAT_PACKETS": "18", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "18" + }, + "COUNTERS:oid:0x15000000000381": { + "SAI_QUEUE_STAT_BYTES": "90", + "SAI_QUEUE_STAT_DROPPED_BYTES": "15", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "3", + "SAI_QUEUE_STAT_PACKETS": "99", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "15" + }, + "COUNTERS:oid:0x15000000000383": { + "SAI_QUEUE_STAT_BYTES": "84", + "SAI_QUEUE_STAT_DROPPED_BYTES": "94", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "82", + "SAI_QUEUE_STAT_PACKETS": "8", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "17" + }, + "COUNTERS:oid:0x15000000000384": { + "SAI_QUEUE_STAT_BYTES": "15", + "SAI_QUEUE_STAT_DROPPED_BYTES": "92", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "75", + 
"SAI_QUEUE_STAT_PACKETS": "83", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "42" + }, + "COUNTERS:oid:0x15000000000385": { + "SAI_QUEUE_STAT_BYTES": "26", + "SAI_QUEUE_STAT_DROPPED_BYTES": "71", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "50", + "SAI_QUEUE_STAT_PACKETS": "84", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "31" + }, + "COUNTERS:oid:0x15000000000386": { + "SAI_QUEUE_STAT_BYTES": "19", + "SAI_QUEUE_STAT_DROPPED_BYTES": "80", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "49", + "SAI_QUEUE_STAT_PACKETS": "27", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "4" + }, + "COUNTERS:oid:0x15000000000387": { + "SAI_QUEUE_STAT_BYTES": "89", + "SAI_QUEUE_STAT_DROPPED_BYTES": "33", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "13", + "SAI_QUEUE_STAT_PACKETS": "13", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "45" + }, + "COUNTERS:oid:0x15000000000388": { + "SAI_QUEUE_STAT_BYTES": "48", + "SAI_QUEUE_STAT_DROPPED_BYTES": "31", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "86", + "SAI_QUEUE_STAT_PACKETS": "43", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "6" + }, + "COUNTERS:oid:0x1500000000038b": { + "SAI_QUEUE_STAT_BYTES": "1", + "SAI_QUEUE_STAT_DROPPED_BYTES": "82", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "57", + "SAI_QUEUE_STAT_PACKETS": "50", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "5" + }, + "COUNTERS:oid:0x1500000000038c": { + "SAI_QUEUE_STAT_BYTES": "99", + "SAI_QUEUE_STAT_DROPPED_BYTES": "59", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "84", + "SAI_QUEUE_STAT_PACKETS": "67", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "32" + }, + "COUNTERS:oid:0x1500000000038d": { + "SAI_QUEUE_STAT_BYTES": "58", + "SAI_QUEUE_STAT_DROPPED_BYTES": "5", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "27", + "SAI_QUEUE_STAT_PACKETS": "4", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "17" + }, + "COUNTERS:oid:0x1500000000038e": { + "SAI_QUEUE_STAT_BYTES": "5", + "SAI_QUEUE_STAT_DROPPED_BYTES": "39", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "57", + "SAI_QUEUE_STAT_PACKETS": "74", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "99" + }, + "COUNTERS:oid:0x1500000000038f": { + "SAI_QUEUE_STAT_BYTES": "59", + "SAI_QUEUE_STAT_DROPPED_BYTES": "14", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "4", + "SAI_QUEUE_STAT_PACKETS": "21", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "71" + }, + "COUNTERS:oid:0x15000000000390": { + "SAI_QUEUE_STAT_BYTES": "61", + "SAI_QUEUE_STAT_DROPPED_BYTES": "53", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "19", + "SAI_QUEUE_STAT_PACKETS": "24", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "56" + }, + "COUNTERS:oid:0x15000000000391": { + "SAI_QUEUE_STAT_BYTES": "15", + "SAI_QUEUE_STAT_DROPPED_BYTES": "32", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "15", + "SAI_QUEUE_STAT_PACKETS": "51", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "50" + }, + "COUNTERS:oid:0x15000000000392": { + "SAI_QUEUE_STAT_BYTES": "18", + "SAI_QUEUE_STAT_DROPPED_BYTES": "15", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "23", + "SAI_QUEUE_STAT_PACKETS": "98", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "4" + }, + "COUNTERS:oid:0x15000000000393": { + "SAI_QUEUE_STAT_BYTES": "34", + "SAI_QUEUE_STAT_DROPPED_BYTES": "57", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "9", + "SAI_QUEUE_STAT_PACKETS": "41", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "61" + }, + "COUNTERS:oid:0x15000000000394": { + "SAI_QUEUE_STAT_BYTES": "7", + "SAI_QUEUE_STAT_DROPPED_BYTES": "99", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "18", + "SAI_QUEUE_STAT_PACKETS": "57", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "49" + }, + "COUNTERS:oid:0x150000000003a7": { + "SAI_QUEUE_STAT_BYTES": "5", + "SAI_QUEUE_STAT_DROPPED_BYTES": "56", + "SAI_QUEUE_STAT_DROPPED_PACKETS": 
"36", + "SAI_QUEUE_STAT_PACKETS": "19", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "65" + }, + "COUNTERS:oid:0x150000000003a8": { + "SAI_QUEUE_STAT_BYTES": "17", + "SAI_QUEUE_STAT_DROPPED_BYTES": "91", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "68", + "SAI_QUEUE_STAT_PACKETS": "38", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "94" + }, + "COUNTERS:oid:0x150000000003a9": { + "SAI_QUEUE_STAT_BYTES": "65", + "SAI_QUEUE_STAT_DROPPED_BYTES": "51", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "79", + "SAI_QUEUE_STAT_PACKETS": "16", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "71" + }, + "COUNTERS:oid:0x150000000003aa": { + "SAI_QUEUE_STAT_BYTES": "97", + "SAI_QUEUE_STAT_DROPPED_BYTES": "72", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "63", + "SAI_QUEUE_STAT_PACKETS": "11", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "51" + }, + "COUNTERS:oid:0x150000000003ac": { + "SAI_QUEUE_STAT_BYTES": "84", + "SAI_QUEUE_STAT_DROPPED_BYTES": "59", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "30", + "SAI_QUEUE_STAT_PACKETS": "13", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "47" + }, + "COUNTERS:oid:0x150000000003ad": { + "SAI_QUEUE_STAT_BYTES": "67", + "SAI_QUEUE_STAT_DROPPED_BYTES": "85", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "99", + "SAI_QUEUE_STAT_PACKETS": "49", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "86" + }, + "COUNTERS:oid:0x150000000003ae": { + "SAI_QUEUE_STAT_BYTES": "63", + "SAI_QUEUE_STAT_DROPPED_BYTES": "88", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "38", + "SAI_QUEUE_STAT_PACKETS": "2", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "98" + }, + "COUNTERS:oid:0x150000000003af": { + "SAI_QUEUE_STAT_BYTES": "82", + "SAI_QUEUE_STAT_DROPPED_BYTES": "43", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "93", + "SAI_QUEUE_STAT_PACKETS": "0", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "50" + }, + "COUNTERS:oid:0x150000000003b0": { + "SAI_QUEUE_STAT_BYTES": "17", + "SAI_QUEUE_STAT_DROPPED_BYTES": "61", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "91", + "SAI_QUEUE_STAT_PACKETS": "80", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "91" + }, + "COUNTERS:oid:0x150000000003b3": { + "SAI_QUEUE_STAT_BYTES": "63", + "SAI_QUEUE_STAT_DROPPED_BYTES": "73", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "76", + "SAI_QUEUE_STAT_PACKETS": "81", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "12" + }, + "COUNTERS:oid:0x150000000003b4": { + "SAI_QUEUE_STAT_BYTES": "16", + "SAI_QUEUE_STAT_DROPPED_BYTES": "66", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "29", + "SAI_QUEUE_STAT_PACKETS": "29", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "32" + }, + "COUNTERS:oid:0x150000000003b5": { + "SAI_QUEUE_STAT_BYTES": "12", + "SAI_QUEUE_STAT_DROPPED_BYTES": "35", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "61", + "SAI_QUEUE_STAT_PACKETS": "32", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "89" + }, + "COUNTERS:oid:0x150000000003b6": { + "SAI_QUEUE_STAT_BYTES": "17", + "SAI_QUEUE_STAT_DROPPED_BYTES": "93", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "72", + "SAI_QUEUE_STAT_PACKETS": "79", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "49" + }, + "COUNTERS:oid:0x150000000003b7": { + "SAI_QUEUE_STAT_BYTES": "21", + "SAI_QUEUE_STAT_DROPPED_BYTES": "50", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "67", + "SAI_QUEUE_STAT_PACKETS": "23", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "4" + }, + "COUNTERS:oid:0x150000000003b8": { + "SAI_QUEUE_STAT_BYTES": "10", + "SAI_QUEUE_STAT_DROPPED_BYTES": "14", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "97", + "SAI_QUEUE_STAT_PACKETS": "37", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "32" + }, + "COUNTERS:oid:0x150000000003b9": { + "SAI_QUEUE_STAT_BYTES": "17", + "SAI_QUEUE_STAT_DROPPED_BYTES": "43", + 
"SAI_QUEUE_STAT_DROPPED_PACKETS": "74", + "SAI_QUEUE_STAT_PACKETS": "30", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "42" + }, + "COUNTERS:oid:0x150000000003ba": { + "SAI_QUEUE_STAT_BYTES": "63", + "SAI_QUEUE_STAT_DROPPED_BYTES": "84", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "54", + "SAI_QUEUE_STAT_PACKETS": "0", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "43" + }, + "COUNTERS:oid:0x150000000003bb": { + "SAI_QUEUE_STAT_BYTES": "88", + "SAI_QUEUE_STAT_DROPPED_BYTES": "79", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "24", + "SAI_QUEUE_STAT_PACKETS": "69", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "62" + }, + "COUNTERS:oid:0x150000000003bc": { + "SAI_QUEUE_STAT_BYTES": "12", + "SAI_QUEUE_STAT_DROPPED_BYTES": "3", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "84", + "SAI_QUEUE_STAT_PACKETS": "20", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "81" + }, "COUNTERS:oid:0x60000000005a3": { "SAI_ROUTER_INTERFACE_STAT_IN_ERROR_OCTETS": "0", "SAI_ROUTER_INTERFACE_STAT_IN_ERROR_PACKETS": "0", @@ -7,7 +406,12 @@ "SAI_ROUTER_INTERFACE_STAT_OUT_ERROR_OCTETS": "0", "SAI_ROUTER_INTERFACE_STAT_OUT_ERROR_PACKETS": "0", "SAI_ROUTER_INTERFACE_STAT_OUT_OCTETS": "0", - "SAI_ROUTER_INTERFACE_STAT_OUT_PACKETS": "0" + "SAI_ROUTER_INTERFACE_STAT_OUT_PACKETS": "0", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "0", + "SAI_QUEUE_STAT_DROPPED_BYTES": "0", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "0", + "SAI_QUEUE_STAT_BYTES": "0", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "0" }, "COUNTERS:oid:0x60000000005a1": { "SAI_ROUTER_INTERFACE_STAT_IN_ERROR_OCTETS": "0", @@ -17,7 +421,12 @@ "SAI_ROUTER_INTERFACE_STAT_OUT_ERROR_OCTETS": "0", "SAI_ROUTER_INTERFACE_STAT_OUT_ERROR_PACKETS": "0", "SAI_ROUTER_INTERFACE_STAT_OUT_OCTETS": "0", - "SAI_ROUTER_INTERFACE_STAT_OUT_PACKETS": "0" + "SAI_ROUTER_INTERFACE_STAT_OUT_PACKETS": "0", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "0", + "SAI_QUEUE_STAT_DROPPED_BYTES": "0", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "0", + "SAI_QUEUE_STAT_BYTES": "0", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "0" }, "COUNTERS:oid:0x600000000065f": { "SAI_ROUTER_INTERFACE_STAT_IN_ERROR_OCTETS": "1128", @@ -854,7 +1263,12 @@ "PFC_WD_QUEUE_STATS_TX_PACKETS_LAST": "0", "PFC_WD_RESTORATION_TIME": "600000", "PFC_WD_RESTORATION_TIME_LEFT": "600000", - "PFC_WD_STATUS": "stormed" + "PFC_WD_STATUS": "stormed", + "SAI_QUEUE_STAT_BYTES": "70", + "SAI_QUEUE_STAT_DROPPED_BYTES": "76", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "19", + "SAI_QUEUE_STAT_PACKETS": "52", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "60" }, "COUNTERS:oid:0x15000000000382": { "PFC_WD_ACTION": "drop", @@ -872,7 +1286,12 @@ "PFC_WD_QUEUE_STATS_TX_PACKETS_LAST": "0", "PFC_WD_RESTORATION_TIME": "600000", "PFC_WD_RESTORATION_TIME_LEFT": "600000", - "PFC_WD_STATUS": "operational" + "PFC_WD_STATUS": "operational", + "SAI_QUEUE_STAT_BYTES": "89", + "SAI_QUEUE_STAT_DROPPED_BYTES": "41", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "48", + "SAI_QUEUE_STAT_PACKETS": "60", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "99" }, "COUNTERS:oid:0x150000000003ab": { "PFC_WD_ACTION": "drop", @@ -890,7 +1309,12 @@ "PFC_WD_QUEUE_STATS_TX_PACKETS_LAST": "0", "PFC_WD_RESTORATION_TIME": "600000", "PFC_WD_RESTORATION_TIME_LEFT": "600000", - "PFC_WD_STATUS": "stormed" + "PFC_WD_STATUS": "stormed", + "SAI_QUEUE_STAT_BYTES": "89", + "SAI_QUEUE_STAT_DROPPED_BYTES": "62", + "SAI_QUEUE_STAT_DROPPED_PACKETS": "62", + "SAI_QUEUE_STAT_PACKETS": "54", + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "97" }, "USER_WATERMARKS:oid:0x1a00000000034f": { "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "100", diff --git 
a/tests/queue_counter_test.py b/tests/queue_counter_test.py new file mode 100644 index 0000000000..66dfc828ed --- /dev/null +++ b/tests/queue_counter_test.py @@ -0,0 +1,960 @@ +import imp +import json +import os +import sys + +from click.testing import CliRunner +from unittest import TestCase +from swsscommon.swsscommon import ConfigDBConnector + +from .mock_tables import dbconnector + +import show.main as show +from utilities_common.cli import json_dump +from utilities_common.db import Db + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + + +show_queue_counters = """\ + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +--------- ----- -------------- --------------- ----------- ------------ +Ethernet0 UC0 68 30 56 74 +Ethernet0 UC1 60 43 39 1 +Ethernet0 UC2 82 7 39 21 +Ethernet0 UC3 52 70 19 76 +Ethernet0 UC4 11 59 12 94 +Ethernet0 UC5 36 62 35 40 +Ethernet0 UC6 49 91 2 88 +Ethernet0 UC7 33 17 94 74 +Ethernet0 UC8 40 71 95 33 +Ethernet0 UC9 54 8 93 78 +Ethernet0 MC10 83 96 74 9 +Ethernet0 MC11 15 60 61 31 +Ethernet0 MC12 45 52 82 94 +Ethernet0 MC13 55 88 89 52 +Ethernet0 MC14 14 70 95 79 +Ethernet0 MC15 68 60 66 81 +Ethernet0 MC16 63 4 48 76 +Ethernet0 MC17 41 73 77 74 +Ethernet0 MC18 60 21 56 54 +Ethernet0 MC19 57 31 12 39 +Ethernet0 ALL20 N/A N/A N/A N/A +Ethernet0 ALL21 N/A N/A N/A N/A +Ethernet0 ALL22 N/A N/A N/A N/A +Ethernet0 ALL23 N/A N/A N/A N/A +Ethernet0 ALL24 N/A N/A N/A N/A +Ethernet0 ALL25 N/A N/A N/A N/A +Ethernet0 ALL26 N/A N/A N/A N/A +Ethernet0 ALL27 N/A N/A N/A N/A +Ethernet0 ALL28 N/A N/A N/A N/A +Ethernet0 ALL29 N/A N/A N/A N/A + + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +--------- ----- -------------- --------------- ----------- ------------ +Ethernet4 UC0 41 96 70 98 +Ethernet4 UC1 18 49 63 36 +Ethernet4 UC2 99 90 3 15 +Ethernet4 UC3 60 89 48 41 +Ethernet4 UC4 8 84 82 94 +Ethernet4 UC5 83 15 75 92 +Ethernet4 UC6 84 26 50 71 +Ethernet4 UC7 27 19 49 80 +Ethernet4 UC8 13 89 13 33 +Ethernet4 UC9 43 48 86 31 +Ethernet4 MC10 50 1 57 82 +Ethernet4 MC11 67 99 84 59 +Ethernet4 MC12 4 58 27 5 +Ethernet4 MC13 74 5 57 39 +Ethernet4 MC14 21 59 4 14 +Ethernet4 MC15 24 61 19 53 +Ethernet4 MC16 51 15 15 32 +Ethernet4 MC17 98 18 23 15 +Ethernet4 MC18 41 34 9 57 +Ethernet4 MC19 57 7 18 99 +Ethernet4 ALL20 N/A N/A N/A N/A +Ethernet4 ALL21 N/A N/A N/A N/A +Ethernet4 ALL22 N/A N/A N/A N/A +Ethernet4 ALL23 N/A N/A N/A N/A +Ethernet4 ALL24 N/A N/A N/A N/A +Ethernet4 ALL25 N/A N/A N/A N/A +Ethernet4 ALL26 N/A N/A N/A N/A +Ethernet4 ALL27 N/A N/A N/A N/A +Ethernet4 ALL28 N/A N/A N/A N/A +Ethernet4 ALL29 N/A N/A N/A N/A + + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +--------- ----- -------------- --------------- ----------- ------------ +Ethernet8 UC0 19 5 36 56 +Ethernet8 UC1 38 17 68 91 +Ethernet8 UC2 16 65 79 51 +Ethernet8 UC3 11 97 63 72 +Ethernet8 UC4 54 89 62 62 +Ethernet8 UC5 13 84 30 59 +Ethernet8 UC6 49 67 99 85 +Ethernet8 UC7 2 63 38 88 +Ethernet8 UC8 0 82 93 43 +Ethernet8 UC9 80 17 91 61 +Ethernet8 MC10 81 63 76 73 +Ethernet8 MC11 29 16 29 66 +Ethernet8 MC12 32 12 61 35 +Ethernet8 MC13 79 17 72 93 +Ethernet8 MC14 23 21 67 50 +Ethernet8 MC15 37 10 97 14 +Ethernet8 MC16 30 17 74 43 +Ethernet8 MC17 0 63 54 84 +Ethernet8 MC18 69 88 24 79 +Ethernet8 MC19 20 12 84 3 +Ethernet8 ALL20 N/A N/A N/A N/A +Ethernet8 ALL21 N/A N/A N/A N/A +Ethernet8 ALL22 N/A N/A N/A N/A +Ethernet8 ALL23 
N/A N/A N/A N/A +Ethernet8 ALL24 N/A N/A N/A N/A +Ethernet8 ALL25 N/A N/A N/A N/A +Ethernet8 ALL26 N/A N/A N/A N/A +Ethernet8 ALL27 N/A N/A N/A N/A +Ethernet8 ALL28 N/A N/A N/A N/A +Ethernet8 ALL29 N/A N/A N/A N/A + +""" + + +show_queue_counters_port = """\ + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +--------- ----- -------------- --------------- ----------- ------------ +Ethernet8 UC0 19 5 36 56 +Ethernet8 UC1 38 17 68 91 +Ethernet8 UC2 16 65 79 51 +Ethernet8 UC3 11 97 63 72 +Ethernet8 UC4 54 89 62 62 +Ethernet8 UC5 13 84 30 59 +Ethernet8 UC6 49 67 99 85 +Ethernet8 UC7 2 63 38 88 +Ethernet8 UC8 0 82 93 43 +Ethernet8 UC9 80 17 91 61 +Ethernet8 MC10 81 63 76 73 +Ethernet8 MC11 29 16 29 66 +Ethernet8 MC12 32 12 61 35 +Ethernet8 MC13 79 17 72 93 +Ethernet8 MC14 23 21 67 50 +Ethernet8 MC15 37 10 97 14 +Ethernet8 MC16 30 17 74 43 +Ethernet8 MC17 0 63 54 84 +Ethernet8 MC18 69 88 24 79 +Ethernet8 MC19 20 12 84 3 +Ethernet8 ALL20 N/A N/A N/A N/A +Ethernet8 ALL21 N/A N/A N/A N/A +Ethernet8 ALL22 N/A N/A N/A N/A +Ethernet8 ALL23 N/A N/A N/A N/A +Ethernet8 ALL24 N/A N/A N/A N/A +Ethernet8 ALL25 N/A N/A N/A N/A +Ethernet8 ALL26 N/A N/A N/A N/A +Ethernet8 ALL27 N/A N/A N/A N/A +Ethernet8 ALL28 N/A N/A N/A N/A +Ethernet8 ALL29 N/A N/A N/A N/A + +""" + +show_queue_counters_json = """\ +{ + "Ethernet0": { + "ALL20": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL21": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL22": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL23": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL24": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL25": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL26": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL27": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL28": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL29": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "MC10": { + "dropbytes": "9", + "droppacket": "74", + "totalbytes": "96", + "totalpacket": "83" + }, + "MC11": { + "dropbytes": "31", + "droppacket": "61", + "totalbytes": "60", + "totalpacket": "15" + }, + "MC12": { + "dropbytes": "94", + "droppacket": "82", + "totalbytes": "52", + "totalpacket": "45" + }, + "MC13": { + "dropbytes": "52", + "droppacket": "89", + "totalbytes": "88", + "totalpacket": "55" + }, + "MC14": { + "dropbytes": "79", + "droppacket": "95", + "totalbytes": "70", + "totalpacket": "14" + }, + "MC15": { + "dropbytes": "81", + "droppacket": "66", + "totalbytes": "60", + "totalpacket": "68" + }, + "MC16": { + "dropbytes": "76", + "droppacket": "48", + "totalbytes": "4", + "totalpacket": "63" + }, + "MC17": { + "dropbytes": "74", + "droppacket": "77", + "totalbytes": "73", + "totalpacket": "41" + }, + "MC18": { + "dropbytes": "54", + "droppacket": "56", + "totalbytes": "21", + "totalpacket": "60" + }, + "MC19": { + "dropbytes": "39", + "droppacket": "12", + "totalbytes": "31", + "totalpacket": "57" + }, + "UC0": { + "dropbytes": "74", + "droppacket": "56", + "totalbytes": "30", + "totalpacket": "68" + 
}, + "UC1": { + "dropbytes": "1", + "droppacket": "39", + "totalbytes": "43", + "totalpacket": "60" + }, + "UC2": { + "dropbytes": "21", + "droppacket": "39", + "totalbytes": "7", + "totalpacket": "82" + }, + "UC3": { + "dropbytes": "76", + "droppacket": "19", + "totalbytes": "70", + "totalpacket": "52" + }, + "UC4": { + "dropbytes": "94", + "droppacket": "12", + "totalbytes": "59", + "totalpacket": "11" + }, + "UC5": { + "dropbytes": "40", + "droppacket": "35", + "totalbytes": "62", + "totalpacket": "36" + }, + "UC6": { + "dropbytes": "88", + "droppacket": "2", + "totalbytes": "91", + "totalpacket": "49" + }, + "UC7": { + "dropbytes": "74", + "droppacket": "94", + "totalbytes": "17", + "totalpacket": "33" + }, + "UC8": { + "dropbytes": "33", + "droppacket": "95", + "totalbytes": "71", + "totalpacket": "40" + }, + "UC9": { + "dropbytes": "78", + "droppacket": "93", + "totalbytes": "8", + "totalpacket": "54" + } + }, + "Ethernet4": { + "ALL20": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL21": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL22": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL23": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL24": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL25": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL26": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL27": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL28": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL29": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "MC10": { + "dropbytes": "82", + "droppacket": "57", + "totalbytes": "1", + "totalpacket": "50" + }, + "MC11": { + "dropbytes": "59", + "droppacket": "84", + "totalbytes": "99", + "totalpacket": "67" + }, + "MC12": { + "dropbytes": "5", + "droppacket": "27", + "totalbytes": "58", + "totalpacket": "4" + }, + "MC13": { + "dropbytes": "39", + "droppacket": "57", + "totalbytes": "5", + "totalpacket": "74" + }, + "MC14": { + "dropbytes": "14", + "droppacket": "4", + "totalbytes": "59", + "totalpacket": "21" + }, + "MC15": { + "dropbytes": "53", + "droppacket": "19", + "totalbytes": "61", + "totalpacket": "24" + }, + "MC16": { + "dropbytes": "32", + "droppacket": "15", + "totalbytes": "15", + "totalpacket": "51" + }, + "MC17": { + "dropbytes": "15", + "droppacket": "23", + "totalbytes": "18", + "totalpacket": "98" + }, + "MC18": { + "dropbytes": "57", + "droppacket": "9", + "totalbytes": "34", + "totalpacket": "41" + }, + "MC19": { + "dropbytes": "99", + "droppacket": "18", + "totalbytes": "7", + "totalpacket": "57" + }, + "UC0": { + "dropbytes": "98", + "droppacket": "70", + "totalbytes": "96", + "totalpacket": "41" + }, + "UC1": { + "dropbytes": "36", + "droppacket": "63", + "totalbytes": "49", + "totalpacket": "18" + }, + "UC2": { + "dropbytes": "15", + "droppacket": "3", + "totalbytes": "90", + "totalpacket": "99" + }, + "UC3": { + "dropbytes": "41", + "droppacket": "48", + "totalbytes": "89", + "totalpacket": "60" + }, + "UC4": { + "dropbytes": "94", + "droppacket": "82", + "totalbytes": 
"84", + "totalpacket": "8" + }, + "UC5": { + "dropbytes": "92", + "droppacket": "75", + "totalbytes": "15", + "totalpacket": "83" + }, + "UC6": { + "dropbytes": "71", + "droppacket": "50", + "totalbytes": "26", + "totalpacket": "84" + }, + "UC7": { + "dropbytes": "80", + "droppacket": "49", + "totalbytes": "19", + "totalpacket": "27" + }, + "UC8": { + "dropbytes": "33", + "droppacket": "13", + "totalbytes": "89", + "totalpacket": "13" + }, + "UC9": { + "dropbytes": "31", + "droppacket": "86", + "totalbytes": "48", + "totalpacket": "43" + } + }, + "Ethernet8": { + "ALL20": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL21": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL22": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL23": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL24": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL25": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL26": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL27": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL28": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL29": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "MC10": { + "dropbytes": "73", + "droppacket": "76", + "totalbytes": "63", + "totalpacket": "81" + }, + "MC11": { + "dropbytes": "66", + "droppacket": "29", + "totalbytes": "16", + "totalpacket": "29" + }, + "MC12": { + "dropbytes": "35", + "droppacket": "61", + "totalbytes": "12", + "totalpacket": "32" + }, + "MC13": { + "dropbytes": "93", + "droppacket": "72", + "totalbytes": "17", + "totalpacket": "79" + }, + "MC14": { + "dropbytes": "50", + "droppacket": "67", + "totalbytes": "21", + "totalpacket": "23" + }, + "MC15": { + "dropbytes": "14", + "droppacket": "97", + "totalbytes": "10", + "totalpacket": "37" + }, + "MC16": { + "dropbytes": "43", + "droppacket": "74", + "totalbytes": "17", + "totalpacket": "30" + }, + "MC17": { + "dropbytes": "84", + "droppacket": "54", + "totalbytes": "63", + "totalpacket": "0" + }, + "MC18": { + "dropbytes": "79", + "droppacket": "24", + "totalbytes": "88", + "totalpacket": "69" + }, + "MC19": { + "dropbytes": "3", + "droppacket": "84", + "totalbytes": "12", + "totalpacket": "20" + }, + "UC0": { + "dropbytes": "56", + "droppacket": "36", + "totalbytes": "5", + "totalpacket": "19" + }, + "UC1": { + "dropbytes": "91", + "droppacket": "68", + "totalbytes": "17", + "totalpacket": "38" + }, + "UC2": { + "dropbytes": "51", + "droppacket": "79", + "totalbytes": "65", + "totalpacket": "16" + }, + "UC3": { + "dropbytes": "72", + "droppacket": "63", + "totalbytes": "97", + "totalpacket": "11" + }, + "UC4": { + "dropbytes": "62", + "droppacket": "62", + "totalbytes": "89", + "totalpacket": "54" + }, + "UC5": { + "dropbytes": "59", + "droppacket": "30", + "totalbytes": "84", + "totalpacket": "13" + }, + "UC6": { + "dropbytes": "85", + "droppacket": "99", + "totalbytes": "67", + "totalpacket": "49" + }, + "UC7": { + "dropbytes": "88", + "droppacket": "38", + "totalbytes": "63", + "totalpacket": "2" + }, + "UC8": { + "dropbytes": "43", + 
"droppacket": "93", + "totalbytes": "82", + "totalpacket": "0" + }, + "UC9": { + "dropbytes": "61", + "droppacket": "91", + "totalbytes": "17", + "totalpacket": "80" + } + } +}""" + +show_queue_counters_port_json = """\ +{ + "Ethernet8": { + "ALL20": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL21": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL22": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL23": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL24": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL25": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL26": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL27": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL28": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "ALL29": { + "dropbytes": "N/A", + "droppacket": "N/A", + "totalbytes": "N/A", + "totalpacket": "N/A" + }, + "MC10": { + "dropbytes": "73", + "droppacket": "76", + "totalbytes": "63", + "totalpacket": "81" + }, + "MC11": { + "dropbytes": "66", + "droppacket": "29", + "totalbytes": "16", + "totalpacket": "29" + }, + "MC12": { + "dropbytes": "35", + "droppacket": "61", + "totalbytes": "12", + "totalpacket": "32" + }, + "MC13": { + "dropbytes": "93", + "droppacket": "72", + "totalbytes": "17", + "totalpacket": "79" + }, + "MC14": { + "dropbytes": "50", + "droppacket": "67", + "totalbytes": "21", + "totalpacket": "23" + }, + "MC15": { + "dropbytes": "14", + "droppacket": "97", + "totalbytes": "10", + "totalpacket": "37" + }, + "MC16": { + "dropbytes": "43", + "droppacket": "74", + "totalbytes": "17", + "totalpacket": "30" + }, + "MC17": { + "dropbytes": "84", + "droppacket": "54", + "totalbytes": "63", + "totalpacket": "0" + }, + "MC18": { + "dropbytes": "79", + "droppacket": "24", + "totalbytes": "88", + "totalpacket": "69" + }, + "MC19": { + "dropbytes": "3", + "droppacket": "84", + "totalbytes": "12", + "totalpacket": "20" + }, + "UC0": { + "dropbytes": "56", + "droppacket": "36", + "totalbytes": "5", + "totalpacket": "19" + }, + "UC1": { + "dropbytes": "91", + "droppacket": "68", + "totalbytes": "17", + "totalpacket": "38" + }, + "UC2": { + "dropbytes": "51", + "droppacket": "79", + "totalbytes": "65", + "totalpacket": "16" + }, + "UC3": { + "dropbytes": "72", + "droppacket": "63", + "totalbytes": "97", + "totalpacket": "11" + }, + "UC4": { + "dropbytes": "62", + "droppacket": "62", + "totalbytes": "89", + "totalpacket": "54" + }, + "UC5": { + "dropbytes": "59", + "droppacket": "30", + "totalbytes": "84", + "totalpacket": "13" + }, + "UC6": { + "dropbytes": "85", + "droppacket": "99", + "totalbytes": "67", + "totalpacket": "49" + }, + "UC7": { + "dropbytes": "88", + "droppacket": "38", + "totalbytes": "63", + "totalpacket": "2" + }, + "UC8": { + "dropbytes": "43", + "droppacket": "93", + "totalbytes": "82", + "totalpacket": "0" + }, + "UC9": { + "dropbytes": "61", + "droppacket": "91", + "totalbytes": "17", + "totalpacket": "80" + } + } +}""" + + +class TestQueue(object): + @classmethod + def setup_class(cls): + os.environ["PATH"] += os.pathsep + scripts_path + 
os.environ['UTILITIES_UNIT_TESTING'] = "2" + print("SETUP") + + def test_queue_counters(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["queue"].commands["counters"], + [] + ) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_queue_counters + + def test_queue_counters_port(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["queue"].commands["counters"], + ["Ethernet8"] + ) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_queue_counters_port + + def test_queue_counters_json(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["queue"].commands["counters"], + ["--json"] + ) + assert result.exit_code == 0 + print(result.output) + json_output = json.loads(result.output) + + # remove "time" from the output + for _, v in json_output.items(): + del v["time"] + assert json_dump(json_output) == show_queue_counters_json + + def test_queue_counters_port_json(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["queue"].commands["counters"], + ["Ethernet8 --json"] + ) + assert result.exit_code == 0 + print(result.output) + json_output = json.loads(result.output) + + # remove "time" from the output + for _, v in json_output.items(): + del v["time"] + assert json_dump(json_output) == show_queue_counters_port_json + + @classmethod + def teardown_class(cls): + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ['UTILITIES_UNIT_TESTING'] = "0" + print("TEARDOWN") diff --git a/utilities_common/cli.py b/utilities_common/cli.py index ed103649e4..afa42dc3b3 100644 --- a/utilities_common/cli.py +++ b/utilities_common/cli.py @@ -1,4 +1,5 @@ import configparser +import datetime import os import re import subprocess @@ -546,13 +547,22 @@ def run_command(command, display_cmd=False, ignore_error=False, return_cmd=False sys.exit(rc) +def json_serial(obj): + """JSON serializer for objects not serializable by default""" + + if isinstance(obj, (datetime.datetime, datetime.date)): + return obj.isoformat() + raise TypeError("Type %s not serializable" % type(obj)) + + def json_dump(data): """ Dump data in JSON format """ return json.dumps( - data, sort_keys=True, indent=2, ensure_ascii=False + data, sort_keys=True, indent=2, ensure_ascii=False, default=json_serial ) + def interface_is_untagged_member(db, interface_name): """ Check if interface is already untagged member""" From e296a69e999fd8fdd711b56f47af8f1939409735 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan <47282725+renukamanavalan@users.noreply.github.com> Date: Fri, 16 Apr 2021 14:56:55 -0700 Subject: [PATCH 12/41] No more IP validation as it is more likely a URL (#1555) Dropped IP validation as server takes URL. 
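For context, the helpers removed in the diff below accept only literal IP
addresses: socket.inet_pton() raises for anything else, so a DNS name or URL
for the cluster VIP could never pass the check. A standalone sketch of that
behavior (hypothetical values, not part of the patch):

    import socket

    def is_valid_ip4_addr(address):
        # Same logic the patch removes: inet_pton() parses literal
        # addresses only and raises OSError for hostnames/URLs.
        try:
            socket.inet_pton(socket.AF_INET, address)
        except OSError:
            return False
        return True

    print(is_valid_ip4_addr("10.3.157.24"))         # True  - literal IPv4
    print(is_valid_ip4_addr("k8s-vip.example.com")) # False - hostname/URL

Since the server field may legitimately hold a URL, the validation is dropped
rather than extended.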
--- config/kube.py | 21 +-------------------- tests/kube_test.py | 6 +----- 2 files changed, 2 insertions(+), 25 deletions(-) diff --git a/config/kube.py b/config/kube.py index ef27c0538e..a2dbeeffb6 100644 --- a/config/kube.py +++ b/config/kube.py @@ -1,5 +1,4 @@ import click -import socket from utilities_common.cli import AbbreviationGroup, pass_db @@ -21,22 +20,6 @@ KUBE_LABEL_TABLE = "KUBE_LABELS" KUBE_LABEL_SET_KEY = "SET" -def is_valid_ip4_addr(address): - try: - socket.inet_pton(socket.AF_INET, address) - except socket.error: # not a valid address - return False - return True - - -def is_valid_ip6_addr(address): - try: - socket.inet_pton(socket.AF_INET6, address) - except socket.error: # not a valid address - return False - return True - - def _update_kube_server(db, field, val): db_data = db.cfgdb.get_entry(KUBE_SERVER_TABLE_NAME, KUBE_SERVER_TABLE_KEY) def_data = { @@ -82,9 +65,7 @@ def server(): @pass_db def ip(db, vip): """Specify a kubernetes cluster VIP""" - if vip and not is_valid_ip4_addr(vip) and not is_valid_ip6_addr(vip): - click.echo('Invalid IP address %s' % vip) - sys.exit(1) + _update_kube_server(db, KUBE_SERVER_IP, vip) diff --git a/tests/kube_test.py b/tests/kube_test.py index a19402a627..90a4f6e292 100644 --- a/tests/kube_test.py +++ b/tests/kube_test.py @@ -125,15 +125,11 @@ def test_set_server_ip(self, get_cmd_module): self.__check_res(result, "check server IP", show_server_output_1) - def test_set_server_invalid_ip_port(self, get_cmd_module): + def test_set_server_invalid_port(self, get_cmd_module): (config, show) = get_cmd_module db = Db() runner = CliRunner() - # test invalid IP - result = runner.invoke(config.config.commands["kubernetes"].commands["server"], ["ip", "10101011"], obj=db) - assert result.exit_code == 1 - # test invalid port result = runner.invoke(config.config.commands["kubernetes"].commands["server"], ["port", "10101011"], obj=db) assert result.exit_code == 1 From f5efe8939530ba9767bcd92e5a688b2275c5f151 Mon Sep 17 00:00:00 2001 From: Danny Allen Date: Tue, 20 Apr 2021 10:29:23 -0700 Subject: [PATCH 13/41] [acl] Use a list instead of a comma-separated string for ACL port list (#1519) Signed-off-by: Danny Allen --- config/main.py | 4 ++-- tests/acl_config_test.py | 10 ++++------ 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/config/main.py b/config/main.py index e5a3cf6d0f..44bf799287 100644 --- a/config/main.py +++ b/config/main.py @@ -3339,7 +3339,7 @@ def parse_acl_table_info(table_name, table_type, description, ports, stage): if ports: for port in ports.split(","): port_list += expand_vlan_ports(port) - port_list = set(port_list) + port_list = list(set(port_list)) # convert to set first to remove duplicate ifaces else: port_list = valid_acl_ports @@ -3347,7 +3347,7 @@ def parse_acl_table_info(table_name, table_type, description, ports, stage): if port not in valid_acl_ports: raise ValueError("Cannot bind ACL to specified port {}".format(port)) - table_info["ports@"] = ",".join(port_list) + table_info["ports"] = port_list table_info["stage"] = stage diff --git a/tests/acl_config_test.py b/tests/acl_config_test.py index 63f92b787b..ff397e760d 100644 --- a/tests/acl_config_test.py +++ b/tests/acl_config_test.py @@ -25,9 +25,7 @@ def test_parse_table_with_vlan_expansion(self): assert table_info["type"] == "L3" assert table_info["policy_desc"] == "TEST" assert table_info["stage"] == "egress" - - port_list = table_info["ports@"].split(",") - assert set(port_list) == {"Ethernet4", "Ethernet8", "Ethernet12", "Ethernet16"} + assert 
set(table_info["ports"]) == {"Ethernet4", "Ethernet8", "Ethernet12", "Ethernet16"} def test_parse_table_with_vlan_and_duplicates(self): table_info = parse_acl_table_info("TEST", "L3", None, "Ethernet4,Vlan1000", "egress") @@ -36,9 +34,9 @@ def test_parse_table_with_vlan_and_duplicates(self): assert table_info["stage"] == "egress" # Since Ethernet4 is a member of Vlan1000 we should not include it twice in the output - port_list = table_info["ports@"].split(",") - assert len(port_list) == 4 - assert set(port_list) == {"Ethernet4", "Ethernet8", "Ethernet12", "Ethernet16"} + port_set = set(table_info["ports"]) + assert len(port_set) == 4 + assert set(port_set) == {"Ethernet4", "Ethernet8", "Ethernet12", "Ethernet16"} def test_parse_table_with_empty_vlan(self): with pytest.raises(ValueError): From 59ed6f399de8af29fa6f3ffb50e07d23f215feff Mon Sep 17 00:00:00 2001 From: Santhosh Kumar T <53558409+santhosh-kt@users.noreply.github.com> Date: Fri, 23 Apr 2021 00:03:39 +0530 Subject: [PATCH 14/41] platform pre-check for reboot in master branch (#1556) What I did Added platform pre check support in reboot script. Checking platform based changes before stopping dockers and sonic services. Porting changes in master from 201911 branch #1472 How I did it On branch reboot_pre_check_master Changes not staged for commit: (use "git add ..." to update what will be committed) (use "git checkout -- ..." to discard changes in working directory) modified: scripts/reboot How to verify it Write a platform pre check script(platform_reboot_pre_check) and place it in /usr/share/sonic/device// directory. If the script exit with status 0, reboot will be proceeded. If script exit with non-zero status, the reboot script gets stopped. --- scripts/reboot | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/scripts/reboot b/scripts/reboot index 546aa0fbff..dfbd309ba8 100755 --- a/scripts/reboot +++ b/scripts/reboot @@ -3,6 +3,7 @@ DEVPATH="/usr/share/sonic/device" PLAT_REBOOT="platform_reboot" PLATFORM_UPDATE_REBOOT_CAUSE="platform_update_reboot_cause" REBOOT_CAUSE_FILE="/host/reboot-cause/reboot-cause.txt" +PLATFORM_REBOOT_PRE_CHECK="platform_reboot_pre_check" REBOOT_TIME=$(date) # Reboot immediately if we run the kdump capture kernel @@ -121,6 +122,11 @@ function reboot_pre_check() fi rm ${filename} + if [ -x ${DEVPATH}/${PLATFORM}/${PLATFORM_REBOOT_PRE_CHECK} ]; then + ${DEVPATH}/${PLATFORM}/${PLATFORM_REBOOT_PRE_CHECK} + [[ $? -ne 0 ]] && exit $? + fi + # Verify the next image by sonic-installer local message=$(sonic-installer verify-next-image 2>&1) if [ $? -ne 0 ]; then From b10c1575868f44b64913bf572b33cef0496c3ec7 Mon Sep 17 00:00:00 2001 From: a-barboza <29963827+a-barboza@users.noreply.github.com> Date: Thu, 22 Apr 2021 16:14:17 -0700 Subject: [PATCH 15/41] RADIUS Management User Authentication Feature (#1521) What I did Radius Management User Authentication Feature How I did it HLD: https://github.com/Azure/SONiC/blob/master/doc/aaa/radius_authentication.md How to verify it This is the CLI only. The changes are reflected in the Redis Config DB. Previous command output (if the output of a command-line utility has changed) New command output (if the output of a command-line utility has changed) admin@sonic:~$ show radius RADIUS global auth_type pap (default) RADIUS global retransmit 3 (default) RADIUS global timeout 5 (default) RADIUS global passkey (default) admin@sonic:~$ admin@sonic:~$ sudo config radius Usage: config radius [OPTIONS] COMMAND [ARGS]... 
---
 scripts/reboot | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/scripts/reboot b/scripts/reboot
index 546aa0fbff..dfbd309ba8 100755
--- a/scripts/reboot
+++ b/scripts/reboot
@@ -3,6 +3,7 @@ DEVPATH="/usr/share/sonic/device"
 PLAT_REBOOT="platform_reboot"
 PLATFORM_UPDATE_REBOOT_CAUSE="platform_update_reboot_cause"
 REBOOT_CAUSE_FILE="/host/reboot-cause/reboot-cause.txt"
+PLATFORM_REBOOT_PRE_CHECK="platform_reboot_pre_check"
 REBOOT_TIME=$(date)

 # Reboot immediately if we run the kdump capture kernel
@@ -121,6 +122,11 @@ function reboot_pre_check()
     fi
     rm ${filename}

+    if [ -x ${DEVPATH}/${PLATFORM}/${PLATFORM_REBOOT_PRE_CHECK} ]; then
+        ${DEVPATH}/${PLATFORM}/${PLATFORM_REBOOT_PRE_CHECK}
+        rc=$?; [[ $rc -ne 0 ]] && exit $rc
+    fi
+
     # Verify the next image by sonic-installer
     local message=$(sonic-installer verify-next-image 2>&1)
     if [ $? -ne 0 ]; then

From b10c1575868f44b64913bf572b33cef0496c3ec7 Mon Sep 17 00:00:00 2001
From: a-barboza <29963827+a-barboza@users.noreply.github.com>
Date: Thu, 22 Apr 2021 16:14:17 -0700
Subject: [PATCH 15/41] RADIUS Management User Authentication Feature (#1521)

What I did
RADIUS management user authentication feature (CLI portion).

How I did it
Implemented per the HLD:
https://github.com/Azure/SONiC/blob/master/doc/aaa/radius_authentication.md

How to verify it
This change is CLI only; the settings are reflected in the Redis CONFIG_DB.

New command output:

admin@sonic:~$ show radius
RADIUS global auth_type pap (default)
RADIUS global retransmit 3 (default)
RADIUS global timeout 5 (default)
RADIUS global passkey (default)
admin@sonic:~$
admin@sonic:~$ sudo config radius
Usage: config radius [OPTIONS] COMMAND [ARGS]...

  RADIUS server configuration

Options:
  -?, -h, --help  Show this message and exit.

Commands:
  add         Specify a RADIUS server
  authtype    Specify RADIUS server global auth_type [chap | pap | mschapv2]
  default     set its default configuration
  delete      Delete a RADIUS server
  nasip       Specify RADIUS server global NAS-IP|IPV6-Address
  passkey     Specify RADIUS server global passkey
  retransmit  Specify RADIUS server global retry attempts <0 - 10>
  sourceip    Specify RADIUS server global source ip
  statistics  Specify RADIUS server global statistics [enable | disable |...
  timeout     Specify RADIUS server global timeout <1 - 60>
admin@sonic:~$
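A short hypothetical session with the new commands (the values are examples
only; command names and accepted ranges come from the diff below):

admin@sonic:~$ sudo config radius timeout 10        # accepts 1 - 60 seconds
admin@sonic:~$ sudo config radius retransmit 2      # accepts 0 - 10 attempts
admin@sonic:~$ sudo config radius authtype mschapv2
admin@sonic:~$ sudo config radius default timeout   # revert to the 5 s default
admin@sonic:~$ show radius                          # confirm the new settings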
'tacacs+': + if val2 == 'local': + good_ap = True + if good_ap == True: + val += ',' + val2 + else: + click.echo('Not a valid command') + return + add_table_kv('AAA', 'authentication', 'login', val) authentication.add_command(login) @@ -189,3 +248,249 @@ def delete(address): config_db.connect() config_db.set_entry('TACPLUS_SERVER', address, None) tacacs.add_command(delete) + + +@click.group() +def radius(): + """RADIUS server configuration""" + pass + + +@click.group() +@click.pass_context +def default(ctx): + """set its default configuration""" + ctx.obj = 'default' +radius.add_command(default) + + +@click.command() +@click.argument('second', metavar='', type=click.IntRange(1, 60), required=False) +@click.pass_context +def timeout(ctx, second): + """Specify RADIUS server global timeout <1 - 60>""" + if ctx.obj == 'default': + del_table_key('RADIUS', 'global', 'timeout') + elif second: + add_table_kv('RADIUS', 'global', 'timeout', second) + else: + click.echo('Not support empty argument') +radius.add_command(timeout) +default.add_command(timeout) + + +@click.command() +@click.argument('retries', metavar='', type=click.IntRange(0, 10), required=False) +@click.pass_context +def retransmit(ctx, retries): + """Specify RADIUS server global retry attempts <0 - 10>""" + if ctx.obj == 'default': + del_table_key('RADIUS', 'global', 'retransmit') + elif retries != None: + add_table_kv('RADIUS', 'global', 'retransmit', retries) + else: + click.echo('Not support empty argument') +radius.add_command(retransmit) +default.add_command(retransmit) + + +@click.command() +@click.argument('type', metavar='', type=click.Choice(["chap", "pap", "mschapv2"]), required=False) +@click.pass_context +def authtype(ctx, type): + """Specify RADIUS server global auth_type [chap | pap | mschapv2]""" + if ctx.obj == 'default': + del_table_key('RADIUS', 'global', 'auth_type') + elif type: + add_table_kv('RADIUS', 'global', 'auth_type', type) + else: + click.echo('Not support empty argument') +radius.add_command(authtype) +default.add_command(authtype) + + +@click.command() +@click.argument('secret', metavar='', required=False) +@click.pass_context +def passkey(ctx, secret): + """Specify RADIUS server global passkey """ + if ctx.obj == 'default': + del_table_key('RADIUS', 'global', 'passkey') + elif secret: + if len(secret) > RADIUS_PASSKEY_MAX_LEN: + click.echo('Maximum of %d chars can be configured' % RADIUS_PASSKEY_MAX_LEN) + return + elif not is_secret(secret): + click.echo(VALID_CHARS_MSG) + return + add_table_kv('RADIUS', 'global', 'passkey', secret) + else: + click.echo('Not support empty argument') +radius.add_command(passkey) +default.add_command(passkey) + +@click.command() +@click.argument('src_ip', metavar='', required=False) +@click.pass_context +def sourceip(ctx, src_ip): + """Specify RADIUS server global source ip """ + if ctx.obj == 'default': + del_table_key('RADIUS', 'global', 'src_ip') + return + elif not src_ip: + click.echo('Not support empty argument') + return + + if not clicommon.is_ipaddress(src_ip): + click.echo('Invalid ip address') + return + + v6_invalid_list = [ipaddress.IPv6Address(unicode('0::0')), ipaddress.IPv6Address(unicode('0::1'))] + net = ipaddress.ip_network(unicode(src_ip), strict=False) + if (net.version == 4): + if src_ip == "0.0.0.0": + click.echo('enter non-zero ip address') + return + ip = ipaddress.IPv4Address(src_ip) + if ip.is_reserved: + click.echo('Reserved ip is not valid') + return + if ip.is_multicast: + click.echo('Multicast ip is not valid') + return + elif (net.version 
== 6): + ip = ipaddress.IPv6Address(src_ip) + if (ip.is_multicast): + click.echo('Multicast ip is not valid') + return + if (ip in v6_invalid_list): + click.echo('Invalid ip address') + return + add_table_kv('RADIUS', 'global', 'src_ip', src_ip) +radius.add_command(sourceip) +default.add_command(sourceip) + +@click.command() +@click.argument('nas_ip', metavar='', required=False) +@click.pass_context +def nasip(ctx, nas_ip): + """Specify RADIUS server global NAS-IP|IPV6-Address """ + if ctx.obj == 'default': + del_table_key('RADIUS', 'global', 'nas_ip') + return + elif not nas_ip: + click.echo('Not support empty argument') + return + + if not clicommon.is_ipaddress(nas_ip): + click.echo('Invalid ip address') + return + + v6_invalid_list = [ipaddress.IPv6Address(unicode('0::0')), ipaddress.IPv6Address(unicode('0::1'))] + net = ipaddress.ip_network(unicode(nas_ip), strict=False) + if (net.version == 4): + if nas_ip == "0.0.0.0": + click.echo('enter non-zero ip address') + return + ip = ipaddress.IPv4Address(nas_ip) + if ip.is_reserved: + click.echo('Reserved ip is not valid') + return + if ip.is_multicast: + click.echo('Multicast ip is not valid') + return + elif (net.version == 6): + ip = ipaddress.IPv6Address(nas_ip) + if (ip.is_multicast): + click.echo('Multicast ip is not valid') + return + if (ip in v6_invalid_list): + click.echo('Invalid ip address') + return + add_table_kv('RADIUS', 'global', 'nas_ip', nas_ip) +radius.add_command(nasip) +default.add_command(nasip) + +@click.command() +@click.argument('option', type=click.Choice(["enable", "disable", "default"])) +def statistics(option): + """Specify RADIUS server global statistics [enable | disable | default]""" + if option == 'default': + del_table_key('RADIUS', 'global', 'statistics') + else: + if option == 'enable': + add_table_kv('RADIUS', 'global', 'statistics', True) + elif option == 'disable': + add_table_kv('RADIUS', 'global', 'statistics', False) +radius.add_command(statistics) + + +# cmd: radius add --retransmit COUNT --timeout SECOND --key SECRET --type TYPE --auth-port PORT --pri PRIORITY +@click.command() +@click.argument('address', metavar='') +@click.option('-r', '--retransmit', help='Retransmit attempts, default 3', type=click.IntRange(1, 10)) +@click.option('-t', '--timeout', help='Transmission timeout interval, default 5', type=click.IntRange(1, 60)) +@click.option('-k', '--key', help='Shared secret') +@click.option('-a', '--auth_type', help='Authentication type, default pap', type=click.Choice(["chap", "pap", "mschapv2"])) +@click.option('-o', '--auth-port', help='UDP port range is 1 to 65535, default 1812', type=click.IntRange(1, 65535), default=1812) +@click.option('-p', '--pri', help="Priority, default 1", type=click.IntRange(1, 64), default=1) +@click.option('-m', '--use-mgmt-vrf', help="Management vrf, default is no vrf", is_flag=True) +@click.option('-s', '--source-interface', help='Source Interface') +def add(address, retransmit, timeout, key, auth_type, auth_port, pri, use_mgmt_vrf, source_interface): + """Specify a RADIUS server""" + + if key: + if len(key) > RADIUS_PASSKEY_MAX_LEN: + click.echo('--key: Maximum of %d chars can be configured' % RADIUS_PASSKEY_MAX_LEN) + return + elif not is_secret(key): + click.echo('--key: ' + VALID_CHARS_MSG) + return + + config_db = ConfigDBConnector() + config_db.connect() + old_data = config_db.get_table('RADIUS_SERVER') + if address in old_data : + click.echo('server %s already exists' % address) + return + if len(old_data) == RADIUS_MAXSERVERS: + click.echo('Maximum of 
%d can be configured' % RADIUS_MAXSERVERS) + else: + data = { + 'auth_port': str(auth_port), + 'priority': pri + } + if auth_type is not None: + data['auth_type'] = auth_type + if retransmit is not None: + data['retransmit'] = str(retransmit) + if timeout is not None: + data['timeout'] = str(timeout) + if key is not None: + data['passkey'] = key + if use_mgmt_vrf : + data['vrf'] = "mgmt" + if source_interface : + if (source_interface.startswith("Ethernet") or \ + source_interface.startswith("PortChannel") or \ + source_interface.startswith("Vlan") or \ + source_interface.startswith("Loopback") or \ + source_interface == "eth0"): + data['src_intf'] = source_interface + else: + click.echo('Not supported interface name (valid interface name: Etherent/PortChannel/Vlan/Loopback/eth0)') + config_db.set_entry('RADIUS_SERVER', address, data) +radius.add_command(add) + + +# cmd: radius delete +# 'del' is keyword, replace with 'delete' +@click.command() +@click.argument('address', metavar='') +def delete(address): + """Delete a RADIUS server""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.set_entry('RADIUS_SERVER', address, None) +radius.add_command(delete) diff --git a/config/main.py b/config/main.py index 44bf799287..244a2570a6 100644 --- a/config/main.py +++ b/config/main.py @@ -866,6 +866,7 @@ def config(ctx): # Add groups from other modules config.add_command(aaa.aaa) config.add_command(aaa.tacacs) +config.add_command(aaa.radius) config.add_command(chassis_modules.chassis_modules) config.add_command(console.console) config.add_command(feature.feature) diff --git a/show/main.py b/show/main.py index 16f5d8cec7..b0b2986a78 100755 --- a/show/main.py +++ b/show/main.py @@ -1248,10 +1248,10 @@ def services(): break @cli.command() -def aaa(): +@clicommon.pass_db +def aaa(db): """Show AAA configuration""" - config_db = ConfigDBConnector() - config_db.connect() + config_db = db.cfgdb data = config_db.get_table('AAA') output = '' @@ -1299,6 +1299,58 @@ def tacacs(): output += (' %s %s\n' % (key, str(entry[key]))) click.echo(output) +@cli.command() +@clicommon.pass_db +def radius(db): + """Show RADIUS configuration""" + output = '' + config_db = db.cfgdb + data = config_db.get_table('RADIUS') + + radius = { + 'global': { + 'auth_type': 'pap (default)', + 'retransmit': '3 (default)', + 'timeout': '5 (default)', + 'passkey': ' (default)' + } + } + if 'global' in data: + radius['global'].update(data['global']) + for key in radius['global']: + output += ('RADIUS global %s %s\n' % (str(key), str(radius['global'][key]))) + + data = config_db.get_table('RADIUS_SERVER') + if data != {}: + for row in data: + entry = data[row] + output += ('\nRADIUS_SERVER address %s\n' % row) + for key in entry: + output += (' %s %s\n' % (key, str(entry[key]))) + + counters_db = SonicV2Connector(host='127.0.0.1') + counters_db.connect(counters_db.COUNTERS_DB, retry_on=False) + + if radius['global'].get('statistics', False) and (data != {}): + for row in data: + exists = counters_db.exists(counters_db.COUNTERS_DB, + 'RADIUS_SERVER_STATS:{}'.format(row)) + if not exists: + continue + + counter_entry = counters_db.get_all(counters_db.COUNTERS_DB, + 'RADIUS_SERVER_STATS:{}'.format(row)) + output += ('\nStatistics for RADIUS_SERVER address %s\n' % row) + for key in counter_entry: + if counter_entry[key] != "0": + output += (' %s %s\n' % (key, str(counter_entry[key]))) + try: + counters_db.close(counters_db.COUNTERS_DB) + except Exception as e: + pass + + click.echo(output) + # # 'mirror_session' command ("show 
mirror_session ...") # diff --git a/tests/aaa_test.py b/tests/aaa_test.py new file mode 100644 index 0000000000..d202b41ad7 --- /dev/null +++ b/tests/aaa_test.py @@ -0,0 +1,138 @@ +import imp +import os +import sys + +from click.testing import CliRunner +from utilities_common.db import Db + +import config.main as config +import show.main as show + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +import mock_tables.dbconnector + +show_aaa_default_output="""\ +AAA authentication login local (default) +AAA authentication failthrough False (default) + +""" + +show_aaa_radius_output="""\ +AAA authentication login radius +AAA authentication failthrough False (default) + +""" + +show_aaa_radius_local_output="""\ +AAA authentication login radius,local +AAA authentication failthrough False (default) + +""" + +config_aaa_empty_output="""\ +""" + +config_aaa_not_a_valid_command_output="""\ +Not a valid command +""" + +class TestAaa(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "1" + print("SETUP") + import config.main + imp.reload(config.main) + import show.main + imp.reload(show.main) + + @classmethod + def teardown_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "0" + print("TEARDOWN") + + def test_show_aaa_default(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["aaa"], []) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_aaa_default_output + + def test_config_aaa_radius(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + db = Db() + db.cfgdb.delete_table("AAA") + + result = runner.invoke(config.config.commands["aaa"],\ + ["authentication", "login", "radius"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_aaa_empty_output + + db.cfgdb.mod_entry("AAA", "authentication", {'login' : 'radius'}) + + result = runner.invoke(show.cli.commands["aaa"], [], obj=db) + assert result.exit_code == 0 + assert result.output == show_aaa_radius_output + + result = runner.invoke(config.config.commands["aaa"],\ + ["authentication", "login", "default"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_aaa_empty_output + + db.cfgdb.delete_table("AAA") + + result = runner.invoke(show.cli.commands["aaa"], [], obj=db) + assert result.exit_code == 0 + assert result.output == show_aaa_default_output + + def test_config_aaa_radius_local(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + db = Db() + db.cfgdb.delete_table("AAA") + + result = runner.invoke(config.config.commands["aaa"],\ + ["authentication", "login", "radius", "local"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_aaa_empty_output + + db.cfgdb.mod_entry("AAA", "authentication", {'login' : 'radius,local'}) + + result = runner.invoke(show.cli.commands["aaa"], [], obj=db) + assert result.exit_code == 0 + assert result.output == show_aaa_radius_local_output + + result = runner.invoke(config.config.commands["aaa"],\ + ["authentication", "login", "default"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_aaa_empty_output + + db.cfgdb.delete_table("AAA") + + result = 
runner.invoke(show.cli.commands["aaa"], [], obj=db) + assert result.exit_code == 0 + assert result.output == show_aaa_default_output + + def test_config_aaa_radius_invalid(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["aaa"],\ + ["authentication", "login", "radius", "tacacs+"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_aaa_not_a_valid_command_output + diff --git a/tests/radius_test.py b/tests/radius_test.py new file mode 100644 index 0000000000..117e19bde8 --- /dev/null +++ b/tests/radius_test.py @@ -0,0 +1,194 @@ +import imp +import os +import sys + +from click.testing import CliRunner +from utilities_common.db import Db + +import config.main as config +import show.main as show + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +import mock_tables.dbconnector + +show_radius_default_output="""\ +RADIUS global auth_type pap (default) +RADIUS global retransmit 3 (default) +RADIUS global timeout 5 (default) +RADIUS global passkey (default) + +""" + +show_radius_server_output="""\ +RADIUS global auth_type pap (default) +RADIUS global retransmit 3 (default) +RADIUS global timeout 5 (default) +RADIUS global passkey (default) + +RADIUS_SERVER address 10.10.10.10 + auth_port 1812 + passkey testing123 + priority 1 + retransmit 1 + src_intf eth0 + timeout 3 + +""" + +show_radius_global_output="""\ +RADIUS global auth_type chap +RADIUS global retransmit 3 (default) +RADIUS global timeout 5 (default) +RADIUS global passkey (default) + +""" + +config_radius_empty_output="""\ +""" + +config_radius_server_invalidkey_output="""\ +--key: Valid chars are ASCII printable except SPACE, '#', and ',' +""" + +config_radius_invalidipaddress_output="""\ +Invalid ip address +""" + +class TestRadius(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "1" + print("SETUP") + import config.main + imp.reload(config.main) + import show.main + imp.reload(show.main) + + @classmethod + def teardown_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "0" + print("TEARDOWN") + + def test_show_radius_default(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["radius"], []) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_radius_default_output + + def test_config_radius_server(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + db = Db() + db.cfgdb.delete_table("RADIUS_SERVER") + + result = runner.invoke(config.config.commands["radius"],\ + ["add", "10.10.10.10", "-r", "1", "-t", "3",\ + "-k", "testing123", "-s", "eth0"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_radius_empty_output + + db.cfgdb.mod_entry("RADIUS_SERVER", "10.10.10.10", \ + {'auth_port' : '1812', \ + 'passkey' : 'testing123', \ + 'priority' : '1', \ + 'retransmit': '1', \ + 'src_intf' : 'eth0', \ + 'timeout' : '3', \ + } \ + ) + + result = runner.invoke(show.cli.commands["radius"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_radius_server_output + + result = runner.invoke(config.config.commands["radius"],\ + ["delete", "10.10.10.10"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == 
config_radius_empty_output + + db.cfgdb.delete_table("RADIUS_SERVER") + + result = runner.invoke(show.cli.commands["radius"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_radius_default_output + + def test_config_radius_server_invalidkey(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["radius"],\ + ["add", "10.10.10.10", "-r", "1", "-t", "3",\ + "-k", "comma,invalid", "-s", "eth0"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_radius_server_invalidkey_output + + def test_config_radius_nasip_invalid(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["radius"],\ + ["nasip", "invalid"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_radius_invalidipaddress_output + + def test_config_radius_sourceip_invalid(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["radius"],\ + ["sourceip", "invalid"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_radius_invalidipaddress_output + + def test_config_radius_authtype(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + db = Db() + db.cfgdb.delete_table("RADIUS") + + result = runner.invoke(config.config.commands["radius"],\ + ["authtype", "chap"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_radius_empty_output + + db.cfgdb.mod_entry("RADIUS", "global", \ + {'auth_type' : 'chap'} \ + ) + + result = runner.invoke(show.cli.commands["radius"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_radius_global_output + + result = runner.invoke(config.config.commands["radius"],\ + ["default", "authtype"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_radius_empty_output + + db.cfgdb.delete_table("RADIUS") + + result = runner.invoke(show.cli.commands["radius"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_radius_default_output + From a3d37f17b78a62ca3e7486de1b40df84b49dbf23 Mon Sep 17 00:00:00 2001 From: Blueve <672454911@qq.com> Date: Sun, 25 Apr 2021 12:08:00 +0800 Subject: [PATCH 16/41] [console] Display success message after line cleared (#1579) * [console] Display success message after line cleared Signed-off-by: Jing Kan jika@microsoft.com --- consutil/main.py | 2 ++ tests/console_test.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/consutil/main.py b/consutil/main.py index 069f6bb27e..9fc41cded5 100644 --- a/consutil/main.py +++ b/consutil/main.py @@ -74,6 +74,8 @@ def clear(db, target, devicename): if not target_port.clear_session(): click.echo("No process is connected to line " + target_port.line_num) + else: + click.echo("Cleared line") # 'connect' subcommand @consutil.command() diff --git a/tests/console_test.py b/tests/console_test.py index e4b5156837..3df59adbdf 100644 --- a/tests/console_test.py +++ b/tests/console_test.py @@ -698,4 +698,4 @@ def test_clear_success(self): print(result.exit_code) print(sys.stderr, result.output) assert result.exit_code == 0 - assert result.output == "" + assert "Cleared line" in result.output From 
41d8ddc689b380dec35ee8d64bbaf98e6251ae69 Mon Sep 17 00:00:00 2001 From: Mohamed Ghoneim Date: Sun, 25 Apr 2021 18:40:53 -0700 Subject: [PATCH 17/41] [config][generic-update] Adding apply-patch, rollback, checkpoints commands (#1536) #### What I did Adding apply-patch, rollback, replace, checkpoint, delete-checkpoint, list-checkpoints functionality. #### How I did it This PR is implementing the first step in in README.md in the design document: https://github.com/Azure/SONiC/pull/736 #### How to verify it Using unit-tests #### Previous command output (if the output of a command-line utility has changed) #### New command output (if the output of a command-line utility has changed) ```sh admin@sonic:~$ sudo config apply-patch --help Usage: config apply-patch [OPTIONS] PATCH_FILE_PATH Apply given patch of updates to Config. A patch is a JsonPatch which follows rfc6902. This command can be used do partial updates to the config with minimum disruption to running processes. It allows addition as well as deletion of configs. The patch file represents a diff of ConfigDb(ABNF) format or SonicYang format. : Path to the patch file on the file-system. Options: -f, --format [CONFIGDB|SONICYANG] format of config of the patch is either ConfigDb(ABNF) or SonicYang -d, --dry-run test out the command without affecting config state -v, --verbose print additional details of what the operation is doing -h, -?, --help Show this message and exit. admin@sonic:~$ sudo config replace --help Usage: config replace [OPTIONS] TARGET_FILE_PATH Replace the whole config with the specified config. The config is replaced with minimum disruption e.g. if ACL config is different between current and target config only ACL config is updated, and other config/services such as DHCP will not be affected. **WARNING** The target config file should be the whole config, not just the part intended to be updated. : Path to the target file on the file-system. Options: -f, --format [CONFIGDB|SONICYANG] format of target config is either ConfigDb(ABNF) or SonicYang -d, --dry-run test out the command without affecting config state -v, --verbose print additional details of what the operation is doing -h, -?, --help Show this message and exit. admin@sonic:~$ sudo config rollback --help Usage: config rollback [OPTIONS] CHECKPOINT_NAME Rollback the whole config to the specified checkpoint. The config is rolled back with minimum disruption e.g. if ACL config is different between current and checkpoint config only ACL config is updated, and other config/services such as DHCP will not be affected. : The checkpoint name, use `config list-checkpoints` command to see available checkpoints. Options: -d, --dry-run test out the command without affecting config state -v, --verbose print additional details of what the operation is doing -?, -h, --help Show this message and exit. admin@sonic:~$ sudo config checkpoint --help Usage: config checkpoint [OPTIONS] CHECKPOINT_NAME Take a checkpoint of the whole current config with the specified checkpoint name. : The checkpoint name, use `config list-checkpoints` command to see available checkpoints. Options: -v, --verbose print additional details of what the operation is doing -h, -?, --help Show this message and exit. admin@sonic:~$ sudo config delete-checkpoint --help Usage: config delete-checkpoint [OPTIONS] CHECKPOINT_NAME Delete a checkpoint with the specified checkpoint name. : The checkpoint name, use `config list-checkpoints` command to see available checkpoints. 
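# A worked end-to-end example of these commands (hypothetical file name and
# table values; a patch file is an RFC 6902 JSON Patch against the
# ConfigDb(ABNF) or SonicYang representation):
admin@sonic:~$ sudo config checkpoint before-vlan-change
Checkpoint created successfully.
admin@sonic:~$ cat add-vlan.json-patch
[
  {"op": "add", "path": "/VLAN/Vlan100", "value": {"vlanid": "100"}}
]
admin@sonic:~$ sudo config apply-patch -d add-vlan.json-patch   # dry run first
admin@sonic:~$ sudo config apply-patch add-vlan.json-patch
Patch applied successfully.
admin@sonic:~$ sudo config rollback before-vlan-change          # undo if needed
Config rolled back successfully.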
Options: -v, --verbose print additional details of what the operation is doing -h, -?, --help Show this message and exit. admin@sonic:~$ sudo config list-checkpoints --help Usage: config list-checkpoints [OPTIONS] List the config checkpoints available. Options: -v, --verbose print additional details of what the operation is doing -?, -h, --help Show this message and exit. ``` --- .azure-pipelines/docker-sonic-vs/Dockerfile | 6 +- config/main.py | 131 ++- generic_config_updater/__init__.py | 0 generic_config_updater/generic_updater.py | 339 ++++++++ generic_config_updater/gu_common.py | 176 ++++ setup.py | 6 +- tests/config_test.py | 619 ++++++++++++++ tests/generic_config_updater/__init__.py | 0 .../files/config_db_after_multi_patch.json | 122 +++ .../files/config_db_as_json.json | 92 +++ .../files/config_db_as_json_invalid.json | 7 + .../files/cropped_config_db_as_json.json | 86 ++ ...multi_operation_config_db_patch.json-patch | 88 ++ ...ulti_operation_sonic_yang_patch.json-patch | 97 +++ ...ingle_operation_config_db_patch.json-patch | 6 + ...ngle_operation_sonic_yang_patch.json-patch | 6 + .../files/sonic_yang_after_multi_patch.json | 153 ++++ .../files/sonic_yang_as_json.json | 114 +++ .../files/sonic_yang_as_json_invalid.json | 13 + ...c_yang_as_json_with_unexpected_colons.json | 114 +++ .../sonic_yang_as_json_without_colons.json | 114 +++ .../generic_updater_test.py | 766 ++++++++++++++++++ .../generic_config_updater/gu_common_test.py | 335 ++++++++ .../generic_config_updater/gutest_helpers.py | 53 ++ 24 files changed, 3437 insertions(+), 6 deletions(-) create mode 100644 generic_config_updater/__init__.py create mode 100644 generic_config_updater/generic_updater.py create mode 100644 generic_config_updater/gu_common.py create mode 100644 tests/generic_config_updater/__init__.py create mode 100644 tests/generic_config_updater/files/config_db_after_multi_patch.json create mode 100644 tests/generic_config_updater/files/config_db_as_json.json create mode 100644 tests/generic_config_updater/files/config_db_as_json_invalid.json create mode 100644 tests/generic_config_updater/files/cropped_config_db_as_json.json create mode 100644 tests/generic_config_updater/files/multi_operation_config_db_patch.json-patch create mode 100644 tests/generic_config_updater/files/multi_operation_sonic_yang_patch.json-patch create mode 100644 tests/generic_config_updater/files/single_operation_config_db_patch.json-patch create mode 100644 tests/generic_config_updater/files/single_operation_sonic_yang_patch.json-patch create mode 100644 tests/generic_config_updater/files/sonic_yang_after_multi_patch.json create mode 100644 tests/generic_config_updater/files/sonic_yang_as_json.json create mode 100644 tests/generic_config_updater/files/sonic_yang_as_json_invalid.json create mode 100644 tests/generic_config_updater/files/sonic_yang_as_json_with_unexpected_colons.json create mode 100644 tests/generic_config_updater/files/sonic_yang_as_json_without_colons.json create mode 100644 tests/generic_config_updater/generic_updater_test.py create mode 100644 tests/generic_config_updater/gu_common_test.py create mode 100644 tests/generic_config_updater/gutest_helpers.py diff --git a/.azure-pipelines/docker-sonic-vs/Dockerfile b/.azure-pipelines/docker-sonic-vs/Dockerfile index 4e0a50e7a4..2b3e634232 100644 --- a/.azure-pipelines/docker-sonic-vs/Dockerfile +++ b/.azure-pipelines/docker-sonic-vs/Dockerfile @@ -4,4 +4,8 @@ ARG docker_container_name ADD ["wheels", "/wheels"] -RUN pip3 install --no-deps --force-reinstall 
/wheels/sonic_utilities-1.2-py3-none-any.whl
+# Uninstalls only sonic-utilities and does not impact its dependencies
+RUN pip3 uninstall -y sonic-utilities
+
+# Installs sonic-utilities, adds missing dependencies, upgrades outdated dependencies
+RUN pip3 install /wheels/sonic_utilities-1.2-py3-none-any.whl

diff --git a/config/main.py b/config/main.py
index 244a2570a6..6fad33f9c1 100644
--- a/config/main.py
+++ b/config/main.py
@@ -3,6 +3,7 @@
 import click
 import ipaddress
 import json
+import jsonpatch
 import netaddr
 import netifaces
 import os
@@ -11,6 +12,7 @@
 import sys
 import time

+from generic_config_updater.generic_updater import GenericUpdater, ConfigFormat
 from socket import AF_INET, AF_INET6
 from minigraph import parse_device_desc_xml
 from portconfig import get_child_ports
@@ -826,7 +828,7 @@ def cache_arp_entries():
     if filter_err:
         click.echo("Could not filter FDB entries prior to reloading")
         success = False
-    
+
     # If we are able to successfully cache ARP table info, signal SWSS to restore from our cache
     # by creating /host/config-reload/needs-restore
     if success:
@@ -987,6 +989,129 @@ def load(filename, yes):
     log.log_info("'load' executing...")
     clicommon.run_command(command, display_cmd=True)

+@config.command('apply-patch')
+@click.argument('patch-file-path', type=str, required=True)
+@click.option('-f', '--format', type=click.Choice([e.name for e in ConfigFormat]),
+              default=ConfigFormat.CONFIGDB.name,
+              help='format of config of the patch is either ConfigDb(ABNF) or SonicYang')
+@click.option('-d', '--dry-run', is_flag=True, default=False, help='test out the command without affecting config state')
+@click.option('-v', '--verbose', is_flag=True, default=False, help='print additional details of what the operation is doing')
+@click.pass_context
+def apply_patch(ctx, patch_file_path, format, dry_run, verbose):
+    """Apply given patch of updates to Config. A patch is a JsonPatch which follows rfc6902.
+       This command can be used to do partial updates to the config with minimum disruption to running processes.
+       It allows addition as well as deletion of configs. The patch file represents a diff of ConfigDb(ABNF)
+       format or SonicYang format.
+
+       <patch-file-path>: Path to the patch file on the file-system."""
+    try:
+        with open(patch_file_path, 'r') as fh:
+            text = fh.read()
+            patch_as_json = json.loads(text)
+            patch = jsonpatch.JsonPatch(patch_as_json)
+
+        config_format = ConfigFormat[format.upper()]
+
+        GenericUpdater().apply_patch(patch, config_format, verbose, dry_run)
+
+        click.secho("Patch applied successfully.", fg="cyan", underline=True)
+    except Exception as ex:
+        click.secho("Failed to apply patch", fg="red", underline=True, err=True)
+        ctx.fail(ex)
+
+@config.command()
+@click.argument('target-file-path', type=str, required=True)
+@click.option('-f', '--format', type=click.Choice([e.name for e in ConfigFormat]),
+              default=ConfigFormat.CONFIGDB.name,
+              help='format of target config is either ConfigDb(ABNF) or SonicYang')
+@click.option('-d', '--dry-run', is_flag=True, default=False, help='test out the command without affecting config state')
+@click.option('-v', '--verbose', is_flag=True, default=False, help='print additional details of what the operation is doing')
+@click.pass_context
+def replace(ctx, target_file_path, format, dry_run, verbose):
+    """Replace the whole config with the specified config. The config is replaced with minimum disruption e.g.
+ if ACL config is different between current and target config only ACL config is updated, and other config/services + such as DHCP will not be affected. + + **WARNING** The target config file should be the whole config, not just the part intended to be updated. + + : Path to the target file on the file-system.""" + try: + with open(target_file_path, 'r') as fh: + target_config_as_text = fh.read() + target_config = json.loads(target_config_as_text) + + config_format = ConfigFormat[format.upper()] + + GenericUpdater().replace(target_config, config_format, verbose, dry_run) + + click.secho("Config replaced successfully.", fg="cyan", underline=True) + except Exception as ex: + click.secho("Failed to replace config", fg="red", underline=True, err=True) + ctx.fail(ex) + +@config.command() +@click.argument('checkpoint-name', type=str, required=True) +@click.option('-d', '--dry-run', is_flag=True, default=False, help='test out the command without affecting config state') +@click.option('-v', '--verbose', is_flag=True, default=False, help='print additional details of what the operation is doing') +@click.pass_context +def rollback(ctx, checkpoint_name, dry_run, verbose): + """Rollback the whole config to the specified checkpoint. The config is rolled back with minimum disruption e.g. + if ACL config is different between current and checkpoint config only ACL config is updated, and other config/services + such as DHCP will not be affected. + + : The checkpoint name, use `config list-checkpoints` command to see available checkpoints.""" + try: + GenericUpdater().rollback(checkpoint_name, verbose, dry_run) + + click.secho("Config rolled back successfully.", fg="cyan", underline=True) + except Exception as ex: + click.secho("Failed to rollback config", fg="red", underline=True, err=True) + ctx.fail(ex) + +@config.command() +@click.argument('checkpoint-name', type=str, required=True) +@click.option('-v', '--verbose', is_flag=True, default=False, help='print additional details of what the operation is doing') +@click.pass_context +def checkpoint(ctx, checkpoint_name, verbose): + """Take a checkpoint of the whole current config with the specified checkpoint name. + + : The checkpoint name, use `config list-checkpoints` command to see available checkpoints.""" + try: + GenericUpdater().checkpoint(checkpoint_name, verbose) + + click.secho("Checkpoint created successfully.", fg="cyan", underline=True) + except Exception as ex: + click.secho("Failed to create a config checkpoint", fg="red", underline=True, err=True) + ctx.fail(ex) + +@config.command('delete-checkpoint') +@click.argument('checkpoint-name', type=str, required=True) +@click.option('-v', '--verbose', is_flag=True, default=False, help='print additional details of what the operation is doing') +@click.pass_context +def delete_checkpoint(ctx, checkpoint_name, verbose): + """Delete a checkpoint with the specified checkpoint name. 
+ + : The checkpoint name, use `config list-checkpoints` command to see available checkpoints.""" + try: + GenericUpdater().delete_checkpoint(checkpoint_name, verbose) + + click.secho("Checkpoint deleted successfully.", fg="cyan", underline=True) + except Exception as ex: + click.secho("Failed to delete config checkpoint", fg="red", underline=True, err=True) + ctx.fail(ex) + +@config.command('list-checkpoints') +@click.option('-v', '--verbose', is_flag=True, default=False, help='print additional details of what the operation is doing') +@click.pass_context +def list_checkpoints(ctx, verbose): + """List the config checkpoints available.""" + try: + checkpoints_list = GenericUpdater().list_checkpoints(verbose) + formatted_output = json.dumps(checkpoints_list, indent=4) + click.echo(formatted_output) + except Exception as ex: + click.secho("Failed to list config checkpoints", fg="red", underline=True, err=True) + ctx.fail(ex) @config.command() @click.option('-y', '--yes', is_flag=True) @@ -2581,8 +2706,8 @@ def add(ctx, interface_name, ip_addr, gw): if interface_name is None: ctx.fail("'interface_name' is None!") - # Add a validation to check this interface is not a member in vlan before - # changing it to a router port + # Add a validation to check this interface is not a member in vlan before + # changing it to a router port vlan_member_table = config_db.get_table('VLAN_MEMBER') if (interface_is_in_vlan(vlan_member_table, interface_name)): click.echo("Interface {} is a member of vlan\nAborting!".format(interface_name)) diff --git a/generic_config_updater/__init__.py b/generic_config_updater/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/generic_config_updater/generic_updater.py b/generic_config_updater/generic_updater.py new file mode 100644 index 0000000000..079d7ab742 --- /dev/null +++ b/generic_config_updater/generic_updater.py @@ -0,0 +1,339 @@ +import json +import os +from enum import Enum +from .gu_common import GenericConfigUpdaterError, ConfigWrapper, \ + DryRunConfigWrapper, PatchWrapper + +CHECKPOINTS_DIR = "/etc/sonic/checkpoints" +CHECKPOINT_EXT = ".cp.json" + +class ConfigLock: + def acquire_lock(self): + # TODO: Implement ConfigLock + pass + + def release_lock(self): + # TODO: Implement ConfigLock + pass + +class PatchSorter: + def sort(self, patch): + # TODO: Implement patch sorter + raise NotImplementedError("PatchSorter.sort(patch) is not implemented yet") + +class ChangeApplier: + def apply(self, change): + # TODO: Implement change applier + raise NotImplementedError("ChangeApplier.apply(change) is not implemented yet") + +class ConfigFormat(Enum): + CONFIGDB = 1 + SONICYANG = 2 + +class PatchApplier: + def __init__(self, + patchsorter=None, + changeapplier=None, + config_wrapper=None, + patch_wrapper=None): + self.patchsorter = patchsorter if patchsorter is not None else PatchSorter() + self.changeapplier = changeapplier if changeapplier is not None else ChangeApplier() + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper() + self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper() + + def apply(self, patch): + # validate patch is only updating tables with yang models + if not(self.patch_wrapper.validate_config_db_patch_has_yang_models(patch)): + raise ValueError(f"Given patch is not valid because it has changes to tables without YANG models") + + # Get old config + old_config = self.config_wrapper.get_config_db_as_json() + + # Generate target config + target_config = 
self.patch_wrapper.simulate_patch(patch, old_config) + + # Validate target config + if not(self.config_wrapper.validate_config_db_config(target_config)): + raise ValueError(f"Given patch is not valid because it will result in an invalid config") + + # Generate list of changes to apply + changes = self.patchsorter.sort(patch) + + # Apply changes in order + for change in changes: + self.changeapplier.apply(change) + + # Validate config updated successfully + new_config = self.config_wrapper.get_config_db_as_json() + if not(self.patch_wrapper.verify_same_json(target_config, new_config)): + raise GenericConfigUpdaterError(f"After applying patch to config, there are still some parts not updated") + +class ConfigReplacer: + def __init__(self, patch_applier=None, config_wrapper=None, patch_wrapper=None): + self.patch_applier = patch_applier if patch_applier is not None else PatchApplier() + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper() + self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper() + + def replace(self, target_config): + if not(self.config_wrapper.validate_config_db_config(target_config)): + raise ValueError(f"The given target config is not valid") + + old_config = self.config_wrapper.get_config_db_as_json() + patch = self.patch_wrapper.generate_patch(old_config, target_config) + + self.patch_applier.apply(patch) + + new_config = self.config_wrapper.get_config_db_as_json() + if not(self.patch_wrapper.verify_same_json(target_config, new_config)): + raise GenericConfigUpdaterError(f"After replacing config, there is still some parts not updated") + +class FileSystemConfigRollbacker: + def __init__(self, + checkpoints_dir=CHECKPOINTS_DIR, + config_replacer=None, + config_wrapper=None): + self.checkpoints_dir = checkpoints_dir + self.config_replacer = config_replacer if config_replacer is not None else ConfigReplacer() + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper() + + def rollback(self, checkpoint_name): + if not self._check_checkpoint_exists(checkpoint_name): + raise ValueError(f"Checkpoint '{checkpoint_name}' does not exist") + + target_config = self._get_checkpoint_content(checkpoint_name) + + self.config_replacer.replace(target_config) + + def checkpoint(self, checkpoint_name): + json_content = self.config_wrapper.get_config_db_as_json() + + if not self.config_wrapper.validate_config_db_config(json_content): + raise ValueError(f"Running configs on the device are not valid.") + + path = self._get_checkpoint_full_path(checkpoint_name) + + self._ensure_checkpoints_dir_exists() + + self._save_json_file(path, json_content) + + def list_checkpoints(self): + if not self._checkpoints_dir_exist(): + return [] + + return self._get_checkpoint_names() + + def delete_checkpoint(self, checkpoint_name): + if not self._check_checkpoint_exists(checkpoint_name): + raise ValueError(f"Checkpoint '{checkpoint_name}' does not exist") + + self._delete_checkpoint(checkpoint_name) + + def _ensure_checkpoints_dir_exists(self): + os.makedirs(self.checkpoints_dir, exist_ok=True) + + def _save_json_file(self, path, json_content): + with open(path, "w") as fh: + fh.write(json.dumps(json_content)) + + def _get_checkpoint_content(self, checkpoint_name): + path = self._get_checkpoint_full_path(checkpoint_name) + with open(path) as fh: + text = fh.read() + return json.loads(text) + + def _get_checkpoint_full_path(self, name): + return os.path.join(self.checkpoints_dir, f"{name}{CHECKPOINT_EXT}") + + def 
_get_checkpoint_names(self): + file_names = [] + for file_name in os.listdir(self.checkpoints_dir): + if file_name.endswith(CHECKPOINT_EXT): + # Remove extension from file name. + # Example assuming ext is '.cp.json', then 'checkpoint1.cp.json' becomes 'checkpoint1' + file_names.append(file_name[:-len(CHECKPOINT_EXT)]) + + return file_names + + def _checkpoints_dir_exist(self): + return os.path.isdir(self.checkpoints_dir) + + def _check_checkpoint_exists(self, name): + path = self._get_checkpoint_full_path(name) + return os.path.isfile(path) + + def _delete_checkpoint(self, name): + path = self._get_checkpoint_full_path(name) + return os.remove(path) + +class Decorator(PatchApplier, ConfigReplacer, FileSystemConfigRollbacker): + def __init__(self, decorated_patch_applier=None, decorated_config_replacer=None, decorated_config_rollbacker=None): + # initing base classes to make LGTM happy + PatchApplier.__init__(self) + ConfigReplacer.__init__(self) + FileSystemConfigRollbacker.__init__(self) + + self.decorated_patch_applier = decorated_patch_applier + self.decorated_config_replacer = decorated_config_replacer + self.decorated_config_rollbacker = decorated_config_rollbacker + + def apply(self, patch): + self.decorated_patch_applier.apply(patch) + + def replace(self, target_config): + self.decorated_config_replacer.replace(target_config) + + def rollback(self, checkpoint_name): + self.decorated_config_rollbacker.rollback(checkpoint_name) + + def checkpoint(self, checkpoint_name): + self.decorated_config_rollbacker.checkpoint(checkpoint_name) + + def list_checkpoints(self): + return self.decorated_config_rollbacker.list_checkpoints() + + def delete_checkpoint(self, checkpoint_name): + self.decorated_config_rollbacker.delete_checkpoint(checkpoint_name) + +class SonicYangDecorator(Decorator): + def __init__(self, patch_wrapper, config_wrapper, decorated_patch_applier=None, decorated_config_replacer=None): + Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer) + + self.patch_wrapper = patch_wrapper + self.config_wrapper = config_wrapper + + def apply(self, patch): + config_db_patch = self.patch_wrapper.convert_sonic_yang_patch_to_config_db_patch(patch) + Decorator.apply(self, config_db_patch) + + def replace(self, target_config): + config_db_target_config = self.config_wrapper.convert_sonic_yang_to_config_db(target_config) + Decorator.replace(self, config_db_target_config) + +class ConfigLockDecorator(Decorator): + def __init__(self, + decorated_patch_applier=None, + decorated_config_replacer=None, + decorated_config_rollbacker=None, + config_lock = ConfigLock()): + Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, decorated_config_rollbacker) + + self.config_lock = config_lock + + def apply(self, patch): + self.execute_write_action(Decorator.apply, self, patch) + + def replace(self, target_config): + self.execute_write_action(Decorator.replace, self, target_config) + + def rollback(self, checkpoint_name): + self.execute_write_action(Decorator.rollback, self, checkpoint_name) + + def checkpoint(self, checkpoint_name): + self.execute_write_action(Decorator.checkpoint, self, checkpoint_name) + + def execute_write_action(self, action, *args): + self.config_lock.acquire_lock() + action(*args) + self.config_lock.release_lock() + +class GenericUpdateFactory: + def create_patch_applier(self, config_format, verbose, dry_run): + self.init_verbose_logging(verbose) + + config_wrapper = self.get_config_wrapper(dry_run) + + patch_applier = 
PatchApplier(config_wrapper=config_wrapper) + + patch_wrapper = PatchWrapper(config_wrapper) + + if config_format == ConfigFormat.CONFIGDB: + pass + elif config_format == ConfigFormat.SONICYANG: + patch_applier = SonicYangDecorator( + decorated_patch_applier = patch_applier, patch_wrapper=patch_wrapper, config_wrapper=config_wrapper) + else: + raise ValueError(f"config-format '{config_format}' is not supported") + + if not dry_run: + patch_applier = ConfigLockDecorator(decorated_patch_applier = patch_applier) + + return patch_applier + + def create_config_replacer(self, config_format, verbose, dry_run): + self.init_verbose_logging(verbose) + + config_wrapper = self.get_config_wrapper(dry_run) + + patch_applier = PatchApplier(config_wrapper=config_wrapper) + + patch_wrapper = PatchWrapper(config_wrapper) + + config_replacer = ConfigReplacer(patch_applier=patch_applier, config_wrapper=config_wrapper) + if config_format == ConfigFormat.CONFIGDB: + pass + elif config_format == ConfigFormat.SONICYANG: + config_replacer = SonicYangDecorator( + decorated_config_replacer = config_replacer, patch_wrapper=patch_wrapper, config_wrapper=config_wrapper) + else: + raise ValueError(f"config-format '{config_format}' is not supported") + + if not dry_run: + config_replacer = ConfigLockDecorator(decorated_config_replacer = config_replacer) + + return config_replacer + + def create_config_rollbacker(self, verbose, dry_run=False): + self.init_verbose_logging(verbose) + + config_wrapper = self.get_config_wrapper(dry_run) + + patch_applier = PatchApplier(config_wrapper=config_wrapper) + config_replacer = ConfigReplacer(config_wrapper=config_wrapper, patch_applier=patch_applier) + config_rollbacker = FileSystemConfigRollbacker(config_wrapper = config_wrapper, config_replacer = config_replacer) + + if not dry_run: + config_rollbacker = ConfigLockDecorator(decorated_config_rollbacker = config_rollbacker) + + return config_rollbacker + + def init_verbose_logging(self, verbose): + # TODO: implement verbose logging + # Usually logs have levels such as: error, warning, info, debug. + # By default all log levels should show up to the user, except debug. + # By allowing verbose logging, debug msgs will also be shown to the user. 
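#       A sketch of how this TODO might be realized (illustrative only, not
#       part of the committed patch; it assumes the updater standardizes on
#       the stdlib logging package):
#
#           import logging
#
#           def init_verbose_logging(self, verbose):
#               # Show debug messages only when --verbose is passed
#               logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)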
+ pass + + def get_config_wrapper(self, dry_run): + if dry_run: + return DryRunConfigWrapper() + else: + return ConfigWrapper() + +class GenericUpdater: + def __init__(self, generic_update_factory=None): + self.generic_update_factory = \ + generic_update_factory if generic_update_factory is not None else GenericUpdateFactory() + + def apply_patch(self, patch, config_format, verbose, dry_run): + patch_applier = self.generic_update_factory.create_patch_applier(config_format, verbose, dry_run) + patch_applier.apply(patch) + + def replace(self, target_config, config_format, verbose, dry_run): + config_replacer = self.generic_update_factory.create_config_replacer(config_format, verbose, dry_run) + config_replacer.replace(target_config) + + def rollback(self, checkpoint_name, verbose, dry_run): + config_rollbacker = self.generic_update_factory.create_config_rollbacker(verbose, dry_run) + config_rollbacker.rollback(checkpoint_name) + + def checkpoint(self, checkpoint_name, verbose): + config_rollbacker = self.generic_update_factory.create_config_rollbacker(verbose) + config_rollbacker.checkpoint(checkpoint_name) + + def delete_checkpoint(self, checkpoint_name, verbose): + config_rollbacker = self.generic_update_factory.create_config_rollbacker(verbose) + config_rollbacker.delete_checkpoint(checkpoint_name) + + def list_checkpoints(self, verbose): + config_rollbacker = self.generic_update_factory.create_config_rollbacker(verbose) + return config_rollbacker.list_checkpoints() diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py new file mode 100644 index 0000000000..2aa6a36d8a --- /dev/null +++ b/generic_config_updater/gu_common.py @@ -0,0 +1,176 @@ +import json +import jsonpatch +import sonic_yang +import subprocess +import copy + +YANG_DIR = "/usr/local/yang-models" + +class GenericConfigUpdaterError(Exception): + pass + +class JsonChange: + # TODO: Implement JsonChange + pass + +class ConfigWrapper: + def __init__(self, yang_dir = YANG_DIR): + self.yang_dir = YANG_DIR + + def get_config_db_as_json(self): + text = self._get_config_db_as_text() + return json.loads(text) + + def _get_config_db_as_text(self): + # TODO: Getting configs from CLI is very slow, need to get it from sonic-cffgen directly + cmd = "show runningconfiguration all" + result = subprocess.Popen(cmd, shell=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + text, err = result.communicate() + return_code = result.returncode + if return_code: # non-zero means failure + raise GenericConfigUpdaterError(f"Failed to get running config, Return code: {return_code}, Error: {err}") + return text + + def get_sonic_yang_as_json(self): + config_db_json = self.get_config_db_as_json() + return self.convert_config_db_to_sonic_yang(config_db_json) + + def convert_config_db_to_sonic_yang(self, config_db_as_json): + sy = sonic_yang.SonicYang(self.yang_dir) + sy.loadYangModel() + + # Crop config_db tables that do not have sonic yang models + cropped_config_db_as_json = self.crop_tables_without_yang(config_db_as_json) + + sonic_yang_as_json = dict() + + sy._xlateConfigDBtoYang(cropped_config_db_as_json, sonic_yang_as_json) + + return sonic_yang_as_json + + def convert_sonic_yang_to_config_db(self, sonic_yang_as_json): + sy = sonic_yang.SonicYang(self.yang_dir) + sy.loadYangModel() + + # replace container of the format 'module:table' with just 'table' + new_sonic_yang_json = {} + for module_top in sonic_yang_as_json: + new_sonic_yang_json[module_top] = {} + for container in 
sonic_yang_as_json[module_top]: + tokens = container.split(':') + if len(tokens) > 2: + raise ValueError(f"Expecting ':' or '
', found {container}") + table = container if len(tokens) == 1 else tokens[1] + new_sonic_yang_json[module_top][table] = sonic_yang_as_json[module_top][container] + + config_db_as_json = dict() + sy.xlateJson = new_sonic_yang_json + sy.revXlateJson = config_db_as_json + sy._revXlateYangtoConfigDB(new_sonic_yang_json, config_db_as_json) + + return config_db_as_json + + def validate_sonic_yang_config(self, sonic_yang_as_json): + config_db_as_json = self.convert_sonic_yang_to_config_db(sonic_yang_as_json) + + sy = sonic_yang.SonicYang(self.yang_dir) + sy.loadYangModel() + + try: + sy.loadData(config_db_as_json) + + sy.validate_data_tree() + return True + except sonic_yang.SonicYangException as ex: + return False + + def validate_config_db_config(self, config_db_as_json): + sy = sonic_yang.SonicYang(self.yang_dir) + sy.loadYangModel() + + try: + tmp_config_db_as_json = copy.deepcopy(config_db_as_json) + + sy.loadData(tmp_config_db_as_json) + + sy.validate_data_tree() + return True + except sonic_yang.SonicYangException as ex: + return False + + def crop_tables_without_yang(self, config_db_as_json): + sy = sonic_yang.SonicYang(self.yang_dir) + sy.loadYangModel() + + sy.jIn = copy.deepcopy(config_db_as_json) + + sy.tablesWithOutYang = dict() + + sy._cropConfigDB() + + return sy.jIn + + def _create_and_connect_config_db(self): + if self.default_config_db_connector != None: + return self.default_config_db_connector + + config_db = ConfigDBConnector() + config_db.connect() + return config_db + +class DryRunConfigWrapper(ConfigWrapper): + # TODO: implement DryRunConfigWrapper + # This class will simulate all read/write operations to ConfigDB on a virtual storage unit. + pass + +class PatchWrapper: + def __init__(self, config_wrapper=None): + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper() + + def validate_config_db_patch_has_yang_models(self, patch): + config_db = {} + for operation in patch: + tokens = operation['path'].split('/')[1:] + if len(tokens) == 0: # Modifying whole config_db + tables_dict = {table_name: {} for table_name in operation['value']} + config_db.update(tables_dict) + elif not tokens[0]: # Not empty + raise ValueError("Table name in patch cannot be empty") + else: + config_db[tokens[0]] = {} + + cropped_config_db = self.config_wrapper.crop_tables_without_yang(config_db) + + # valid if no tables dropped during cropping + return len(cropped_config_db.keys()) == len(config_db.keys()) + + def verify_same_json(self, expected, actual): + # patch will be [] if no diff, [] evaluates to False + return not jsonpatch.make_patch(expected, actual) + + def generate_patch(self, current, target): + return jsonpatch.make_patch(current, target) + + def simulate_patch(self, patch, jsonconfig): + return patch.apply(jsonconfig) + + def convert_config_db_patch_to_sonic_yang_patch(self, patch): + if not(self.validate_config_db_patch_has_yang_models(patch)): + raise ValueError(f"Given patch is not valid") + + current_config_db = self.config_wrapper.get_config_db_as_json() + target_config_db = self.simulate_patch(patch, current_config_db) + + current_yang = self.config_wrapper.convert_config_db_to_sonic_yang(current_config_db) + target_yang = self.config_wrapper.convert_config_db_to_sonic_yang(target_config_db) + + return self.generate_patch(current_yang, target_yang) + + def convert_sonic_yang_patch_to_config_db_patch(self, patch): + current_yang = self.config_wrapper.get_sonic_yang_as_json() + target_yang = self.simulate_patch(patch, current_yang) + + 
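#       For reference, the jsonpatch primitives these wrappers build on behave
#       as follows (toy table values, illustrative only):
#
#           import jsonpatch
#           old = {"PORT": {"Ethernet0": {"admin_status": "up"}}}
#           new = {"PORT": {"Ethernet0": {"admin_status": "down"}}}
#           patch = jsonpatch.make_patch(old, new)  # as in generate_patch()
#           assert patch.apply(old) == new          # as in simulate_patch()
#           assert not jsonpatch.make_patch(new, patch.apply(old))  # verify_same_json()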
current_config_db = self.config_wrapper.convert_sonic_yang_to_config_db(current_yang) + target_config_db = self.config_wrapper.convert_sonic_yang_to_config_db(target_yang) + + return self.generate_patch(current_config_db, target_config_db) diff --git a/setup.py b/setup.py index 02a8d53e38..d070827667 100644 --- a/setup.py +++ b/setup.py @@ -30,6 +30,7 @@ 'counterpoll', 'crm', 'debug', + 'generic_config_updater', 'pfcwd', 'sfputil', 'ssdutil', @@ -157,6 +158,7 @@ 'click==7.0', 'ipaddress==1.0.23', 'jsondiff==1.2.0', + 'jsonpatch==1.32.0', 'm2crypto==0.31.0', 'natsort==6.2.1', # 6.2.1 is the last version which supports Python 2. Can update once we no longer support Python 2 'netaddr==0.8.0', @@ -164,12 +166,13 @@ 'pexpect==4.8.0', 'pyroute2==0.5.14', 'requests==2.25.0', + 'sonic-config-engine', 'sonic-platform-common', 'sonic-py-common', 'sonic-yang-mgmt', 'swsssdk>=2.0.1', 'tabulate==0.8.2', - 'xmltodict==0.12.0' + 'xmltodict==0.12.0', ], setup_requires= [ 'pytest-runner', @@ -178,7 +181,6 @@ tests_require = [ 'pytest', 'mockredispy>=2.9.3', - 'sonic-config-engine', 'deepdiff==5.2.3' ], classifiers=[ diff --git a/tests/config_test.py b/tests/config_test.py index 381ca80304..32ecc5bdef 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -3,6 +3,9 @@ import os import traceback import json +import jsonpatch +import sys +import unittest from unittest import mock import click @@ -11,6 +14,10 @@ from sonic_py_common import device_info from utilities_common.db import Db +from generic_config_updater.generic_updater import ConfigFormat + +import config.main as config + load_minigraph_command_output="""\ Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -H -m --write-to-db @@ -150,3 +157,615 @@ def teardown_class(cls): from .mock_tables import mock_single_asic importlib.reload(mock_single_asic) dbconnector.load_namespace_config() + +class TestGenericUpdateCommands(unittest.TestCase): + def setUp(self): + os.environ['UTILITIES_UNIT_TESTING'] = "1" + self.runner = CliRunner() + self.any_patch_as_json = [{"op":"remove", "path":"/PORT"}] + self.any_patch = jsonpatch.JsonPatch(self.any_patch_as_json) + self.any_patch_as_text = json.dumps(self.any_patch_as_json) + self.any_path = '/usr/admin/patch.json-patch' + self.any_target_config = {"PORT": {}} + self.any_target_config_as_text = json.dumps(self.any_target_config) + self.any_checkpoint_name = "any_checkpoint_name" + self.any_checkpoints_list = ["checkpoint1", "checkpoint2", "checkpoint3"] + self.any_checkpoints_list_as_text = json.dumps(self.any_checkpoints_list, indent=4) + + def test_apply_patch__no_params__get_required_params_error_msg(self): + # Arrange + unexpected_exit_code = 0 + expected_output = "Error: Missing argument \"PATCH_FILE_PATH\"" + + # Act + result = self.runner.invoke(config.config.commands["apply-patch"]) + + # Assert + self.assertNotEqual(unexpected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + + def test_apply_patch__help__gets_help_msg(self): + # Arrange + expected_exit_code = 0 + expected_output = "Options:" # this indicates the options are listed + + # Act + result = self.runner.invoke(config.config.commands["apply-patch"], ['--help']) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + + def test_apply_patch__only_required_params__default_values_used_for_optional_params(self): + # Arrange + expected_exit_code = 0 + expected_output = "Patch applied successfully" + 
expected_call_with_default_values = mock.call(self.any_patch, ConfigFormat.CONFIGDB, False, False) + mock_generic_updater = mock.Mock() + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + with mock.patch('builtins.open', mock.mock_open(read_data=self.any_patch_as_text)): + + # Act + result = self.runner.invoke(config.config.commands["apply-patch"], [self.any_path], catch_exceptions=False) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + mock_generic_updater.apply_patch.assert_called_once() + mock_generic_updater.apply_patch.assert_has_calls([expected_call_with_default_values]) + + def test_apply_patch__all_optional_params_non_default__non_default_values_used(self): + # Arrange + expected_exit_code = 0 + expected_output = "Patch applied successfully" + expected_call_with_non_default_values = mock.call(self.any_patch, ConfigFormat.SONICYANG, True, True) + mock_generic_updater = mock.Mock() + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + with mock.patch('builtins.open', mock.mock_open(read_data=self.any_patch_as_text)): + + # Act + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.any_path, + "--format", ConfigFormat.SONICYANG.name, + "--dry-run", + "--verbose"], + catch_exceptions=False) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + mock_generic_updater.apply_patch.assert_called_once() + mock_generic_updater.apply_patch.assert_has_calls([expected_call_with_non_default_values]) + + def test_apply_patch__exception_thrown__error_displayed_error_code_returned(self): + # Arrange + unexpected_exit_code = 0 + any_error_message = "any_error_message" + mock_generic_updater = mock.Mock() + mock_generic_updater.apply_patch.side_effect = Exception(any_error_message) + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + with mock.patch('builtins.open', mock.mock_open(read_data=self.any_patch_as_text)): + + # Act + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.any_path], + catch_exceptions=False) + + # Assert + self.assertNotEqual(unexpected_exit_code, result.exit_code) + self.assertTrue(any_error_message in result.output) + + def test_apply_patch__optional_parameters_passed_correctly(self): + self.validate_apply_patch_optional_parameter( + ["--format", ConfigFormat.SONICYANG.name], + mock.call(self.any_patch, ConfigFormat.SONICYANG, False, False)) + self.validate_apply_patch_optional_parameter( + ["--verbose"], + mock.call(self.any_patch, ConfigFormat.CONFIGDB, True, False)) + self.validate_apply_patch_optional_parameter( + ["--dry-run"], + mock.call(self.any_patch, ConfigFormat.CONFIGDB, False, True)) + + def validate_apply_patch_optional_parameter(self, param_args, expected_call): + # Arrange + expected_exit_code = 0 + expected_output = "Patch applied successfully" + mock_generic_updater = mock.Mock() + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + with mock.patch('builtins.open', mock.mock_open(read_data=self.any_patch_as_text)): + + # Act + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.any_path] + param_args, + catch_exceptions=False) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + mock_generic_updater.apply_patch.assert_called_once() + 
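+                # assert_has_calls with the exact mock.call verifies the precise
+                # arguments forwarded to GenericUpdater, not just the call count.
+                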
mock_generic_updater.apply_patch.assert_has_calls([expected_call]) + + def test_replace__no_params__get_required_params_error_msg(self): + # Arrange + unexpected_exit_code = 0 + expected_output = "Error: Missing argument \"TARGET_FILE_PATH\"" + + # Act + result = self.runner.invoke(config.config.commands["replace"]) + + # Assert + self.assertNotEqual(unexpected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + + def test_replace__help__gets_help_msg(self): + # Arrange + expected_exit_code = 0 + expected_output = "Options:" # this indicates the options are listed + + # Act + result = self.runner.invoke(config.config.commands["replace"], ['--help']) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + + def test_replace__only_required_params__default_values_used_for_optional_params(self): + # Arrange + expected_exit_code = 0 + expected_output = "Config replaced successfully" + expected_call_with_default_values = mock.call(self.any_target_config, ConfigFormat.CONFIGDB, False, False) + mock_generic_updater = mock.Mock() + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + with mock.patch('builtins.open', mock.mock_open(read_data=self.any_target_config_as_text)): + + # Act + result = self.runner.invoke(config.config.commands["replace"], [self.any_path], catch_exceptions=False) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + mock_generic_updater.replace.assert_called_once() + mock_generic_updater.replace.assert_has_calls([expected_call_with_default_values]) + + def test_replace__all_optional_params_non_default__non_default_values_used(self): + # Arrange + expected_exit_code = 0 + expected_output = "Config replaced successfully" + expected_call_with_non_default_values = mock.call(self.any_target_config, ConfigFormat.SONICYANG, True, True) + mock_generic_updater = mock.Mock() + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + with mock.patch('builtins.open', mock.mock_open(read_data=self.any_target_config_as_text)): + + # Act + result = self.runner.invoke(config.config.commands["replace"], + [self.any_path, + "--format", ConfigFormat.SONICYANG.name, + "--dry-run", + "--verbose"], + catch_exceptions=False) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + mock_generic_updater.replace.assert_called_once() + mock_generic_updater.replace.assert_has_calls([expected_call_with_non_default_values]) + + def test_replace__exception_thrown__error_displayed_error_code_returned(self): + # Arrange + unexpected_exit_code = 0 + any_error_message = "any_error_message" + mock_generic_updater = mock.Mock() + mock_generic_updater.replace.side_effect = Exception(any_error_message) + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + with mock.patch('builtins.open', mock.mock_open(read_data=self.any_target_config_as_text)): + + # Act + result = self.runner.invoke(config.config.commands["replace"], + [self.any_path], + catch_exceptions=False) + + # Assert + self.assertNotEqual(unexpected_exit_code, result.exit_code) + self.assertTrue(any_error_message in result.output) + + def test_replace__optional_parameters_passed_correctly(self): + self.validate_replace_optional_parameter( + ["--format", ConfigFormat.SONICYANG.name], + mock.call(self.any_target_config, 
ConfigFormat.SONICYANG, False, False)) + self.validate_replace_optional_parameter( + ["--verbose"], + mock.call(self.any_target_config, ConfigFormat.CONFIGDB, True, False)) + self.validate_replace_optional_parameter( + ["--dry-run"], + mock.call(self.any_target_config, ConfigFormat.CONFIGDB, False, True)) + + def validate_replace_optional_parameter(self, param_args, expected_call): + # Arrange + expected_exit_code = 0 + expected_output = "Config replaced successfully" + mock_generic_updater = mock.Mock() + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + with mock.patch('builtins.open', mock.mock_open(read_data=self.any_target_config_as_text)): + + # Act + result = self.runner.invoke(config.config.commands["replace"], + [self.any_path] + param_args, + catch_exceptions=False) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + mock_generic_updater.replace.assert_called_once() + mock_generic_updater.replace.assert_has_calls([expected_call]) + + def test_rollback__no_params__get_required_params_error_msg(self): + # Arrange + unexpected_exit_code = 0 + expected_output = "Error: Missing argument \"CHECKPOINT_NAME\"" + + # Act + result = self.runner.invoke(config.config.commands["rollback"]) + + # Assert + self.assertNotEqual(unexpected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + + def test_rollback__help__gets_help_msg(self): + # Arrange + expected_exit_code = 0 + expected_output = "Options:" # this indicates the options are listed + + # Act + result = self.runner.invoke(config.config.commands["rollback"], ['--help']) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + + def test_rollback__only_required_params__default_values_used_for_optional_params(self): + # Arrange + expected_exit_code = 0 + expected_output = "Config rolled back successfully" + expected_call_with_default_values = mock.call(self.any_checkpoint_name, False, False) + mock_generic_updater = mock.Mock() + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + # Act + result = self.runner.invoke(config.config.commands["rollback"], [self.any_checkpoint_name], catch_exceptions=False) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + mock_generic_updater.rollback.assert_called_once() + mock_generic_updater.rollback.assert_has_calls([expected_call_with_default_values]) + + def test_rollback__all_optional_params_non_default__non_default_values_used(self): + # Arrange + expected_exit_code = 0 + expected_output = "Config rolled back successfully" + expected_call_with_non_default_values = mock.call(self.any_checkpoint_name, True, True) + mock_generic_updater = mock.Mock() + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + + # Act + result = self.runner.invoke(config.config.commands["rollback"], + [self.any_checkpoint_name, + "--dry-run", + "--verbose"], + catch_exceptions=False) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + mock_generic_updater.rollback.assert_called_once() + mock_generic_updater.rollback.assert_has_calls([expected_call_with_non_default_values]) + + def test_rollback__exception_thrown__error_displayed_error_code_returned(self): + # Arrange + unexpected_exit_code = 0 + any_error_message = 
"any_error_message" + mock_generic_updater = mock.Mock() + mock_generic_updater.rollback.side_effect = Exception(any_error_message) + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + + # Act + result = self.runner.invoke(config.config.commands["rollback"], + [self.any_checkpoint_name], + catch_exceptions=False) + + # Assert + self.assertNotEqual(unexpected_exit_code, result.exit_code) + self.assertTrue(any_error_message in result.output) + + def test_rollback__optional_parameters_passed_correctly(self): + self.validate_rollback_optional_parameter( + ["--verbose"], + mock.call(self.any_checkpoint_name, True, False)) + self.validate_rollback_optional_parameter( + ["--dry-run"], + mock.call(self.any_checkpoint_name, False, True)) + + def validate_rollback_optional_parameter(self, param_args, expected_call): + # Arrange + expected_exit_code = 0 + expected_output = "Config rolled back successfully" + mock_generic_updater = mock.Mock() + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + # Act + result = self.runner.invoke(config.config.commands["rollback"], + [self.any_checkpoint_name] + param_args, + catch_exceptions=False) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + mock_generic_updater.rollback.assert_called_once() + mock_generic_updater.rollback.assert_has_calls([expected_call]) + + def test_checkpoint__no_params__get_required_params_error_msg(self): + # Arrange + unexpected_exit_code = 0 + expected_output = "Error: Missing argument \"CHECKPOINT_NAME\"" + + # Act + result = self.runner.invoke(config.config.commands["checkpoint"]) + + # Assert + self.assertNotEqual(unexpected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + + def test_checkpoint__help__gets_help_msg(self): + # Arrange + expected_exit_code = 0 + expected_output = "Options:" # this indicates the options are listed + + # Act + result = self.runner.invoke(config.config.commands["checkpoint"], ['--help']) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + + def test_checkpoint__only_required_params__default_values_used_for_optional_params(self): + # Arrange + expected_exit_code = 0 + expected_output = "Checkpoint created successfully" + expected_call_with_default_values = mock.call(self.any_checkpoint_name, False) + mock_generic_updater = mock.Mock() + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + # Act + result = self.runner.invoke(config.config.commands["checkpoint"], [self.any_checkpoint_name], catch_exceptions=False) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + mock_generic_updater.checkpoint.assert_called_once() + mock_generic_updater.checkpoint.assert_has_calls([expected_call_with_default_values]) + + def test_checkpoint__all_optional_params_non_default__non_default_values_used(self): + # Arrange + expected_exit_code = 0 + expected_output = "Checkpoint created successfully" + expected_call_with_non_default_values = mock.call(self.any_checkpoint_name, True) + mock_generic_updater = mock.Mock() + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + + # Act + result = self.runner.invoke(config.config.commands["checkpoint"], + [self.any_checkpoint_name, + "--verbose"], + catch_exceptions=False) + + # Assert + 
self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + mock_generic_updater.checkpoint.assert_called_once() + mock_generic_updater.checkpoint.assert_has_calls([expected_call_with_non_default_values]) + + def test_checkpoint__exception_thrown__error_displayed_error_code_returned(self): + # Arrange + unexpected_exit_code = 0 + any_error_message = "any_error_message" + mock_generic_updater = mock.Mock() + mock_generic_updater.checkpoint.side_effect = Exception(any_error_message) + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + + # Act + result = self.runner.invoke(config.config.commands["checkpoint"], + [self.any_checkpoint_name], + catch_exceptions=False) + + # Assert + self.assertNotEqual(unexpected_exit_code, result.exit_code) + self.assertTrue(any_error_message in result.output) + + def test_checkpoint__optional_parameters_passed_correctly(self): + self.validate_checkpoint_optional_parameter( + ["--verbose"], + mock.call(self.any_checkpoint_name, True)) + + def validate_checkpoint_optional_parameter(self, param_args, expected_call): + # Arrange + expected_exit_code = 0 + expected_output = "Checkpoint created successfully" + mock_generic_updater = mock.Mock() + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + # Act + result = self.runner.invoke(config.config.commands["checkpoint"], + [self.any_checkpoint_name] + param_args, + catch_exceptions=False) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + mock_generic_updater.checkpoint.assert_called_once() + mock_generic_updater.checkpoint.assert_has_calls([expected_call]) + + def test_delete_checkpoint__no_params__get_required_params_error_msg(self): + # Arrange + unexpected_exit_code = 0 + expected_output = "Error: Missing argument \"CHECKPOINT_NAME\"" + + # Act + result = self.runner.invoke(config.config.commands["delete-checkpoint"]) + + # Assert + self.assertNotEqual(unexpected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + + def test_delete_checkpoint__help__gets_help_msg(self): + # Arrange + expected_exit_code = 0 + expected_output = "Options:" # this indicates the options are listed + + # Act + result = self.runner.invoke(config.config.commands["delete-checkpoint"], ['--help']) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + + def test_delete_checkpoint__only_required_params__default_values_used_for_optional_params(self): + # Arrange + expected_exit_code = 0 + expected_output = "Checkpoint deleted successfully" + expected_call_with_default_values = mock.call(self.any_checkpoint_name, False) + mock_generic_updater = mock.Mock() + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + # Act + result = self.runner.invoke(config.config.commands["delete-checkpoint"], [self.any_checkpoint_name], catch_exceptions=False) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + mock_generic_updater.delete_checkpoint.assert_called_once() + mock_generic_updater.delete_checkpoint.assert_has_calls([expected_call_with_default_values]) + + def test_delete_checkpoint__all_optional_params_non_default__non_default_values_used(self): + # Arrange + expected_exit_code = 0 + expected_output = "Checkpoint deleted successfully" + 
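+        # Creating and deleting checkpoints takes only (checkpoint_name, verbose);
+        # unlike rollback, these commands have no dry-run mode.
+        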
expected_call_with_non_default_values = mock.call(self.any_checkpoint_name, True) + mock_generic_updater = mock.Mock() + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + + # Act + result = self.runner.invoke(config.config.commands["delete-checkpoint"], + [self.any_checkpoint_name, + "--verbose"], + catch_exceptions=False) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + mock_generic_updater.delete_checkpoint.assert_called_once() + mock_generic_updater.delete_checkpoint.assert_has_calls([expected_call_with_non_default_values]) + + def test_delete_checkpoint__exception_thrown__error_displayed_error_code_returned(self): + # Arrange + unexpected_exit_code = 0 + any_error_message = "any_error_message" + mock_generic_updater = mock.Mock() + mock_generic_updater.delete_checkpoint.side_effect = Exception(any_error_message) + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + + # Act + result = self.runner.invoke(config.config.commands["delete-checkpoint"], + [self.any_checkpoint_name], + catch_exceptions=False) + + # Assert + self.assertNotEqual(unexpected_exit_code, result.exit_code) + self.assertTrue(any_error_message in result.output) + + def test_delete_checkpoint__optional_parameters_passed_correctly(self): + self.validate_delete_checkpoint_optional_parameter( + ["--verbose"], + mock.call(self.any_checkpoint_name, True)) + + def validate_delete_checkpoint_optional_parameter(self, param_args, expected_call): + # Arrange + expected_exit_code = 0 + expected_output = "Checkpoint deleted successfully" + mock_generic_updater = mock.Mock() + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + # Act + result = self.runner.invoke(config.config.commands["delete-checkpoint"], + [self.any_checkpoint_name] + param_args, + catch_exceptions=False) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + mock_generic_updater.delete_checkpoint.assert_called_once() + mock_generic_updater.delete_checkpoint.assert_has_calls([expected_call]) + + def test_list_checkpoints__help__gets_help_msg(self): + # Arrange + expected_exit_code = 0 + expected_output = "Options:" # this indicates the options are listed + + # Act + result = self.runner.invoke(config.config.commands["list-checkpoints"], ['--help']) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + + def test_list_checkpoints__all_optional_params_non_default__non_default_values_used(self): + # Arrange + expected_exit_code = 0 + expected_output = self.any_checkpoints_list_as_text + expected_call_with_non_default_values = mock.call(True) + mock_generic_updater = mock.Mock() + mock_generic_updater.list_checkpoints.return_value = self.any_checkpoints_list + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + + # Act + result = self.runner.invoke(config.config.commands["list-checkpoints"], + ["--verbose"], + catch_exceptions=False) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + mock_generic_updater.list_checkpoints.assert_called_once() + mock_generic_updater.list_checkpoints.assert_has_calls([expected_call_with_non_default_values]) + + def test_list_checkpoints__exception_thrown__error_displayed_error_code_returned(self): + # Arrange + 
unexpected_exit_code = 0 + any_error_message = "any_error_message" + mock_generic_updater = mock.Mock() + mock_generic_updater.list_checkpoints.side_effect = Exception(any_error_message) + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + + # Act + result = self.runner.invoke(config.config.commands["list-checkpoints"], + catch_exceptions=False) + + # Assert + self.assertNotEqual(unexpected_exit_code, result.exit_code) + self.assertTrue(any_error_message in result.output) + + def test_list_checkpoints__optional_parameters_passed_correctly(self): + self.validate_list_checkpoints_optional_parameter( + ["--verbose"], + mock.call(True)) + + def validate_list_checkpoints_optional_parameter(self, param_args, expected_call): + # Arrange + expected_exit_code = 0 + expected_output = self.any_checkpoints_list_as_text + mock_generic_updater = mock.Mock() + mock_generic_updater.list_checkpoints.return_value = self.any_checkpoints_list + with mock.patch('config.main.GenericUpdater', return_value=mock_generic_updater): + # Act + result = self.runner.invoke(config.config.commands["list-checkpoints"], + param_args, + catch_exceptions=False) + + # Assert + self.assertEqual(expected_exit_code, result.exit_code) + self.assertTrue(expected_output in result.output) + mock_generic_updater.list_checkpoints.assert_called_once() + mock_generic_updater.list_checkpoints.assert_has_calls([expected_call]) diff --git a/tests/generic_config_updater/__init__.py b/tests/generic_config_updater/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/generic_config_updater/files/config_db_after_multi_patch.json b/tests/generic_config_updater/files/config_db_after_multi_patch.json new file mode 100644 index 0000000000..042bf1d51b --- /dev/null +++ b/tests/generic_config_updater/files/config_db_after_multi_patch.json @@ -0,0 +1,122 @@ +{ + "VLAN_MEMBER": { + "Vlan1000|Ethernet0": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet4": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet8": { + "tagging_mode": "untagged" + }, + "Vlan100|Ethernet2": { + "tagging_mode": "untagged" + }, + "Vlan100|Ethernet3": { + "tagging_mode": "untagged" + }, + "Vlan100|Ethernet1": { + "tagging_mode": "untagged" + } + }, + "VLAN": { + "Vlan1000": { + "vlanid": "1000", + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + }, + "ACL_TABLE": { + "NO-NSW-PACL-V4": { + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0", + "Ethernet1", + "Ethernet2", + "Ethernet3" + ] + }, + "DATAACL": { + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + "EVERFLOW": { + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }, + "EVERFLOWV6": { + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet4", + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + }, + "PORT": { + "Ethernet0": { + "alias": "Eth1/1", + "lanes": "65", + "description": "", + "speed": "10000" + }, + "Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": "1", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet3": { + "alias": "Eth1/4", + "lanes": "68", + 
"description": "", + "speed": "10000" + }, + "Ethernet1": { + "alias": "Eth1/2", + "lanes": "66", + "description": "", + "speed": "10000" + }, + "Ethernet2": { + "alias": "Eth1/3", + "lanes": "67", + "description": "", + "speed": "10000" + } + }, + "TABLE_WITHOUT_YANG": { + "Item1": { + "key11": "value11", + "key12": "value12" + } + } +} \ No newline at end of file diff --git a/tests/generic_config_updater/files/config_db_as_json.json b/tests/generic_config_updater/files/config_db_as_json.json new file mode 100644 index 0000000000..02fb7c7e6a --- /dev/null +++ b/tests/generic_config_updater/files/config_db_as_json.json @@ -0,0 +1,92 @@ +{ + "VLAN_MEMBER": { + "Vlan1000|Ethernet0": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet4": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet8": { + "tagging_mode": "untagged" + } + }, + "VLAN": { + "Vlan1000": { + "vlanid": "1000", + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + }, + "ACL_TABLE": { + "NO-NSW-PACL-V4": { + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0" + ] + }, + "DATAACL": { + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + "EVERFLOW": { + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }, + "EVERFLOWV6": { + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet4", + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + }, + "PORT": { + "Ethernet0": { + "alias": "Eth1", + "lanes": "65, 66, 67, 68", + "description": "Ethernet0 100G link", + "speed": "100000" + }, + "Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": "1", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + } + }, + "TABLE_WITHOUT_YANG": { + "Item1": { + "key11": "value11", + "key12": "value12" + } + } +} diff --git a/tests/generic_config_updater/files/config_db_as_json_invalid.json b/tests/generic_config_updater/files/config_db_as_json_invalid.json new file mode 100644 index 0000000000..a2cfdc91df --- /dev/null +++ b/tests/generic_config_updater/files/config_db_as_json_invalid.json @@ -0,0 +1,7 @@ +{ + "VLAN_MEMBER": { + "Vlan1000|Ethernet8": { + "tagging_mode": "untagged" + } + } +} diff --git a/tests/generic_config_updater/files/cropped_config_db_as_json.json b/tests/generic_config_updater/files/cropped_config_db_as_json.json new file mode 100644 index 0000000000..261e912c71 --- /dev/null +++ b/tests/generic_config_updater/files/cropped_config_db_as_json.json @@ -0,0 +1,86 @@ +{ + "VLAN_MEMBER": { + "Vlan1000|Ethernet0": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet4": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet8": { + "tagging_mode": "untagged" + } + }, + "VLAN": { + "Vlan1000": { + "vlanid": "1000", + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + }, + "ACL_TABLE": { + "NO-NSW-PACL-V4": { + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0" + ] + }, + "DATAACL": { + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + "EVERFLOW": { + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }, 
+ "EVERFLOWV6": { + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet4", + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + }, + "PORT": { + "Ethernet0": { + "alias": "Eth1", + "lanes": "65, 66, 67, 68", + "description": "Ethernet0 100G link", + "speed": "100000" + }, + "Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": "1", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + } + } +} diff --git a/tests/generic_config_updater/files/multi_operation_config_db_patch.json-patch b/tests/generic_config_updater/files/multi_operation_config_db_patch.json-patch new file mode 100644 index 0000000000..8eddd7a19d --- /dev/null +++ b/tests/generic_config_updater/files/multi_operation_config_db_patch.json-patch @@ -0,0 +1,88 @@ +[ + { + "op": "add", + "path": "/PORT/Ethernet3", + "value": { + "alias": "Eth1/4", + "lanes": "68", + "description": "", + "speed": "10000" + } + }, + { + "op": "add", + "path": "/PORT/Ethernet1", + "value": { + "alias": "Eth1/2", + "lanes": "66", + "description": "", + "speed": "10000" + } + }, + { + "op": "add", + "path": "/PORT/Ethernet2", + "value": { + "alias": "Eth1/3", + "lanes": "67", + "description": "", + "speed": "10000" + } + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/lanes", + "value": "65" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/alias", + "value": "Eth1/1" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/description", + "value": "" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/speed", + "value": "10000" + }, + { + "op": "add", + "path": "/VLAN_MEMBER/Vlan100|Ethernet2", + "value": { + "tagging_mode": "untagged" + } + }, + { + "op": "add", + "path": "/VLAN_MEMBER/Vlan100|Ethernet3", + "value": { + "tagging_mode": "untagged" + } + }, + { + "op": "add", + "path": "/VLAN_MEMBER/Vlan100|Ethernet1", + "value": { + "tagging_mode": "untagged" + } + }, + { + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1", + "value": "Ethernet1" + }, + { + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/2", + "value": "Ethernet2" + }, + { + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/3", + "value": "Ethernet3" + } +] diff --git a/tests/generic_config_updater/files/multi_operation_sonic_yang_patch.json-patch b/tests/generic_config_updater/files/multi_operation_sonic_yang_patch.json-patch new file mode 100644 index 0000000000..f7005bb4a0 --- /dev/null +++ b/tests/generic_config_updater/files/multi_operation_sonic_yang_patch.json-patch @@ -0,0 +1,97 @@ +[ + { + "op": "add", + "path": "/sonic-vlan:sonic-vlan/sonic-vlan:VLAN_MEMBER/VLAN_MEMBER_LIST/3", + "value": { + "name": "Vlan100", + "port": "Ethernet2", + "tagging_mode": "untagged" + } + }, + { + "op": "add", + "path": "/sonic-vlan:sonic-vlan/sonic-vlan:VLAN_MEMBER/VLAN_MEMBER_LIST/4", + "value": { + "name": "Vlan100", + "port": "Ethernet3", + "tagging_mode": "untagged" + } + }, + { + "op": "add", + "path": "/sonic-vlan:sonic-vlan/sonic-vlan:VLAN_MEMBER/VLAN_MEMBER_LIST/5", + "value": { + "name": "Vlan100", + "port": "Ethernet1", + "tagging_mode": "untagged" + } + }, + { + "op": "replace", + "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/0/lanes", + "value": "65" + }, + { + "op": "replace", + "path": 
"/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/0/alias", + "value": "Eth1/1" + }, + { + "op": "replace", + "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/0/speed", + "value": 10000 + }, + { + "op": "replace", + "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/0/description", + "value": "" + }, + { + "op": "add", + "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/3", + "value": { + "name": "Ethernet3", + "alias": "Eth1/4", + "lanes": "68", + "description": "", + "speed": 10000 + } + }, + { + "op": "add", + "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/4", + "value": { + "name": "Ethernet1", + "alias": "Eth1/2", + "lanes": "66", + "description": "", + "speed": 10000 + } + }, + { + "op": "add", + "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/5", + "value": { + "name": "Ethernet2", + "alias": "Eth1/3", + "lanes": "67", + "description": "", + "speed": 10000 + } + }, + { + "op": "add", + "path": "/sonic-acl:sonic-acl/sonic-acl:ACL_TABLE/ACL_TABLE_LIST/0/ports/1", + "value": "Ethernet1" + }, + { + "op": "add", + "path": "/sonic-acl:sonic-acl/sonic-acl:ACL_TABLE/ACL_TABLE_LIST/0/ports/2", + "value": "Ethernet2" + }, + { + "op": "add", + "path": "/sonic-acl:sonic-acl/sonic-acl:ACL_TABLE/ACL_TABLE_LIST/0/ports/3", + "value": "Ethernet3" + } +] diff --git a/tests/generic_config_updater/files/single_operation_config_db_patch.json-patch b/tests/generic_config_updater/files/single_operation_config_db_patch.json-patch new file mode 100644 index 0000000000..7cc0967bf0 --- /dev/null +++ b/tests/generic_config_updater/files/single_operation_config_db_patch.json-patch @@ -0,0 +1,6 @@ +[ + { + "op": "remove", + "path": "/VLAN_MEMBER/Vlan1000|Ethernet8" + } +] diff --git a/tests/generic_config_updater/files/single_operation_sonic_yang_patch.json-patch b/tests/generic_config_updater/files/single_operation_sonic_yang_patch.json-patch new file mode 100644 index 0000000000..5a46560496 --- /dev/null +++ b/tests/generic_config_updater/files/single_operation_sonic_yang_patch.json-patch @@ -0,0 +1,6 @@ +[ + { + "op": "remove", + "path": "/sonic-vlan:sonic-vlan/sonic-vlan:VLAN_MEMBER/VLAN_MEMBER_LIST/2" + } +] diff --git a/tests/generic_config_updater/files/sonic_yang_after_multi_patch.json b/tests/generic_config_updater/files/sonic_yang_after_multi_patch.json new file mode 100644 index 0000000000..0c9ddd4546 --- /dev/null +++ b/tests/generic_config_updater/files/sonic_yang_after_multi_patch.json @@ -0,0 +1,153 @@ +{ + "sonic-vlan:sonic-vlan": { + "sonic-vlan:VLAN_MEMBER": { + "VLAN_MEMBER_LIST": [ + { + "name": "Vlan1000", + "port": "Ethernet0", + "tagging_mode": "untagged" + }, + { + "name": "Vlan1000", + "port": "Ethernet4", + "tagging_mode": "untagged" + }, + { + "name": "Vlan1000", + "port": "Ethernet8", + "tagging_mode": "untagged" + }, + { + "name": "Vlan100", + "port": "Ethernet2", + "tagging_mode": "untagged" + }, + { + "name": "Vlan100", + "port": "Ethernet3", + "tagging_mode": "untagged" + }, + { + "name": "Vlan100", + "port": "Ethernet1", + "tagging_mode": "untagged" + } + ] + }, + "sonic-vlan:VLAN": { + "VLAN_LIST": [ + { + "name": "Vlan1000", + "vlanid": 1000, + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + ] + } + }, + "sonic-acl:sonic-acl": { + "sonic-acl:ACL_TABLE": { + "ACL_TABLE_LIST": [ + { + "ACL_TABLE_NAME": "NO-NSW-PACL-V4", + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0", + "Ethernet1", + "Ethernet2", + "Ethernet3" + ] + }, + { + "ACL_TABLE_NAME": "DATAACL", + 
"policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + { + "ACL_TABLE_NAME": "EVERFLOW", + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }, + { + "ACL_TABLE_NAME": "EVERFLOWV6", + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet4", + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "name": "Ethernet0", + "alias": "Eth1/1", + "lanes": "65", + "description": "", + "speed": 10000 + }, + { + "name": "Ethernet4", + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": 1, + "lanes": "29,30,31,32", + "mtu": 9100, + "pfc_asym": "off", + "speed": 40000 + }, + { + "name": "Ethernet8", + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": 2, + "lanes": "33,34,35,36", + "mtu": 9100, + "pfc_asym": "off", + "speed": 40000 + }, + { + "name": "Ethernet3", + "alias": "Eth1/4", + "lanes": "68", + "description": "", + "speed": 10000 + }, + { + "name": "Ethernet1", + "alias": "Eth1/2", + "lanes": "66", + "description": "", + "speed": 10000 + }, + { + "name": "Ethernet2", + "alias": "Eth1/3", + "lanes": "67", + "description": "", + "speed": 10000 + } + ] + } + } +} diff --git a/tests/generic_config_updater/files/sonic_yang_as_json.json b/tests/generic_config_updater/files/sonic_yang_as_json.json new file mode 100644 index 0000000000..37f0fe6ba7 --- /dev/null +++ b/tests/generic_config_updater/files/sonic_yang_as_json.json @@ -0,0 +1,114 @@ +{ + "sonic-vlan:sonic-vlan": { + "sonic-vlan:VLAN_MEMBER": { + "VLAN_MEMBER_LIST": [ + { + "name": "Vlan1000", + "port": "Ethernet0", + "tagging_mode": "untagged" + }, + { + "name": "Vlan1000", + "port": "Ethernet4", + "tagging_mode": "untagged" + }, + { + "name": "Vlan1000", + "port": "Ethernet8", + "tagging_mode": "untagged" + } + ] + }, + "sonic-vlan:VLAN": { + "VLAN_LIST": [ + { + "name": "Vlan1000", + "vlanid": 1000, + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + ] + } + }, + "sonic-acl:sonic-acl": { + "sonic-acl:ACL_TABLE": { + "ACL_TABLE_LIST": [ + { + "ACL_TABLE_NAME": "NO-NSW-PACL-V4", + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0" + ] + }, + { + "ACL_TABLE_NAME": "DATAACL", + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + { + "ACL_TABLE_NAME": "EVERFLOW", + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }, + { + "ACL_TABLE_NAME": "EVERFLOWV6", + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet4", + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "name": "Ethernet0", + "alias": "Eth1", + "lanes": "65, 66, 67, 68", + "description": "Ethernet0 100G link", + "speed": 100000 + }, + { + "name": "Ethernet4", + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": 1, + "lanes": "29,30,31,32", + "mtu": 9100, + "pfc_asym": "off", + "speed": 40000 + }, + { + "name": "Ethernet8", + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": 2, + "lanes": "33,34,35,36", + "mtu": 9100, + "pfc_asym": "off", + "speed": 40000 + } + ] + } + } +} diff --git a/tests/generic_config_updater/files/sonic_yang_as_json_invalid.json 
b/tests/generic_config_updater/files/sonic_yang_as_json_invalid.json new file mode 100644 index 0000000000..4f67d7e6a6 --- /dev/null +++ b/tests/generic_config_updater/files/sonic_yang_as_json_invalid.json @@ -0,0 +1,13 @@ +{ + "sonic-vlan:sonic-vlan": { + "sonic-vlan:VLAN_MEMBER": { + "VLAN_MEMBER_LIST": [ + { + "name": "Vlan1000", + "port": "Ethernet4", + "tagging_mode": "untagged" + } + ] + } + } +} diff --git a/tests/generic_config_updater/files/sonic_yang_as_json_with_unexpected_colons.json b/tests/generic_config_updater/files/sonic_yang_as_json_with_unexpected_colons.json new file mode 100644 index 0000000000..aac97da42b --- /dev/null +++ b/tests/generic_config_updater/files/sonic_yang_as_json_with_unexpected_colons.json @@ -0,0 +1,114 @@ +{ + "sonic-vlan:sonic-vlan": { + "sonic-vlan::VLAN_MEMBER": { + "VLAN_MEMBER_LIST": [ + { + "name": "Vlan1000", + "port": "Ethernet0", + "tagging_mode": "untagged" + }, + { + "name": "Vlan1000", + "port": "Ethernet4", + "tagging_mode": "untagged" + }, + { + "name": "Vlan1000", + "port": "Ethernet8", + "tagging_mode": "untagged" + } + ] + }, + "sonic-vlan::VLAN": { + "VLAN_LIST": [ + { + "name": "Vlan1000", + "vlanid": 1000, + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + ] + } + }, + "sonic-acl:sonic-acl": { + "sonic-vlan::ACL_TABLE": { + "ACL_TABLE_LIST": [ + { + "ACL_TABLE_NAME": "NO-NSW-PACL-V4", + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0" + ] + }, + { + "ACL_TABLE_NAME": "DATAACL", + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + { + "ACL_TABLE_NAME": "EVERFLOW", + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }, + { + "ACL_TABLE_NAME": "EVERFLOWV6", + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet4", + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-vlan::PORT": { + "PORT_LIST": [ + { + "name": "Ethernet0", + "alias": "Eth1", + "lanes": "65, 66, 67, 68", + "description": "Ethernet0 100G link", + "speed": 100000 + }, + { + "name": "Ethernet4", + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": 1, + "lanes": "29,30,31,32", + "mtu": 9100, + "pfc_asym": "off", + "speed": 40000 + }, + { + "name": "Ethernet8", + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": 2, + "lanes": "33,34,35,36", + "mtu": 9100, + "pfc_asym": "off", + "speed": 40000 + } + ] + } + } +} diff --git a/tests/generic_config_updater/files/sonic_yang_as_json_without_colons.json b/tests/generic_config_updater/files/sonic_yang_as_json_without_colons.json new file mode 100644 index 0000000000..ad4ab15f4a --- /dev/null +++ b/tests/generic_config_updater/files/sonic_yang_as_json_without_colons.json @@ -0,0 +1,114 @@ +{ + "sonic-vlan:sonic-vlan": { + "VLAN_MEMBER": { + "VLAN_MEMBER_LIST": [ + { + "name": "Vlan1000", + "port": "Ethernet0", + "tagging_mode": "untagged" + }, + { + "name": "Vlan1000", + "port": "Ethernet4", + "tagging_mode": "untagged" + }, + { + "name": "Vlan1000", + "port": "Ethernet8", + "tagging_mode": "untagged" + } + ] + }, + "VLAN": { + "VLAN_LIST": [ + { + "name": "Vlan1000", + "vlanid": 1000, + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + ] + } + }, + "sonic-acl:sonic-acl": { + "ACL_TABLE": { + "ACL_TABLE_LIST": [ + { + "ACL_TABLE_NAME": "NO-NSW-PACL-V4", + "type": "L3", + 
"policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0" + ] + }, + { + "ACL_TABLE_NAME": "DATAACL", + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + { + "ACL_TABLE_NAME": "EVERFLOW", + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }, + { + "ACL_TABLE_NAME": "EVERFLOWV6", + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet4", + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + ] + } + }, + "sonic-port:sonic-port": { + "PORT": { + "PORT_LIST": [ + { + "name": "Ethernet0", + "alias": "Eth1", + "lanes": "65, 66, 67, 68", + "description": "Ethernet0 100G link", + "speed": 100000 + }, + { + "name": "Ethernet4", + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": 1, + "lanes": "29,30,31,32", + "mtu": 9100, + "pfc_asym": "off", + "speed": 40000 + }, + { + "name": "Ethernet8", + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": 2, + "lanes": "33,34,35,36", + "mtu": 9100, + "pfc_asym": "off", + "speed": 40000 + } + ] + } + } +} diff --git a/tests/generic_config_updater/generic_updater_test.py b/tests/generic_config_updater/generic_updater_test.py new file mode 100644 index 0000000000..f201280062 --- /dev/null +++ b/tests/generic_config_updater/generic_updater_test.py @@ -0,0 +1,766 @@ +import json +import os +import shutil +import unittest +from unittest.mock import MagicMock, Mock, call +from .gutest_helpers import create_side_effect_dict, Files + +import generic_config_updater.generic_updater as gu + +# import sys +# sys.path.insert(0,'../../generic_config_updater') +# import generic_updater as gu + +class TestPatchApplier(unittest.TestCase): + def test_apply__invalid_patch_updating_tables_without_yang_models__failure(self): + # Arrange + patch_applier = self.__create_patch_applier(valid_patch_only_tables_with_yang_models=False) + + # Act and assert + self.assertRaises(ValueError, patch_applier.apply, Files.MULTI_OPERATION_CONFIG_DB_PATCH) + + def test_apply__invalid_config_db__failure(self): + # Arrange + patch_applier = self.__create_patch_applier(valid_config_db=False) + + # Act and assert + self.assertRaises(ValueError, patch_applier.apply, Files.MULTI_OPERATION_CONFIG_DB_PATCH) + + def test_apply__json_not_fully_updated__failure(self): + # Arrange + patch_applier = self.__create_patch_applier(verified_same_config=False) + + # Act and assert + self.assertRaises(gu.GenericConfigUpdaterError, patch_applier.apply, Files.MULTI_OPERATION_CONFIG_DB_PATCH) + + def test_apply__no_errors__update_successful(self): + # Arrange + changes = [Mock(), Mock()] + patch_applier = self.__create_patch_applier(changes) + + # Act + patch_applier.apply(Files.MULTI_OPERATION_CONFIG_DB_PATCH) + + # Assert + patch_applier.patch_wrapper.validate_config_db_patch_has_yang_models.assert_has_calls( + [call(Files.MULTI_OPERATION_CONFIG_DB_PATCH)]) + patch_applier.config_wrapper.get_config_db_as_json.assert_has_calls([call(), call()]) + patch_applier.patch_wrapper.simulate_patch.assert_has_calls( + [call(Files.MULTI_OPERATION_CONFIG_DB_PATCH, Files.CONFIG_DB_AS_JSON)]) + patch_applier.config_wrapper.validate_config_db_config.assert_has_calls( + [call(Files.CONFIG_DB_AFTER_MULTI_PATCH)]) + patch_applier.patchsorter.sort.assert_has_calls([call(Files.MULTI_OPERATION_CONFIG_DB_PATCH)]) + patch_applier.changeapplier.apply.assert_has_calls([call(changes[0]), call(changes[1])]) + 
patch_applier.patch_wrapper.verify_same_json.assert_has_calls( + [call(Files.CONFIG_DB_AFTER_MULTI_PATCH, Files.CONFIG_DB_AFTER_MULTI_PATCH)]) + + def __create_patch_applier(self, + changes=None, + valid_patch_only_tables_with_yang_models=True, + valid_config_db=True, + verified_same_config=True): + config_wrapper = Mock() + config_wrapper.get_config_db_as_json.side_effect = \ + [Files.CONFIG_DB_AS_JSON, Files.CONFIG_DB_AFTER_MULTI_PATCH] + config_wrapper.validate_config_db_config.side_effect = \ + create_side_effect_dict({(str(Files.CONFIG_DB_AFTER_MULTI_PATCH),): valid_config_db}) + + patch_wrapper = Mock() + patch_wrapper.validate_config_db_patch_has_yang_models.side_effect = \ + create_side_effect_dict( + {(str(Files.MULTI_OPERATION_CONFIG_DB_PATCH),): valid_patch_only_tables_with_yang_models}) + patch_wrapper.simulate_patch.side_effect = \ + create_side_effect_dict( + {(str(Files.MULTI_OPERATION_CONFIG_DB_PATCH), str(Files.CONFIG_DB_AS_JSON)): + Files.CONFIG_DB_AFTER_MULTI_PATCH}) + patch_wrapper.verify_same_json.side_effect = \ + create_side_effect_dict( + {(str(Files.CONFIG_DB_AFTER_MULTI_PATCH), str(Files.CONFIG_DB_AFTER_MULTI_PATCH)): + verified_same_config}) + + changes = [Mock(), Mock()] if not changes else changes + patchsorter = Mock() + patchsorter.sort.side_effect = \ + create_side_effect_dict({(str(Files.MULTI_OPERATION_CONFIG_DB_PATCH),): changes}) + + changeapplier = Mock() + changeapplier.apply.side_effect = create_side_effect_dict({(str(changes[0]),): 0, (str(changes[1]),): 0}) + + return gu.PatchApplier(patchsorter, changeapplier, config_wrapper, patch_wrapper) + +class TestConfigReplacer(unittest.TestCase): + def test_replace__invalid_config_db__failure(self): + # Arrange + config_replacer = self.__create_config_replacer(valid_config_db=False) + + # Act and assert + self.assertRaises(ValueError, config_replacer.replace, Files.CONFIG_DB_AFTER_MULTI_PATCH) + + def test_replace__json_not_fully_updated__failure(self): + # Arrange + config_replacer = self.__create_config_replacer(verified_same_config=False) + + # Act and assert + self.assertRaises(gu.GenericConfigUpdaterError, config_replacer.replace, Files.CONFIG_DB_AFTER_MULTI_PATCH) + + def test_replace__no_errors__update_successful(self): + # Arrange + config_replacer = self.__create_config_replacer() + + # Act + config_replacer.replace(Files.CONFIG_DB_AFTER_MULTI_PATCH) + + # Assert + config_replacer.config_wrapper.validate_config_db_config.assert_has_calls( + [call(Files.CONFIG_DB_AFTER_MULTI_PATCH)]) + config_replacer.config_wrapper.get_config_db_as_json.assert_has_calls([call(), call()]) + config_replacer.patch_wrapper.generate_patch.assert_has_calls( + [call(Files.CONFIG_DB_AS_JSON, Files.CONFIG_DB_AFTER_MULTI_PATCH)]) + config_replacer.patch_applier.apply.assert_has_calls([call(Files.MULTI_OPERATION_CONFIG_DB_PATCH)]) + config_replacer.patch_wrapper.verify_same_json.assert_has_calls( + [call(Files.CONFIG_DB_AFTER_MULTI_PATCH, Files.CONFIG_DB_AFTER_MULTI_PATCH)]) + + def __create_config_replacer(self, changes=None, valid_config_db=True, verified_same_config=True): + config_wrapper = Mock() + config_wrapper.validate_config_db_config.side_effect = \ + create_side_effect_dict({(str(Files.CONFIG_DB_AFTER_MULTI_PATCH),): valid_config_db}) + config_wrapper.get_config_db_as_json.side_effect = \ + [Files.CONFIG_DB_AS_JSON, Files.CONFIG_DB_AFTER_MULTI_PATCH] + + patch_wrapper = Mock() + patch_wrapper.generate_patch.side_effect = \ + create_side_effect_dict( + {(str(Files.CONFIG_DB_AS_JSON), 
str(Files.CONFIG_DB_AFTER_MULTI_PATCH)): + Files.MULTI_OPERATION_CONFIG_DB_PATCH}) + patch_wrapper.verify_same_json.side_effect = \ + create_side_effect_dict( + {(str(Files.CONFIG_DB_AFTER_MULTI_PATCH), str(Files.CONFIG_DB_AFTER_MULTI_PATCH)): \ + verified_same_config}) + + changes = [Mock(), Mock()] if not changes else changes + patchsorter = Mock() + patchsorter.sort.side_effect = create_side_effect_dict({(str(Files.MULTI_OPERATION_CONFIG_DB_PATCH),): \ + changes}) + + patch_applier = Mock() + patch_applier.apply.side_effect = create_side_effect_dict({(str(Files.MULTI_OPERATION_CONFIG_DB_PATCH),): 0}) + + return gu.ConfigReplacer(patch_applier, config_wrapper, patch_wrapper) + +class TestFileSystemConfigRollbacker(unittest.TestCase): + def setUp(self): + self.checkpoints_dir = os.path.join(os.getcwd(),"checkpoints") + self.checkpoint_ext = ".cp.json" + self.any_checkpoint_name = "anycheckpoint" + self.any_other_checkpoint_name = "anyothercheckpoint" + self.any_config = {} + self.clean_up() + + def tearDown(self): + self.clean_up() + + def test_rollback__checkpoint_does_not_exist__failure(self): + # Arrange + rollbacker = self.create_rollbacker() + + # Act and assert + self.assertRaises(ValueError, rollbacker.rollback, "NonExistingCheckpoint") + + def test_rollback__no_errors__success(self): + # Arrange + self.create_checkpoints_dir() + self.add_checkpoint(self.any_checkpoint_name, self.any_config) + rollbacker = self.create_rollbacker() + + # Act + rollbacker.rollback(self.any_checkpoint_name) + + # Assert + rollbacker.config_replacer.replace.assert_has_calls([call(self.any_config)]) + + def test_checkpoint__checkpoints_dir_does_not_exist__checkpoint_created(self): + # Arrange + rollbacker = self.create_rollbacker() + self.assertFalse(os.path.isdir(self.checkpoints_dir)) + + # Act + rollbacker.checkpoint(self.any_checkpoint_name) + + # Assert + self.assertTrue(os.path.isdir(self.checkpoints_dir)) + self.assertEqual(self.any_config, self.get_checkpoint(self.any_checkpoint_name)) + + def test_checkpoint__config_not_valid__failure(self): + # Arrange + rollbacker = self.create_rollbacker(valid_config=False) + + # Act and assert + self.assertRaises(ValueError, rollbacker.checkpoint, self.any_checkpoint_name) + + def test_checkpoint__checkpoints_dir_exists__checkpoint_created(self): + # Arrange + self.create_checkpoints_dir() + rollbacker = self.create_rollbacker() + + # Act + rollbacker.checkpoint(self.any_checkpoint_name) + + # Assert + self.assertEqual(self.any_config, self.get_checkpoint(self.any_checkpoint_name)) + + def test_list_checkpoints__checkpoints_dir_does_not_exist__empty_list(self): + # Arrange + rollbacker = self.create_rollbacker() + self.assertFalse(os.path.isdir(self.checkpoints_dir)) + expected = [] + + # Act + actual = rollbacker.list_checkpoints() + + # Assert + # 'assertCountEqual' does check same count, same elements ignoring order + self.assertCountEqual(expected, actual) + + def test_list_checkpoints__checkpoints_dir_exist_but_no_files__empty_list(self): + # Arrange + self.create_checkpoints_dir() + rollbacker = self.create_rollbacker() + expected = [] + + # Act + actual = rollbacker.list_checkpoints() + + # Assert + # 'assertCountEqual' does check same count, same elements ignoring order + self.assertCountEqual(expected, actual) + + def test_list_checkpoints__checkpoints_dir_has_multiple_files__multiple_files(self): + # Arrange + self.create_checkpoints_dir() + self.add_checkpoint(self.any_checkpoint_name, self.any_config) + 
self.add_checkpoint(self.any_other_checkpoint_name, self.any_config) + rollbacker = self.create_rollbacker() + expected = [self.any_checkpoint_name, self.any_other_checkpoint_name] + + # Act + actual = rollbacker.list_checkpoints() + + # Assert + # 'assertCountEqual' does check same count, same elements ignoring order + self.assertCountEqual(expected, actual) + + def test_list_checkpoints__checkpoints_names_have_special_characters__multiple_files(self): + # Arrange + self.create_checkpoints_dir() + self.add_checkpoint("check.point1", self.any_config) + self.add_checkpoint(".checkpoint2", self.any_config) + self.add_checkpoint("checkpoint3.", self.any_config) + rollbacker = self.create_rollbacker() + expected = ["check.point1", ".checkpoint2", "checkpoint3."] + + # Act + actual = rollbacker.list_checkpoints() + + # Assert + # 'assertCountEqual' does check same count, same elements ignoring order + self.assertCountEqual(expected, actual) + + def test_delete_checkpoint__checkpoint_does_not_exist__failure(self): + # Arrange + rollbacker = self.create_rollbacker() + + # Act and assert + self.assertRaises(ValueError, rollbacker.delete_checkpoint, self.any_checkpoint_name) + + def test_delete_checkpoint__checkpoint_exist__success(self): + # Arrange + self.create_checkpoints_dir() + self.add_checkpoint(self.any_checkpoint_name, self.any_config) + rollbacker = self.create_rollbacker() + + # Act + rollbacker.delete_checkpoint(self.any_checkpoint_name) + + # Assert + self.assertFalse(self.check_checkpoint_exists(self.any_checkpoint_name)) + + def test_multiple_operations(self): + rollbacker = self.create_rollbacker() + + # 'assertCountEqual' does check same count, same elements ignoring order + self.assertCountEqual([], rollbacker.list_checkpoints()) + + rollbacker.checkpoint(self.any_checkpoint_name) + self.assertCountEqual([self.any_checkpoint_name], rollbacker.list_checkpoints()) + self.assertEqual(self.any_config, self.get_checkpoint(self.any_checkpoint_name)) + + rollbacker.rollback(self.any_checkpoint_name) + rollbacker.config_replacer.replace.assert_has_calls([call(self.any_config)]) + + rollbacker.checkpoint(self.any_other_checkpoint_name) + self.assertCountEqual([self.any_checkpoint_name, self.any_other_checkpoint_name], rollbacker.list_checkpoints()) + self.assertEqual(self.any_config, self.get_checkpoint(self.any_other_checkpoint_name)) + + rollbacker.delete_checkpoint(self.any_checkpoint_name) + self.assertCountEqual([self.any_other_checkpoint_name], rollbacker.list_checkpoints()) + + rollbacker.delete_checkpoint(self.any_other_checkpoint_name) + self.assertCountEqual([], rollbacker.list_checkpoints()) + + def clean_up(self): + if os.path.isdir(self.checkpoints_dir): + shutil.rmtree(self.checkpoints_dir) + + def create_checkpoints_dir(self): + os.makedirs(self.checkpoints_dir) + + def add_checkpoint(self, name, json_content): + path=os.path.join(self.checkpoints_dir, f"{name}{self.checkpoint_ext}") + with open(path, "w") as fh: + fh.write(json.dumps(json_content)) + + def get_checkpoint(self, name): + path=os.path.join(self.checkpoints_dir, f"{name}{self.checkpoint_ext}") + with open(path) as fh: + text = fh.read() + return json.loads(text) + + def check_checkpoint_exists(self, name): + path=os.path.join(self.checkpoints_dir, f"{name}{self.checkpoint_ext}") + return os.path.isfile(path) + + def create_rollbacker(self, valid_config=True): + replacer = Mock() + replacer.replace.side_effect = create_side_effect_dict({(str(self.any_config),): 0}) + + config_wrapper = Mock() + 
config_wrapper.get_config_db_as_json.return_value = self.any_config
+        config_wrapper.validate_config_db_config.return_value = valid_config
+
+        return gu.FileSystemConfigRollbacker(checkpoints_dir=self.checkpoints_dir,
+                                             config_replacer=replacer,
+                                             config_wrapper=config_wrapper)
+
+class TestGenericUpdateFactory(unittest.TestCase):
+    def setUp(self):
+        self.any_verbose = True
+        self.any_dry_run = True
+
+    def test_create_patch_applier__invalid_config_format__failure(self):
+        # Arrange
+        factory = gu.GenericUpdateFactory()
+
+        # Act and assert
+        self.assertRaises(
+            ValueError, factory.create_patch_applier, "INVALID_FORMAT", self.any_verbose, self.any_dry_run)
+
+    def test_create_patch_applier__different_options(self):
+        # Arrange
+        options = [
+            {"verbose": {True: None, False: None}},
+            {"dry_run": {True: None, False: gu.ConfigLockDecorator}},
+            {
+                "config_format": {
+                    gu.ConfigFormat.SONICYANG: gu.SonicYangDecorator,
+                    gu.ConfigFormat.CONFIGDB: None,
+                }
+            },
+        ]
+
+        # Act and assert
+        self.recursively_test_create_func(options, 0, {}, [], self.validate_create_patch_applier)
+
+    def test_create_config_replacer__invalid_config_format__failure(self):
+        # Arrange
+        factory = gu.GenericUpdateFactory()
+
+        # Act and assert
+        self.assertRaises(
+            ValueError, factory.create_config_replacer, "INVALID_FORMAT", self.any_verbose, self.any_dry_run)
+
+    def test_create_config_replacer__different_options(self):
+        # Arrange
+        options = [
+            {"verbose": {True: None, False: None}},
+            {"dry_run": {True: None, False: gu.ConfigLockDecorator}},
+            {
+                "config_format": {
+                    gu.ConfigFormat.SONICYANG: gu.SonicYangDecorator,
+                    gu.ConfigFormat.CONFIGDB: None,
+                }
+            },
+        ]
+
+        # Act and assert
+        self.recursively_test_create_func(options, 0, {}, [], self.validate_create_config_replacer)
+
+    def test_create_config_rollbacker__different_options(self):
+        # Arrange
+        options = [
+            {"verbose": {True: None, False: None}},
+            {"dry_run": {True: None, False: gu.ConfigLockDecorator}}
+        ]
+
+        # Act and assert
+        self.recursively_test_create_func(options, 0, {}, [], self.validate_create_config_rollbacker)
+
+    def recursively_test_create_func(self, options, cur_option, params, expected_decorators, create_func):
+        if cur_option == len(options):
+            create_func(params, expected_decorators)
+            return
+
+        param = list(options[cur_option].keys())[0]
+        for key in options[cur_option][param]:
+            params[param] = key
+            decorator = options[cur_option][param][key]
+            if decorator is not None:
+                expected_decorators.append(decorator)
+            self.recursively_test_create_func(options, cur_option+1, params, expected_decorators, create_func)
+            if decorator is not None:
+                expected_decorators.pop()
+
+    def validate_create_patch_applier(self, params, expected_decorators):
+        factory = gu.GenericUpdateFactory()
+        patch_applier = factory.create_patch_applier(params["config_format"], params["verbose"], params["dry_run"])
+        for decorator_type in expected_decorators:
+            self.assertIsInstance(patch_applier, decorator_type)
+
+            patch_applier = patch_applier.decorated_patch_applier
+
+        self.assertIsInstance(patch_applier, gu.PatchApplier)
+        if params["dry_run"]:
+            self.assertIsInstance(patch_applier.config_wrapper, gu.DryRunConfigWrapper)
+        else:
+            self.assertIsInstance(patch_applier.config_wrapper, gu.ConfigWrapper)
+
+    def validate_create_config_replacer(self, params, expected_decorators):
+        factory = gu.GenericUpdateFactory()
+        config_replacer = factory.create_config_replacer(params["config_format"], params["verbose"], params["dry_run"])
+        for decorator_type in expected_decorators:
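+            # Decorators nest outermost-first; unwrap one layer per iteration.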
+ self.assertIsInstance(config_replacer, decorator_type) + + config_replacer = config_replacer.decorated_config_replacer + + self.assertIsInstance(config_replacer, gu.ConfigReplacer) + if params["dry_run"]: + self.assertIsInstance(config_replacer.config_wrapper, gu.DryRunConfigWrapper) + self.assertIsInstance(config_replacer.patch_applier.config_wrapper, gu.DryRunConfigWrapper) + else: + self.assertIsInstance(config_replacer.config_wrapper, gu.ConfigWrapper) + self.assertIsInstance(config_replacer.patch_applier.config_wrapper, gu.ConfigWrapper) + + def validate_create_config_rollbacker(self, params, expected_decorators): + factory = gu.GenericUpdateFactory() + config_rollbacker = factory.create_config_rollbacker(params["verbose"], params["dry_run"]) + for decorator_type in expected_decorators: + self.assertIsInstance(config_rollbacker, decorator_type) + + config_rollbacker = config_rollbacker.decorated_config_rollbacker + + self.assertIsInstance(config_rollbacker, gu.FileSystemConfigRollbacker) + if params["dry_run"]: + self.assertIsInstance(config_rollbacker.config_wrapper, gu.DryRunConfigWrapper) + self.assertIsInstance(config_rollbacker.config_replacer.config_wrapper, gu.DryRunConfigWrapper) + self.assertIsInstance( + config_rollbacker.config_replacer.patch_applier.config_wrapper, gu.DryRunConfigWrapper) + else: + self.assertIsInstance(config_rollbacker.config_wrapper, gu.ConfigWrapper) + self.assertIsInstance(config_rollbacker.config_replacer.config_wrapper, gu.ConfigWrapper) + self.assertIsInstance( + config_rollbacker.config_replacer.patch_applier.config_wrapper, gu.ConfigWrapper) + +class TestGenericUpdater(unittest.TestCase): + def setUp(self): + self.any_checkpoint_name = "anycheckpoint" + self.any_other_checkpoint_name = "anyothercheckpoint" + self.any_checkpoints_list = [self.any_checkpoint_name, self.any_other_checkpoint_name] + self.any_config_format = gu.ConfigFormat.SONICYANG + self.any_verbose = True + self.any_dry_run = True + + def test_apply_patch__creates_applier_and_apply(self): + # Arrange + patch_applier = Mock() + patch_applier.apply.side_effect = create_side_effect_dict({(str(Files.SINGLE_OPERATION_SONIC_YANG_PATCH),): 0}) + + factory = Mock() + factory.create_patch_applier.side_effect = \ + create_side_effect_dict( + {(str(self.any_config_format), str(self.any_verbose), str(self.any_dry_run),): patch_applier}) + + generic_updater = gu.GenericUpdater(factory) + + # Act + generic_updater.apply_patch( + Files.SINGLE_OPERATION_SONIC_YANG_PATCH, self.any_config_format, self.any_verbose, self.any_dry_run) + + # Assert + patch_applier.apply.assert_has_calls([call(Files.SINGLE_OPERATION_SONIC_YANG_PATCH)]) + + def test_replace__creates_replacer_and_replace(self): + # Arrange + config_replacer = Mock() + config_replacer.replace.side_effect = create_side_effect_dict({(str(Files.SONIC_YANG_AS_JSON),): 0}) + + factory = Mock() + factory.create_config_replacer.side_effect = \ + create_side_effect_dict( + {(str(self.any_config_format), str(self.any_verbose), str(self.any_dry_run),): config_replacer}) + + generic_updater = gu.GenericUpdater(factory) + + # Act + generic_updater.replace(Files.SONIC_YANG_AS_JSON, self.any_config_format, self.any_verbose, self.any_dry_run) + + # Assert + config_replacer.replace.assert_has_calls([call(Files.SONIC_YANG_AS_JSON)]) + + def test_rollback__creates_rollbacker_and_rollback(self): + # Arrange + config_rollbacker = Mock() + config_rollbacker.rollback.side_effect = create_side_effect_dict({(self.any_checkpoint_name,): 0}) + + factory = Mock() 
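+        # create_side_effect_dict (defined in gutest_helpers) keys the mock's
+        # replies on the stringified call arguments, so the factory answers
+        # only for the exact (verbose, dry_run) pair this test passes in.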
+ factory.create_config_rollbacker.side_effect = \ + create_side_effect_dict({(str(self.any_verbose), str(self.any_dry_run),): config_rollbacker}) + + generic_updater = gu.GenericUpdater(factory) + + # Act + generic_updater.rollback(self.any_checkpoint_name, self.any_verbose, self.any_dry_run) + + # Assert + config_rollbacker.rollback.assert_has_calls([call(self.any_checkpoint_name)]) + + def test_checkpoint__creates_rollbacker_and_checkpoint(self): + # Arrange + config_rollbacker = Mock() + config_rollbacker.checkpoint.side_effect = create_side_effect_dict({(self.any_checkpoint_name,): 0}) + + factory = Mock() + factory.create_config_rollbacker.side_effect = \ + create_side_effect_dict({(str(self.any_verbose),): config_rollbacker}) + + generic_updater = gu.GenericUpdater(factory) + + # Act + generic_updater.checkpoint(self.any_checkpoint_name, self.any_verbose) + + # Assert + config_rollbacker.checkpoint.assert_has_calls([call(self.any_checkpoint_name)]) + + def test_delete_checkpoint__creates_rollbacker_and_deletes_checkpoint(self): + # Arrange + config_rollbacker = Mock() + config_rollbacker.delete_checkpoint.side_effect = create_side_effect_dict({(self.any_checkpoint_name,): 0}) + + factory = Mock() + factory.create_config_rollbacker.side_effect = \ + create_side_effect_dict({(str(self.any_verbose),): config_rollbacker}) + + generic_updater = gu.GenericUpdater(factory) + + # Act + generic_updater.delete_checkpoint(self.any_checkpoint_name, self.any_verbose) + + # Assert + config_rollbacker.delete_checkpoint.assert_has_calls([call(self.any_checkpoint_name)]) + + def test_list_checkpoints__creates_rollbacker_and_list_checkpoints(self): + # Arrange + config_rollbacker = Mock() + config_rollbacker.list_checkpoints.return_value = self.any_checkpoints_list + + factory = Mock() + factory.create_config_rollbacker.side_effect = \ + create_side_effect_dict({(str(self.any_verbose),): config_rollbacker}) + + generic_updater = gu.GenericUpdater(factory) + + expected = self.any_checkpoints_list + + # Act + actual = generic_updater.list_checkpoints(self.any_verbose) + + # Assert + self.assertCountEqual(expected, actual) + +class TestDecorator(unittest.TestCase): + def setUp(self): + self.decorated_patch_applier = Mock() + self.decorated_config_replacer = Mock() + self.decorated_config_rollbacker = Mock() + + self.any_checkpoint_name = "anycheckpoint" + self.any_other_checkpoint_name = "anyothercheckpoint" + self.any_checkpoints_list = [self.any_checkpoint_name, self.any_other_checkpoint_name] + self.decorated_config_rollbacker.list_checkpoints.return_value = self.any_checkpoints_list + + self.decorator = gu.Decorator( + self.decorated_patch_applier, self.decorated_config_replacer, self.decorated_config_rollbacker) + + def test_apply__calls_decorated_applier(self): + # Act + self.decorator.apply(Files.SINGLE_OPERATION_SONIC_YANG_PATCH) + + # Assert + self.decorated_patch_applier.apply.assert_has_calls([call(Files.SINGLE_OPERATION_SONIC_YANG_PATCH)]) + + def test_replace__calls_decorated_replacer(self): + # Act + self.decorator.replace(Files.SONIC_YANG_AS_JSON) + + # Assert + self.decorated_config_replacer.replace.assert_has_calls([call(Files.SONIC_YANG_AS_JSON)]) + + def test_rollback__calls_decorated_rollbacker(self): + # Act + self.decorator.rollback(self.any_checkpoint_name) + + # Assert + self.decorated_config_rollbacker.rollback.assert_has_calls([call(self.any_checkpoint_name)]) + + def test_checkpoint__calls_decorated_rollbacker(self): + # Act + 
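# Decorator.checkpoint() should simply delegate to the wrapped rollbacker;
+        # the assertion below verifies the pass-through call.
+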
self.decorator.checkpoint(self.any_checkpoint_name) + + # Assert + self.decorated_config_rollbacker.checkpoint.assert_has_calls([call(self.any_checkpoint_name)]) + + def test_delete_checkpoint__calls_decorated_rollbacker(self): + # Act + self.decorator.delete_checkpoint(self.any_checkpoint_name) + + # Assert + self.decorated_config_rollbacker.delete_checkpoint.assert_has_calls([call(self.any_checkpoint_name)]) + + def test_list_checkpoints__calls_decorated_rollbacker(self): + # Arrange + expected = self.any_checkpoints_list + + # Act + actual = self.decorator.list_checkpoints() + + # Assert + self.decorated_config_rollbacker.list_checkpoints.assert_called_once() + self.assertListEqual(expected, actual) + +class TestSonicYangDecorator(unittest.TestCase): + def test_apply__converts_to_config_db_and_calls_decorated_class(self): + # Arrange + sonic_yang_decorator = self.__create_sonic_yang_decorator() + + # Act + sonic_yang_decorator.apply(Files.SINGLE_OPERATION_SONIC_YANG_PATCH) + + # Assert + sonic_yang_decorator.patch_wrapper.convert_sonic_yang_patch_to_config_db_patch.assert_has_calls( + [call(Files.SINGLE_OPERATION_SONIC_YANG_PATCH)]) + sonic_yang_decorator.decorated_patch_applier.apply.assert_has_calls( + [call(Files.SINGLE_OPERATION_CONFIG_DB_PATCH)]) + + def test_replace__converts_to_config_db_and_calls_decorated_class(self): + # Arrange + sonic_yang_decorator = self.__create_sonic_yang_decorator() + + # Act + sonic_yang_decorator.replace(Files.SONIC_YANG_AS_JSON) + + # Assert + sonic_yang_decorator.config_wrapper.convert_sonic_yang_to_config_db.assert_has_calls( + [call(Files.SONIC_YANG_AS_JSON)]) + sonic_yang_decorator.decorated_config_replacer.replace.assert_has_calls([call(Files.CONFIG_DB_AS_JSON)]) + + def __create_sonic_yang_decorator(self): + patch_applier = Mock() + patch_applier.apply.side_effect = create_side_effect_dict({(str(Files.SINGLE_OPERATION_CONFIG_DB_PATCH),): 0}) + + patch_wrapper = Mock() + patch_wrapper.convert_sonic_yang_patch_to_config_db_patch.side_effect = \ + create_side_effect_dict({(str(Files.SINGLE_OPERATION_SONIC_YANG_PATCH),): \ + Files.SINGLE_OPERATION_CONFIG_DB_PATCH}) + + config_replacer = Mock() + config_replacer.replace.side_effect = create_side_effect_dict({(str(Files.CONFIG_DB_AS_JSON),): 0}) + + config_wrapper = Mock() + config_wrapper.convert_sonic_yang_to_config_db.side_effect = \ + create_side_effect_dict({(str(Files.SONIC_YANG_AS_JSON),): Files.CONFIG_DB_AS_JSON}) + + return gu.SonicYangDecorator(decorated_patch_applier=patch_applier, + decorated_config_replacer=config_replacer, + patch_wrapper=patch_wrapper, + config_wrapper=config_wrapper) + +class TestConfigLockDecorator(unittest.TestCase): + def setUp(self): + self.any_checkpoint_name = "anycheckpoint" + + def test_apply__lock_config(self): + # Arrange + config_lock_decorator = self.__create_config_lock_decorator() + + # Act + config_lock_decorator.apply(Files.SINGLE_OPERATION_SONIC_YANG_PATCH) + + # Assert + config_lock_decorator.config_lock.acquire_lock.assert_called_once() + config_lock_decorator.decorated_patch_applier.apply.assert_has_calls( + [call(Files.SINGLE_OPERATION_SONIC_YANG_PATCH)]) + config_lock_decorator.config_lock.release_lock.assert_called_once() + + def test_replace__lock_config(self): + # Arrange + config_lock_decorator = self.__create_config_lock_decorator() + + # Act + config_lock_decorator.replace(Files.SONIC_YANG_AS_JSON) + + # Assert + config_lock_decorator.config_lock.acquire_lock.assert_called_once() + 
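# The lock must bracket the delegated call: acquired once before the
+        # replace is forwarded, released once after it returns.
+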
config_lock_decorator.decorated_config_replacer.replace.assert_has_calls([call(Files.SONIC_YANG_AS_JSON)]) + config_lock_decorator.config_lock.release_lock.assert_called_once() + + def test_rollback__lock_config(self): + # Arrange + config_lock_decorator = self.__create_config_lock_decorator() + + # Act + config_lock_decorator.rollback(self.any_checkpoint_name) + + # Assert + config_lock_decorator.config_lock.acquire_lock.assert_called_once() + config_lock_decorator.decorated_config_rollbacker.rollback.assert_has_calls([call(self.any_checkpoint_name)]) + config_lock_decorator.config_lock.release_lock.assert_called_once() + + def test_checkpoint__lock_config(self): + # Arrange + config_lock_decorator = self.__create_config_lock_decorator() + + # Act + config_lock_decorator.checkpoint(self.any_checkpoint_name) + + # Assert + config_lock_decorator.config_lock.acquire_lock.assert_called_once() + config_lock_decorator.decorated_config_rollbacker.checkpoint.assert_has_calls([call(self.any_checkpoint_name)]) + config_lock_decorator.config_lock.release_lock.assert_called_once() + + def __create_config_lock_decorator(self): + config_lock = Mock() + + patch_applier = Mock() + patch_applier.apply.side_effect = create_side_effect_dict({(str(Files.SINGLE_OPERATION_SONIC_YANG_PATCH),): 0}) + + config_replacer = Mock() + config_replacer.replace.side_effect = create_side_effect_dict({(str(Files.SONIC_YANG_AS_JSON),): 0}) + + config_rollbacker = Mock() + config_rollbacker.rollback.side_effect = create_side_effect_dict({(self.any_checkpoint_name,): 0}) + config_rollbacker.checkpoint.side_effect = create_side_effect_dict({(self.any_checkpoint_name,): 0}) + + config_rollbacker.delete_checkpoint.side_effect = create_side_effect_dict({(self.any_checkpoint_name,): 0}) + + return gu.ConfigLockDecorator(config_lock=config_lock, + decorated_patch_applier=patch_applier, + decorated_config_replacer=config_replacer, + decorated_config_rollbacker=config_rollbacker) diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py new file mode 100644 index 0000000000..f18ad45799 --- /dev/null +++ b/tests/generic_config_updater/gu_common_test.py @@ -0,0 +1,335 @@ +import json +import jsonpatch +import unittest +from unittest.mock import MagicMock, Mock +from .gutest_helpers import create_side_effect_dict, Files + +import generic_config_updater.gu_common as gu_common + +# import sys +# sys.path.insert(0,'../../generic_config_updater') +# import gu_common + +class TestConfigWrapper(unittest.TestCase): + def setUp(self): + self.config_wrapper_mock = gu_common.ConfigWrapper() + self.config_wrapper_mock.get_config_db_as_json=MagicMock(return_value=Files.CONFIG_DB_AS_JSON) + + def test_ctor__default_values_set(self): + config_wrapper = gu_common.ConfigWrapper() + + self.assertEqual("/usr/local/yang-models", gu_common.YANG_DIR) + + def test_get_sonic_yang_as_json__returns_sonic_yang_as_json(self): + # Arrange + config_wrapper = self.config_wrapper_mock + expected = Files.SONIC_YANG_AS_JSON + + # Act + actual = config_wrapper.get_sonic_yang_as_json() + + # Assert + self.assertDictEqual(expected, actual) + + def test_convert_config_db_to_sonic_yang__empty_config_db__returns_empty_sonic_yang(self): + # Arrange + config_wrapper = gu_common.ConfigWrapper() + expected = {} + + # Act + actual = config_wrapper.convert_config_db_to_sonic_yang({}) + + # Assert + self.assertDictEqual(expected, actual) + + def 
test_convert_config_db_to_sonic_yang__non_empty_config_db__returns_sonic_yang_as_json(self):
+        # Arrange
+        config_wrapper = gu_common.ConfigWrapper()
+        expected = Files.SONIC_YANG_AS_JSON
+
+        # Act
+        actual = config_wrapper.convert_config_db_to_sonic_yang(Files.CONFIG_DB_AS_JSON)
+
+        # Assert
+        self.assertDictEqual(expected, actual)
+
+    def test_convert_sonic_yang_to_config_db__empty_sonic_yang__returns_empty_config_db(self):
+        # Arrange
+        config_wrapper = gu_common.ConfigWrapper()
+        expected = {}
+
+        # Act
+        actual = config_wrapper.convert_sonic_yang_to_config_db({})
+
+        # Assert
+        self.assertDictEqual(expected, actual)
+
+    def test_convert_sonic_yang_to_config_db__non_empty_sonic_yang__returns_config_db_as_json(self):
+        # Arrange
+        config_wrapper = gu_common.ConfigWrapper()
+        expected = Files.CROPPED_CONFIG_DB_AS_JSON
+
+        # Act
+        actual = config_wrapper.convert_sonic_yang_to_config_db(Files.SONIC_YANG_AS_JSON)
+
+        # Assert
+        self.assertDictEqual(expected, actual)
+
+    def test_convert_sonic_yang_to_config_db__table_name_without_colons__returns_config_db_as_json(self):
+        # Arrange
+        config_wrapper = gu_common.ConfigWrapper()
+        expected = Files.CROPPED_CONFIG_DB_AS_JSON
+
+        # Act
+        actual = config_wrapper.convert_sonic_yang_to_config_db(Files.SONIC_YANG_AS_JSON_WITHOUT_COLONS)
+
+        # Assert
+        self.assertDictEqual(expected, actual)
+
+    def test_convert_sonic_yang_to_config_db__table_name_with_unexpected_colons__failure(self):
+        # Arrange
+        config_wrapper = gu_common.ConfigWrapper()
+
+        # Act and assert
+        self.assertRaises(ValueError,
+                          config_wrapper.convert_sonic_yang_to_config_db,
+                          Files.SONIC_YANG_AS_JSON_WITH_UNEXPECTED_COLONS)
+
+    def test_validate_sonic_yang_config__valid_config__returns_true(self):
+        # Arrange
+        config_wrapper = gu_common.ConfigWrapper()
+        expected = True
+
+        # Act
+        actual = config_wrapper.validate_sonic_yang_config(Files.SONIC_YANG_AS_JSON)
+
+        # Assert
+        self.assertEqual(expected, actual)
+
+    def test_validate_sonic_yang_config__invalid_config__returns_false(self):
+        # Arrange
+        config_wrapper = gu_common.ConfigWrapper()
+        expected = False
+
+        # Act
+        actual = config_wrapper.validate_sonic_yang_config(Files.SONIC_YANG_AS_JSON_INVALID)
+
+        # Assert
+        self.assertEqual(expected, actual)
+
+    def test_validate_config_db_config__valid_config__returns_true(self):
+        # Arrange
+        config_wrapper = gu_common.ConfigWrapper()
+        expected = True
+
+        # Act
+        actual = config_wrapper.validate_config_db_config(Files.CONFIG_DB_AS_JSON)
+
+        # Assert
+        self.assertEqual(expected, actual)
+
+    def test_validate_config_db_config__invalid_config__returns_false(self):
+        # Arrange
+        config_wrapper = gu_common.ConfigWrapper()
+        expected = False
+
+        # Act
+        actual = config_wrapper.validate_config_db_config(Files.CONFIG_DB_AS_JSON_INVALID)
+
+        # Assert
+        self.assertEqual(expected, actual)
+
+    def test_crop_tables_without_yang__returns_cropped_config_db_as_json(self):
+        # Arrange
+        config_wrapper = gu_common.ConfigWrapper()
+        expected = Files.CROPPED_CONFIG_DB_AS_JSON
+
+        # Act
+        actual = config_wrapper.crop_tables_without_yang(Files.CONFIG_DB_AS_JSON)
+
+        # Assert
+        self.assertDictEqual(expected, actual)
+
+class TestPatchWrapper(unittest.TestCase):
+    def setUp(self):
+        self.config_wrapper_mock = gu_common.ConfigWrapper()
+        self.config_wrapper_mock.get_config_db_as_json=MagicMock(return_value=Files.CONFIG_DB_AS_JSON)
+
+    def test_validate_config_db_patch_has_yang_models__table_without_yang_model__returns_false(self):
+        # Arrange
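+        # TABLE_WITHOUT_YANG below is a deliberately unknown table name, so
+        # the patch cannot be matched to any YANG model and validation fails.
+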
patch_wrapper = gu_common.PatchWrapper() + patch = [ { 'op': 'remove', 'path': '/TABLE_WITHOUT_YANG' } ] + expected = False + + # Act + actual = patch_wrapper.validate_config_db_patch_has_yang_models(patch) + + # Assert + self.assertEqual(expected, actual) + + def test_validate_config_db_patch_has_yang_models__table_with_yang_model__returns_true(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper() + patch = [ { 'op': 'remove', 'path': '/ACL_TABLE' } ] + expected = True + + # Act + actual = patch_wrapper.validate_config_db_patch_has_yang_models(patch) + + # Assert + self.assertEqual(expected, actual) + + def test_convert_config_db_patch_to_sonic_yang_patch__invalid_config_db_patch__failure(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper() + patch = [ { 'op': 'remove', 'path': '/TABLE_WITHOUT_YANG' } ] + + # Act and Assert + self.assertRaises(ValueError, patch_wrapper.convert_config_db_patch_to_sonic_yang_patch, patch) + + def test_same_patch__no_diff__returns_true(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper() + + # Act and Assert + self.assertTrue(patch_wrapper.verify_same_json(Files.CONFIG_DB_AS_JSON, Files.CONFIG_DB_AS_JSON)) + + def test_same_patch__diff__returns_false(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper() + + # Act and Assert + self.assertFalse(patch_wrapper.verify_same_json(Files.CONFIG_DB_AS_JSON, Files.CROPPED_CONFIG_DB_AS_JSON)) + + def test_generate_patch__no_diff__empty_patch(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper() + + # Act + patch = patch_wrapper.generate_patch(Files.CONFIG_DB_AS_JSON, Files.CONFIG_DB_AS_JSON) + + # Assert + self.assertFalse(patch) + + def test_simulate_patch__empty_patch__no_changes(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper() + patch = jsonpatch.JsonPatch([]) + expected = Files.CONFIG_DB_AS_JSON + + # Act + actual = patch_wrapper.simulate_patch(patch, Files.CONFIG_DB_AS_JSON) + + # Assert + self.assertDictEqual(expected, actual) + + def test_simulate_patch__non_empty_patch__changes_applied(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper() + patch = Files.SINGLE_OPERATION_CONFIG_DB_PATCH + expected = Files.SINGLE_OPERATION_CONFIG_DB_PATCH.apply(Files.CONFIG_DB_AS_JSON) + + # Act + actual = patch_wrapper.simulate_patch(patch, Files.CONFIG_DB_AS_JSON) + + # Assert + self.assertDictEqual(expected, actual) + + def test_generate_patch__diff__non_empty_patch(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper() + after_update_json = Files.SINGLE_OPERATION_CONFIG_DB_PATCH.apply(Files.CONFIG_DB_AS_JSON) + expected = Files.SINGLE_OPERATION_CONFIG_DB_PATCH + + # Act + actual = patch_wrapper.generate_patch(Files.CONFIG_DB_AS_JSON, after_update_json) + + # Assert + self.assertTrue(actual) + self.assertEqual(expected, actual) + + def test_convert_config_db_patch_to_sonic_yang_patch__empty_patch__returns_empty_patch(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper(config_wrapper = self.config_wrapper_mock) + patch = jsonpatch.JsonPatch([]) + expected = jsonpatch.JsonPatch([]) + + # Act + actual = patch_wrapper.convert_config_db_patch_to_sonic_yang_patch(patch) + + # Assert + self.assertEqual(expected, actual) + + def test_convert_config_db_patch_to_sonic_yang_patch__single_operation_patch__returns_sonic_yang_patch(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper(config_wrapper = self.config_wrapper_mock) + patch = Files.SINGLE_OPERATION_CONFIG_DB_PATCH + expected = Files.SINGLE_OPERATION_SONIC_YANG_PATCH + + # Act + actual = 
patch_wrapper.convert_config_db_patch_to_sonic_yang_patch(patch) + + # Assert + self.assertEqual(expected, actual) + + def test_convert_config_db_patch_to_sonic_yang_patch__multiple_operations_patch__returns_sonic_yang_patch(self): + # Arrange + config_wrapper = self.config_wrapper_mock + patch_wrapper = gu_common.PatchWrapper(config_wrapper = config_wrapper) + config_db_patch = Files.MULTI_OPERATION_CONFIG_DB_PATCH + + # Act + sonic_yang_patch = patch_wrapper.convert_config_db_patch_to_sonic_yang_patch(config_db_patch) + + # Assert + self.__assert_same_patch(config_db_patch, sonic_yang_patch, config_wrapper, patch_wrapper) + + def test_convert_sonic_yang_patch_to_config_db_patch__empty_patch__returns_empty_patch(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper(config_wrapper = self.config_wrapper_mock) + patch = jsonpatch.JsonPatch([]) + expected = jsonpatch.JsonPatch([]) + + # Act + actual = patch_wrapper.convert_sonic_yang_patch_to_config_db_patch(patch) + + # Assert + self.assertEqual(expected, actual) + + def test_convert_sonic_yang_patch_to_config_db_patch__single_operation_patch__returns_config_db_patch(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper(config_wrapper = self.config_wrapper_mock) + patch = Files.SINGLE_OPERATION_SONIC_YANG_PATCH + expected = Files.SINGLE_OPERATION_CONFIG_DB_PATCH + + # Act + actual = patch_wrapper.convert_sonic_yang_patch_to_config_db_patch(patch) + + # Assert + self.assertEqual(expected, actual) + + def test_convert_sonic_yang_patch_to_config_db_patch__multiple_operations_patch__returns_config_db_patch(self): + # Arrange + config_wrapper = self.config_wrapper_mock + patch_wrapper = gu_common.PatchWrapper(config_wrapper = config_wrapper) + sonic_yang_patch = Files.MULTI_OPERATION_SONIC_YANG_PATCH + + # Act + config_db_patch = patch_wrapper.convert_sonic_yang_patch_to_config_db_patch(sonic_yang_patch) + + # Assert + self.__assert_same_patch(config_db_patch, sonic_yang_patch, config_wrapper, patch_wrapper) + + def __assert_same_patch(self, config_db_patch, sonic_yang_patch, config_wrapper, patch_wrapper): + sonic_yang = config_wrapper.get_sonic_yang_as_json() + config_db = config_wrapper.get_config_db_as_json() + + after_update_sonic_yang = patch_wrapper.simulate_patch(sonic_yang_patch, sonic_yang) + after_update_config_db = patch_wrapper.simulate_patch(config_db_patch, config_db) + after_update_config_db_cropped = config_wrapper.crop_tables_without_yang(after_update_config_db) + + after_update_sonic_yang_as_config_db = \ + config_wrapper.convert_sonic_yang_to_config_db(after_update_sonic_yang) + + self.assertTrue(patch_wrapper.verify_same_json(after_update_config_db_cropped, after_update_sonic_yang_as_config_db)) diff --git a/tests/generic_config_updater/gutest_helpers.py b/tests/generic_config_updater/gutest_helpers.py new file mode 100644 index 0000000000..2e8984ad68 --- /dev/null +++ b/tests/generic_config_updater/gutest_helpers.py @@ -0,0 +1,53 @@ +import json +import jsonpatch +import os +import shutil +import sys +import unittest +from unittest.mock import MagicMock, Mock, call + +class MockSideEffectDict: + def __init__(self, map): + self.map = map + + def side_effect_func(self, *args): + l = [str(arg) for arg in args] + key = tuple(l) + value = self.map.get(key) + if value is None: + raise ValueError(f"Given arguments were not found in arguments map.\n Arguments: {key}\n Map: {self.map}") + + return value + +def create_side_effect_dict(map): + return MockSideEffectDict(map).side_effect_func + +class FilesLoader: + def 
__init__(self):
+        self.files_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "files")
+        self.cache = {}
+
+    def __getattr__(self, attr):
+        return self._load(attr)
+
+    def _load(self, file_name):
+        normalized_file_name = file_name.lower()
+
+        # Try to load a json file
+        json_file_path = os.path.join(self.files_path, f"{normalized_file_name}.json")
+        if os.path.isfile(json_file_path):
+            with open(json_file_path) as fh:
+                text = fh.read()
+                return json.loads(text)
+
+        # Try to load a json-patch file
+        jsonpatch_file_path = os.path.join(self.files_path, f"{normalized_file_name}.json-patch")
+        if os.path.isfile(jsonpatch_file_path):
+            with open(jsonpatch_file_path) as fh:
+                text = fh.read()
+                return jsonpatch.JsonPatch(json.loads(text))
+
+        raise ValueError(f"There is no file called '{file_name}' in 'files/' directory")
+
+# Files.File_Name will look for a file called "file_name" in the "files/" directory
+Files = FilesLoader()

From c3963c5673cf944aec2ff89868f4a864261f51d3 Mon Sep 17 00:00:00 2001
From: maksymbelei95 <75987222+maksymbelei95@users.noreply.github.com>
Date: Mon, 26 Apr 2021 19:40:06 +0300
Subject: [PATCH 18/41] Fix remove ip rif (#1535)

* Added a check for static routes bound to the interface before deleting
  its last IP entry, to prevent deleting the RIF while a static route is
  still present in the system.

Signed-off-by: Maksym Belei
---
 config/main.py                    |  20 ++++
 tests/config_int_ip_common.py     |  31 ++++++
 tests/config_int_ip_test.py       | 158 ++++++++++++++++++++++++++++++
 tests/conftest.py                 |  32 +++++-
 tests/crm_test.py                 |   1 +
 tests/int_ip_input/config_db.json |  41 ++++++++
 tests/vlan_test.py                |   6 ++
 7 files changed, 288 insertions(+), 1 deletion(-)
 create mode 100644 tests/config_int_ip_common.py
 create mode 100644 tests/config_int_ip_test.py
 create mode 100644 tests/int_ip_input/config_db.json

diff --git a/config/main.py b/config/main.py
index 6fad33f9c1..e9bab3172d 100644
--- a/config/main.py
+++ b/config/main.py
@@ -22,6 +22,7 @@
 from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector, SonicDBConfig
 from utilities_common.db import Db
 from utilities_common.intf_filter import parse_interface_in_filter
+from utilities_common import bgp_util
 import utilities_common.cli as clicommon
 
 from .utils import log
@@ -2787,6 +2788,25 @@ def remove(ctx, interface_name, ip_addr):
     table_name = get_interface_table_name(interface_name)
     if table_name == "":
         ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan/Loopback]")
+    interface_dependent = interface_ipaddr_dependent_on_interface(config_db, interface_name)
+    # If we are deleting the last IP entry of the interface, check whether a static
+    # route is still present for the RIF before deleting the entry and, with it, the RIF.
+    if len(interface_dependent) == 1 and interface_dependent[0][1] == ip_addr:
+        # Check both IPv4 and IPv6 routes.
+        ip_versions = [ "ip", "ipv6"]
+        for ip_ver in ip_versions:
+            # Compose the command and ask Zebra to return the routes.
+            # Scopes of all VRFs will be checked.
+            cmd = "show {} route vrf all static".format(ip_ver)
+            if multi_asic.is_multi_asic():
+                output = bgp_util.run_bgp_command(cmd, ctx.obj['namespace'])
+            else:
+                output = bgp_util.run_bgp_command(cmd)
+            # If there is output data, check whether a static route is
+            # bound to the interface.
+            if output != "":
+                if any(interface_name in output_line for output_line in output.splitlines()):
+                    ctx.fail("Cannot remove the last IP entry of interface {}. 
A static {} route is still bound to the RIF.".format(interface_name, ip_ver)) config_db.set_entry(table_name, (interface_name, ip_addr), None) interface_dependent = interface_ipaddr_dependent_on_interface(config_db, interface_name) if len(interface_dependent) == 0 and is_interface_bind_to_vrf(config_db, interface_name) is False: diff --git a/tests/config_int_ip_common.py b/tests/config_int_ip_common.py new file mode 100644 index 0000000000..7cebfdb8ba --- /dev/null +++ b/tests/config_int_ip_common.py @@ -0,0 +1,31 @@ +show_ip_route_with_static_expected_output = """\ +Codes: K - kernel route, C - connected, S - static, R - RIP, + O - OSPF, I - IS-IS, B - BGP, E - EIGRP, N - NHRP, + T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP, + F - PBR, f - OpenFabric, + > - selected route, * - FIB route, q - queued, r - rejected, b - backup + +VRF Vrf11: +S>* 20.0.0.1/32 [1/0] is directly connected, Ethernet2, weight 1, 00:40:18 + +VRF default: +S>* 0.0.0.0/0 [200/0] via 192.168.111.3, eth0, weight 1, 19:51:57 +S>* 20.0.0.1/32 [1/0] is directly connected, Ethernet4 (vrf Vrf11), weight 1, 00:38:52 +S>* 20.0.0.4/32 [1/0] is directly connected, PortChannel2, weight 1, 00:38:52 +S>* 20.0.0.8/32 [1/0] is directly connected, Vlan2, weight 1, 00:38:52 +""" + +show_ipv6_route_with_static_expected_output = """\ +Codes: K - kernel route, C - connected, S - static, R - RIPng, + O - OSPFv3, I - IS-IS, B - BGP, N - NHRP, T - Table, + v - VNC, V - VNC-Direct, A - Babel, D - SHARP, F - PBR, + f - OpenFabric, + > - selected route, * - FIB route, q - queued, r - rejected, b - backup + +VRF Vrf11: +S>* fe80::/24 [1/0] is directly connected, Vlan4, weight 1, 00:00:04 + +VRF default: +S>* 20c0:a800:0:21::/64 [20/0] is directly connected, PortChannel4, 2d22h02m +S>* fe80::/32 [1/0] is directly connected, Ethernet8 (vrf Vrf11), weight 1, 00:00:04 +""" \ No newline at end of file diff --git a/tests/config_int_ip_test.py b/tests/config_int_ip_test.py new file mode 100644 index 0000000000..6968fcbe45 --- /dev/null +++ b/tests/config_int_ip_test.py @@ -0,0 +1,158 @@ +import os +import sys +import pytest +import mock +from importlib import reload + +from click.testing import CliRunner + +from utilities_common.db import Db + +modules_path = os.path.join(os.path.dirname(__file__), "..") +test_path = os.path.join(modules_path, "tests") +sys.path.insert(0, modules_path) +sys.path.insert(0, test_path) +mock_db_path = os.path.join(test_path, "int_ip_input") + + +class TestIntIp(object): + @pytest.fixture(scope="class", autouse=True) + def setup_class(cls): + print("SETUP") + os.environ['UTILITIES_UNIT_TESTING'] = "1" + import config.main as config + reload(config) + yield + print("TEARDOWN") + os.environ["UTILITIES_UNIT_TESTING"] = "0" + from .mock_tables import dbconnector + dbconnector.dedicated_dbs = {} + + @pytest.mark.parametrize('setup_single_bgp_instance', + ['ip_route_for_int_ip'], indirect=['setup_single_bgp_instance']) + def test_config_int_ip_rem( + self, + get_cmd_module, + setup_single_bgp_instance): + (config, show) = get_cmd_module + jsonfile_config = os.path.join(mock_db_path, "config_db.json") + from .mock_tables import dbconnector + dbconnector.dedicated_dbs['CONFIG_DB'] = jsonfile_config + + runner = CliRunner() + db = Db() + obj = {'config_db': db.cfgdb} + + # remove vlan IP`s + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet16", "192.168.10.1/24"], obj=obj) + 
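# run_command is patched out, so the CLI's final shell invocation is only
+            # recorded, never executed; exit code 0 with exactly one recorded
+            # call (asserted below) marks a removal with no blocking static route.
+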
print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + + @pytest.mark.parametrize('setup_single_bgp_instance', + ['ip_route_for_int_ip'], indirect=['setup_single_bgp_instance']) + def test_config_int_ip_rem_static( + self, + get_cmd_module, + setup_single_bgp_instance): + (config, show) = get_cmd_module + jsonfile_config = os.path.join(mock_db_path, "config_db") + from .mock_tables import dbconnector + dbconnector.dedicated_dbs['CONFIG_DB'] = jsonfile_config + + runner = CliRunner() + db = Db() + obj = {'config_db': db.cfgdb} + + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet2", "192.168.0.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + assert "Error: Cannot remove the last IP entry of interface Ethernet2. A static ip route is still bound to the RIF." in result.output + assert mock_run_command.call_count == 0 + + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet8", "192.168.3.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + assert "Error: Cannot remove the last IP entry of interface Ethernet8. A static ipv6 route is still bound to the RIF." in result.output + assert mock_run_command.call_count == 0 + + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Vlan2", "192.168.1.1/21"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + assert "Error: Cannot remove the last IP entry of interface Vlan2. A static ip route is still bound to the RIF." in result.output + assert mock_run_command.call_count == 0 + + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["PortChannel2", "10.0.0.56/31"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + assert "Error: Cannot remove the last IP entry of interface PortChannel2. A static ip route is still bound to the RIF." 
in result.output + assert mock_run_command.call_count == 0 + + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet4", "192.168.4.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + +class TestIntIpMultiasic(object): + @pytest.fixture(scope="class", autouse=True) + def setup_class(cls): + print("SETUP") + os.environ['UTILITIES_UNIT_TESTING'] = "1" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + from .mock_tables import dbconnector + from .mock_tables import mock_multi_asic + reload(mock_multi_asic) + dbconnector.load_namespace_config() + yield + print("TEARDOWN") + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + # change back to single asic config + from .mock_tables import dbconnector + from .mock_tables import mock_single_asic + reload(mock_single_asic) + dbconnector.dedicated_dbs = {} + dbconnector.load_namespace_config() + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_for_int_ip'], indirect=['setup_multi_asic_bgp_instance']) + def test_config_int_ip_rem_static_multiasic( + self, + get_cmd_module, + setup_multi_asic_bgp_instance): + (config, show) = get_cmd_module + jsonfile_config = os.path.join(mock_db_path, "config_db") + from .mock_tables import dbconnector + dbconnector.dedicated_dbs['CONFIG_DB'] = jsonfile_config + + runner = CliRunner() + db = Db() + obj = {'config_db': db.cfgdb, 'namespace': 'test_ns'} + + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet2", "192.168.0.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + assert "Error: Cannot remove the last IP entry of interface Ethernet2. A static ip route is still bound to the RIF." in result.output + assert mock_run_command.call_count == 0 + + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet8", "192.168.3.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + assert "Error: Cannot remove the last IP entry of interface Ethernet8. A static ipv6 route is still bound to the RIF." in result.output + assert mock_run_command.call_count == 0 \ No newline at end of file diff --git a/tests/conftest.py b/tests/conftest.py index 16c018bb64..4ff1a002bd 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -9,6 +9,7 @@ from .mock_tables import dbconnector from . import show_ip_route_common +from . 
import config_int_ip_common test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -124,6 +125,14 @@ def mock_run_bgp_command(vtysh_cmd, bgp_namespace): mock_frr_data = json_data.read() return mock_frr_data return "" + + def mock_run_bgp_command_for_static(vtysh_cmd, bgp_namespace=""): + if vtysh_cmd == "show ip route vrf all static": + return config_int_ip_common.show_ip_route_with_static_expected_output + elif vtysh_cmd == "show ipv6 route vrf all static": + return config_int_ip_common.show_ipv6_route_with_static_expected_output + else: + return "" def mock_run_show_ip_route_commands(request): if request.param == 'ipv6_route_err': @@ -147,10 +156,18 @@ def mock_run_show_ip_route_commands(request): request.param == 'ipv6_route', request.param == 'ipv6_specific_route']): bgp_util.run_bgp_command = mock.MagicMock( return_value=mock_run_show_ip_route_commands(request)) + elif request.param == 'ip_route_for_int_ip': + _old_run_bgp_command = bgp_util.run_bgp_command + bgp_util.run_bgp_command = mock_run_bgp_command_for_static else: bgp_util.run_bgp_command = mock.MagicMock( return_value=mock_run_bgp_command("", "")) + yield + + if request.param == 'ip_route_for_int_ip': + bgp_util.run_bgp_command = _old_run_bgp_command + @pytest.fixture def setup_multi_asic_bgp_instance(request): @@ -178,6 +195,16 @@ def setup_multi_asic_bgp_instance(request): m_asic_json_file = os.path.join( test_path, 'mock_tables', 'dummy.json') + def mock_run_bgp_command_for_static(vtysh_cmd, bgp_namespace=""): + if bgp_namespace != 'test_ns': + return "" + if vtysh_cmd == "show ip route vrf all static": + return config_int_ip_common.show_ip_route_with_static_expected_output + elif vtysh_cmd == "show ipv6 route vrf all static": + return config_int_ip_common.show_ipv6_route_with_static_expected_output + else: + return "" + def mock_run_bgp_command(vtysh_cmd, bgp_namespace): bgp_mocked_json = os.path.join( test_path, 'mock_tables', bgp_namespace, m_asic_json_file) @@ -189,7 +216,10 @@ def mock_run_bgp_command(vtysh_cmd, bgp_namespace): return "" _old_run_bgp_command = bgp_util.run_bgp_command - bgp_util.run_bgp_command = mock_run_bgp_command + if request.param == 'ip_route_for_int_ip': + bgp_util.run_bgp_command = mock_run_bgp_command_for_static + else: + bgp_util.run_bgp_command = mock_run_bgp_command yield diff --git a/tests/crm_test.py b/tests/crm_test.py index 369d9a51ab..d99402e057 100644 --- a/tests/crm_test.py +++ b/tests/crm_test.py @@ -1216,6 +1216,7 @@ def setup_class(cls): os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" from .mock_tables import dbconnector from .mock_tables import mock_multi_asic + importlib.reload(mock_multi_asic) dbconnector.load_namespace_config() def test_crm_show_summary(self): diff --git a/tests/int_ip_input/config_db.json b/tests/int_ip_input/config_db.json new file mode 100644 index 0000000000..3f2d6e5beb --- /dev/null +++ b/tests/int_ip_input/config_db.json @@ -0,0 +1,41 @@ +{ + "INTERFACE|Ethernet16": { + "NULL": "NULL" + }, + "INTERFACE|Ethernet16|192.168.10.1/24": { + "NULL": "NULL" + }, + "INTERFACE|Ethernet2": { + "NULL": "NULL" + }, + "INTERFACE|Ethernet2|192.168.0.1/24": { + "NULL": "NULL" + }, + "INTERFACE|Ethernet4": { + "NULL": "NULL" + }, + "INTERFACE|Ethernet4|192.168.4.1/24": { + "NULL": "NULL" + }, + "INTERFACE|Ethernet4|192.168.100.1/24": { + "NULL": "NULL" + }, + "INTERFACE|Ethernet8": { + "NULL": "NULL" + }, + "INTERFACE|Ethernet8|192.168.3.1/24": { + "NULL": "NULL" + }, + "PORTCHANNEL_INTERFACE|PortChannel2": { + 
"NULL": "NULL" + }, + "PORTCHANNEL_INTERFACE|PortChannel2|10.0.0.56/31": { + "NULL": "NULL" + }, + "VLAN_INTERFACE|Vlan2": { + "proxy_arp": "enabled" + }, + "VLAN_INTERFACE|Vlan2|192.168.1.1/21": { + "NULL": "NULL" + } +} \ No newline at end of file diff --git a/tests/vlan_test.py b/tests/vlan_test.py index d4832dc2cf..ad3ff9fbb4 100644 --- a/tests/vlan_test.py +++ b/tests/vlan_test.py @@ -7,6 +7,7 @@ import config.main as config import show.main as show from utilities_common.db import Db +from importlib import reload show_vlan_brief_output="""\ +-----------+-----------------+-----------------+----------------+-----------------------+-------------+ @@ -188,6 +189,11 @@ class TestVlan(object): @classmethod def setup_class(cls): os.environ['UTILITIES_UNIT_TESTING'] = "1" + # ensure that we are working with single asic config + from .mock_tables import dbconnector + from .mock_tables import mock_single_asic + reload(mock_single_asic) + dbconnector.load_namespace_config() print("SETUP") def test_show_vlan(self): From 9dba93fd96194d70ee181ed48f439bc4f3c00b82 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan <47282725+renukamanavalan@users.noreply.github.com> Date: Mon, 26 Apr 2021 14:10:44 -0700 Subject: [PATCH 19/41] disk_check: Check & mount RO as RW using tmpfs (#1569) What I did There is a bug that occasionally turn root-overlay as RO. This makes /etc & /home as RO. This blocks any new remote user login, as that needs to write into /etc & /home. This tool scans /etc & /home (or given dirs) as in RW or RO state. If RO, it could create a writable overlay using tmpfs. This is transient and stays until next reboot. Any write after the overlay will be lost upon reboot. But this allows new remote users login. How I did it Create upper & work dirs in /run/mount (tmpfs). Mount /etc & /home as lowerdirs and use the same name for final merge. This allows anyone opening a file in /etc or /home to operate on the merged overlay, transparently. How to verify it Mount any dir on tmpfs ( mount -t tmpfs tmpfs test_dir) remount as RO (mount -o remount,ro test_dir) Pass that dir to this script. (disk_check.py -d ./test_dir) Now it should be RW --- scripts/disk_check.py | 151 ++++++++++++++++++++++++++++++++++++ setup.py | 1 + tests/disk_check_test.py | 161 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 313 insertions(+) create mode 100644 scripts/disk_check.py create mode 100644 tests/disk_check_test.py diff --git a/scripts/disk_check.py b/scripts/disk_check.py new file mode 100644 index 0000000000..94959bfa1a --- /dev/null +++ b/scripts/disk_check.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +""" +What: + There have been cases, where disk turns Read-only due to kernel bug. + In Read-only state, system blocks new remote user login via TACACS. + This utility is to check & make transient recovery as needed. + +How: + check for Read-Write permission. If Read-only, create writable overlay using tmpfs. + + By default "/etc" & "/home" are checked and if in Read-only state, make them Read-Write + using overlay on top of tmpfs. + + Making /etc & /home as writable lets successful new remote user login. + + If in Read-only state or in Read-Write state with the help of tmpfs overlay, + syslog ERR messages are written, to help raise alerts. + + Monit may be used to invoke it periodically, to help scan & fix and + report via syslog. 
+ +""" + +import argparse +import os +import sys +import syslog +import subprocess + +UPPER_DIR = "/run/mount/upper" +WORK_DIR = "/run/mount/work" +MOUNTS_FILE = "/proc/mounts" + +def log_err(m): + print("Err: {}".format(m), file=sys.stderr) + syslog.syslog(syslog.LOG_ERR, m) + + +def log_info(m): + print("Info: {}".format(m)) + syslog.syslog(syslog.LOG_INFO, m) + + +def log_debug(m): + print("debug: {}".format(m)) + syslog.syslog(syslog.LOG_DEBUG, m) + + +def test_writable(dirs): + for d in dirs: + rw = os.access(d, os.W_OK) + if not rw: + log_err("{} is not read-write".format(d)) + return False + else: + log_debug("{} is Read-Write".format(d)) + return True + + +def run_cmd(cmd): + proc = subprocess.run(cmd, shell=True, text=True, capture_output=True) + ret = proc.returncode + if ret: + log_err("failed: ret={} cmd={}".format(ret, cmd)) + else: + log_info("ret={} cmd: {}".format(ret, cmd)) + + if proc.stdout: + log_info("stdout: {}".format(str(proc.stdout))) + if proc.stderr: + log_info("stderr: {}".format(str(proc.stderr))) + return ret + + +def get_dname(path_name): + return os.path.basename(os.path.normpath(path_name)) + + +def do_mnt(dirs): + if os.path.exists(UPPER_DIR): + log_err("Already mounted") + return 1 + + for i in (UPPER_DIR, WORK_DIR): + try: + os.mkdir(i) + except OSError as error: + log_err("Failed to create {}".format(i)) + return 1 + + for d in dirs: + ret = run_cmd("mount -t overlay overlay_{} -o lowerdir={}," + "upperdir={},workdir={} {}".format( + get_dname(d), d, UPPER_DIR, WORK_DIR, d)) + if ret: + break + + if ret: + log_err("Failed to mount {} as Read-Write".format(dirs)) + else: + log_info("{} are mounted as Read-Write".format(dirs)) + return ret + + +def is_mounted(dirs): + if not os.path.exists(UPPER_DIR): + return False + + onames = set() + for d in dirs: + onames.add("overlay_{}".format(get_dname(d))) + + with open(MOUNTS_FILE, "r") as s: + for ln in s.readlines(): + n = ln.strip().split()[0] + if n in onames: + log_debug("Mount exists for {}".format(n)) + return True + return False + + +def do_check(skip_mount, dirs): + ret = 0 + if not test_writable(dirs): + if not skip_mount: + ret = do_mnt(dirs) + + # Check if mounted + if (not ret) and is_mounted(dirs): + log_err("READ-ONLY: Mounted {} to make Read-Write".format(dirs)) + + return ret + + +def main(): + parser=argparse.ArgumentParser( + description="check disk for Read-Write and mount etc & home as Read-Write") + parser.add_argument('-s', "--skip-mount", action='store_true', default=False, + help="Skip mounting /etc & /home as Read-Write") + parser.add_argument('-d', "--dirs", default="/etc,/home", + help="dirs to mount") + args = parser.parse_args() + + ret = do_check(args.skip_mount, args.dirs.split(",")) + return ret + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/setup.py b/setup.py index d070827667..cd706eb433 100644 --- a/setup.py +++ b/setup.py @@ -81,6 +81,7 @@ 'scripts/db_migrator.py', 'scripts/decode-syseeprom', 'scripts/dropcheck', + 'scripts/disk_check.py', 'scripts/dropconfig', 'scripts/dropstat', 'scripts/dump_nat_entries.py', diff --git a/tests/disk_check_test.py b/tests/disk_check_test.py new file mode 100644 index 0000000000..40bc55f0f5 --- /dev/null +++ b/tests/disk_check_test.py @@ -0,0 +1,161 @@ +import sys +import syslog +from unittest.mock import patch +import pytest + +sys.path.append("scripts") +import disk_check + +disk_check.MOUNTS_FILE = "/tmp/proc_mounts" + +test_data = { + "0": { + "desc": "All good as /tmp is read-write", + "args": ["", "-d", "/tmp"], + 
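# Keys a test case may carry (interpreted by the harness further below):
+    #   args        argv handed to disk_check.main()
+    #   err         expected "|"-joined LOG_ERR text
+    #   upperdir / workdir   paths swapped into disk_check's globals
+    #   mounts      stubbed /proc/mounts content
+    #   cmds        expected mount command lines
+    #   proc        optional returncode/stdout/stderr per issued command
+    #   expect_ret  expected return code (defaults to 0)
+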
"err": "" + }, + "1": { + "desc": "Not good as /tmpx is not read-write; But fix skipped", + "args": ["", "-d", "/tmpx", "-s"], + "err": "/tmpx is not read-write" + }, + "2": { + "desc": "Not good as /tmpx is not read-write; expect mount", + "args": ["", "-d", "/tmpx"], + "upperdir": "/tmp/tmpx", + "workdir": "/tmp/tmpy", + "mounts": "overlay_tmpx blahblah", + "err": "/tmpx is not read-write|READ-ONLY: Mounted ['/tmpx'] to make Read-Write", + "cmds": ['mount -t overlay overlay_tmpx -o lowerdir=/tmpx,upperdir=/tmp/tmpx,workdir=/tmp/tmpy /tmpx'] + }, + "3": { + "desc": "Not good as /tmpx is not read-write; mount fail as create of upper fails", + "args": ["", "-d", "/tmpx"], + "upperdir": "/tmpx", + "expect_ret": 1 + }, + "4": { + "desc": "Not good as /tmpx is not read-write; mount fail as upper exist", + "args": ["", "-d", "/tmpx"], + "upperdir": "/tmp", + "err": "/tmpx is not read-write|Already mounted", + "expect_ret": 1 + }, + "5": { + "desc": "/tmp is read-write, but as well mount exists; hence report", + "args": ["", "-d", "/tmp"], + "upperdir": "/tmp", + "mounts": "overlay_tmp blahblah", + "err": "READ-ONLY: Mounted ['/tmp'] to make Read-Write" + }, + "6": { + "desc": "Test another code path for good case", + "args": ["", "-d", "/tmp"], + "upperdir": "/tmp" + } +} + +err_data = "" +cmds = [] +current_tc = None + +def mount_file(d): + with open(disk_check.MOUNTS_FILE, "w") as s: + s.write(d) + + +def report_err_msg(lvl, m): + global err_data + if lvl == syslog.LOG_ERR: + if err_data: + err_data += "|" + err_data += m + + +class proc: + returncode = 0 + stdout = None + stderr = None + + def __init__(self, proc_upd = None): + if proc_upd: + self.returncode = proc_upd.get("ret", 0) + self.stdout = proc_upd.get("stdout", None) + self.stderr = proc_upd.get("stderr", None) + + +def mock_subproc_run(cmd, shell, text, capture_output): + global cmds + + upd = (current_tc["proc"][len(cmds)] + if len(current_tc.get("proc", [])) > len(cmds) else None) + cmds.append(cmd) + + return proc(upd) + + +def init_tc(tc): + global err_data, cmds, current_tc + + err_data = "" + cmds = [] + mount_file(tc.get("mounts", "")) + current_tc = tc + + +def swap_upper(tc): + tmp_u = tc["upperdir"] + tc["upperdir"] = disk_check.UPPER_DIR + disk_check.UPPER_DIR = tmp_u + + +def swap_work(tc): + tmp_w = tc["workdir"] + tc["upperdir"] = disk_check.WORK_DIR + disk_check.WORK_DIR = tmp_w + + +class TestDiskCheck(object): + def setup(self): + pass + + + @patch("disk_check.syslog.syslog") + @patch("disk_check.subprocess.run") + def test_readonly(self, mock_proc, mock_log): + global err_data, cmds + + mock_proc.side_effect = mock_subproc_run + mock_log.side_effect = report_err_msg + + for i, tc in test_data.items(): + print("-----------Start tc {}---------".format(i)) + init_tc(tc) + + with patch('sys.argv', tc["args"]): + if "upperdir" in tc: + swap_upper(tc) + + if "workdir" in tc: + # restore + swap_work(tc) + + ret = disk_check.main() + + if "upperdir" in tc: + # restore + swap_upper(tc) + + if "workdir" in tc: + # restore + swap_work(tc) + + print("ret = {}".format(ret)) + print("err_data={}".format(err_data)) + print("cmds: {}".format(cmds)) + + assert ret == tc.get("expect_ret", 0) + if "err" in tc: + assert err_data == tc["err"] + assert cmds == tc.get("cmds", []) + print("-----------End tc {}-----------".format(i)) From c166f66827157f8e442e2176ce3efe166a5f6b5e Mon Sep 17 00:00:00 2001 From: arlakshm <55814491+arlakshm@users.noreply.github.com> Date: Tue, 27 Apr 2021 12:00:57 -0700 Subject: [PATCH 20/41] [multi-asic] 
support show ip bgp neigh/network for multi asic (#1574) This change is to add support for the commands "show ip bgp neighbor "and "show ip bgp network" for multi asic platforms Add unit tests for these commands Signed-off-by: Arvindsrinivasan Lakshmi Narasimhan --- show/bgp_frr_v4.py | 97 ++- show/bgp_frr_v6.py | 88 +- tests/bgp_commands_input/__init__.py | 0 .../bgp_neighbor_test_vector.py | 755 ++++++++++++++++++ .../bgp_network_test_vector.py | 522 ++++++++++++ tests/conftest.py | 41 +- tests/mock_tables/asic0/config_db.json | 20 + tests/mock_tables/asic1/config_db.json | 20 + tests/mock_tables/mock_multi_asic.py | 2 + tests/pfcstat_test.py | 5 + tests/pfcwd_test.py | 4 + tests/show_bgp_neighbor_test.py | 128 +++ tests/show_bgp_network_test.py | 100 +++ utilities_common/bgp_util.py | 39 +- utilities_common/multi_asic.py | 5 + 15 files changed, 1776 insertions(+), 50 deletions(-) create mode 100644 tests/bgp_commands_input/__init__.py create mode 100644 tests/bgp_commands_input/bgp_neighbor_test_vector.py create mode 100644 tests/bgp_commands_input/bgp_network_test_vector.py create mode 100644 tests/show_bgp_neighbor_test.py create mode 100644 tests/show_bgp_network_test.py diff --git a/show/bgp_frr_v4.py b/show/bgp_frr_v4.py index 5b630d8981..4a3e065200 100644 --- a/show/bgp_frr_v4.py +++ b/show/bgp_frr_v4.py @@ -1,11 +1,12 @@ import click + +from sonic_py_common import multi_asic +from show.main import ip import utilities_common.bgp_util as bgp_util import utilities_common.cli as clicommon import utilities_common.constants as constants import utilities_common.multi_asic as multi_asic_util -from show.main import ip, run_command - ############################################################################### # # 'show ip bgp' cli stanza @@ -13,7 +14,6 @@ ############################################################################### - @ip.group(cls=clicommon.AliasedGroup) def bgp(): """Show IPv4 BGP (Border Gateway Protocol) information""" @@ -24,43 +24,93 @@ def bgp(): @bgp.command() @multi_asic_util.multi_asic_click_options def summary(namespace, display): - bgp_summary = bgp_util.get_bgp_summary_from_all_bgp_instances(constants.IPV4, namespace,display) + bgp_summary = bgp_util.get_bgp_summary_from_all_bgp_instances( + constants.IPV4, namespace, display) bgp_util.display_bgp_summary(bgp_summary=bgp_summary, af=constants.IPV4) - + # 'neighbors' subcommand ("show ip bgp neighbors") @bgp.command() @click.argument('ipaddress', required=False) -@click.argument('info_type', type=click.Choice(['routes', 'advertised-routes', 'received-routes']), required=False) -def neighbors(ipaddress, info_type): +@click.argument('info_type', + type=click.Choice( + ['routes', 'advertised-routes', 'received-routes']), + required=False) +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def neighbors(ipaddress, info_type, namespace): """Show IP (IPv4) BGP neighbors""" - command = 'sudo vtysh -c "show ip bgp neighbor' - + command = 'show ip bgp neighbor' if ipaddress is not None: - command += ' {}'.format(ipaddress) + if not bgp_util.is_ipv4_address(ipaddress): + ctx = click.get_current_context() + ctx.fail("{} is not valid ipv4 address\n".format(ipaddress)) + try: + actual_namespace = bgp_util.get_namespace_for_bgp_neighbor( + ipaddress) + if namespace is not None and namespace != actual_namespace: + click.echo( + "[WARNING]: bgp neighbor {} is present in 
namespace {} not in {}" + .format(ipaddress, actual_namespace, namespace)) - # info_type is only valid if ipaddress is specified - if info_type is not None: - command += ' {}'.format(info_type) + # save the namespace in which the bgp neighbor is configured + namespace = actual_namespace + + command += ' {}'.format(ipaddress) - command += '"' + # info_type is only valid if ipaddress is specified + if info_type is not None: + command += ' {}'.format(info_type) + except ValueError as err: + ctx = click.get_current_context() + ctx.fail("{}\n".format(err)) + + ns_list = multi_asic.get_namespace_list(namespace) + output = "" + for ns in ns_list: + output += bgp_util.run_bgp_command(command, ns) + + click.echo(output.rstrip('\n')) - run_command(command) # 'network' subcommand ("show ip bgp network") @bgp.command() -@click.argument('ipaddress', metavar='[|]', required=False) -@click.argument('info_type', metavar='[bestpath|json|longer-prefixes|multipath]', - type=click.Choice(['bestpath', 'json', 'longer-prefixes', 'multipath']), required=False) -def network(ipaddress, info_type): +@click.argument('ipaddress', + metavar='[|]', + required=False) +@click.argument('info_type', + metavar='[bestpath|json|longer-prefixes|multipath]', + type=click.Choice( + ['bestpath', 'json', 'longer-prefixes', 'multipath']), + required=False) +@click.option('--namespace', + '-n', + 'namespace', + type=str, + show_default=True, + required=True if multi_asic.is_multi_asic is True else False, + help='Namespace name or all', + default=None, + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def network(ipaddress, info_type, namespace): """Show IP (IPv4) BGP network""" - command = 'sudo vtysh -c "show ip bgp' + if multi_asic.is_multi_asic() and namespace not in multi_asic.get_namespace_list(): + ctx = click.get_current_context() + ctx.fail('-n/--namespace option required. provide namespace from list {}'\ + .format(multi_asic.get_namespace_list())) + command = 'show ip bgp' if ipaddress is not None: if '/' in ipaddress: - # For network prefixes then this all info_type(s) are available + # For network prefixes then this all info_type(s) are available pass else: # For an ipaddress then check info_type, exit if specified option doesn't work. 
@@ -75,6 +125,5 @@ def network(ipaddress, info_type):
     if info_type is not None:
         command += ' {}'.format(info_type)
 
-    command += '"'
-
-    run_command(command)
+    output = bgp_util.run_bgp_command(command, namespace)
+    click.echo(output.rstrip('\n'))
diff --git a/show/bgp_frr_v6.py b/show/bgp_frr_v6.py
index 39be295b18..eb1a5b641b 100644
--- a/show/bgp_frr_v6.py
+++ b/show/bgp_frr_v6.py
@@ -1,7 +1,8 @@
 import click
+from sonic_py_common import multi_asic
 
 import utilities_common.cli as clicommon
-from show.main import ipv6, run_command
+from show.main import ipv6
 import utilities_common.multi_asic as multi_asic_util
 import utilities_common.bgp_util as bgp_util
 import utilities_common.constants as constants
@@ -31,27 +32,85 @@ def summary(namespace, display):
 # 'neighbors' subcommand ("show ipv6 bgp neighbors")
 @bgp.command()
 @click.argument('ipaddress', required=False)
-@click.argument('info_type', type=click.Choice(['routes', 'advertised-routes', 'received-routes']), required=False)
-def neighbors(ipaddress, info_type):
+@click.argument('info_type',
+                type=click.Choice(
+                    ['routes', 'advertised-routes', 'received-routes']),
+                required=False)
+@click.option('--namespace',
+              '-n',
+              'namespace',
+              default=None,
+              type=str,
+              show_default=True,
+              help='Namespace name or all',
+              callback=multi_asic_util.multi_asic_namespace_validation_callback)
+def neighbors(ipaddress, info_type, namespace):
     """Show IPv6 BGP neighbors"""
-    ipaddress = "" if ipaddress is None else ipaddress
+
+    if ipaddress is not None:
+        if not bgp_util.is_ipv6_address(ipaddress):
+            ctx = click.get_current_context()
+            ctx.fail("{} is not valid ipv6 address\n".format(ipaddress))
+        try:
+            actual_namespace = bgp_util.get_namespace_for_bgp_neighbor(
+                ipaddress)
+            if namespace is not None and namespace != actual_namespace:
+                click.echo(
+                    "bgp neighbor {} is present in namespace {} not in {}"
+                    .format(ipaddress, actual_namespace, namespace))
+
+            # save the namespace in which the bgp neighbor is configured
+            namespace = actual_namespace
+        except ValueError as err:
+            ctx = click.get_current_context()
+            ctx.fail("{}\n".format(err))
+    else:
+        ipaddress = ""
+
     info_type = "" if info_type is None else info_type
-    command = 'sudo vtysh -c "show bgp ipv6 neighbor {} {}"'.format(ipaddress, info_type)
-    run_command(command)
+    command = 'show bgp ipv6 neighbor {} {}'.format(
+        ipaddress, info_type)
+
+    ns_list = multi_asic.get_namespace_list(namespace)
+    output = ""
+    for ns in ns_list:
+        output += bgp_util.run_bgp_command(command, ns)
+
+    click.echo(output.rstrip('\n'))
+
 
 # 'network' subcommand ("show ipv6 bgp network")
 @bgp.command()
-@click.argument('ipaddress', metavar='[<ipv6-address>|<ipv6-prefix>]', required=False)
-@click.argument('info_type', metavar='[bestpath|json|longer-prefixes|multipath]',
-                type=click.Choice(['bestpath', 'json', 'longer-prefixes', 'multipath']), required=False)
-def network(ipaddress, info_type):
+@click.argument('ipaddress',
+                metavar='[<ipv6-address>|<ipv6-prefix>]',
+                required=False)
+@click.argument('info_type',
+                metavar='[bestpath|json|longer-prefixes|multipath]',
+                type=click.Choice(
+                    ['bestpath', 'json', 'longer-prefixes', 'multipath']),
+                required=False)
+@click.option('--namespace',
+              '-n',
+              'namespace',
+              type=str,
+              show_default=True,
+              required=True if multi_asic.is_multi_asic() is True else False,
+              help='Namespace name or all',
+              default=None,
+              callback=multi_asic_util.multi_asic_namespace_validation_callback)
+def network(ipaddress, info_type, namespace):
     """Show BGP ipv6 network"""
-    command = 'sudo vtysh -c "show bgp ipv6'
+    command = 'show bgp 
ipv6' + + if multi_asic.is_multi_asic() and namespace not in multi_asic.get_namespace_list(): + ctx = click.get_current_context() + ctx.fail('-n/--namespace option required. provide namespace from list {}'\ + .format(multi_asic.get_namespace_list())) if ipaddress is not None: if '/' in ipaddress: - # For network prefixes then this all info_type(s) are available + # For network prefixes then this all info_type(s) are available pass else: # For an ipaddress then check info_type, exit if specified option doesn't work. @@ -66,6 +125,5 @@ def network(ipaddress, info_type): if info_type is not None: command += ' {}'.format(info_type) - command += '"' - - run_command(command) + output = bgp_util.run_bgp_command(command, namespace) + click.echo(output.rstrip('\n')) diff --git a/tests/bgp_commands_input/__init__.py b/tests/bgp_commands_input/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/bgp_commands_input/bgp_neighbor_test_vector.py b/tests/bgp_commands_input/bgp_neighbor_test_vector.py new file mode 100644 index 0000000000..a5766c2a5c --- /dev/null +++ b/tests/bgp_commands_input/bgp_neighbor_test_vector.py @@ -0,0 +1,755 @@ +bgp_v4_neighbors_output = \ +""" +BGP neighbor is 10.0.0.57, remote AS 64600, local AS 65100, external link + Description: ARISTA01T1 + Member of peer-group PEER_V4 for session parameters + BGP version 4, remote router ID 100.1.0.29, local router ID 10.1.0.32 + BGP state = Established, up for 00:00:39 + Last read 00:00:00, Last write 00:00:00 + Hold time is 10, keepalive interval is 3 seconds + Configured hold time is 10, keepalive interval is 3 seconds + Neighbor capabilities: + 4 Byte AS: advertised and received + AddPath: + IPv4 Unicast: RX advertised IPv4 Unicast and received + Route refresh: advertised and received(new) + Address Family IPv4 Unicast: advertised and received + Hostname Capability: advertised (name: vlab-01,domain name: n/a) not received + Graceful Restart Capability: advertised and received + Remote Restart timer is 300 seconds + Address families by peer: + none + Graceful restart information: + End-of-RIB send: IPv4 Unicast + End-of-RIB received: IPv4 Unicast + Local GR Mode: Restart* + Remote GR Mode: Helper + R bit: False + Timers: + Configured Restart Time(sec): 240 + Received Restart Time(sec): 300 + IPv4 Unicast: + F bit: False + End-of-RIB sent: Yes + End-of-RIB sent after update: No + End-of-RIB received: Yes + Timers: + Configured Stale Path Time(sec): 360 + Configured Selection Deferral Time(sec): 360 + Message statistics: + Inq depth is 0 + Outq depth is 0 + Sent Rcvd + Opens: 2 1 + Notifications: 2 2 + Updates: 3203 3202 + Keepalives: 14 15 + Route Refresh: 0 0 + Capability: 0 0 + Total: 3221 3220 + Minimum time between advertisement runs is 0 seconds + + For address family: IPv4 Unicast + PEER_V4 peer-group member + Update group 1, subgroup 1 + Packet Queue length 0 + Inbound soft reconfiguration allowed + Community attribute sent to this neighbor(all) + Inbound path policy configured + Outbound path policy configured + Route map for incoming advertisements is *FROM_BGP_PEER_V4 + Route map for outgoing advertisements is *TO_BGP_PEER_V4 + 6400 accepted prefixes + + Connections established 1; dropped 0 + Last reset 00:01:01, No AFI/SAFI activated for peer +Local host: 10.0.0.56, Local port: 179 +Foreign host: 10.0.0.57, Foreign port: 44731 +Nexthop: 10.0.0.56 +Nexthop global: fc00::71 +Nexthop local: fe80::5054:ff:fea9:41c2 +BGP connection: shared network +BGP Connect Retry Timer in Seconds: 10 +Estimated round 
trip time: 20 ms +Read thread: on Write thread: on FD used: 28 +""" + +bgp_v4_neighbor_invalid = \ +"""Error: Bgp neighbor 20.1.1.1 not configured""" + +bgp_v4_neighbor_invalid_address = \ +"""Error: invalid_address is not valid ipv4 address""" + +bgp_v4_neighbor_output_adv_routes = \ +""" +BGP table version is 6405, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed +Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? - incomplete + + Network Next Hop Metric LocPrf Weight Path +*> 0.0.0.0/0 0.0.0.0 0 64600 65534 6666 6667 i +*> 10.1.0.32/32 0.0.0.0 0 32768 i +*> 100.1.0.29/32 0.0.0.0 0 64600 i +*> 100.1.0.30/32 0.0.0.0 0 64600 i +*> 100.1.0.31/32 0.0.0.0 0 64600 i +*> 100.1.0.32/32 0.0.0.0 0 64600 i +*> 192.168.0.0/21 0.0.0.0 0 32768 i +*> 192.168.8.0/25 0.0.0.0 0 64600 65501 i +*> 192.168.8.128/25 0.0.0.0 0 64600 65501 i +*> 192.168.16.0/25 0.0.0.0 0 64600 65502 i +*> 192.168.16.128/25 + 0.0.0.0 0 64600 65502 i +*> 192.168.24.0/25 0.0.0.0 0 64600 65503 i +*> 192.168.24.128/25 + 0.0.0.0 0 64600 65503 i +*> 192.168.32.0/25 0.0.0.0 0 64600 65504 i +*> 192.168.32.128/25 + 0.0.0.0 0 64600 65504 i +*> 192.168.40.0/25 0.0.0.0 0 64600 65505 i +*> 192.168.40.128/25 + 0.0.0.0 0 64600 65505 i +*> 192.168.48.0/25 0.0.0.0 0 64600 65506 i +*> 192.168.48.128/25 + 0.0.0.0 0 64600 65506 i +*> 192.168.56.0/25 0.0.0.0 0 64600 65507 i +*> 192.168.56.128/25 + 0.0.0.0 0 64600 65507 i +""" + +bgp_v4_neighbor_output_recv_routes = \ +""" +BGP table version is 6405, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed +Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? 
- incomplete + + Network Next Hop Metric LocPrf Weight Path +*> 0.0.0.0/0 10.0.0.57 0 64600 65534 6666 6667 i +*> 100.1.0.29/32 10.0.0.57 0 64600 i +*> 192.168.8.0/25 10.0.0.57 0 64600 65501 i +*> 192.168.8.128/25 10.0.0.57 0 64600 65501 i +*> 192.168.16.0/25 10.0.0.57 0 64600 65502 i +*> 192.168.16.128/25 + 10.0.0.57 0 64600 65502 i +*> 192.168.24.0/25 10.0.0.57 0 64600 65503 i +*> 192.168.24.128/25 + 10.0.0.57 0 64600 65503 i +*> 192.168.32.0/25 10.0.0.57 0 64600 65504 i +*> 192.168.32.128/25 + 10.0.0.57 0 64600 65504 i +*> 192.168.40.0/25 10.0.0.57 0 64600 65505 i +*> 192.168.40.128/25 + 10.0.0.57 0 64600 65505 i +*> 192.168.48.0/25 10.0.0.57 0 64600 65506 i +*> 192.168.48.128/25 + 10.0.0.57 0 64600 65506 i +*> 192.168.56.0/25 10.0.0.57 0 64600 65507 i +*> 192.168.56.128/25 + 10.0.0.57 0 64600 65507 i +""" + +bgp_v6_neighbors_output = \ +""" +BGP neighbor is fc00::72, remote AS 64600, local AS 65100, external link + Description: ARISTA01T1 + Member of peer-group PEER_V6 for session parameters + BGP version 4, remote router ID 100.1.0.29, local router ID 10.1.0.32 + BGP state = Established, up for 01:06:23 + Last read 00:00:02, Last write 00:00:00 + Hold time is 10, keepalive interval is 3 seconds + Configured hold time is 10, keepalive interval is 3 seconds + Neighbor capabilities: + 4 Byte AS: advertised and received + AddPath: + IPv6 Unicast: RX advertised IPv6 Unicast and received + Route refresh: advertised and received(new) + Address Family IPv6 Unicast: advertised and received + Hostname Capability: advertised (name: vlab-01,domain name: n/a) not received + Graceful Restart Capability: advertised and received + Remote Restart timer is 300 seconds + Address families by peer: + none + Graceful restart information: + End-of-RIB send: IPv6 Unicast + End-of-RIB received: IPv6 Unicast + Local GR Mode: Restart* + Remote GR Mode: Helper + R bit: False + Timers: + Configured Restart Time(sec): 240 + Received Restart Time(sec): 300 + IPv6 Unicast: + F bit: False + End-of-RIB sent: Yes + End-of-RIB sent after update: No + End-of-RIB received: Yes + Timers: + Configured Stale Path Time(sec): 360 + Configured Selection Deferral Time(sec): 360 + Message statistics: + Inq depth is 0 + Outq depth is 0 + Sent Rcvd + Opens: 1 1 + Notifications: 0 0 + Updates: 3206 3202 + Keepalives: 1328 1329 + Route Refresh: 0 0 + Capability: 0 0 + Total: 4535 4532 + Minimum time between advertisement runs is 0 seconds + + For address family: IPv6 Unicast + PEER_V6 peer-group member + Update group 2, subgroup 2 + Packet Queue length 0 + Inbound soft reconfiguration allowed + Community attribute sent to this neighbor(all) + Inbound path policy configured + Outbound path policy configured + Route map for incoming advertisements is *FROM_BGP_PEER_V6 + Route map for outgoing advertisements is *TO_BGP_PEER_V6 + 6400 accepted prefixes + + Connections established 1; dropped 0 + Last reset 01:06:46, Waiting for peer OPEN +Local host: fc00::71, Local port: 59726 +Foreign host: fc00::72, Foreign port: 179 +Nexthop: 10.0.0.56 +Nexthop global: fc00::71 +Nexthop local: fe80::5054:ff:fea9:41c2 +BGP connection: shared network +BGP Connect Retry Timer in Seconds: 10 +Estimated round trip time: 4 ms +Read thread: on Write thread: on FD used: 30 +""" + +bgp_v6_neighbor_output_adv_routes = \ +""" +BGP table version is 6407, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed 
+Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? - incomplete + + Network Next Hop Metric LocPrf Weight Path +*> ::/0 :: 0 64600 65534 6666 6667 i +*> 2064:100::1d/128 :: 0 64600 i +*> 2064:100::1e/128 :: 0 64600 i +*> 2064:100::1f/128 :: 0 64600 i +*> 2064:100::20/128 :: 0 64600 i +*> 20c0:a808::/64 :: 0 64600 65501 i +*> 20c0:a808:0:80::/64 + :: 0 64600 65501 i +*> 20c0:a810::/64 :: 0 64600 65502 i +*> 20c0:a810:0:80::/64 + :: 0 64600 65502 i +*> 20c0:a818::/64 :: 0 64600 65503 i +*> 20c0:a818:0:80::/64 + :: 0 64600 65503 i +*> 20c0:a820::/64 :: 0 64600 65504 i +*> 20c0:a820:0:80::/64 + :: 0 64600 65504 i +*> 20c0:a828::/64 :: 0 64600 65505 i +*> 20c0:a828:0:80::/64 + :: 0 64600 65505 i +*> 20c0:a830::/64 :: 0 64600 65506 i +*> 20c0:a830:0:80::/64 + :: 0 64600 65506 i +*> 20c0:a838::/64 :: 0 64600 65507 i +*> 20c0:a838:0:80::/64 + :: 0 64600 65507 i +*> 20c0:a840::/64 :: 0 64600 65508 i +*> 20c0:a840:0:80::/64 + :: 0 64600 65508 i +*> 20c0:a848::/64 :: 0 64600 65509 i +*> 20c0:a848:0:80::/64 + :: 0 64600 65509 i +*> 20c0:a850::/64 :: 0 64600 65510 i +*> 20c0:a850:0:80::/64 + :: 0 64600 65510 i +*> 20c0:a858::/64 :: 0 64600 65511 i +*> 20c0:a858:0:80::/64 + :: 0 64600 65511 i +*> 20c0:a860::/64 :: 0 64600 65512 i +*> 20c0:a860:0:80::/64 + :: 0 64600 65512 i +*> 20c0:a868::/64 :: 0 64600 65513 i +*> 20c0:a868:0:80::/64 + :: 0 64600 65513 i +""" + +bgp_v6_neighbor_output_recv_routes = \ +""" +BGP table version is 6407, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed +Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? 
- incomplete + + Network Next Hop Metric LocPrf Weight Path +*> ::/0 fc00::72 0 64600 65534 6666 6667 i +*> 2064:100::1d/128 fc00::72 0 64600 i +*> 20c0:a808::/64 fc00::72 0 64600 65501 i +*> 20c0:a808:0:80::/64 + fc00::72 0 64600 65501 i +*> 20c0:a810::/64 fc00::72 0 64600 65502 i +*> 20c0:a810:0:80::/64 + fc00::72 0 64600 65502 i +*> 20c0:a818::/64 fc00::72 0 64600 65503 i +*> 20c0:a818:0:80::/64 + fc00::72 0 64600 65503 i +*> 20c0:a820::/64 fc00::72 0 64600 65504 i +*> 20c0:a820:0:80::/64 + fc00::72 0 64600 65504 i +*> 20c0:a828::/64 fc00::72 0 64600 65505 i +*> 20c0:a828:0:80::/64 + fc00::72 0 64600 65505 i +*> 20c0:a830::/64 fc00::72 0 64600 65506 i +*> 20c0:a830:0:80::/64 + fc00::72 0 64600 65506 i +*> 20c0:a838::/64 fc00::72 0 64600 65507 i +*> 20c0:a838:0:80::/64 + fc00::72 0 64600 65507 i +*> 20c0:a840::/64 fc00::72 0 64600 65508 i +*> 20c0:a840:0:80::/64 + fc00::72 0 64600 65508 i +*> 20c0:a848::/64 fc00::72 0 64600 65509 i +*> 20c0:a848:0:80::/64 + fc00::72 0 64600 65509 i +*> 20c0:a850::/64 fc00::72 0 64600 65510 i +*> 20c0:a850:0:80::/64 + fc00::72 0 64600 65510 i +*> 20c0:a858::/64 fc00::72 0 64600 65511 i +*> 20c0:a858:0:80::/64 + fc00::72 0 64600 65511 i +*> 20c0:a860::/64 fc00::72 0 64600 65512 i +*> 20c0:a860:0:80::/64 + fc00::72 0 64600 65512 i +*> 20c0:a868::/64 fc00::72 0 64600 65513 i +*> 20c0:a868:0:80::/64 + fc00::72 0 64600 65513 i +""" + +bgp_v6_neighbor_invalid = \ +"""Error: Bgp neighbor aa00::72 not configured""" + +bgp_v6_neighbor_invalid_address = \ +"""Error: 20.1.1.1 is not valid ipv6 address""" + +bgp_v4_neighbors_output_asic0 = \ +""" +BGP neighbor is 10.0.0.1, remote AS 65200, local AS 65100, external link + Description: ARISTA01T2 + Member of peer-group TIER2_V4 for session parameters + BGP version 4, remote router ID 100.1.0.1, local router ID 10.1.0.32 + BGP state = Established, up for 04:41:19 + Last read 00:00:19, Last write 00:00:19 + Hold time is 180, keepalive interval is 60 seconds + Neighbor capabilities: + 4 Byte AS: advertised and received + AddPath: + IPv4 Unicast: RX advertised IPv4 Unicast and received + Route refresh: advertised and received(new) + Address Family IPv4 Unicast: advertised and received + Hostname Capability: advertised (name: str-n3164-acs-2,domain name: n/a) not received + Graceful Restart Capabilty: advertised and received + Remote Restart timer is 300 seconds + Address families by peer: + IPv4 Unicast(not preserved) + Graceful restart information: + End-of-RIB send: IPv4 Unicast + End-of-RIB received: IPv4 Unicast + Message statistics: + Inq depth is 0 + Outq depth is 0 + Sent Rcvd + Opens: 2 1 + Notifications: 2 0 + Updates: 43 3187 + Keepalives: 282 283 + Route Refresh: 0 0 + Capability: 0 0 + Total: 329 3471 + Minimum time between advertisement runs is 0 seconds + + For address family: IPv4 Unicast + TIER2_V4 peer-group member + Update group 3, subgroup 3 + Packet Queue length 0 + Inbound soft reconfiguration allowed + Community attribute sent to this neighbor(all) + Inbound path policy configured + Outbound path policy configured + Route map for incoming advertisements is *FROM_TIER2_V4 + Route map for outgoing advertisements is *TO_TIER2_V4 + 6370 accepted prefixes + Maximum prefixes allowed 12000 (warning-only) + Threshold for warning message 90% + + Connections established 1; dropped 0 + Last reset 04:41:43, No AFI/SAFI activated for peer +Local host: 10.0.0.0, Local port: 179 +Foreign host: 10.0.0.1, Foreign port: 56376 +Nexthop: 10.0.0.0 +Nexthop global: fc00::1 +Nexthop local: fe80::2be:75ff:fe3a:ef50 +BGP 
connection: shared network +BGP Connect Retry Timer in Seconds: 120 +Read thread: on Write thread: on FD used: 25 +""" +bgp_v4_neighbors_output_asic1 = \ +""" +BGP neighbor is 10.1.0.1, remote AS 65100, local AS 65100, internal link + Description: ASIC0 +Hostname: sonic + Member of peer-group INTERNAL_PEER_V4 for session parameters + BGP version 4, remote router ID 10.1.0.32, local router ID 8.0.0.4 + BGP state = Established, up for 04:50:18 + Last read 00:00:03, Last write 00:00:03 + Hold time is 10, keepalive interval is 3 seconds + Configured hold time is 10, keepalive interval is 3 seconds + Neighbor capabilities: + 4 Byte AS: advertised and received + AddPath: + IPv4 Unicast: RX advertised IPv4 Unicast and received + Route refresh: advertised and received(old & new) + Address Family IPv4 Unicast: advertised and received + Hostname Capability: advertised (name: str-n3164-acs-2,domain name: n/a) received (name: str-n3164-acs-2,domain name: n/a) + Graceful Restart Capabilty: advertised and received + Remote Restart timer is 240 seconds + Address families by peer: + IPv4 Unicast(preserved) + Graceful restart information: + End-of-RIB send: IPv4 Unicast + End-of-RIB received: IPv4 Unicast + Message statistics: + Inq depth is 0 + Outq depth is 0 + Sent Rcvd + Opens: 1 1 + Notifications: 0 0 + Updates: 6390 3194 + Keepalives: 5806 5806 + Route Refresh: 0 0 + Capability: 0 0 + Total: 12197 9001 + Minimum time between advertisement runs is 0 seconds + + For address family: IPv4 Unicast + INTERNAL_PEER_V4 peer-group member + Update group 2, subgroup 2 + Packet Queue length 0 + Route-Reflector Client + Inbound soft reconfiguration allowed + NEXT_HOP is always this router + Community attribute sent to this neighbor(all) + Inbound path policy configured + Outbound path policy configured + Route map for incoming advertisements is *FROM_BGP_INTERNAL_PEER_V4 + Route map for outgoing advertisements is *TO_BGP_INTERNAL_PEER_V4 + 6377 accepted prefixes + + Connections established 1; dropped 0 + Last reset 04:50:40, Waiting for NHT +Local host: 10.1.0.0, Local port: 52802 +Foreign host: 10.1.0.1, Foreign port: 179 +Nexthop: 10.1.0.0 +Nexthop global: 2603:10e2:400:1::1 +Nexthop local: fe80::42:f0ff:fe7f:104 +BGP connection: shared network +BGP Connect Retry Timer in Seconds: 10 +Read thread: on Write thread: on FD used: 17 +""" +bgp_v4_neighbors_output_all_asics = bgp_v4_neighbors_output_asic0 + bgp_v4_neighbors_output_asic1 + +bgp_v6_neighbor_output_warning =\ +"""bgp neighbor 2603:10e2:400:1::2 is present in namespace asic1 not in asic0""" + +bgp_v6_neighbors_output_asic0 = \ +""" + BGP neighbor is fc00::2, remote AS 65200, local AS 65100, external link + Description: ARISTA01T2 + Member of peer-group TIER2_V6 for session parameters + BGP version 4, remote router ID 100.1.0.1, local router ID 10.1.0.32 + BGP state = Established, up for 13:26:44 + Last read 00:00:45, Last write 00:00:44 + Hold time is 180, keepalive interval is 60 seconds + Neighbor capabilities: + 4 Byte AS: advertised and received + AddPath: + IPv6 Unicast: RX advertised IPv6 Unicast and received + Route refresh: advertised and received(new) + Address Family IPv6 Unicast: advertised and received + Hostname Capability: advertised (name: str-n3164-acs-2,domain name: n/a) not received + Graceful Restart Capabilty: advertised and received + Remote Restart timer is 300 seconds + Address families by peer: + IPv6 Unicast(not preserved) + Graceful restart information: + End-of-RIB send: IPv6 Unicast + End-of-RIB received: IPv6 Unicast + 
Message statistics: + Inq depth is 0 + Outq depth is 0 + Sent Rcvd + Opens: 2 1 + Notifications: 2 0 + Updates: 5 3187 + Keepalives: 807 808 + Route Refresh: 0 0 + Capability: 0 0 + Total: 816 3996 + Minimum time between advertisement runs is 0 seconds + + For address family: IPv6 Unicast + TIER2_V6 peer-group member + Update group 2, subgroup 2 + Packet Queue length 0 + Inbound soft reconfiguration allowed + Community attribute sent to this neighbor(all) + Inbound path policy configured + Outbound path policy configured + Route map for incoming advertisements is *FROM_TIER2_V6 + Route map for outgoing advertisements is *TO_TIER2_V6 + 6370 accepted prefixes + Maximum prefixes allowed 8000 (warning-only) + Threshold for warning message 90% + + Connections established 1; dropped 0 + Last reset 13:27:08, No AFI/SAFI activated for peer +Local host: fc00::1, Local port: 179 +Foreign host: fc00::2, Foreign port: 57838 +Nexthop: 10.0.0.0 +Nexthop global: fc00::1 +Nexthop local: fe80::2be:75ff:fe3a:ef50 +BGP connection: shared network +BGP Connect Retry Timer in Seconds: 120 +Read thread: on Write thread: on FD used: 26 +""" + +bgp_v6_neighbors_output_asic1 = \ +""" + BGP neighbor is 2603:10e2:400:1::2, remote AS 65100, local AS 65100, internal link + Description: ASIC0 +Hostname: str-n3164-acs-2 + Member of peer-group INTERNAL_PEER_V6 for session parameters + BGP version 4, remote router ID 10.1.0.32, local router ID 8.0.0.4 + BGP state = Established, up for 13:28:48 + Last read 00:00:02, Last write 00:00:02 + Hold time is 10, keepalive interval is 3 seconds + Configured hold time is 10, keepalive interval is 3 seconds + Neighbor capabilities: + 4 Byte AS: advertised and received + AddPath: + IPv6 Unicast: RX advertised IPv6 Unicast and received + Route refresh: advertised and received(old & new) + Address Family IPv6 Unicast: advertised and received + Hostname Capability: advertised (name: str-n3164-acs-2,domain name: n/a) received (name: str-n3164-acs-2,domain name: n/a) + Graceful Restart Capabilty: advertised and received + Remote Restart timer is 240 seconds + Address families by peer: + IPv6 Unicast(preserved) + Graceful restart information: + End-of-RIB send: IPv6 Unicast + End-of-RIB received: IPv6 Unicast + Message statistics: + Inq depth is 0 + Outq depth is 0 + Sent Rcvd + Opens: 1 1 + Notifications: 0 0 + Updates: 6380 4746 + Keepalives: 16176 16176 + Route Refresh: 0 0 + Capability: 0 0 + Total: 22557 20923 + Minimum time between advertisement runs is 0 seconds + + For address family: IPv6 Unicast + INTERNAL_PEER_V6 peer-group member + Update group 1, subgroup 1 + Packet Queue length 0 + Route-Reflector Client + Inbound soft reconfiguration allowed + NEXT_HOP is always this router + Community attribute sent to this neighbor(all) + Inbound path policy configured + Outbound path policy configured + Route map for incoming advertisements is *FROM_BGP_INTERNAL_PEER_V6 + Route map for outgoing advertisements is *TO_BGP_INTERNAL_PEER_V6 + 6380 accepted prefixes + + Connections established 1; dropped 0 + Last reset 13:29:08, No AFI/SAFI activated for peer +Local host: 2603:10e2:400:1::1, Local port: 179 +Foreign host: 2603:10e2:400:1::2, Foreign port: 58984 +Nexthop: 10.1.0.0 +Nexthop global: 2603:10e2:400:1::1 +Nexthop local: fe80::42:f0ff:fe7f:104 +BGP connection: shared network +BGP Connect Retry Timer in Seconds: 10 +Read thread: on Write thread: on FD used: 22 +""" + +bgp_v6_neighbors_output_all_asics = bgp_v6_neighbors_output_asic0 +\ + bgp_v6_neighbors_output_asic1 + + +def 
mock_show_bgp_neighbor_multi_asic(param, namespace): + if param == 'bgp_v4_neighbors_output_all_asics': + if namespace == 'asic0': + return bgp_v4_neighbors_output_asic0 + if namespace == 'asic1': + return bgp_v4_neighbors_output_asic1 + if param == 'bgp_v6_neighbors_output_all_asics': + if namespace == 'asic0': + return bgp_v6_neighbors_output_asic0 + if namespace == 'asic1': + return bgp_v6_neighbors_output_asic1 + if param == 'bgp_v4_neighbors_output_asic0': + return bgp_v4_neighbors_output_asic0 + if param == 'bgp_v4_neighbors_output_asic1': + return bgp_v4_neighbors_output_asic1 + elif param == 'bgp_v6_neighbors_output_all_asics': + return bgp_v6_neighbors_output_all_asics + if param == 'bgp_v6_neighbors_output_asic0': + return bgp_v6_neighbors_output_asic0 + if param == 'bgp_v6_neighbors_output_asic1': + return bgp_v6_neighbors_output_asic1 + else: + return "" + + +def mock_show_bgp_neighbor_single_asic(request): + if request.param == 'bgp_v4_neighbors_output': + return bgp_v4_neighbors_output + elif request.param == 'bgp_v6_neighbors_output': + return bgp_v6_neighbors_output + elif request.param == 'bgp_v4_neighbor_output_adv_routes': + return bgp_v4_neighbor_output_adv_routes + elif request.param == 'bgp_v4_neighbor_output_recv_routes': + return bgp_v4_neighbor_output_recv_routes + elif request.param == 'bgp_v6_neighbor_output_adv_routes': + return bgp_v6_neighbor_output_adv_routes + elif request.param == 'bgp_v6_neighbor_output_recv_routes': + return bgp_v6_neighbor_output_recv_routes + else: + return "" + + +testData = { + 'bgp_v4_neighbors': { + 'args': [], + 'rc': 0, + 'rc_output': bgp_v4_neighbors_output + }, + 'bgp_v4_neighbor_ip_address': { + 'args': ['10.0.0.57'], + 'rc': 0, + 'rc_output': bgp_v4_neighbors_output + }, + 'bgp_v4_neighbor_invalid': { + 'args': ['20.1.1.1'], + 'rc': 2, + 'rc_err_msg': bgp_v4_neighbor_invalid + }, + 'bgp_v4_neighbor_invalid_address': { + 'args': ['invalid_address'], + 'rc': 2, + 'rc_err_msg': bgp_v4_neighbor_invalid_address + }, + 'bgp_v4_neighbor_adv_routes': { + 'args': ["10.0.0.57", "advertised-routes"], + 'rc': 0, + 'rc_output': bgp_v4_neighbor_output_adv_routes + }, + 'bgp_v4_neighbor_recv_routes': { + 'args': ["10.0.0.57", "received-routes"], + 'rc': 0, + 'rc_output': bgp_v4_neighbor_output_recv_routes + }, + 'bgp_v6_neighbors': { + 'args': [], + 'rc': 0, + 'rc_output': bgp_v6_neighbors_output + }, + 'bgp_v6_neighbor_ip_address': { + 'args': ['fc00::72'], + 'rc': 0, + 'rc_output': bgp_v6_neighbors_output + }, + 'bgp_v6_neighbor_invalid': { + 'args': ['aa00::72'], + 'rc': 2, + 'rc_err_msg': bgp_v6_neighbor_invalid + }, + 'bgp_v6_neighbor_invalid_address': { + 'args': ['20.1.1.1'], + 'rc': 2, + 'rc_err_msg': bgp_v6_neighbor_invalid_address + }, + 'bgp_v6_neighbor_adv_routes': { + 'args': ["fc00::72", "advertised-routes"], + 'rc': 0, + 'rc_output': bgp_v6_neighbor_output_adv_routes + }, + 'bgp_v6_neighbor_recv_routes': { + 'args': ["fc00::72", "received-routes"], + 'rc': 0, + 'rc_output': bgp_v6_neighbor_output_recv_routes + }, + 'bgp_v4_neighbors_multi_asic' : { + 'args': [], + 'rc': 0, + 'rc_output': bgp_v4_neighbors_output_all_asics + }, + 'bgp_v4_neighbors_asic' : { + 'args': ['-nasic1'], + 'rc': 0, + 'rc_output': bgp_v4_neighbors_output_asic1 + }, + 'bgp_v4_neighbors_external' : { + 'args': ['10.0.0.1'], + 'rc': 0, + 'rc_output': bgp_v4_neighbors_output_asic0 + }, + 'bgp_v4_neighbors_internal' : { + 'args': ['10.1.0.1'], + 'rc': 0, + 'rc_output': bgp_v4_neighbors_output_asic1 + }, + 'bgp_v6_neighbors_multi_asic' : { + 'args': [], + 
'rc': 0, + 'rc_output': bgp_v6_neighbors_output_all_asics + }, + 'bgp_v6_neighbors_asic' : { + 'args': ['-nasic0'], + 'rc': 0, + 'rc_output': bgp_v6_neighbors_output_asic0 + }, + 'bgp_v6_neighbors_external' : { + 'args': ['fc00::2'], + 'rc': 0, + 'rc_output': bgp_v6_neighbors_output_asic0 + }, + 'bgp_v6_neighbors_internal' : { + 'args': ['2603:10e2:400:1::2'], + 'rc': 0, + 'rc_output': bgp_v6_neighbors_output_asic1 + }, + 'bgp_v6_neighbor_warning' : { + 'args': ['2603:10e2:400:1::2', '-nasic0'], + 'rc': 0, + 'rc_warning_msg': bgp_v6_neighbor_output_warning + }, + +} \ No newline at end of file diff --git a/tests/bgp_commands_input/bgp_network_test_vector.py b/tests/bgp_commands_input/bgp_network_test_vector.py new file mode 100644 index 0000000000..da93e8e8e8 --- /dev/null +++ b/tests/bgp_commands_input/bgp_network_test_vector.py @@ -0,0 +1,522 @@ +bgp_v4_network = \ +""" +BGP table version is 6405, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed +Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? - incomplete + + Network Next Hop Metric LocPrf Weight Path +*= 0.0.0.0/0 10.0.0.63 0 64600 65534 6666 6667 i +*= 10.0.0.61 0 64600 65534 6666 6667 i +*= 10.0.0.59 0 64600 65534 6666 6667 i +*> 10.0.0.57 0 64600 65534 6666 6667 i +*> 10.1.0.32/32 0.0.0.0 0 32768 i +*> 100.1.0.29/32 10.0.0.57 0 64600 i +*> 100.1.0.30/32 10.0.0.59 0 64600 i +*> 100.1.0.31/32 10.0.0.61 0 64600 i +*> 100.1.0.32/32 10.0.0.63 0 64600 i +*> 192.168.0.0/21 0.0.0.0 0 32768 i +*= 192.168.8.0/25 10.0.0.63 0 64600 65501 i +*= 10.0.0.61 0 64600 65501 i +*= 10.0.0.59 0 64600 65501 i +*> 10.0.0.57 0 64600 65501 i +*= 192.168.8.128/25 10.0.0.63 0 64600 65501 i +*= 10.0.0.61 0 64600 65501 i +*= 10.0.0.59 0 64600 65501 i +*> 10.0.0.57 0 64600 65501 i +*= 192.168.16.0/25 10.0.0.63 0 64600 65502 i +*= 10.0.0.61 0 64600 65502 i +*= 10.0.0.59 0 64600 65502 i +*> 10.0.0.57 0 64600 65502 i +*= 192.168.16.128/25 + 10.0.0.63 0 64600 65502 i +*= 10.0.0.61 0 64600 65502 i +*= 10.0.0.59 0 64600 65502 i +*> 10.0.0.57 0 64600 65502 i +*= 192.168.24.0/25 10.0.0.63 0 64600 65503 i +*= 10.0.0.61 0 64600 65503 i +*= 10.0.0.59 0 64600 65503 i +*> 10.0.0.57 0 64600 65503 i +*= 192.168.24.128/25 + 10.0.0.63 0 64600 65503 i +*= 10.0.0.61 0 64600 65503 i +*= 10.0.0.59 0 64600 65503 i +*> 10.0.0.57 0 64600 65503 i +*= 192.168.32.0/25 10.0.0.63 0 64600 65504 i +*= 10.0.0.61 0 64600 65504 i +*= 10.0.0.59 0 64600 65504 i +*> 10.0.0.57 0 64600 65504 i +""" + +bgp_v4_network_ip_address = \ +""" +BGP routing table entry for 193.11.248.128/25 +Paths: (4 available, best #4, table default) + Advertised to non peer-group peers: + 10.0.0.57 10.0.0.59 10.0.0.61 10.0.0.63 + 64600 65534 64799 65515 + 10.0.0.61 from 10.0.0.61 (100.1.0.31) + Origin IGP, valid, external, multipath + Community: 5060:12345 + Last update: Tue Apr 20 05:54:41 2021 + 64600 65534 64799 65515 + 10.0.0.59 from 10.0.0.59 (100.1.0.30) + Origin IGP, valid, external, multipath + Community: 5060:12345 + Last update: Tue Apr 20 05:54:19 2021 + 64600 65534 64799 65515 + 10.0.0.63 from 10.0.0.63 (100.1.0.32) + Origin IGP, valid, external, multipath + Community: 5060:12345 + Last update: Tue Apr 20 05:54:16 2021 + 64600 65534 64799 65515 + 10.0.0.57 from 10.0.0.57 (100.1.0.29) + Origin IGP, valid, external, multipath, best (Router ID) + Community: 5060:12345 + Last update: Tue Apr 20 05:54:16 2021 +""" + 
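+# How these vectors are consumed (a minimal sketch; the vector name below is
+# just one example): tests/show_bgp_network_test.py looks each entry up in
+# testData, invokes the click command with its 'args', and compares the result
+# against 'rc'/'rc_output' (or 'rc_err_msg' for failure cases):
+#
+#     input = testData['bgp_v4_network_ip_address']
+#     result = CliRunner().invoke(
+#         show.cli.commands["ip"].commands["bgp"].commands["network"],
+#         input['args'])
+#     assert result.exit_code == input['rc']
+#     assert result.output == input['rc_output']
+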
+bgp_v4_network_longer_prefixes_error = \ +"""The parameter option: "longer-prefixes" only available if passing a network prefix +EX: 'show ip bgp network 10.0.0.0/24 longer-prefixes' +Aborted! +""" + +bgp_v4_network_bestpath = \ +""" +BGP routing table entry for 193.11.248.128/25 +Paths: (4 available, best #4, table default) + Advertised to non peer-group peers: + 10.0.0.57 10.0.0.59 10.0.0.61 10.0.0.63 + 64600 65534 64799 65515 + 10.0.0.57 from 10.0.0.57 (100.1.0.29) + Origin IGP, valid, external, multipath, best (Router ID) + Community: 5060:12345 + Last update: Tue Apr 20 05:54:15 2021 +""" + +bgp_v6_network = \ +""" +BGP table version is 6407, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed +Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? - incomplete + + Network Next Hop Metric LocPrf Weight Path +*= ::/0 fc00::7e 0 64600 65534 6666 6667 i +*= fc00::7a 0 64600 65534 6666 6667 i +*= fc00::76 0 64600 65534 6666 6667 i +*> fc00::72 0 64600 65534 6666 6667 i +*> 2064:100::1d/128 fc00::72 0 64600 i +*> 2064:100::1e/128 fc00::76 0 64600 i +*> 2064:100::1f/128 fc00::7a 0 64600 i +*> 2064:100::20/128 fc00::7e 0 64600 i +*= 20c0:a808::/64 fc00::7e 0 64600 65501 i +*= fc00::7a 0 64600 65501 i +*= fc00::76 0 64600 65501 i +*> fc00::72 0 64600 65501 i +*= 20c0:a808:0:80::/64 + fc00::7e 0 64600 65501 i +*= fc00::7a 0 64600 65501 i +*= fc00::76 0 64600 65501 i +*> fc00::72 0 64600 65501 i +*= 20c0:a810::/64 fc00::7e 0 64600 65502 i +*= fc00::7a 0 64600 65502 i +*= fc00::76 0 64600 65502 i +*> fc00::72 0 64600 65502 i +*= 20c0:a810:0:80::/64 + fc00::7e 0 64600 65502 i +*= fc00::7a 0 64600 65502 i +*= fc00::76 0 64600 65502 i +*> fc00::72 0 64600 65502 i +*= 20c0:a818::/64 fc00::7e 0 64600 65503 i +*= fc00::7a 0 64600 65503 i +*= fc00::76 0 64600 65503 i +*> fc00::72 0 64600 65503 i +*= 20c0:a818:0:80::/64 + fc00::7e 0 64600 65503 i +*= fc00::7a 0 64600 65503 i +*= fc00::76 0 64600 65503 i +*> fc00::72 0 64600 65503 i +*= 20c0:a820::/64 fc00::7e 0 64600 65504 i +*= fc00::7a 0 64600 65504 i +*= fc00::76 0 64600 65504 i +*> fc00::72 0 64600 65504 i +*= 20c0:a820:0:80::/64 + fc00::7e 0 64600 65504 i +*= fc00::7a 0 64600 65504 i +*= fc00::76 0 64600 65504 i +*> fc00::72 0 64600 65504 i +""" + +bgp_v6_network_ip_address = \ +""" +BGP routing table entry for 20c0:a820:0:80::/64 +Paths: (4 available, best #4, table default) + Advertised to non peer-group peers: + fc00::72 fc00::76 fc00::7a fc00::7e + 64600 65504 + fc00::7e from fc00::7e (100.1.0.32) + (fe80::1850:e9ff:fef9:27cb) (prefer-global) + Origin IGP, valid, external, multipath + Community: 5060:12345 + Last update: Tue Apr 20 05:54:17 2021 + 64600 65504 + fc00::7a from fc00::7a (100.1.0.31) + (fe80::1810:25ff:fe01:c153) (prefer-global) + Origin IGP, valid, external, multipath + Community: 5060:12345 + Last update: Tue Apr 20 05:54:17 2021 + 64600 65504 + fc00::76 from fc00::76 (100.1.0.30) + (fe80::80a7:74ff:fee1:d66d) (prefer-global) + Origin IGP, valid, external, multipath + Community: 5060:12345 + Last update: Tue Apr 20 05:54:17 2021 + 64600 65504 + fc00::72 from fc00::72 (100.1.0.29) + (fe80::90ec:bcff:fe4b:1e3e) (prefer-global) + Origin IGP, valid, external, multipath, best (Router ID) + Community: 5060:12345 + Last update: Tue Apr 20 05:54:16 2021 +""" + +bgp_v6_network_longer_prefixes_error = \ +"""The parameter option: "longer-prefixes" 
only available if passing a network prefix +EX: 'show ipv6 bgp network fc00:1::/64 longer-prefixes' +Aborted! +""" + +bgp_v6_network_longer_prefixes = \ +""" +BGP table version is 6407, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed +Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? - incomplete + + Network Next Hop Metric LocPrf Weight Path +*= 20c0:a820:0:80::/64 + fc00::7e 0 64600 65504 i +*= fc00::7a 0 64600 65504 i +*= fc00::76 0 64600 65504 i +*> fc00::72 0 64600 65504 i + +Displayed 1 routes and 25602 total paths +""" + +bgp_v6_network_bestpath = \ +""" +BGP routing table entry for 20c0:a820:0:80::/64 +Paths: (4 available, best #4, table default) + Advertised to non peer-group peers: + fc00::72 fc00::76 fc00::7a fc00::7e + 64600 65504 + fc00::72 from fc00::72 (100.1.0.29) + (fe80::90ec:bcff:fe4b:1e3e) (prefer-global) + Origin IGP, valid, external, multipath, best (Router ID) + Community: 5060:12345 + Last update: Tue Apr 20 05:54:15 2021 +""" + +multi_asic_bgp_network_err = \ +"""Error: -n/--namespace option required. provide namespace from list ['asic0', 'asic1']""" + +bgp_v4_network_asic0 = \ +""" +BGP table version is 11256, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed +Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? - incomplete + + Network Next Hop Metric LocPrf Weight Path +* i0.0.0.0/0 10.1.0.2 100 0 65200 6666 6667 i +* i 10.1.0.0 100 0 65200 6666 6667 i +*= 10.0.0.5 0 65200 6666 6667 i +*> 10.0.0.1 0 65200 6666 6667 i +* i8.0.0.0/32 10.1.0.2 0 100 0 i +* i 10.1.0.0 0 100 0 i +* 0.0.0.0 0 32768 ? +*> 0.0.0.0 0 32768 i +*=i8.0.0.1/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.2/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.3/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*>i8.0.0.4/32 10.1.0.0 0 100 0 i +*>i8.0.0.5/32 10.1.0.2 0 100 0 i +* i10.0.0.0/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +* i10.0.0.4/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +*=i10.0.0.8/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.12/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.32/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.34/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.36/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.38/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.40/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.42/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.44/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? 
+""" + +bgp_v4_network_ip_address_asic0 = \ +""" + BGP routing table entry for 10.0.0.44/31 +Paths: (2 available, best #2, table default, not advertised outside local AS) + Not advertised to any peer + Local + 10.1.0.2 from 10.1.0.2 (8.0.0.5) + Origin incomplete, metric 0, localpref 100, valid, internal, multipath + Community: local-AS + Originator: 8.0.0.5, Cluster list: 8.0.0.5 + Last update: Thu Apr 22 02:13:31 2021 + + Local + 10.1.0.0 from 10.1.0.0 (8.0.0.4) + Origin incomplete, metric 0, localpref 100, valid, internal, multipath, best (Router ID) + Community: local-AS + Originator: 8.0.0.4, Cluster list: 8.0.0.4 + Last update: Thu Apr 22 02:13:31 2021 +""" +bgp_v4_network_bestpath_asic0 = \ +""" +BGP routing table entry for 10.0.0.44/31 +Paths: (2 available, best #2, table default, not advertised outside local AS) + Not advertised to any peer + Local + 10.1.0.0 from 10.1.0.0 (8.0.0.4) + Origin incomplete, metric 0, localpref 100, valid, internal, multipath, best (Router ID) + Community: local-AS + Originator: 8.0.0.4, Cluster list: 8.0.0.4 + Last update: Thu Apr 22 02:13:30 2021 +""" + +bgp_v6_network_asic0 = \ +""" +BGP table version is 12849, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed +Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? - incomplete + + Network Next Hop Metric LocPrf Weight Path +* i::/0 2603:10e2:400:1::1 + 100 0 65200 6666 6667 i +* i 2603:10e2:400:1::5 + 100 0 65200 6666 6667 i +*= fc00::6 0 65200 6666 6667 i +*> fc00::2 0 65200 6666 6667 i +* i2064:100::1/128 2603:10e2:400:1::1 + 100 0 65200 i +* i 2603:10e2:400:1::5 + 100 0 65200 i +*> fc00::2 0 65200 i +* i2064:100::3/128 2603:10e2:400:1::1 + 100 0 65200 i +* i 2603:10e2:400:1::5 + 100 0 65200 i +*> fc00::6 0 65200 i +*=i2064:100::5/128 2603:10e2:400:1::5 + 100 0 65200 i +*>i 2603:10e2:400:1::1 + 100 0 65200 i +*>i2064:100::7/128 2603:10e2:400:1::1 + 100 0 65200 i +*=i 2603:10e2:400:1::5 + 100 0 65200 i +*>i20c0:a800::/64 2603:10e2:400:1::1 + 100 0 64004 i +*=i 2603:10e2:400:1::5 + 100 0 64004 i +*>i20c0:a800:0:80::/64 + 2603:10e2:400:1::1 + 100 0 64004 i +*=i 2603:10e2:400:1::5 + 100 0 64004 i +*>i20c0:a808::/64 2603:10e2:400:1::1 + 100 0 64004 i +*=i 2603:10e2:400:1::5 + 100 0 64004 i +""" + +bgp_v6_network_ip_address_asic0 = \ +""" +BGP routing table entry for 20c0:a808:0:80::/64 +Paths: (2 available, best #1, table default) + Advertised to non peer-group peers: + fc00::2 fc00::6 + 64004 + 2603:10e2:400:1::1 from 2603:10e2:400:1::1 (8.0.0.4) + Origin IGP, localpref 100, valid, internal, multipath, best (Router ID) + Community: 8075:8823 + Originator: 8.0.0.4, Cluster list: 8.0.0.4 + Last update: Thu Apr 22 02:13:31 2021 + + 64004 + 2603:10e2:400:1::5 from 2603:10e2:400:1::5 (8.0.0.5) + Origin IGP, localpref 100, valid, internal, multipath + Community: 8075:8823 + Originator: 8.0.0.5, Cluster list: 8.0.0.5 + Last update: Thu Apr 22 02:13:31 2021 +""" + +bgp_v6_network_ip_address_asic0_bestpath = \ +""" +BGP routing table entry for 20c0:a808:0:80::/64 +Paths: (2 available, best #1, table default) + Advertised to non peer-group peers: + fc00::2 fc00::6 + 64004 + 2603:10e2:400:1::1 from 2603:10e2:400:1::1 (8.0.0.4) + Origin IGP, localpref 100, valid, internal, multipath, best (Router ID) + Community: 8075:8823 + Originator: 8.0.0.4, Cluster list: 8.0.0.4 + Last update: Thu Apr 22 02:13:30 2021 +""" + + +def 
mock_show_bgp_network_single_asic(request): + param = request.param + if param == 'bgp_v4_network': + return bgp_v4_network + elif param == 'bgp_v4_network_ip_address': + return bgp_v4_network_ip_address + elif param == 'bgp_v4_network_bestpath': + return bgp_v4_network_bestpath + elif param == 'bgp_v6_network': + return bgp_v6_network + elif param == 'bgp_v6_network_ip_address': + return bgp_v6_network_ip_address + elif param == 'bgp_v6_network_longer_prefixes': + return bgp_v6_network_longer_prefixes + elif param == 'bgp_v6_network_bestpath': + return bgp_v6_network_bestpath + else: + return "" + + +def mock_show_bgp_network_multi_asic(param): + if param == "bgp_v4_network_asic0": + return bgp_v4_network_asic0 + elif param == 'bgp_v4_network_ip_address_asic0': + return bgp_v4_network_ip_address_asic0 + elif param == 'bgp_v4_network_bestpath_asic0': + return bgp_v4_network_bestpath_asic0 + if param == "bgp_v6_network_asic0": + return bgp_v4_network_asic0 + elif param == 'bgp_v6_network_ip_address_asic0': + return bgp_v6_network_ip_address_asic0 + elif param == 'bgp_v6_network_bestpath_asic0': + return bgp_v6_network_ip_address_asic0_bestpath + else: + return '' + + +testData = { + 'bgp_v4_network': { + 'args': [], + 'rc': 0, + 'rc_output': bgp_v4_network + }, + 'bgp_v4_network_ip_address': { + 'args': [' 193.11.248.128/25'], + 'rc': 0, + 'rc_output': bgp_v4_network_ip_address + }, + 'bgp_v4_network_bestpath': { + 'args': [' 193.11.248.128/25', 'bestpath'], + 'rc': 0, + 'rc_output': bgp_v4_network_bestpath + }, + 'bgp_v4_network_longer_prefixes_error': { + 'args': [' 193.11.248.128', 'longer-prefixes'], + 'rc': 1, + 'rc_output': bgp_v4_network_longer_prefixes_error + }, + 'bgp_v6_network': { + 'args': [], + 'rc': 0, + 'rc_output': bgp_v6_network + }, + 'bgp_v6_network_ip_address': { + 'args': [' 20c0:a820:0:80::/64'], + 'rc': 0, + 'rc_output': bgp_v6_network_ip_address + }, + 'bgp_v6_network_bestpath': { + 'args': [' 20c0:a820:0:80::/64', 'bestpath'], + 'rc': 0, + 'rc_output': bgp_v6_network_bestpath + }, + 'bgp_v6_network_longer_prefixes_error': { + 'args': [' 20c0:a820:0:80::', 'longer-prefixes'], + 'rc': 1, + 'rc_output': bgp_v6_network_longer_prefixes_error + }, + 'bgp_v6_network_longer_prefixes': { + 'args': [' 20c0:a820:0:80::/64', 'longer-prefixes'], + 'rc': 0, + 'rc_output': bgp_v6_network_longer_prefixes + }, + 'bgp_v4_network_multi_asic': { + 'args': [], + 'rc': 2, + 'rc_err_msg': multi_asic_bgp_network_err + }, + 'bgp_v4_network_asic0': { + 'args': ['-nasic0'], + 'rc': 0, + 'rc_output': bgp_v4_network_asic0 + }, + 'bgp_v4_network_ip_address_asic0': { + 'args': ['-nasic0', '10.0.0.44'], + 'rc': 0, + 'rc_output': bgp_v4_network_ip_address_asic0 + }, + 'bgp_v4_network_bestpath_asic0': { + 'args': ['-nasic0', '10.0.0.44', 'bestpath'], + 'rc': 0, + 'rc_output': bgp_v4_network_bestpath_asic0 + }, + 'bgp_v6_network_multi_asic': { + 'args': [], + 'rc': 2, + 'rc_err_msg': multi_asic_bgp_network_err + }, + 'bgp_v6_network_asic0': { + 'args': ['-nasic0'], + 'rc': 0, + 'rc_output': bgp_v4_network_asic0 + }, + 'bgp_v6_network_ip_address_asic0': { + 'args': ['-nasic0', '20c0:a808:0:80::/64'], + 'rc': 0, + 'rc_output': bgp_v6_network_ip_address_asic0 + }, + 'bgp_v6_network_bestpath_asic0': { + 'args': ['-nasic0', '20c0:a808:0:80::/64', 'bestpath'], + 'rc': 0, + 'rc_output': bgp_v6_network_ip_address_asic0_bestpath + } +} \ No newline at end of file diff --git a/tests/conftest.py b/tests/conftest.py index 4ff1a002bd..6ad352b224 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,14 
+1,24 @@ import json import os +import re import sys from unittest import mock + import pytest from sonic_py_common import device_info from swsscommon.swsscommon import ConfigDBConnector from .mock_tables import dbconnector from . import show_ip_route_common +from .bgp_commands_input.bgp_neighbor_test_vector import( + mock_show_bgp_neighbor_single_asic, + mock_show_bgp_neighbor_multi_asic, + ) +from .bgp_commands_input.bgp_network_test_vector import ( + mock_show_bgp_network_single_asic, + mock_show_bgp_network_multi_asic + ) from . import config_int_ip_common test_path = os.path.dirname(os.path.abspath(__file__)) @@ -108,7 +118,6 @@ def setup_t1_topo(): @pytest.fixture def setup_single_bgp_instance(request): import utilities_common.bgp_util as bgp_util - if request.param == 'v4': bgp_mocked_json = os.path.join( test_path, 'mock_tables', 'ipv4_bgp_summary.json') @@ -119,7 +128,7 @@ def setup_single_bgp_instance(request): bgp_mocked_json = os.path.join( test_path, 'mock_tables', 'dummy.json') - def mock_run_bgp_command(vtysh_cmd, bgp_namespace): + def mock_show_bgp_summary(vtysh_cmd, bgp_namespace): if os.path.isfile(bgp_mocked_json): with open(bgp_mocked_json) as json_data: mock_frr_data = json_data.read() @@ -140,7 +149,7 @@ def mock_run_show_ip_route_commands(request): elif request.param == 'ip_route': return show_ip_route_common.show_ip_route_expected_output elif request.param == 'ip_specific_route': - return show_ip_route_common.show_specific_ip_route_expected_output + return show_ip_route_common.show_specific_ip_route_expected_output elif request.param == 'ip_special_route': return show_ip_route_common.show_special_ip_route_expected_output elif request.param == 'ipv6_route': @@ -150,18 +159,26 @@ def mock_run_show_ip_route_commands(request): else: return "" - + if any ([request.param == 'ipv6_route_err', request.param == 'ip_route',\ request.param == 'ip_specific_route', request.param == 'ip_special_route',\ request.param == 'ipv6_route', request.param == 'ipv6_specific_route']): bgp_util.run_bgp_command = mock.MagicMock( return_value=mock_run_show_ip_route_commands(request)) + elif request.param.startswith('bgp_v4_neighbor') or \ + request.param.startswith('bgp_v6_neighbor'): + bgp_util.run_bgp_command = mock.MagicMock( + return_value=mock_show_bgp_neighbor_single_asic(request)) + elif request.param.startswith('bgp_v4_network') or \ + request.param.startswith('bgp_v6_network'): + bgp_util.run_bgp_command = mock.MagicMock( + return_value=mock_show_bgp_network_single_asic(request)) elif request.param == 'ip_route_for_int_ip': _old_run_bgp_command = bgp_util.run_bgp_command bgp_util.run_bgp_command = mock_run_bgp_command_for_static else: bgp_util.run_bgp_command = mock.MagicMock( - return_value=mock_run_bgp_command("", "")) + return_value=mock_show_bgp_summary("", "")) yield @@ -191,6 +208,11 @@ def setup_multi_asic_bgp_instance(request): m_asic_json_file = 'ip_special_recursive_route.json' elif request.param == 'ip_route_summary': m_asic_json_file = 'ip_route_summary.txt' + elif request.param.startswith('bgp_v4_network') or \ + request.param.startswith('bgp_v6_network') or \ + request.param.startswith('bgp_v4_neighbor') or \ + request.param.startswith('bgp_v6_neighbor'): + m_asic_json_file = request.param else: m_asic_json_file = os.path.join( test_path, 'mock_tables', 'dummy.json') @@ -206,6 +228,14 @@ def mock_run_bgp_command_for_static(vtysh_cmd, bgp_namespace=""): return "" def mock_run_bgp_command(vtysh_cmd, bgp_namespace): + if m_asic_json_file.startswith('bgp_v4_network') or \ + 
m_asic_json_file.startswith('bgp_v6_network'): + return mock_show_bgp_network_multi_asic(m_asic_json_file) + + if m_asic_json_file.startswith('bgp_v4_neighbor') or \ + m_asic_json_file.startswith('bgp_v6_neighbor'): + return mock_show_bgp_neighbor_multi_asic(m_asic_json_file, bgp_namespace) + bgp_mocked_json = os.path.join( test_path, 'mock_tables', bgp_namespace, m_asic_json_file) if os.path.isfile(bgp_mocked_json): @@ -241,3 +271,4 @@ def setup_ip_route_commands(): import show.main as show return show + diff --git a/tests/mock_tables/asic0/config_db.json b/tests/mock_tables/asic0/config_db.json index 0643baf57f..adc620ffb1 100644 --- a/tests/mock_tables/asic0/config_db.json +++ b/tests/mock_tables/asic0/config_db.json @@ -203,5 +203,25 @@ }, "VLAN_MEMBER|Vlan1000|PortChannel1002": { "tagging_mode": "tagged" + }, + "BGP_NEIGHBOR|10.0.0.1": { + "rrclient": "0", + "name": "ARISTA01T2", + "local_addr": "10.0.0.0", + "nhopself": "0", + "admin_status": "up", + "holdtime": "10", + "asn": "65200", + "keepalive": "3" + }, + "BGP_NEIGHBOR|fc00::2": { + "rrclient": "0", + "name": "ARISTA01T2", + "local_addr": "fc00::1", + "nhopself": "0", + "admin_status": "up", + "holdtime": "10", + "asn": "65200", + "keepalive": "3" } } diff --git a/tests/mock_tables/asic1/config_db.json b/tests/mock_tables/asic1/config_db.json index 5aab92c45a..f5caae6d35 100644 --- a/tests/mock_tables/asic1/config_db.json +++ b/tests/mock_tables/asic1/config_db.json @@ -165,5 +165,25 @@ "state": "enabled", "auto_restart": "enabled", "high_mem_alert": "disabled" + }, + "BGP_INTERNAL_NEIGHBOR|10.1.0.1": { + "rrclient": "0", + "name": "ASIC0", + "local_addr": "10.1.0.0", + "nhopself": "0", + "admin_status": "up", + "holdtime": "0", + "asn": "65100", + "keepalive": "0" + }, + "BGP_INTERNAL_NEIGHBOR|2603:10e2:400:1::2": { + "rrclient": "0", + "name": "ASIC0", + "local_addr": "2603:10e2:400:1::1", + "nhopself": "0", + "admin_status": "up", + "holdtime": "0", + "asn": "65100", + "keepalive": "0" } } diff --git a/tests/mock_tables/mock_multi_asic.py b/tests/mock_tables/mock_multi_asic.py index 46f943369d..0e5589c301 100644 --- a/tests/mock_tables/mock_multi_asic.py +++ b/tests/mock_tables/mock_multi_asic.py @@ -63,6 +63,8 @@ def mock_is_multi_asic(): def mock_get_namespace_list(namespace=None): + if namespace: + return [namespace] return ['asic0', 'asic1'] diff --git a/tests/pfcstat_test.py b/tests/pfcstat_test.py index 6e0a76d364..5115ad11f1 100644 --- a/tests/pfcstat_test.py +++ b/tests/pfcstat_test.py @@ -1,3 +1,4 @@ +import importlib import os import shutil import sys @@ -247,3 +248,7 @@ def teardown_class(cls): os.environ["UTILITIES_UNIT_TESTING"] = "0" os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" del_cached_stats() + import mock_tables.mock_single_asic + importlib.reload(mock_tables.mock_single_asic) + import pfcwd.main + importlib.reload(pfcwd.main) diff --git a/tests/pfcwd_test.py b/tests/pfcwd_test.py index be04a11f25..b0af050233 100644 --- a/tests/pfcwd_test.py +++ b/tests/pfcwd_test.py @@ -497,3 +497,7 @@ def teardown_class(cls): ) os.environ["UTILITIES_UNIT_TESTING"] = "0" os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + import mock_tables.mock_single_asic + importlib.reload(mock_tables.mock_single_asic) + import pfcwd.main + importlib.reload(pfcwd.main) \ No newline at end of file diff --git a/tests/show_bgp_neighbor_test.py b/tests/show_bgp_neighbor_test.py new file mode 100644 index 0000000000..de84223060 --- /dev/null +++ b/tests/show_bgp_neighbor_test.py @@ -0,0 +1,128 @@ +import importlib +import os + 
+import pytest + +from click.testing import CliRunner +from .bgp_commands_input.bgp_neighbor_test_vector import * + + +def executor(test_vector, show): + runner = CliRunner() + input = testData[test_vector] + if test_vector.startswith('bgp_v6'): + exec_cmd = show.cli.commands["ipv6"].commands["bgp"].commands["neighbors"] + else: + exec_cmd = show.cli.commands["ip"].commands["bgp"].commands["neighbors"] + + result = runner.invoke(exec_cmd, input['args']) + + print(result.exit_code) + print(result.output) + + if input['rc'] == 0: + assert result.exit_code == 0 + else: + assert result.exit_code == input['rc'] + + if 'rc_err_msg' in input: + output = result.output.strip().split("\n")[-1] + assert input['rc_err_msg'] == output + + if 'rc_output' in input: + assert result.output == input['rc_output'] + + if 'rc_warning_msg' in input: + output = result.output.strip().split("\n")[0] + assert input['rc_warning_msg'] in output + + +class TestBgpNeighbors(object): + + @classmethod + def setup_class(cls): + print("SETUP") + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + from .mock_tables import dbconnector + dbconnector.load_namespace_config() + + @pytest.mark.parametrize('setup_single_bgp_instance, test_vector', + [ + ('bgp_v4_neighbors_output', 'bgp_v4_neighbors'), + ('bgp_v4_neighbors_output', + 'bgp_v4_neighbor_ip_address'), + ('bgp_v4_neighbor_invalid_neigh', + 'bgp_v4_neighbor_invalid'), + ('bgp_v4_neighbor_invalid_address', + 'bgp_v4_neighbor_invalid_address'), + ('bgp_v4_neighbor_output_adv_routes', + 'bgp_v4_neighbor_adv_routes'), + ('bgp_v4_neighbor_output_recv_routes', + 'bgp_v4_neighbor_recv_routes'), + ('bgp_v6_neighbors_output', 'bgp_v6_neighbors'), + ('bgp_v6_neighbors_output', + 'bgp_v6_neighbor_ip_address'), + ('bgp_v6_neighbor_invalid', + 'bgp_v6_neighbor_invalid'), + ('bgp_v6_neighbor_invalid_address', + 'bgp_v6_neighbor_invalid_address'), + ('bgp_v6_neighbor_output_adv_routes', + 'bgp_v6_neighbor_adv_routes'), + ('bgp_v6_neighbor_output_recv_routes', + 'bgp_v6_neighbor_recv_routes'), + ], + indirect=['setup_single_bgp_instance']) + def test_bgp_neighbors(self, + setup_bgp_commands, + setup_single_bgp_instance, + test_vector): + show = setup_bgp_commands + executor(test_vector, show) + + +class TestBgpNeighborsMultiAsic(object): + @classmethod + def setup_class(cls): + print("SETUP") + from .mock_tables import mock_multi_asic + importlib.reload(mock_multi_asic) + from .mock_tables import dbconnector + dbconnector.load_namespace_config() + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance, test_vector', + [ + ('bgp_v4_neighbors_output_all_asics', + 'bgp_v4_neighbors_multi_asic'), + ('bgp_v4_neighbors_output_asic1', + 'bgp_v4_neighbors_asic'), + ('bgp_v4_neighbors_output_asic1', + 'bgp_v4_neighbors_internal'), + ('bgp_v4_neighbors_output_asic0', + 'bgp_v4_neighbors_external'), + ('bgp_v6_neighbor_output_warning', + 'bgp_v6_neighbor_warning'), + ('bgp_v6_neighbors_output_all_asics', + 'bgp_v6_neighbors_multi_asic'), + ('bgp_v6_neighbors_output_asic0', + 'bgp_v6_neighbors_asic'), + ('bgp_v6_neighbors_output_asic0', + 'bgp_v6_neighbors_external'), + ('bgp_v6_neighbors_output_asic1', + 'bgp_v6_neighbors_internal') + ], + indirect=['setup_multi_asic_bgp_instance']) + def test_bgp_neighbors(self, + setup_bgp_commands, + setup_multi_asic_bgp_instance, + test_vector): + show = setup_bgp_commands + executor(test_vector, show) + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + from .mock_tables import mock_single_asic + 
importlib.reload(mock_single_asic)
+        from .mock_tables import dbconnector
+        dbconnector.load_database_config()
diff --git a/tests/show_bgp_network_test.py b/tests/show_bgp_network_test.py
new file mode 100644
index 0000000000..f610199538
--- /dev/null
+++ b/tests/show_bgp_network_test.py
@@ -0,0 +1,100 @@
+import importlib
+import os
+
+import pytest
+
+from click.testing import CliRunner
+from .bgp_commands_input import bgp_network_test_vector
+
+
+def executor(test_vector, show):
+    runner = CliRunner()
+    input = bgp_network_test_vector.testData[test_vector]
+    if test_vector.startswith('bgp_v6'):
+        exec_cmd = show.cli.commands["ipv6"].commands["bgp"].commands["network"]
+    else:
+        exec_cmd = show.cli.commands["ip"].commands["bgp"].commands["network"]
+
+    result = runner.invoke(exec_cmd, input['args'])
+
+    print(result.exit_code)
+    print(result.output)
+
+    if input['rc'] == 0:
+        assert result.exit_code == 0
+    else:
+        assert result.exit_code == input['rc']
+
+    if 'rc_err_msg' in input:
+        output = result.output.strip().split("\n")[-1]
+        assert input['rc_err_msg'] in output
+
+    if 'rc_output' in input:
+        assert result.output == input['rc_output']
+
+    if 'rc_warning_msg' in input:
+        output = result.output.strip().split("\n")[0]
+        assert input['rc_warning_msg'] in output
+
+
+class TestBgpNetwork(object):
+
+    @classmethod
+    def setup_class(cls):
+        from .mock_tables import mock_single_asic
+        importlib.reload(mock_single_asic)
+        from .mock_tables import dbconnector
+        dbconnector.load_database_config()
+
+    @pytest.mark.parametrize(
+        'setup_single_bgp_instance, test_vector',
+        [('bgp_v4_network', 'bgp_v4_network'),
+         ('bgp_v6_network', 'bgp_v6_network'),
+         ('bgp_v4_network_ip_address', 'bgp_v4_network_ip_address'),
+         ('bgp_v6_network_ip_address', 'bgp_v6_network_ip_address'),
+         ('bgp_v6_network_bestpath', 'bgp_v6_network_bestpath'),
+         ('bgp_v4_network_bestpath', 'bgp_v4_network_bestpath'),
+         ('bgp_v6_network_longer_prefixes', 'bgp_v6_network_longer_prefixes'),
+         ('bgp_v4_network', 'bgp_v4_network_longer_prefixes_error'),
+         ('bgp_v4_network', 'bgp_v6_network_longer_prefixes_error')],
+        indirect=['setup_single_bgp_instance'])
+    def test_bgp_network(self, setup_bgp_commands, test_vector,
+                         setup_single_bgp_instance):
+        show = setup_bgp_commands
+        executor(test_vector, show)
+
+
+class TestMultiAsicBgpNetwork(object):
+
+    @classmethod
+    def setup_class(cls):
+        print("SETUP")
+        from .mock_tables import mock_multi_asic
+        importlib.reload(mock_multi_asic)
+        from .mock_tables import dbconnector
+        dbconnector.load_namespace_config()
+
+    @pytest.mark.parametrize(
+        'setup_multi_asic_bgp_instance, test_vector',
+        [('bgp_v4_network', 'bgp_v4_network_multi_asic'),
+         ('bgp_v6_network', 'bgp_v6_network_multi_asic'),
+         ('bgp_v4_network_asic0', 'bgp_v4_network_asic0'),
+         ('bgp_v4_network_ip_address_asic0', 'bgp_v4_network_ip_address_asic0'),
+         ('bgp_v4_network_bestpath_asic0', 'bgp_v4_network_bestpath_asic0'),
+         ('bgp_v6_network_asic0', 'bgp_v6_network_asic0'),
+         ('bgp_v6_network_ip_address_asic0', 'bgp_v6_network_ip_address_asic0'),
+         ('bgp_v6_network_bestpath_asic0', 'bgp_v6_network_bestpath_asic0')],
+        indirect=['setup_multi_asic_bgp_instance'])
+    def test_bgp_network(self, setup_bgp_commands, test_vector,
+                         setup_multi_asic_bgp_instance):
+        show = setup_bgp_commands
+        executor(test_vector, show)
+
+    @classmethod
+    def teardown_class(cls):
+        print("TEARDOWN")
+        from .mock_tables import mock_single_asic
+        importlib.reload(mock_single_asic)
+        from .mock_tables import dbconnector
+        dbconnector.load_database_config()
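For orientation before the next diff: every show command touched above funnels through bgp_util.run_bgp_command(), and the per-namespace fan-out they share reduces to the pattern sketched below. This is a minimal illustration, not code from the patch; run_for_namespaces is an invented name, while the two helpers are the real ones exercised here.

    from sonic_py_common import multi_asic
    import utilities_common.bgp_util as bgp_util

    def run_for_namespaces(vtysh_cmd, namespace=None):
        # get_namespace_list(None) yields every namespace on a multi-ASIC
        # device and just the default namespace on single-ASIC platforms;
        # passing a name narrows the run to that one namespace.
        output = ""
        for ns in multi_asic.get_namespace_list(namespace):
            output += bgp_util.run_bgp_command(vtysh_cmd, ns)
        return output.rstrip('\n')
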
diff --git a/utilities_common/bgp_util.py b/utilities_common/bgp_util.py
index 3d3968bd32..1d46ec5686 100644
--- a/utilities_common/bgp_util.py
+++ b/utilities_common/bgp_util.py
@@ -11,6 +11,32 @@ from utilities_common import constants
 
 
+def get_namespace_for_bgp_neighbor(neighbor_ip):
+    namespace_list = multi_asic.get_namespace_list()
+    for namespace in namespace_list:
+        if is_bgp_neigh_present(neighbor_ip, namespace):
+            return namespace
+
+    # neighbor IP not present in any namespace
+    raise ValueError(
+        ' Bgp neighbor {} not configured'.format(neighbor_ip))
+
+
+def is_bgp_neigh_present(neighbor_ip, namespace=multi_asic.DEFAULT_NAMESPACE):
+    config_db = multi_asic.connect_config_db_for_ns(namespace)
+    # check the regular neighbor table first, then the internal one
+    bgp_session = config_db.get_entry(multi_asic.BGP_NEIGH_CFG_DB_TABLE,
+                                      neighbor_ip)
+    if bgp_session:
+        return True
+
+    bgp_session = config_db.get_entry(
+        multi_asic.BGP_INTERNAL_NEIGH_CFG_DB_TABLE, neighbor_ip)
+    if bgp_session:
+        return True
+    return False
+
+
 def is_ipv4_address(ip_address):
     """
     Checks if given ip is ipv4
@@ -147,11 +173,13 @@ def get_neighbor_dict_from_table(db, table_name):
     return neighbor_dict
 
 
-def run_bgp_command(vtysh_cmd, bgp_namespace=multi_asic.DEFAULT_NAMESPACE):
+def run_bgp_command(vtysh_cmd,
+                    bgp_namespace=multi_asic.DEFAULT_NAMESPACE):
     bgp_instance_id = ' '
     output = None
     if bgp_namespace is not multi_asic.DEFAULT_NAMESPACE:
-        bgp_instance_id = " -n {} ".format(multi_asic.get_asic_id_from_name(bgp_namespace))
+        bgp_instance_id = " -n {} ".format(
+            multi_asic.get_asic_id_from_name(bgp_namespace))
 
     cmd = 'sudo vtysh {} -c "{}"'.format(
         bgp_instance_id, vtysh_cmd)
@@ -159,11 +187,10 @@ def run_bgp_command(vtysh_cmd, bgp_namespace=multi_asic.DEFAULT_NAMESPACE):
         output = clicommon.run_command(cmd, return_cmd=True)
     except Exception:
         ctx = click.get_current_context()
-        ctx.fail("Unable to get summary from bgp".format(bgp_instance_id))
+        ctx.fail("Unable to get summary from bgp {}".format(bgp_instance_id))
 
     return output
 
-
 def get_bgp_summary_from_all_bgp_instances(af, namespace, display):
 
     device = multi_asic_util.MultiAsic(display, namespace)
@@ -258,7 +285,7 @@ def process_bgp_summary_json(bgp_summary, cmd_output, device):
         bgp_summary['peerGroupMemory'] = bgp_summary.get(
             'peerGroupMemory', 0) + cmd_output['peerGroupMemory']
 
-        #store instance level field is seperate dict
+        # store instance level fields in a separate dict
         router_info = {}
         router_info['router_id'] = cmd_output['routerId']
         router_info['vrf'] = cmd_output['vrfId']
@@ -288,7 +315,7 @@ def process_bgp_summary_json(bgp_summary, cmd_output, device):
                 peers.append(value['pfxRcd'])
             else:
                 peers.append(value['state'])
-
+
             # Get the bgp neighbour name and store it
             neigh_name = get_bgp_neighbor_ip_to_name(
                 peer_ip, static_neighbors, dynamic_neighbors)
diff --git a/utilities_common/multi_asic.py b/utilities_common/multi_asic.py
index d98b26b399..920b5eebe5 100644
--- a/utilities_common/multi_asic.py
+++ b/utilities_common/multi_asic.py
@@ -105,6 +105,11 @@ def multi_asic_display_default_option():
             help='Namespace name or all'),
     ]
 
+def multi_asic_namespace_validation_callback(ctx, param, value):
+    if not multi_asic.is_multi_asic():
+        click.echo("-n/--namespace is not available for single asic")
+        ctx.abort()
+    return value
 
 def multi_asic_click_options(func):
     for option in reversed(_multi_asic_click_options):
From 08337aa7637b290bb8407c38b2a5dbe3e8383b3e Mon Sep 17 00:00:00 2001
From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com>
Date: Thu, 29 Apr 2021 04:58:30 +0300
Subject: 
From 08337aa7637b290bb8407c38b2a5dbe3e8383b3e Mon Sep 17 00:00:00 2001
From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com>
Date: Thu, 29 Apr 2021 04:58:30 +0300
Subject: [PATCH 21/41] [sonic-package-manager] first phase implementation of
 sonic-package-manager (#1527)

What I did

Implemented the sonic-package-manager utility to manage SONiC Packages as per
HLD Azure/SONiC#682. Also implemented optional logic in sonic-installer to
migrate packages into a new SONiC image.

How I did it

Implemented as per HLD Azure/SONiC#682.

How to verify it

(Doc: Azure/SONiC#682)
- install package
- uninstall package
- upgrade package
- S2S upgrade

THANK YOU, Stepan!
---
 doc/Command-Reference.md                      | 318 +++-
 setup.py                                      |  14 +
 .../bash_completion.d/sonic-package-manager   |   8 +
 sonic-utilities-data/bash_completion.d/spm    |   1 +
 sonic-utilities-data/templates/dump.sh.j2     |  29 +
 .../templates/service_mgmt.sh.j2              | 149 +++
 .../templates/sonic.service.j2                |  39 +
 sonic-utilities-data/templates/timer.unit.j2  |  15 +
 sonic_installer/bootloader/aboot.py           |  11 +-
 sonic_installer/bootloader/bootloader.py      |   5 +-
 sonic_installer/common.py                     |   3 +
 sonic_installer/main.py                       | 127 ++-
 sonic_package_manager/__init__.py             |   5 +
 sonic_package_manager/constraint.py           | 166 ++++
 sonic_package_manager/database.py             | 222 +++++
 sonic_package_manager/dockerapi.py            | 226 +++++
 sonic_package_manager/errors.py               | 146 +++
 sonic_package_manager/logger.py               |  29 +
 sonic_package_manager/main.py                 | 460 +++++++++
 sonic_package_manager/manager.py              | 931 ++++++++++++++++++
 sonic_package_manager/manifest.py             | 210 ++++
 sonic_package_manager/metadata.py             | 185 ++++
 sonic_package_manager/package.py              |  53 +
 sonic_package_manager/progress.py             |  52 +
 sonic_package_manager/reference.py            |  30 +
 sonic_package_manager/registry.py             | 157 +++
 .../service_creator/__init__.py               |   3 +
 .../service_creator/creator.py                | 342 +++++++
 .../service_creator/feature.py                | 108 ++
 .../service_creator/sonic_db.py               |  98 ++
 .../service_creator/utils.py                  |  17 +
 sonic_package_manager/source.py               | 183 ++++
 sonic_package_manager/utils.py                |  42 +
 sonic_package_manager/version.py              |  23 +
 tests/sonic_package_manager/conftest.py       | 377 +++++++
 tests/sonic_package_manager/test_cli.py       |  63 ++
 .../sonic_package_manager/test_constraint.py  |  76 ++
 tests/sonic_package_manager/test_database.py  |  89 ++
 tests/sonic_package_manager/test_manager.py   | 322 ++++++
 tests/sonic_package_manager/test_manifest.py  |  74 ++
 tests/sonic_package_manager/test_metadata.py  |  37 +
 tests/sonic_package_manager/test_reference.py |  18 +
 tests/sonic_package_manager/test_registry.py  |  15 +
 .../test_service_creator.py                   | 171 ++++
 tests/sonic_package_manager/test_utils.py     |   8 +
 45 files changed, 5633 insertions(+), 24 deletions(-)
 create mode 100644 sonic-utilities-data/bash_completion.d/sonic-package-manager
 create mode 120000 sonic-utilities-data/bash_completion.d/spm
 create mode 100644 sonic-utilities-data/templates/dump.sh.j2
 create mode 100644 sonic-utilities-data/templates/service_mgmt.sh.j2
 create mode 100644 sonic-utilities-data/templates/sonic.service.j2
 create mode 100644 sonic-utilities-data/templates/timer.unit.j2
 create mode 100644 sonic_package_manager/__init__.py
 create mode 100644 sonic_package_manager/constraint.py
 create mode 100644 sonic_package_manager/database.py
 create mode 100644 sonic_package_manager/dockerapi.py
 create mode 100644 sonic_package_manager/errors.py
 create mode 100644 sonic_package_manager/logger.py
 create mode 100644 sonic_package_manager/main.py
 create mode 100644 sonic_package_manager/manager.py
 create mode 100644 sonic_package_manager/manifest.py
 create mode 100644 sonic_package_manager/metadata.py
 create mode 100644 sonic_package_manager/package.py
 create mode 100644 sonic_package_manager/progress.py
 create mode 100644 sonic_package_manager/reference.py
 create mode 100644 sonic_package_manager/registry.py
 create mode 100644 sonic_package_manager/service_creator/__init__.py
 create mode 100644 sonic_package_manager/service_creator/creator.py
 create mode 100644 sonic_package_manager/service_creator/feature.py
 create mode 100644 sonic_package_manager/service_creator/sonic_db.py
 create mode 100644 sonic_package_manager/service_creator/utils.py
 create mode 100644 sonic_package_manager/source.py
 create mode 100644 sonic_package_manager/utils.py
 create mode 100644 sonic_package_manager/version.py
 create mode 100644 tests/sonic_package_manager/conftest.py
 create mode 100644 tests/sonic_package_manager/test_cli.py
 create mode 100644 tests/sonic_package_manager/test_constraint.py
 create mode 100644 tests/sonic_package_manager/test_database.py
 create mode 100644 tests/sonic_package_manager/test_manager.py
 create mode 100644 tests/sonic_package_manager/test_manifest.py
 create mode 100644 tests/sonic_package_manager/test_metadata.py
 create mode 100644 tests/sonic_package_manager/test_reference.py
 create mode 100644 tests/sonic_package_manager/test_registry.py
 create mode 100644 tests/sonic_package_manager/test_service_creator.py
 create mode 100644 tests/sonic_package_manager/test_utils.py

diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md
index 6615413255..ab4e28dbdc 100644
--- a/doc/Command-Reference.md
+++ b/doc/Command-Reference.md
@@ -143,6 +143,7 @@
 * [Watermark Show commands](#watermark-show-commands)
 * [Watermark Config commands](#watermark-config-commands)
 * [Software Installation and Management](#software-installation-and-management)
+  * [SONiC Package Manager](#sonic-package-manager)
   * [SONiC Installer](#sonic-installer)
 * [Troubleshooting Commands](#troubleshooting-commands)
 * [Routing Stack](#routing-stack)
@@ -7961,8 +7962,316 @@ Go Back To [Beginning of the document](#) or [Beginning of this section](#waterm
 
 ## Software Installation and Management
 
-SONiC software can be installed in two methods, viz, "using sonic-installer tool", "ONIE Installer".
+SONiC images can be installed in one of two ways:
+1. From within a running SONiC image using the `sonic-installer` utility
+2. From the vendor's bootloader (e.g., ONIE, Aboot, etc.)
 
+SONiC packages are available as prebuilt Docker images and are meant to be installed with the *sonic-package-manager* utility.
+
+### SONiC Package Manager
+
+The *sonic-package-manager* is a command line tool to manage (e.g. install, upgrade or uninstall) SONiC Packages.
+
+**sonic-package-manager list**
+
+This command lists all available SONiC packages, their description, installed version and installation status.
+SONiC package status can be *Installed*, *Not Installed* or *Built-In*. "Built-In" status means that a feature is built into the SONiC image and can't be upgraded or uninstalled.
+
+- Usage:
+  ```
+  sonic-package-manager list
+  ```
+
+- Example:
+  ```
+  admin@sonic:~$ sonic-package-manager list
+  Name            Repository                   Description                   Version    Status
+  --------------  ---------------------------  ----------------------------  ---------  --------------
+  cpu-report      azure/cpu-report             CPU report package            N/A        Not Installed
+  database        docker-database              SONiC database package        1.0.0      Built-In
+  dhcp-relay      azure/docker-dhcp-relay      SONiC dhcp-relay package      1.0.0      Installed
+  fpm-frr         docker-fpm-frr               SONiC fpm-frr package         1.0.0      Built-In
+  lldp            docker-lldp                  SONiC lldp package            1.0.0      Built-In
+  macsec          docker-macsec                SONiC macsec package          1.0.0      Built-In
+  mgmt-framework  docker-sonic-mgmt-framework  SONiC mgmt-framework package  1.0.0      Built-In
+  nat             docker-nat                   SONiC nat package             1.0.0      Built-In
+  pmon            docker-platform-monitor      SONiC pmon package            1.0.0      Built-In
+  radv            docker-router-advertiser     SONiC radv package            1.0.0      Built-In
+  sflow           docker-sflow                 SONiC sflow package           1.0.0      Built-In
+  snmp            docker-snmp                  SONiC snmp package            1.0.0      Built-In
+  swss            docker-orchagent             SONiC swss package            1.0.0      Built-In
+  syncd           docker-syncd-mlnx            SONiC syncd package           1.0.0      Built-In
+  teamd           docker-teamd                 SONiC teamd package           1.0.0      Built-In
+  telemetry       docker-sonic-telemetry       SONiC telemetry package       1.0.0      Built-In
+  ```
+
+**sonic-package-manager repository add**
+
+This command adds a new repository as a source for SONiC packages to the database. *NOTE*: requires elevated (root) privileges to run.
+
+- Usage:
+  ```
+  Usage: sonic-package-manager repository add [OPTIONS] NAME REPOSITORY
+
+    Add a new repository to database.
+
+    NOTE: This command requires elevated (root) privileges to run.
+
+  Options:
+    --default-reference TEXT  Default installation reference. Can be a tag or
+                              sha256 digest in repository.
+    --description TEXT        Optional package entry description.
+    --help                    Show this message and exit.
+  ```
+- Example:
+  ```
+  admin@sonic:~$ sudo sonic-package-manager repository add \
+    cpu-report azure/sonic-cpu-report --default-reference 1.0.0
+  ```
+
+**sonic-package-manager repository remove**
+
+This command removes a repository as a source for SONiC packages from the database. The package has to be *Not Installed* in order to be removed from the package database. *NOTE*: requires elevated (root) privileges to run.
+
+- Usage:
+  ```
+  Usage: sonic-package-manager repository remove [OPTIONS] NAME
+
+    Remove repository from database.
+
+    NOTE: This command requires elevated (root) privileges to run.
+
+  Options:
+    --help  Show this message and exit.
+  ```
+- Example:
+  ```
+  admin@sonic:~$ sudo sonic-package-manager repository remove cpu-report
+  ```
+
+**sonic-package-manager install**
+
+This command pulls and installs a package on the SONiC host. *NOTE*: this command requires elevated (root) privileges to run.
+
+- Usage:
+  ```
+  Usage: sonic-package-manager install [OPTIONS] [PACKAGE_EXPR]
+
+    Install/Upgrade package using [PACKAGE_EXPR] in format
+    "<name>[=<version>|@<reference>]".
+
+    The repository to pull the package from is resolved by lookup in
+    package database, thus the package has to be added via "sonic-
+    package-manager repository add" command.
+
+    In case when [PACKAGE_EXPR] is a package name "<name>" this command
+    will install or upgrade to a version referenced by "default-
+    reference" in package database.
+
+    NOTE: This command requires elevated (root) privileges to run.
+
+  Options:
+    --enable                  Set the default state of the feature to enabled
+                              and enable feature right after installation. NOTE:
+                              user needs to execute "config save -y" to make
+                              this setting persistent.
+    --set-owner [local|kube]  Default owner configuration setting for a feature.
+    --from-repository TEXT    Fetch package directly from image registry
+                              repository. NOTE: This argument is mutually
+                              exclusive with arguments: [package_expr,
+                              from_tarball].
+    --from-tarball FILE       Fetch package from saved image tarball. NOTE: This
+                              argument is mutually exclusive with arguments:
+                              [package_expr, from_repository].
+    -f, --force               Force operation by ignoring package dependency
+                              tree and package manifest validation failures.
+    -y, --yes                 Automatically answer yes on prompts.
+    -v, --verbosity LVL       Either CRITICAL, ERROR, WARNING, INFO or DEBUG.
+                              Default is INFO.
+    --skip-host-plugins       Do not install host OS plugins provided by the
+                              package (CLI, etc). NOTE: In case when package
+                              host OS plugins are set as mandatory in package
+                              manifest this option will fail the installation.
+    --allow-downgrade         Allow package downgrade. By default an attempt to
+                              downgrade the package will result in a failure
+                              since downgrade might not be supported by the
+                              package, thus requires explicit request from the
+                              user.
+    --help                    Show this message and exit.
+  ```
+- Example:
+  ```
+  admin@sonic:~$ sudo sonic-package-manager install dhcp-relay=1.0.2
+  ```
+  ```
+  admin@sonic:~$ sudo sonic-package-manager install dhcp-relay@latest
+  ```
+  ```
+  admin@sonic:~$ sudo sonic-package-manager install dhcp-relay@sha256:9780f6d83e45878749497a6297ed9906c19ee0cc48cc88dc63827564bb8768fd
+  ```
+  ```
+  admin@sonic:~$ sudo sonic-package-manager install --from-repository azure/sonic-cpu-report:latest
+  ```
+  ```
+  admin@sonic:~$ sudo sonic-package-manager install --from-tarball sonic-docker-image.gz
+  ```
+
+**sonic-package-manager uninstall**
+
+This command uninstalls a package from the SONiC host. The user needs to stop the feature prior to uninstalling it.
+*NOTE*: this command requires elevated (root) privileges to run.
+
+- Usage:
+  ```
+  Usage: sonic-package-manager uninstall [OPTIONS] NAME
+
+    Uninstall package.
+
+    NOTE: This command requires elevated (root) privileges to run.
+
+  Options:
+    -f, --force          Force operation by ignoring package dependency tree and
+                         package manifest validation failures.
+    -y, --yes            Automatically answer yes on prompts.
+    -v, --verbosity LVL  Either CRITICAL, ERROR, WARNING, INFO or DEBUG. Default
+                         is INFO.
+    --help               Show this message and exit.
+  ```
+- Example:
+  ```
+  admin@sonic:~$ sudo sonic-package-manager uninstall dhcp-relay
+  ```
+
+**sonic-package-manager reset**
+
+This command resets a package by reinstalling it to its default version. *NOTE*: this command requires elevated (root) privileges to run.
+
+- Usage:
+  ```
+  Usage: sonic-package-manager reset [OPTIONS] NAME
+
+    Reset package to the default version.
+
+    NOTE: This command requires elevated (root) privileges to run.
+
+  Options:
+    -f, --force          Force operation by ignoring package dependency tree and
+                         package manifest validation failures.
+    -y, --yes            Automatically answer yes on prompts.
+    -v, --verbosity LVL  Either CRITICAL, ERROR, WARNING, INFO or DEBUG. Default
+                         is INFO.
+    --skip-host-plugins  Do not install host OS plugins provided by the package
+                         (CLI, etc). NOTE: In case when package host OS plugins
+                         are set as mandatory in package manifest this option
+                         will fail the installation.
+    --help               Show this message and exit.
+  ```
+- Example:
+  ```
+  admin@sonic:~$ sudo sonic-package-manager reset dhcp-relay
+  ```
+
+**sonic-package-manager show package versions**
+
+This command retrieves a list of all available versions for the given package from the configured upstream repository.
+
+- Usage:
+  ```
+  Usage: sonic-package-manager show package versions [OPTIONS] NAME
+
+    Show available versions.
+
+  Options:
+    --all    Show all available tags in repository.
+    --plain  Plain output.
+    --help   Show this message and exit.
+  ```
+- Example:
+  ```
+  admin@sonic:~$ sonic-package-manager show package versions dhcp-relay
+  • 1.0.0
+  • 1.0.2
+  • 2.0.0
+  ```
+  ```
+  admin@sonic:~$ sonic-package-manager show package versions dhcp-relay --plain
+  1.0.0
+  1.0.2
+  2.0.0
+  ```
+  ```
+  admin@sonic:~$ sonic-package-manager show package versions dhcp-relay --all
+  • 1.0.0
+  • 1.0.2
+  • 2.0.0
+  • latest
+  ```
+
+**sonic-package-manager show package changelog**
+
+This command fetches the changelog from the package manifest and displays it. *NOTE*: the package changelog can be retrieved from the registry or read from an image tarball without installing the package.
+
+- Usage:
+  ```
+  Usage: sonic-package-manager show package changelog [OPTIONS] [PACKAGE_EXPR]
+
+    Show package changelog.
+
+  Options:
+    --from-repository TEXT  Fetch package directly from image registry
+                            repository NOTE: This argument is mutually exclusive
+                            with arguments: [from_tarball, package_expr].
+    --from-tarball FILE     Fetch package from saved image tarball NOTE: This
+                            argument is mutually exclusive with arguments:
+                            [package_expr, from_repository].
+    --help                  Show this message and exit.
+  ```
+- Example:
+  ```
+  admin@sonic:~$ sonic-package-manager show package changelog dhcp-relay
+  1.0.0:
+
+    • Initial release
+
+      Author (author@email.com) Mon, 25 May 2020 12:25:00 +0300
+  ```
+
+**sonic-package-manager show package manifest**
+
+This command fetches the package manifest and displays it. *NOTE*: the package manifest can be retrieved from the registry or read from an image tarball without installing the package.
+
+- Usage:
+  ```
+  Usage: sonic-package-manager show package manifest [OPTIONS] [PACKAGE_EXPR]
+
+    Show package manifest.
+
+  Options:
+    --from-repository TEXT  Fetch package directly from image registry
+                            repository NOTE: This argument is mutually exclusive
+                            with arguments: [package_expr, from_tarball].
+    --from-tarball FILE     Fetch package from saved image tarball NOTE: This
+                            argument is mutually exclusive with arguments:
+                            [from_repository, package_expr].
+    -v, --verbosity LVL     Either CRITICAL, ERROR, WARNING, INFO or DEBUG
+    --help                  Show this message and exit.
+  ```
+- Example:
+  ```
+  admin@sonic:~$ sonic-package-manager show package manifest dhcp-relay=2.0.0
+  {
+      "version": "1.0.0",
+      "package": {
+          "version": "2.0.0",
+          "depends": [
+              "database>=1.0.0,<2.0.0"
+          ]
+      },
+      "service": {
+          "name": "dhcp_relay"
+      }
+  }
+  ```
 
 ### SONiC Installer
 
 This is a command line tool available as part of the SONiC software; if the device is already running the SONiC software, this tool can be used to install an alternate image in the partition.
@@ -8033,6 +8342,13 @@ This command is used to install a new image on the alternate image partition. T
    Done
    ```
 
+Installing a new image with sonic-installer keeps the packages installed on the currently running SONiC image and automatically migrates them to the new image.
To perform a clean SONiC installation, use the *--skip-package-migration* option:
+
+- Example:
+  ```
+  admin@sonic:~$ sudo sonic-installer install https://sonic-jenkins.westus.cloudapp.azure.com/job/xxxx/job/buildimage-xxxx-all/xxx/artifact/target/sonic-xxxx.bin --skip-package-migration
+  ```
+
 **sonic-installer set_default**
 
 This command is used to change the image that is loaded by default on all subsequent reboots.
diff --git a/setup.py b/setup.py
index cd706eb433..15f93b46f7 100644
--- a/setup.py
+++ b/setup.py
@@ -48,6 +48,8 @@
         'show.plugins',
         'sonic_installer',
         'sonic_installer.bootloader',
+        'sonic_package_manager',
+        'sonic_package_manager.service_creator',
         'tests',
         'undebug',
         'utilities_common',
@@ -151,13 +153,21 @@
         'sonic-clear = clear.main:cli',
         'sonic-installer = sonic_installer.main:sonic_installer',
         'sonic_installer = sonic_installer.main:sonic_installer',  # Deprecated
+        'sonic-package-manager = sonic_package_manager.main:cli',
+        'spm = sonic_package_manager.main:cli',
         'undebug = undebug.main:cli',
         'watchdogutil = watchdogutil.main:watchdogutil',
     ]
 },
 install_requires=[
     'click==7.0',
+    'click-log==0.3.2',
+    'docker==4.4.4',
+    'docker-image-py==0.1.10',
+    'filelock==3.0.12',
+    'enlighten==1.8.0',
     'ipaddress==1.0.23',
+    'jinja2==2.11.3',
     'jsondiff==1.2.0',
     'jsonpatch==1.32.0',
     'm2crypto==0.31.0',
@@ -165,6 +175,8 @@
     'netaddr==0.8.0',
     'netifaces==0.10.7',
     'pexpect==4.8.0',
+    'poetry-semver==0.1.0',
+    'prettyprinter==0.18.0',
     'pyroute2==0.5.14',
     'requests==2.25.0',
     'sonic-config-engine',
@@ -173,6 +185,7 @@
     'sonic-yang-mgmt',
     'swsssdk>=2.0.1',
     'tabulate==0.8.2',
+    'www-authenticate==0.9.2',
     'xmltodict==0.12.0',
 ],
 setup_requires= [
@@ -180,6 +193,7 @@
     'wheel'
 ],
 tests_require = [
+    'pyfakefs',
     'pytest',
     'mockredispy>=2.9.3',
     'deepdiff==5.2.3'
diff --git a/sonic-utilities-data/bash_completion.d/sonic-package-manager b/sonic-utilities-data/bash_completion.d/sonic-package-manager
new file mode 100644
index 0000000000..a8a2456603
--- /dev/null
+++ b/sonic-utilities-data/bash_completion.d/sonic-package-manager
@@ -0,0 +1,8 @@
+_sonic_package_manager_completion() {
+    COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \
+                   COMP_CWORD=$COMP_CWORD \
+                   _SONIC_PACKAGE_MANAGER_COMPLETE=complete $1 ) )
+    return 0
+}
+
+complete -F _sonic_package_manager_completion -o default sonic-package-manager;
diff --git a/sonic-utilities-data/bash_completion.d/spm b/sonic-utilities-data/bash_completion.d/spm
new file mode 120000
index 0000000000..3fff069223
--- /dev/null
+++ b/sonic-utilities-data/bash_completion.d/spm
@@ -0,0 +1 @@
+sonic-package-manager
\ No newline at end of file
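For orientation before the template diffs that follow: these Jinja2 templates are rendered by the package manager's service creator with the package manifest as context. A minimal sketch of that rendering, assuming a simplified dict-shaped manifest and hypothetical file paths (the real logic lives in sonic_package_manager/service_creator/creator.py):

```python
# Minimal rendering sketch; manifest shape and paths are assumptions.
import jinja2

def render(template_path, output_path, **context):
    # Load a template file and write the rendered result.
    with open(template_path) as f:
        template = jinja2.Template(f.read())
    with open(output_path, 'w') as f:
        f.write(template.render(**context))

manifest = {
    'service': {
        'name': 'dhcp_relay',
        'requires': ['database'],
        'requisite': [],
        'after': ['swss'],
        'before': [],
        'delayed': False,
        'wanted-by': [],
    }
}
render('/usr/share/sonic/templates/sonic.service.j2',      # hypothetical source
       '/usr/lib/systemd/system/dhcp_relay.service',        # hypothetical target
       source='dhcp-relay', manifest=manifest,
       multi_instance=False, multi_instance_services=[])
```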
diff --git a/sonic-utilities-data/templates/dump.sh.j2 b/sonic-utilities-data/templates/dump.sh.j2
new file mode 100644
index 0000000000..ebb7ed8f24
--- /dev/null
+++ b/sonic-utilities-data/templates/dump.sh.j2
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+#
+# =============== Managed by SONiC Package Manager. DO NOT EDIT! ===============
+# auto-generated from {{ source }} by sonic-package-manager
+#
+
+service="{{ manifest.service.name }}"
+dump_command="{{ manifest.package['debug-dump'] }}"
+container_re="^${service}[0-9]*$"
+{% raw %}
+container_ids="$(docker ps -f name=${container_re} -f status=running --format {{.Names}})"
+{% endraw %}
+tmp_dir=$(mktemp -d)
+tmp_dump_dir="$tmp_dir/$service"
+tmp_archive=$(mktemp)
+
+mkdir -p "$tmp_dump_dir"
+
+for container_id in $container_ids; do
+    docker exec -t "${container_id}" ${dump_command} &> "${tmp_dump_dir}/${container_id}"
+done
+
+tar -C $(dirname $tmp_dump_dir) -cf $tmp_archive $service
+
+cat $tmp_archive
+rm $tmp_archive
+rm -rf $tmp_dir
diff --git a/sonic-utilities-data/templates/service_mgmt.sh.j2 b/sonic-utilities-data/templates/service_mgmt.sh.j2
new file mode 100644
index 0000000000..e46ba47380
--- /dev/null
+++ b/sonic-utilities-data/templates/service_mgmt.sh.j2
@@ -0,0 +1,149 @@
+#!/bin/bash
+
+#
+# =============== Managed by SONiC Package Manager. DO NOT EDIT! ===============
+# auto-generated from {{ source }} by sonic-package-manager
+#
+
+SERVICE="{{ manifest.service.name }}"
+NAMESPACE_PREFIX="asic"
+SONIC_DB_CLI="sonic-db-cli"
+TMPDIR="/tmp/"
+DEBUGLOG="${TMPDIR}/${SERVICE}.log"
+[[ ! -z $DEV ]] && DEBUGLOG="${TMPDIR}/${SERVICE}-${DEV}.log"
+[[ ! -z $DEV ]] && NET_NS="${NAMESPACE_PREFIX}${DEV}" # name of the network namespace
+[[ ! -z $DEV ]] && SONIC_DB_CLI="${SONIC_DB_CLI} -n ${NET_NS}"
+
+{%- for service in manifest.service.dependent %}
+{%- if service in multi_instance_services %}
+MULTI_INST_DEPENDENT="${MULTI_INST_DEPENDENT} {{ service }}"
+{%- else %}
+DEPENDENT="${DEPENDENT} {{ service }}"
+{%- endif %}
+{%- endfor %}
+
+# Update dependent list based on other packages requirements
+if [[ -f /etc/sonic/${SERVICE}_dependent ]]; then
+    DEPENDENT="${DEPENDENT} $(cat /etc/sonic/${SERVICE}_dependent)"
+fi
+
+if [[ -f /etc/sonic/${SERVICE}_multi_inst_dependent ]]; then
+    MULTI_INST_DEPENDENT="${MULTI_INST_DEPENDENT} $(cat /etc/sonic/${SERVICE}_multi_inst_dependent)"
+fi
+
+function debug()
+{
+    /usr/bin/logger $1
+    /bin/echo `date` "- $1" >> ${DEBUGLOG}
+}
+
+function check_warm_boot()
+{
+    SYSTEM_WARM_START=`$SONIC_DB_CLI STATE_DB hget "WARM_RESTART_ENABLE_TABLE|system" enable`
+    SERVICE_WARM_START=`$SONIC_DB_CLI STATE_DB hget "WARM_RESTART_ENABLE_TABLE|${SERVICE}" enable`
+    if [[ x"$SYSTEM_WARM_START" == x"true" ]] || [[ x"$SERVICE_WARM_START" == x"true" ]]; then
+        WARM_BOOT="true"
+{#- TODO: restore count validation for SONiC packages #}
+    else
+        WARM_BOOT="false"
+    fi
+}
+
+function check_fast_boot()
+{
+    if [[ $($SONIC_DB_CLI STATE_DB GET "FAST_REBOOT|system") == "1" ]]; then
+        FAST_BOOT="true"
+    else
+        FAST_BOOT="false"
+    fi
+}
+
+function start_dependent_services() {
+    if [[ x"$WARM_BOOT" != x"true" ]]; then
+        for dep in ${DEPENDENT}; do
+            /bin/systemctl start ${dep}
+        done
+        for dep in ${MULTI_INST_DEPENDENT}; do
+            if [[ ! -z $DEV ]]; then
+                /bin/systemctl start ${dep}@$DEV
+            else
+                /bin/systemctl start ${dep}
+            fi
+        done
+    fi
+}
+
+function stop_dependent_services() {
+    if [[ x"$WARM_BOOT" != x"true" ]] && [[ x"$FAST_BOOT" != x"true" ]]; then
+        for dep in ${DEPENDENT}; do
+            /bin/systemctl stop ${dep}
+        done
+        for dep in ${MULTI_INST_DEPENDENT}; do
+            if [[ ! -z $DEV ]]; then
+                /bin/systemctl stop ${dep}@$DEV
+            else
+                /bin/systemctl stop ${dep}
+            fi
+        done
+    fi
+}
+
+function start() {
+    debug "Starting ${SERVICE}$DEV service..."
+ + # start service docker + /usr/bin/${SERVICE}.sh start $DEV + debug "Started ${SERVICE}$DEV service..." + +{%- if manifest.service["post-start-action"] %} + docker exec -t ${SERVICE}${DEV} {{ manifest.service["post-start-action"] }} +{%- endif %} +} + +function wait() { + start_dependent_services + + if [[ ! -z $DEV ]]; then + /usr/bin/${SERVICE}.sh wait $DEV + else + /usr/bin/${SERVICE}.sh wait + fi +} + +function stop() { + debug "Stopping ${SERVICE}$DEV service..." + +{%- if manifest.service["pre-shutdown-action"] %} + docker exec -t ${SERVICE}${DEV} {{ manifest.service["pre-shutdown-action"] }} +{%- endif %} + + # For WARM/FAST boot do not perform service stop + if [[ x"$WARM_BOOT" != x"true" ]] && [[ x"$FAST_BOOT" != x"true" ]]; then + /usr/bin/${SERVICE}.sh stop $DEV + else + docker kill ${SERVICE}$DEV &> /dev/null || debug "Docker ${SERVICE}$DEV is not running ($?) ..." + fi + + debug "Stopped ${SERVICE}$DEV service..." + + stop_dependent_services +} + +OP=$1 +DEV=$2 + +check_warm_boot +check_fast_boot + +debug "Fast boot flag: ${SERVICE}$DEV ${FAST_BOOT}." +debug "Warm boot flag: ${SERVICE}$DEV ${WARM_BOOT}." + +case "$OP" in + start|wait|stop) + $1 + ;; + *) + echo "Usage: $0 {start|wait|stop}" + exit 1 + ;; +esac diff --git a/sonic-utilities-data/templates/sonic.service.j2 b/sonic-utilities-data/templates/sonic.service.j2 new file mode 100644 index 0000000000..72d6ab698c --- /dev/null +++ b/sonic-utilities-data/templates/sonic.service.j2 @@ -0,0 +1,39 @@ +# +# =============== Managed by SONiC Package Manager. DO NOT EDIT! =============== +# auto-generated from {{ source }} by sonic-package-manager +# +{%- set path = '/usr/local/bin' %} +{%- set multi_instance = multi_instance|default(False) %} +{%- set multi_instance_services = multi_instance_services|default([]) %} +[Unit] +Description={{ manifest.service.name }} container +{%- for service in manifest.service.requires %} +Requires={{ service }}{% if multi_instance and service in multi_instance_services %}@%i{% endif %}.service +{%- endfor %} +{%- for service in manifest.service.requisite %} +Requisite={{ service }}{% if multi_instance and service in multi_instance_services %}@%i{% endif %}.service +{%- endfor %} +{%- for service in manifest.service.after %} +After={{ service }}{% if multi_instance and service in multi_instance_services %}@%i{% endif %}.service +{%- endfor %} +{%- for service in manifest.service.before %} +Before={{ service }}{% if multi_instance and service in multi_instance_services %}@%i{% endif %}.service +{%- endfor %} +BindsTo=sonic.target +After=sonic.target +StartLimitIntervalSec=1200 +StartLimitBurst=3 + +[Service] +ExecStartPre={{path}}/{{manifest.service.name}}.sh start{% if multi_instance %} %i{% endif %} +ExecStart={{path}}/{{manifest.service.name}}.sh wait{% if multi_instance %} %i{% endif %} +ExecStop={{path}}/{{manifest.service.name}}.sh stop{% if multi_instance %} %i{% endif %} +RestartSec=30 + +{%- if not manifest.service.delayed %} +[Install] +WantedBy=sonic.target +{%- for service in manifest.service["wanted-by"] %} +WantedBy={{ service }}{% if multi_instance and service in multi_instance_services %}@%i{% endif %}.service +{%- endfor %} +{%- endif %} diff --git a/sonic-utilities-data/templates/timer.unit.j2 b/sonic-utilities-data/templates/timer.unit.j2 new file mode 100644 index 0000000000..a757b8deb8 --- /dev/null +++ b/sonic-utilities-data/templates/timer.unit.j2 @@ -0,0 +1,15 @@ +# +# =============== Managed by SONiC Package Manager. DO NOT EDIT! 
=============== +# auto-generated from {{ source }} by sonic-package-manager +# +[Unit] +Description=Delays {{ manifest.service.name }} until SONiC has started +PartOf={{ manifest.service.name }}{% if multi_instance %}@%i{% endif %}.service + +[Timer] +OnUnitActiveSec=0 sec +OnBootSec=3min 30 sec +Unit={{ manifest.service.name }}{% if multi_instance %}@%i{% endif %}.service + +[Install] +WantedBy=timers.target sonic.target diff --git a/sonic_installer/bootloader/aboot.py b/sonic_installer/bootloader/aboot.py index 3bf3e297e7..a2ef2acf4f 100644 --- a/sonic_installer/bootloader/aboot.py +++ b/sonic_installer/bootloader/aboot.py @@ -19,7 +19,6 @@ HOST_PATH, IMAGE_DIR_PREFIX, IMAGE_PREFIX, - ROOTFS_NAME, run_command, run_command_or_raise, ) @@ -189,14 +188,14 @@ def _get_swi_file_offset(self, swipath, filename): return f._fileobj.tell() # pylint: disable=protected-access @contextmanager - def get_rootfs_path(self, image_path): - rootfs_path = os.path.join(image_path, ROOTFS_NAME) - if os.path.exists(rootfs_path) and not isSecureboot(): - yield rootfs_path + def get_path_in_image(self, image_path, path): + path_in_image = os.path.join(image_path, path) + if os.path.exists(path_in_image) and not isSecureboot(): + yield path_in_image return swipath = os.path.join(image_path, DEFAULT_SWI_IMAGE) - offset = self._get_swi_file_offset(swipath, ROOTFS_NAME) + offset = self._get_swi_file_offset(swipath, path) loopdev = subprocess.check_output(['losetup', '-f']).decode('utf8').rstrip() try: diff --git a/sonic_installer/bootloader/bootloader.py b/sonic_installer/bootloader/bootloader.py index b59c9edccd..a6694977ae 100644 --- a/sonic_installer/bootloader/bootloader.py +++ b/sonic_installer/bootloader/bootloader.py @@ -9,7 +9,6 @@ HOST_PATH, IMAGE_DIR_PREFIX, IMAGE_PREFIX, - ROOTFS_NAME, ) class Bootloader(object): @@ -71,6 +70,6 @@ def get_image_path(cls, image): return image.replace(IMAGE_PREFIX, prefix) @contextmanager - def get_rootfs_path(self, image_path): + def get_path_in_image(self, image_path, path_in_image): """returns the path to the squashfs""" - yield path.join(image_path, ROOTFS_NAME) + yield path.join(image_path, path_in_image) diff --git a/sonic_installer/common.py b/sonic_installer/common.py index c49aaac032..ac1416789f 100644 --- a/sonic_installer/common.py +++ b/sonic_installer/common.py @@ -14,6 +14,9 @@ IMAGE_PREFIX = 'SONiC-OS-' IMAGE_DIR_PREFIX = 'image-' ROOTFS_NAME = 'fs.squashfs' +UPPERDIR_NAME = 'rw' +WORKDIR_NAME = 'work' +DOCKERDIR_NAME = 'docker' # Run bash command and print output to stdout def run_command(command): diff --git a/sonic_installer/main.py b/sonic_installer/main.py index 92ad7677f4..12a2ab7e0e 100644 --- a/sonic_installer/main.py +++ b/sonic_installer/main.py @@ -1,4 +1,5 @@ import configparser +import contextlib import os import re import subprocess @@ -11,7 +12,14 @@ from swsscommon.swsscommon import SonicV2Connector from .bootloader import get_bootloader -from .common import run_command, run_command_or_raise, IMAGE_PREFIX +from .common import ( + run_command, run_command_or_raise, + IMAGE_PREFIX, + ROOTFS_NAME, + UPPERDIR_NAME, + WORKDIR_NAME, + DOCKERDIR_NAME, +) from .exception import SonicRuntimeException SYSLOG_IDENTIFIER = "sonic-installer" @@ -218,17 +226,48 @@ def print_deprecation_warning(deprecated_cmd_or_subcmd, new_cmd_or_subcmd): fg="red", err=True) click.secho("Please use '{}' instead".format(new_cmd_or_subcmd), fg="red", err=True) -def update_sonic_environment(click, bootloader, binary_image_version): + +def mount_squash_fs(squashfs_path, 
mount_point): + run_command_or_raise(["mkdir", "-p", mount_point]) + run_command_or_raise(["mount", "-t", "squashfs", squashfs_path, mount_point]) + + +def umount(mount_point, read_only=True, recursive=False, force=True, remove_dir=True): + flags = [] + if read_only: + flags.append("-r") + if force: + flags.append("-f") + if recursive: + flags.append("-R") + run_command_or_raise(["umount", *flags, mount_point]) + if remove_dir: + run_command_or_raise(["rm", "-rf", mount_point]) + + +def mount_overlay_fs(lowerdir, upperdir, workdir, mount_point): + run_command_or_raise(["mkdir", "-p", mount_point]) + overlay_options = "rw,relatime,lowerdir={},upperdir={},workdir={}".format(lowerdir, upperdir, workdir) + run_command_or_raise(["mount", "overlay", "-t", "overlay", "-o", overlay_options, mount_point]) + + +def mount_bind(source, mount_point): + run_command_or_raise(["mkdir", "-p", mount_point]) + run_command_or_raise(["mount", "--bind", source, mount_point]) + + +def mount_procfs_chroot(root): + run_command_or_raise(["chroot", root, "mount", "proc", "/proc", "-t", "proc"]) + + +def mount_sysfs_chroot(root): + run_command_or_raise(["chroot", root, "mount", "sysfs", "/sys", "-t", "sysfs"]) + + +def update_sonic_environment(bootloader, binary_image_version): """Prepare sonic environment variable using incoming image template file. If incoming image template does not exist use current image template file. """ - def mount_next_image_fs(squashfs_path, mount_point): - run_command_or_raise(["mkdir", "-p", mount_point]) - run_command_or_raise(["mount", "-t", "squashfs", squashfs_path, mount_point]) - - def umount_next_image_fs(mount_point): - run_command_or_raise(["umount", "-rf", mount_point]) - run_command_or_raise(["rm", "-rf", mount_point]) SONIC_ENV_TEMPLATE_FILE = os.path.join("usr", "share", "sonic", "templates", "sonic-environment.j2") SONIC_VERSION_YML_FILE = os.path.join("etc", "sonic", "sonic_version.yml") @@ -239,9 +278,9 @@ def umount_next_image_fs(mount_point): env_dir = os.path.join(new_image_dir, "sonic-config") env_file = os.path.join(env_dir, "sonic-environment") - with bootloader.get_rootfs_path(new_image_dir) as new_image_squashfs_path: + with bootloader.get_path_in_image(new_image_dir, ROOTFS_NAME) as new_image_squashfs_path: try: - mount_next_image_fs(new_image_squashfs_path, new_image_mount) + mount_squash_fs(new_image_squashfs_path, new_image_mount) next_sonic_env_template_file = os.path.join(new_image_mount, SONIC_ENV_TEMPLATE_FILE) next_sonic_version_yml_file = os.path.join(new_image_mount, SONIC_VERSION_YML_FILE) @@ -264,7 +303,62 @@ def umount_next_image_fs(mount_point): os.remove(env_file) os.rmdir(env_dir) finally: - umount_next_image_fs(new_image_mount) + umount(new_image_mount) + + +def migrate_sonic_packages(bootloader, binary_image_version): + """ Migrate SONiC packages to new SONiC image. 
""" + + SONIC_PACKAGE_MANAGER = "sonic-package-manager" + PACKAGE_MANAGER_DIR = "/var/lib/sonic-package-manager/" + DOCKER_CTL_SCRIPT = "/usr/lib/docker/docker.sh" + DOCKERD_SOCK = "docker.sock" + VAR_RUN_PATH = "/var/run/" + + tmp_dir = "tmp" + packages_file = "packages.json" + packages_path = os.path.join(PACKAGE_MANAGER_DIR, packages_file) + sonic_version = re.sub(IMAGE_PREFIX, '', binary_image_version) + new_image_dir = bootloader.get_image_path(binary_image_version) + + with contextlib.ExitStack() as stack: + def get_path(path): + """ Closure to get path by entering + a context manager of bootloader.get_path_in_image """ + + return stack.enter_context(bootloader.get_path_in_image(new_image_dir, path)) + + new_image_squashfs_path = get_path(ROOTFS_NAME) + new_image_upper_dir = get_path(UPPERDIR_NAME) + new_image_work_dir = get_path(WORKDIR_NAME) + new_image_docker_dir = get_path(DOCKERDIR_NAME) + new_image_mount = os.path.join("/", tmp_dir, "image-{0}-fs".format(sonic_version)) + new_image_docker_mount = os.path.join(new_image_mount, "var", "lib", "docker") + + try: + mount_squash_fs(new_image_squashfs_path, new_image_mount) + # make sure upper dir and work dir exist + run_command_or_raise(["mkdir", "-p", new_image_upper_dir]) + run_command_or_raise(["mkdir", "-p", new_image_work_dir]) + mount_overlay_fs(new_image_mount, new_image_upper_dir, new_image_work_dir, new_image_mount) + mount_bind(new_image_docker_dir, new_image_docker_mount) + mount_procfs_chroot(new_image_mount) + mount_sysfs_chroot(new_image_mount) + run_command_or_raise(["chroot", new_image_mount, DOCKER_CTL_SCRIPT, "start"]) + run_command_or_raise(["cp", packages_path, os.path.join(new_image_mount, tmp_dir, packages_file)]) + run_command_or_raise(["touch", os.path.join(new_image_mount, "tmp", DOCKERD_SOCK)]) + run_command_or_raise(["mount", "--bind", + os.path.join(VAR_RUN_PATH, DOCKERD_SOCK), + os.path.join(new_image_mount, "tmp", DOCKERD_SOCK)]) + run_command_or_raise(["chroot", new_image_mount, SONIC_PACKAGE_MANAGER, "migrate", + os.path.join("/", tmp_dir, packages_file), + "--dockerd-socket", os.path.join("/", tmp_dir, DOCKERD_SOCK), + "-y"]) + finally: + run_command("chroot {} {} stop".format(new_image_mount, DOCKER_CTL_SCRIPT)) + umount(new_image_mount, recursive=True, read_only=False, remove_dir=False) + umount(new_image_mount) + # Main entrypoint @click.group(cls=AliasedGroup) @@ -286,8 +380,10 @@ def sonic_installer(): help="Force installation of an image of a type which differs from that of the current running image") @click.option('--skip_migration', is_flag=True, help="Do not migrate current configuration to the newly installed image") +@click.option('--skip-package-migration', is_flag=True, + help="Do not migrate current packages to the newly installed image") @click.argument('url') -def install(url, force, skip_migration=False): +def install(url, force, skip_migration=False, skip_package_migration=False): """ Install image from local binary or URL""" bootloader = get_bootloader() @@ -331,7 +427,10 @@ def install(url, force, skip_migration=False): else: run_command('config-setup backup') - update_sonic_environment(click, bootloader, binary_image_version) + update_sonic_environment(bootloader, binary_image_version) + + if not skip_package_migration: + migrate_sonic_packages(bootloader, binary_image_version) # Finally, sync filesystem run_command("sync;sync;sync") diff --git a/sonic_package_manager/__init__.py b/sonic_package_manager/__init__.py new file mode 100644 index 0000000000..9d8827c5e4 --- /dev/null 
+++ b/sonic_package_manager/__init__.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python
+
+from sonic_package_manager.manager import PackageManager
+
+__all__ = ['PackageManager']
diff --git a/sonic_package_manager/constraint.py b/sonic_package_manager/constraint.py
new file mode 100644
index 0000000000..af5a13000b
--- /dev/null
+++ b/sonic_package_manager/constraint.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python
+
+""" Package version constraints module. """
+
+import re
+from abc import ABC
+from dataclasses import dataclass, field
+from typing import Dict, Union
+
+import semver
+
+
+class VersionConstraint(semver.VersionConstraint, ABC):
+    """ Extends VersionConstraint from semver package. """
+
+    @staticmethod
+    def parse(constraint_expression: str) -> 'VersionConstraint':
+        """ Parse version constraint.
+
+        Args:
+            constraint_expression: Expression syntax: "[[op][version]]+".
+        Returns:
+            The resulting VersionConstraint object.
+        """
+
+        return semver.parse_constraint(constraint_expression)
+
+
+@dataclass
+class ComponentConstraints:
+    """ ComponentConstraints is a set of components version constraints. """
+
+    components: Dict[str, VersionConstraint] = field(default_factory=dict)
+
+    @staticmethod
+    def parse(constraints: Dict) -> 'ComponentConstraints':
+        """ Parse constraints from dictionary.
+
+        Args:
+            constraints: dictionary with component name
+            as key and constraint expression as value
+
+        Returns:
+            ComponentConstraints object.
+
+        """
+
+        components = {component: VersionConstraint.parse(version)
+                      for component, version in constraints.items()}
+        return ComponentConstraints(components)
+
+    def deparse(self) -> Dict[str, str]:
+        """ Returns the manifest representation of components constraints.
+
+        Returns:
+            Dictionary of string keys and string values.
+
+        """
+
+        return {
+            component: str(version) for component, version in self.components.items()
+        }
+
+
+@dataclass
+class PackageConstraint:
+    """ PackageConstraint is a package version constraint. """
+
+    name: str
+    constraint: VersionConstraint
+    _components: ComponentConstraints = ComponentConstraints({})
+
+    def __str__(self): return f'{self.name}{self.constraint}'
+
+    @property
+    def components(self): return self._components.components
+
+    @staticmethod
+    def from_string(constraint_expression: str) -> 'PackageConstraint':
+        """ Parse a package constraint string which contains a package
+        name separated by a space from zero, one or more version constraint
+        expressions. A variety of version matching operators are supported
+        including >, <, ==, !=, ^, *. See Examples.
+
+        Args:
+            constraint_expression: Expression syntax "[package name] [[op][version]]+".
+
+        Returns:
+            PackageConstraint object.
+
+        Examples:
+            >>> PackageConstraint.parse('syncd^1.0.0').constraint
+            <VersionRange (>=1.0.0,<2.0.0)>
+            >>> PackageConstraint.parse('swss>1.3.2 <4.2.1').constraint
+            <VersionRange (>1.3.2,<4.2.1)>
+            >>> PackageConstraint.parse('swss').constraint
+            <AnyConstraint>
+        """
+
+        REQUIREMENT_SPECIFIER_RE = \
+            r'(?P<name>[A-Za-z0-9_-]+)(?P<constraint>.*)'
+
+        match = re.match(REQUIREMENT_SPECIFIER_RE, constraint_expression)
+        if match is None:
+            raise ValueError(f'Invalid constraint {constraint_expression}')
+        groupdict = match.groupdict()
+        name = groupdict.get('name')
+        constraint = groupdict.get('constraint') or '*'
+        return PackageConstraint(name, VersionConstraint.parse(constraint))
+
+    @staticmethod
+    def from_dict(constraint_dict: Dict) -> 'PackageConstraint':
+        """ Parse package constraint information from dictionary.
E.g:
+
+            {
+                "name": "swss",
+                "version": "^1.0.0",
+                "components": {
+                    "libswsscommon": "^1.0.0"
+                }
+            }
+
+        Args:
+            constraint_dict: Dictionary of constraint information.
+
+        Returns:
+            PackageConstraint object.
+        """
+
+        name = constraint_dict['name']
+        version = VersionConstraint.parse(constraint_dict.get('version') or '*')
+        components = ComponentConstraints.parse(constraint_dict.get('components', {}))
+        return PackageConstraint(name, version, components)
+
+    @staticmethod
+    def parse(constraint: Union[str, Dict]) -> 'PackageConstraint':
+        """ Parse constraint from string expression or dictionary.
+
+        Args:
+            constraint: string or dictionary. Check from_string() and from_dict() methods.
+
+        Returns:
+            PackageConstraint object.
+
+        """
+
+        if type(constraint) is str:
+            return PackageConstraint.from_string(constraint)
+        elif type(constraint) is dict:
+            return PackageConstraint.from_dict(constraint)
+        else:
+            raise ValueError('Input argument should be either str or dict')
+
+    def deparse(self) -> Dict:
+        """ Returns the manifest representation of package constraint.
+
+        Returns:
+            Dictionary in manifest representation.
+
+        """
+
+        return {
+            'name': self.name,
+            'version': str(self.constraint),
+            'components': self._components.deparse(),
+        }
diff --git a/sonic_package_manager/database.py b/sonic_package_manager/database.py
new file mode 100644
index 0000000000..6c1cec5c07
--- /dev/null
+++ b/sonic_package_manager/database.py
@@ -0,0 +1,222 @@
+#!/usr/bin/env python
+
+""" Repository Database interface module. """
+
+import json
+import os
+from dataclasses import dataclass, replace
+from typing import Optional, Dict, Callable
+
+from sonic_package_manager.errors import PackageManagerError, PackageNotFoundError, PackageAlreadyExistsError
+from sonic_package_manager.version import Version
+
+BASE_LIBRARY_PATH = '/var/lib/sonic-package-manager/'
+PACKAGE_MANAGER_DB_FILE_PATH = os.path.join(BASE_LIBRARY_PATH, 'packages.json')
+PACKAGE_MANAGER_LOCK_FILE = os.path.join(BASE_LIBRARY_PATH, '.lock')
+
+
+@dataclass(order=True)
+class PackageEntry:
+    """ Package database single entry object.
+
+    Attributes:
+        name: Name of the package.
+        repository: Default repository to pull package from.
+        description: Package description or None if package does not
+                     provide a description.
+        default_reference: Default reference (tag or digest) or None
+                           if default reference is not provided.
+        version: Installed version of the package or None if
+                 package is not installed.
+        installed: Boolean flag whether the package is installed.
+        built_in: Boolean flag whether the package is built in.
+        image_id: Image ID for this package or None if package
+                  is not installed.
+ """ + + name: str + repository: Optional[str] + description: Optional[str] = None + default_reference: Optional[str] = None + version: Optional[Version] = None + installed: bool = False + built_in: bool = False + image_id: Optional[str] = None + + +def package_from_dict(name: str, package_info: Dict) -> PackageEntry: + """ Parse dictionary into PackageEntry object.""" + + repository = package_info.get('repository') + description = package_info.get('description') + default_reference = package_info.get('default-reference') + version = package_info.get('installed-version') + if version: + version = Version.parse(version) + installed = package_info.get('installed', False) + built_in = package_info.get('built-in', False) + image_id = package_info.get('image-id') + + return PackageEntry(name, repository, description, + default_reference, version, installed, + built_in, image_id) + + +def package_to_dict(package: PackageEntry) -> Dict: + """ Serialize package into dictionary. """ + + return { + 'repository': package.repository, + 'description': package.description, + 'default-reference': package.default_reference, + 'installed-version': None if package.version is None else str(package.version), + 'installed': package.installed, + 'built-in': package.built_in, + 'image-id': package.image_id, + } + + +class PackageDatabase: + """ An interface to SONiC repository database """ + + def __init__(self, + database: Dict[str, PackageEntry], + on_save: Optional[Callable] = None): + """ Initialize PackageDatabase. + + Args: + database: Database dictionary + on_save: Optional callback to execute on commit() + """ + + self._database = database + self._on_save = on_save + + def add_package(self, + name: str, + repository: str, + description: Optional[str] = None, + default_reference: Optional[str] = None): + """ Adds a new package entry in database. + + Args: + name: Package name. + repository: Repository URL. + description: Description string. + default_reference: Default version string. + + Raises: + PackageAlreadyExistsError: if package already exists in database. + """ + + if self.has_package(name): + raise PackageAlreadyExistsError(name) + + package = PackageEntry(name, repository, description, default_reference) + self._database[name] = package + + def remove_package(self, name: str): + """ Removes package entry from database. + + Args: + name: repository name. + Raises: + PackageNotFoundError: Raises when package with the given name does not exist + in the database. + """ + + pkg = self.get_package(name) + + if pkg.built_in: + raise PackageManagerError(f'Package {name} is built-in, cannot remove it') + + if pkg.installed: + raise PackageManagerError(f'Package {name} is installed, uninstall it first') + + self._database.pop(name) + + def update_package(self, pkg: PackageEntry): + """ Modify repository in the database. + + Args: + pkg: Repository object. + Raises: + PackageManagerError: Raises when repository with the given name does not exist + in the database. + """ + + name = pkg.name + + if not self.has_package(name): + raise PackageNotFoundError(name) + + self._database[name] = pkg + + def get_package(self, name: str) -> PackageEntry: + """ Return a package referenced by name. + If the package is not found PackageNotFoundError is thrown. + + Args: + name: Package name. + Returns: + PackageInfo object. + Raises: + PackageNotFoundError: When package called name was not found. 
+ """ + + try: + pkg = self._database[name] + except KeyError: + raise PackageNotFoundError(name) + + return replace(pkg) + + def has_package(self, name: str) -> bool: + """ Checks if the database contains an entry for a package. + called name. Returns True if the package exists, otherwise False. + + Args: + name: Package name. + Returns: + True if the package exists, otherwise False. + """ + + try: + self.get_package(name) + return True + except PackageNotFoundError: + return False + + def __iter__(self): + """ Iterates over packages in the database. + + Yields: + PackageInfo object. + """ + + for name, _ in self._database.items(): + yield self.get_package(name) + + @staticmethod + def from_file(db_file=PACKAGE_MANAGER_DB_FILE_PATH) -> 'PackageDatabase': + """ Read database content from file. """ + + def on_save(database): + with open(db_file, 'w') as db: + db_content = {} + for name, package in database.items(): + db_content[name] = package_to_dict(package) + json.dump(db_content, db, indent=4) + + database = {} + with open(db_file) as db: + db_content = json.load(db) + for key in db_content: + package = package_from_dict(key, db_content[key]) + database[key] = package + return PackageDatabase(database, on_save) + + def commit(self): + """ Save database content to file. """ + + if self._on_save: + self._on_save(self._database) diff --git a/sonic_package_manager/dockerapi.py b/sonic_package_manager/dockerapi.py new file mode 100644 index 0000000000..926600d0bc --- /dev/null +++ b/sonic_package_manager/dockerapi.py @@ -0,0 +1,226 @@ +#!/usr/bin/evn python + +""" Module provides Docker interface. """ + +import contextlib +import io +import tarfile +import re +from typing import Optional + +from sonic_package_manager.logger import log +from sonic_package_manager.progress import ProgressManager + + +def is_digest(ref: str): + return ref.startswith('sha256:') + + +def bytes_to_mb(bytes): + return bytes / 1024 / 1024 + + +def get_id(line): + return line['id'] + + +def get_status(line): + return line['status'] + + +def get_progress(line): + progress = line['progressDetail'] + current = bytes_to_mb(progress['current']) + total = bytes_to_mb(progress['total']) + return current, total + + +def process_progress(progress_manager, line): + try: + status = get_status(line) + id = get_id(line) + current, total = get_progress(line) + + if id not in progress_manager: + progress_manager.new(id, + total=total, + unit='Mb', + desc=f'{status} {id}') + pbar = progress_manager.get(id) + + # Complete status + if 'complete' in status: + pbar.desc = f'{status} {id}' + pbar.update(pbar.total) + return + + # Status changed + if status not in pbar.desc: + pbar.desc = f'{status} {id}' + pbar.total = total + pbar.count = 0 + + pbar.update(current - pbar.count) + except KeyError: + # not a progress line + return + + +def get_repository_from_image(image): + """ Returns the first RepoTag repository + found in image. """ + + repotags = image.attrs['RepoTags'] + for repotag in repotags: + repository, tag = repotag.split(':') + return repository + + +class DockerApi: + """ DockerApi provides a set of methods - + wrappers around docker client methods """ + + def __init__(self, + client, + progress_manager: Optional[ProgressManager] = None): + self.client = client + self.progress_manager = progress_manager + + def pull(self, repository: str, + reference: Optional[str] = None): + """ Docker 'pull' command. 
diff --git a/sonic_package_manager/dockerapi.py b/sonic_package_manager/dockerapi.py
new file mode 100644
index 0000000000..926600d0bc
--- /dev/null
+++ b/sonic_package_manager/dockerapi.py
@@ -0,0 +1,226 @@
+#!/usr/bin/env python
+
+""" Module provides Docker interface. """
+
+import contextlib
+import io
+import tarfile
+import re
+from typing import Optional
+
+from sonic_package_manager.logger import log
+from sonic_package_manager.progress import ProgressManager
+
+
+def is_digest(ref: str):
+    return ref.startswith('sha256:')
+
+
+def bytes_to_mb(bytes):
+    return bytes / 1024 / 1024
+
+
+def get_id(line):
+    return line['id']
+
+
+def get_status(line):
+    return line['status']
+
+
+def get_progress(line):
+    progress = line['progressDetail']
+    current = bytes_to_mb(progress['current'])
+    total = bytes_to_mb(progress['total'])
+    return current, total
+
+
+def process_progress(progress_manager, line):
+    try:
+        status = get_status(line)
+        id = get_id(line)
+        current, total = get_progress(line)
+
+        if id not in progress_manager:
+            progress_manager.new(id,
+                                 total=total,
+                                 unit='Mb',
+                                 desc=f'{status} {id}')
+        pbar = progress_manager.get(id)
+
+        # Complete status
+        if 'complete' in status:
+            pbar.desc = f'{status} {id}'
+            pbar.update(pbar.total)
+            return
+
+        # Status changed
+        if status not in pbar.desc:
+            pbar.desc = f'{status} {id}'
+            pbar.total = total
+            pbar.count = 0
+
+        pbar.update(current - pbar.count)
+    except KeyError:
+        # not a progress line
+        return
+
+
+def get_repository_from_image(image):
+    """ Returns the first RepoTag repository
+    found in image. """
+
+    repotags = image.attrs['RepoTags']
+    for repotag in repotags:
+        repository, tag = repotag.split(':')
+        return repository
+
+
+class DockerApi:
+    """ DockerApi provides a set of methods -
+    wrappers around docker client methods """
+
+    def __init__(self,
+                 client,
+                 progress_manager: Optional[ProgressManager] = None):
+        self.client = client
+        self.progress_manager = progress_manager
+
+    def pull(self, repository: str,
+             reference: Optional[str] = None):
+        """ Docker 'pull' command.
+
+        Args:
+            repository: repository to pull
+            reference: tag or digest
+        """
+
+        log.debug(f'pulling image from {repository} reference={reference}')
+
+        api = self.client.api
+        progress_manager = self.progress_manager
+
+        digest = None
+
+        with progress_manager or contextlib.nullcontext():
+            for line in api.pull(repository,
+                                 reference,
+                                 stream=True,
+                                 decode=True):
+                log.debug(f'pull status: {line}')
+
+                status = get_status(line)
+
+                # Record pulled digest
+                digest_match = re.match(r'Digest: (?P<sha>.*)', status)
+                if digest_match:
+                    digest = digest_match.groupdict()['sha']
+
+                if progress_manager:
+                    process_progress(progress_manager, line)
+
+        log.debug(f'Digest: {digest}')
+        log.debug(f'image from {repository} reference={reference} pulled successfully')
+
+        return self.get_image(f'{repository}@{digest}')
+
+    def load(self, imgpath: str):
+        """ Docker 'load' command.
+
+        Args:
+            imgpath: path to the saved image tarball
+        """
+
+        log.debug(f'loading image from {imgpath}')
+
+        api = self.client.api
+        progress_manager = self.progress_manager
+
+        imageid = None
+        repotag = None
+
+        with progress_manager or contextlib.nullcontext():
+            with open(imgpath, 'rb') as imagefile:
+                for line in api.load_image(imagefile, quiet=False):
+                    log.debug(f'pull status: {line}')
+
+                    if progress_manager:
+                        process_progress(progress_manager, line)
+
+                    if 'stream' not in line:
+                        continue
+
+                    stream = line['stream']
+                    repotag_match = re.match(r'Loaded image: (?P<repotag>.*)\n', stream)
+                    if repotag_match:
+                        repotag = repotag_match.groupdict()['repotag']
+                    imageid_match = re.match(r'Loaded image ID: sha256:(?P<id>.*)\n', stream)
+                    if imageid_match:
+                        imageid = imageid_match.groupdict()['id']
+
+        imagename = repotag if repotag else imageid
+        log.debug(f'Loaded image {imagename}')
+
+        return self.get_image(imagename)
+
+    def rmi(self, image: str, **kwargs):
+        """ Docker 'rmi -f' command. """
+
+        log.debug(f'removing image {image} kwargs={kwargs}')
+
+        self.client.images.remove(image, **kwargs)
+
+        log.debug(f'image {image} removed successfully')
+
+    def tag(self, image: str, repotag: str, **kwargs):
+        """ Docker 'tag' command """
+
+        log.debug(f'tagging image {image} {repotag} kwargs={kwargs}')
+
+        img = self.client.images.get(image)
+        img.tag(repotag, **kwargs)
+
+        log.debug(f'image {image} tagged {repotag} successfully')
+
+    def rm(self, container: str, **kwargs):
+        """ Docker 'rm' command. """
+
+        self.client.containers.get(container).remove(**kwargs)
+        log.debug(f'removed container {container}')
+
+    def ps(self, **kwargs):
+        """ Docker 'ps' command. """
+
+        return self.client.containers.list(**kwargs)
+
+    def labels(self, image: str):
+        """ Returns a list of labels associated with image. """
+
+        log.debug(f'inspecting image labels {image}')
+
+        labels = self.client.images.get(image).labels
+
+        log.debug(f'image {image} labels retrieved successfully: {labels}')
+        return labels
+
+    def get_image(self, name: str):
+        return self.client.images.get(name)
+
+    def extract(self, image, src_path: str, dst_path: str):
+        """ Copy src_path from the docker image to host dst_path.
""" + + buf = bytes() + + container = self.client.containers.create(image) + try: + bits, _ = container.get_archive(src_path) + for chunk in bits: + buf += chunk + finally: + container.remove(force=True) + + with tarfile.open(fileobj=io.BytesIO(buf)) as tar: + for member in tar: + if dst_path.endswith('/'): + tar.extract(member, dst_path) + else: + member.name = dst_path + tar.extract(member, dst_path) diff --git a/sonic_package_manager/errors.py b/sonic_package_manager/errors.py new file mode 100644 index 0000000000..17279c52c4 --- /dev/null +++ b/sonic_package_manager/errors.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python + +""" SONiC Package Manager exceptions are defined in this module. """ + +from dataclasses import dataclass +from typing import Optional + +from sonic_package_manager.constraint import PackageConstraint, VersionConstraint +from sonic_package_manager.version import Version + + +class PackageManagerError(Exception): + """ Base class for exceptions generated by SONiC package manager """ + + pass + + +class ManifestError(Exception): + """ Class for manifest validate failures. """ + + pass + + +class MetadataError(Exception): + """ Class for metadata failures. """ + + pass + + +@dataclass +class PackageNotFoundError(PackageManagerError): + """ Repository not found in repository database exception """ + + name: str + + def __str__(self): + return f'Package {self.name} is not found in packages database' + + +@dataclass +class PackageAlreadyExistsError(PackageManagerError): + """ Package already exists in the packages database exception. """ + + name: str + + def __str__(self): + return f'Package {self.name} already exists in packages database' + + +class PackageInstallationError(PackageManagerError): + """ Exception for package installation error. """ + + pass + + +class PackageUninstallationError(PackageManagerError): + """ Exception for package installation error. """ + + pass + + +class PackageUpgradeError(PackageManagerError): + """ Exception for package upgrade error. """ + + pass + + +@dataclass +class PackageSonicRequirementError(PackageInstallationError): + """ Exception for installation errors, when SONiC version requirement is not met. """ + + name: str + component: str + constraint: PackageConstraint + installed_ver: Optional[Version] = None + + def __str__(self): + if self.installed_ver is not None: + return (f'Package {self.name} requires base OS component {self.component} version {self.constraint} ' + f'while the installed version is {self.installed_ver}') + return (f'Package {self.name} requires base OS component {self.component} version {self.constraint} ' + f'but it is not present int base OS image') + + +@dataclass +class PackageDependencyError(PackageInstallationError): + """ Exception class for installation errors related to missing dependency. """ + + name: str + constraint: PackageConstraint + installed_ver: Optional[Version] = None + + def __str__(self): + if self.installed_ver: + return (f'Package {self.name} requires {self.constraint} ' + f'but version {self.installed_ver} is installed') + return f'Package {self.name} requires {self.constraint} but it is not installed' + + +@dataclass +class PackageComponentDependencyError(PackageInstallationError): + """ Exception class for installation error caused by component + version dependency. 
""" + + name: str + dependency: str + component: str + constraint: VersionConstraint + installed_ver: Optional[Version] = None + + def __str__(self): + if self.installed_ver: + return (f'Package {self.name} requires {self.component} {self.constraint} ' + f'in package {self.dependency} but version {self.installed_ver} is installed') + return (f'Package {self.name} requires {self.component} {self.constraint} ' + f'in package {self.dependency} but it is not installed') + + +@dataclass +class PackageConflictError(PackageInstallationError): + """ Exception class for installation errors related to missing dependency. """ + + name: str + constraint: PackageConstraint + installed_ver: Version + + def __str__(self): + return (f'Package {self.name} conflicts with {self.constraint} but ' + f'version {self.installed_ver} is installed') + + +@dataclass +class PackageComponentConflictError(PackageInstallationError): + """ Exception class for installation error caused by component + version conflict. """ + + name: str + dependency: str + component: str + constraint: VersionConstraint + installed_ver: Version + + def __str__(self): + return (f'Package {self.name} conflicts with {self.component} {self.constraint} ' + f'in package {self.dependency} but version {self.installed_ver} is installed') + diff --git a/sonic_package_manager/logger.py b/sonic_package_manager/logger.py new file mode 100644 index 0000000000..3d5e06d35f --- /dev/null +++ b/sonic_package_manager/logger.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python + +""" Logger for sonic-package-manager. """ + +import logging.handlers + +import click_log + + +class Formatter(click_log.ColorFormatter): + """ Click logging formatter. """ + + colors = { + 'error': dict(fg='red'), + 'exception': dict(fg='red'), + 'critical': dict(fg='red'), + 'debug': dict(fg='blue', bold=True), + 'warning': dict(fg='yellow'), + } + + +log = logging.getLogger("sonic-package-manager") +log.setLevel(logging.INFO) + +click_handler = click_log.ClickHandler() +click_handler.formatter = Formatter() + +log.addHandler(click_handler) +log.addHandler(logging.handlers.SysLogHandler()) diff --git a/sonic_package_manager/main.py b/sonic_package_manager/main.py new file mode 100644 index 0000000000..c0589ae5b5 --- /dev/null +++ b/sonic_package_manager/main.py @@ -0,0 +1,460 @@ +#!/usr/bin/env python + +import functools +import json +import os +import sys +import typing + +import click +import click_log +import tabulate +from natsort import natsorted + +from sonic_package_manager.database import PackageEntry, PackageDatabase +from sonic_package_manager.errors import PackageManagerError +from sonic_package_manager.logger import log +from sonic_package_manager.manager import PackageManager + +BULLET_UC = '\u2022' + + +def exit_cli(*args, **kwargs): + """ Print a message and exit with rc 1. """ + + click.secho(*args, **kwargs) + sys.exit(1) + + +def show_help(ctx): + """ Show help message and exit process successfully. """ + + click.echo(ctx.get_help()) + ctx.exit(0) + + +def root_privileges_required(func: typing.Callable) -> typing.Callable: + """ Decorates a function, so that the function is invoked + only if the user is root. """ + + @functools.wraps(func) + def wrapped_function(*args, **kwargs): + """ Wrapper around func. """ + + if os.geteuid() != 0: + exit_cli('Root privileges required for this operation', fg='red') + + return func(*args, **kwargs) + + wrapped_function.__doc__ += '\n\n NOTE: This command requires elevated (root) privileges to run.' 
+
+    return wrapped_function
+
+
+def add_options(options):
+    """ Decorator to append options from
+    input list to command. """
+
+    def _add_options(func):
+        for option in reversed(options):
+            func = option(func)
+        return func
+
+    return _add_options
+
+
+class MutuallyExclusiveOption(click.Option):
+    """ This option type is extended with a 'mutually_exclusive'
+    parameter which makes the CLI check that mutually exclusive options
+    are not used together in a single command. """
+
+    def __init__(self, *args, **kwargs):
+        self.mutually_exclusive = set(kwargs.pop('mutually_exclusive', []))
+        help_string = kwargs.get('help', '')
+        if self.mutually_exclusive:
+            ex_str = ', '.join(self.mutually_exclusive)
+            kwargs['help'] = f'{help_string} ' \
+                             f'NOTE: This argument is mutually ' \
+                             f'exclusive with arguments: [{ex_str}].'
+        super().__init__(*args, **kwargs)
+
+    def handle_parse_result(self, ctx, opts, args):
+        if self.name in opts and opts[self.name] is not None:
+            for opt_name in self.mutually_exclusive.intersection(opts):
+                if opts[opt_name] is None:
+                    continue
+
+                raise click.UsageError(f'Illegal usage: {self.name} is mutually '
+                                       f'exclusive with arguments '
+                                       f'{", ".join(self.mutually_exclusive)}.')
+
+        return super().handle_parse_result(ctx, opts, args)
+
+
+PACKAGE_SOURCE_OPTIONS = [
+    click.option('--from-repository',
+                 help='Fetch package directly from image registry repository.',
+                 cls=MutuallyExclusiveOption,
+                 mutually_exclusive=['from_tarball', 'package_expr']),
+    click.option('--from-tarball',
+                 type=click.Path(exists=True,
+                                 readable=True,
+                                 file_okay=True,
+                                 dir_okay=False),
+                 help='Fetch package from saved image tarball.',
+                 cls=MutuallyExclusiveOption,
+                 mutually_exclusive=['from_repository', 'package_expr']),
+    click.argument('package-expr',
+                   type=str,
+                   required=False)
+]
+
+
+PACKAGE_COMMON_INSTALL_OPTIONS = [
+    click.option('--skip-host-plugins',
+                 is_flag=True,
+                 help='Do not install host OS plugins provided by the package (CLI, etc). '
+                      'NOTE: In case when package host OS plugins are set as mandatory in '
+                      'package manifest this option will fail the installation.')
]


PACKAGE_COMMON_OPERATION_OPTIONS = [
+    click.option('-f', '--force',
+                 is_flag=True,
+                 help='Force operation by ignoring package dependency tree and package manifest validation failures.'),
+    click.option('-y', '--yes',
+                 is_flag=True,
+                 help='Automatically answer yes on prompts.'),
+    click_log.simple_verbosity_option(log, help='Either CRITICAL, ERROR, WARNING, INFO or DEBUG. Default is INFO.'),
+]
+
+
+def get_package_status(package: PackageEntry):
+    """ Returns the installation status message for package. """
+
+    if package.built_in:
+        return 'Built-In'
+    elif package.installed:
+        return 'Installed'
+    else:
+        return 'Not Installed'
+
+
+@click.group()
+@click.pass_context
+def cli(ctx):
+    """ SONiC Package Manager """
+
+    ctx.obj = PackageManager.get_manager()
+
+
+@cli.group()
+@click.pass_context
+def repository(ctx):
+    """ Repository management commands. """
+
+    pass
+
+
+@cli.group()
+@click.pass_context
+def show(ctx):
+    """ Package manager show commands. """
+
+    pass
+
+
+@show.group()
+@click.pass_context
+def package(ctx):
+    """ Package show commands. """
+
+    pass
+
+
+@cli.command()
+@click.pass_context
+def list(ctx):
+    """ List available packages.
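+
+    A hypothetical listing (package data below is illustrative, not taken
+    from a real database) is rendered by tabulate roughly as:
+
+    Name     Repository           Description      Version    Status
+    -------  -------------------  ---------------  ---------  ---------
+    example  azure/sonic-example  Example package  1.0.0      Installed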
""" + + table_header = ['Name', 'Repository', 'Description', 'Version', 'Status'] + table_body = [] + + manager: PackageManager = ctx.obj + + try: + for package in natsorted(manager.database): + repository = package.repository or 'N/A' + version = package.version or 'N/A' + description = package.description or 'N/A' + status = get_package_status(package) + + table_body.append([ + package.name, + repository, + description, + version, + status + ]) + + click.echo(tabulate.tabulate(table_body, table_header)) + except PackageManagerError as err: + exit_cli(f'Failed to list repositories: {err}', fg='red') + + +@package.command() +@add_options(PACKAGE_SOURCE_OPTIONS) +@click.pass_context +def manifest(ctx, + package_expr, + from_repository, + from_tarball): + """ Show package manifest. """ + + manager: PackageManager = ctx.obj + + try: + source = manager.get_package_source(package_expr, + from_repository, + from_tarball) + package = source.get_package() + click.echo(json.dumps(package.manifest.unmarshal(), indent=4)) + except Exception as err: + exit_cli(f'Failed to print manifest: {err}', fg='red') + + +@package.command() +@click.argument('name') +@click.option('--all', is_flag=True, help='Show all available tags in repository.') +@click.option('--plain', is_flag=True, help='Plain output.') +@click.pass_context +def versions(ctx, name, all, plain): + """ Show available versions. """ + + try: + manager: PackageManager = ctx.obj + versions = manager.get_package_available_versions(name, all) + for version in versions: + if not plain: + click.secho(f'{BULLET_UC} ', bold=True, fg='green', nl=False) + click.secho(f'{version}') + except Exception as err: + exit_cli(f'Failed to get package versions for {name}: {err}', fg='red') + + +@package.command() +@add_options(PACKAGE_SOURCE_OPTIONS) +@click.pass_context +def changelog(ctx, + package_expr, + from_repository, + from_tarball): + """ Show package changelog. """ + + manager: PackageManager = ctx.obj + + try: + source = manager.get_package_source(package_expr, + from_repository, + from_tarball) + package = source.get_package() + changelog = package.manifest['package']['changelog'] + + if not changelog: + raise PackageManagerError(f'No changelog for package {package.name}') + + for version, entry in changelog.items(): + author = entry.get('author') or 'N/A' + email = entry.get('email') or 'N/A' + changes = entry.get('changes') or [] + date = entry.get('date') or 'N/A' + click.secho(f'{version}:\n', fg='green', bold=True) + for line in changes: + click.secho(f' {BULLET_UC} {line}', bold=True) + click.secho(f'\n {author} ' + f'({email}) {date}', fg='green', bold=True) + click.secho('') + + except Exception as err: + exit_cli(f'Failed to print package changelog: {err}', fg='red') + + +@repository.command() +@click.argument('name', type=str) +@click.argument('repository', type=str) +@click.option('--default-reference', type=str, help='Default installation reference. Can be a tag or sha256 digest in repository.') +@click.option('--description', type=str, help='Optional package entry description.') +@click.pass_context +@root_privileges_required +def add(ctx, name, repository, default_reference, description): + """ Add a new repository to database. 
""" + + manager: PackageManager = ctx.obj + + try: + manager.add_repository(name, + repository, + description=description, + default_reference=default_reference) + except Exception as err: + exit_cli(f'Failed to add repository {name}: {err}', fg='red') + + +@repository.command() +@click.argument("name") +@click.pass_context +@root_privileges_required +def remove(ctx, name): + """ Remove repository from database. """ + + manager: PackageManager = ctx.obj + + try: + manager.remove_repository(name) + except Exception as err: + exit_cli(f'Failed to remove repository {name}: {err}', fg='red') + + +@cli.command() +@click.option('--enable', + is_flag=True, + default=None, + help='Set the default state of the feature to enabled ' + 'and enable feature right after installation. ' + 'NOTE: user needs to execute "config save -y" to make ' + 'this setting persistent.') +@click.option('--set-owner', + type=click.Choice(['local', 'kube']), + default=None, + help='Default owner configuration setting for a feature.') +@click.option('--allow-downgrade', + is_flag=True, + default=None, + help='Allow package downgrade. By default an attempt to downgrade the package ' + 'will result in a failure since downgrade might not be supported by the package, ' + 'thus requires explicit request from the user.') +@add_options(PACKAGE_SOURCE_OPTIONS) +@add_options(PACKAGE_COMMON_OPERATION_OPTIONS) +@add_options(PACKAGE_COMMON_INSTALL_OPTIONS) +@click.pass_context +@root_privileges_required +def install(ctx, + package_expr, + from_repository, + from_tarball, + force, + yes, + enable, + set_owner, + skip_host_plugins, + allow_downgrade): + """ Install/Upgrade package using [PACKAGE_EXPR] in format "[=|@]". + + The repository to pull the package from is resolved by lookup in package database, + thus the package has to be added via "sonic-package-manager repository add" command. + + In case when [PACKAGE_EXPR] is a package name "" this command will install or upgrade + to a version referenced by "default-reference" in package database. """ + + manager: PackageManager = ctx.obj + + package_source = package_expr or from_repository or from_tarball + if not package_source: + exit_cli(f'Package source is not specified', fg='red') + + if not yes and not force: + click.confirm(f'{package_source} is going to be installed, ' + f'continue?', abort=True, show_default=True) + + install_opts = { + 'force': force, + 'skip_host_plugins': skip_host_plugins, + } + if enable is not None: + install_opts['enable'] = enable + if set_owner is not None: + install_opts['default_owner'] = set_owner + if allow_downgrade is not None: + install_opts['allow_downgrade'] = allow_downgrade + + try: + manager.install(package_expr, + from_repository, + from_tarball, + **install_opts) + except Exception as err: + exit_cli(f'Failed to install {package_source}: {err}', fg='red') + except KeyboardInterrupt: + exit_cli(f'Operation canceled by user', fg='red') + + +@cli.command() +@add_options(PACKAGE_COMMON_OPERATION_OPTIONS) +@add_options(PACKAGE_COMMON_INSTALL_OPTIONS) +@click.argument('name') +@click.pass_context +@root_privileges_required +def reset(ctx, name, force, yes, skip_host_plugins): + """ Reset package to the default version. 
""" + + manager: PackageManager = ctx.obj + + if not yes and not force: + click.confirm(f'Package {name} is going to be reset to default version, ' + f'continue?', abort=True, show_default=True) + + try: + manager.reset(name, force, skip_host_plugins) + except Exception as err: + exit_cli(f'Failed to reset package {name}: {err}', fg='red') + except KeyboardInterrupt: + exit_cli(f'Operation canceled by user', fg='red') + + +@cli.command() +@add_options(PACKAGE_COMMON_OPERATION_OPTIONS) +@click.argument('name') +@click.pass_context +@root_privileges_required +def uninstall(ctx, name, force, yes): + """ Uninstall package. """ + + manager: PackageManager = ctx.obj + + if not yes and not force: + click.confirm(f'Package {name} is going to be uninstalled, ' + f'continue?', abort=True, show_default=True) + + try: + manager.uninstall(name, force) + except Exception as err: + exit_cli(f'Failed to uninstall package {name}: {err}', fg='red') + except KeyboardInterrupt: + exit_cli(f'Operation canceled by user', fg='red') + + +@cli.command() +@add_options(PACKAGE_COMMON_OPERATION_OPTIONS) +@click.option('--dockerd-socket', type=click.Path()) +@click.argument('database', type=click.Path()) +@click.pass_context +@root_privileges_required +def migrate(ctx, database, force, yes, dockerd_socket): + """ Migrate packages from the given database file. """ + + manager: PackageManager = ctx.obj + + if not yes and not force: + click.confirm('Continue with package migration?', abort=True, show_default=True) + + try: + manager.migrate_packages(PackageDatabase.from_file(database), dockerd_socket) + except Exception as err: + exit_cli(f'Failed to migrate packages {err}', fg='red') + except KeyboardInterrupt: + exit_cli(f'Operation canceled by user', fg='red') + + +if __name__ == "__main__": + cli() diff --git a/sonic_package_manager/manager.py b/sonic_package_manager/manager.py new file mode 100644 index 0000000000..ba437534ed --- /dev/null +++ b/sonic_package_manager/manager.py @@ -0,0 +1,931 @@ +#!/usr/bin/env python + +import contextlib +import functools +import os +import pkgutil +import tempfile +from inspect import signature +from typing import Any, Iterable, Callable, Dict, Optional + +import docker +import filelock +from sonic_py_common import device_info + +from sonic_package_manager import utils +from sonic_package_manager.constraint import ( + VersionConstraint, + PackageConstraint +) +from sonic_package_manager.database import ( + PACKAGE_MANAGER_LOCK_FILE, + PackageDatabase +) +from sonic_package_manager.dockerapi import DockerApi +from sonic_package_manager.errors import ( + PackageManagerError, + PackageDependencyError, + PackageComponentDependencyError, + PackageConflictError, + PackageComponentConflictError, + PackageInstallationError, + PackageSonicRequirementError, + PackageUninstallationError, + PackageUpgradeError +) +from sonic_package_manager.logger import log +from sonic_package_manager.metadata import MetadataResolver +from sonic_package_manager.package import Package +from sonic_package_manager.progress import ProgressManager +from sonic_package_manager.reference import PackageReference +from sonic_package_manager.registry import RegistryResolver +from sonic_package_manager.service_creator.creator import ( + ServiceCreator, + run_command +) +from sonic_package_manager.service_creator.feature import FeatureRegistry +from sonic_package_manager.service_creator.sonic_db import SonicDB +from sonic_package_manager.service_creator.utils import in_chroot +from sonic_package_manager.source import ( + 
+    PackageSource,
+    LocalSource,
+    RegistrySource,
+    TarballSource
+)
+from sonic_package_manager.utils import DockerReference
+from sonic_package_manager.version import (
+    Version,
+    VersionRange,
+    version_to_tag,
+    tag_to_version
+)
+
+
+@contextlib.contextmanager
+def failure_ignore(ignore: bool):
+    """ Ignores failures based on parameter passed. """
+
+    try:
+        yield
+    except Exception as err:
+        if ignore:
+            log.warning(f'ignoring error {err}')
+        else:
+            raise
+
+
+def under_lock(func: Callable) -> Callable:
+    """ Execute operations under lock. """
+
+    @functools.wraps(func)
+    def wrapped_function(*args, **kwargs):
+        self = args[0]
+        with self.lock:
+            return func(*args, **kwargs)
+
+    return wrapped_function
+
+
+def opt_check(func: Callable) -> Callable:
+    """ Check kwargs for function. """
+
+    @functools.wraps(func)
+    def wrapped_function(*args, **kwargs):
+        sig = signature(func)
+        unsupported_opts = [opt for opt in kwargs if opt not in sig.parameters]
+        if unsupported_opts:
+            raise PackageManagerError(
+                f'Unsupported options {unsupported_opts} for {func.__name__}'
+            )
+        return func(*args, **kwargs)
+
+    return wrapped_function
+
+
+def rollback(func, *args, **kwargs):
+    """ Used in rollback callbacks to ignore failure
+    but proceed with rollback. Error will be printed
+    but not fail the whole procedure of rollback. """
+
+    @functools.wraps(func)
+    def wrapper():
+        try:
+            func(*args, **kwargs)
+        except Exception as err:
+            log.error(f'failed in rollback: {err}')
+
+    return wrapper
+
+
+def package_constraint_to_reference(constraint: PackageConstraint) -> PackageReference:
+    package_name, version_constraint = constraint.name, constraint.constraint
+    # Allow only a specific version for now.
+    # Later we can improve package manager to support
+    # installing packages using expressions like 'package>1.0.0'
+    if version_constraint == VersionRange():  # empty range means any version
+        return PackageReference(package_name, None)
+    if not isinstance(version_constraint, Version):
+        raise PackageManagerError(f'Can only install a specific version. '
+                                  f'Use only the following expression "{package_name}=<version>" '
+                                  f'to install a specific version')
+    return PackageReference(package_name, version_to_tag(version_constraint))
+
+
+def parse_reference_expression(expression):
+    try:
+        return package_constraint_to_reference(PackageConstraint.parse(expression))
+    except ValueError:
+        # if we failed to parse the expression as a constraint expression
+        # we will try to parse it as a reference
+        return PackageReference.parse(expression)
+
+
+def validate_package_base_os_constraints(package: Package, sonic_version_info: Dict[str, str]):
+    """ Verify that all dependencies on base OS components are met.
+    Args:
+        package: Package to check constraints for.
+        sonic_version_info: SONiC components version information.
+    Raises:
+        PackageSonicRequirementError: in case dependency is not satisfied.
+    """
+
+    base_os_constraints = package.manifest['package']['base-os'].components
+    for component, constraint in base_os_constraints.items():
+        if component not in sonic_version_info:
+            raise PackageSonicRequirementError(package.name, component, constraint)
+
+        version = Version.parse(sonic_version_info[component])
+
+        if not constraint.allows_all(version):
+            raise PackageSonicRequirementError(package.name, component, constraint, version)
+
+
+def validate_package_tree(packages: Dict[str, Package]):
+    """ Verify that all dependencies are met in all packages passed to this function.
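+    For every package in the set, both its "depends" constraints (package
+    version and component versions) and its "breaks" constraints are
+    checked against the other packages passed in.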
+
+    Args:
+        packages: list of packages to check
+    Raises:
+        PackageDependencyError: if dependency is missing
+        PackageConflictError: if there is a conflict between packages
+    """
+
+    for name, package in packages.items():
+        log.debug(f'checking dependencies for {name}')
+        for dependency in package.manifest['package']['depends']:
+            dependency_package = packages.get(dependency.name)
+            if dependency_package is None:
+                raise PackageDependencyError(package.name, dependency)
+
+            installed_version = dependency_package.version
+            log.debug(f'dependency package is installed {dependency.name}: {installed_version}')
+            if not dependency.constraint.allows_all(installed_version):
+                raise PackageDependencyError(package.name, dependency, installed_version)
+
+            dependency_components = dependency.components
+            if not dependency_components:
+                dependency_components = {}
+                for component, version in package.components.items():
+                    implicit_constraint = VersionConstraint.parse(f'^{version.major}.{version.minor}.0')
+                    dependency_components[component] = implicit_constraint
+
+            for component, constraint in dependency_components.items():
+                if component not in dependency_package.components:
+                    raise PackageComponentDependencyError(package.name, dependency,
+                                                          component, constraint)
+
+                component_version = dependency_package.components[component]
+                log.debug(f'dependency package {dependency.name}: '
+                          f'component {component} version is {component_version}')
+
+                if not constraint.allows_all(component_version):
+                    raise PackageComponentDependencyError(package.name, dependency, component,
+                                                          constraint, component_version)
+
+        log.debug(f'checking conflicts for {name}')
+        for conflict in package.manifest['package']['breaks']:
+            conflicting_package = packages.get(conflict.name)
+            if conflicting_package is None:
+                continue
+
+            installed_version = conflicting_package.version
+            log.debug(f'conflicting package is installed {conflict.name}: {installed_version}')
+            if conflict.constraint.allows_all(installed_version):
+                raise PackageConflictError(package.name, conflict, installed_version)
+
+            # Iterate over the component constraints declared by the "breaks"
+            # entry and compare them against the conflicting package
+            for component, constraint in conflict.components.items():
+                if component not in conflicting_package.components:
+                    continue
+
+                component_version = conflicting_package.components[component]
+                log.debug(f'conflicting package {conflict.name}: '
+                          f'component {component} version is {component_version}')
+
+                if constraint.allows_all(component_version):
+                    raise PackageComponentConflictError(package.name, conflict, component,
+                                                        constraint, component_version)
+
+
+def validate_package_cli_can_be_skipped(package: Package, skip: bool):
+    """ Checks whether package CLI installation can be skipped.
+
+    Args:
+        package: Package to validate
+        skip: Whether to skip installing CLI
+
+    Raises:
+        PackageManagerError
+
+    """
+
+    if package.manifest['cli']['mandatory'] and skip:
+        raise PackageManagerError(f'CLI is mandatory for package {package.name} '
+                                  f'but it was requested to be not installed')
+    elif skip:
+        log.warning(f'Package {package.name} CLI plugin will not be installed')
+
+
+class PackageManager:
+    """ SONiC Package Manager. This class provides public API
+    for sonic_package_manager python library. It has functionality
+    for installing, uninstalling, updating SONiC packages as well as
+    retrieving information about the packages from different sources.
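+    All mutating operations run under a file lock and validate the
+    package dependency tree before changing the system.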
""" + + def __init__(self, + docker_api: DockerApi, + registry_resolver: RegistryResolver, + database: PackageDatabase, + metadata_resolver: MetadataResolver, + service_creator: ServiceCreator, + device_information: Any, + lock: filelock.FileLock): + """ Initialize PackageManager. """ + + self.lock = lock + self.docker = docker_api + self.registry_resolver = registry_resolver + self.database = database + self.metadata_resolver = metadata_resolver + self.service_creator = service_creator + self.feature_registry = service_creator.feature_registry + self.is_multi_npu = device_information.is_multi_npu() + self.num_npus = device_information.get_num_npus() + self.version_info = device_information.get_sonic_version_info() + + @under_lock + def add_repository(self, *args, **kwargs): + """ Add repository to package database + and commit database content. + + Args: + args: Arguments to pass to PackageDatabase.add_package + kwargs: Keyword arguments to pass to PackageDatabase.add_package + """ + + self.database.add_package(*args, **kwargs) + self.database.commit() + + @under_lock + def remove_repository(self, name: str): + """ Remove repository from package database + and commit database content. + + Args: + name: package name + """ + + self.database.remove_package(name) + self.database.commit() + + @under_lock + def install(self, + expression: Optional[str] = None, + repotag: Optional[str] = None, + tarball: Optional[str] = None, + **kwargs): + """ Install/Upgrade SONiC Package from either an expression + representing the package and its version, repository and tag or + digest in same format as "docker pulL" accepts or an image tarball path. + + Args: + expression: SONiC Package reference expression + repotag: Install/Upgrade from REPO[:TAG][@DIGEST] + tarball: Install/Upgrade from tarball, path to tarball file + kwargs: Install/Upgrade options for self.install_from_source + Raises: + PackageManagerError + """ + + source = self.get_package_source(expression, repotag, tarball) + package = source.get_package() + + if self.is_installed(package.name): + self.upgrade_from_source(source, **kwargs) + else: + self.install_from_source(source, **kwargs) + + @under_lock + @opt_check + def install_from_source(self, + source: PackageSource, + force=False, + enable=False, + default_owner='local', + skip_host_plugins=False): + """ Install SONiC Package from source represented by PackageSource. + This method contains the logic of package installation. + + Args: + source: SONiC Package source. + force: Force the installation. + enable: If True the installed feature package will be enabled. + default_owner: Owner of the installed package. + skip_host_plugins: Skip CLI plugin installation. + Raises: + PackageManagerError + """ + + package = source.get_package() + name = package.name + + with failure_ignore(force): + if self.is_installed(name): + raise PackageInstallationError(f'{name} is already installed') + + version = package.manifest['package']['version'] + feature_state = 'enabled' if enable else 'disabled' + installed_packages = self._get_installed_packages_and(package) + + with failure_ignore(force): + validate_package_base_os_constraints(package, self.version_info) + validate_package_tree(installed_packages) + validate_package_cli_can_be_skipped(package, skip_host_plugins) + + # After all checks are passed we proceed to actual installation + + # When installing package from a tarball or directly from registry + # package name may not be in database. 
+
+        if not self.database.has_package(package.name):
+            self.database.add_package(package.name, package.repository)
+
+        try:
+            with contextlib.ExitStack() as exits:
+                source.install(package)
+                exits.callback(rollback(source.uninstall, package))
+
+                self.service_creator.create(package, state=feature_state, owner=default_owner)
+                exits.callback(rollback(self.service_creator.remove, package))
+
+                if not skip_host_plugins:
+                    self._install_cli_plugins(package)
+                    exits.callback(rollback(self._uninstall_cli_plugins, package))
+
+                exits.pop_all()
+        except Exception as err:
+            raise PackageInstallationError(f'Failed to install {package.name}: {err}')
+        except KeyboardInterrupt:
+            raise
+
+        package.entry.installed = True
+        package.entry.version = version
+        self.database.update_package(package.entry)
+        self.database.commit()
+
+    @under_lock
+    @opt_check
+    def uninstall(self, name: str, force=False):
+        """ Uninstall SONiC Package referenced by name. The uninstallation
+        can be forced if the force argument is True.
+
+        Args:
+            name: SONiC Package name.
+            force: Force the uninstallation.
+        Raises:
+            PackageManagerError
+        """
+
+        with failure_ignore(force):
+            if not self.is_installed(name):
+                raise PackageUninstallationError(f'{name} is not installed')
+
+        package = self.get_installed_package(name)
+        service_name = package.manifest['service']['name']
+
+        with failure_ignore(force):
+            if self.feature_registry.is_feature_enabled(service_name):
+                raise PackageUninstallationError(
+                    f'{service_name} is enabled. Disable the feature first')
+
+        if package.built_in:
+            raise PackageUninstallationError(
+                f'Cannot uninstall built-in package {package.name}')
+
+        installed_packages = self._get_installed_packages_except(package)
+
+        with failure_ignore(force):
+            validate_package_tree(installed_packages)
+
+        # After all checks are passed we proceed to actual uninstallation
+
+        try:
+            self._uninstall_cli_plugins(package)
+            self.service_creator.remove(package)
+
+            # Clean containers based on this image
+            containers = self.docker.ps(filters={'ancestor': package.image_id},
+                                        all=True)
+            for container in containers:
+                self.docker.rm(container.id, force=True)
+
+            self.docker.rmi(package.image_id, force=True)
+            package.entry.image_id = None
+        except Exception as err:
+            raise PackageUninstallationError(
+                f'Failed to uninstall {package.name}: {err}'
+            )
+
+        package.entry.installed = False
+        package.entry.version = None
+        self.database.update_package(package.entry)
+        self.database.commit()
+
+    @under_lock
+    @opt_check
+    def upgrade_from_source(self,
+                            source: PackageSource,
+                            force=False,
+                            skip_host_plugins=False,
+                            allow_downgrade=False):
+        """ Upgrade SONiC Package to a version the package reference
+        expression specifies. Can force the upgrade if force parameter
+        is True. Force can allow a package downgrade.
+
+        Args:
+            source: SONiC Package source
+            force: Force the upgrade.
+            skip_host_plugins: Skip host OS plugins installation.
+            allow_downgrade: Flag to allow package downgrade.
+        Raises:
+            PackageManagerError
+        """
+
+        new_package = source.get_package()
+        name = new_package.name
+
+        with failure_ignore(force):
+            if not self.is_installed(name):
+                raise PackageUpgradeError(f'{name} is not installed')
+
+        old_package = self.get_installed_package(name)
+
+        if old_package.built_in:
+            raise PackageUpgradeError(
+                f'Cannot upgrade built-in package {old_package.name}'
+            )
+
+        old_feature = old_package.manifest['service']['name']
+        new_feature = new_package.manifest['service']['name']
+        old_version = old_package.manifest['package']['version']
+        new_version = new_package.manifest['package']['version']
+
+        with failure_ignore(force):
+            if old_version == new_version:
+                raise PackageUpgradeError(f'{new_version} is already installed')
+
+            # TODO: Not all packages might support downgrade.
+            # We put a check here but we understand that for some packages
+            # the downgrade might be safe to do. There can be a variable in manifest
+            # describing package downgrade ability or downgrade-able versions.
+            if new_version < old_version and not allow_downgrade:
+                raise PackageUpgradeError(
+                    f'Request to downgrade from {old_version} to {new_version}. '
+                    f'Downgrade might not be supported by the package'
+                )
+
+        # remove currently installed package from the list
+        installed_packages = self._get_installed_packages_and(new_package)
+
+        with failure_ignore(force):
+            validate_package_base_os_constraints(new_package, self.version_info)
+            validate_package_tree(installed_packages)
+            validate_package_cli_can_be_skipped(new_package, skip_host_plugins)
+
+        # After all checks are passed we proceed to actual upgrade
+
+        try:
+            with contextlib.ExitStack() as exits:
+                self._uninstall_cli_plugins(old_package)
+                exits.callback(rollback(self._install_cli_plugins, old_package))
+
+                source.install(new_package)
+                exits.callback(rollback(source.uninstall, new_package))
+
+                if self.feature_registry.is_feature_enabled(old_feature):
+                    self._systemctl_action(old_package, 'stop')
+                    exits.callback(rollback(self._systemctl_action,
+                                            old_package, 'start'))
+
+                self.service_creator.remove(old_package, deregister_feature=False)
+                exits.callback(rollback(self.service_creator.create,
+                                        old_package, register_feature=False))
+
+                # Clean containers based on the old image
+                containers = self.docker.ps(filters={'ancestor': old_package.image_id},
+                                            all=True)
+                for container in containers:
+                    self.docker.rm(container.id, force=True)
+
+                self.service_creator.create(new_package, register_feature=False)
+                exits.callback(rollback(self.service_creator.remove, new_package,
+                                        register_feature=False))
+
+                if self.feature_registry.is_feature_enabled(new_feature):
+                    self._systemctl_action(new_package, 'start')
+                    exits.callback(rollback(self._systemctl_action,
+                                            new_package, 'stop'))
+
+                if not skip_host_plugins:
+                    self._install_cli_plugins(new_package)
+                    exits.callback(rollback(self._uninstall_cli_plugins, new_package))
+
+                self.docker.rmi(old_package.image_id, force=True)
+
+                exits.pop_all()
+        except Exception as err:
+            raise PackageUpgradeError(f'Failed to upgrade {new_package.name}: {err}')
+        except KeyboardInterrupt:
+            raise
+
+        new_package_entry = new_package.entry
+        new_package_entry.installed = True
+        new_package_entry.version = new_version
+        self.database.update_package(new_package_entry)
+        self.database.commit()
+
+    @under_lock
+    @opt_check
+    def reset(self, name: str, force: bool = False, skip_host_plugins: bool = False):
+        """ Reset package to the default version.
+
+        Args:
+            name: SONiC Package name.
+            force: Force the operation.
+
+            skip_host_plugins: Skip host plugins installation.
+        Raises:
+            PackageManagerError
+        """
+
+        with failure_ignore(force):
+            if not self.is_installed(name):
+                raise PackageManagerError(f'{name} is not installed')
+
+        package = self.get_installed_package(name)
+        default_reference = package.entry.default_reference
+        if default_reference is None:
+            raise PackageManagerError(f'package {name} has no default reference')
+
+        package_ref = PackageReference(name, default_reference)
+        source = self.get_package_source(package_ref=package_ref)
+        self.upgrade_from_source(source, force=force,
+                                 allow_downgrade=True,
+                                 skip_host_plugins=skip_host_plugins)
+
+    @under_lock
+    def migrate_packages(self,
+                         old_package_database: PackageDatabase,
+                         dockerd_sock: Optional[str] = None):
+        """
+        Migrate packages from an old database. This function compares the
+        current database against the database passed in as an argument. If a
+        package is missing from the current database it is added. If a package
+        is installed in the passed database but not installed in the current
+        one, it is installed with the version recorded in the passed database.
+        If a package is installed in both databases but the passed database
+        records a newer version, the package is upgraded to that newer
+        version. If the passed database records an older or equal version, no
+        action is taken. If the dockerd_sock parameter is passed, the
+        migration process will use images loaded in the docker library of the
+        currently installed image.
+
+        Args:
+            old_package_database: SONiC Package Database to migrate packages from.
+            dockerd_sock: Path to dockerd socket.
+        Raises:
+            PackageManagerError
+        """
+
+        self._migrate_package_database(old_package_database)
+
+        def migrate_package(old_package_entry,
+                            new_package_entry):
+            """ Migrate package routine
+
+            Args:
+                old_package_entry: Entry in old package database.
+                new_package_entry: Entry in new package database.
+            """
+
+            name = new_package_entry.name
+            version = new_package_entry.version
+
+            if dockerd_sock:
+                # dockerd_sock is defined, so use dockerd_sock to connect to
+                # dockerd and fetch package image from it.
+                log.info(f'installing {name} from old docker library')
+                docker_api = DockerApi(docker.DockerClient(base_url=f'unix://{dockerd_sock}'))
+
+                image = docker_api.get_image(old_package_entry.image_id)
+
+                with tempfile.NamedTemporaryFile('wb') as file:
+                    for chunk in image.save(named=True):
+                        file.write(chunk)
+                    # Make sure all buffered data is on disk before
+                    # installing from the temporary file by name
+                    file.flush()
+
+                    self.install(tarball=file.name)
+            else:
+                log.info(f'installing {name} version {version}')
+
+                self.install(f'{name}={version}')
+
+        # TODO: Topological sort packages by their dependencies first.
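+        # Outline of the loop below: for every installed, non-built-in
+        # package in the old database, keep whichever version is newer
+        # between the old database and this image's database (or the
+        # version behind its default reference), and install the old
+        # version as-is when the new image knows nothing about the package.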
+
+        for old_package in old_package_database:
+            if not old_package.installed or old_package.built_in:
+                continue
+
+            log.info(f'migrating package {old_package.name}')
+
+            new_package = self.database.get_package(old_package.name)
+
+            if new_package.installed:
+                if old_package.version > new_package.version:
+                    log.info(f'{old_package.name} package version is greater '
+                             f'than the one installed in new image: '
+                             f'{old_package.version} > {new_package.version}')
+                    log.info(f'upgrading {new_package.name} to {old_package.version}')
+                    new_package.version = old_package.version
+                    migrate_package(old_package, new_package)
+                else:
+                    log.info(f'skipping {new_package.name} as installed version is newer')
+            elif new_package.default_reference is not None:
+                new_package_ref = PackageReference(new_package.name, new_package.default_reference)
+                package_source = self.get_package_source(package_ref=new_package_ref)
+                package = package_source.get_package()
+                new_package_default_version = package.manifest['package']['version']
+                if old_package.version > new_package_default_version:
+                    log.info(f'{old_package.name} package version is greater '
+                             f'than the default in new image: '
+                             f'{old_package.version} > {new_package_default_version}')
+                    new_package.version = old_package.version
+                    migrate_package(old_package, new_package)
+                else:
+                    self.install(f'{new_package.name}={new_package_default_version}')
+            else:
+                # No default version and package is not installed.
+                # Migrate old package same version.
+                new_package.version = old_package.version
+                migrate_package(old_package, new_package)
+
+        self.database.commit()
+
+    def get_installed_package(self, name: str) -> Package:
+        """ Get installed package by name.
+
+        Args:
+            name: package name.
+        Returns:
+            Package object.
+        """
+
+        package_entry = self.database.get_package(name)
+        source = LocalSource(package_entry,
+                             self.database,
+                             self.docker,
+                             self.metadata_resolver)
+        return source.get_package()
+
+    def get_package_source(self,
+                           package_expression: Optional[str] = None,
+                           repository_reference: Optional[str] = None,
+                           tarball_path: Optional[str] = None,
+                           package_ref: Optional[PackageReference] = None):
+        """ Returns PackageSource object based on input source.
+
+        Args:
+            package_expression: SONiC Package expression string
+            repository_reference: Install from REPO[:TAG][@DIGEST]
+            tarball_path: Install from image tarball
+            package_ref: Package reference object
+        Returns:
+            SONiC Package object.
+        Raises:
+            ValueError if no source specified.
+        """
+
+        if package_expression:
+            ref = parse_reference_expression(package_expression)
+            return self.get_package_source(package_ref=ref)
+        elif repository_reference:
+            repo_ref = DockerReference.parse(repository_reference)
+            repository = repo_ref['name']
+            reference = repo_ref['tag'] or repo_ref['digest']
+            reference = reference or 'latest'
+            return RegistrySource(repository,
+                                  reference,
+                                  self.database,
+                                  self.docker,
+                                  self.metadata_resolver)
+        elif tarball_path:
+            return TarballSource(tarball_path,
+                                 self.database,
+                                 self.docker,
+                                 self.metadata_resolver)
+        elif package_ref:
+            package_entry = self.database.get_package(package_ref.name)
+
+            # Determine the reference if not specified.
+            # If package is installed assume the installed
+            # one is requested, otherwise look for default
+            # reference defined for this package. In case package
+            # does not have a default reference raise an error.
+ if package_ref.reference is None: + if package_entry.installed: + return LocalSource(package_entry, + self.database, + self.docker, + self.metadata_resolver) + if package_entry.default_reference is not None: + package_ref.reference = package_entry.default_reference + else: + raise PackageManagerError(f'No default reference tag. ' + f'Please specify the version or tag explicitly') + + return RegistrySource(package_entry.repository, + package_ref.reference, + self.database, + self.docker, + self.metadata_resolver) + else: + raise ValueError('No package source provided') + + def get_package_available_versions(self, + name: str, + all: bool = False) -> Iterable: + """ Returns a list of available versions for package. + + Args: + name: Package name. + all: If set to True will return all tags including + those which do not follow semantic versioning. + Returns: + List of versions + """ + package_info = self.database.get_package(name) + registry = self.registry_resolver.get_registry_for(package_info.repository) + available_tags = registry.tags(package_info.repository) + + def is_semantic_ver_tag(tag: str) -> bool: + try: + tag_to_version(tag) + return True + except ValueError: + pass + return False + + if all: + return available_tags + + return map(tag_to_version, filter(is_semantic_ver_tag, available_tags)) + + def is_installed(self, name: str) -> bool: + """ Returns boolean whether a package called name is installed. + + Args: + name: Package name. + Returns: + True if package is installed, False otherwise. + """ + + if not self.database.has_package(name): + return False + package_info = self.database.get_package(name) + return package_info.installed + + def get_installed_packages(self) -> Dict[str, Package]: + """ Returns a dictionary of installed packages where + keys are package names and values are package objects. + + Returns: + Installed packages dictionary. + """ + + return { + entry.name: self.get_installed_package(entry.name) + for entry in self.database if entry.installed + } + + def _migrate_package_database(self, old_package_database: PackageDatabase): + """ Performs part of package migration process. + For every package in old_package_database that is not listed in current + database add a corresponding entry to current database. """ + + for package in old_package_database: + if not self.database.has_package(package.name): + self.database.add_package(package.name, + package.repository, + package.description, + package.default_reference) + + def _get_installed_packages_and(self, package: Package) -> Dict[str, Package]: + """ Returns a dictionary of installed packages with their names as keys + adding a package provided in the argument. """ + + packages = self.get_installed_packages() + packages[package.name] = package + return packages + + def _get_installed_packages_except(self, package: Package) -> Dict[str, Package]: + """ Returns a dictionary of installed packages with their names as keys + removing a package provided in the argument. """ + + packages = self.get_installed_packages() + packages.pop(package.name) + return packages + + # TODO: Replace with "config feature" command. + # The problem with current "config feature" command + # is that it is asynchronous, thus can't be used + # for package upgrade purposes where we need to wait + # till service stops before upgrading docker image. 
+    # It would be really handy if we could just call
+    # something like: "config feature state <feature> <state> --wait"
+    # instead of operating on systemd service since
+    # this is basically duplicated code from "hostcfgd".
+    def _systemctl_action(self, package: Package, action: str):
+        """ Execute systemctl action for a service supporting
+        multi-asic services. """
+
+        name = package.manifest['service']['name']
+        host_service = package.manifest['service']['host-service']
+        asic_service = package.manifest['service']['asic-service']
+        single_instance = host_service or (asic_service and not self.is_multi_npu)
+        multi_instance = asic_service and self.is_multi_npu
+
+        if in_chroot():
+            return
+
+        if single_instance:
+            run_command(f'systemctl {action} {name}')
+        if multi_instance:
+            for npu in range(self.num_npus):
+                run_command(f'systemctl {action} {name}@{npu}')
+
+    @staticmethod
+    def _get_cli_plugin_name(package: Package):
+        return utils.make_python_identifier(package.name) + '.py'
+
+    @classmethod
+    def _get_cli_plugin_path(cls, package: Package, command):
+        pkg_loader = pkgutil.get_loader(f'{command}.plugins')
+        if pkg_loader is None:
+            raise PackageManagerError(f'Failed to get plugins path for {command} CLI')
+        plugins_pkg_path = os.path.dirname(pkg_loader.path)
+        return os.path.join(plugins_pkg_path, cls._get_cli_plugin_name(package))
+
+    def _install_cli_plugins(self, package: Package):
+        for command in ('show', 'config', 'clear'):
+            self._install_cli_plugin(package, command)
+
+    def _uninstall_cli_plugins(self, package: Package):
+        for command in ('show', 'config', 'clear'):
+            self._uninstall_cli_plugin(package, command)
+
+    def _install_cli_plugin(self, package: Package, command: str):
+        image_plugin_path = package.manifest['cli'][command]
+        if not image_plugin_path:
+            return
+        host_plugin_path = self._get_cli_plugin_path(package, command)
+        self.docker.extract(package.entry.image_id, image_plugin_path, host_plugin_path)
+
+    def _uninstall_cli_plugin(self, package: Package, command: str):
+        image_plugin_path = package.manifest['cli'][command]
+        if not image_plugin_path:
+            return
+        host_plugin_path = self._get_cli_plugin_path(package, command)
+        if os.path.exists(host_plugin_path):
+            os.remove(host_plugin_path)
+
+    @staticmethod
+    def get_manager() -> 'PackageManager':
+        """ Creates and returns PackageManager instance.
+
+        Returns:
+            PackageManager
+        """
+
+        docker_api = DockerApi(docker.from_env())
+        registry_resolver = RegistryResolver()
+        return PackageManager(DockerApi(docker.from_env(), ProgressManager()),
+                              registry_resolver,
+                              PackageDatabase.from_file(),
+                              MetadataResolver(docker_api, registry_resolver),
+                              ServiceCreator(FeatureRegistry(SonicDB), SonicDB),
+                              device_info,
+                              filelock.FileLock(PACKAGE_MANAGER_LOCK_FILE, timeout=0))
diff --git a/sonic_package_manager/manifest.py b/sonic_package_manager/manifest.py
new file mode 100644
index 0000000000..b58a0d10f0
--- /dev/null
+++ b/sonic_package_manager/manifest.py
@@ -0,0 +1,210 @@
+#!/usr/bin/env python
+
+from abc import ABC
+from dataclasses import dataclass
+from typing import Optional, List, Dict, Any
+
+from sonic_package_manager.constraint import (
+    ComponentConstraints,
+    PackageConstraint
+)
+from sonic_package_manager.errors import ManifestError
+from sonic_package_manager.version import Version
+
+
+class ManifestSchema:
+    """ ManifestSchema class describes and provides marshalling
+    and unmarshalling methods.
+    """
+
+    class Marshaller:
+        """ Base class for marshalling and un-marshalling. """
""" + + def marshal(self, value): + """ Validates and returns a valid manifest dictionary. + + Args: + value: input value to validate. + Returns: valid manifest node. + """ + + raise NotImplementedError + + def unmarshal(self, value): + """ Un-marshals the manifest to a dictionary. + + Args: + value: input value to validate. + Returns: valid manifest node. + """ + + raise NotImplementedError + + @dataclass + class ParsedMarshaller(Marshaller): + """ Marshaller used on types which support class method "parse" """ + + type: Any + + def marshal(self, value): + try: + return self.type.parse(value) + except ValueError as err: + raise ManifestError(f'Failed to marshal {value}: {err}') + + def unmarshal(self, value): + try: + if hasattr(value, 'deparse'): + return value.deparse() + return str(value) + except Exception as err: + raise ManifestError(f'Failed to unmarshal {value}: {err}') + + @dataclass + class DefaultMarshaller(Marshaller): + """ Default marshaller that validates if the given + value is instance of given type. """ + + type: type + + def marshal(self, value): + if not isinstance(value, self.type): + raise ManifestError(f'{value} is not of type {self.type.__name__}') + return value + + def unmarshal(self, value): + return value + + @dataclass + class ManifestNode(Marshaller, ABC): + """ + Base class for any manifest object. + + Attrs: + key: String representing the key for this object. + """ + + key: str + + @dataclass + class ManifestRoot(ManifestNode): + items: List + + def marshal(self, value: Optional[dict]): + result = {} + if value is None: + value = {} + + for item in self.items: + next_value = value.get(item.key) + result[item.key] = item.marshal(next_value) + return result + + def unmarshal(self, value): + return_value = {} + for item in self.items: + return_value[item.key] = item.unmarshal(value[item.key]) + return return_value + + @dataclass + class ManifestField(ManifestNode): + type: Any + default: Optional[Any] = None + + def marshal(self, value): + if value is None: + if self.default is not None: + return self.default + raise ManifestError(f'{self.key} is a required field but it is missing') + try: + return_value = self.type.marshal(value) + except Exception as err: + raise ManifestError(f'Failed to marshal {self.key}: {err}') + return return_value + + def unmarshal(self, value): + return self.type.unmarshal(value) + + @dataclass + class ManifestArray(ManifestNode): + type: Any + + def marshal(self, value): + if value is None: + return [] + + return_value = [] + try: + for item in value: + return_value.append(self.type.marshal(item)) + except Exception as err: + raise ManifestError(f'Failed to convert {self.key}={value} to array: {err}') + + return return_value + + def unmarshal(self, value): + return [self.type.unmarshal(item) for item in value] + + # TODO: add description for each field + SCHEMA = ManifestRoot('root', [ + ManifestField('version', ParsedMarshaller(Version), Version(1, 0, 0)), + ManifestRoot('package', [ + ManifestField('version', ParsedMarshaller(Version)), + ManifestField('name', DefaultMarshaller(str)), + ManifestField('description', DefaultMarshaller(str), ''), + ManifestField('base-os', ParsedMarshaller(ComponentConstraints), ComponentConstraints()), + ManifestArray('depends', ParsedMarshaller(PackageConstraint)), + ManifestArray('breaks', ParsedMarshaller(PackageConstraint)), + ManifestField('init-cfg', DefaultMarshaller(dict), dict()), + ManifestField('changelog', DefaultMarshaller(dict), dict()), + ManifestField('debug-dump', 
+        ]),
+        ManifestRoot('service', [
+            ManifestField('name', DefaultMarshaller(str)),
+            ManifestArray('requires', DefaultMarshaller(str)),
+            ManifestArray('requisite', DefaultMarshaller(str)),
+            ManifestArray('wanted-by', DefaultMarshaller(str)),
+            ManifestArray('after', DefaultMarshaller(str)),
+            ManifestArray('before', DefaultMarshaller(str)),
+            ManifestArray('dependent', DefaultMarshaller(str)),
+            ManifestArray('dependent-of', DefaultMarshaller(str)),
+            ManifestField('post-start-action', DefaultMarshaller(str), ''),
+            ManifestField('pre-shutdown-action', DefaultMarshaller(str), ''),
+            ManifestField('asic-service', DefaultMarshaller(bool), False),
+            ManifestField('host-service', DefaultMarshaller(bool), True),
+            ManifestField('delayed', DefaultMarshaller(bool), False),
+        ]),
+        ManifestRoot('container', [
+            ManifestField('privileged', DefaultMarshaller(bool), False),
+            ManifestArray('volumes', DefaultMarshaller(str)),
+            ManifestArray('mounts', ManifestRoot('mounts', [
+                ManifestField('source', DefaultMarshaller(str)),
+                ManifestField('target', DefaultMarshaller(str)),
+                ManifestField('type', DefaultMarshaller(str)),
+            ])),
+            ManifestField('environment', DefaultMarshaller(dict), dict()),
+            ManifestArray('tmpfs', DefaultMarshaller(str)),
+        ]),
+        ManifestArray('processes', ManifestRoot('processes', [
+            ManifestField('name', DefaultMarshaller(str)),
+        ])),
+        ManifestRoot('cli', [
+            ManifestField('mandatory', DefaultMarshaller(bool), False),
+            ManifestField('show', DefaultMarshaller(str), ''),
+            ManifestField('config', DefaultMarshaller(str), ''),
+            ManifestField('clear', DefaultMarshaller(str), '')
+        ])
+    ])
+
+
+class Manifest(dict):
+    """ Manifest object. """
+
+    SCHEMA = ManifestSchema.SCHEMA
+
+    @classmethod
+    def marshal(cls, input_dict: dict):
+        return Manifest(cls.SCHEMA.marshal(input_dict))
+
+    def unmarshal(self) -> Dict:
+        return self.SCHEMA.unmarshal(self)
diff --git a/sonic_package_manager/metadata.py b/sonic_package_manager/metadata.py
new file mode 100644
index 0000000000..7f7c25ceaf
--- /dev/null
+++ b/sonic_package_manager/metadata.py
@@ -0,0 +1,185 @@
+#!/usr/bin/env python
+
+from dataclasses import dataclass, field
+
+import json
+import tarfile
+from typing import Dict
+
+from sonic_package_manager.errors import MetadataError
+from sonic_package_manager.manifest import Manifest
+from sonic_package_manager.version import Version
+
+
+def deep_update(dst: Dict, src: Dict) -> Dict:
+    """ Deep update dst dictionary with src dictionary.
+
+    Args:
+        dst: Dictionary to update
+        src: Dictionary to update with
+
+    Returns:
+        The updated dst dictionary.
+    """
+
+    for key, value in src.items():
+        if isinstance(value, dict):
+            node = dst.setdefault(key, {})
+            deep_update(node, value)
+        else:
+            dst[key] = value
+    return dst
+
+
+def translate_plain_to_tree(plain: Dict[str, str], sep='.') -> Dict:
+    """ Convert plain key/value dictionary into
+    a tree by splitting the key with '.'
+
+    Args:
+        plain: Dictionary to convert into tree-like structure.
+
+            Keys in this dictionary have to be in a format:
+            "[key0].+", e.g. "com.azure.sonic", which
+            will be converted into a tree-like struct:
+
+            {
+                "com": {
+                    "azure": {
+                        "sonic": {}
+                    }
+                }
+            }
+        sep: Separator string
+
+    Returns:
+        Tree like structure
+
+    """
+
+    res = {}
+    for key, value in plain.items():
+        if sep not in key:
+            res[key] = value
+            continue
+        namespace, key = key.split(sep, 1)
+        res.setdefault(namespace, {})
+        deep_update(res[namespace], translate_plain_to_tree({key: value}))
+    return res
+
+
+@dataclass
+class Metadata:
+    """ Package metadata object that can be retrieved from
+    OCI image manifest. """
+
+    manifest: Manifest
+    components: Dict[str, Version] = field(default_factory=dict)
+
+
+class MetadataResolver:
+    """ Resolve metadata for package from different sources. """
+
+    def __init__(self, docker, registry_resolver):
+        self.docker = docker
+        self.registry_resolver = registry_resolver
+
+    def from_local(self, image: str) -> Metadata:
+        """ Reads manifest from locally installed docker image.
+
+        Args:
+            image: Docker image ID
+        Returns:
+            Metadata
+        Raises:
+            MetadataError
+        """
+
+        labels = self.docker.labels(image)
+        if labels is None:
+            raise MetadataError('No manifest found in image labels')
+
+        return self.from_labels(labels)
+
+    def from_registry(self,
+                      repository: str,
+                      reference: str) -> Metadata:
+        """ Reads manifest from remote registry.
+
+        Args:
+            repository: Repository to pull image from
+            reference: Reference, either tag or digest
+        Returns:
+            Metadata
+        Raises:
+            MetadataError
+        """
+
+        registry = self.registry_resolver.get_registry_for(repository)
+
+        manifest = registry.manifest(repository, reference)
+        digest = manifest['config']['digest']
+
+        blob = registry.blobs(repository, digest)
+        labels = blob['config']['Labels']
+        if labels is None:
+            raise MetadataError('No manifest found in image labels')
+
+        return self.from_labels(labels)
+
+    def from_tarball(self, image_path: str) -> Metadata:
+        """ Reads manifest from an image tarball.
+        Args:
+            image_path: Path to image tarball.
+        Returns:
+            Metadata
+        Raises:
+            MetadataError
+        """
+
+        with tarfile.open(image_path) as image:
+            manifest = json.loads(image.extractfile('manifest.json').read())
+
+            blob = manifest[0]['Config']
+            image_config = json.loads(image.extractfile(blob).read())
+            labels = image_config['config']['Labels']
+            if labels is None:
+                raise MetadataError('No manifest found in image labels')
+
+            return self.from_labels(labels)
+
+    @classmethod
+    def from_labels(cls, labels: Dict[str, str]) -> Metadata:
+        """ Get manifest from image labels.
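+        Package metadata is expected under label keys sharing the
+        "com.azure.sonic" prefix once the flat labels are expanded into a
+        tree, e.g. a label key "com.azure.sonic.manifest" (illustrating
+        the convention implied by the lookups below) carries the manifest
+        JSON string.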
+
+        Args:
+            labels: key, value string pairs
+        Returns:
+            Metadata
+        Raises:
+            MetadataError
+        """
+
+        metadata_dict = translate_plain_to_tree(labels)
+        try:
+            sonic_metadata = metadata_dict['com']['azure']['sonic']
+        except KeyError:
+            raise MetadataError('No metadata found in image labels')
+
+        try:
+            manifest_string = sonic_metadata['manifest']
+        except KeyError:
+            raise MetadataError('No manifest found in image labels')
+
+        try:
+            manifest_dict = json.loads(manifest_string)
+        except (ValueError, TypeError) as err:
+            raise MetadataError(f'Failed to parse manifest JSON: {err}')
+
+        components = {}
+        if 'versions' in sonic_metadata:
+            for component, version in sonic_metadata['versions'].items():
+                try:
+                    components[component] = Version.parse(version)
+                except ValueError as err:
+                    raise MetadataError(f'Failed to parse component version: {err}')
+
+        return Metadata(Manifest.marshal(manifest_dict), components)
diff --git a/sonic_package_manager/package.py b/sonic_package_manager/package.py
new file mode 100644
index 0000000000..2928f17392
--- /dev/null
+++ b/sonic_package_manager/package.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+from dataclasses import dataclass
+
+from sonic_package_manager.database import PackageEntry
+from sonic_package_manager.metadata import Metadata
+
+
+@dataclass
+class Package:
+    """ Package class is a representation of Package.
+
+    Attributes:
+        entry: Package entry in package database
+        metadata: Metadata object for this package
+        manifest: Manifest for this package
+        components: Components versions for this package
+        name: Name of the package from package database
+        repository: Default repository to pull this package from
+        image_id: Docker image ID of the installed package;
+                  It is set to None if package is not installed.
+        installed: Boolean flag whether package is installed or not.
+        built_in: Boolean flag whether package is built in or not.
+
+    """
+
+    entry: PackageEntry
+    metadata: Metadata
+
+    @property
+    def name(self): return self.entry.name
+
+    @property
+    def repository(self): return self.entry.repository
+
+    @property
+    def image_id(self): return self.entry.image_id
+
+    @property
+    def installed(self): return self.entry.installed
+
+    @property
+    def built_in(self): return self.entry.built_in
+
+    @property
+    def version(self): return self.entry.version
+
+    @property
+    def manifest(self): return self.metadata.manifest
+
+    @property
+    def components(self): return self.metadata.components
+
diff --git a/sonic_package_manager/progress.py b/sonic_package_manager/progress.py
new file mode 100644
index 0000000000..5258ebab98
--- /dev/null
+++ b/sonic_package_manager/progress.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+import enlighten
+
+BAR_FMT = '{desc}{desc_pad}{percentage:3.0f}%|{bar}| {count:{len_total}.2f}/{total:.2f}{unit_pad}{unit} ' + \
+          '[{elapsed}<{eta}, {rate:.2f}{unit_pad}{unit}/s]'
+
+COUNTER_FMT = '{desc}{desc_pad}{count:.1f} {unit}{unit_pad}' + \
+              '[{elapsed}, {rate:.2f}{unit_pad}{unit}/s]{fill}'
+
+
+class ProgressManager:
+    """ ProgressManager is used for creating multiple progress bars
+    which nicely interact with logging and prints. """
+
+    def __init__(self):
+        self.manager = enlighten.get_manager()
+        self.pbars = {}
+
+    def __enter__(self):
+        return self.manager.__enter__()
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        return self.manager.__exit__(exc_type, exc_val, exc_tb)
+
+    def new(self, id: str, *args, **kwargs):
+        """ Creates a new progress bar with id.
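+
+        A minimal usage sketch (the identifier and counter options are
+        illustrative):
+
+            pm = ProgressManager()
+            pm.new('pull', total=100, unit='MB', desc='Downloading')
+            pm.get('pull').update()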
+
+        Args:
+            id: progress bar identifier
+            *args: pass arguments for progress bar creation
+            **kwargs: pass keyword arguments for progress bar creation.
+        """
+
+        if 'bar_format' not in kwargs:
+            kwargs['bar_format'] = BAR_FMT
+        if 'counter_format' not in kwargs:
+            kwargs['counter_format'] = COUNTER_FMT
+
+        self.pbars[id] = self.manager.counter(*args, **kwargs)
+
+    def get(self, id: str):
+        """ Returns progress bar by id.
+        Args:
+            id: progress bar identifier
+        Returns:
+            Progress bar.
+        """
+
+        return self.pbars[id]
+
+    def __contains__(self, id):
+        return id in self.pbars
diff --git a/sonic_package_manager/reference.py b/sonic_package_manager/reference.py
new file mode 100644
index 0000000000..9c4d8e825c
--- /dev/null
+++ b/sonic_package_manager/reference.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+import re
+from dataclasses import dataclass
+from typing import Optional
+
+
+@dataclass
+class PackageReference:
+    """ PackageReference holds a package name together with an
+    optional reference (a tag or digest) to install it from. """
+
+    name: str
+    reference: Optional[str] = None
+
+    def __str__(self):
+        return f'{self.name} {self.reference}'
+
+    @staticmethod
+    def parse(expression: str) -> 'PackageReference':
+        REQUIREMENT_SPECIFIER_RE = \
+            r'(?P<name>[A-Za-z0-9_-]+)(@(?P<reference>.*))?'
+
+        match = re.match(REQUIREMENT_SPECIFIER_RE, expression)
+        if match is None:
+            raise ValueError(f'Invalid reference specifier {expression}')
+        groupdict = match.groupdict()
+        name = groupdict.get('name')
+        reference = groupdict.get('reference')
+
+        return PackageReference(name, reference)
diff --git a/sonic_package_manager/registry.py b/sonic_package_manager/registry.py
new file mode 100644
index 0000000000..8a09d9136e
--- /dev/null
+++ b/sonic_package_manager/registry.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+
+import json
+from dataclasses import dataclass
+from typing import List, Dict
+
+import requests
+import www_authenticate
+from docker_image import reference
+from prettyprinter import pformat
+
+from sonic_package_manager.logger import log
+from sonic_package_manager.utils import DockerReference
+
+
+class AuthenticationServiceError(Exception):
+    """ Exception class for errors related to authentication. """
+
+    pass
+
+
+class AuthenticationService:
+    """ AuthenticationService provides authentication tokens. """
+
+    @staticmethod
+    def get_token(realm, service, scope) -> str:
+        """ Retrieve an authentication token.
+
+        Args:
+            realm: URL to request the token from.
+            service: service to request token for.
+            scope: scope to request token for.
+        Returns:
+            token value as a string.
+        """
+
+        log.debug(f'getting authentication token: realm={realm} service={service} scope={scope}')
+
+        response = requests.get(f'{realm}?scope={scope}&service={service}')
+        if response.status_code != requests.codes.ok:
+            raise AuthenticationServiceError('Failed to retrieve token')
+
+        content = json.loads(response.content)
+        token = content['token']
+        expires_in = content['expires_in']
+
+        log.debug(f'authentication token for realm={realm} service={service} scope={scope}: '
+                  f'token={token} expires_in={expires_in}')
+
+        return token
+
+
+@dataclass
+class RegistryApiError(Exception):
+    """ Class for registry related errors. """
+
+    msg: str
+    response: requests.Response
+
+    def __str__(self):
+        code = self.response.status_code
+        content = self.response.content.decode()
+        try:
+            content = json.loads(content)
+        except ValueError:
+            pass
+        return f'{self.msg}: code: {code} details: {pformat(content)}'
+
+
+class Registry:
+    """ Provides a Docker registry interface.
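+    It covers the small subset of the Docker Registry HTTP API V2 used
+    here: listing tags, fetching an image manifest and fetching
+    configuration blobs, re-authenticating through AuthenticationService
+    when a request is answered with 401 Unauthorized.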
""" + + MIME_DOCKER_MANIFEST = 'application/vnd.docker.distribution.manifest.v2+json' + + def __init__(self, host: str): + self.url = host + + @staticmethod + def _execute_get_request(url, headers): + response = requests.get(url, headers=headers) + if response.status_code == requests.codes.unauthorized: + # Get authentication details from headers + # Registry should tell how to authenticate + www_authenticate_details = response.headers['Www-Authenticate'] + log.debug(f'unauthorized: retrieving authentication details ' + f'from response headers {www_authenticate_details}') + bearer = www_authenticate.parse(www_authenticate_details)['bearer'] + token = AuthenticationService.get_token(**bearer) + headers['Authorization'] = f'Bearer {token}' + # Repeat request + response = requests.get(url, headers=headers) + return response + + def _get_base_url(self, repository: str): + return f'{self.url}/v2/{repository}' + + def tags(self, repository: str) -> List[str]: + log.debug(f'getting tags for {repository}') + + _, repository = reference.Reference.split_docker_domain(repository) + headers = {'Accept': 'application/json'} + url = f'{self._get_base_url(repository)}/tags/list' + response = self._execute_get_request(url, headers) + if response.status_code != requests.codes.ok: + raise RegistryApiError(f'Failed to retrieve tags from {repository}', response) + + content = json.loads(response.content) + log.debug(f'tags list api response: f{content}') + + return content['tags'] + + def manifest(self, repository: str, ref: str) -> Dict: + log.debug(f'getting manifest for {repository}:{ref}') + + _, repository = reference.Reference.split_docker_domain(repository) + headers = {'Accept': self.MIME_DOCKER_MANIFEST} + url = f'{self._get_base_url(repository)}/manifests/{ref}' + response = self._execute_get_request(url, headers) + + if response.status_code != requests.codes.ok: + raise RegistryApiError(f'Failed to retrieve manifest for {repository}:{ref}', response) + + content = json.loads(response.content) + log.debug(f'manifest content for {repository}:{ref}: {content}') + + return content + + def blobs(self, repository: str, digest: str): + log.debug(f'retrieving blob for {repository}:{digest}') + + _, repository = reference.Reference.split_docker_domain(repository) + headers = {'Accept': self.MIME_DOCKER_MANIFEST} + url = f'{self._get_base_url(repository)}/blobs/{digest}' + response = self._execute_get_request(url, headers) + if response.status_code != requests.codes.ok: + raise RegistryApiError(f'Failed to retrieve blobs for {repository}:{digest}', response) + content = json.loads(response.content) + + log.debug(f'retrieved blob for {repository}:{digest}: {content}') + return content + + +class RegistryResolver: + """ Returns a registry object based on the input repository reference + string. 
""" + + DockerHubRegistry = Registry('https://index.docker.io') + + def __init__(self): + pass + + def get_registry_for(self, ref: str) -> Registry: + domain, _ = DockerReference.split_docker_domain(ref) + if domain == reference.DEFAULT_DOMAIN: + return self.DockerHubRegistry + # TODO: support insecure registries + return Registry(f'https://{domain}') diff --git a/sonic_package_manager/service_creator/__init__.py b/sonic_package_manager/service_creator/__init__.py new file mode 100644 index 0000000000..e2af81ceb5 --- /dev/null +++ b/sonic_package_manager/service_creator/__init__.py @@ -0,0 +1,3 @@ +#!/usr/bin/env python + +ETC_SONIC_PATH = '/etc/sonic' diff --git a/sonic_package_manager/service_creator/creator.py b/sonic_package_manager/service_creator/creator.py new file mode 100644 index 0000000000..54b9315bee --- /dev/null +++ b/sonic_package_manager/service_creator/creator.py @@ -0,0 +1,342 @@ +#!/usr/bin/env python + +import contextlib +import os +import stat +import subprocess +from typing import Dict + +import jinja2 as jinja2 +from prettyprinter import pformat + +from sonic_package_manager.logger import log +from sonic_package_manager.package import Package +from sonic_package_manager.service_creator import ETC_SONIC_PATH +from sonic_package_manager.service_creator.feature import FeatureRegistry +from sonic_package_manager.service_creator.utils import in_chroot + +SERVICE_FILE_TEMPLATE = 'sonic.service.j2' +TIMER_UNIT_TEMPLATE = 'timer.unit.j2' + +SYSTEMD_LOCATION = '/usr/lib/systemd/system' + +SERVICE_MGMT_SCRIPT_TEMPLATE = 'service_mgmt.sh.j2' +SERVICE_MGMT_SCRIPT_LOCATION = '/usr/local/bin' + +DOCKER_CTL_SCRIPT_TEMPLATE = 'docker_image_ctl.j2' +DOCKER_CTL_SCRIPT_LOCATION = '/usr/bin' + +DEBUG_DUMP_SCRIPT_TEMPLATE = 'dump.sh.j2' +DEBUG_DUMP_SCRIPT_LOCATION = '/usr/local/bin/debug-dump/' + +TEMPLATES_PATH = '/usr/share/sonic/templates' + + +class ServiceCreatorError(Exception): + pass + + +def render_template(in_template: str, + outfile: str, + render_ctx: Dict, + executable: bool = False): + """ Template renderer helper routine. + Args: + in_template: Input file with template content + outfile: Output file to render template to + render_ctx: Dictionary used to generate jinja2 template + executable: Set executable bit on rendered file + """ + + log.debug(f'Rendering {in_template} to {outfile} with {pformat(render_ctx)}') + + with open(in_template, 'r') as instream: + template = jinja2.Template(instream.read()) + + with open(outfile, 'w') as outstream: + outstream.write(template.render(**render_ctx)) + + if executable: + set_executable_bit(outfile) + + +def get_tmpl_path(template_name: str) -> str: + """ Returns a path to a template. + Args: + template_name: Template file name. + """ + + return os.path.join(TEMPLATES_PATH, template_name) + + +def set_executable_bit(filepath): + """ Sets +x on filepath. """ + + st = os.stat(filepath) + os.chmod(filepath, st.st_mode | stat.S_IEXEC) + + +def run_command(command: str): + """ Run arbitrary bash command. + Args: + command: String command to execute as bash script + Raises: + PackageManagerError: Raised when the command return code + is not 0. + """ + + log.debug(f'running command: {command}') + + proc = subprocess.Popen(command, + shell=True, + executable='/bin/bash', + stdout=subprocess.PIPE) + (out, _) = proc.communicate() + if proc.returncode != 0: + raise ServiceCreatorError(f'Failed to execute "{command}"') + + +class ServiceCreator: + """ Creates and registers services in SONiC based on the package + manifest. 
""" + + def __init__(self, feature_registry: FeatureRegistry, sonic_db): + self.feature_registry = feature_registry + self.sonic_db = sonic_db + + def create(self, + package: Package, + register_feature=True, + state='enabled', + owner='local'): + try: + self.generate_container_mgmt(package) + self.generate_service_mgmt(package) + self.update_dependent_list_file(package) + self.generate_systemd_service(package) + self.generate_dump_script(package) + + self.set_initial_config(package) + + self.post_operation_hook() + + if register_feature: + self.feature_registry.register(package.manifest, + state, owner) + except (Exception, KeyboardInterrupt): + self.remove(package, register_feature) + raise + + def remove(self, package: Package, deregister_feature=True): + name = package.manifest['service']['name'] + + def remove_file(path): + if os.path.exists(path): + os.remove(path) + log.info(f'removed {path}') + + remove_file(os.path.join(SYSTEMD_LOCATION, f'{name}.service')) + remove_file(os.path.join(SYSTEMD_LOCATION, f'{name}@.service')) + remove_file(os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, f'{name}.sh')) + remove_file(os.path.join(DOCKER_CTL_SCRIPT_LOCATION, f'{name}.sh')) + remove_file(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, f'{name}')) + + self.update_dependent_list_file(package, remove=True) + + self.post_operation_hook() + + if deregister_feature: + self.feature_registry.deregister(package.manifest['service']['name']) + self.remove_config(package) + + def post_operation_hook(self): + if not in_chroot(): + run_command('systemctl daemon-reload') + + def generate_container_mgmt(self, package: Package): + image_id = package.image_id + name = package.manifest['service']['name'] + container_spec = package.manifest['container'] + script_path = os.path.join(DOCKER_CTL_SCRIPT_LOCATION, f'{name}.sh') + script_template = get_tmpl_path(DOCKER_CTL_SCRIPT_TEMPLATE) + run_opt = [] + + if container_spec['privileged']: + run_opt.append('--privileged') + + run_opt.append('-t') + + for volume in container_spec['volumes']: + run_opt.append(f'-v {volume}') + + for mount in container_spec['mounts']: + mount_type, source, target = mount['type'], mount['source'], mount['target'] + run_opt.append(f'--mount type={mount_type},source={source},target={target}') + + for tmpfs_mount in container_spec['tmpfs']: + run_opt.append(f'--tmpfs {tmpfs_mount}') + + for env_name, value in container_spec['environment'].items(): + run_opt.append(f'-e {env_name}={value}') + + run_opt = ' '.join(run_opt) + render_ctx = { + 'docker_container_name': name, + 'docker_image_id': image_id, + 'docker_image_run_opt': run_opt, + } + render_template(script_template, script_path, render_ctx, executable=True) + log.info(f'generated {script_path}') + + def generate_service_mgmt(self, package: Package): + name = package.manifest['service']['name'] + multi_instance_services = self.feature_registry.get_multi_instance_features() + script_path = os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, f'{name}.sh') + scrip_template = get_tmpl_path(SERVICE_MGMT_SCRIPT_TEMPLATE) + render_ctx = { + 'source': get_tmpl_path(SERVICE_MGMT_SCRIPT_TEMPLATE), + 'manifest': package.manifest.unmarshal(), + 'multi_instance_services': multi_instance_services, + } + render_template(scrip_template, script_path, render_ctx, executable=True) + log.info(f'generated {script_path}') + + def generate_systemd_service(self, package: Package): + name = package.manifest['service']['name'] + multi_instance_services = self.feature_registry.get_multi_instance_features() + + template = 

    def generate_systemd_service(self, package: Package):
        name = package.manifest['service']['name']
        multi_instance_services = self.feature_registry.get_multi_instance_features()

        template = get_tmpl_path(SERVICE_FILE_TEMPLATE)
        template_vars = {
            'source': get_tmpl_path(SERVICE_FILE_TEMPLATE),
            'manifest': package.manifest.unmarshal(),
            'multi_instance': False,
            'multi_instance_services': multi_instance_services,
        }
        output_file = os.path.join(SYSTEMD_LOCATION, f'{name}.service')
        render_template(template, output_file, template_vars)
        log.info(f'generated {output_file}')

        if package.manifest['service']['asic-service']:
            output_file = os.path.join(SYSTEMD_LOCATION, f'{name}@.service')
            template_vars['multi_instance'] = True
            render_template(template, output_file, template_vars)
            log.info(f'generated {output_file}')

        if package.manifest['service']['delayed']:
            template_vars = {
                'source': get_tmpl_path(TIMER_UNIT_TEMPLATE),
                'manifest': package.manifest.unmarshal(),
                'multi_instance': False,
            }
            output_file = os.path.join(SYSTEMD_LOCATION, f'{name}.timer')
            template = os.path.join(TEMPLATES_PATH, TIMER_UNIT_TEMPLATE)
            render_template(template, output_file, template_vars)
            log.info(f'generated {output_file}')

            if package.manifest['service']['asic-service']:
                output_file = os.path.join(SYSTEMD_LOCATION, f'{name}@.timer')
                template_vars['multi_instance'] = True
                render_template(template, output_file, template_vars)
                log.info(f'generated {output_file}')

    def update_dependent_list_file(self, package: Package, remove=False):
        name = package.manifest['service']['name']
        dependent_of = package.manifest['service']['dependent-of']
        host_service = package.manifest['service']['host-service']
        asic_service = package.manifest['service']['asic-service']

        def update_dependent(service, name, multi_inst):
            if multi_inst:
                filename = f'{service}_multi_inst_dependent'
            else:
                filename = f'{service}_dependent'

            filepath = os.path.join(ETC_SONIC_PATH, filename)

            dependent_services = set()
            if os.path.exists(filepath):
                with open(filepath) as fp:
                    dependent_services.update({line.strip() for line in fp.readlines()})
            if remove:
                with contextlib.suppress(KeyError):
                    dependent_services.remove(name)
            else:
                dependent_services.add(name)
            with open(filepath, 'w') as fp:
                fp.write('\n'.join(dependent_services))

        for service in dependent_of:
            if host_service:
                update_dependent(service, name, multi_inst=False)
            if asic_service:
                update_dependent(service, name, multi_inst=True)

    def generate_dump_script(self, package):
        name = package.manifest['service']['name']

        if not package.manifest['package']['debug-dump']:
            return

        if not os.path.exists(DEBUG_DUMP_SCRIPT_LOCATION):
            os.mkdir(DEBUG_DUMP_SCRIPT_LOCATION)

        script_template = os.path.join(TEMPLATES_PATH, DEBUG_DUMP_SCRIPT_TEMPLATE)
        script_path = os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, f'{name}')
        render_ctx = {
            'source': get_tmpl_path(DEBUG_DUMP_SCRIPT_TEMPLATE),
            'manifest': package.manifest.unmarshal(),
        }
        render_template(script_template, script_path, render_ctx, executable=True)
        log.info(f'generated {script_path}')

    def get_tables(self, table_name):
        tables = []

        running_table = self.sonic_db.running_table(table_name)
        if running_table is not None:
            tables.append(running_table)

        persistent_table = self.sonic_db.persistent_table(table_name)
        if persistent_table is not None:
            tables.append(persistent_table)

        initial_table = self.sonic_db.initial_table(table_name)
        if initial_table is not None:
            tables.append(initial_table)

        return tables

    def set_initial_config(self, package):
        init_cfg = package.manifest['package']['init-cfg']

        for tablename, content in init_cfg.items():
            if not isinstance(content, dict):
                continue

            tables = self.get_tables(tablename)

            for key in content:
                for table in tables:
                    cfg = content[key]
                    exists, old_fvs = table.get(key)
                    if exists:
                        cfg.update(old_fvs)
                    fvs = list(cfg.items())
                    table.set(key, fvs)

    def remove_config(self, package):
        # Remove configuration based on init-cfg tables, so having
        # init-cfg even with tables without keys might be a good idea.
        # TODO: init-cfg should be validated with yang model
        # TODO: remove config from tables known to yang model
        init_cfg = package.manifest['package']['init-cfg']

        for tablename, content in init_cfg.items():
            if not isinstance(content, dict):
                continue

            tables = self.get_tables(tablename)

            for key in content:
                for table in tables:
                    table._del(key)
diff --git a/sonic_package_manager/service_creator/feature.py b/sonic_package_manager/service_creator/feature.py
new file mode 100644
index 0000000000..4df06384d2
--- /dev/null
+++ b/sonic_package_manager/service_creator/feature.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+
+""" This module implements new feature registration/de-registration in SONiC system. """
+
+from typing import Dict, Type
+
+from sonic_package_manager.manifest import Manifest
+from sonic_package_manager.service_creator.sonic_db import SonicDB
+
+FEATURE = 'FEATURE'
+DEFAULT_FEATURE_CONFIG = {
+    'state': 'disabled',
+    'auto_restart': 'enabled',
+    'high_mem_alert': 'disabled',
+    'set_owner': 'local'
+}
+
+
+class FeatureRegistry:
+    """ FeatureRegistry class provides an interface to
+    register/de-register features persistently. """
+
+    def __init__(self, sonic_db: Type[SonicDB]):
+        self._sonic_db = sonic_db
+
+    def register(self,
+                 manifest: Manifest,
+                 state: str = 'disabled',
+                 owner: str = 'local'):
+        name = manifest['service']['name']
+        for table in self._get_tables():
+            cfg_entries = self.get_default_feature_entries(state, owner)
+            non_cfg_entries = self.get_non_configurable_feature_entries(manifest)
+
+            exists, current_cfg = table.get(name)
+
+            new_cfg = cfg_entries.copy()
+            # Override configurable entries with CONFIG DB data.
+            new_cfg = {**new_cfg, **dict(current_cfg)}
+            # Override CONFIG DB data with non configurable entries.
+            new_cfg = {**new_cfg, **non_cfg_entries}
+
+            table.set(name, list(new_cfg.items()))
+
+    def deregister(self, name: str):
+        for table in self._get_tables():
+            table._del(name)
+
+    def is_feature_enabled(self, name: str) -> bool:
+        """ Returns whether the feature is currently enabled
+        or not. Accesses running CONFIG DB. If no running CONFIG_DB
+        table is found, returns False. """
+
+        running_db_table = self._sonic_db.running_table(FEATURE)
+        if running_db_table is None:
+            return False
+
+        exists, cfg = running_db_table.get(name)
+        if not exists:
+            return False
+        cfg = dict(cfg)
+        return cfg.get('state', '').lower() == 'enabled'
+
+    def get_multi_instance_features(self):
+        res = []
+        init_db_table = self._sonic_db.initial_table(FEATURE)
+        for feature in init_db_table.keys():
+            exists, cfg = init_db_table.get(feature)
+            assert exists
+            cfg = dict(cfg)
+            asic_flag = str(cfg.get('has_per_asic_scope', 'False'))
+            if asic_flag.lower() == 'true':
+                res.append(feature)
+        return res
+
+    @staticmethod
+    def get_default_feature_entries(state=None, owner=None) -> Dict[str, str]:
+        """ Get configurable feature table entries:
+        e.g. 'state', 'auto_restart', etc.
""" + + cfg = DEFAULT_FEATURE_CONFIG.copy() + if state: + cfg['state'] = state + if owner: + cfg['set_owner'] = owner + return cfg + + @staticmethod + def get_non_configurable_feature_entries(manifest) -> Dict[str, str]: + """ Get non-configurable feature table entries: e.g. 'has_timer' """ + + return { + 'has_per_asic_scope': str(manifest['service']['asic-service']), + 'has_global_scope': str(manifest['service']['host-service']), + 'has_timer': str(manifest['service']['delayed']), + } + + def _get_tables(self): + tables = [] + running = self._sonic_db.running_table(FEATURE) + if running is not None: # it's Ok if there is no database container running + tables.append(running) + persistent = self._sonic_db.persistent_table(FEATURE) + if persistent is not None: # it's Ok if there is no config_db.json + tables.append(persistent) + tables.append(self._sonic_db.initial_table(FEATURE)) # init_cfg.json is must + + return tables diff --git a/sonic_package_manager/service_creator/sonic_db.py b/sonic_package_manager/service_creator/sonic_db.py new file mode 100644 index 0000000000..a064c60c4a --- /dev/null +++ b/sonic_package_manager/service_creator/sonic_db.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python + +import contextlib +import json +import os + +from swsscommon import swsscommon + +from sonic_package_manager.service_creator import ETC_SONIC_PATH +from sonic_package_manager.service_creator.utils import in_chroot + +CONFIG_DB = 'CONFIG_DB' +CONFIG_DB_JSON = os.path.join(ETC_SONIC_PATH, 'config_db.json') +INIT_CFG_JSON = os.path.join(ETC_SONIC_PATH, 'init_cfg.json') + + +class FileDbTable: + """ swsscommon.Table adapter for persistent DBs. """ + + def __init__(self, file, table): + self._file = file + self._table = table + + def keys(self): + with open(self._file) as stream: + config = json.load(stream) + return config.get(self._table, {}).keys() + + def get(self, key): + with open(self._file) as stream: + config = json.load(stream) + + table = config.get(self._table, {}) + exists = key in table + fvs_dict = table.get(key, {}) + fvs = list(fvs_dict.items()) + return exists, fvs + + def set(self, key, fvs): + with open(self._file) as stream: + config = json.load(stream) + + table = config.setdefault(self._table, {}) + table.update({key: dict(fvs)}) + + with open(self._file, 'w') as stream: + json.dump(config, stream, indent=4) + + def _del(self, key): + with open(self._file) as stream: + config = json.load(stream) + + with contextlib.suppress(KeyError): + config[self._table].pop(key) + + with open(self._file, 'w') as stream: + json.dump(config, stream, indent=4) + + +class SonicDB: + """ Store different DB access objects for + running DB and also for persistent and initial + configs. """ + + _running = None + + @classmethod + def running_table(cls, table): + """ Returns running DB table. """ + + # In chroot we can connect to a running + # DB via TCP socket, we should ignore this case. + if in_chroot(): + return None + + if cls._running is None: + try: + cls._running = swsscommon.DBConnector(CONFIG_DB, 0) + except RuntimeError: + # Failed to connect to DB. + return None + + return swsscommon.Table(cls._running, table) + + @classmethod + def persistent_table(cls, table): + """ Returns persistent DB table. """ + + if not os.path.exists(CONFIG_DB_JSON): + return None + + return FileDbTable(CONFIG_DB_JSON, table) + + @classmethod + def initial_table(cls, table): + """ Returns initial DB table. 
""" + + return FileDbTable(INIT_CFG_JSON, table) diff --git a/sonic_package_manager/service_creator/utils.py b/sonic_package_manager/service_creator/utils.py new file mode 100644 index 0000000000..cdeeb17abb --- /dev/null +++ b/sonic_package_manager/service_creator/utils.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python + +import os + + +def in_chroot() -> bool: + """ Verify if we are running in chroot or not + by comparing root / mount point device id and inode + with init process - /proc/1/root mount point device + id and inode. If those match we are not chroot-ed + otherwise we are. """ + + root_stat = os.stat('/') + init_root_stat = os.stat('/proc/1/root') + + return (root_stat.st_dev, root_stat.st_ino) != \ + (init_root_stat.st_dev, init_root_stat.st_ino) diff --git a/sonic_package_manager/source.py b/sonic_package_manager/source.py new file mode 100644 index 0000000000..c179e0b3ee --- /dev/null +++ b/sonic_package_manager/source.py @@ -0,0 +1,183 @@ +#!/usr/bin/env python3 + +from sonic_package_manager.database import PackageDatabase, PackageEntry +from sonic_package_manager.dockerapi import DockerApi, get_repository_from_image +from sonic_package_manager.metadata import Metadata, MetadataResolver +from sonic_package_manager.package import Package + + +class PackageSource(object): + """ PackageSource abstracts the way manifest is read + and image is retrieved based on different image sources. + (i.e from registry, from tarball or locally installed) """ + + def __init__(self, + database: PackageDatabase, + docker: DockerApi, + metadata_resolver: MetadataResolver): + self.database = database + self.docker = docker + self.metadata_resolver = metadata_resolver + + def get_metadata(self) -> Metadata: + """ Returns package manifest. + Child class has to implement this method. + + Returns: + Metadata + """ + raise NotImplementedError + + def install_image(self, package: Package): + """ Install image based on package source. + Child class has to implement this method. + + Args: + package: SONiC Package + Returns: + Docker Image object. + """ + + raise NotImplementedError + + def install(self, package: Package): + """ Install image based on package source, + record installation infromation in PackageEntry.. + + Args: + package: SONiC Package + """ + + image = self.install_image(package) + package.entry.image_id = image.id + # if no repository is defined for this package + # get repository from image + if not package.repository: + package.entry.repository = get_repository_from_image(image) + + def uninstall(self, package: Package): + """ Uninstall image. + + Args: + package: SONiC Package + """ + + self.docker.rmi(package.image_id) + package.entry.image_id = None + + def get_package(self) -> Package: + """ Returns SONiC Package based on manifest. + + Returns: + SONiC Package + """ + + metadata = self.get_metadata() + manifest = metadata.manifest + + name = manifest['package']['name'] + description = manifest['package']['description'] + + # Will be resolved in install() method. + # When installing from tarball we don't know yet + # the repository for this package. + repository = None + + if self.database.has_package(name): + # inherit package database info + package_entry = self.database.get_package(name) + else: + package_entry = PackageEntry(name, repository, + description=description) + + return Package( + package_entry, + metadata + ) + + +class TarballSource(PackageSource): + """ TarballSource implements PackageSource + for locally existing image saved as tarball. 
""" + + def __init__(self, + tarball_path: str, + database: PackageDatabase, + docker: DockerApi, + metadata_resolver: MetadataResolver): + super().__init__(database, + docker, + metadata_resolver) + self.tarball_path = tarball_path + + def get_metadata(self) -> Metadata: + """ Returns manifest read from tarball. """ + + return self.metadata_resolver.from_tarball(self.tarball_path) + + def install_image(self, package: Package): + """ Installs image from local tarball source. """ + + return self.docker.load(self.tarball_path) + + +class RegistrySource(PackageSource): + """ RegistrySource implements PackageSource + for packages that are pulled from registry. """ + + def __init__(self, + repository: str, + reference: str, + database: PackageDatabase, + docker: DockerApi, + metadata_resolver: MetadataResolver): + super().__init__(database, + docker, + metadata_resolver) + self.repository = repository + self.reference = reference + + def get_metadata(self) -> Metadata: + """ Returns manifest read from registry. """ + + return self.metadata_resolver.from_registry(self.repository, + self.reference) + + def install_image(self, package: Package): + """ Installs image from registry. """ + + image_id = self.docker.pull(self.repository, self.reference) + if not package.entry.default_reference: + package.entry.default_reference = self.reference + return image_id + + +class LocalSource(PackageSource): + """ LocalSource accesses local docker library to retrieve manifest + but does not implement installation of the image. """ + + def __init__(self, + entry: PackageEntry, + database: PackageDatabase, + docker: DockerApi, + metadata_resolver: MetadataResolver): + super().__init__(database, + docker, + metadata_resolver) + self.entry = entry + + def get_metadata(self) -> Metadata: + """ Returns manifest read from locally installed Docker. """ + + image = self.entry.image_id + + if self.entry.built_in: + # Built-in (installed not via sonic-package-manager) + # won't have image_id in database. Using their + # repository name as image. + image = f'{self.entry.repository}:latest' + + return self.metadata_resolver.from_local(image) + + def get_package(self) -> Package: + return Package(self.entry, self.get_metadata()) diff --git a/sonic_package_manager/utils.py b/sonic_package_manager/utils.py new file mode 100644 index 0000000000..410947dd24 --- /dev/null +++ b/sonic_package_manager/utils.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python + +import keyword +import re + +from docker_image.reference import Reference + +DockerReference = Reference + + +def make_python_identifier(string): + """ + Takes an arbitrary string and creates a valid Python identifier. 
+
+    Identifiers must follow the convention outlined here:
+    https://docs.python.org/2/reference/lexical_analysis.html#identifiers
+    """
+
+    # create a working copy (and make it lowercase, while we're at it)
+    s = string.lower()
+
+    # remove leading and trailing whitespace
+    s = s.strip()
+
+    # Make spaces into underscores
+    s = re.sub(r'[\s\t\n]+', '_', s)
+
+    # Remove invalid characters
+    s = re.sub(r'[^0-9a-zA-Z_]', '', s)
+
+    # Remove leading characters until we find a letter or underscore
+    s = re.sub(r'^[^a-zA-Z_]+', '', s)
+
+    # Ensure the result is not a Python keyword
+    while s in keyword.kwlist:
+        if re.match(r".*?_\d+$", s):
+            i = re.match(r".*?_(\d+)$", s).groups()[0]
+            s = s.strip('_'+i) + '_'+str(int(i)+1)
+        else:
+            s += '_1'
+
+    return s
diff --git a/sonic_package_manager/version.py b/sonic_package_manager/version.py
new file mode 100644
index 0000000000..e5a5623d3b
--- /dev/null
+++ b/sonic_package_manager/version.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+
+""" Version and helper routines. """
+
+import semver
+
+Version = semver.Version
+VersionRange = semver.VersionRange
+
+
+def version_to_tag(ver: Version) -> str:
+    """ Converts the version to Docker compliant tag string. """
+
+    return str(ver).replace('+', '_')
+
+
+def tag_to_version(tag: str) -> Version:
+    """ Converts a Docker compliant tag string back to a version. """
+
+    try:
+        return Version.parse(tag.replace('_', '+'))
+    except ValueError as err:
+        raise ValueError(f'Failed to convert {tag} to version string: {err}')
diff --git a/tests/sonic_package_manager/conftest.py b/tests/sonic_package_manager/conftest.py
new file mode 100644
index 0000000000..cee997596c
--- /dev/null
+++ b/tests/sonic_package_manager/conftest.py
@@ -0,0 +1,377 @@
+#!/usr/bin/env python
+
+from dataclasses import dataclass
+from unittest import mock
+from unittest.mock import Mock, MagicMock
+
+import pytest
+from docker_image.reference import Reference
+
+from sonic_package_manager.database import PackageDatabase, PackageEntry
+from sonic_package_manager.manager import DockerApi, PackageManager
+from sonic_package_manager.manifest import Manifest
+from sonic_package_manager.metadata import Metadata, MetadataResolver
+from sonic_package_manager.registry import RegistryResolver
+from sonic_package_manager.version import Version
+from sonic_package_manager.service_creator.creator import *
+
+
+@pytest.fixture
+def mock_docker_api():
+    docker = MagicMock(DockerApi)
+
+    @dataclass
+    class Image:
+        id: str
+
+        @property
+        def attrs(self):
+            return {'RepoTags': [self.id]}
+
+    def pull(repo, ref):
+        return Image(f'{repo}:{ref}')
+
+    def load(filename):
+        return Image(filename)
+
+    docker.pull = MagicMock(side_effect=pull)
+    docker.load = MagicMock(side_effect=load)
+
+    yield docker
+
+
+@pytest.fixture
+def mock_registry_resolver():
+    yield Mock(RegistryResolver)
+
+
+@pytest.fixture
+def mock_metadata_resolver():
+    yield Mock(MetadataResolver)
+
+
+@pytest.fixture
+def mock_feature_registry():
+    yield MagicMock()
+
+
+@pytest.fixture
+def mock_service_creator():
+    yield Mock()
+
+
+@pytest.fixture
+def mock_sonic_db():
+    yield Mock()
+
+
+@pytest.fixture
+def fake_metadata_resolver():
+    class FakeMetadataResolver:
+        def __init__(self):
+            self.metadata_store = {}
+            self.add('docker-database', 'latest', 'database', '1.0.0')
+            self.add('docker-orchagent', 'latest', 'swss', '1.0.0',
+                     components={
+                         'libswsscommon': Version.parse('1.0.0'),
+                         'libsairedis': Version.parse('1.0.0')
+                     }
+                     )
+            self.add('Azure/docker-test', '1.6.0', 'test-package', '1.6.0')
            self.add('Azure/docker-test-2', '1.5.0', 'test-package-2', '1.5.0')
            self.add('Azure/docker-test-2', '2.0.0', 'test-package-2', '2.0.0')
            self.add('Azure/docker-test-3', 'latest', 'test-package-3', '1.6.0')
            self.add('Azure/docker-test-3', '1.5.0', 'test-package-3', '1.5.0')
            self.add('Azure/docker-test-3', '1.6.0', 'test-package-3', '1.6.0')
            self.add('Azure/docker-test-4', '1.5.0', 'test-package-4', '1.5.0')
            self.add('Azure/docker-test-5', '1.5.0', 'test-package-5', '1.5.0')
            self.add('Azure/docker-test-5', '1.9.0', 'test-package-5', '1.9.0')
            self.add('Azure/docker-test-6', '1.5.0', 'test-package-6', '1.5.0')
            self.add('Azure/docker-test-6', '1.9.0', 'test-package-6', '1.9.0')
            self.add('Azure/docker-test-6', '2.0.0', 'test-package-6', '2.0.0')
            self.add('Azure/docker-test-6', 'latest', 'test-package-6', '1.5.0')

        def from_registry(self, repository: str, reference: str):
            manifest = Manifest.marshal(self.metadata_store[repository][reference]['manifest'])
            components = self.metadata_store[repository][reference]['components']
            return Metadata(manifest, components)

        def from_local(self, image: str):
            ref = Reference.parse(image)
            manifest = Manifest.marshal(self.metadata_store[ref['name']][ref['tag']]['manifest'])
            components = self.metadata_store[ref['name']][ref['tag']]['components']
            return Metadata(manifest, components)

        def from_tarball(self, filepath: str) -> Metadata:
            path, ref = filepath.split(':')
            manifest = Manifest.marshal(self.metadata_store[path][ref]['manifest'])
            components = self.metadata_store[path][ref]['components']
            return Metadata(manifest, components)

        def add(self, repo, reference, name, version, components=None):
            repo_dict = self.metadata_store.setdefault(repo, {})
            repo_dict[reference] = {
                'manifest': {
                    'package': {
                        'version': version,
                        'name': name,
                        'base-os': {},
                    },
                    'service': {
                        'name': name,
                    }
                },
                'components': components or {},
            }

    yield FakeMetadataResolver()


@pytest.fixture
def fake_device_info():
    class FakeDeviceInfo:
        def __init__(self):
            self.multi_npu = True
            self.num_npus = 1
            self.version_info = {
                'libswsscommon': '1.0.0',
            }

        def is_multi_npu(self):
            return self.multi_npu

        def get_num_npus(self):
            return self.num_npus

        def get_sonic_version_info(self):
            return self.version_info

    yield FakeDeviceInfo()


def add_package(content, metadata_resolver, repository, reference, **kwargs):
    metadata = metadata_resolver.from_registry(repository, reference)
    name = metadata.manifest['package']['name']
    version = metadata.manifest['package']['version']
    installed = kwargs.get('installed', False)
    built_in = kwargs.get('built-in', False)

    if installed and not built_in and 'image_id' not in kwargs:
        kwargs['image_id'] = f'{repository}:{reference}'

    if installed and 'version' not in kwargs:
        kwargs['version'] = version

    content[name] = PackageEntry(name, repository, **kwargs)


@pytest.fixture
def fake_db(fake_metadata_resolver):
    content = {}

    add_package(
        content,
        fake_metadata_resolver,
        'docker-database',
        'latest',
        description='SONiC database service',
        default_reference='1.0.0',
        installed=True,
        built_in=True
    )
    add_package(
        content,
        fake_metadata_resolver,
        'docker-orchagent',
        'latest',
        description='SONiC switch state service',
        default_reference='1.0.0',
        installed=True,
        built_in=True
    )
    add_package(
        content,
        fake_metadata_resolver,
        'Azure/docker-test',
        '1.6.0',
        description='SONiC Package 
Manager Test Package', + default_reference='1.6.0', + installed=False, + built_in=False + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test-2', + '1.5.0', + description='SONiC Package Manager Test Package #2', + default_reference='1.5.0', + installed=False, + built_in=False + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test-3', + '1.5.0', + description='SONiC Package Manager Test Package #3', + default_reference='1.5.0', + installed=True, + built_in=False + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test-5', + '1.9.0', + description='SONiC Package Manager Test Package #5', + default_reference='1.9.0', + installed=False, + built_in=False + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test-6', + '1.5.0', + description='SONiC Package Manager Test Package #6', + default_reference='1.5.0', + installed=False, + built_in=False + ) + + yield PackageDatabase(content) + + +@pytest.fixture +def fake_db_for_migration(fake_metadata_resolver): + content = {} + add_package( + content, + fake_metadata_resolver, + 'docker-database', + 'latest', + description='SONiC database service', + default_reference='1.0.0', + installed=True, + built_in=True + ) + add_package( + content, + fake_metadata_resolver, + 'docker-orchagent', + 'latest', + description='SONiC switch state service', + default_reference='1.0.0', + installed=True, + built_in=True + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test', + '1.6.0', + description='SONiC Package Manager Test Package', + default_reference='1.6.0', + installed=False, + built_in=False + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test-2', + '2.0.0', + description='SONiC Package Manager Test Package #2', + default_reference='2.0.0', + installed=False, + built_in=False + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test-3', + '1.6.0', + description='SONiC Package Manager Test Package #3', + default_reference='1.6.0', + installed=True, + built_in=False + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test-4', + '1.5.0', + description='SONiC Package Manager Test Package #4', + default_reference='1.5.0', + installed=True, + built_in=False + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test-5', + '1.5.0', + description='SONiC Package Manager Test Package #5', + default_reference='1.5.0', + installed=True, + built_in=False + ) + add_package( + content, + fake_metadata_resolver, + 'Azure/docker-test-6', + '2.0.0', + description='SONiC Package Manager Test Package #6', + default_reference='2.0.0', + installed=True, + built_in=False + ) + + yield PackageDatabase(content) + + +@pytest.fixture() +def sonic_fs(fs): + fs.create_file('/proc/1/root') + fs.create_dir(ETC_SONIC_PATH) + fs.create_dir(SYSTEMD_LOCATION) + fs.create_dir(DOCKER_CTL_SCRIPT_LOCATION) + fs.create_dir(SERVICE_MGMT_SCRIPT_LOCATION) + fs.create_file(os.path.join(TEMPLATES_PATH, SERVICE_FILE_TEMPLATE)) + fs.create_file(os.path.join(TEMPLATES_PATH, TIMER_UNIT_TEMPLATE)) + fs.create_file(os.path.join(TEMPLATES_PATH, SERVICE_MGMT_SCRIPT_TEMPLATE)) + fs.create_file(os.path.join(TEMPLATES_PATH, DOCKER_CTL_SCRIPT_TEMPLATE)) + fs.create_file(os.path.join(TEMPLATES_PATH, DEBUG_DUMP_SCRIPT_TEMPLATE)) + yield fs + + +@pytest.fixture(autouse=True) +def patch_pkgutil(): + with mock.patch('pkgutil.get_loader'): + yield + + +@pytest.fixture +def package_manager(mock_docker_api, + mock_registry_resolver, + 
mock_service_creator, + fake_metadata_resolver, + fake_db, + fake_device_info): + yield PackageManager(mock_docker_api, mock_registry_resolver, + fake_db, fake_metadata_resolver, + mock_service_creator, + fake_device_info, + MagicMock()) + + +@pytest.fixture +def anything(): + """ Fixture that returns Any object that can be used in + assert_called_*_with to match any object passed. """ + + class Any: + def __eq__(self, other): + return True + + yield Any() diff --git a/tests/sonic_package_manager/test_cli.py b/tests/sonic_package_manager/test_cli.py new file mode 100644 index 0000000000..695d8cba58 --- /dev/null +++ b/tests/sonic_package_manager/test_cli.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python + +from click.testing import CliRunner + +from sonic_package_manager import main + + +def test_show_changelog(package_manager, fake_metadata_resolver): + """ Test case for "sonic-package-manager package show changelog [NAME]" """ + + runner = CliRunner() + changelog = { + "1.0.0": { + "changes": ["Initial release"], + "author": "Stepan Blyshchak", + "email": "stepanb@nvidia.com", + "date": "Mon, 25 May 2020 12:24:30 +0300" + }, + "1.1.0": { + "changes": [ + "Added functionality", + "Bug fixes" + ], + "author": "Stepan Blyshchak", + "email": "stepanb@nvidia.com", + "date": "Fri, 23 Oct 2020 12:26:08 +0300" + } + } + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['package']['changelog'] = changelog + + expected_output = """\ +1.0.0: + + • Initial release + + Stepan Blyshchak (stepanb@nvidia.com) Mon, 25 May 2020 12:24:30 +0300 + +1.1.0: + + • Added functionality + • Bug fixes + + Stepan Blyshchak (stepanb@nvidia.com) Fri, 23 Oct 2020 12:26:08 +0300 + +""" + + result = runner.invoke(main.show.commands['package'].commands['changelog'], + ['test-package'], obj=package_manager) + + assert result.exit_code == 0 + assert result.output == expected_output + + +def test_show_changelog_no_changelog(package_manager): + """ Test case for "sonic-package-manager package show changelog [NAME]" + when there is no changelog provided by package. 
""" + + runner = CliRunner() + result = runner.invoke(main.show.commands['package'].commands['changelog'], ['test-package'], obj=package_manager) + + assert result.exit_code == 1 + assert result.output == 'Failed to print package changelog: No changelog for package test-package\n' diff --git a/tests/sonic_package_manager/test_constraint.py b/tests/sonic_package_manager/test_constraint.py new file mode 100644 index 0000000000..1b34a301d2 --- /dev/null +++ b/tests/sonic_package_manager/test_constraint.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python + +from sonic_package_manager import version +from sonic_package_manager.constraint import PackageConstraint +from sonic_package_manager.version import Version, VersionRange + + +def test_constraint(): + package_constraint = PackageConstraint.parse('swss>1.0.0') + assert package_constraint.name == 'swss' + assert not package_constraint.constraint.allows(Version.parse('0.9.1')) + assert package_constraint.constraint.allows(Version.parse('1.1.1')) + + +def test_constraint_range(): + package_constraint = PackageConstraint.parse('swss^1.2.0') + assert package_constraint.name == 'swss' + assert not package_constraint.constraint.allows(Version.parse('1.1.1')) + assert package_constraint.constraint.allows(Version.parse('1.2.5')) + assert not package_constraint.constraint.allows(Version.parse('2.0.1')) + + +def test_constraint_strict(): + package_constraint = PackageConstraint.parse('swss==1.2.0') + assert package_constraint.name == 'swss' + assert not package_constraint.constraint.allows(Version.parse('1.1.1')) + assert package_constraint.constraint.allows(Version.parse('1.2.0')) + + +def test_constraint_match(): + package_constraint = PackageConstraint.parse('swss==1.2*.*') + assert package_constraint.name == 'swss' + assert not package_constraint.constraint.allows(Version.parse('1.1.1')) + assert package_constraint.constraint.allows(Version.parse('1.2.0')) + + +def test_constraint_multiple(): + package_constraint = PackageConstraint.parse('swss>1.2.0,<3.0.0,!=2.2.2') + assert package_constraint.name == 'swss' + assert not package_constraint.constraint.allows(Version.parse('2.2.2')) + assert not package_constraint.constraint.allows(Version.parse('3.2.0')) + assert not package_constraint.constraint.allows(Version.parse('0.2.0')) + assert package_constraint.constraint.allows(Version.parse('2.2.3')) + assert package_constraint.constraint.allows(Version.parse('1.2.3')) + + +def test_constraint_only_name(): + package_constraint = PackageConstraint.parse('swss') + assert package_constraint.name == 'swss' + assert package_constraint.constraint == VersionRange() + + +def test_constraint_from_dict(): + package_constraint = PackageConstraint.parse({ + 'name': 'swss', + 'version': '^1.0.0', + 'components': { + 'libswsscommon': '^1.1.0', + }, + }) + assert package_constraint.name == 'swss' + assert package_constraint.constraint.allows(Version.parse('1.0.0')) + assert not package_constraint.constraint.allows(Version.parse('2.0.0')) + assert package_constraint.components['libswsscommon'].allows(Version.parse('1.2.0')) + assert not package_constraint.components['libswsscommon'].allows(Version.parse('1.0.0')) + assert not package_constraint.components['libswsscommon'].allows(Version.parse('2.0.0')) + + +def test_version_to_tag(): + assert version.version_to_tag(Version.parse('1.0.0-rc0')) == '1.0.0-rc0' + assert version.version_to_tag(Version.parse('1.0.0-rc0+152')) == '1.0.0-rc0_152' + + +def test_tag_to_version(): + assert str(version.tag_to_version('1.0.0-rc0_152')) == 
'1.0.0-rc0+152' + assert str(version.tag_to_version('1.0.0-rc0')) == '1.0.0-rc0' diff --git a/tests/sonic_package_manager/test_database.py b/tests/sonic_package_manager/test_database.py new file mode 100644 index 0000000000..1c565d6f4c --- /dev/null +++ b/tests/sonic_package_manager/test_database.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python + +import pytest + +from sonic_package_manager.database import PackageEntry +from sonic_package_manager.errors import ( + PackageNotFoundError, + PackageAlreadyExistsError, + PackageManagerError +) +from sonic_package_manager.version import Version + + +def test_database_get_package(fake_db): + swss_package = fake_db.get_package('swss') + assert swss_package.installed + assert swss_package.built_in + assert swss_package.repository == 'docker-orchagent' + assert swss_package.default_reference == '1.0.0' + assert swss_package.version == Version(1, 0, 0) + + +def test_database_get_package_not_builtin(fake_db): + test_package = fake_db.get_package('test-package') + assert not test_package.installed + assert not test_package.built_in + assert test_package.repository == 'Azure/docker-test' + assert test_package.default_reference == '1.6.0' + assert test_package.version is None + + +def test_database_get_package_not_existing(fake_db): + with pytest.raises(PackageNotFoundError): + fake_db.get_package('abc') + + +def test_database_add_package(fake_db): + fake_db.add_package('test-package-99', 'Azure/docker-test-99') + test_package = fake_db.get_package('test-package-99') + assert not test_package.installed + assert not test_package.built_in + assert test_package.repository == 'Azure/docker-test-99' + assert test_package.default_reference is None + assert test_package.version is None + + +def test_database_add_package_existing(fake_db): + with pytest.raises(PackageAlreadyExistsError): + fake_db.add_package('swss', 'Azure/docker-orchagent') + + +def test_database_update_package(fake_db): + test_package = fake_db.get_package('test-package-2') + test_package.installed = True + test_package.version = Version(1, 2, 3) + fake_db.update_package(test_package) + test_package = fake_db.get_package('test-package-2') + assert test_package.installed + assert test_package.version == Version(1, 2, 3) + + +def test_database_update_package_non_existing(fake_db): + test_package = PackageEntry('abc', 'abc') + with pytest.raises(PackageNotFoundError): + fake_db.update_package(test_package) + + +def test_database_remove_package(fake_db): + fake_db.remove_package('test-package') + assert not fake_db.has_package('test-package') + + +def test_database_remove_package_non_existing(fake_db): + with pytest.raises(PackageNotFoundError): + fake_db.remove_package('non-existing-package') + + +def test_database_remove_package_installed(fake_db): + with pytest.raises(PackageManagerError, + match='Package test-package-3 is installed, ' + 'uninstall it first'): + fake_db.remove_package('test-package-3') + + +def test_database_remove_package_built_in(fake_db): + with pytest.raises(PackageManagerError, + match='Package swss is built-in, ' + 'cannot remove it'): + fake_db.remove_package('swss') diff --git a/tests/sonic_package_manager/test_manager.py b/tests/sonic_package_manager/test_manager.py new file mode 100644 index 0000000000..c7eb1ca7ac --- /dev/null +++ b/tests/sonic_package_manager/test_manager.py @@ -0,0 +1,322 @@ +#!/usr/bin/env python + +from unittest.mock import Mock, call + +import pytest + +from sonic_package_manager.errors import * +from sonic_package_manager.version import Version + + 
+def test_installation_not_installed(package_manager): + package_manager.install('test-package') + package = package_manager.get_installed_package('test-package') + assert package.installed + assert package.entry.default_reference == '1.6.0' + + +def test_installation_already_installed(package_manager): + package_manager.install('test-package') + with pytest.raises(PackageManagerError, + match='1.6.0 is already installed'): + package_manager.install('test-package') + + +def test_installation_dependencies(package_manager, fake_metadata_resolver, mock_docker_api): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['package']['depends'] = ['swss^2.0.0'] + with pytest.raises(PackageInstallationError, + match='Package test-package requires swss>=2.0.0,<3.0.0 ' + 'but version 1.0.0 is installed'): + package_manager.install('test-package') + + +def test_installation_dependencies_missing_package(package_manager, fake_metadata_resolver): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['package']['depends'] = ['missing-package>=1.0.0'] + with pytest.raises(PackageInstallationError, + match='Package test-package requires ' + 'missing-package>=1.0.0 but it is not installed'): + package_manager.install('test-package') + + +def test_installation_dependencies_satisfied(package_manager, fake_metadata_resolver): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['package']['depends'] = ['database>=1.0.0', 'swss>=1.0.0'] + package_manager.install('test-package') + + +def test_installation_components_dependencies_satisfied(package_manager, fake_metadata_resolver): + metadata = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0'] + manifest = metadata['manifest'] + metadata['components'] = { + 'libswsscommon': Version.parse('1.1.0') + } + manifest['package']['depends'] = [ + { + 'name': 'swss', + 'version': '>=1.0.0', + 'components': { + 'libswsscommon': '^1.0.0', + }, + }, + ] + package_manager.install('test-package') + + +def test_installation_components_dependencies_not_satisfied(package_manager, fake_metadata_resolver): + metadata = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0'] + manifest = metadata['manifest'] + metadata['components'] = { + 'libswsscommon': Version.parse('1.1.0') + } + manifest['package']['depends'] = [ + { + 'name': 'swss', + 'version': '>=1.0.0', + 'components': { + 'libswsscommon': '^1.1.0', + }, + }, + ] + with pytest.raises(PackageInstallationError, + match='Package test-package requires libswsscommon >=1.1.0,<2.0.0 ' + 'in package swss>=1.0.0 but version 1.0.0 is installed'): + package_manager.install('test-package') + + +def test_installation_components_dependencies_implicit(package_manager, fake_metadata_resolver): + metadata = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0'] + manifest = metadata['manifest'] + metadata['components'] = { + 'libswsscommon': Version.parse('2.1.0') + } + manifest['package']['depends'] = [ + { + 'name': 'swss', + 'version': '>=1.0.0', + }, + ] + with pytest.raises(PackageInstallationError, + match='Package test-package requires libswsscommon >=2.1.0,<3.0.0 ' + 'in package swss>=1.0.0 but version 1.0.0 is installed'): + package_manager.install('test-package') + + +def test_installation_components_dependencies_explicitely_allowed(package_manager, fake_metadata_resolver): + metadata = 
fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0'] + manifest = metadata['manifest'] + metadata['components'] = { + 'libswsscommon': Version.parse('2.1.0') + } + manifest['package']['depends'] = [ + { + 'name': 'swss', + 'version': '>=1.0.0', + 'components': { + 'libswsscommon': '>=1.0.0,<3.0.0' + } + }, + ] + package_manager.install('test-package') + + +def test_installation_breaks(package_manager, fake_metadata_resolver): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['package']['breaks'] = ['swss^1.0.0'] + with pytest.raises(PackageInstallationError, + match='Package test-package conflicts with ' + 'swss>=1.0.0,<2.0.0 but version 1.0.0 is installed'): + package_manager.install('test-package') + + +def test_installation_breaks_missing_package(package_manager, fake_metadata_resolver): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['package']['breaks'] = ['missing-package^1.0.0'] + package_manager.install('test-package') + + +def test_installation_breaks_not_installed_package(package_manager, fake_metadata_resolver): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['package']['breaks'] = ['test-package-2^1.0.0'] + package_manager.install('test-package') + + +def test_installation_base_os_constraint(package_manager, fake_metadata_resolver): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['package']['base-os']['libswsscommon'] = '>=2.0.0' + with pytest.raises(PackageSonicRequirementError, + match='Package test-package requires base OS component libswsscommon ' + 'version >=2.0.0 while the installed version is 1.0.0'): + package_manager.install('test-package') + + +def test_installation_base_os_constraint_satisfied(package_manager, fake_metadata_resolver): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['package']['base-os']['libswsscommon'] = '>=1.0.0' + package_manager.install('test-package') + + +def test_installation_cli_plugin(package_manager, fake_metadata_resolver, anything): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['cli']= {'show': '/cli/plugin.py'} + package_manager._install_cli_plugins = Mock() + package_manager.install('test-package') + package_manager._install_cli_plugins.assert_called_once_with(anything) + + +def test_installation_cli_plugin_skipped(package_manager, fake_metadata_resolver, anything): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['cli']= {'show': '/cli/plugin.py'} + package_manager._install_cli_plugins = Mock() + package_manager.install('test-package', skip_host_plugins=True) + package_manager._install_cli_plugins.assert_not_called() + + +def test_installation_cli_plugin_is_mandatory_but_skipped(package_manager, fake_metadata_resolver): + manifest = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0']['manifest'] + manifest['cli']= {'mandatory': True} + with pytest.raises(PackageManagerError, + match='CLI is mandatory for package test-package but ' + 'it was requested to be not installed'): + package_manager.install('test-package', skip_host_plugins=True) + + +def test_installation(package_manager, mock_docker_api, anything): + package_manager.install('test-package') + mock_docker_api.pull.assert_called_once_with('Azure/docker-test', 
'1.6.0') + + +def test_installation_using_reference(package_manager, + fake_metadata_resolver, + mock_docker_api, + anything): + ref = 'sha256:9780f6d83e45878749497a6297ed9906c19ee0cc48cc88dc63827564bb8768fd' + metadata = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0'] + fake_metadata_resolver.metadata_store['Azure/docker-test'][ref] = metadata + + package_manager.install(f'test-package@{ref}') + mock_docker_api.pull.assert_called_once_with('Azure/docker-test', f'{ref}') + + +def test_manager_installation_tag(package_manager, + mock_docker_api, + anything): + package_manager.install(f'test-package=1.6.0') + mock_docker_api.pull.assert_called_once_with('Azure/docker-test', '1.6.0') + + +def test_installation_from_file(package_manager, mock_docker_api, sonic_fs): + sonic_fs.create_file('Azure/docker-test:1.6.0') + package_manager.install(tarball='Azure/docker-test:1.6.0') + mock_docker_api.load.assert_called_once_with('Azure/docker-test:1.6.0') + + +def test_installation_from_registry(package_manager, mock_docker_api): + package_manager.install(repotag='Azure/docker-test:1.6.0') + mock_docker_api.pull.assert_called_once_with('Azure/docker-test', '1.6.0') + + +def test_installation_from_registry_using_digest(package_manager, mock_docker_api, fake_metadata_resolver): + ref = 'sha256:9780f6d83e45878749497a6297ed9906c19ee0cc48cc88dc63827564bb8768fd' + metadata = fake_metadata_resolver.metadata_store['Azure/docker-test']['1.6.0'] + fake_metadata_resolver.metadata_store['Azure/docker-test'][ref] = metadata + + ref = 'sha256:9780f6d83e45878749497a6297ed9906c19ee0cc48cc88dc63827564bb8768fd' + package_manager.install(repotag=f'Azure/docker-test@{ref}') + mock_docker_api.pull.assert_called_once_with('Azure/docker-test', ref) + + +def test_installation_from_file_known_package(package_manager, fake_db, sonic_fs): + repository = fake_db.get_package('test-package').repository + sonic_fs.create_file('Azure/docker-test:1.6.0') + package_manager.install(tarball='Azure/docker-test:1.6.0') + # locally installed package does not override already known package repository + assert repository == fake_db.get_package('test-package').repository + + +def test_installation_from_file_unknown_package(package_manager, fake_db, sonic_fs): + assert not fake_db.has_package('test-package-4') + sonic_fs.create_file('Azure/docker-test-4:1.5.0') + package_manager.install(tarball='Azure/docker-test-4:1.5.0') + assert fake_db.has_package('test-package-4') + + +def test_upgrade_from_file_known_package(package_manager, fake_db, sonic_fs): + repository = fake_db.get_package('test-package-6').repository + # install older version from repository + package_manager.install('test-package-6=1.5.0') + # upgrade from file + sonic_fs.create_file('Azure/docker-test-6:2.0.0') + package_manager.install(tarball='Azure/docker-test-6:2.0.0') + # locally installed package does not override already known package repository + assert repository == fake_db.get_package('test-package-6').repository + + +def test_installation_non_default_owner(package_manager, anything, mock_service_creator): + package_manager.install('test-package', default_owner='kube') + mock_service_creator.create.assert_called_once_with(anything, state='disabled', owner='kube') + + +def test_installation_enabled(package_manager, anything, mock_service_creator): + package_manager.install('test-package', enable=True) + mock_service_creator.create.assert_called_once_with(anything, state='enabled', owner='local') + + +def test_installation_fault(package_manager, 
mock_docker_api, mock_service_creator):
+    # make service creation fail
+    mock_service_creator.create = Mock(side_effect=Exception('Failed to create service'))
+    # 'rmi' is called on rollback
+    mock_docker_api.rmi = Mock(side_effect=Exception('Failed to remove image'))
+    # assert that the rollback does not hide the original failure.
+    with pytest.raises(Exception, match='Failed to create service'):
+        package_manager.install('test-package')
+    mock_docker_api.rmi.assert_called_once()
+
+
+def test_manager_installation_version_range(package_manager):
+    with pytest.raises(PackageManagerError,
+                       match='Can only install specific version. '
+                             'Use only following expression "test-package=<version>" '
+                             'to install specific version'):
+        package_manager.install('test-package>=1.6.0')
+
+
+def test_manager_upgrade(package_manager, sonic_fs):
+    package_manager.install('test-package-6=1.5.0')
+    package = package_manager.get_installed_package('test-package-6')
+
+    package_manager.install('test-package-6=2.0.0')
+    upgraded_package = package_manager.get_installed_package('test-package-6')
+    assert upgraded_package.entry.version == Version(2, 0, 0)
+    assert upgraded_package.entry.default_reference == package.entry.default_reference
+
+
+def test_manager_package_reset(package_manager, sonic_fs):
+    package_manager.install('test-package-6=1.5.0')
+    package_manager.install('test-package-6=2.0.0')
+
+    package_manager.reset('test-package-6')
+    upgraded_package = package_manager.get_installed_package('test-package-6')
+    assert upgraded_package.entry.version == Version(1, 5, 0)
+
+
+def test_manager_migration(package_manager, fake_db_for_migration):
+    package_manager.install = Mock()
+    package_manager.migrate_packages(fake_db_for_migration)
+
+    package_manager.install.assert_has_calls([
+        # test-package-3 was installed but there is a newer version installed
+        # in fake_db_for_migration, asserting for upgrade
+        call('test-package-3=1.6.0'),
+        # test-package-4 was not present in DB at all, but it is present and installed in
+        # fake_db_for_migration, thus asserting that it is going to be installed.
+        call('test-package-4=1.5.0'),
+        # test-package-5 1.5.0 was installed in fake_db_for_migration but the default
+        # in current db is 1.9.0, assert that migration will install the newer version.
+        call('test-package-5=1.9.0'),
+        # test-package-6 2.0.0 was installed in fake_db_for_migration but the default
+        # in current db is 1.5.0, assert that migration will install the newer version.
+ call('test-package-6=2.0.0')], + any_order=True + ) diff --git a/tests/sonic_package_manager/test_manifest.py b/tests/sonic_package_manager/test_manifest.py new file mode 100644 index 0000000000..efdcc558ab --- /dev/null +++ b/tests/sonic_package_manager/test_manifest.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python + +import pytest + +from sonic_package_manager.constraint import ComponentConstraints +from sonic_package_manager.manifest import Manifest, ManifestError +from sonic_package_manager.version import VersionRange + + +def test_manifest_v1_defaults(): + manifest = Manifest.marshal({'package': {'name': 'test', + 'version': '1.0.0'}, + 'service': {'name': 'test'}}) + assert manifest['package']['depends'] == [] + assert manifest['package']['breaks'] == [] + assert manifest['package']['base-os'] == ComponentConstraints() + assert not manifest['service']['asic-service'] + assert manifest['service']['host-service'] + + +def test_manifest_v1_invalid_version(): + with pytest.raises(ManifestError): + Manifest.marshal({'package': {'version': 'abc', 'name': 'test'}, + 'service': {'name': 'test'}}) + + +def test_manifest_v1_invalid_package_constraint(): + with pytest.raises(ManifestError): + Manifest.marshal({'package': {'name': 'test', 'version': '1.0.0', + 'depends': ['swss>a']}, + 'service': {'name': 'test'}}) + + +def test_manifest_v1_service_spec(): + manifest = Manifest.marshal({'package': {'name': 'test', + 'version': '1.0.0'}, + 'service': {'name': 'test', 'asic-service': True}}) + assert manifest['service']['asic-service'] + + +def test_manifest_v1_mounts(): + manifest = Manifest.marshal({'version': '1.0.0', 'package': {'name': 'test', + 'version': '1.0.0'}, + 'service': {'name': 'cpu-report'}, + 'container': {'privileged': True, + 'mounts': [{'source': 'a', 'target': 'b', 'type': 'bind'}]}}) + assert manifest['container']['mounts'][0]['source'] == 'a' + assert manifest['container']['mounts'][0]['target'] == 'b' + assert manifest['container']['mounts'][0]['type'] == 'bind' + + +def test_manifest_v1_mounts_invalid(): + with pytest.raises(ManifestError): + Manifest.marshal({'version': '1.0.0', 'package': {'name': 'test', 'version': '1.0.0'}, + 'service': {'name': 'cpu-report'}, + 'container': {'privileged': True, + 'mounts': [{'not-source': 'a', 'target': 'b', 'type': 'bind'}]}}) + + +def test_manifest_v1_unmarshal(): + manifest_json_input = {'package': {'name': 'test', 'version': '1.0.0', + 'depends': [ + { + 'name': 'swss', + 'version': '>1.0.0', + 'components': {}, + } + ]}, + 'service': {'name': 'test'}} + manifest = Manifest.marshal(manifest_json_input) + manifest_json = manifest.unmarshal() + for key, section in manifest_json_input.items(): + for field, value in section.items(): + assert manifest_json[key][field] == value diff --git a/tests/sonic_package_manager/test_metadata.py b/tests/sonic_package_manager/test_metadata.py new file mode 100644 index 0000000000..aee2f49428 --- /dev/null +++ b/tests/sonic_package_manager/test_metadata.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python + +import contextlib +from unittest.mock import Mock, MagicMock + +from sonic_package_manager.database import PackageEntry +from sonic_package_manager.errors import MetadataError +from sonic_package_manager.metadata import MetadataResolver +from sonic_package_manager.version import Version + + +def test_metadata_resolver_local(mock_registry_resolver, mock_docker_api): + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + # it raises exception because mock manifest is not a valid 
manifest + # but this is not a test objective, so just suppress the error. + with contextlib.suppress(MetadataError): + metadata_resolver.from_local('image') + mock_docker_api.labels.assert_called_once() + + +def test_metadata_resolver_remote(mock_registry_resolver, mock_docker_api): + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + mock_registry = MagicMock() + mock_registry.manifest = MagicMock(return_value={'config': {'digest': 'some-digest'}}) + + def return_mock_registry(repository): + return mock_registry + + mock_registry_resolver.get_registry_for = Mock(side_effect=return_mock_registry) + # it raises exception because mock manifest is not a valid manifest + # but this is not a test objective, so just suppress the error. + with contextlib.suppress(MetadataError): + metadata_resolver.from_registry('test-repository', '1.2.0') + mock_registry_resolver.get_registry_for.assert_called_once_with('test-repository') + mock_registry.manifest.assert_called_once_with('test-repository', '1.2.0') + mock_registry.blobs.assert_called_once_with('test-repository', 'some-digest') + mock_docker_api.labels.assert_not_called() diff --git a/tests/sonic_package_manager/test_reference.py b/tests/sonic_package_manager/test_reference.py new file mode 100644 index 0000000000..043b66ddd5 --- /dev/null +++ b/tests/sonic_package_manager/test_reference.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python + +import pytest + +from sonic_package_manager.reference import PackageReference + + +def test_reference(): + package_constraint = PackageReference.parse( + 'swss@sha256:9780f6d83e45878749497a6297ed9906c19ee0cc48cc88dc63827564bb8768fd' + ) + assert package_constraint.name == 'swss' + assert package_constraint.reference == 'sha256:9780f6d83e45878749497a6297ed9906c19ee0cc48cc88dc63827564bb8768fd' + + +def test_reference_invalid(): + with pytest.raises(ValueError): + PackageReference.parse('swssfdsf') diff --git a/tests/sonic_package_manager/test_registry.py b/tests/sonic_package_manager/test_registry.py new file mode 100644 index 0000000000..0d82499df3 --- /dev/null +++ b/tests/sonic_package_manager/test_registry.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python + +from sonic_package_manager.registry import RegistryResolver + + +def test_get_registry_for(): + resolver = RegistryResolver() + registry = resolver.get_registry_for('debian') + assert registry is resolver.DockerHubRegistry + registry = resolver.get_registry_for('Azure/sonic') + assert registry is resolver.DockerHubRegistry + registry = resolver.get_registry_for('registry-server:5000/docker') + assert registry.url == 'https://registry-server:5000' + registry = resolver.get_registry_for('registry-server.com/docker') + assert registry.url == 'https://registry-server.com' diff --git a/tests/sonic_package_manager/test_service_creator.py b/tests/sonic_package_manager/test_service_creator.py new file mode 100644 index 0000000000..fec8de600c --- /dev/null +++ b/tests/sonic_package_manager/test_service_creator.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python + +import os +from unittest.mock import Mock, MagicMock + +import pytest + +from sonic_package_manager.database import PackageEntry +from sonic_package_manager.manifest import Manifest +from sonic_package_manager.metadata import Metadata +from sonic_package_manager.package import Package +from sonic_package_manager.service_creator.creator import * +from sonic_package_manager.service_creator.feature import FeatureRegistry + + +@pytest.fixture +def manifest(): + return Manifest.marshal({ + 'package': { 
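+            # package identity fields; the tests below wrap this manifest in Metadata/Package objects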
+ 'name': 'test', + 'version': '1.0.0', + }, + 'service': { + 'name': 'test', + 'requires': ['database'], + 'after': ['database', 'swss', 'syncd'], + 'before': ['ntp-config'], + 'dependent-of': ['swss'], + 'asic-service': False, + 'host-service': True, + }, + 'container': { + 'privileged': True, + 'volumes': [ + '/etc/sonic:/etc/sonic:ro' + ] + } + }) + + +def test_service_creator(sonic_fs, manifest, mock_feature_registry, mock_sonic_db): + creator = ServiceCreator(mock_feature_registry, mock_sonic_db) + entry = PackageEntry('test', 'azure/sonic-test') + package = Package(entry, Metadata(manifest)) + creator.create(package) + + assert sonic_fs.exists(os.path.join(ETC_SONIC_PATH, 'swss_dependent')) + assert sonic_fs.exists(os.path.join(DOCKER_CTL_SCRIPT_LOCATION, 'test.sh')) + assert sonic_fs.exists(os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, 'test.sh')) + assert sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.service')) + + +def test_service_creator_with_timer_unit(sonic_fs, manifest, mock_feature_registry, mock_sonic_db): + creator = ServiceCreator(mock_feature_registry, mock_sonic_db) + entry = PackageEntry('test', 'azure/sonic-test') + package = Package(entry, Metadata(manifest)) + creator.create(package) + + assert not sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.timer')) + + manifest['service']['delayed'] = True + package = Package(entry, Metadata(manifest)) + creator.create(package) + + assert sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.timer')) + + +def test_service_creator_with_debug_dump(sonic_fs, manifest, mock_feature_registry, mock_sonic_db): + creator = ServiceCreator(mock_feature_registry, mock_sonic_db) + entry = PackageEntry('test', 'azure/sonic-test') + package = Package(entry, Metadata(manifest)) + creator.create(package) + + assert not sonic_fs.exists(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, 'test')) + + manifest['package']['debug-dump'] = '/some/command' + package = Package(entry, Metadata(manifest)) + creator.create(package) + + assert sonic_fs.exists(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, 'test')) + + +def test_service_creator_initial_config(sonic_fs, manifest, mock_feature_registry, mock_sonic_db): + mock_table = Mock() + mock_table.get = Mock(return_value=(True, (('field_2', 'original_value_2'),))) + mock_sonic_db.initial_table = Mock(return_value=mock_table) + mock_sonic_db.persistent_table = Mock(return_value=mock_table) + mock_sonic_db.running_table = Mock(return_value=mock_table) + + creator = ServiceCreator(mock_feature_registry, mock_sonic_db) + + entry = PackageEntry('test', 'azure/sonic-test') + package = Package(entry, Metadata(manifest)) + creator.create(package) + + assert not sonic_fs.exists(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, 'test')) + + manifest['package']['init-cfg'] = { + 'TABLE_A': { + 'key_a': { + 'field_1': 'value_1', + 'field_2': 'value_2' + }, + }, + } + package = Package(entry, Metadata(manifest)) + + creator.create(package) + mock_table.set.assert_called_with('key_a', [('field_1', 'value_1'), + ('field_2', 'original_value_2')]) + + creator.remove(package) + mock_table._del.assert_called_with('key_a') + + +def test_feature_registration(mock_sonic_db, manifest): + mock_feature_table = Mock() + mock_feature_table.get = Mock(return_value=(False, ())) + mock_sonic_db.initial_table = Mock(return_value=mock_feature_table) + mock_sonic_db.persistent_table = Mock(return_value=mock_feature_table) + mock_sonic_db.running_table = Mock(return_value=mock_feature_table) + feature_registry = FeatureRegistry(mock_sonic_db) + 
feature_registry.register(manifest)
+    mock_feature_table.set.assert_called_with('test', [
+        ('state', 'disabled'),
+        ('auto_restart', 'enabled'),
+        ('high_mem_alert', 'disabled'),
+        ('set_owner', 'local'),
+        ('has_per_asic_scope', 'False'),
+        ('has_global_scope', 'True'),
+        ('has_timer', 'False'),
+    ])
+
+
+def test_feature_registration_with_timer(mock_sonic_db, manifest):
+    manifest['service']['delayed'] = True
+    mock_feature_table = Mock()
+    mock_feature_table.get = Mock(return_value=(False, ()))
+    mock_sonic_db.initial_table = Mock(return_value=mock_feature_table)
+    mock_sonic_db.persistent_table = Mock(return_value=mock_feature_table)
+    mock_sonic_db.running_table = Mock(return_value=mock_feature_table)
+    feature_registry = FeatureRegistry(mock_sonic_db)
+    feature_registry.register(manifest)
+    mock_feature_table.set.assert_called_with('test', [
+        ('state', 'disabled'),
+        ('auto_restart', 'enabled'),
+        ('high_mem_alert', 'disabled'),
+        ('set_owner', 'local'),
+        ('has_per_asic_scope', 'False'),
+        ('has_global_scope', 'True'),
+        ('has_timer', 'True'),
+    ])
+
+
+def test_feature_registration_with_non_default_owner(mock_sonic_db, manifest):
+    mock_feature_table = Mock()
+    mock_feature_table.get = Mock(return_value=(False, ()))
+    mock_sonic_db.initial_table = Mock(return_value=mock_feature_table)
+    mock_sonic_db.persistent_table = Mock(return_value=mock_feature_table)
+    mock_sonic_db.running_table = Mock(return_value=mock_feature_table)
+    feature_registry = FeatureRegistry(mock_sonic_db)
+    feature_registry.register(manifest, owner='kube')
+    mock_feature_table.set.assert_called_with('test', [
+        ('state', 'disabled'),
+        ('auto_restart', 'enabled'),
+        ('high_mem_alert', 'disabled'),
+        ('set_owner', 'kube'),
+        ('has_per_asic_scope', 'False'),
+        ('has_global_scope', 'True'),
+        ('has_timer', 'False'),
+    ])
diff --git a/tests/sonic_package_manager/test_utils.py b/tests/sonic_package_manager/test_utils.py
new file mode 100644
index 0000000000..c4d8b15840
--- /dev/null
+++ b/tests/sonic_package_manager/test_utils.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python
+
+from sonic_package_manager import utils
+
+
+def test_make_python_identifier():
+    assert utils.make_python_identifier('-some-package name').isidentifier()
+    assert utils.make_python_identifier('01 leading digit').isidentifier()

From 2e09b2202283ff350a52ee5b0937240851cb298d Mon Sep 17 00:00:00 2001
From: Stephen Sun <5379172+stephenxs@users.noreply.github.com>
Date: Mon, 3 May 2021 23:57:39 +0800
Subject: [PATCH 22/41] Handle the new db version which mellanox_buffer_migrator isn't interested in (#1566)

Enhancement: handle the case where there is no buffer change in the latest database version

Currently, the following two versions are the same:
- The latest version changed by mellanox_buffer_migrator
- The latest version in CONFIG_DB

That won't be true if another part of CONFIG_DB is updated. In that case, the latest version in CONFIG_DB will be greater than the latest version known to mellanox_buffer_migrator.

However, this can break the buffer migrator unit test:
- The db_migrator will always migrate the database to the latest version
- The config database version check will fail if the latest version in the config database doesn't match the one defined in the buffer migrator.

This change supports that case.
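To illustrate the intent (a sketch only; the real helper, advance_version_for_expected_database, is in the diff below, and SONiC database versions are assumed to be strings such as 'version_2_0_0'):

    # If the expected database stops at the last buffer-related version but the
    # migrator advanced further (another CONFIG_DB area bumped the version),
    # align the expected version so the comparison covers only buffer content.
    if expected_version == version_list[-1] and migrated_version > expected_version:
        expected_db.set_entry('VERSIONS', 'DATABASE', {'VERSION': migrated_version})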
Signed-off-by: Stephen Sun
---
 tests/db_migrator_test.py | 18 ++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py
index 87cf2c8c11..bbff2a7666 100644
--- a/tests/db_migrator_test.py
+++ b/tests/db_migrator_test.py
@@ -76,6 +76,16 @@ def check_appl_db(self, result, expected):
         for key in keys:
             assert expected.get_all(expected.APPL_DB, key) == result.get_all(result.APPL_DB, key)
 
+    def advance_version_for_expected_database(self, migrated_db, expected_db):
+        # In case there are new db versions greater than the latest one that mellanox buffer migrator is interested in,
+        # we just advance the database version in the expected database to make the test pass
+        expected_dbversion = expected_db.get_entry('VERSIONS', 'DATABASE')
+        dbmgtr_dbversion = migrated_db.get_entry('VERSIONS', 'DATABASE')
+        if expected_dbversion and dbmgtr_dbversion:
+            if expected_dbversion['VERSION'] == self.version_list[-1] and dbmgtr_dbversion['VERSION'] > expected_dbversion['VERSION']:
+                expected_dbversion['VERSION'] = dbmgtr_dbversion['VERSION']
+                expected_db.set_entry('VERSIONS', 'DATABASE', expected_dbversion)
+
     @pytest.mark.parametrize('scenario',
                              ['empty-config',
                               'non-default-config',
@@ -93,6 +103,7 @@ def test_mellanox_buffer_migrator_negative_cold_reboot(self, scenario):
         dbmgtr = db_migrator.DBMigrator(None)
         dbmgtr.migrate()
         expected_db = self.mock_dedicated_config_db(db_after_migrate)
+        self.advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb)
         self.check_config_db(dbmgtr.configDB, expected_db.cfgdb)
         assert not dbmgtr.mellanox_buffer_migrator.is_buffer_config_default
 
@@ -119,8 +130,6 @@ def test_mellanox_buffer_migrator_for_cold_reboot(self, sku_version, topo):
         sku, start_version = sku_version
         version = start_version
         start_index = self.version_list.index(start_version)
-        # Eventually, the config db should be migrated to the latest version
-        expected_db = self.mock_dedicated_config_db(self.make_db_name_by_sku_topo_version(sku, topo, self.version_list[-1]))
 
         # start_version represents the database version from which the SKU is supported
         # For each SKU,
@@ -130,6 +139,9 @@ def test_mellanox_buffer_migrator_for_cold_reboot(self, sku_version, topo):
         import db_migrator
         dbmgtr = db_migrator.DBMigrator(None)
         dbmgtr.migrate()
+        # Eventually, the config db should be migrated to the latest version
+        expected_db = self.mock_dedicated_config_db(self.make_db_name_by_sku_topo_version(sku, topo, self.version_list[-1]))
+        self.advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb)
         self.check_config_db(dbmgtr.configDB, expected_db.cfgdb)
         assert dbmgtr.mellanox_buffer_migrator.is_buffer_config_default
 
@@ -145,6 +157,7 @@ def mellanox_buffer_migrator_warm_reboot_runner(self, input_config_db, input_app
         import db_migrator
         dbmgtr = db_migrator.DBMigrator(None)
         dbmgtr.migrate()
+        self.advance_version_for_expected_database(dbmgtr.configDB, expected_config_db.cfgdb)
         assert dbmgtr.mellanox_buffer_migrator.is_buffer_config_default == is_buffer_config_default_expected
         self.check_config_db(dbmgtr.configDB, expected_config_db.cfgdb)
         self.check_appl_db(dbmgtr.appDB, expected_appl_db)
@@ -173,6 +186,7 @@ def test_mellanox_buffer_migrator_for_warm_reboot(self, sku, topo):
         self.mellanox_buffer_migrator_warm_reboot_runner(input_db_name, input_db_name, expected_db_name, expected_db_name, True)
 
     def test_mellanox_buffer_migrator_negative_nondefault_for_warm_reboot(self):
+        device_info.get_sonic_version_info = get_sonic_version_info_mlnx
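+        # get_sonic_version_info is patched above (via a helper assumed to be defined earlier in this file) so DBMigrator selects the Mellanox buffer migrator path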
         expected_config_db = 'non-default-config-expected'
         expected_appl_db = 'non-default-expected'
         input_config_db = 'non-default-config-input'

From 912076658883513214b769e2a670b7b2303cb67f Mon Sep 17 00:00:00 2001
From: Qi Luo
Date: Mon, 3 May 2021 09:03:49 -0700
Subject: [PATCH 23/41] Relax the install_requires; no need to pin exact versions as long as there are no breaking changes in future versions (#1530)

#### What I did
Fixes https://github.com/Azure/sonic-buildimage/issues/7152

#### How I did it
Relax the install_requires

---
 setup.py | 42 +++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/setup.py b/setup.py
index 15f93b46f7..b8b6d07229 100644
--- a/setup.py
+++ b/setup.py
@@ -161,32 +161,32 @@
     },
     install_requires=[
         'click==7.0',
-        'click-log==0.3.2',
-        'docker==4.4.4',
-        'docker-image-py==0.1.10',
-        'filelock==3.0.12',
-        'enlighten==1.8.0',
-        'ipaddress==1.0.23',
-        'jinja2==2.11.3',
-        'jsondiff==1.2.0',
-        'jsonpatch==1.32.0',
-        'm2crypto==0.31.0',
-        'natsort==6.2.1',  # 6.2.1 is the last version which supports Python 2. Can update once we no longer support Python 2
-        'netaddr==0.8.0',
-        'netifaces==0.10.7',
-        'pexpect==4.8.0',
-        'poetry-semver==0.1.0',
-        'prettyprinter==0.18.0',
-        'pyroute2==0.5.14',
-        'requests==2.25.0',
+        'click-log>=0.3.2',
+        'docker>=4.4.4',
+        'docker-image-py>=0.1.10',
+        'filelock>=3.0.12',
+        'enlighten>=1.8.0',
+        'ipaddress>=1.0.23',
+        'jinja2>=2.11.3',
+        'jsondiff>=1.2.0',
+        'jsonpatch>=1.32.0',
+        'm2crypto>=0.31.0',
+        'natsort>=6.2.1',  # 6.2.1 is the last version which supports Python 2. Can update once we no longer support Python 2
+        'netaddr>=0.8.0',
+        'netifaces>=0.10.7',
+        'pexpect>=4.8.0',
+        'poetry-semver>=0.1.0',
+        'prettyprinter>=0.18.0',
+        'pyroute2>=0.5.14',
+        'requests>=2.25.0',
         'sonic-config-engine',
         'sonic-platform-common',
         'sonic-py-common',
         'sonic-yang-mgmt',
         'swsssdk>=2.0.1',
-        'tabulate==0.8.2',
-        'www-authenticate==0.9.2',
-        'xmltodict==0.12.0',
+        'tabulate>=0.8.2',
+        'www-authenticate>=0.9.2',
+        'xmltodict>=0.12.0',
     ],
     setup_requires= [
         'pytest-runner',

From cbe21599e7e1371c70370a147bca0d4be401c56f Mon Sep 17 00:00:00 2001
From: Volodymyr Samotiy
Date: Tue, 4 May 2021 05:20:34 +0300
Subject: [PATCH 24/41] [vnet] Add "vnet_route_check" script (#1300)

* [vnet] Add "vnet_route_check" script

* [vnet_route_check.py]: tool that verifies VNET routes consistency between SONiC and vendor SDK DBs.

Signed-off-by: Volodymyr Samotiy
---
 scripts/vnet_route_check.py    | 363 +++++++++++++++++++++++++++++
 setup.py                       |   1 +
 tests/vnet_route_check_test.py | 325 +++++++++++++++++++++++
 3 files changed, 689 insertions(+)
 create mode 100755 scripts/vnet_route_check.py
 create mode 100644 tests/vnet_route_check_test.py

diff --git a/scripts/vnet_route_check.py b/scripts/vnet_route_check.py
new file mode 100755
index 0000000000..010e953451
--- /dev/null
+++ b/scripts/vnet_route_check.py
@@ -0,0 +1,363 @@
+#!/usr/bin/env python
+
+import os
+import sys
+import json
+import syslog
+from swsscommon import swsscommon
+
+''' vnet_route_check.py: tool that verifies VNET routes consistency between SONiC and vendor SDK DBs.
+
+Logically, VNET route verification consists of 3 parts:
+1. Get VNET route entries that are missed in ASIC_DB but present in APP_DB.
+2. Get VNET route entries that are missed in APP_DB but present in ASIC_DB.
+3. Get VNET route entries that are missed in SDK but present in ASIC_DB.
+
+Returns 0 if no inconsistency is found and all VNET routes are aligned in all DBs.
+Returns -1 if an inconsistency is found and prints differences between DBs in JSON format to standard output.
+
+Format of differences output:
+{
+    "results": {
+        "missed_in_asic_db_routes": {
+            "<vnet_name>": {
+                "routes": [
+                    "<prefix>/<prefix_length>"
+                ]
+            }
+        },
+        "missed_in_app_db_routes": {
+            "<vnet_name>": {
+                "routes": [
+                    "<prefix>/<prefix_length>"
+                ]
+            }
+        },
+        "missed_in_sdk_routes": {
+            "<vnet_name>": {
+                "routes": [
+                    "<prefix>/<prefix_length>"
+                ]
+            }
+        }
+    }
+}
+'''
+
+
+RC_OK = 0
+RC_ERR = -1
+
+
+report_level = syslog.LOG_ERR
+write_to_syslog = True
+
+
+def set_level(lvl, log_to_syslog):
+    global report_level
+    global write_to_syslog
+
+    write_to_syslog = log_to_syslog
+    report_level = lvl
+
+
+def print_message(lvl, *args):
+    if (lvl <= report_level):
+        msg = ""
+        for arg in args:
+            msg += " " + str(arg)
+        print(msg)
+        if write_to_syslog:
+            syslog.syslog(lvl, msg)
+
+
+def check_vnet_cfg():
+    ''' Returns True if VNET is configured in APP_DB or False if no VNET configuration.
+    '''
+    db = swsscommon.DBConnector('APPL_DB', 0)
+
+    vnet_db_keys = swsscommon.Table(db, 'VNET_TABLE').getKeys()
+
+    return True if vnet_db_keys else False
+
+
+def get_vnet_intfs():
+    ''' Returns dictionary of VNETs and related VNET interfaces.
+    Format: { <vnet_name>: [ <intf_name> ] }
+    '''
+    db = swsscommon.DBConnector('APPL_DB', 0)
+
+    intfs_table = swsscommon.Table(db, 'INTF_TABLE')
+    intfs_keys = intfs_table.getKeys()
+
+    vnet_intfs = {}
+
+    for intf_key in intfs_keys:
+        intf_attrs = intfs_table.get(intf_key)[1]
+
+        if 'vnet_name' in intf_attrs:
+            vnet_name = intf_attrs['vnet_name']
+            if vnet_name in vnet_intfs:
+                vnet_intfs[vnet_name].append(intf_key)
+            else:
+                vnet_intfs[vnet_name] = [intf_key]
+
+    return vnet_intfs
+
+
+def get_all_rifs_oids():
+    ''' Returns dictionary of all router interfaces and their OIDs.
+    Format: { <rif_name>: <rif_oid> }
+    '''
+    db = swsscommon.DBConnector('COUNTERS_DB', 0)
+
+    rif_table = swsscommon.Table(db, 'COUNTERS_RIF_NAME_MAP')
+    rif_keys = rif_table.getKeys()
+
+    rif_name_oid_map = {}
+
+    for rif_name in rif_keys:
+        rif_name_oid_map[rif_name] = rif_table.get(rif_name)[1]
+
+    return rif_name_oid_map
+
+
+def get_vnet_rifs_oids():
+    ''' Returns dictionary of VNET interfaces and their OIDs.
+    Format: { <rif_name>: <rif_oid> }
+    '''
+    vnet_intfs = get_vnet_intfs()
+    intfs_oids = get_all_rifs_oids()
+
+    vnet_intfs = [vnet_intfs[k] for k in vnet_intfs]
+    vnet_intfs = [val for sublist in vnet_intfs for val in sublist]
+
+    vnet_rifs_oids_map = {}
+
+    for intf_name in intfs_oids or {}:
+        if intf_name in vnet_intfs:
+            vnet_rifs_oids_map[intf_name] = intfs_oids[intf_name]
+
+    return vnet_rifs_oids_map
+
+
+def get_vrf_entries():
+    ''' Returns dictionary of VNET interfaces and corresponding VRF OIDs.
+    Format: { <rif_name>: <vrf_oid> }
+    '''
+    db = swsscommon.DBConnector('ASIC_DB', 0)
+    rif_table = swsscommon.Table(db, 'ASIC_STATE')
+
+    vnet_rifs_oids = get_vnet_rifs_oids()
+
+    rif_vrf_map = {}
+    db_keys = rif_table.getKeys()
+
+    for vnet_rif_name in vnet_rifs_oids:
+        for db_key in db_keys:
+            # only take the VRF from the router interface entry that matches this RIF's OID
+            if 'SAI_OBJECT_TYPE_ROUTER_INTERFACE' in db_key and vnet_rifs_oids[vnet_rif_name] in db_key:
+                rif_attrs = rif_table.get(db_key)[1]
+                rif_vrf_map[vnet_rif_name] = rif_attrs['SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID']
+
+    return rif_vrf_map
+
+
+def filter_out_vnet_ip2me_routes(vnet_routes):
+    ''' Filters out IP2ME routes from the provided dictionary with VNET routes
+    Format: { <vnet_name>: { 'routes': [ <ip_prefix> ], 'vrf_oid': <vrf_oid> } }
+    '''
+    db = swsscommon.DBConnector('APPL_DB', 0)
+
+    all_rifs_db_keys = swsscommon.Table(db, 'INTF_TABLE').getKeys()
+    vnet_intfs = get_vnet_intfs()
+
+    vnet_intfs = [vnet_intfs[k] for k in vnet_intfs]
+    vnet_intfs = [val for sublist in vnet_intfs for val in sublist]
+
+    vnet_ip2me_routes = []
+    for rif in all_rifs_db_keys:
+        rif_attrs = rif.split(':')
+        # Skip RIF entries without IP prefix and prefix length (they have only one attribute - RIF name)
+        if len(rif_attrs) == 1:
+            continue
+
+        # rif_attrs[0] - RIF name
+        # rif_attrs[1] - IP prefix and prefix length
+        # IP2ME routes have a '/32' prefix length, so replace the interface prefix length with '/32' and add to the list
+        if rif_attrs[0] in vnet_intfs:
+            vnet_ip2me_routes.append(rif_attrs[1].split('/')[0] + '/32')
+
+    # iterate over a copy of the items since empty VNETs are popped from the dictionary
+    for vnet, vnet_attrs in list(vnet_routes.items()):
+        vnet_attrs['routes'] = [route for route in vnet_attrs['routes'] if route not in vnet_ip2me_routes]
+
+        if not vnet_attrs['routes']:
+            vnet_routes.pop(vnet)
+
+
+def get_vnet_routes_from_app_db():
+    ''' Returns dictionary of VNET routes configured per each VNET in APP_DB.
+    Format: { <vnet_name>: { 'routes': [ <ip_prefix> ], 'vrf_oid': <vrf_oid> } }
+    '''
+    db = swsscommon.DBConnector('APPL_DB', 0)
+
+    vnet_intfs = get_vnet_intfs()
+    vnet_vrfs = get_vrf_entries()
+
+    vnet_route_table = swsscommon.Table(db, 'VNET_ROUTE_TABLE')
+    vnet_route_tunnel_table = swsscommon.Table(db, 'VNET_ROUTE_TUNNEL_TABLE')
+
+    vnet_routes_db_keys = vnet_route_table.getKeys() + vnet_route_tunnel_table.getKeys()
+
+    vnet_routes = {}
+
+    for vnet_route_db_key in vnet_routes_db_keys:
+        vnet_route_list = vnet_route_db_key.split(':')
+        vnet_name = vnet_route_list[0]
+        vnet_route = vnet_route_list[1]
+
+        if vnet_name not in vnet_routes:
+            vnet_routes[vnet_name] = {}
+            vnet_routes[vnet_name]['routes'] = []
+
+            intf = vnet_intfs[vnet_name][0]
+            vnet_routes[vnet_name]['vrf_oid'] = vnet_vrfs.get(intf, 'None')
+
+        vnet_routes[vnet_name]['routes'].append(vnet_route)
+
+    return vnet_routes
+
+
+def get_vnet_routes_from_asic_db():
+    ''' Returns dictionary of VNET routes configured per each VNET in ASIC_DB.
+    Format: { <vnet_name>: { 'routes': [ <ip_prefix> ], 'vrf_oid': <vrf_oid> } }
+    '''
+    db = swsscommon.DBConnector('ASIC_DB', 0)
+
+    tbl = swsscommon.Table(db, 'ASIC_STATE')
+
+    vnet_vrfs = get_vrf_entries()
+    vnet_vrfs_oids = [vnet_vrfs[k] for k in vnet_vrfs]
+
+    vnet_intfs = get_vnet_intfs()
+
+    vrf_oid_to_vnet_map = {}
+
+    for vnet_name, vnet_rifs in vnet_intfs.items():
+        for vnet_rif, vrf_oid in vnet_vrfs.items():
+            if vnet_rif in vnet_rifs:
+                vrf_oid_to_vnet_map[vrf_oid] = vnet_name
+
+    routes_db_keys = tbl.getKeys()
+
+    vnet_routes = {}
+
+    for route_db_key in routes_db_keys:
+        route_attrs = route_db_key.lower().split('\"', -1)
+
+        if 'sai_object_type_route_entry' not in route_attrs[0]:
+            continue
+
+        # route_attrs[11] - VRF OID for the VNET route
+        # route_attrs[3] - VNET route IP subnet
+        vrf_oid = route_attrs[11]
+        ip_addr = route_attrs[3]
+
+        if vrf_oid in vnet_vrfs_oids:
+            vnet_name = vrf_oid_to_vnet_map[vrf_oid]
+
+            if vnet_name not in vnet_routes:
+                vnet_routes[vnet_name] = {}
+                vnet_routes[vnet_name]['routes'] = []
+                vnet_routes[vnet_name]['vrf_oid'] = vrf_oid
+
+            vnet_routes[vnet_name]['routes'].append(ip_addr)
+
+    filter_out_vnet_ip2me_routes(vnet_routes)
+
+    return vnet_routes
+
+
+def get_vnet_routes_diff(routes_1, routes_2):
+    ''' Returns all routes present in routes_2 dictionary but missed in routes_1
+    Format: { <vnet_name>: { 'routes': [ <ip_prefix> ] } }
+    '''
+
+    routes = {}
+
+    for vnet_name, vnet_attrs in routes_2.items():
+        if vnet_name not in routes_1:
+            # the whole VNET is missing, so all of its routes are part of the diff
+            routes[vnet_name] = {'routes': list(vnet_attrs['routes'])}
+        else:
+            for vnet_route in vnet_attrs['routes']:
+                if vnet_route not in routes_1[vnet_name]['routes']:
+                    if vnet_name not in routes:
+                        routes[vnet_name] = {}
+                        routes[vnet_name]['routes'] = []
+                    routes[vnet_name]['routes'].append(vnet_route)
+
+    return routes
+
+
+def get_sdk_vnet_routes_diff(routes):
+    ''' Returns all routes present in routes dictionary but missed in SAI/SDK
+    Format: { <vnet_name>: { 'routes': [ <ip_prefix> ], 'vrf_oid': <vrf_oid> } }
+    '''
+    routes_diff = {}
+
+    res = os.system('docker exec syncd test -f /usr/bin/vnet_route_check.py')
+    if res != 0:
+        return routes_diff
+
+    for vnet_name in routes:
+        vnet_routes = routes[vnet_name]["routes"]
+        vnet_vrf_oid = routes[vnet_name]["vrf_oid"]
+
+        res = os.system('docker exec syncd /usr/bin/vnet_route_check.py {} {}'.format(vnet_vrf_oid, vnet_routes))
+        if res:
+            routes_diff[vnet_name] = {}
+            routes_diff[vnet_name]['routes'] = res
+
+    return routes_diff
+
+
+def main():
+
+    rc = RC_OK
+
+    # Don't run VNET routes consistency logic if there is no VNET configuration
+    if not check_vnet_cfg():
+        return rc, {}
+
+    app_db_vnet_routes = get_vnet_routes_from_app_db()
+    asic_db_vnet_routes = get_vnet_routes_from_asic_db()
+
+    missed_in_asic_db_routes = get_vnet_routes_diff(asic_db_vnet_routes, app_db_vnet_routes)
+    missed_in_app_db_routes = get_vnet_routes_diff(app_db_vnet_routes, asic_db_vnet_routes)
+    missed_in_sdk_routes = get_sdk_vnet_routes_diff(asic_db_vnet_routes)
+
+    res = {}
+    res['results'] = {}
+    rc = RC_OK
+
+    if missed_in_asic_db_routes:
+        res['results']['missed_in_asic_db_routes'] = missed_in_asic_db_routes
+
+    if missed_in_app_db_routes:
+        res['results']['missed_in_app_db_routes'] = missed_in_app_db_routes
+
+    if missed_in_sdk_routes:
+        res['results']['missed_in_sdk_routes'] = missed_in_sdk_routes
+
+    if res['results']:
+        rc = RC_ERR
+        print_message(syslog.LOG_ERR, json.dumps(res, indent=4))
+        print_message(syslog.LOG_ERR, 'Vnet Route Mismatch reported')
+
+    return rc, res
+
+
+if __name__ == "__main__":
+    rc, _ = main()
+    sys.exit(rc)
diff --git a/setup.py b/setup.py index
b8b6d07229..d857cd3723 100644 --- a/setup.py +++ b/setup.py @@ -117,6 +117,7 @@ 'scripts/reboot', 'scripts/route_check.py', 'scripts/route_check_test.sh', + 'scripts/vnet_route_check.py', 'scripts/sfpshow', 'scripts/storyteller', 'scripts/syseeprom-to-json', diff --git a/tests/vnet_route_check_test.py b/tests/vnet_route_check_test.py new file mode 100644 index 0000000000..09f35761a4 --- /dev/null +++ b/tests/vnet_route_check_test.py @@ -0,0 +1,325 @@ +import copy +import json +import os +import sys +from unittest.mock import MagicMock, patch + +import pytest + +sys.path.append("scripts") +import vnet_route_check + +DESCR = "Description" +ARGS = "args" +RET = "return" +APPL_DB = 0 +ASIC_DB = 1 +CNTR_DB = 2 +PRE = "pre-value" +UPD = "update" +RESULT = "res" + +OP_SET = "SET" +OP_DEL = "DEL" + +VXLAN_TUNNEL_TABLE = "VXLAN_TUNNEL_TABLE" +VNET_TABLE = "VNET_TABLE" +VNET_ROUTE_TABLE = "VNET_ROUTE_TABLE" +INTF_TABLE = "INTF_TABLE" +ASIC_STATE = "ASIC_STATE" + +RT_ENTRY_KEY_PREFIX = 'SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest":\"' +RT_ENTRY_KEY_SUFFIX = '\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000d4b\"}' + +current_test_name = None +current_test_no = None +current_test_data = None + +tables_returned = {} + +test_data = { + "0": { + DESCR: "All VNET routes are configured in both APP and ASIC DBs", + ARGS: "vnet_route_check", + PRE: { + APPL_DB: { + VXLAN_TUNNEL_TABLE: { + "tunnel_v4": { "src_ip": "10.1.0.32" } + }, + VNET_TABLE: { + "Vnet1": { "vxlan_tunnel": "tunnel_v4", "vni": "10001" } + }, + INTF_TABLE: { + "Vlan3001": { "vnet_name": "Vnet1" }, + "Vlan3001:30.1.10.1/24": {} + }, + VNET_ROUTE_TABLE: { + "Vnet1:30.1.10.0/24": { "ifname": "Vlan3001" }, + "Vnet1:50.1.1.0/24": { "ifname": "Vlan3001" }, + "Vnet1:50.2.2.0/24": { "ifname": "Vlan3001" } + } + }, + ASIC_DB: { + ASIC_STATE: { + RT_ENTRY_KEY_PREFIX + "30.1.10.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "50.1.1.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "50.2.2.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + "SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x6000000000d76": { + "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": "oid:0x3000000000d4b" + } + } + }, + CNTR_DB: { + "COUNTERS_RIF_NAME_MAP": { "Vlan3001": "oid:0x6000000000d76" } + } + }, + RESULT: { + "results": {} + } + }, + "1": { + DESCR: "VNET route is missed in ASIC DB", + ARGS: "vnet_route_check", + RET: -1, + PRE: { + APPL_DB: { + VXLAN_TUNNEL_TABLE: { + "tunnel_v4": { "src_ip": "10.1.0.32" } + }, + VNET_TABLE: { + "Vnet1": { "vxlan_tunnel": "tunnel_v4", "vni": "10001" } + }, + INTF_TABLE: { + "Vlan3001": { "vnet_name": "Vnet1" }, + "Vlan3001:30.1.10.1/24": {} + }, + VNET_ROUTE_TABLE: { + "Vnet1:30.1.10.0/24": { "ifname": "Vlan3001" }, + "Vnet1:50.1.1.0/24": { "ifname": "Vlan3001" }, + "Vnet1:50.2.2.0/24": { "ifname": "Vlan3001" } + } + }, + ASIC_DB: { + ASIC_STATE: { + RT_ENTRY_KEY_PREFIX + "30.1.10.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "50.1.1.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + "SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x6000000000d76": { + "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": "oid:0x3000000000d4b" + } + } + }, + CNTR_DB: { + "COUNTERS_RIF_NAME_MAP": { "Vlan3001": "oid:0x6000000000d76" } + } + }, + RESULT: { + "results": { + "missed_in_asic_db_routes": { + "Vnet1": { + "routes": [ + "50.2.2.0/24" + ] + } + } + } + } + }, + "2": { + DESCR: "VNET route is missed in APP DB", + ARGS: "vnet_route_check", + RET: -1, + PRE: { + APPL_DB: { + VXLAN_TUNNEL_TABLE: { + "tunnel_v4": { "src_ip": "10.1.0.32" } + }, + VNET_TABLE: { + "Vnet1": { 
"vxlan_tunnel": "tunnel_v4", "vni": "10001" } + }, + INTF_TABLE: { + "Vlan3001": { "vnet_name": "Vnet1" }, + "Vlan3001:30.1.10.1/24": {} + }, + VNET_ROUTE_TABLE: { + "Vnet1:30.1.10.0/24": { "ifname": "Vlan3001" }, + "Vnet1:50.1.1.0/24": { "ifname": "Vlan3001" }, + } + }, + ASIC_DB: { + ASIC_STATE: { + RT_ENTRY_KEY_PREFIX + "30.1.10.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "50.1.1.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "50.2.2.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + "SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x6000000000d76": { + "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": "oid:0x3000000000d4b" + } + } + }, + CNTR_DB: { + "COUNTERS_RIF_NAME_MAP": { "Vlan3001": "oid:0x6000000000d76" } + } + }, + RESULT: { + "results": { + "missed_in_app_db_routes": { + "Vnet1": { + "routes": [ + "50.2.2.0/24" + ] + } + } + } + } + }, + "3": { + DESCR: "VNET routes are missed in both ASIC and APP DB", + ARGS: "vnet_route_check", + RET: -1, + PRE: { + APPL_DB: { + VXLAN_TUNNEL_TABLE: { + "tunnel_v4": { "src_ip": "10.1.0.32" } + }, + VNET_TABLE: { + "Vnet1": { "vxlan_tunnel": "tunnel_v4", "vni": "10001" } + }, + INTF_TABLE: { + "Vlan3001": { "vnet_name": "Vnet1" }, + "Vlan3001:30.1.10.1/24": {} + }, + VNET_ROUTE_TABLE: { + "Vnet1:30.1.10.0/24": { "ifname": "Vlan3001" }, + "Vnet1:50.1.1.0/24": { "ifname": "Vlan3001" }, + } + }, + ASIC_DB: { + ASIC_STATE: { + RT_ENTRY_KEY_PREFIX + "30.1.10.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "50.2.2.0/24" + RT_ENTRY_KEY_SUFFIX: {}, + "SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x6000000000d76": { + "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": "oid:0x3000000000d4b" + } + } + }, + CNTR_DB: { + "COUNTERS_RIF_NAME_MAP": { "Vlan3001": "oid:0x6000000000d76" } + } + }, + RESULT: { + "results": { + "missed_in_app_db_routes": { + "Vnet1": { + "routes": [ + "50.2.2.0/24" + ] + } + }, + "missed_in_asic_db_routes": { + "Vnet1": { + "routes": [ + "50.1.1.0/24" + ] + } + } + } + } + } +} + + +def do_start_test(tname, tno, ctdata): + global current_test_name, current_test_no, current_test_data + global tables_returned + + current_test_name = tname + current_test_no = tno + current_test_data = ctdata + tables_returned = {} + + print("Starting test case {} number={}".format(tname, tno)) + + +class Table: + def __init__(self, db, tbl): + self.db = db + self.tbl = tbl + self.data = copy.deepcopy(self.get_val(current_test_data[PRE], [db, tbl])) + + def get_val(self, d, keys): + for k in keys: + d = d[k] if k in d else {} + return d + + def getKeys(self): + return list(self.data.keys()) + + def get(self, key): + ret = copy.deepcopy(self.data.get(key, {})) + return (True, ret) + + +db_conns = {"APPL_DB": APPL_DB, "ASIC_DB": ASIC_DB, "COUNTERS_DB": CNTR_DB} +def conn_side_effect(arg, _): + return db_conns[arg] + + +def table_side_effect(db, tbl): + if not db in tables_returned: + tables_returned[db] = {} + if not tbl in tables_returned[db]: + tables_returned[db][tbl] = Table(db, tbl) + return tables_returned[db][tbl] + + +class mock_db_conn: + def __init__(self, db): + self.db_name = None + for (k, v) in db_conns.items(): + if v == db: + self.db_name = k + assert self.db_name != None + + def getDbName(self): + return self.db_name + + +def table_side_effect(db, tbl): + if not db in tables_returned: + tables_returned[db] = {} + if not tbl in tables_returned[db]: + tables_returned[db][tbl] = Table(db, tbl) + return tables_returned[db][tbl] + + +def set_mock(mock_table, mock_conn): + mock_conn.side_effect = conn_side_effect + mock_table.side_effect = 
table_side_effect


class TestVnetRouteCheck(object):
    def setup(self):
        pass

    def init(self):
        vnet_route_check.UNIT_TESTING = 1

    @patch("vnet_route_check.swsscommon.DBConnector")
    @patch("vnet_route_check.swsscommon.Table")
    def test_vnet_route_check(self, mock_table, mock_conn):
        self.init()
        ret = 0

        set_mock(mock_table, mock_conn)
        for (i, ct_data) in test_data.items():
            do_start_test("route_test", i, ct_data)

            with patch('sys.argv', ct_data[ARGS].split()):
                ret, res = vnet_route_check.main()
                expect_ret = ct_data[RET] if RET in ct_data else 0
                expect_res = ct_data[RESULT] if RESULT in ct_data else None
                if res:
                    print("res={}".format(json.dumps(res, indent=4)))
                if expect_res:
                    print("expect_res={}".format(json.dumps(expect_res, indent=4)))
                assert ret == expect_ret
                assert res == expect_res

From 0f4988bc285ee14aa383e004a42d2043716713fa Mon Sep 17 00:00:00 2001
From: Andriy Yurkiv <70649192+ayurkiv-nvda@users.noreply.github.com>
Date: Tue, 4 May 2021 10:35:32 +0300
Subject: [PATCH 25/41] Add pg-drop script to sonic filesystem (#1583)

- What I did
Added 'pg-drop' to the file system of SONiC

- How I did it
Add 'scripts/pg-drop' to setup.py file

- How to verify it
Check that 'pg-drop' script exists in /usr/local/bin/pg-drop

Signed-off-by: Andriy Yurkiv
---
 setup.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/setup.py b/setup.py
index d857cd3723..77a771650f 100644
--- a/setup.py
+++ b/setup.py
@@ -108,6 +108,7 @@
         'scripts/nbrshow',
         'scripts/neighbor_advertiser',
         'scripts/pcmping',
+        'scripts/pg-drop',
         'scripts/port2alias',
         'scripts/portconfig',
         'scripts/portstat',

From 9492eabcf09161a5f4410eb51eb740f70570c605 Mon Sep 17 00:00:00 2001
From: Andriy Yurkiv <70649192+ayurkiv-nvda@users.noreply.github.com>
Date: Tue, 4 May 2021 19:34:27 +0300
Subject: [PATCH 26/41] Use swsscommon instead of swsssdk (#1510)

#### What I did
Changed code to use swsscommon instead of swsssdk in counterpoll

#### How I did it
Removed an explicit function call into the swsssdk module

#### How to verify it
Run counterpoll tests

---
 counterpoll/main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/counterpoll/main.py b/counterpoll/main.py
index ff9ca49dd4..cc0060d991 100644
--- a/counterpoll/main.py
+++ b/counterpoll/main.py
@@ -129,7 +129,7 @@ def disable():
 @click.pass_context
 def pg_drop(ctx):
     """ Ingress PG drop counter commands """
-    ctx.obj = swsssdk.ConfigDBConnector()
+    ctx.obj = ConfigDBConnector()
     ctx.obj.connect()
 
 @pg_drop.command()

From be974bf33604356015c1fc39f563677256ff92ef Mon Sep 17 00:00:00 2001
From: Sumukha Tumkur Vani
Date: Wed, 5 May 2021 09:16:22 -0700
Subject: [PATCH 27/41] [neighbor_advertiser] Use existing tunnel if present for creating tunnel mappings (#1589)

---
 scripts/neighbor_advertiser       | 18 ++++++++++++++----
 tests/neighbor_advertiser_test.py |  9 +++++++++
 2 files changed, 23 insertions(+), 4 deletions(-)

diff --git a/scripts/neighbor_advertiser b/scripts/neighbor_advertiser
index dc38cf26c3..5cc09d32c9 100644
--- a/scripts/neighbor_advertiser
+++ b/scripts/neighbor_advertiser
@@ -169,9 +169,11 @@ def get_loopback_addr(ip_ver):
 def get_vlan_interfaces():
     vlan_info = config_db.get_table('VLAN')
     vlan_interfaces = []
-
+    vlan_intfs = config_db.get_table('VLAN_INTERFACE')
+    # Skip L2 VLANs
     for vlan_name in vlan_info:
-        vlan_interfaces.append(vlan_name)
+        if vlan_name in vlan_intfs:
+            vlan_interfaces.append(vlan_name)
 
     return vlan_interfaces
 
@@ -502,6 +504,14 @@ def reset_mirror_tunnel():
 # Set vxlan tunnel
 #
 
+def
check_existing_tunnel(): + vxlan_tunnel = config_db.get_table('VXLAN_TUNNEL') + if len(vxlan_tunnel): + global VXLAN_TUNNEL_NAME + VXLAN_TUNNEL_NAME = list(vxlan_tunnel.keys())[0] + return True + return False + def add_vxlan_tunnel(dst_ipv4_addr): vxlan_tunnel_info = { 'src_ip': get_loopback_addr(4), @@ -517,12 +527,12 @@ def add_vxlan_tunnel_map(): 'vni': get_vlan_interface_vxlan_id(vlan_intf_name), 'vlan': vlan_intf_name } - config_db.set_entry('VXLAN_TUNNEL_MAP', (VXLAN_TUNNEL_NAME, VXLAN_TUNNEL_MAP_PREFIX + str(index)), vxlan_tunnel_map_info) def set_vxlan_tunnel(ferret_server_ip): - add_vxlan_tunnel(ferret_server_ip) + if not check_existing_tunnel(): + add_vxlan_tunnel(ferret_server_ip) add_vxlan_tunnel_map() log.log_info('Finish setting vxlan tunnel; Ferret: {}'.format(ferret_server_ip)) diff --git a/tests/neighbor_advertiser_test.py b/tests/neighbor_advertiser_test.py index 4a7ab41863..3ad575c983 100644 --- a/tests/neighbor_advertiser_test.py +++ b/tests/neighbor_advertiser_test.py @@ -57,3 +57,12 @@ def test_neighbor_advertiser_slice(self, set_up): } ) assert output == expected_output + + def test_set_vxlan(self, set_up): + assert(neighbor_advertiser.check_existing_tunnel()) + neighbor_advertiser.add_vxlan_tunnel_map() + tunnel_mapping = neighbor_advertiser.config_db.get_table('VXLAN_TUNNEL_MAP') + expected_mapping = {("vtep1", "map_1"): {"vni": "1000", "vlan": "Vlan1000"}, ("vtep1", "map_2"): {"vni": "2000", "vlan": "Vlan2000"}} + for key in expected_mapping.keys(): + assert(key in tunnel_mapping.keys()) + assert(expected_mapping[key] == tunnel_mapping[key]) From fff40512b21c5e894856a3723e52b5f711cf8edd Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Wed, 5 May 2021 10:09:52 -0700 Subject: [PATCH 28/41] Fixing serial number read to get from DB if it is populated (#1580) #### What I did Modified show version command to pick serial number from STATE_DB if it was populated instead of getting it from EEPROM. #### How I did it Check state_db to see if serial number EEPROM section is populated. If yes use the data from DB. 
If not, read it from the decode-syseeprom --- show/main.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/show/main.py b/show/main.py index b0b2986a78..d0ca14650a 100755 --- a/show/main.py +++ b/show/main.py @@ -960,8 +960,15 @@ def version(verbose): asic_type = version_info['asic_type'] asic_count = multi_asic.get_num_asics() - serial_number_cmd = "sudo decode-syseeprom -s" - serial_number = subprocess.Popen(serial_number_cmd, shell=True, text=True, stdout=subprocess.PIPE) + serial_number = None + db = SonicV2Connector() + db.connect(db.STATE_DB) + eeprom_table = db.get_all(db.STATE_DB, 'EEPROM_INFO|0x23') + if "Name" in eeprom_table and eeprom_table["Name"] == "Serial Number" and "Value" in eeprom_table: + serial_number = eeprom_table["Value"] + else: + serial_number_cmd = "sudo decode-syseeprom -s" + serial_number = subprocess.Popen(serial_number_cmd, shell=True, text=True, stdout=subprocess.PIPE).stdout.read() sys_uptime_cmd = "uptime" sys_uptime = subprocess.Popen(sys_uptime_cmd, shell=True, text=True, stdout=subprocess.PIPE) @@ -976,7 +983,7 @@ def version(verbose): click.echo("HwSKU: {}".format(hwsku)) click.echo("ASIC: {}".format(asic_type)) click.echo("ASIC Count: {}".format(asic_count)) - click.echo("Serial Number: {}".format(serial_number.stdout.read().strip())) + click.echo("Serial Number: {}".format(serial_number.strip())) click.echo("Uptime: {}".format(sys_uptime.stdout.read().strip())) click.echo("\nDocker images:") cmd = 'sudo docker images --format "table {{.Repository}}\\t{{.Tag}}\\t{{.ID}}\\t{{.Size}}"' From 615e5312a2fa89e9f4736832338ce7a9256e7004 Mon Sep 17 00:00:00 2001 From: Travis Van Duyn Date: Wed, 5 May 2021 12:22:40 -0700 Subject: [PATCH 29/41] [show][config] Add new snmp commands (#1347) Added new SNMP show and config commands using ConfigDB as well as unittests. show commands: show runningconfiguration snmp show runningconfiguration snmp contact [--json] show runningconfiguration snmp location [--json] show runningconfiguration snmp community [--json] show runningconfiguration snmp user [--json] config commands: sudo config snmp community add/del/replace sudo config snmp contact add/del/modify sudo config snmp location add/del/modify sudo config snmp user add/del --- config/main.py | 540 +++++++++++++++++++ show/main.py | 179 ++++++- tests/config_snmp_test.py | 872 +++++++++++++++++++++++++++++++ tests/mock_tables/config_db.json | 166 ++++++ tests/show_snmp_test.py | 467 +++++++++++++++++ 5 files changed, 2210 insertions(+), 14 deletions(-) create mode 100644 tests/config_snmp_test.py create mode 100644 tests/show_snmp_test.py diff --git a/config/main.py b/config/main.py index e9bab3172d..953af72e79 100644 --- a/config/main.py +++ b/config/main.py @@ -2191,6 +2191,546 @@ def delete_snmptrap_server(ctx, ver): cmd="systemctl restart snmp" os.system (cmd) + + +# +# 'snmp' group ('config snmp ...') +# +@config.group(cls=clicommon.AbbreviationGroup, name='snmp') +@clicommon.pass_db +def snmp(db): + """SNMP configuration tasks""" + + +@snmp.group(cls=clicommon.AbbreviationGroup) +@clicommon.pass_db +def community(db): + pass + + +def is_valid_community_type(commstr_type): + commstr_types = ['RO', 'RW'] + if commstr_type not in commstr_types: + click.echo("Invalid community type. 
Must be either RO or RW")
+        return False
+    return True
+
+
+def is_valid_user_type(user_type):
+    convert_user_type = {'noauthnopriv': 'noAuthNoPriv', 'authnopriv': 'AuthNoPriv', 'priv': 'Priv'}
+    if user_type not in convert_user_type:
+        message = ("Invalid user type. Must be one of these three "
+                   "'noauthnopriv' or 'authnopriv' or 'priv'")
+        click.echo(message)
+        return False, message
+    return True, convert_user_type[user_type]
+
+
+def is_valid_auth_type(user_auth_type):
+    user_auth_types = ['MD5', 'SHA', 'HMAC-SHA-2']
+    if user_auth_type not in user_auth_types:
+        click.echo("Invalid user authentication type. Must be one of these 'MD5', 'SHA', or 'HMAC-SHA-2'")
+        return False
+    return True
+
+
+def is_valid_encrypt_type(encrypt_type):
+    encrypt_types = ['DES', 'AES']
+    if encrypt_type not in encrypt_types:
+        click.echo("Invalid user encryption type. Must be one of these two 'DES' or 'AES'")
+        return False
+    return True
+
+
+def snmp_community_secret_check(snmp_secret):
+    excluded_special_symbols = ['@', ":"]
+    if len(snmp_secret) > 32:
+        click.echo("SNMP community string length should not be greater than 32")
+        click.echo("SNMP community string should not have any of these special "
+                   "symbols {}".format(excluded_special_symbols))
+        click.echo("FAILED: SNMP community string length should not be greater than 32")
+        return False
+    if any(char in excluded_special_symbols for char in snmp_secret):
+        click.echo("SNMP community string length should not be greater than 32")
+        click.echo("SNMP community string should not have any of these special "
+                   "symbols {}".format(excluded_special_symbols))
+        click.echo("FAILED: SNMP community string should not have any of these "
+                   "special symbols {}".format(excluded_special_symbols))
+        return False
+    return True
+
+
+def snmp_username_check(snmp_username):
+    excluded_special_symbols = ['@', ":"]
+    if len(snmp_username) > 32:
+        click.echo("SNMP user {} length should not be greater than 32 characters".format(snmp_username))
+        click.echo("SNMP community string should not have any of these special "
+                   "symbols {}".format(excluded_special_symbols))
+        click.echo("FAILED: SNMP user {} length should not be greater than 32 characters".format(snmp_username))
+        return False
+    if any(char in excluded_special_symbols for char in snmp_username):
+        click.echo("SNMP user {} length should not be greater than 32 characters".format(snmp_username))
+        click.echo("SNMP community string should not have any of these special "
+                   "symbols {}".format(excluded_special_symbols))
+        click.echo("FAILED: SNMP user {} should not have any of these special "
+                   "symbols {}".format(snmp_username, excluded_special_symbols))
+        return False
+    return True
+
+
+def snmp_user_secret_check(snmp_secret):
+    excluded_special_symbols = ['@', ":"]
+    if len(snmp_secret) < 8:
+        click.echo("SNMP user password length should be at least 8 characters")
+        click.echo("SNMP user password length should not be greater than 64")
+        click.echo("SNMP user password should not have any of these special "
+                   "symbols {}".format(excluded_special_symbols))
+        click.echo("FAILED: SNMP user password length should be at least 8 characters")
+        return False
+    if len(snmp_secret) > 64:
+        click.echo("SNMP user password length should be at least 8 characters")
+        click.echo("SNMP user password length should not be greater than 64")
+        click.echo("SNMP user password should not have any of these special "
+                   "symbols {}".format(excluded_special_symbols))
+        click.echo("FAILED: SNMP user password length
should not be greater than 64")
+        return False
+    if any(char in excluded_special_symbols for char in snmp_secret):
+        click.echo("SNMP user password length should be at least 8 characters")
+        click.echo("SNMP user password length should not be greater than 64")
+        click.echo("SNMP user password should not have any of these special "
+                   "symbols {}".format(excluded_special_symbols))
+        click.echo("FAILED: SNMP user password should not have any of these special "
+                   "symbols {}".format(excluded_special_symbols))
+        return False
+    return True
+
+
+@community.command('add')
+@click.argument('community', metavar='<community>', required=True)
+@click.argument('string_type', metavar='<string_type>', required=True)
+@clicommon.pass_db
+def add_community(db, community, string_type):
+    """ Add snmp community string"""
+    string_type = string_type.upper()
+    if not is_valid_community_type(string_type):
+        sys.exit(1)
+    if not snmp_community_secret_check(community):
+        sys.exit(2)
+    snmp_communities = db.cfgdb.get_table("SNMP_COMMUNITY")
+    if community in snmp_communities:
+        click.echo("SNMP community {} is already configured".format(community))
+        sys.exit(3)
+    db.cfgdb.set_entry('SNMP_COMMUNITY', community, {'TYPE': string_type})
+    click.echo("SNMP community {} added to configuration".format(community))
+    try:
+        click.echo("Restarting SNMP service...")
+        clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
+        clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
+    except SystemExit as e:
+        click.echo("Restart service snmp failed with error {}".format(e))
+        raise click.Abort()
+
+
+@community.command('del')
+@click.argument('community', metavar='<community>', required=True)
+@clicommon.pass_db
+def del_community(db, community):
+    """ Delete snmp community string"""
+    snmp_communities = db.cfgdb.get_table("SNMP_COMMUNITY")
+    if community not in snmp_communities:
+        click.echo("SNMP community {} is not configured".format(community))
+        sys.exit(1)
+    else:
+        db.cfgdb.set_entry('SNMP_COMMUNITY', community, None)
+        click.echo("SNMP community {} removed from configuration".format(community))
+        try:
+            click.echo("Restarting SNMP service...")
+            clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
+            clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
+        except SystemExit as e:
+            click.echo("Restart service snmp failed with error {}".format(e))
+            raise click.Abort()
+
+
+@community.command('replace')
+@click.argument('current_community', metavar='<current_community>', required=True)
+@click.argument('new_community', metavar='<new_community>', required=True)
+@clicommon.pass_db
+def replace_community(db, current_community, new_community):
+    """ Replace snmp community string"""
+    snmp_communities = db.cfgdb.get_table("SNMP_COMMUNITY")
+    if current_community not in snmp_communities:
+        click.echo("Current SNMP community {} is not configured".format(current_community))
+        sys.exit(1)
+    if not snmp_community_secret_check(new_community):
+        sys.exit(2)
+    elif new_community in snmp_communities:
+        click.echo("New SNMP community {} to replace current SNMP community {} already "
+                   "configured".format(new_community, current_community))
+        sys.exit(3)
+    else:
+        string_type = snmp_communities[current_community]['TYPE']
+        db.cfgdb.set_entry('SNMP_COMMUNITY', new_community, {'TYPE': string_type})
+        click.echo("SNMP community {} added to configuration".format(new_community))
+        db.cfgdb.set_entry('SNMP_COMMUNITY', current_community, None)
+        click.echo('SNMP community {} replaced community
{}'.format(new_community, current_community))
+        try:
+            click.echo("Restarting SNMP service...")
+            clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
+            clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
+        except SystemExit as e:
+            click.echo("Restart service snmp failed with error {}".format(e))
+            raise click.Abort()
+
+
+@snmp.group(cls=clicommon.AbbreviationGroup)
+@clicommon.pass_db
+def contact(db):
+    pass
+
+
+def is_valid_email(email):
+    return bool(re.search(r"^[\w\.\+\-]+\@[\w]+\.[a-z]{2,3}$", email))
+
+
+@contact.command('add')
+@click.argument('contact', metavar='<contact>', required=True)
+@click.argument('contact_email', metavar='<contact_email>', required=True)
+@clicommon.pass_db
+def add_contact(db, contact, contact_email):
+    """ Add snmp contact name and email """
+    snmp = db.cfgdb.get_table("SNMP")
+    try:
+        if snmp['CONTACT']:
+            click.echo("Contact already exists. Use sudo config snmp contact modify instead")
+            sys.exit(1)
+        else:
+            db.cfgdb.set_entry('SNMP', 'CONTACT', {contact: contact_email})
+            click.echo("Contact name {} and contact email {} have been added to "
+                       "configuration".format(contact, contact_email))
+            try:
+                click.echo("Restarting SNMP service...")
+                clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
+                clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
+            except SystemExit as e:
+                click.echo("Restart service snmp failed with error {}".format(e))
+                raise click.Abort()
+    except KeyError:
+        if "CONTACT" not in snmp.keys():
+            if not is_valid_email(contact_email):
+                click.echo("Contact email {} is not valid".format(contact_email))
+                sys.exit(2)
+            db.cfgdb.set_entry('SNMP', 'CONTACT', {contact: contact_email})
+            click.echo("Contact name {} and contact email {} have been added to "
+                       "configuration".format(contact, contact_email))
+            try:
+                click.echo("Restarting SNMP service...")
+                clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
+                clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
+            except SystemExit as e:
+                click.echo("Restart service snmp failed with error {}".format(e))
+                raise click.Abort()
+
+
+@contact.command('del')
+@click.argument('contact', metavar='<contact>', required=True)
+@clicommon.pass_db
+def del_contact(db, contact):
+    """ Delete snmp contact name and email """
+    snmp = db.cfgdb.get_table("SNMP")
+    try:
+        if contact != list(snmp['CONTACT'].keys())[0]:
+            click.echo("SNMP contact {} is not configured".format(contact))
+            sys.exit(1)
+        else:
+            db.cfgdb.set_entry('SNMP', 'CONTACT', None)
+            click.echo("SNMP contact {} removed from configuration".format(contact))
+            try:
+                click.echo("Restarting SNMP service...")
+                clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
+                clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
+            except SystemExit as e:
+                click.echo("Restart service snmp failed with error {}".format(e))
+                raise click.Abort()
+    except KeyError:
+        if "CONTACT" not in snmp.keys():
+            click.echo("Contact name {} is not configured".format(contact))
+            sys.exit(2)
+
+
+@contact.command('modify')
+@click.argument('contact', metavar='<contact>', required=True)
+@click.argument('contact_email', metavar='<contact_email>', required=True)
+@clicommon.pass_db
+def modify_contact(db, contact, contact_email):
+    """ Modify snmp contact"""
+    snmp = db.cfgdb.get_table("SNMP")
+    try:
+        current_snmp_contact_name = (list(snmp['CONTACT'].keys()))[0]
+        if current_snmp_contact_name == contact:
+
current_snmp_contact_email = snmp['CONTACT'][contact] + else: + current_snmp_contact_email = '' + if contact == current_snmp_contact_name and contact_email == current_snmp_contact_email: + click.echo("SNMP contact {} {} already exists".format(contact, contact_email)) + sys.exit(1) + elif contact == current_snmp_contact_name and contact_email != current_snmp_contact_email: + if not is_valid_email(contact_email): + click.echo("Contact email {} is not valid".format(contact_email)) + sys.exit(2) + db.cfgdb.mod_entry('SNMP', 'CONTACT', {contact: contact_email}) + click.echo("SNMP contact {} email updated to {}".format(contact, contact_email)) + try: + click.echo("Restarting SNMP service...") + clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) + clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + except SystemExit as e: + click.echo("Restart service snmp failed with error {}".format(e)) + raise click.Abort() + else: + if not is_valid_email(contact_email): + click.echo("Contact email {} is not valid".format(contact_email)) + sys.exit(2) + db.cfgdb.set_entry('SNMP', 'CONTACT', None) + db.cfgdb.set_entry('SNMP', 'CONTACT', {contact: contact_email}) + click.echo("SNMP contact {} and contact email {} updated".format(contact, contact_email)) + try: + click.echo("Restarting SNMP service...") + clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) + clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + except SystemExit as e: + click.echo("Restart service snmp failed with error {}".format(e)) + raise click.Abort() + except KeyError: + if "CONTACT" not in snmp.keys(): + click.echo("Contact name {} is not configured".format(contact)) + sys.exit(3) + + +@snmp.group(cls=clicommon.AbbreviationGroup) +@clicommon.pass_db +def location(db): + pass + + +@location.command('add') +@click.argument('location', metavar='', required=True, nargs=-1) +@clicommon.pass_db +def add_location(db, location): + """ Add snmp location""" + if isinstance(location, tuple): + location = " ".join(location) + elif isinstance(location, list): + location = " ".join(location) + snmp = db.cfgdb.get_table("SNMP") + try: + if snmp['LOCATION']: + click.echo("Location already exists") + sys.exit(1) + except KeyError: + if "LOCATION" not in snmp.keys(): + db.cfgdb.set_entry('SNMP', 'LOCATION', {'Location': location}) + click.echo("SNMP Location {} has been added to configuration".format(location)) + try: + click.echo("Restarting SNMP service...") + clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) + clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + except SystemExit as e: + click.echo("Restart service snmp failed with error {}".format(e)) + raise click.Abort() + + +@location.command('del') +@click.argument('location', metavar='', required=True, nargs=-1) +@clicommon.pass_db +def delete_location(db, location): + """ Delete snmp location""" + if isinstance(location, tuple): + location = " ".join(location) + elif isinstance(location, list): + location = " ".join(location) + snmp = db.cfgdb.get_table("SNMP") + try: + if location == snmp['LOCATION']['Location']: + db.cfgdb.set_entry('SNMP', 'LOCATION', None) + click.echo("SNMP Location {} removed from configuration".format(location)) + try: + click.echo("Restarting SNMP service...") + clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) + clicommon.run_command("systemctl restart snmp.service", display_cmd=False) 
+            except SystemExit as e:
+                click.echo("Restart service snmp failed with error {}".format(e))
+                raise click.Abort()
+        else:
+            click.echo("SNMP Location {} does not exist. The location is {}".format(location, snmp['LOCATION']['Location']))
+            sys.exit(1)
+    except KeyError:
+        if "LOCATION" not in snmp.keys():
+            click.echo("SNMP Location {} is not configured".format(location))
+            sys.exit(2)
+
+
+@location.command('modify')
+@click.argument('location', metavar='', required=True, nargs=-1)
+@clicommon.pass_db
+def modify_location(db, location):
+    """ Modify snmp location"""
+    if isinstance(location, tuple):
+        location = " ".join(location)
+    elif isinstance(location, list):
+        location = " ".join(location)
+    snmp = db.cfgdb.get_table("SNMP")
+    try:
+        snmp_location = snmp['LOCATION']['Location']
+        if location in snmp_location:
+            click.echo("SNMP location {} already exists".format(location))
+            sys.exit(1)
+        else:
+            db.cfgdb.mod_entry('SNMP', 'LOCATION', {'Location': location})
+            click.echo("SNMP location {} modified in configuration".format(location))
+            try:
+                click.echo("Restarting SNMP service...")
+                clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False)
+                clicommon.run_command("systemctl restart snmp.service", display_cmd=False)
+            except SystemExit as e:
+                click.echo("Restart service snmp failed with error {}".format(e))
+                raise click.Abort()
+    except KeyError:
+        click.echo("Cannot modify SNMP Location. You must use the 'config snmp location add' command")
+        sys.exit(2)
+
+
+from enum import IntEnum
+
+
+class SnmpUserError(IntEnum):
+    NameCheckFailure = 1
+    TypeNoAuthNoPrivOrAuthNoPrivOrPrivCheckFailure = 2
+    RoRwCheckFailure = 3
+    NoAuthNoPrivHasAuthType = 4
+    AuthTypeMd5OrShaOrHmacsha2IsMissing = 5
+    AuthTypeMd5OrShaOrHmacsha2Failure = 6
+    AuthPasswordMissing = 7
+    AuthPasswordFailsComplexityRequirements = 8
+    EncryptPasswordNotAllowedWithAuthNoPriv = 9
+    EncryptTypeDesOrAesIsMissing = 10
+    EncryptTypeFailsComplexityRequirements = 11
+    EncryptPasswordMissingFailure = 12
+    EncryptPasswordFailsComplexityRequirements = 13
+    UserAlreadyConfigured = 14
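Since 'config snmp user add' reports each failure as a distinct process exit status drawn from this enum, a caller can translate a non-zero return code back into a readable name. A minimal sketch; the subprocess invocation is illustrative only:

    # Sketch only: interpret the exit status of 'config snmp user add'.
    import subprocess

    proc = subprocess.run(["config", "snmp", "user", "add", "u1", "noauthnopriv", "ro"])
    if proc.returncode != 0:
        # SnmpUserError(...) raises ValueError for codes outside 1-14.
        print("add failed:", SnmpUserError(proc.returncode).name)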
Please use 'AuthNoPriv' or 'Priv' instead") + sys.exit(SnmpUserError.NoAuthNoPrivHasAuthType) + else: + if not user_auth_type: + click.echo("User auth type is missing. Must be MD5, SHA, or HMAC-SHA-2") + sys.exit(SnmpUserError.AuthTypeMd5OrShaOrHmacsha2IsMissing) + if user_auth_type: + user_auth_type = user_auth_type.upper() + if not is_valid_auth_type(user_auth_type): + sys.exit(SnmpUserError.AuthTypeMd5OrShaOrHmacsha2Failure) + elif not user_auth_password: + click.echo("User auth password is missing") + sys.exit(SnmpUserError.AuthPasswordMissing) + elif user_auth_password: + if not snmp_user_secret_check(user_auth_password): + sys.exit(SnmpUserError.AuthPasswordFailsComplexityRequirements) + if user_type == "AuthNoPriv": + if user_encrypt_type: + click.echo("User encrypt type not used with 'AuthNoPriv'. Please use 'Priv' instead") + sys.exit(SnmpUserError.EncryptPasswordNotAllowedWithAuthNoPriv) + elif user_type == "Priv": + if not user_encrypt_type: + click.echo("User encrypt type is missing. Must be DES or AES") + sys.exit(SnmpUserError.EncryptTypeDesOrAesIsMissing) + if user_encrypt_type: + user_encrypt_type = user_encrypt_type.upper() + if not is_valid_encrypt_type(user_encrypt_type): + sys.exit(SnmpUserError.EncryptTypeFailsComplexityRequirements) + elif not user_encrypt_password: + click.echo("User encrypt password is missing") + sys.exit(SnmpUserError.EncryptPasswordMissingFailure) + elif user_encrypt_password: + if not snmp_user_secret_check(user_encrypt_password): + sys.exit(SnmpUserError.EncryptPasswordFailsComplexityRequirements) + snmp_users = db.cfgdb.get_table("SNMP_USER") + if user in snmp_users.keys(): + click.echo("SNMP user {} is already configured".format(user)) + sys.exit(SnmpUserError.UserAlreadyConfigured) + else: + if not user_auth_type: + user_auth_type = '' + if not user_auth_password: + user_auth_password = '' + if not user_encrypt_type: + user_encrypt_type = '' + if not user_encrypt_password: + user_encrypt_password = '' + db.cfgdb.set_entry('SNMP_USER', user, {'SNMP_USER_TYPE': user_type, + 'SNMP_USER_PERMISSION': user_permission_type, + 'SNMP_USER_AUTH_TYPE': user_auth_type, + 'SNMP_USER_AUTH_PASSWORD': user_auth_password, + 'SNMP_USER_ENCRYPTION_TYPE': user_encrypt_type, + 'SNMP_USER_ENCRYPTION_PASSWORD': user_encrypt_password}) + click.echo("SNMP user {} added to configuration".format(user)) + try: + click.echo("Restarting SNMP service...") + clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) + clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + except SystemExit as e: + click.echo("Restart service snmp failed with error {}".format(e)) + raise click.Abort() + + +@user.command('del') +@click.argument('user', metavar='', required=True) +@clicommon.pass_db +def del_user(db, user): + """ Del snmp user""" + snmp_users = db.cfgdb.get_table("SNMP_USER") + if user not in snmp_users: + click.echo("SNMP user {} is not configured".format(user)) + sys.exit(1) + else: + db.cfgdb.set_entry('SNMP_USER', user, None) + click.echo("SNMP user {} removed from configuration".format(user)) + try: + click.echo("Restarting SNMP service...") + clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) + clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + except SystemExit as e: + click.echo("Restart service snmp failed with error {}".format(e)) + raise click.Abort() + # # 'bgp' group ('config bgp ...') # diff --git a/show/main.py b/show/main.py index d0ca14650a..1cea9e6534 100755 --- 
a/show/main.py +++ b/show/main.py @@ -377,6 +377,7 @@ def snmptrap (ctx): body.append([ver, traptable[row]['DestIp'], traptable[row]['DestPort'], traptable[row]['vrf'], traptable[row]['Community']]) click.echo(tabulate(body, header)) + # # 'subinterfaces' group ("show subinterfaces ...") # @@ -1109,20 +1110,6 @@ def interfaces(interfacename, verbose): run_command(cmd, display_cmd=verbose) -# 'snmp' subcommand ("show runningconfiguration snmp") -@runningconfiguration.command() -@click.argument('server', required=False) -@click.option('--verbose', is_flag=True, help="Enable verbose output") -def snmp(server, verbose): - """Show SNMP information""" - cmd = "sudo docker exec snmp cat /etc/snmp/snmpd.conf" - - if server is not None: - cmd += " | grep -i agentAddress" - - run_command(cmd, display_cmd=verbose) - - # 'ntp' subcommand ("show runningconfiguration ntp") @runningconfiguration.command() @click.option('--verbose', is_flag=True, help="Enable verbose output") @@ -1140,6 +1127,170 @@ def ntp(verbose): print(tabulate(ntp_dict, headers=list(ntp_dict.keys()), tablefmt="simple", stralign='left', missingval="")) + +# 'snmp' subcommand ("show runningconfiguration snmp") +@runningconfiguration.group("snmp", invoke_without_command=True) +@clicommon.pass_db +@click.pass_context +def snmp(ctx, db): + """Show SNMP running configuration""" + if ctx.invoked_subcommand is None: + show_run_snmp(db.cfgdb) + + +# ("show runningconfiguration snmp community") +@snmp.command('community') +@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, + help="Display the output in JSON format") +@clicommon.pass_db +def community(db, json_output): + """show SNMP running configuration community""" + snmp_comm_header = ["Community String", "Community Type"] + snmp_comm_body = [] + snmp_comm_keys = db.cfgdb.get_table('SNMP_COMMUNITY') + snmp_comm_strings = snmp_comm_keys.keys() + if json_output: + click.echo(snmp_comm_keys) + else: + for line in snmp_comm_strings: + comm_string = line + comm_string_type = snmp_comm_keys[line]['TYPE'] + snmp_comm_body.append([comm_string, comm_string_type]) + click.echo(tabulate(natsorted(snmp_comm_body), snmp_comm_header)) + + +# ("show runningconfiguration snmp contact") +@snmp.command('contact') +@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, + help="Display the output in JSON format") +@clicommon.pass_db +def contact(db, json_output): + """show SNMP running configuration contact""" + snmp = db.cfgdb.get_table('SNMP') + snmp_header = ["Contact", "Contact Email"] + snmp_body = [] + if json_output: + try: + if snmp['CONTACT']: + click.echo(snmp['CONTACT']) + except KeyError: + snmp['CONTACT'] = {} + click.echo(snmp['CONTACT']) + else: + try: + if snmp['CONTACT']: + snmp_contact = list(snmp['CONTACT'].keys()) + snmp_contact_email = [snmp['CONTACT'][snmp_contact[0]]] + snmp_body.append([snmp_contact[0], snmp_contact_email[0]]) + except KeyError: + snmp['CONTACT'] = '' + click.echo(tabulate(snmp_body, snmp_header)) + + +# ("show runningconfiguration snmp location") +@snmp.command('location') +@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, + help="Display the output in JSON format") +@clicommon.pass_db +def location(db, json_output): + """show SNMP running configuration location""" + snmp = db.cfgdb.get_table('SNMP') + snmp_header = ["Location"] + snmp_body = [] + if json_output: + try: + if snmp['LOCATION']: + click.echo(snmp['LOCATION']) + except KeyError: + snmp['LOCATION'] = {} + 
click.echo(snmp['LOCATION']) + else: + try: + if snmp['LOCATION']: + snmp_location = [snmp['LOCATION']['Location']] + snmp_body.append(snmp_location) + except KeyError: + snmp['LOCATION'] = '' + click.echo(tabulate(snmp_body, snmp_header)) + + +# ("show runningconfiguration snmp user") +@snmp.command('user') +@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, + help="Display the output in JSON format") +@clicommon.pass_db +def users(db, json_output): + """show SNMP running configuration user""" + snmp_users = db.cfgdb.get_table('SNMP_USER') + snmp_user_header = ['User', "Permission Type", "Type", "Auth Type", "Auth Password", "Encryption Type", + "Encryption Password"] + snmp_user_body = [] + if json_output: + click.echo(snmp_users) + else: + for snmp_user, snmp_user_value in snmp_users.items(): + snmp_user_permissions_type = snmp_users[snmp_user].get('SNMP_USER_PERMISSION', 'Null') + snmp_user_auth_type = snmp_users[snmp_user].get('SNMP_USER_AUTH_TYPE', 'Null') + snmp_user_auth_password = snmp_users[snmp_user].get('SNMP_USER_AUTH_PASSWORD', 'Null') + snmp_user_encryption_type = snmp_users[snmp_user].get('SNMP_USER_ENCRYPTION_TYPE', 'Null') + snmp_user_encryption_password = snmp_users[snmp_user].get('SNMP_USER_ENCRYPTION_PASSWORD', 'Null') + snmp_user_type = snmp_users[snmp_user].get('SNMP_USER_TYPE', 'Null') + snmp_user_body.append([snmp_user, snmp_user_permissions_type, snmp_user_type, snmp_user_auth_type, + snmp_user_auth_password, snmp_user_encryption_type, snmp_user_encryption_password]) + click.echo(tabulate(natsorted(snmp_user_body), snmp_user_header)) + + +# ("show runningconfiguration snmp") +@clicommon.pass_db +def show_run_snmp(db, ctx): + snmp_contact_location_table = db.cfgdb.get_table('SNMP') + snmp_comm_table = db.cfgdb.get_table('SNMP_COMMUNITY') + snmp_users = db.cfgdb.get_table('SNMP_USER') + snmp_location_header = ["Location"] + snmp_location_body = [] + snmp_contact_header = ["SNMP_CONTACT", "SNMP_CONTACT_EMAIL"] + snmp_contact_body = [] + snmp_comm_header = ["Community String", "Community Type"] + snmp_comm_body = [] + snmp_user_header = ['User', "Permission Type", "Type", "Auth Type", "Auth Password", "Encryption Type", + "Encryption Password"] + snmp_user_body = [] + try: + if snmp_contact_location_table['LOCATION']: + snmp_location = [snmp_contact_location_table['LOCATION']['Location']] + snmp_location_body.append(snmp_location) + except KeyError: + snmp_contact_location_table['LOCATION'] = '' + click.echo(tabulate(snmp_location_body, snmp_location_header)) + click.echo("\n") + try: + if snmp_contact_location_table['CONTACT']: + snmp_contact = list(snmp_contact_location_table['CONTACT'].keys()) + snmp_contact_email = [snmp_contact_location_table['CONTACT'][snmp_contact[0]]] + snmp_contact_body.append([snmp_contact[0], snmp_contact_email[0]]) + except KeyError: + snmp_contact_location_table['CONTACT'] = '' + click.echo(tabulate(snmp_contact_body, snmp_contact_header)) + click.echo("\n") + snmp_comm_strings = snmp_comm_table.keys() + for line in snmp_comm_strings: + comm_string = line + comm_string_type = snmp_comm_table[line]['TYPE'] + snmp_comm_body.append([comm_string, comm_string_type]) + click.echo(tabulate(natsorted(snmp_comm_body), snmp_comm_header)) + click.echo("\n") + for snmp_user, snmp_user_value in snmp_users.items(): + snmp_user_permissions_type = snmp_users[snmp_user].get('SNMP_USER_PERMISSION', 'Null') + snmp_user_auth_type = snmp_users[snmp_user].get('SNMP_USER_AUTH_TYPE', 'Null') + snmp_user_auth_password = 
snmp_users[snmp_user].get('SNMP_USER_AUTH_PASSWORD', 'Null') + snmp_user_encryption_type = snmp_users[snmp_user].get('SNMP_USER_ENCRYPTION_TYPE', 'Null') + snmp_user_encryption_password = snmp_users[snmp_user].get('SNMP_USER_ENCRYPTION_PASSWORD', 'Null') + snmp_user_type = snmp_users[snmp_user].get('SNMP_USER_TYPE', 'Null') + snmp_user_body.append([snmp_user, snmp_user_permissions_type, snmp_user_type, snmp_user_auth_type, + snmp_user_auth_password, snmp_user_encryption_type, snmp_user_encryption_password]) + click.echo(tabulate(natsorted(snmp_user_body), snmp_user_header)) + + # 'syslog' subcommand ("show runningconfiguration syslog") @runningconfiguration.command() @click.option('--verbose', is_flag=True, help="Enable verbose output") diff --git a/tests/config_snmp_test.py b/tests/config_snmp_test.py new file mode 100644 index 0000000000..1be2704e47 --- /dev/null +++ b/tests/config_snmp_test.py @@ -0,0 +1,872 @@ +import sys +import os +import click +from click.testing import CliRunner + +import show.main as show +import clear.main as clear +import config.main as config + +import pytest + +from unittest import mock +from unittest.mock import patch +from utilities_common.db import Db + +tabular_data_show_run_snmp_contact_expected = """\ +Contact Contact Email\n--------- --------------------\ntestuser testuser@contoso.com +""" + +json_data_show_run_snmp_contact_expected = """\ +{'testuser': 'testuser@contoso.com'} +""" + +config_snmp_contact_add_del_new_contact ="""\ +Contact name testuser and contact email testuser@contoso.com have been added to configuration +Restarting SNMP service... +""" + +config_snmp_location_add_new_location ="""\ +SNMP Location public has been added to configuration +Restarting SNMP service... +""" + + +expected_snmp_community_add_new_community_ro_output = {"TYPE": "RO"} +expected_snmp_community_add_new_community_rw_output = {"TYPE": "RW"} +expected_snmp_community_replace_existing_community_with_new_community_output = {'TYPE': 'RW'} + +expected_snmp_user_priv_ro_md5_des_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'MD5', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'DES', + 'SNMP_USER_PERMISSION': 'RO', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_ro_md5_aes_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'MD5', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'AES', + 'SNMP_USER_PERMISSION': 'RO', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_ro_sha_des_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'SHA', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'DES', + 'SNMP_USER_PERMISSION': 'RO', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_ro_sha_aes_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'SHA', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'AES', + 'SNMP_USER_PERMISSION': 'RO', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_ro_hmac_sha_2_des_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'DES', + 'SNMP_USER_PERMISSION': 'RO', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_ro_hmac_sha_2_aes_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 
'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'AES', + 'SNMP_USER_PERMISSION': 'RO', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_rw_md5_des_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'MD5', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'DES', + 'SNMP_USER_PERMISSION': 'RW', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_rw_md5_aes_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'MD5', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'AES', + 'SNMP_USER_PERMISSION': 'RW', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_rw_sha_des_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'SHA', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'DES', + 'SNMP_USER_PERMISSION': 'RW', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_rw_sha_aes_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'SHA', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'AES', + 'SNMP_USER_PERMISSION': 'RW', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_rw_hmac_sha_2_des_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'DES', + 'SNMP_USER_PERMISSION': 'RW', + 'SNMP_USER_TYPE': 'Priv'} +expected_snmp_user_priv_rw_hmac_sha_2_aes_config_db_output = {'SNMP_USER_AUTH_PASSWORD': 'user_auth_pass', + 'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', + 'SNMP_USER_ENCRYPTION_PASSWORD': 'user_encrypt_pass', + 'SNMP_USER_ENCRYPTION_TYPE': 'AES', + 'SNMP_USER_PERMISSION': 'RW', + 'SNMP_USER_TYPE': 'Priv'} + +class TestSNMPConfigCommands(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["UTILITIES_UNIT_TESTING"] = "1" + + # Add snmp community tests + def test_config_snmp_community_add_new_community_ro(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["add"], + ["Everest", "ro"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP community Everest added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_COMMUNITY", "Everest") == expected_snmp_community_add_new_community_ro_output + + def test_config_snmp_community_add_new_community_rw(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["add"], + ["Shasta", "rw"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP community Shasta added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_COMMUNITY", "Shasta") == expected_snmp_community_add_new_community_rw_output + + def test_config_snmp_community_add_new_community_with_invalid_type(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["add"], ["Everest", "RT"]) + print(result.exit_code) + assert result.exit_code == 1 + assert 'Invalid community type. 
Must be either RO or RW' in result.output + + def test_config_snmp_community_add_invalid_community_over_32_characters(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["add"], + ["over_32_character_community_string", "ro"]) + print(result.exit_code) + assert result.exit_code == 2 + assert 'FAILED: SNMP community string length should be not be greater than 32' in result.output + + def test_config_snmp_community_add_invalid_community_with_excluded_special_characters(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["add"], + ["Test@snmp", "ro"]) + print(result.exit_code) + assert result.exit_code == 2 + assert 'FAILED: SNMP community string should not have any of these special symbols' in result.output + + def test_config_snmp_community_add_existing_community(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["add"], ["Rainer", "rw"]) + print(result.exit_code) + assert result.exit_code == 3 + assert 'SNMP community Rainer is already configured' in result.output + + # Del snmp community tests + def test_config_snmp_community_del_existing_community(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["del"], + ["Rainer"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP community Rainer removed from configuration' in result.output + assert db.cfgdb.get_entry("SNMP_COMMUNITY", "Everest") == {} + + def test_config_snmp_community_del_non_existing_community(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["del"], ["Everest"]) + print(result.exit_code) + assert result.exit_code == 1 + assert 'SNMP community Everest is not configured' in result.output + + # Replace snmp community tests + def test_config_snmp_community_replace_existing_community_with_new_community(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["replace"], + ["Rainer", "Everest"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP community Everest added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_COMMUNITY", "Everest") == \ + expected_snmp_community_replace_existing_community_with_new_community_output + + def test_config_snmp_community_replace_existing_community_non_existing_community(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["replace"], + ["Denali", "Everest"]) + print(result.exit_code) + assert result.exit_code == 1 + assert 'Current SNMP community Denali is not configured' in result.output + + def test_config_snmp_community_replace_new_community_already_exists(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["replace"], + ["Rainer", "msft"]) + print(result.exit_code) + assert result.exit_code == 3 + assert 'New SNMP community msft to replace current SNMP community Rainer already configured' in result.output + + def test_config_snmp_community_replace_with_invalid_new_community_bad_symbol(self): + runner = CliRunner() + result = 
runner.invoke(config.config.commands["snmp"].commands["community"].commands["replace"], + ["Rainer", "msft@"]) + print(result.exit_code) + assert result.exit_code == 2 + assert 'FAILED: SNMP community string should not have any of these special symbols' in result.output + + def test_config_snmp_community_replace_with_invalid_new_community_over_32_chars(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["community"].commands["replace"], + ["Rainer", "over_32_characters_community_string"]) + print(result.exit_code) + assert result.exit_code == 2 + assert 'FAILED: SNMP community string length should be not be greater than 32' in result.output + + + # Del snmp contact when CONTACT not setup in REDIS + def test_config_snmp_contact_del_without_contact_redis(self): + db = Db() + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["del"], ["blah"], obj=db) + print(result.exit_code) + assert result.exit_code == 2 + assert 'Contact name blah is not configured' in result.output + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {} + + def test_config_snmp_contact_modify_without_contact_redis(self): + db = Db() + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["modify"], + ["blah", "blah@contoso.com"], obj=db) + print(result.exit_code) + assert result.exit_code == 3 + assert 'Contact name blah is not configured' in result.output + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {} + + def test_config_snmp_contact_add_del_new_contact(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["del"], + ["testuser"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert 'SNMP contact testuser removed from configuration' in result.output + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {} + + # Add snmp contact tests + def test_config_snmp_contact_add_with_existing_contact(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["blah", "blah@contoso.com"], obj=db) + print(result.exit_code) + assert result.exit_code == 1 + assert 'Contact already exists. 
Use sudo config snmp contact modify instead' in result.output + + def test_config_snmp_contact_add_invalid_email(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testusercontoso.com"], obj=db) + print(result.exit_code) + assert result.exit_code == 2 + assert "Contact email testusercontoso.com is not valid" in result.output + + + # Delete snmp contact tests + def test_config_snmp_contact_del_new_contact_when_contact_exists(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["del"], ["blah"], obj=db) + print(result.exit_code) + assert result.exit_code == 1 + assert 'SNMP contact blah is not configured' in result.output + + def test_config_snmp_contact_del_with_existing_contact(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["del"], + ["testuser"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP contact testuser removed from configuration' in result.output + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {} + + # Modify snmp contact tests + def test_config_snmp_contact_modify_email_with_existing_contact(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["modify"], + ["testuser", "testuser@test.com"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP contact testuser email updated to testuser@test.com' in result.output + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@test.com"} + + def test_config_snmp_contact_modify_contact_and_email_with_existing_entry(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code 
== 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["modify"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + assert result.exit_code == 1 + assert 'SNMP contact testuser testuser@contoso.com already exists' in result.output + + def test_config_snmp_contact_modify_existing_contact_with_invalid_email(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["modify"], + ["testuser", "testuser@contosocom"], obj=db) + print(result.exit_code) + assert result.exit_code == 2 + assert 'Contact email testuser@contosocom is not valid' in result.output + + + def test_config_snmp_contact_modify_new_contact_with_invalid_email(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["modify"], + ["blah", "blah@contoso@com"], obj=db) + print(result.exit_code) + assert result.exit_code == 2 + assert 'Contact email blah@contoso@com is not valid' in result.output + + # Add snmp location tests + def test_config_snmp_location_add_exiting_location_with_same_location_already_existing(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + ["public"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_location_add_new_location + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "public"} + + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + ["public"], obj=db) + print(result.exit_code) + assert result.exit_code == 1 + assert 'Location already exists' in result.output + + def test_config_snmp_location_add_new_location_with_location_already_existing(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + ["public"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_location_add_new_location + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "public"} + + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + 
["Mile High"], obj=db) + print(result.exit_code) + assert result.exit_code == 1 + assert 'Location already exists' in result.output + + # Del snmp location tests + def test_config_snmp_location_del_with_existing_location(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + ["public"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_location_add_new_location + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "public"} + + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["del"], + ["public"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP Location public removed from configuration' in result.output + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {} + + def test_config_snmp_location_del_new_location_with_location_already_existing(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + ["public"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_location_add_new_location + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "public"} + + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["del"], + ["Mile High"], obj=db) + print(result.exit_code) + assert result.exit_code == 1 + assert 'SNMP Location Mile High does not exist. The location is public' in result.output + + # Modify snmp location tests + def test_config_snmp_location_modify_with_same_location(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + ["public"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_location_add_new_location + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "public"} + + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["modify"], + ["public"], obj=db) + print(result.exit_code) + assert result.exit_code == 1 + assert 'SNMP location public already exists' in result.output + + def test_config_snmp_location_modify_without_redis(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["modify"], + ["Rainer"],obj=db) + print(result.exit_code) + assert result.exit_code == 2 + assert "Cannot modify SNMP Location. 
You must use the 'config snmp location add' command" in result.output
+            assert db.cfgdb.get_entry("SNMP", "LOCATION") == {}
+
+    def test_config_snmp_location_modify_without_existing_location(self):
+        db = Db()
+        runner = CliRunner()
+        with mock.patch('utilities_common.cli.run_command') as mock_run_command:
+            result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"],
+                                   ["public"], obj=db)
+            print(result.exit_code)
+            print(result.output)
+            assert result.exit_code == 0
+            assert result.output == config_snmp_location_add_new_location
+            assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "public"}
+
+            result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["modify"],
+                                   ["Rainer"], obj=db)
+            print(result.exit_code)
+            assert result.exit_code == 0
+            assert "SNMP location Rainer modified in configuration" in result.output
+            assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "Rainer"}
+
+    # Add snmp user tests
+    def test_config_snmp_user_add_invalid_user_name_over_32_characters(self):
+        runner = CliRunner()
+        result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"],
+                               ["over_32_characters_community_user", "noAUthNoPRiv", "ro"])
+        print(result.exit_code)
+        assert result.exit_code == 1
+        assert 'FAILED: SNMP user over_32_characters_community_user length should not be greater than 32 characters' \
+            in result.output
+
+    def test_config_snmp_user_add_excluded_special_characters_in_username(self):
+        runner = CliRunner()
+        result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"],
+                               ["Test@user", "noAUthNoPRiv", "ro"])
+        print(result.exit_code)
+        assert result.exit_code == 1
+        assert 'FAILED: SNMP user Test@user should not have any of these special symbols' in result.output
+
+    def test_config_snmp_user_add_existing_user(self):
+        runner = CliRunner()
+        result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"],
+                               ["test_nopriv_RO_1", "noAUthNoPRiv", "ro"])
+        print(result.exit_code)
+        assert result.exit_code == 14
+        assert 'SNMP user test_nopriv_RO_1 is already configured' in result.output
+
+    def test_config_snmp_user_add_invalid_user_type(self):
+        runner = CliRunner()
+        result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"],
+                               ["test_nopriv_RO_3", "nopriv", "ro"])
+        print(result.exit_code)
+        print(result)
+        print(result.output)
+        assert result.exit_code == 2
+        assert "Invalid user type. Must be one of these one of these three 'noauthnopriv' or 'authnopriv' or 'priv'" in result.output
+
+    def test_config_snmp_user_add_invalid_permission_type(self):
+        runner = CliRunner()
+        result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"],
+                               ["test_nopriv_RO_3", "noauthnopriv", "ab"])
+        print(result.exit_code)
+        assert result.exit_code == 3
+        assert "Invalid community type. Must be either RO or RW" in result.output
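The invalid-argument tests that follow all repeat one invoke-and-assert pattern; a condensed parametrized equivalent, shown only as a sketch over two of the cases below, would be:

    # Sketch only: a parametrized form of the pattern used in the tests below.
    @pytest.mark.parametrize("args, code, msg", [
        (["test_nopriv_RO_3", "noauthnopriv", "ab"], 3, "Invalid community type"),
        (["test_nopriv_RO_3", "priv", "ro"], 5, "User auth type is missing"),
    ])
    def test_user_add_rejects_bad_args(self, args, code, msg):
        result = CliRunner().invoke(config.config.commands["snmp"].commands["user"].commands["add"], args)
        assert result.exit_code == code
        assert msg in result.output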
Please use 'AuthNoPriv' or 'Priv' instead" in result.output + + def test_config_snmp_user_add_user_type_authnopriv_missing_auth_type(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "authnopriv", "ro"]) + print(result.exit_code) + assert result.exit_code == 5 + assert "User auth type is missing. Must be MD5, SHA, or HMAC-SHA-2" in result.output + + def test_config_snmp_user_add_user_type_authnopriv_missing_auth_password(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "authnopriv", "ro", "sha"]) + print(result.exit_code) + assert result.exit_code == 7 + assert "User auth password is missing" in result.output + + def test_config_snmp_user_add_user_type_authnopriv_with_unnecessary_encrypt_type(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "authnopriv", "ro", "sha", "testauthpass", "DES"]) + print(result.exit_code) + assert result.exit_code == 9 + assert "User encrypt type not used with 'AuthNoPriv'. Please use 'Priv' instead" in result.output + + def test_config_snmp_user_add_user_type_priv_missing_auth_type(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "priv", "ro"]) + print(result.exit_code) + assert result.exit_code == 5 + assert "User auth type is missing. Must be MD5, SHA, or HMAC-SHA-2" in result.output + + def test_config_snmp_user_add_user_type_priv_missing_auth_password(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "priv", "ro", "md5"]) + print(result.exit_code) + assert result.exit_code == 7 + assert "User auth password is missing" in result.output + + def test_config_snmp_user_add_user_type_priv_missing_encrypt_type(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "priv", "ro", "md5", "testauthpass"]) + print(result.exit_code) + assert result.exit_code == 10 + assert "User encrypt type is missing. 
Must be DES or AES" in result.output + + def test_config_snmp_user_add_user_type_priv_invalid_encrypt_password_over_64_characters(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "priv", "ro", "md5", "testauthpass", "DES", + "superlongencryptionpasswordtotestbeingoverthesixtyfourcharacterlimit"]) + print(result.exit_code) + assert result.exit_code == 13 + assert "FAILED: SNMP user password length should be not be greater than 64" in result.output + + def test_config_snmp_user_add_user_type_priv_invalid_encrypt_password_excluded_special_characters(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "priv", "ro", "md5", "testauthpass", "DES", "testencrypt@pass"]) + print(result.exit_code) + assert result.exit_code == 13 + assert "FAILED: SNMP user password should not have any of these special symbols" in result.output + + def test_config_snmp_user_add_user_type_priv_invalid_encrypt_password_not_long_enough(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "priv", "ro", "md5", "testauthpass", "DES", "test1"]) + print(result.exit_code) + assert result.exit_code == 13 + assert "FAILED: SNMP user password length should be at least 8 characters" in result.output + + def test_config_snmp_user_add_invalid_auth_type(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "authnopriv", "ro", "DM5", "user_auth_pass"]) + print(result.exit_code) + assert result.exit_code == 6 + assert "Invalid user authentication type. Must be one of these 'MD5', 'SHA', or 'HMAC-SHA-2'" in result.output + + def test_config_snmp_user_add_missing_auth_password(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "authnopriv", "ro", "SHA", ""]) + print(result.exit_code) + assert result.exit_code == 7 + assert 'User auth password is missing' in result.output + + def test_config_snmp_user_add_invalid_encrypt_type(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "priv", "ro", "SHA", "user_auth_pass", "EAS", "user_encrypt_pass"]) + print(result.exit_code) + assert result.exit_code == 11 + assert "Invalid user encryption type. 
Must be one of these two 'DES' or 'AES'" in result.output + + def test_config_snmp_user_add_missing_encrypt_password(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_3", "priv", "ro", "SHA", "user_auth_pass", "AES"]) + print(result.exit_code) + assert result.exit_code == 12 + assert 'User encrypt password is missing' in result.output + + def test_config_snmp_user_add_user_already_existing(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_nopriv_RO_1", "noauthnopriv", "ro"]) + print(result.exit_code) + assert result.exit_code == 14 + assert 'SNMP user test_nopriv_RO_1 is already configured' in result.output + + def test_config_snmp_user_add_valid_user_priv_ro_md5_des(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RO_7", "priv", "ro", "MD5", "user_auth_pass", "DES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RO_7 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RO_7") == expected_snmp_user_priv_ro_md5_des_config_db_output + + def test_config_snmp_user_add_valid_user_priv_ro_md5_aes(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RO_8", "priv", "ro", "MD5", "user_auth_pass", "AES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RO_8 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RO_8") == expected_snmp_user_priv_ro_md5_aes_config_db_output + + def test_config_snmp_user_add_valid_user_priv_ro_sha_des(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RO_9", "priv", "ro", "SHA", "user_auth_pass", "DES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RO_9 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RO_9") == expected_snmp_user_priv_ro_sha_des_config_db_output + + def test_config_snmp_user_add_valid_user_priv_ro_sha_aes(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RO_10", "priv", "ro", "SHA", "user_auth_pass", "AES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RO_10 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RO_10") == expected_snmp_user_priv_ro_sha_aes_config_db_output + + def test_config_snmp_user_add_valid_user_priv_ro_hmac_sha_2_des(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RO_11", "priv", "ro", 
"HMAC-SHA-2", "user_auth_pass", "DES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RO_11 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RO_11") == \ + expected_snmp_user_priv_ro_hmac_sha_2_des_config_db_output + + def test_config_snmp_user_add_valid_user_priv_ro_hmac_sha_2_aes(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RO_12", "priv", "ro", "HMAC-SHA-2", "user_auth_pass", "AES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RO_12 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RO_12") == \ + expected_snmp_user_priv_ro_hmac_sha_2_aes_config_db_output + + def test_config_snmp_user_add_valid_user_priv_rw_md5_des(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RW_7", "priv", "rw", "MD5", "user_auth_pass", "DES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RW_7 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RW_7") == expected_snmp_user_priv_rw_md5_des_config_db_output + + def test_config_snmp_user_add_valid_user_priv_rw_md5_aes(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RW_8", "priv", "rw", "MD5", "user_auth_pass", "AES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RW_8 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RW_8") == expected_snmp_user_priv_rw_md5_aes_config_db_output + + def test_config_snmp_user_add_valid_user_priv_rw_sha_des(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RW_9", "priv", "rw", "SHA", "user_auth_pass", "DES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RW_9 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RW_9") == expected_snmp_user_priv_rw_sha_des_config_db_output + + def test_config_snmp_user_add_valid_user_priv_rw_sha_aes(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RW_10", "priv", "rw", "SHA", "user_auth_pass", "AES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RW_10 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RW_10") == expected_snmp_user_priv_rw_sha_aes_config_db_output + + def test_config_snmp_user_add_valid_user_priv_rw_hmac_sha_2_des(self): + db = Db() + runner = CliRunner() + with 
mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RW_11", "priv", "rw", "HMAC-SHA-2", "user_auth_pass", "DES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RW_11 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RW_11") == \ + expected_snmp_user_priv_rw_hmac_sha_2_des_config_db_output + + def test_config_snmp_user_add_valid_user_priv_rw_hmac_sha_2_aes(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["add"], + ["test_priv_RW_12", "priv", "rw", "HMAC-SHA-2", "user_auth_pass", "AES", "user_encrypt_pass"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RW_12 added to configuration' in result.output + assert db.cfgdb.get_entry("SNMP_USER", "test_priv_RW_12") == \ + expected_snmp_user_priv_rw_hmac_sha_2_aes_config_db_output + + # Del snmp user tests + def test_config_snmp_user_del_valid_user(self): + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_nopriv_RO_1"]) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_nopriv_RO_1 removed from configuration' in result.output + + def test_config_snmp_user_del_invalid_user(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_nopriv_RO_2"]) + print(result.exit_code) + assert result.exit_code == 1 + assert 'SNMP user test_nopriv_RO_2 is not configured' in result.output + + @pytest.mark.parametrize("invalid_email", ['test@contoso', 'test.contoso.com', 'testcontoso@com', + '123_%contoso.com', 'mytest@contoso.comm']) + def test_is_valid_email(self, invalid_email): + output = config.is_valid_email(invalid_email) + assert output == False + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["UTILITIES_UNIT_TESTING"] = "0" + diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index f8ceebffbf..6c554f8f98 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -695,6 +695,172 @@ "peer_switch": "sonic-switch", "type": "ToRRouter" }, + "SNMP_COMMUNITY|msft": { + "TYPE": "RO" + }, + "SNMP_COMMUNITY|Rainer": { + "TYPE": "RW" + }, + "SNMP_USER|test_authpriv_RO_2": { + "SNMP_USER_TYPE": "AuthNoPriv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "SHA", + "SNMP_USER_AUTH_PASSWORD": "test_authpriv_RO_2_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "", + "SNMP_USER_ENCRYPTION_PASSWORD": "" + }, + "SNMP_USER|test_authpriv_RO_3": { + "SNMP_USER_TYPE": "AuthNoPriv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "HMAC-SHA-2", + "SNMP_USER_AUTH_PASSWORD": "test_authpriv_RO_3_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "", + "SNMP_USER_ENCRYPTION_PASSWORD": "" + }, + "SNMP_USER|test_priv_RW_4": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "SHA", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RW_4_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "AES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RW_4_encrpytpass" + }, + "SNMP_USER|test_priv_RW_3": { + "SNMP_USER_TYPE": 
"Priv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "SHA", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RW_3_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "DES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RW_3_encrpytpass" + }, + "SNMP_USER|test_priv_RO_2": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "MD5", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RO_2_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "AES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RO_2_encrpytpass" + }, + "SNMP_USER|test_nopriv_RO_1": { + "SNMP_USER_TYPE": "noAuthNoPriv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "", + "SNMP_USER_AUTH_PASSWORD": "", + "SNMP_USER_ENCRYPTION_TYPE": "", + "SNMP_USER_ENCRYPTION_PASSWORD": "" + }, + "SNMP_USER|test_priv_RW_1": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "MD5", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RO_1_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "DES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RW_1_encrpytpass" + }, + "SNMP_USER|test_authpriv_RW_1": { + "SNMP_USER_TYPE": "AuthNoPriv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "MD5", + "SNMP_USER_AUTH_PASSWORD": "test_authpriv_RW_1_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "", + "SNMP_USER_ENCRYPTION_PASSWORD": "" + }, + "SNMP_USER|test_priv_RO_6": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "HMAC-SHA-2", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RO_6_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "AES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RO_6_encrpytpass" + }, + "SNMP_USER|test_priv_RO_1": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "MD5", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RO_1_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "DES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RO_1_encrpytpass" + }, + "SNMP_USER|test_priv_RO_5": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "HMAC-SHA-2", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RO_5_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "DES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RO_5_encrpytpass" + }, + "SNMP_USER|test_nopriv_RW_1": { + "SNMP_USER_TYPE": "noAuthNoPriv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "", + "SNMP_USER_AUTH_PASSWORD": "", + "SNMP_USER_ENCRYPTION_TYPE": "", + "SNMP_USER_ENCRYPTION_PASSWORD": "" + }, + "SNMP_USER|test_priv_RO_3": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "SHA", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RO_3_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "DES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RO_3_encrpytpass" + }, + "SNMP_USER|test_priv_RW_2": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "MD5", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RO_2_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "AES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RW_2_encrpytpass" + }, + "SNMP_USER|test_authpriv_RW_3": { + "SNMP_USER_TYPE": "AuthNoPriv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "HMAC-SHA-2", + "SNMP_USER_AUTH_PASSWORD": "test_authpriv_RW_3_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "", + "SNMP_USER_ENCRYPTION_PASSWORD": "" + }, + "SNMP_USER|test_priv_RW_5": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "HMAC-SHA-2", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RW_5_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "DES", + "SNMP_USER_ENCRYPTION_PASSWORD": 
"test_priv_RW_5_encrpytpass" + }, + "SNMP_USER|test_priv_RW_6": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "HMAC-SHA-2", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RW_6_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "AES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RW_6_encrpytpass" + }, + "SNMP_USER|test_authpriv_RW_2": { + "SNMP_USER_TYPE": "AuthNoPriv", + "SNMP_USER_PERMISSION": "RW", + "SNMP_USER_AUTH_TYPE": "SHA", + "SNMP_USER_AUTH_PASSWORD": "test_authpriv_RW_2_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "", + "SNMP_USER_ENCRYPTION_PASSWORD": "" + }, + "SNMP_USER|test_priv_RO_4": { + "SNMP_USER_TYPE": "Priv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "SHA", + "SNMP_USER_AUTH_PASSWORD": "test_priv_RO_4_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "AES", + "SNMP_USER_ENCRYPTION_PASSWORD": "test_priv_RO_4_encrpytpass" + }, + "SNMP_USER|test_authpriv_RO_1": { + "SNMP_USER_TYPE": "AuthNoPriv", + "SNMP_USER_PERMISSION": "RO", + "SNMP_USER_AUTH_TYPE": "MD5", + "SNMP_USER_AUTH_PASSWORD": "test_authpriv_RO_1_authpass", + "SNMP_USER_ENCRYPTION_TYPE": "", + "SNMP_USER_ENCRYPTION_PASSWORD": "" + }, "DEVICE_NEIGHBOR|Ethernet0": { "name": "Servers", "port": "eth0" diff --git a/tests/show_snmp_test.py b/tests/show_snmp_test.py new file mode 100644 index 0000000000..753e20c418 --- /dev/null +++ b/tests/show_snmp_test.py @@ -0,0 +1,467 @@ +import sys +import os +import click +from click.testing import CliRunner +import pytest +import swsssdk +import traceback + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +import show.main as show +import clear.main as clear +import config.main as config + +import mock_tables.dbconnector + +from unittest import mock +from unittest.mock import patch +from utilities_common.db import Db + +config_snmp_location_add_new_location ="""\ +SNMP Location public has been added to configuration +Restarting SNMP service... +""" + +config_snmp_contact_add_del_new_contact ="""\ +Contact name testuser and contact email testuser@contoso.com have been added to configuration +Restarting SNMP service... 
+""" + +tabular_data_show_run_snmp_contact_expected = """\ +Contact Contact Email\n--------- --------------------\ntestuser testuser@contoso.com +""" + +json_data_show_run_snmp_contact_expected = """\ +{'testuser': 'testuser@contoso.com'} +""" + +tabular_data_show_run_snmp_community_expected = """\ +Community String Community Type +------------------ ---------------- +Rainer RW +msft RO +""" + +json_data_show_run_snmp_community_expected = """\ +{'msft': {'TYPE': 'RO'}, 'Rainer': {'TYPE': 'RW'}} +""" + +tabular_data_show_run_snmp_location_expected = """\ +Location +---------- +public +""" + +json_data_show_run_snmp_location_expected = """\ +{'Location': 'public'} +""" + + +tabular_data_show_run_snmp_user_expected = """\ +User Permission Type Type Auth Type Auth Password Encryption Type Encryption Password +------------------ ----------------- ------------ ----------- --------------------------- ----------------- -------------------------- +test_authpriv_RO_1 RO AuthNoPriv MD5 test_authpriv_RO_1_authpass +test_authpriv_RO_2 RO AuthNoPriv SHA test_authpriv_RO_2_authpass +test_authpriv_RO_3 RO AuthNoPriv HMAC-SHA-2 test_authpriv_RO_3_authpass +test_authpriv_RW_1 RW AuthNoPriv MD5 test_authpriv_RW_1_authpass +test_authpriv_RW_2 RW AuthNoPriv SHA test_authpriv_RW_2_authpass +test_authpriv_RW_3 RW AuthNoPriv HMAC-SHA-2 test_authpriv_RW_3_authpass +test_nopriv_RO_1 RO noAuthNoPriv +test_nopriv_RW_1 RW noAuthNoPriv +test_priv_RO_1 RO Priv MD5 test_priv_RO_1_authpass DES test_priv_RO_1_encrpytpass +test_priv_RO_2 RO Priv MD5 test_priv_RO_2_authpass AES test_priv_RO_2_encrpytpass +test_priv_RO_3 RO Priv SHA test_priv_RO_3_authpass DES test_priv_RO_3_encrpytpass +test_priv_RO_4 RO Priv SHA test_priv_RO_4_authpass AES test_priv_RO_4_encrpytpass +test_priv_RO_5 RO Priv HMAC-SHA-2 test_priv_RO_5_authpass DES test_priv_RO_5_encrpytpass +test_priv_RO_6 RO Priv HMAC-SHA-2 test_priv_RO_6_authpass AES test_priv_RO_6_encrpytpass +test_priv_RW_1 RW Priv MD5 test_priv_RO_1_authpass DES test_priv_RW_1_encrpytpass +test_priv_RW_2 RW Priv MD5 test_priv_RO_2_authpass AES test_priv_RW_2_encrpytpass +test_priv_RW_3 RW Priv SHA test_priv_RW_3_authpass DES test_priv_RW_3_encrpytpass +test_priv_RW_4 RW Priv SHA test_priv_RW_4_authpass AES test_priv_RW_4_encrpytpass +test_priv_RW_5 RW Priv HMAC-SHA-2 test_priv_RW_5_authpass DES test_priv_RW_5_encrpytpass +test_priv_RW_6 RW Priv HMAC-SHA-2 test_priv_RW_6_authpass AES test_priv_RW_6_encrpytpass +""" + + + + +json_data_show_run_snmp_user_expected = """{'test_authpriv_RO_2': {'SNMP_USER_TYPE': 'AuthNoPriv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'SHA', 'SNMP_USER_AUTH_PASSWORD': 'test_authpriv_RO_2_authpass', 'SNMP_USER_ENCRYPTION_TYPE': '', 'SNMP_USER_ENCRYPTION_PASSWORD': ''}, 'test_authpriv_RO_3': {'SNMP_USER_TYPE': 'AuthNoPriv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', 'SNMP_USER_AUTH_PASSWORD': 'test_authpriv_RO_3_authpass', 'SNMP_USER_ENCRYPTION_TYPE': '', 'SNMP_USER_ENCRYPTION_PASSWORD': ''}, 'test_priv_RW_4': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': 'SHA', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RW_4_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'AES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RW_4_encrpytpass'}, 'test_priv_RW_3': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': 'SHA', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RW_3_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'DES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RW_3_encrpytpass'}, 'test_priv_RO_2': 
{'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'MD5', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RO_2_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'AES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RO_2_encrpytpass'}, 'test_nopriv_RO_1': {'SNMP_USER_TYPE': 'noAuthNoPriv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': '', 'SNMP_USER_AUTH_PASSWORD': '', 'SNMP_USER_ENCRYPTION_TYPE': '', 'SNMP_USER_ENCRYPTION_PASSWORD': ''}, 'test_priv_RW_1': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': 'MD5', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RO_1_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'DES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RW_1_encrpytpass'}, 'test_authpriv_RW_1': {'SNMP_USER_TYPE': 'AuthNoPriv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': 'MD5', 'SNMP_USER_AUTH_PASSWORD': 'test_authpriv_RW_1_authpass', 'SNMP_USER_ENCRYPTION_TYPE': '', 'SNMP_USER_ENCRYPTION_PASSWORD': ''}, 'test_priv_RO_6': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RO_6_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'AES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RO_6_encrpytpass'}, 'test_priv_RO_1': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'MD5', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RO_1_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'DES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RO_1_encrpytpass'}, 'test_priv_RO_5': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RO_5_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'DES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RO_5_encrpytpass'}, 'test_nopriv_RW_1': {'SNMP_USER_TYPE': 'noAuthNoPriv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': '', 'SNMP_USER_AUTH_PASSWORD': '', 'SNMP_USER_ENCRYPTION_TYPE': '', 'SNMP_USER_ENCRYPTION_PASSWORD': ''}, 'test_priv_RO_3': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'SHA', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RO_3_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'DES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RO_3_encrpytpass'}, 'test_priv_RW_2': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': 'MD5', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RO_2_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'AES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RW_2_encrpytpass'}, 'test_authpriv_RW_3': {'SNMP_USER_TYPE': 'AuthNoPriv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', 'SNMP_USER_AUTH_PASSWORD': 'test_authpriv_RW_3_authpass', 'SNMP_USER_ENCRYPTION_TYPE': '', 'SNMP_USER_ENCRYPTION_PASSWORD': ''}, 'test_priv_RW_5': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RW_5_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'DES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RW_5_encrpytpass'}, 'test_priv_RW_6': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': 'HMAC-SHA-2', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RW_6_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'AES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RW_6_encrpytpass'}, 'test_authpriv_RW_2': {'SNMP_USER_TYPE': 'AuthNoPriv', 'SNMP_USER_PERMISSION': 'RW', 'SNMP_USER_AUTH_TYPE': 'SHA', 'SNMP_USER_AUTH_PASSWORD': 'test_authpriv_RW_2_authpass', 'SNMP_USER_ENCRYPTION_TYPE': '', 'SNMP_USER_ENCRYPTION_PASSWORD': ''}, 'test_priv_RO_4': {'SNMP_USER_TYPE': 'Priv', 
'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'SHA', 'SNMP_USER_AUTH_PASSWORD': 'test_priv_RO_4_authpass', 'SNMP_USER_ENCRYPTION_TYPE': 'AES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'test_priv_RO_4_encrpytpass'}, 'test_authpriv_RO_1': {'SNMP_USER_TYPE': 'AuthNoPriv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'MD5', 'SNMP_USER_AUTH_PASSWORD': 'test_authpriv_RO_1_authpass', 'SNMP_USER_ENCRYPTION_TYPE': '', 'SNMP_USER_ENCRYPTION_PASSWORD': ''}} +""" + +tabular_data_show_run_snmp_expected = """\ +Location +---------- +public + + +SNMP_CONTACT SNMP_CONTACT_EMAIL +-------------- -------------------- +testuser testuser@contoso.com + + +Community String Community Type +------------------ ---------------- +Rainer RW +msft RO + + +User Permission Type Type Auth Type Auth Password Encryption Type Encryption Password +------------------ ----------------- ------------ ----------- --------------------------- ----------------- -------------------------- +test_authpriv_RO_1 RO AuthNoPriv MD5 test_authpriv_RO_1_authpass +test_authpriv_RO_2 RO AuthNoPriv SHA test_authpriv_RO_2_authpass +test_authpriv_RO_3 RO AuthNoPriv HMAC-SHA-2 test_authpriv_RO_3_authpass +test_authpriv_RW_1 RW AuthNoPriv MD5 test_authpriv_RW_1_authpass +test_authpriv_RW_2 RW AuthNoPriv SHA test_authpriv_RW_2_authpass +test_authpriv_RW_3 RW AuthNoPriv HMAC-SHA-2 test_authpriv_RW_3_authpass +test_nopriv_RO_1 RO noAuthNoPriv +test_nopriv_RW_1 RW noAuthNoPriv +test_priv_RO_1 RO Priv MD5 test_priv_RO_1_authpass DES test_priv_RO_1_encrpytpass +test_priv_RO_2 RO Priv MD5 test_priv_RO_2_authpass AES test_priv_RO_2_encrpytpass +test_priv_RO_3 RO Priv SHA test_priv_RO_3_authpass DES test_priv_RO_3_encrpytpass +test_priv_RO_4 RO Priv SHA test_priv_RO_4_authpass AES test_priv_RO_4_encrpytpass +test_priv_RO_5 RO Priv HMAC-SHA-2 test_priv_RO_5_authpass DES test_priv_RO_5_encrpytpass +test_priv_RO_6 RO Priv HMAC-SHA-2 test_priv_RO_6_authpass AES test_priv_RO_6_encrpytpass +test_priv_RW_1 RW Priv MD5 test_priv_RO_1_authpass DES test_priv_RW_1_encrpytpass +test_priv_RW_2 RW Priv MD5 test_priv_RO_2_authpass AES test_priv_RW_2_encrpytpass +test_priv_RW_3 RW Priv SHA test_priv_RW_3_authpass DES test_priv_RW_3_encrpytpass +test_priv_RW_4 RW Priv SHA test_priv_RW_4_authpass AES test_priv_RW_4_encrpytpass +test_priv_RW_5 RW Priv HMAC-SHA-2 test_priv_RW_5_authpass DES test_priv_RW_5_encrpytpass +test_priv_RW_6 RW Priv HMAC-SHA-2 test_priv_RW_6_authpass AES test_priv_RW_6_encrpytpass +""" + + +class TestSNMPShowCommands(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "1" + + # mock the redis for unit test purposes # + try: + if os.environ["UTILITIES_UNIT_TESTING"] == "1": + modules_path = os.path.join(os.path.dirname(__file__), "..") + test_path = os.path.join(modules_path, "sonic-utilities-tests") + sys.path.insert(0, modules_path) + sys.path.insert(0, test_path) + import mock_tables.dbconnector + except KeyError: + pass + + def test_show_run_snmp_location_tabular(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + ["public"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_location_add_new_location + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "public"} + + result = 
runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["location"], + [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == tabular_data_show_run_snmp_location_expected + + def test_show_run_snmp_location_json(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"], + ["public"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_location_add_new_location + assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "public"} + + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["location"], + ["--json"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == json_data_show_run_snmp_location_expected + + def test_show_run_snmp_location_json_bad_key(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["location"], ["--json"]) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert "{}" in result.output + + + def test_show_run_snmp_location_bad_key(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["location"], []) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert "" in result.output + + def test_show_run_snmp_contact_tabular(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["contact"], + [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == tabular_data_show_run_snmp_contact_expected + + def test_show_run_snmp_contact_json(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"], + ["testuser", "testuser@contoso.com"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_snmp_contact_add_del_new_contact + assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"} + + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["contact"], + ["--json"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == json_data_show_run_snmp_contact_expected + + def test_show_run_snmp_contact_json_bad_key(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["contact"], ["--json"]) + print(result.exit_code) + 
print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert '{}' in result.output + + def test_show_run_snmp_contact_tabular_bad_key(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["contact"]) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert '' in result.output + + + def test_show_run_snmp_community_tabular(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["community"], []) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == tabular_data_show_run_snmp_community_expected + + def test_show_run_snmp_community_json(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["community"], + ["--json"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == json_data_show_run_snmp_community_expected + + def test_show_run_snmp_user_tabular(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["user"], []) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == tabular_data_show_run_snmp_user_expected + + def test_show_run_snmp_user_json(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["user"], ["--json"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == json_data_show_run_snmp_user_expected + + def test_show_run_snmp_user_json_bad_key(self): + db = Db() + runner = CliRunner() + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_authpriv_RO_1"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_authpriv_RO_1 removed from configuration' in result.output + + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_authpriv_RO_2"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_authpriv_RO_2 removed from configuration' in result.output + + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_authpriv_RO_3"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_authpriv_RO_3 removed from configuration' in result.output + + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_authpriv_RW_1"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_authpriv_RW_1 removed from configuration' in result.output + + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_authpriv_RW_2"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_authpriv_RW_2 removed from configuration' in result.output + + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_authpriv_RW_3"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_authpriv_RW_3 removed from configuration' in result.output + 
+ result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_nopriv_RO_1"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_nopriv_RO_1 removed from configuration' in result.output + + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_nopriv_RW_1"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_nopriv_RW_1 removed from configuration' in result.output + + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_priv_RO_1"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RO_1 removed from configuration' in result.output + + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_priv_RO_2"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RO_2 removed from configuration' in result.output + + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_priv_RO_3"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RO_3 removed from configuration' in result.output + + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_priv_RO_4"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RO_4 removed from configuration' in result.output + + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_priv_RO_5"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RO_5 removed from configuration' in result.output + + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_priv_RO_6"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RO_6 removed from configuration' in result.output + + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_priv_RW_1"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RW_1 removed from configuration' in result.output + + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_priv_RW_2"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RW_2 removed from configuration' in result.output + + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_priv_RW_3"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RW_3 removed from configuration' in result.output + + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_priv_RW_4"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RW_4 removed from configuration' in result.output + + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_priv_RW_5"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + assert 'SNMP user test_priv_RW_5 removed from configuration' in result.output + + result = runner.invoke(config.config.commands["snmp"].commands["user"].commands["del"], + ["test_priv_RW_6"], obj=db) + print(result.exit_code) + assert result.exit_code == 0 + 
assert 'SNMP user test_priv_RW_6 removed from configuration' in result.output
+
+            result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"].commands["user"], ["--json"], obj=db)
+            print(result.exit_code)
+            print(result.output)
+            traceback.print_tb(result.exc_info[2])
+            assert result.exit_code == 0
+            assert "{}" in result.output
+
+
+    def test_show_run_snmp_tabular(self):
+        db = Db()
+        runner = CliRunner()
+        with mock.patch('utilities_common.cli.run_command') as mock_run_command:
+            result = runner.invoke(config.config.commands["snmp"].commands["contact"].commands["add"],
+                                   ["testuser", "testuser@contoso.com"], obj=db)
+            print(result.exit_code)
+            print(result.output)
+            assert result.exit_code == 0
+            assert result.output == config_snmp_contact_add_del_new_contact
+            assert db.cfgdb.get_entry("SNMP", "CONTACT") == {"testuser": "testuser@contoso.com"}
+
+            result = runner.invoke(config.config.commands["snmp"].commands["location"].commands["add"],
+                                   ["public"], obj=db)
+            print(result.exit_code)
+            print(result.output)
+            assert result.exit_code == 0
+            assert result.output == config_snmp_location_add_new_location
+            assert db.cfgdb.get_entry("SNMP", "LOCATION") == {"Location": "public"}
+
+            result = runner.invoke(show.cli.commands["runningconfiguration"].commands["snmp"], [], obj=db)
+            print(result.exit_code)
+            print(result.output)
+            assert result.exit_code == 0
+            assert result.output == tabular_data_show_run_snmp_expected
+
+
+    @classmethod
+    def teardown_class(cls):
+        print("TEARDOWN")
+        os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1])
+        os.environ["UTILITIES_UNIT_TESTING"] = "0"
+

From 9a88cb6f0fbaf5fc8c336723cc7b831c6aacf19b Mon Sep 17 00:00:00 2001
From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com>
Date: Thu, 6 May 2021 11:30:42 +0300
Subject: [PATCH 30/41] [sonic_installer] dont fail package migration (#1591)

- What I did
Do not fail when the user performs a downgrade.

Fixes Azure/sonic-buildimage#7518

- How I did it
Ignore failures in the cleanup steps.

- How to verify it
On a master image, install a 202012 image.
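In miniature, the change turns the shared helper into an opt-in/opt-out raiser. A self-contained sketch of the pattern follows; the exception class is stubbed and the click-based command echo is omitted here, so treat it as an illustration rather than the shipped helper:

```
import subprocess

# Stand-in for the real SonicRuntimeException, stubbed so the sketch
# runs on its own.
class SonicRuntimeException(Exception):
    pass

def run_command_or_raise(argv, raise_exception=True):
    # Run the command and capture stdout; raise on a non-zero exit code
    # only when the caller has not opted out via raise_exception=False.
    proc = subprocess.Popen(argv, text=True, stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    if proc.returncode != 0 and raise_exception:
        raise SonicRuntimeException("Failed to run command '{0}'".format(argv))
    return out.rstrip("\n")

# Cleanup call sites opt out, e.g.:
#   run_command_or_raise(["umount", "-R", mount_point], raise_exception=False)
# so a failed umount during a downgrade is tolerated instead of failing
# the whole image install.
```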
Signed-off-by: Stepan Blyschak --- sonic_installer/common.py | 4 ++-- sonic_installer/main.py | 16 ++++++++++------ 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/sonic_installer/common.py b/sonic_installer/common.py index ac1416789f..5e36cedb8c 100644 --- a/sonic_installer/common.py +++ b/sonic_installer/common.py @@ -31,13 +31,13 @@ def run_command(command): sys.exit(proc.returncode) # Run bash command and return output, raise if it fails -def run_command_or_raise(argv): +def run_command_or_raise(argv, raise_exception=True): click.echo(click.style("Command: ", fg='cyan') + click.style(' '.join(argv), fg='green')) proc = subprocess.Popen(argv, text=True, stdout=subprocess.PIPE) out, _ = proc.communicate() - if proc.returncode != 0: + if proc.returncode != 0 and raise_exception: raise SonicRuntimeException("Failed to run command '{0}'".format(argv)) return out.rstrip("\n") diff --git a/sonic_installer/main.py b/sonic_installer/main.py index 12a2ab7e0e..5f89878344 100644 --- a/sonic_installer/main.py +++ b/sonic_installer/main.py @@ -232,7 +232,7 @@ def mount_squash_fs(squashfs_path, mount_point): run_command_or_raise(["mount", "-t", "squashfs", squashfs_path, mount_point]) -def umount(mount_point, read_only=True, recursive=False, force=True, remove_dir=True): +def umount(mount_point, read_only=True, recursive=False, force=True, remove_dir=True, raise_exception=True): flags = [] if read_only: flags.append("-r") @@ -240,9 +240,9 @@ def umount(mount_point, read_only=True, recursive=False, force=True, remove_dir= flags.append("-f") if recursive: flags.append("-R") - run_command_or_raise(["umount", *flags, mount_point]) + run_command_or_raise(["umount", *flags, mount_point], raise_exception=raise_exception) if remove_dir: - run_command_or_raise(["rm", "-rf", mount_point]) + run_command_or_raise(["rm", "-rf", mount_point], raise_exception=raise_exception) def mount_overlay_fs(lowerdir, upperdir, workdir, mount_point): @@ -350,14 +350,18 @@ def get_path(path): run_command_or_raise(["mount", "--bind", os.path.join(VAR_RUN_PATH, DOCKERD_SOCK), os.path.join(new_image_mount, "tmp", DOCKERD_SOCK)]) + run_command_or_raise(["chroot", new_image_mount, "sh", "-c", "command -v {}".format(SONIC_PACKAGE_MANAGER)]) + except SonicRuntimeException as err: + echo_and_log("Warning: SONiC Application Extension is not supported in this image: {}".format(err), LOG_ERR, fg="red") + else: run_command_or_raise(["chroot", new_image_mount, SONIC_PACKAGE_MANAGER, "migrate", os.path.join("/", tmp_dir, packages_file), "--dockerd-socket", os.path.join("/", tmp_dir, DOCKERD_SOCK), "-y"]) finally: - run_command("chroot {} {} stop".format(new_image_mount, DOCKER_CTL_SCRIPT)) - umount(new_image_mount, recursive=True, read_only=False, remove_dir=False) - umount(new_image_mount) + run_command_or_raise(["chroot", new_image_mount, DOCKER_CTL_SCRIPT, "stop"], raise_exception=False) + umount(new_image_mount, recursive=True, read_only=False, remove_dir=False, raise_exception=False) + umount(new_image_mount, raise_exception=False) # Main entrypoint From a71ff02336d0984b1868891405af4b2c41ecfe5d Mon Sep 17 00:00:00 2001 From: Joe LeVeque Date: Thu, 6 May 2021 18:00:44 -0700 Subject: [PATCH 31/41] [sfpshow] Gracefully handle improper 'specification_compliance' field (#1594) #### What I did Gracefully handle improper 'specification_compliance' field #### How I did it The 'specification_compliance' field of transceiver info is expected to be a string representation of a dictionary. 
However, there is a chance, upon some kind of platform issue, that a vendor's platform API returns something like 'N/A'. In this case, sfpshow would crash. Rather than crash, sfpshow should handle this gracefully and output 'N/A' instead.
---
 scripts/sfpshow | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/scripts/sfpshow b/scripts/sfpshow
index 3ee80ea2c0..d05268f74d 100755
--- a/scripts/sfpshow
+++ b/scripts/sfpshow
@@ -6,6 +6,7 @@ which accesses the transceiver directly.
 """
+import ast
 import os
 import re
 import sys
@@ -273,10 +274,15 @@ class SFPShow(object):
                 output += '{}{}: {}\n'.format(indent, QSFP_DATA_MAP[key], sfp_info_dict[key])
             else:
                 output += '{}{}:\n'.format(indent, QSFP_DATA_MAP['specification_compliance'])
-                spefic_compliance_dict = eval(sfp_info_dict['specification_compliance'])
-                sorted_compliance_key_table = natsorted(spefic_compliance_dict)
-                for compliance_key in sorted_compliance_key_table:
-                    output += '{}{}: {}\n'.format((indent * 2), compliance_key, spefic_compliance_dict[compliance_key])
+
+                spec_compliance_dict = {}
+                try:
+                    spec_compliance_dict = ast.literal_eval(sfp_info_dict['specification_compliance'])
+                    sorted_compliance_key_table = natsorted(spec_compliance_dict)
+                    for compliance_key in sorted_compliance_key_table:
+                        output += '{}{}: {}\n'.format((indent * 2), compliance_key, spec_compliance_dict[compliance_key])
+                except ValueError as e:
+                    output += '{}N/A\n'.format((indent * 2))
         else:
             output += '{}{}: {}\n'.format(indent, QSFP_DATA_MAP[key], sfp_info_dict[key])

From 8c2980af96ba3921b244d7729398df4890c59e78 Mon Sep 17 00:00:00 2001
From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com>
Date: Sat, 8 May 2021 00:41:07 +0800
Subject: [PATCH 32/41] [sonic-utilities] CLI support for port auto negotiation (#1568)

#### What I did
1. Add CLI support for the port auto negotiation feature.
2. Add db_migrator changes for the auto negotiation feature.
3. Add unit test cases for all changes.

#### How I did it
1. Add new subcommands to the "config interface" command group to allow the user to configure port auto negotiation
2. Add new subcommands to the "show interfaces" command group to allow the user to show auto negotiation status
3.
In db_migrator.py, change auto negotiation related DB field to latest one --- config/main.py | 120 +++++++++++++++++ doc/Command-Reference.md | 127 ++++++++++++++++++ scripts/db_migrator.py | 22 ++- scripts/intfutil | 84 +++++++++++- scripts/portconfig | 114 +++++++++++++++- show/interfaces/__init__.py | 35 +++++ tests/config_an_test.py | 76 +++++++++++ .../non-default-config-expected.json | 2 +- .../config_db/port-an-expected.json | 40 ++++++ .../config_db/port-an-input.json | 39 ++++++ tests/db_migrator_test.py | 23 ++++ tests/intfutil_test.py | 48 +++++++ tests/mock_tables/appl_db.json | 11 +- tests/mock_tables/state_db.json | 3 + 14 files changed, 735 insertions(+), 9 deletions(-) create mode 100644 tests/config_an_test.py create mode 100644 tests/db_migrator_input/config_db/port-an-expected.json create mode 100644 tests/db_migrator_input/config_db/port-an-input.json diff --git a/config/main.py b/config/main.py index 953af72e79..d0c7c4258b 100644 --- a/config/main.py +++ b/config/main.py @@ -3014,6 +3014,126 @@ def speed(ctx, interface_name, interface_speed, verbose): command += " -vv" clicommon.run_command(command, display_cmd=verbose) +# +# 'autoneg' subcommand +# + +@interface.command() +@click.pass_context +@click.argument('interface_name', metavar='', required=True) +@click.argument('mode', metavar='', required=True, type=click.Choice(["enabled", "disabled"])) +@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") +def autoneg(ctx, interface_name, mode, verbose): + """Set interface auto negotiation mode""" + # Get the config_db connector + config_db = ctx.obj['config_db'] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + log.log_info("'interface autoneg {} {}' executing...".format(interface_name, mode)) + + if ctx.obj['namespace'] is DEFAULT_NAMESPACE: + command = "portconfig -p {} -an {}".format(interface_name, mode) + else: + command = "portconfig -p {} -an {} -n {}".format(interface_name, mode, ctx.obj['namespace']) + + if verbose: + command += " -vv" + clicommon.run_command(command, display_cmd=verbose) + +# +# 'adv-speeds' subcommand +# + +@interface.command() +@click.pass_context +@click.argument('interface_name', metavar='', required=True) +@click.argument('speed_list', metavar='', required=True) +@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") +def advertised_speeds(ctx, interface_name, speed_list, verbose): + """Set interface advertised speeds""" + # Get the config_db connector + config_db = ctx.obj['config_db'] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + log.log_info("'interface advertised_speeds {} {}' executing...".format(interface_name, speed_list)) + + if ctx.obj['namespace'] is DEFAULT_NAMESPACE: + command = "portconfig -p {} -S {}".format(interface_name, speed_list) + else: + command = "portconfig -p {} -S {} -n {}".format(interface_name, speed_list, ctx.obj['namespace']) + + if verbose: + command += " -vv" + clicommon.run_command(command, display_cmd=verbose) + +# +# 'interface-type' subcommand +# + +@interface.command(name='type') +@click.pass_context +@click.argument('interface_name', metavar='', required=True) +@click.argument('interface_type_value', metavar='', required=True) +@click.option('-v', '--verbose', 
is_flag=True, help="Enable verbose output") +def interface_type(ctx, interface_name, interface_type_value, verbose): + """Set interface type""" + # Get the config_db connector + config_db = ctx.obj['config_db'] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + log.log_info("'interface interface_type {} {}' executing...".format(interface_name, interface_type_value)) + + if ctx.obj['namespace'] is DEFAULT_NAMESPACE: + command = "portconfig -p {} -t {}".format(interface_name, interface_type_value) + else: + command = "portconfig -p {} -t {} -n {}".format(interface_name, interface_type_value, ctx.obj['namespace']) + + if verbose: + command += " -vv" + clicommon.run_command(command, display_cmd=verbose) + +# +# 'advertised-interface-types' subcommand +# + +@interface.command() +@click.pass_context +@click.argument('interface_name', metavar='', required=True) +@click.argument('interface_type_list', metavar='', required=True) +@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") +def advertised_types(ctx, interface_name, interface_type_list, verbose): + """Set interface advertised types""" + # Get the config_db connector + config_db = ctx.obj['config_db'] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + log.log_info("'interface advertised_interface_types {} {}' executing...".format(interface_name, interface_type_list)) + + if ctx.obj['namespace'] is DEFAULT_NAMESPACE: + command = "portconfig -p {} -T {}".format(interface_name, interface_type_list) + else: + command = "portconfig -p {} -T {} -n {}".format(interface_name, interface_type_list, ctx.obj['namespace']) + + if verbose: + command += " -vv" + clicommon.run_command(command, display_cmd=verbose) + # # 'breakout' subcommand # diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index ab4e28dbdc..6c7a474a5c 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -3064,6 +3064,7 @@ Subsequent pages explain each of these commands in detail. -?, -h, --help Show this message and exit. Commands: + autoneg Show interface autoneg information breakout Show Breakout Mode information by interfaces counters Show interface counters description Show interface status, protocol and... @@ -3074,6 +3075,30 @@ Subsequent pages explain each of these commands in detail. transceiver Show SFP Transceiver information ``` +**show interfaces autoneg** + +This show command displays the port auto negotiation status for all interfaces i.e. interface name, auto negotiation mode, speed, advertised speeds, interface type, advertised interface types, operational status, admin status. For a single interface, provide the interface name with the sub-command. 
+ +- Usage: + ``` + show interfaces autoneg status + show interfaces autoneg status + ``` + +- Example: + ``` + admin@sonic:~$ show interfaces autoneg status + Interface Auto-Neg Mode Speed Adv Speeds Type Adv Types Oper Admin + ----------- --------------- ------- ------------ ------ ----------- ------ ------- + Ethernet0 enabled 25G 10G,25G CR CR,CR4 up up + Ethernet4 disabled 100G all CR4 all up up + + admin@sonic:~$ show interfaces autoneg status Ethernet8 + Interface Auto-Neg Mode Speed Adv Speeds Type Adv Types Oper Admin + ----------- --------------- ------- ------------ ------ ----------- ------ ------- + Ethernet8 disabled 100G N/A CR4 N/A up up + ``` + **show interfaces breakout** This show command displays the port capability for all interfaces i.e. index, lanes, default_brkout_mode, breakout_modes(i.e. all the available breakout modes) and brkout_mode (i.e. current breakout mode). To display current breakout mode, "current-mode" subcommand can be used.For a single interface, provide the interface name with the sub-command. @@ -3380,6 +3405,10 @@ This sub-section explains the following list of configuration on the interfaces. 4) speed - to set the interface speed 5) startup - to bring up the administratively shutdown interface 6) breakout - to set interface breakout mode +7) autoneg - to set interface auto negotiation mode +8) advertised-speeds - to set interface advertised speeds +9) advertised-types - to set interface advertised types +10) type - to set interface type From 201904 release onwards, the “config interface” command syntax is changed and the format is as follows: @@ -3714,6 +3743,104 @@ kindly use, double tab i.e. to see the available breakout option cust Go Back To [Beginning of the document](#) or [Beginning of this section](#interfaces) +**config interface autoneg (Versions >= 202106)** + +This command is used to set port auto negotiation mode. + +- Usage: + ``` + sudo config interface autoneg --help + Usage: config interface autoneg [OPTIONS] + + Set interface auto negotiation mode + + Options: + -v, --verbose Enable verbose output + -h, -?, --help Show this message and exit. + ``` + +- Example: + ``` + admin@sonic:~$ sudo config interface autoneg Ethernet0 enabled + + admin@sonic:~$ sudo config interface autoneg Ethernet0 disabled + ``` + +Go Back To [Beginning of the document](#) or [Beginning of this section](#interfaces) + +**config interface advertised-speeds (Versions >= 202106)** + +This command is used to set port advertised speed. + +- Usage: + ``` + sudo config interface advertised-speeds --help + Usage: config interface advertised-speeds [OPTIONS] + + Set interface advertised speeds + + Options: + -v, --verbose Enable verbose output + -h, -?, --help Show this message and exit. + ``` + +- Example: + ``` + admin@sonic:~$ sudo config interface advertised-speeds Ethernet0 all + + admin@sonic:~$ sudo config interface advertised-speeds Ethernet0 50000,100000 + ``` + +Go Back To [Beginning of the document](#) or [Beginning of this section](#interfaces) + +**config interface advertised-types (Versions >= 202106)** + +This command is used to set port advertised interface types. + +- Usage: + ``` + sudo config interface advertised-types --help + Usage: config interface advertised-types [OPTIONS] + + Set interface advertised types + + Options: + -v, --verbose Enable verbose output + -h, -?, --help Show this message and exit. 
+ ``` + +- Example: + ``` + admin@sonic:~$ sudo config interface advertised-types Ethernet0 all + + admin@sonic:~$ sudo config interface advertised-types Ethernet0 CR,CR4 + ``` + +Go Back To [Beginning of the document](#) or [Beginning of this section](#interfaces) + +**config interface type (Versions >= 202106)** + +This command is used to set port interface type. + +- Usage: + ``` + sudo config interface type --help + Usage: config interface type [OPTIONS] + + Set interface type + + Options: + -v, --verbose Enable verbose output + -h, -?, --help Show this message and exit. + ``` + +- Example: + ``` + admin@sonic:~$ sudo config interface type Ethernet0 CR4 + ``` + +Go Back To [Beginning of the document](#) or [Beginning of this section](#interfaces) + **config interface cable_length (Versions >= 202006)** This command is used to configure the length of the cable connected to a port. The cable_length is in unit of meters and must be suffixed with "m". diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index 6e18ca4034..4d4312e5e4 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -353,6 +353,18 @@ def prepare_dynamic_buffer_for_warm_reboot(self, buffer_pools=None, buffer_profi return True + def migrate_config_db_port_table_for_auto_neg(self): + table_name = 'PORT' + port_table = self.configDB.get_table(table_name) + for key, value in port_table.items(): + if 'autoneg' in value: + if value['autoneg'] == '1': + self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format(table_name, key), 'autoneg', 'on') + if 'speed' in value and 'adv_speeds' not in value: + self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format(table_name, key), 'adv_speeds', value['speed']) + elif value['autoneg'] == '0': + self.configDB.set(self.configDB.CONFIG_DB, '{}|{}'.format(table_name, key), 'autoneg', 'off') + def version_unknown(self): """ version_unknown tracks all SONiC versions that doesn't have a version @@ -470,10 +482,18 @@ def version_1_0_5(self): def version_2_0_0(self): """ - Current latest version. Nothing to do here. + Version 2_0_0. """ log.log_info('Handling version_2_0_0') + self.migrate_config_db_port_table_for_auto_neg() + self.set_version('version_2_0_1') + return 'version_2_0_1' + def version_2_0_1(self): + """ + Current latest version. Nothing to do here. + """ + log.log_info('Handling version_2_0_1') return None def get_version(self): diff --git a/scripts/intfutil b/scripts/intfutil index 3a77338b39..a409d1a29d 100755 --- a/scripts/intfutil +++ b/scripts/intfutil @@ -41,6 +41,10 @@ PORT_FEC = "fec" PORT_DESCRIPTION = "description" PORT_OPTICS_TYPE = "type" PORT_PFC_ASYM_STATUS = "pfc_asym" +PORT_AUTONEG = 'autoneg' +PORT_ADV_SPEEDS = 'adv_speeds' +PORT_INTERFACE_TYPE = 'interface_type' +PORT_ADV_INTERFACE_TYPES = 'adv_interface_types' VLAN_SUB_INTERFACE_SEPARATOR = "." 
VLAN_SUB_INTERFACE_TYPE = "802.1q-encapsulation" @@ -133,7 +137,13 @@ def appl_db_port_status_get(appl_db, intf_name, status_type): if status is None: return "N/A" if status_type == PORT_SPEED and status != "N/A": - status = '{}G'.format(status[:-3]) + status = '{}G'.format(status[:-3]) + elif status_type == PORT_ADV_SPEEDS and status != "N/A" and status != "all": + speed_list = status.split(',') + new_speed_list = [] + for s in natsorted(speed_list): + new_speed_list.append('{}G'.format(s[:-3])) + status = ','.join(new_speed_list) return status def state_db_port_optics_get(state_db, intf_name, type): @@ -506,10 +516,77 @@ class IntfDescription(object): if self.appl_db_keys: self.table += self.generate_intf_description() + +# ========================== interface-autoneg logic ========================== +header_autoneg = ['Interface', 'Auto-Neg Mode', 'Speed', 'Adv Speeds', 'Type', 'Adv Types', 'Oper', 'Admin'] + + +class IntfAutoNegStatus(object): + + def __init__(self, intf_name, namespace_option, display_option): + self.db = None + self.config_db = None + self.table = [] + self.multi_asic = multi_asic_util.MultiAsic( + display_option, namespace_option) + + if intf_name is not None and intf_name == SUB_PORT: + self.intf_name = None + else: + self.intf_name = intf_name + + def display_autoneg_status(self): + + self.get_intf_autoneg_status() + + # Sorting and tabulating the result table. + sorted_table = natsorted(self.table) + print(tabulate(sorted_table, header_autoneg, tablefmt="simple", stralign='right')) + + def generate_autoneg_status(self): + """ + Generate interface-autoneg output + """ + + i = {} + table = [] + key = [] + + # + # Iterate through all the keys and append port's associated state to + # the result table. + # + for i in self.appl_db_keys: + key = re.split(':', i, maxsplit=1)[-1].strip() + if key in self.front_panel_ports_list: + if self.multi_asic.skip_display(constants.PORT_OBJ, key): + continue + autoneg_mode = appl_db_port_status_get(self.db, key, PORT_AUTONEG) + if autoneg_mode != 'N/A': + autoneg_mode = 'enabled' if autoneg_mode == 'on' else 'disabled' + table.append((key, + autoneg_mode, + appl_db_port_status_get(self.db, key, PORT_SPEED), + appl_db_port_status_get(self.db, key, PORT_ADV_SPEEDS), + appl_db_port_status_get(self.db, key, PORT_INTERFACE_TYPE), + appl_db_port_status_get(self.db, key, PORT_ADV_INTERFACE_TYPES), + appl_db_port_status_get(self.db, key, PORT_OPER_STATUS), + appl_db_port_status_get(self.db, key, PORT_ADMIN_STATUS), + )) + return table + + @multi_asic_util.run_on_multi_asic + def get_intf_autoneg_status(self): + self.front_panel_ports_list = get_frontpanel_port_list(self.config_db) + self.appl_db_keys = appl_db_keys_get(self.db, self.front_panel_ports_list, self.intf_name) + if self.appl_db_keys: + self.table += self.generate_autoneg_status() + + def main(): parser = argparse.ArgumentParser(description='Display Interface information', formatter_class=argparse.RawTextHelpFormatter) - parser.add_argument('-c', '--command', type=str, help='get interface status or description', default=None) + parser.add_argument('-c', '--command', type=str, help='get interface status or description or auto negotiation status', default=None) parser.add_argument('-i', '--interface', type=str, help='interface information for specific port: Ethernet0', default=None) parser = multi_asic_util.multi_asic_args(parser) args = parser.parse_args() @@ -520,6 +597,9 @@ def main(): elif args.command == "description": interface_desc = IntfDescription(args.interface, 
args.namespace, args.display) interface_desc.display_intf_description() + elif args.command == "autoneg": + interface_autoneg_status = IntfAutoNegStatus(args.interface, args.namespace, args.display) + interface_autoneg_status.display_autoneg_status() sys.exit(0) diff --git a/scripts/portconfig b/scripts/portconfig index b22e547101..a37a86a82f 100755 --- a/scripts/portconfig +++ b/scripts/portconfig @@ -16,15 +16,47 @@ optional arguments: -f --fec port fec mode -m --mtu port mtu in bytes -n --namesapce Namespace name + -an --autoneg port auto negotiation mode + -S --adv-speeds port advertised speeds + -t --interface-type port interface type + -T --adv-interface-types port advertised interface types """ +import os import sys import argparse -from swsscommon.swsscommon import ConfigDBConnector, SonicDBConfig +# mock the redis for unit test purposes # +try: + if os.environ["UTILITIES_UNIT_TESTING"] == "1": + modules_path = os.path.join(os.path.dirname(__file__), "..") + test_path = os.path.join(modules_path, "tests") + sys.path.insert(0, modules_path) + sys.path.insert(0, test_path) + import mock_tables.dbconnector +except KeyError: + pass + +from swsscommon.swsscommon import ConfigDBConnector, SonicDBConfig, SonicV2Connector + +# APPL_DB constants PORT_TABLE_NAME = "PORT" PORT_SPEED_CONFIG_FIELD_NAME = "speed" PORT_FEC_CONFIG_FIELD_NAME = "fec" PORT_MTU_CONFIG_FIELD_NAME = "mtu" +PORT_AUTONEG_CONFIG_FIELD_NAME = "autoneg" +PORT_ADV_SPEEDS_CONFIG_FIELD_NAME = "adv_speeds" +PORT_INTERFACE_TYPE_CONFIG_FIELD_NAME = "interface_type" +PORT_ADV_INTERFACE_TYPES_CONFIG_FIELD_NAME = "adv_interface_types" + +# STATE_DB constants +PORT_STATE_TABLE_NAME = "PORT_TABLE" +PORT_STATE_SUPPORTED_SPEEDS = "supported_speeds" + + +VALID_INTERFACE_TYPE_SET = set(['CR','CR2','CR4','SR','SR2','SR4', + 'LR','LR4','KR','KR4','CAUI','GMII', + 'SFI','XLAUI','KR2','CAUI4','XAUI', + 'XFI','XGMII']) class portconfig(object): """ @@ -32,7 +64,7 @@ class portconfig(object): """ def __init__(self, verbose, port, namespace): self.verbose = verbose - + self.namespace = namespace # Set up db connections if namespace is None: self.db = ConfigDBConnector() @@ -54,6 +86,12 @@ class portconfig(object): def set_speed(self, port, speed): if self.verbose: print("Setting speed %s on port %s" % (speed, port)) + supported_speeds_str = self.get_supported_speeds(port) + if supported_speeds_str: + if supported_speeds_str.find(str(speed)) == -1: + print('Invalid speed specified: {}'.format(speed)) + print('Valid speeds:{}'.format(supported_speeds_str)) + exit(1) self.db.mod_entry(PORT_TABLE_NAME, port, {PORT_SPEED_CONFIG_FIELD_NAME: speed}) def set_fec(self, port, fec): @@ -66,6 +104,60 @@ class portconfig(object): print("Setting mtu %s on port %s" % (mtu, port)) self.db.mod_entry(PORT_TABLE_NAME, port, {PORT_MTU_CONFIG_FIELD_NAME: mtu}) + def set_autoneg(self, port, mode): + if self.verbose: + print("Setting autoneg %s on port %s" % (mode, port)) + mode = 'on' if mode == 'enabled' else 'off' + self.db.mod_entry(PORT_TABLE_NAME, port, {PORT_AUTONEG_CONFIG_FIELD_NAME: mode}) + + def set_adv_speeds(self, port, adv_speeds): + if self.verbose: + print("Setting adv_speeds %s on port %s" % (adv_speeds, port)) + + if adv_speeds != 'all': + supported_speeds_str = self.get_supported_speeds(port) + if supported_speeds_str: + supported_speeds = set(supported_speeds_str.split(',')) + config_speeds = set(adv_speeds.split(',')) + invalid_speeds = config_speeds - supported_speeds + if invalid_speeds: + print('Invalid speed specified: 
{}'.format(','.join(invalid_speeds)))
+                print('Valid speeds:{}'.format(supported_speeds_str))
+                exit(1)
+
+        self.db.mod_entry(PORT_TABLE_NAME, port, {PORT_ADV_SPEEDS_CONFIG_FIELD_NAME: adv_speeds})
+
+    def set_interface_type(self, port, interface_type):
+        if self.verbose:
+            print("Setting interface_type %s on port %s" % (interface_type, port))
+        if interface_type not in VALID_INTERFACE_TYPE_SET:
+            print("Invalid interface type specified: {}".format(interface_type))
+            print("Valid interface types:{}".format(','.join(VALID_INTERFACE_TYPE_SET)))
+            exit(1)
+        self.db.mod_entry(PORT_TABLE_NAME, port, {PORT_INTERFACE_TYPE_CONFIG_FIELD_NAME: interface_type})
+
+    def set_adv_interface_types(self, port, adv_interface_types):
+        if self.verbose:
+            print("Setting adv_interface_types %s on port %s" % (adv_interface_types, port))
+
+        if adv_interface_types != 'all':
+            config_interface_types = set(adv_interface_types.split(','))
+            invalid_interface_types = config_interface_types - VALID_INTERFACE_TYPE_SET
+            if invalid_interface_types:
+                print("Invalid interface type specified: {}".format(','.join(invalid_interface_types)))
+                print("Valid interface types:{}".format(','.join(VALID_INTERFACE_TYPE_SET)))
+                exit(1)
+        self.db.mod_entry(PORT_TABLE_NAME, port, {PORT_ADV_INTERFACE_TYPES_CONFIG_FIELD_NAME: adv_interface_types})
+
+    def get_supported_speeds(self, port):
+        if not self.namespace:
+            state_db = SonicV2Connector(host="127.0.0.1")
+        else:
+            state_db = SonicV2Connector(host="127.0.0.1", namespace=self.namespace, use_unix_socket_path=True)
+        state_db.connect(state_db.STATE_DB)
+        return state_db.get(state_db.STATE_DB, '{}|{}'.format(PORT_STATE_TABLE_NAME, port), PORT_STATE_SUPPORTED_SPEEDS)
+
+
 def main():
     parser = argparse.ArgumentParser(description='Set SONiC port parameters',
                                      formatter_class=argparse.RawTextHelpFormatter)
@@ -78,6 +170,14 @@ def main():
     parser.add_argument('-vv', '--verbose', action='store_true', help='Verbose output', default=False)
     parser.add_argument('-n', '--namespace', metavar='namespace details', type = str, required = False,
                         help = 'The asic namespace whose DB instance we need to connect', default=None)
+    parser.add_argument('-an', '--autoneg', type = str, required = False,
+                        help = 'port auto negotiation mode', default=None)
+    parser.add_argument('-S', '--adv-speeds', type = str, required = False,
+                        help = 'port advertised speeds', default=None)
+    parser.add_argument('-t', '--interface-type', type = str, required = False,
+                        help = 'port interface type', default=None)
+    parser.add_argument('-T', '--adv-interface-types', type = str, required = False,
+                        help = 'port advertised interface types', default=None)
     args = parser.parse_args()
     if args.namespace is not None:
@@ -89,13 +189,21 @@ def main():
     port = portconfig(args.verbose, args.port, args.namespace)
     if args.list:
         port.list_params(args.port)
-    elif args.speed or args.fec or args.mtu:
+    elif args.speed or args.fec or args.mtu or args.autoneg or args.adv_speeds or args.interface_type or args.adv_interface_types:
         if args.speed:
             port.set_speed(args.port, args.speed)
         if args.fec:
             port.set_fec(args.port, args.fec)
         if args.mtu:
             port.set_mtu(args.port, args.mtu)
+        if args.autoneg:
+            port.set_autoneg(args.port, args.autoneg)
+        if args.adv_speeds:
+            port.set_adv_speeds(args.port, args.adv_speeds)
+        if args.interface_type:
+            port.set_interface_type(args.port, args.interface_type)
+        if args.adv_interface_types:
+            port.set_adv_interface_types(args.port, args.adv_interface_types)
     else:
         parser.print_help()
         sys.exit(1)
diff --git
a/show/interfaces/__init__.py b/show/interfaces/__init__.py index a588a1e6c5..6d31890b22 100644 --- a/show/interfaces/__init__.py +++ b/show/interfaces/__init__.py @@ -465,3 +465,38 @@ def detailed(interface, period, verbose): cmd += " -i {}".format(interface) clicommon.run_command(cmd, display_cmd=verbose) + + +# +# autoneg group (show interfaces autoneg ...) +# +@interfaces.group(name='autoneg', cls=clicommon.AliasedGroup) +def autoneg(): + """Show interface autoneg information""" + pass + + +# 'autoneg status' subcommand ("show interfaces autoneg status") +@autoneg.command(name='status') +@click.argument('interfacename', required=False) +@multi_asic_util.multi_asic_click_options +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def autoneg_status(interfacename, namespace, display, verbose): + """Show interface autoneg status""" + + ctx = click.get_current_context() + + cmd = "intfutil -c autoneg" + + #ignore the display option when interface name is passed + if interfacename is not None: + interfacename = try_convert_interfacename_from_alias(ctx, interfacename) + + cmd += " -i {}".format(interfacename) + else: + cmd += " -d {}".format(display) + + if namespace is not None: + cmd += " -n {}".format(namespace) + + clicommon.run_command(cmd, display_cmd=verbose) diff --git a/tests/config_an_test.py b/tests/config_an_test.py new file mode 100644 index 0000000000..cfec47a5d1 --- /dev/null +++ b/tests/config_an_test.py @@ -0,0 +1,76 @@ +import click +import config.main as config +import operator +import os +import pytest +import sys + +from click.testing import CliRunner +from utilities_common.db import Db + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, modules_path) + + +@pytest.fixture(scope='module') +def ctx(scope='module'): + db = Db() + obj = {'config_db':db.cfgdb, 'namespace': ''} + yield obj + + +class TestConfigInterface(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "1" + + def test_config_autoneg(self, ctx): + self.basic_check("autoneg", ["Ethernet0", "enabled"], ctx) + self.basic_check("autoneg", ["Ethernet0", "disabled"], ctx) + self.basic_check("autoneg", ["Invalid", "enabled"], ctx, operator.ne) + self.basic_check("autoneg", ["Ethernet0", "invalid"], ctx, operator.ne) + + def test_config_speed(self, ctx): + self.basic_check("speed", ["Ethernet0", "40000"], ctx) + self.basic_check("speed", ["Invalid", "40000"], ctx, operator.ne) + # 50000 is not a supported speed + result = self.basic_check("speed", ["Ethernet0", "50000"], ctx, operator.ne) + assert 'Invalid speed' in result.output + assert 'Valid speeds:' in result.output + self.basic_check("speed", ["Ethernet0", "invalid"], ctx, operator.ne) + + def test_config_adv_speeds(self, ctx): + self.basic_check("advertised-speeds", ["Ethernet0", "40000,100000"], ctx) + self.basic_check("advertised-speeds", ["Ethernet0", "all"], ctx) + self.basic_check("advertised-speeds", ["Invalid", "40000,100000"], ctx, operator.ne) + result = self.basic_check("advertised-speeds", ["Ethernet0", "50000,100000"], ctx, operator.ne) + assert 'Invalid speed' in result.output + assert 'Valid speeds:' in result.output + + def test_config_type(self, ctx): + self.basic_check("type", ["Ethernet0", "CR4"], ctx) + self.basic_check("type", ["Invalid", "CR4"], ctx, operator.ne) + self.basic_check("type", 
["Ethernet0", ""], ctx, operator.ne) + result = self.basic_check("type", ["Ethernet0", "Invalid"], ctx, operator.ne) + assert 'Invalid interface type specified' in result.output + assert 'Valid interface types:' in result.output + + def test_config_adv_types(self, ctx): + self.basic_check("advertised-types", ["Ethernet0", "CR4,KR4"], ctx) + self.basic_check("advertised-types", ["Ethernet0", "all"], ctx) + self.basic_check("advertised-types", ["Invalid", "CR4,KR4"], ctx, operator.ne) + result = self.basic_check("advertised-types", ["Ethernet0", "CR4,Invalid"], ctx, operator.ne) + assert 'Invalid interface type specified' in result.output + assert 'Valid interface types:' in result.output + self.basic_check("advertised-types", ["Ethernet0", ""], ctx, operator.ne) + + def basic_check(self, command_name, para_list, ctx, op=operator.eq, expect_result=0): + runner = CliRunner() + result = runner.invoke(config.config.commands["interface"].commands[command_name], para_list, obj = ctx) + print(result.output) + assert op(result.exit_code, expect_result) + return result diff --git a/tests/db_migrator_input/config_db/non-default-config-expected.json b/tests/db_migrator_input/config_db/non-default-config-expected.json index a31a50b45e..46e75f26f7 100644 --- a/tests/db_migrator_input/config_db/non-default-config-expected.json +++ b/tests/db_migrator_input/config_db/non-default-config-expected.json @@ -1115,6 +1115,6 @@ "speed": "50000" }, "VERSIONS|DATABASE": { - "VERSION": "version_2_0_0" + "VERSION": "version_2_0_1" } } diff --git a/tests/db_migrator_input/config_db/port-an-expected.json b/tests/db_migrator_input/config_db/port-an-expected.json new file mode 100644 index 0000000000..766ea64a94 --- /dev/null +++ b/tests/db_migrator_input/config_db/port-an-expected.json @@ -0,0 +1,40 @@ +{ + "PORT|Ethernet0": { + "index": "0", + "lanes": "0,1", + "description": "etp1a", + "mtu": "9100", + "alias": "etp1a", + "pfc_asym": "off", + "speed": "10000", + "fec": "none", + "autoneg": "on", + "adv_speeds": "10000" + }, + "PORT|Ethernet2": { + "index": "0", + "lanes": "2,3", + "description": "Servers0:eth0", + "admin_status": "up", + "mtu": "9100", + "alias": "etp1b", + "pfc_asym": "off", + "speed": "25000", + "fec": "none", + "autoneg": "off" + }, + "PORT|Ethernet4": { + "index": "1", + "lanes": "4,5", + "description": "Servers1:eth0", + "admin_status": "up", + "mtu": "9100", + "alias": "etp2a", + "pfc_asym": "off", + "speed": "50000", + "fec": "none" + }, + "VERSIONS|DATABASE": { + "VERSION": "version_2_0_1" + } +} \ No newline at end of file diff --git a/tests/db_migrator_input/config_db/port-an-input.json b/tests/db_migrator_input/config_db/port-an-input.json new file mode 100644 index 0000000000..373c9ae989 --- /dev/null +++ b/tests/db_migrator_input/config_db/port-an-input.json @@ -0,0 +1,39 @@ +{ + "PORT|Ethernet0": { + "index": "0", + "lanes": "0,1", + "description": "etp1a", + "mtu": "9100", + "alias": "etp1a", + "pfc_asym": "off", + "speed": "10000", + "fec": "none", + "autoneg": "1" + }, + "PORT|Ethernet2": { + "index": "0", + "lanes": "2,3", + "description": "Servers0:eth0", + "admin_status": "up", + "mtu": "9100", + "alias": "etp1b", + "pfc_asym": "off", + "speed": "25000", + "fec": "none", + "autoneg": "0" + }, + "PORT|Ethernet4": { + "index": "1", + "lanes": "4,5", + "description": "Servers1:eth0", + "admin_status": "up", + "mtu": "9100", + "alias": "etp2a", + "pfc_asym": "off", + "speed": "50000", + "fec": "none" + }, + "VERSIONS|DATABASE": { + "VERSION": "version_2_0_0" + } +} \ No newline at end 
of file diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index bbff2a7666..293ee1d09f 100644 --- a/tests/db_migrator_test.py +++ b/tests/db_migrator_test.py @@ -139,6 +139,7 @@ def test_mellanox_buffer_migrator_for_cold_reboot(self, sku_version, topo): import db_migrator dbmgtr = db_migrator.DBMigrator(None) dbmgtr.migrate() + # Eventually, the config db should be migrated to the latest version expected_db = self.mock_dedicated_config_db(self.make_db_name_by_sku_topo_version(sku, topo, self.version_list[-1])) self.advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb) @@ -192,3 +193,25 @@ def test_mellanox_buffer_migrator_negative_nondefault_for_warm_reboot(self): input_config_db = 'non-default-config-input' input_appl_db = 'non-default-input' self.mellanox_buffer_migrator_warm_reboot_runner(input_config_db, input_appl_db, expected_config_db, expected_appl_db, False) + + +class TestAutoNegMigrator(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "2" + + @classmethod + def teardown_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "0" + dbconnector.dedicated_dbs['CONFIG_DB'] = None + + def test_port_autoneg_migrator(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'port-an-input') + import db_migrator + dbmgtr = db_migrator.DBMigrator(None) + dbmgtr.migrate() + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'port-an-expected') + expected_db = Db() + + assert dbmgtr.configDB.get_table('PORT') == expected_db.cfgdb.get_table('PORT') + assert dbmgtr.configDB.get_table('VERSIONS') == expected_db.cfgdb.get_table('VERSIONS') diff --git a/tests/intfutil_test.py b/tests/intfutil_test.py index c350c57e50..f14f3f7f15 100644 --- a/tests/intfutil_test.py +++ b/tests/intfutil_test.py @@ -62,6 +62,31 @@ Ethernet32 up up etp9 Servers7:eth0 """ +show_interface_auto_neg_status_output = """\ + Interface Auto-Neg Mode Speed Adv Speeds Type Adv Types Oper Admin +----------- --------------- ------- ------------ ------ ----------- ------ ------- + Ethernet0 enabled 25G 10G,50G CR4 CR4,CR2 down up + Ethernet32 disabled 40G all N/A all up up +Ethernet112 N/A 40G N/A N/A N/A up up +Ethernet116 N/A 40G N/A N/A N/A up up +Ethernet120 N/A 40G N/A N/A N/A up up +Ethernet124 N/A 40G N/A N/A N/A up up +""" + +show_interface_auto_neg_status_Ethernet0_output = """\ + Interface Auto-Neg Mode Speed Adv Speeds Type Adv Types Oper Admin +----------- --------------- ------- ------------ ------ ----------- ------ ------- + Ethernet0 enabled 25G 10G,50G CR4 CR4,CR2 down up +""" + +show_interface_auto_neg_status_eth9_output = """\ + Interface Auto-Neg Mode Speed Adv Speeds Type Adv Types Oper Admin +----------- --------------- ------- ------------ ------ ----------- ------ ------- + Ethernet32 disabled 40G all N/A all up up +""" + + + class TestIntfutil(TestCase): @classmethod def setup_class(cls): @@ -227,6 +252,29 @@ def test_single_subintf_status_alias_mode_verbose(self): os.environ["SONIC_CLI_IFACE_MODE"] = "default" + def test_show_interfaces_autoneg_status(self): + result = self.runner.invoke(show.cli.commands["interfaces"].commands["autoneg"].commands["status"], []) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_interface_auto_neg_status_output + + def test_show_interfaces_autoneg_status_Ethernet0(self): + result = self.runner.invoke(show.cli.commands["interfaces"].commands["autoneg"].commands["status"], 
["Ethernet0"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_interface_auto_neg_status_Ethernet0_output + + def test_show_interfaces_autoneg_status_etp9_in_alias_mode(self): + os.environ["SONIC_CLI_IFACE_MODE"] = "alias" + result = self.runner.invoke(show.cli.commands["interfaces"].commands["autoneg"].commands["status"], ["etp9"]) + os.environ["SONIC_CLI_IFACE_MODE"] = "default" + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == show_interface_auto_neg_status_eth9_output + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/mock_tables/appl_db.json b/tests/mock_tables/appl_db.json index a73fbb0e55..c82562579a 100644 --- a/tests/mock_tables/appl_db.json +++ b/tests/mock_tables/appl_db.json @@ -34,7 +34,11 @@ "pfc_asym": "off", "mtu": "9100", "fec": "rs", - "admin_status": "up" + "admin_status": "up", + "adv_speeds": "50000,10000", + "interface_type": "CR4", + "adv_interface_types": "CR4,CR2", + "autoneg": "on" }, "PORT_TABLE:Ethernet32": { "index": "8", @@ -46,7 +50,10 @@ "pfc_asym": "off", "mtu": "9100", "fec": "rs", - "admin_status": "up" + "admin_status": "up", + "autoneg": "off", + "adv_speeds": "all", + "adv_interface_types": "all" }, "PORT_TABLE:Ethernet112": { "index": "28", diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 45057b5cf4..b13c31c812 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -608,6 +608,9 @@ "ip_address": "192.168.1.2", "access": "False" }, + "PORT_TABLE|Ethernet0": { + "supported_speeds": "10000,25000,40000,100000" + }, "PCIE_DEVICE|00:01.0": { "correctable|BadDLLP": "0", "correctable|BadTLP": "0", From 331c5a5df0a28ddeedc2d6765bf2c0bbee34b3b8 Mon Sep 17 00:00:00 2001 From: Lawrence Lee Date: Mon, 10 May 2021 11:29:29 -0700 Subject: [PATCH 33/41] [config]: Use mod_entry when editing VLAN_INTERFACE (#1602) Replace set_entry with mod_entry when setting the proxy_arp value for a VLAN. Using set_entry will delete any other fields set for the key used, which is not desirable. 
Signed-off-by: Lawrence Lee --- config/vlan.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/vlan.py b/config/vlan.py index 36ef3da0ac..c3c29eb842 100644 --- a/config/vlan.py +++ b/config/vlan.py @@ -92,7 +92,7 @@ def config_proxy_arp(db, vid, mode): if not clicommon.is_valid_vlan_interface(db.cfgdb, vlan): ctx.fail("Interface {} does not exist".format(vlan)) - db.cfgdb.set_entry('VLAN_INTERFACE', vlan, {"proxy_arp": mode}) + db.cfgdb.mod_entry('VLAN_INTERFACE', vlan, {"proxy_arp": mode}) click.echo('Proxy ARP setting saved to ConfigDB') restart_ndppd() # From fde1d95549468545fe4477a9f21b955848cc72d7 Mon Sep 17 00:00:00 2001 From: shikenghua Date: Tue, 11 May 2021 07:55:26 +0800 Subject: [PATCH 34/41] [config][vxlan] fix 'vxlan evpn_nvo add' command error (#1511) * [config][vxlan]fix 'vxlan evpn_nvo add' command error Remove extra 'CONFIG_DB' argument from db.cfgdb.get_keys() Signed-off-by: shikenghua --- config/vxlan.py | 4 ++-- tests/vxlan_test.py | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/config/vxlan.py b/config/vxlan.py index 382ab72815..bfda1f4eff 100644 --- a/config/vxlan.py +++ b/config/vxlan.py @@ -47,7 +47,7 @@ def del_vxlan(db, vxlan_name): if(vxlan_count > 0): ctx.fail("Please delete the EVPN NVO configuration.") - vxlan_keys = db.cfgdb.get_keys('CONFIG_DB', "VXLAN_TUNNEL_MAP|*") + vxlan_keys = db.cfgdb.get_keys("VXLAN_TUNNEL_MAP|*") if not vxlan_keys: vxlan_count = 0 else: @@ -69,7 +69,7 @@ def vxlan_evpn_nvo(): def add_vxlan_evpn_nvo(db, nvo_name, vxlan_name): """Add NVO""" ctx = click.get_current_context() - vxlan_keys = db.cfgdb.get_keys('CONFIG_DB', "VXLAN_EVPN_NVO|*") + vxlan_keys = db.cfgdb.get_keys("VXLAN_EVPN_NVO|*") if not vxlan_keys: vxlan_count = 0 else: diff --git a/tests/vxlan_test.py b/tests/vxlan_test.py index 74819f9f61..b0997c5ee0 100644 --- a/tests/vxlan_test.py +++ b/tests/vxlan_test.py @@ -215,6 +215,11 @@ def test_config_vxlan_add(self): print(result.output) assert result.exit_code == 0 + result = runner.invoke(config.config.commands["vxlan"].commands["evpn_nvo"].commands["add"], ["nvo1", "vtep1"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + result = runner.invoke(show.cli.commands["vxlan"].commands["interface"], []) print(result.exit_code) print(result.output) From 9fc630c552d5f4cfa89fb9f84f40c975e1de9168 Mon Sep 17 00:00:00 2001 From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com> Date: Tue, 11 May 2021 18:43:53 +0300 Subject: [PATCH 35/41] [sonic_installer] temporary fix: don't migrate packages on aboot platforms (#1607) What I did Skip sonic package migration on aboot platform. How I did it Added a warning and skip the migration. How to verify it I changed AbootBootloader to OnieInstallerBootloader to test on my Onie device. I don't have Aboot device. 
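The gate added below is just a bootloader-type check ahead of package migration; schematically (same names as in the diff that follows):

    bootloader = get_bootloader()
    if isinstance(bootloader, AbootBootloader) and not skip_package_migration:
        # warn and skip until sonic-buildimage issue 7566 is resolved
        skip_package_migration = True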
---
 sonic_installer/main.py | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/sonic_installer/main.py b/sonic_installer/main.py
index 5f89878344..57cd5adda8 100644
--- a/sonic_installer/main.py
+++ b/sonic_installer/main.py
@@ -12,6 +12,7 @@
 from swsscommon.swsscommon import SonicV2Connector
 
 from .bootloader import get_bootloader
+from .bootloader.aboot import AbootBootloader
 from .common import (
     run_command, run_command_or_raise,
     IMAGE_PREFIX,
@@ -23,7 +24,7 @@
 from .exception import SonicRuntimeException
 
 SYSLOG_IDENTIFIER = "sonic-installer"
-LOG_ERR = logger.Logger.LOG_PRIORITY_ERROR 
+LOG_ERR = logger.Logger.LOG_PRIORITY_ERROR
 LOG_NOTICE = logger.Logger.LOG_PRIORITY_NOTICE
 
 # Global Config object
@@ -140,7 +141,7 @@ def echo_and_log(msg, priority=LOG_NOTICE, fg=None):
     else:
         click.secho(msg, fg=fg)
     log.log(priority, msg, False)
-    
+
 
 # Function which validates whether a given URL specifies an existent file
 # on a reachable remote machine. Will abort the current operation if not
@@ -323,7 +324,7 @@ def migrate_sonic_packages(bootloader, binary_image_version):
 
     with contextlib.ExitStack() as stack:
         def get_path(path):
-            """ Closure to get path by entering 
+            """ Closure to get path by entering
             a context manager of bootloader.get_path_in_image """
             return stack.enter_context(bootloader.get_path_in_image(new_image_dir, path))
 
@@ -433,6 +434,10 @@ def install(url, force, skip_migration=False, skip_package_migration=False):
 
         update_sonic_environment(bootloader, binary_image_version)
 
+        if isinstance(bootloader, AbootBootloader) and not skip_package_migration:
+            echo_and_log("Warning: SONiC package migration is not supported currently on aboot platform due to https://github.com/Azure/sonic-buildimage/issues/7566.", LOG_ERR, fg="red")
+            skip_package_migration = True
+
         if not skip_package_migration:
             migrate_sonic_packages(bootloader, binary_image_version)

From 9d5b9702d0651443a1eddf96a170030a9c00e7a3 Mon Sep 17 00:00:00 2001
From: Travis Van Duyn
Date: Tue, 11 May 2021 09:25:38 -0700
Subject: [PATCH 36/41] [Command-Reference.md] Document new SNMP show and config commands (#1600)

* [show][config] SNMP commands added
* fixed quotes for backticks
* Fixed run for runningconfiguration and Del for Delete
* changed admin@switch1 to admin@sonic and removed trailing prompt
* updated Usage statements for SNMP
* Updated for optional and required values
* updated to remove unneeded example () statements
* updated typos
---
 doc/Command-Reference.md | 312 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 311 insertions(+), 1 deletion(-)

diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md
index 6c7a474a5c..c72cc10f35 100644
--- a/doc/Command-Reference.md
+++ b/doc/Command-Reference.md
@@ -115,6 +115,9 @@
 * [sFlow](#sflow)
   * [sFlow Show commands](#sflow-show-commands)
   * [sFlow Config commands](#sflow-config-commands)
+* [SNMP](#snmp)
+  * [SNMP Show commands](#snmp-show-commands)
+  * [SNMP Config commands](#snmp-config-commands)
 * [Startup & Running Configuration](#startup--running-configuration)
   * [Startup Configuration](#startup-configuration)
   * [Running Configuration](#running-configuration)
@@ -156,6 +159,7 @@
 
 | Version | Modification Date | Details |
 | --- | --- | --- |
+| v6 | May-06-2021 | Add SNMP show and config commands |
 | v5 | Nov-05-2020 | Add document for console commands |
 | v4 | Oct-17-2019 | Unify usage statements and other formatting; Replace tabs with spaces; Modify heading sizes; Fix spelling, grammar and other errors; Fix organization of new
commands | | v3 | Jun-26-2019 | Update based on 201904 (build#19) release, "config interface" command changes related to interfacename order, FRR/Quagga show command changes, platform specific changes, ACL show changes and few formatting changes | @@ -3198,7 +3202,7 @@ The "errors" subcommand is used to display the interface errors. The "rates" subcommand is used to disply only the interface rates. -- Exmaple: +- Example: ``` admin@str-s6000-acs-11:/usr/bin$ show int counters rates IFACE STATE RX_OK RX_BPS RX_PPS RX_UTIL TX_OK TX_BPS TX_PPS TX_UTIL @@ -6691,6 +6695,312 @@ This command is used to set the counter polling interval. Default is 20 seconds. Go Back To [Beginning of the document](#) or [Beginning of this section](#sflow) +## SNMP + +### SNMP Show commands + +**show runningconfiguration snmp** + +This command displays the global SNMP configuration that includes the location, contact, community, and user settings. + +- Usage: + ``` + show runningconfiguration snmp + ``` + +- Example: + ``` + admin@sonic:~$ show runningconfiguration snmp + Location + ------------ + Emerald City + + + SNMP_CONTACT SNMP_CONTACT_EMAIL + -------------- -------------------- + joe joe@contoso.com + + + Community String Community Type + ------------------ ---------------- + Jack RW + + + User Permission Type Type Auth Type Auth Password Encryption Type Encryption Password + ------ ----------------- ------ ----------- --------------- ----------------- --------------------- + Travis RO Priv SHA TravisAuthPass AES TravisEncryptPass + ``` + +**show runningconfiguration snmp location** + +This command displays the SNMP location setting. + +- Usage: + ``` + show runningconfiguration snmp location + ``` + +- Example: + ``` + admin@sonic:~$ show runningconfiguration snmp location + Location + ------------ + Emerald City + ``` + +- Usage: + ``` + show runningconfiguration snmp location --json + ``` + +- Example: + ``` + admin@sonic:~$ show runningconfiguration snmp location --json + {'Location': 'Emerald City'} + ``` + +**show runningconfiguration snmp contact** + +This command displays the SNMP contact setting. + +- Usage: + ``` + show runningconfiguration snmp contact + ``` + +- Example: + ``` + admin@sonic:~$ show runningconfiguration snmp contact + Contact Contact Email + --------- --------------- + joe joe@contoso.com + ``` + +- Usage: + ``` + show runningconfiguration snmp contact --json + ``` + +- Example: + ``` + admin@sonic:~$ show runningconfiguration snmp contact --json + {'joe': 'joe@contoso.com'} + ``` + +**show runningconfiguration snmp community** + +This command display the SNMP community settings. + +- Usage: + ``` + show runningconfiguration snmp community + ``` + +- Example: + ``` + admin@sonic:~$ show runningconfiguration snmp community + Community String Community Type + ------------------ ---------------- + Jack RW + ``` + +- Usage: + ``` + show runningconfiguration snmp community --json + ``` + +- Example: + ``` + admin@sonic:~$ show runningconfiguration snmp community --json + {'Jack': {'TYPE': 'RW'}} + ``` + +**show runningconfiguration snmp user** + +This command display the SNMP user settings. 
+ +- Usage: + ``` + show runningconfiguration snmp user + ``` + +- Example: + ``` + admin@sonic:~$ show runningconfiguration snmp user + User Permission Type Type Auth Type Auth Password Encryption Type Encryption Password + ------ ----------------- ------ ----------- --------------- ----------------- --------------------- + Travis RO Priv SHA TravisAuthPass AES TravisEncryptPass + ``` + +- Usage: + ``` + show runningconfiguration snmp user --json + ``` + +- Example: + ``` + admin@sonic:~$ show runningconfiguration snmp user --json + {'Travis': {'SNMP_USER_TYPE': 'Priv', 'SNMP_USER_PERMISSION': 'RO', 'SNMP_USER_AUTH_TYPE': 'SHA', 'SNMP_USER_AUTH_PASSWORD': 'TravisAuthPass', 'SNMP_USER_ENCRYPTION_TYPE': 'AES', 'SNMP_USER_ENCRYPTION_PASSWORD': 'TravisEncryptPass'}} + ``` + + +### SNMP Config commands + +This sub-section explains how to configure SNMP. + +**config snmp location add/del/modify** + +This command is used to add, delete, or modify the SNMP location. + +- Usage: + ``` + config snmp location (add | del | modify) + ``` + +- Example (Add new SNMP location "Emerald City" if it does not already exist): + ``` + admin@sonic:~$ sudo config snmp location add Emerald City + SNMP Location Emerald City has been added to configuration + Restarting SNMP service... + ``` + +- Example (Delete SNMP location "Emerald City" if it already exists): + ``` + admin@sonic:~$ sudo config snmp location del Emerald City + SNMP Location Emerald City removed from configuration + Restarting SNMP service... + ``` + +- Example (Modify SNMP location "Emerald City" to "Redmond"): + ``` + admin@sonic:~$ sudo config snmp location modify Redmond + SNMP location Redmond modified in configuration + Restarting SNMP service... + ``` + +**config snmp contact add/del/modify** + +This command is used to add, delete, or modify the SNMP contact. + +- Usage: + ``` + config snmp contact add + ``` + +- Example: + ``` + admin@sonic:~$ sudo config snmp contact add joe joe@contoso.com + Contact name joe and contact email joe@contoso.com have been added to configuration + Restarting SNMP service... + ``` + +- Usage: + ``` + config snmp contact del + ``` + +- Example: + ``` + admin@sonic:~$ sudo config snmp contact del joe + SNMP contact joe removed from configuration + Restarting SNMP service... + ``` + +- Usage: + ``` + config snmp contact modify + ``` + +- Example: + ``` + admin@sonic:~$ sudo config snmp contact modify test test@contoso.com + SNMP contact test and contact email test@contoso.com updated + Restarting SNMP service... + ``` + +**config snmp community add/del/replace** + +This command is used to add, delete, or replace the SNMP community. + +- Usage: + ``` + config snmp community add (RO | RW) + ``` + +- Example: + ``` + admin@sonic:~$ sudo config snmp community add testcomm ro + SNMP community testcomm added to configuration + Restarting SNMP service... + ``` + +- Usage: + ``` + config snmp community del + ``` + +- Example: + ``` + admin@sonic:~$ sudo config snmp community del testcomm + SNMP community testcomm removed from configuration + Restarting SNMP service... + ``` + +- Usage: + ``` + config snmp community replace + ``` + +- Example: + ``` + admin@sonic:~$ sudo config snmp community replace testcomm newtestcomm + SNMP community newtestcomm added to configuration + SNMP community newtestcomm replace community testcomm + Restarting SNMP service... + ``` + +**config snmp user add/del** + +This command is used to add or delete the SNMP user for SNMPv3. 
+ +- Usage: + ``` + config snmp user add (noAuthNoPriv | AuthNoPriv | Priv) (RO | RW) [[(MD5 | SHA | MMAC-SHA-2) ] [(DES |AES) ]] + ``` + +- Example: + ``` + admin@sonic:~$ sudo config snmp user add testuser1 noauthnopriv ro + SNMP user testuser1 added to configuration + Restarting SNMP service... + ``` + +- Example: + ``` + admin@sonic:~$ sudo config snmp user add testuser2 authnopriv ro sha testuser2_auth_pass + SNMP user testuser2 added to configuration + Restarting SNMP service... + ``` + +- Example: + ``` + admin@sonic:~$ sudo config snmp user add testuser3 priv rw md5 testuser3_auth_pass aes testuser3_encrypt_pass + SNMP user testuser3 added to configuration + Restarting SNMP service... + ``` + +- Usage: + ``` + config snmp user del + ``` + +- Example: + ``` + admin@sonic:~$ sudo config snmp user del testuser1 + SNMP user testuser1 removed from configuration + Restarting SNMP service... + ``` + ## Startup & Running Configuration ### Startup Configuration From 0904b85aee15791032b337bbea61e54000382a74 Mon Sep 17 00:00:00 2001 From: Venkatesan Mahalingam <34145258+venkatmahalingam@users.noreply.github.com> Date: Tue, 11 May 2021 21:45:55 -0700 Subject: [PATCH 37/41] Add 'default' option for sFlow. (#1606) * Add 'default' option for sFlow. Signed-off-by: Venkatesan Mahalingam --- config/main.py | 19 +++++++++++++------ tests/sflow_test.py | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 6 deletions(-) diff --git a/config/main.py b/config/main.py index d0c7c4258b..969e58d594 100644 --- a/config/main.py +++ b/config/main.py @@ -5012,7 +5012,7 @@ def polling_int(ctx, interval): config_db.mod_entry('SFLOW', 'global', sflow_tbl['global']) def is_valid_sample_rate(rate): - return rate in range(256, 8388608 + 1) + return rate.isdigit() and int(rate) in range(256, 8388608 + 1) # @@ -5070,24 +5070,31 @@ def disable(ctx, ifname): # @interface.command('sample-rate') @click.argument('ifname', metavar='', required=True, type=str) -@click.argument('rate', metavar='', required=True, type=int) +@click.argument('rate', metavar='', required=True, type=str) @click.pass_context def sample_rate(ctx, ifname, rate): config_db = ctx.obj['db'] if not interface_name_is_valid(config_db, ifname) and ifname != 'all': click.echo('Invalid interface name') return - if not is_valid_sample_rate(rate): - click.echo('Error: Sample rate must be between 256 and 8388608') + if not is_valid_sample_rate(rate) and rate != 'default': + click.echo('Error: Sample rate must be between 256 and 8388608 or default') return sess_dict = config_db.get_table('SFLOW_SESSION') - if sess_dict and ifname in sess_dict: + if sess_dict and ifname in sess_dict.keys(): + if rate == 'default': + if 'sample_rate' not in sess_dict[ifname]: + return + del sess_dict[ifname]['sample_rate'] + config_db.set_entry('SFLOW_SESSION', ifname, sess_dict[ifname]) + return sess_dict[ifname]['sample_rate'] = rate config_db.mod_entry('SFLOW_SESSION', ifname, sess_dict[ifname]) else: - config_db.mod_entry('SFLOW_SESSION', ifname, {'sample_rate': rate}) + if rate != 'default': + config_db.mod_entry('SFLOW_SESSION', ifname, {'sample_rate': rate}) # diff --git a/tests/sflow_test.py b/tests/sflow_test.py index 0e15f1e027..ecb2782534 100644 --- a/tests/sflow_test.py +++ b/tests/sflow_test.py @@ -290,6 +290,45 @@ def test_config_enable_all_intf(self): sflowSession = db.cfgdb.get_table('SFLOW_SESSION') assert sflowSession["all"]["admin_state"] == "up" + def test_config_sflow_intf_sample_rate_default(self): + db = Db() + runner = CliRunner() + 
obj = {'db':db.cfgdb}
+
+        # mock interface_name_is_valid
+        config.interface_name_is_valid = mock.MagicMock(return_value = True)
+
+        result_out1 = runner.invoke(show.cli.commands["sflow"].commands["interface"], [], obj=Db())
+        print(result_out1.exit_code, result_out1.output)
+        assert result_out1.exit_code == 0
+
+        # set sample-rate to 2500
+        result = runner.invoke(config.config.commands["sflow"].
+                               commands["interface"].commands["sample-rate"],
+                               ["Ethernet2", "2500"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code == 0
+
+        # we cannot use 'show sflow interface', because 'show sflow interface'
+        # gets data from appDB, we need to fetch data from configDB for verification
+        sflowSession = db.cfgdb.get_table('SFLOW_SESSION')
+        assert sflowSession["Ethernet2"]["sample_rate"] == "2500"
+
+        # set sample-rate to default
+        result = runner.invoke(config.config.commands["sflow"].
+                               commands["interface"].commands["sample-rate"],
+                               ["Ethernet2", "default"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code == 0
+
+        result_out2 = runner.invoke(show.cli.commands["sflow"].commands["interface"], [], obj=Db())
+        print(result_out2.exit_code, result_out2.output)
+        assert result_out2.exit_code == 0
+        assert result_out1.output == result_out2.output
+
+        return
+
+
     @classmethod
     def teardown_class(cls):
         print("TEARDOWN")

From 4e45d9c03d6eefdfa28edf34e16a5d4d583d775a Mon Sep 17 00:00:00 2001
From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com>
Date: Wed, 12 May 2021 19:55:14 +0300
Subject: [PATCH 38/41] [vlan] remove dhcp-relay as dhcp-relay commands will come as a plugin (#1378)

- What I did
Remove dhcp relay commands from sonic-utilities. dhcp-relay commands will come as a plugin with dhcp-relay docker installation. See Azure/SONiC#682

- How I did it
Remove dhcp-relay commands from vlan. Make "show vlan brief" command table output extendable.

- How to verify it
Install dhcp-relay docker as app.ext. Verify that "config vlan dhcp-relay" and "show vlan brief" show dhcp data.
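For reference, this is how a plugin can now restore a column without patching show/vlan.py; a minimal sketch against the new VlanBrief hook (the column name and getter are illustrative, using the (cfg, db) context tuple that "show vlan brief" passes to every getter):

    from show.vlan import VlanBrief

    def get_dhcp_helper_addresses(ctx, vlan):
        """Return the VLAN's dhcp_servers entries, one address per line."""
        cfg, _ = ctx
        vlan_data, _, _ = cfg
        return '\n'.join(vlan_data[vlan].get('dhcp_servers', []))

    VlanBrief.register_column('DHCP Helper Address', get_dhcp_helper_addresses)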
--- config/vlan.py | 73 ---------- show/vlan.py | 197 +++++++++++++++----------- tests/vlan_test.py | 335 ++++++++++++--------------------------------- 3 files changed, 207 insertions(+), 398 deletions(-) diff --git a/config/vlan.py b/config/vlan.py index c3c29eb842..9cdb0fc348 100644 --- a/config/vlan.py +++ b/config/vlan.py @@ -185,76 +185,3 @@ def del_vlan_member(db, vid, port): db.cfgdb.set_entry('VLAN_MEMBER', (vlan, port), None) -@vlan.group(cls=clicommon.AbbreviationGroup, name='dhcp_relay') -def vlan_dhcp_relay(): - pass - -@vlan_dhcp_relay.command('add') -@click.argument('vid', metavar='', required=True, type=int) -@click.argument('dhcp_relay_destination_ip', metavar='', required=True) -@clicommon.pass_db -def add_vlan_dhcp_relay_destination(db, vid, dhcp_relay_destination_ip): - """ Add a destination IP address to the VLAN's DHCP relay """ - - ctx = click.get_current_context() - - if not clicommon.is_ipaddress(dhcp_relay_destination_ip): - ctx.fail('{} is invalid IP address'.format(dhcp_relay_destination_ip)) - - vlan_name = 'Vlan{}'.format(vid) - vlan = db.cfgdb.get_entry('VLAN', vlan_name) - if len(vlan) == 0: - ctx.fail("{} doesn't exist".format(vlan_name)) - - dhcp_relay_dests = vlan.get('dhcp_servers', []) - if dhcp_relay_destination_ip in dhcp_relay_dests: - click.echo("{} is already a DHCP relay destination for {}".format(dhcp_relay_destination_ip, vlan_name)) - return - - dhcp_relay_dests.append(dhcp_relay_destination_ip) - vlan['dhcp_servers'] = dhcp_relay_dests - db.cfgdb.set_entry('VLAN', vlan_name, vlan) - click.echo("Added DHCP relay destination address {} to {}".format(dhcp_relay_destination_ip, vlan_name)) - try: - click.echo("Restarting DHCP relay service...") - clicommon.run_command("systemctl stop dhcp_relay", display_cmd=False) - clicommon.run_command("systemctl reset-failed dhcp_relay", display_cmd=False) - clicommon.run_command("systemctl start dhcp_relay", display_cmd=False) - except SystemExit as e: - ctx.fail("Restart service dhcp_relay failed with error {}".format(e)) - -@vlan_dhcp_relay.command('del') -@click.argument('vid', metavar='', required=True, type=int) -@click.argument('dhcp_relay_destination_ip', metavar='', required=True) -@clicommon.pass_db -def del_vlan_dhcp_relay_destination(db, vid, dhcp_relay_destination_ip): - """ Remove a destination IP address from the VLAN's DHCP relay """ - - ctx = click.get_current_context() - - if not clicommon.is_ipaddress(dhcp_relay_destination_ip): - ctx.fail('{} is invalid IP address'.format(dhcp_relay_destination_ip)) - - vlan_name = 'Vlan{}'.format(vid) - vlan = db.cfgdb.get_entry('VLAN', vlan_name) - if len(vlan) == 0: - ctx.fail("{} doesn't exist".format(vlan_name)) - - dhcp_relay_dests = vlan.get('dhcp_servers', []) - if not dhcp_relay_destination_ip in dhcp_relay_dests: - ctx.fail("{} is not a DHCP relay destination for {}".format(dhcp_relay_destination_ip, vlan_name)) - - dhcp_relay_dests.remove(dhcp_relay_destination_ip) - if len(dhcp_relay_dests) == 0: - del vlan['dhcp_servers'] - else: - vlan['dhcp_servers'] = dhcp_relay_dests - db.cfgdb.set_entry('VLAN', vlan_name, vlan) - click.echo("Removed DHCP relay destination address {} from {}".format(dhcp_relay_destination_ip, vlan_name)) - try: - click.echo("Restarting DHCP relay service...") - clicommon.run_command("systemctl stop dhcp_relay", display_cmd=False) - clicommon.run_command("systemctl reset-failed dhcp_relay", display_cmd=False) - clicommon.run_command("systemctl start dhcp_relay", display_cmd=False) - except SystemExit as e: - ctx.fail("Restart 
service dhcp_relay failed with error {}".format(e)) diff --git a/show/vlan.py b/show/vlan.py index df4149fca9..b27f282a49 100644 --- a/show/vlan.py +++ b/show/vlan.py @@ -4,105 +4,141 @@ import utilities_common.cli as clicommon + @click.group(cls=clicommon.AliasedGroup) def vlan(): """Show VLAN information""" pass + +def get_vlan_id(ctx, vlan): + vlan_prefix, vid = vlan.split('Vlan') + return vid + + +def get_vlan_ip_address(ctx, vlan): + cfg, _ = ctx + _, vlan_ip_data, _ = cfg + ip_address = "" + for key in vlan_ip_data: + if not clicommon.is_ip_prefix_in_key(key): + continue + ifname, address = key + if vlan == ifname: + ip_address += "\n{}".format(address) + + return ip_address + + +def get_vlan_ports(ctx, vlan): + cfg, db = ctx + _, _, vlan_ports_data = cfg + vlan_ports = [] + iface_alias_converter = clicommon.InterfaceAliasConverter(db) + # Here natsorting is important in relation to another + # column which prints port tagging mode. + # If we sort both in the same way using same keys + # we will result in right order in both columns. + # This should be fixed by cli code autogeneration tool + # and we won't need this specific approach with + # VlanBrief.COLUMNS anymore. + for key in natsorted(list(vlan_ports_data.keys())): + ports_key, ports_value = key + if vlan != ports_key: + continue + + if clicommon.get_interface_naming_mode() == "alias": + ports_value = iface_alias_converter.name_to_alias(ports_value) + + vlan_ports.append(ports_value) + + return '\n'.join(vlan_ports) + + +def get_vlan_ports_tagging(ctx, vlan): + cfg, db = ctx + _, _, vlan_ports_data = cfg + vlan_ports_tagging = [] + # Here natsorting is important in relation to another + # column which prints vlan ports. + # If we sort both in the same way using same keys + # we will result in right order in both columns. + # This should be fixed by cli code autogeneration tool + # and we won't need this specific approach with + # VlanBrief.COLUMNS anymore. + for key in natsorted(list(vlan_ports_data.keys())): + ports_key, ports_value = key + if vlan != ports_key: + continue + + tagging_value = vlan_ports_data[key]["tagging_mode"] + vlan_ports_tagging.append(tagging_value) + + return '\n'.join(vlan_ports_tagging) + + +def get_proxy_arp(ctx, vlan): + cfg, _ = ctx + _, vlan_ip_data, _ = cfg + proxy_arp = "disabled" + for key in vlan_ip_data: + if clicommon.is_ip_prefix_in_key(key): + continue + if vlan == key: + proxy_arp = vlan_ip_data[key].get("proxy_arp", "disabled") + + return proxy_arp + + +class VlanBrief: + """ This class is used as a namespace to + define columns for "show vlan brief" command. + The usage of this class is for external plugin + (in this case dhcp-relay) to append new columns + to this list. + """ + + COLUMNS = [ + ("VLAN ID", get_vlan_id), + ("IP Address", get_vlan_ip_address), + ("Ports", get_vlan_ports), + ("Port Tagging", get_vlan_ports_tagging), + ("Proxy ARP", get_proxy_arp) + ] + + @classmethod + def register_column(cls, column_name, callback): + """ Adds a new column to "vlan brief" output. + Expected to be used from plugins code to extend + this command with additional VLAN fields. 
""" + + cls.COLUMNS.append((column_name, callback)) + + @vlan.command() @click.option('--verbose', is_flag=True, help="Enable verbose output") @clicommon.pass_db def brief(db, verbose): """Show all bridge information""" - header = ['VLAN ID', 'IP Address', 'Ports', 'Port Tagging', 'DHCP Helper Address', 'Proxy ARP'] + header = [colname for colname, getter in VlanBrief.COLUMNS] body = [] # Fetching data from config db for VLAN, VLAN_INTERFACE and VLAN_MEMBER - vlan_dhcp_helper_data = db.cfgdb.get_table('VLAN') + vlan_data = db.cfgdb.get_table('VLAN') vlan_ip_data = db.cfgdb.get_table('VLAN_INTERFACE') vlan_ports_data = db.cfgdb.get_table('VLAN_MEMBER') + vlan_cfg = (vlan_data, vlan_ip_data, vlan_ports_data) - # Defining dictionaries for DHCP Helper address, Interface Gateway IP, - # VLAN ports and port tagging - vlan_dhcp_helper_dict = {} - vlan_ip_dict = {} - vlan_ports_dict = {} - vlan_tagging_dict = {} - vlan_proxy_arp_dict = {} - - # Parsing DHCP Helpers info - for key in natsorted(list(vlan_dhcp_helper_data.keys())): - try: - if vlan_dhcp_helper_data[key]['dhcp_servers']: - vlan_dhcp_helper_dict[key.strip('Vlan')] = vlan_dhcp_helper_data[key]['dhcp_servers'] - except KeyError: - vlan_dhcp_helper_dict[key.strip('Vlan')] = " " - - # Parsing VLAN Gateway info - for key in vlan_ip_data: - if clicommon.is_ip_prefix_in_key(key): - interface_key = key[0].strip("Vlan") - interface_value = key[1] - - if interface_key in vlan_ip_dict: - vlan_ip_dict[interface_key].append(interface_value) - else: - vlan_ip_dict[interface_key] = [interface_value] - else: - interface_key = key.strip("Vlan") - if 'proxy_arp' in vlan_ip_data[key]: - proxy_arp_status = vlan_ip_data[key]['proxy_arp'] - else: - proxy_arp_status = "disabled" - - vlan_proxy_arp_dict[interface_key] = proxy_arp_status - - + for vlan in natsorted(vlan_data): + row = [] + for column in VlanBrief.COLUMNS: + column_name, getter = column + row.append(getter((vlan_cfg, db), vlan)) + body.append(row) - iface_alias_converter = clicommon.InterfaceAliasConverter(db) - - # Parsing VLAN Ports info - for key in natsorted(list(vlan_ports_data.keys())): - ports_key = key[0].strip("Vlan") - ports_value = key[1] - ports_tagging = vlan_ports_data[key]['tagging_mode'] - if ports_key in vlan_ports_dict: - if clicommon.get_interface_naming_mode() == "alias": - ports_value = iface_alias_converter.name_to_alias(ports_value) - vlan_ports_dict[ports_key].append(ports_value) - else: - if clicommon.get_interface_naming_mode() == "alias": - ports_value = iface_alias_converter.name_to_alias(ports_value) - vlan_ports_dict[ports_key] = [ports_value] - if ports_key in vlan_tagging_dict: - vlan_tagging_dict[ports_key].append(ports_tagging) - else: - vlan_tagging_dict[ports_key] = [ports_tagging] - - # Printing the following dictionaries in tablular forms: - # vlan_dhcp_helper_dict={}, vlan_ip_dict = {}, vlan_ports_dict = {} - # vlan_tagging_dict = {} - for key in natsorted(list(vlan_dhcp_helper_dict.keys())): - if key not in vlan_ip_dict: - ip_address = "" - else: - ip_address = ','.replace(',', '\n').join(vlan_ip_dict[key]) - if key not in vlan_ports_dict: - vlan_ports = "" - else: - vlan_ports = ','.replace(',', '\n').join((vlan_ports_dict[key])) - if key not in vlan_dhcp_helper_dict: - dhcp_helpers = "" - else: - dhcp_helpers = ','.replace(',', '\n').join(vlan_dhcp_helper_dict[key]) - if key not in vlan_tagging_dict: - vlan_tagging = "" - else: - vlan_tagging = ','.replace(',', '\n').join((vlan_tagging_dict[key])) - vlan_proxy_arp = vlan_proxy_arp_dict.get(key, 
"disabled") - body.append([key, ip_address, vlan_ports, vlan_tagging, dhcp_helpers, vlan_proxy_arp]) click.echo(tabulate(body, header, tablefmt="grid")) + @vlan.command() @clicommon.pass_db def config(db): @@ -141,3 +177,4 @@ def tablelize(keys, data): header = ['Name', 'VID', 'Member', 'Mode'] click.echo(tabulate(tablelize(keys, data), header)) + diff --git a/tests/vlan_test.py b/tests/vlan_test.py index ad3ff9fbb4..a7f533a824 100644 --- a/tests/vlan_test.py +++ b/tests/vlan_test.py @@ -10,79 +10,71 @@ from importlib import reload show_vlan_brief_output="""\ -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| VLAN ID | IP Address | Ports | Port Tagging | DHCP Helper Address | Proxy ARP | -+===========+=================+=================+================+=======================+=============+ -| 1000 | 192.168.0.1/21 | Ethernet4 | untagged | 192.0.0.1 | disabled | -| | fc02:1000::1/64 | Ethernet8 | untagged | 192.0.0.2 | | -| | | Ethernet12 | untagged | 192.0.0.3 | | -| | | Ethernet16 | untagged | 192.0.0.4 | | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 2000 | 192.168.0.10/21 | Ethernet24 | untagged | 192.0.0.1 | enabled | -| | fc02:1011::1/64 | Ethernet28 | untagged | 192.0.0.2 | | -| | | | | 192.0.0.3 | | -| | | | | 192.0.0.4 | | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 3000 | | | | | disabled | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 4000 | | PortChannel1001 | tagged | | disabled | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ ++-----------+-----------------+-----------------+----------------+-------------+ +| VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | ++===========+=================+=================+================+=============+ +| 1000 | 192.168.0.1/21 | Ethernet4 | untagged | disabled | +| | fc02:1000::1/64 | Ethernet8 | untagged | | +| | | Ethernet12 | untagged | | +| | | Ethernet16 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 2000 | 192.168.0.10/21 | Ethernet24 | untagged | enabled | +| | fc02:1011::1/64 | Ethernet28 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 3000 | | | | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 4000 | | PortChannel1001 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ """ show_vlan_brief_in_alias_mode_output="""\ -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| VLAN ID | IP Address | Ports | Port Tagging | DHCP Helper Address | Proxy ARP | -+===========+=================+=================+================+=======================+=============+ -| 1000 | 192.168.0.1/21 | etp2 | untagged | 192.0.0.1 | disabled | -| | fc02:1000::1/64 | etp3 | untagged | 192.0.0.2 | | -| | | etp4 | untagged | 192.0.0.3 | | -| | | etp5 | untagged | 192.0.0.4 | | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 2000 | 192.168.0.10/21 | etp7 | untagged | 192.0.0.1 | enabled | -| | fc02:1011::1/64 | etp8 | untagged | 192.0.0.2 | | -| | | | | 192.0.0.3 | | -| | | | | 192.0.0.4 | | 
-+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 3000 | | | | | disabled | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 4000 | | PortChannel1001 | tagged | | disabled | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ ++-----------+-----------------+-----------------+----------------+-------------+ +| VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | ++===========+=================+=================+================+=============+ +| 1000 | 192.168.0.1/21 | etp2 | untagged | disabled | +| | fc02:1000::1/64 | etp3 | untagged | | +| | | etp4 | untagged | | +| | | etp5 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 2000 | 192.168.0.10/21 | etp7 | untagged | enabled | +| | fc02:1011::1/64 | etp8 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 3000 | | | | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 4000 | | PortChannel1001 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ """ show_vlan_brief_empty_output="""\ -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| VLAN ID | IP Address | Ports | Port Tagging | DHCP Helper Address | Proxy ARP | -+===========+=================+=================+================+=======================+=============+ -| 2000 | 192.168.0.10/21 | Ethernet24 | untagged | 192.0.0.1 | enabled | -| | fc02:1011::1/64 | Ethernet28 | untagged | 192.0.0.2 | | -| | | | | 192.0.0.3 | | -| | | | | 192.0.0.4 | | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 3000 | | | | | disabled | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 4000 | | PortChannel1001 | tagged | | disabled | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ ++-----------+-----------------+-----------------+----------------+-------------+ +| VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | ++===========+=================+=================+================+=============+ +| 2000 | 192.168.0.10/21 | Ethernet24 | untagged | enabled | +| | fc02:1011::1/64 | Ethernet28 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 3000 | | | | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 4000 | | PortChannel1001 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ """ show_vlan_brief_with_portchannel_output="""\ -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| VLAN ID | IP Address | Ports | Port Tagging | DHCP Helper Address | Proxy ARP | -+===========+=================+=================+================+=======================+=============+ -| 1000 | 192.168.0.1/21 | Ethernet4 | untagged | 192.0.0.1 | disabled | -| | fc02:1000::1/64 | Ethernet8 | untagged | 192.0.0.2 | | -| | | Ethernet12 | untagged | 192.0.0.3 | | -| | | Ethernet16 | untagged | 192.0.0.4 | | -| | | PortChannel1001 | untagged | | | 
-+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 2000 | 192.168.0.10/21 | Ethernet24 | untagged | 192.0.0.1 | enabled | -| | fc02:1011::1/64 | Ethernet28 | untagged | 192.0.0.2 | | -| | | | | 192.0.0.3 | | -| | | | | 192.0.0.4 | | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 3000 | | | | | disabled | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 4000 | | PortChannel1001 | tagged | | disabled | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ ++-----------+-----------------+-----------------+----------------+-------------+ +| VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | ++===========+=================+=================+================+=============+ +| 1000 | 192.168.0.1/21 | Ethernet4 | untagged | disabled | +| | fc02:1000::1/64 | Ethernet8 | untagged | | +| | | Ethernet12 | untagged | | +| | | Ethernet16 | untagged | | +| | | PortChannel1001 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 2000 | 192.168.0.10/21 | Ethernet24 | untagged | enabled | +| | fc02:1011::1/64 | Ethernet28 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 3000 | | | | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 4000 | | PortChannel1001 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ """ show_vlan_config_output="""\ @@ -111,79 +103,44 @@ Vlan4000 4000 PortChannel1001 tagged """ -config_vlan_add_dhcp_relay_output="""\ -Added DHCP relay destination address 192.0.0.100 to Vlan1000 -Restarting DHCP relay service... -""" - -config_vlan_del_dhcp_relay_output="""\ -Removed DHCP relay destination address 192.0.0.100 from Vlan1000 -Restarting DHCP relay service... 
-""" - -show_vlan_brief_output_with_new_dhcp_relay_address="""\ -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| VLAN ID | IP Address | Ports | Port Tagging | DHCP Helper Address | Proxy ARP | -+===========+=================+=================+================+=======================+=============+ -| 1000 | 192.168.0.1/21 | Ethernet4 | untagged | 192.0.0.1 | disabled | -| | fc02:1000::1/64 | Ethernet8 | untagged | 192.0.0.2 | | -| | | Ethernet12 | untagged | 192.0.0.3 | | -| | | Ethernet16 | untagged | 192.0.0.4 | | -| | | | | 192.0.0.100 | | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 2000 | 192.168.0.10/21 | Ethernet24 | untagged | 192.0.0.1 | enabled | -| | fc02:1011::1/64 | Ethernet28 | untagged | 192.0.0.2 | | -| | | | | 192.0.0.3 | | -| | | | | 192.0.0.4 | | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 3000 | | | | | disabled | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 4000 | | PortChannel1001 | tagged | | disabled | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -""" - config_add_del_vlan_and_vlan_member_output="""\ -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| VLAN ID | IP Address | Ports | Port Tagging | DHCP Helper Address | Proxy ARP | -+===========+=================+=================+================+=======================+=============+ -| 1000 | 192.168.0.1/21 | Ethernet4 | untagged | 192.0.0.1 | disabled | -| | fc02:1000::1/64 | Ethernet8 | untagged | 192.0.0.2 | | -| | | Ethernet12 | untagged | 192.0.0.3 | | -| | | Ethernet16 | untagged | 192.0.0.4 | | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 1001 | | Ethernet20 | untagged | | disabled | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 2000 | 192.168.0.10/21 | Ethernet24 | untagged | 192.0.0.1 | enabled | -| | fc02:1011::1/64 | Ethernet28 | untagged | 192.0.0.2 | | -| | | | | 192.0.0.3 | | -| | | | | 192.0.0.4 | | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 3000 | | | | | disabled | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 4000 | | PortChannel1001 | tagged | | disabled | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ ++-----------+-----------------+-----------------+----------------+-------------+ +| VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | ++===========+=================+=================+================+=============+ +| 1000 | 192.168.0.1/21 | Ethernet4 | untagged | disabled | +| | fc02:1000::1/64 | Ethernet8 | untagged | | +| | | Ethernet12 | untagged | | +| | | Ethernet16 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 1001 | | Ethernet20 | untagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 2000 | 192.168.0.10/21 | Ethernet24 | untagged | enabled | +| | fc02:1011::1/64 | Ethernet28 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 3000 | | | | disabled | 
++-----------+-----------------+-----------------+----------------+-------------+ +| 4000 | | PortChannel1001 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ """ config_add_del_vlan_and_vlan_member_in_alias_mode_output="""\ -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| VLAN ID | IP Address | Ports | Port Tagging | DHCP Helper Address | Proxy ARP | -+===========+=================+=================+================+=======================+=============+ -| 1000 | 192.168.0.1/21 | etp2 | untagged | 192.0.0.1 | disabled | -| | fc02:1000::1/64 | etp3 | untagged | 192.0.0.2 | | -| | | etp4 | untagged | 192.0.0.3 | | -| | | etp5 | untagged | 192.0.0.4 | | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 1001 | | etp6 | untagged | | disabled | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 2000 | 192.168.0.10/21 | etp7 | untagged | 192.0.0.1 | enabled | -| | fc02:1011::1/64 | etp8 | untagged | 192.0.0.2 | | -| | | | | 192.0.0.3 | | -| | | | | 192.0.0.4 | | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 3000 | | | | | disabled | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ -| 4000 | | PortChannel1001 | tagged | | disabled | -+-----------+-----------------+-----------------+----------------+-----------------------+-------------+ ++-----------+-----------------+-----------------+----------------+-------------+ +| VLAN ID | IP Address | Ports | Port Tagging | Proxy ARP | ++===========+=================+=================+================+=============+ +| 1000 | 192.168.0.1/21 | etp2 | untagged | disabled | +| | fc02:1000::1/64 | etp3 | untagged | | +| | | etp4 | untagged | | +| | | etp5 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 1001 | | etp6 | untagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 2000 | 192.168.0.10/21 | etp7 | untagged | enabled | +| | fc02:1011::1/64 | etp8 | untagged | | ++-----------+-----------------+-----------------+----------------+-------------+ +| 3000 | | | | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ +| 4000 | | PortChannel1001 | tagged | disabled | ++-----------+-----------------+-----------------+----------------+-------------+ """ class TestVlan(object): @classmethod @@ -503,118 +460,6 @@ def test_config_add_del_vlan_and_vlan_member_in_alias_mode(self): os.environ['SONIC_CLI_IFACE_MODE'] = "default" - def test_config_vlan_add_dhcp_relay_with_nonexist_vlanid(self): - runner = CliRunner() - - with mock.patch('utilities_common.cli.run_command') as mock_run_command: - result = runner.invoke(config.config.commands["vlan"].commands["dhcp_relay"].commands["add"], - ["1001", "192.0.0.100"]) - print(result.exit_code) - print(result.output) - # traceback.print_tb(result.exc_info[2]) - assert result.exit_code != 0 - assert "Error: Vlan1001 doesn't exist" in result.output - assert mock_run_command.call_count == 0 - - def test_config_vlan_add_dhcp_relay_with_invalid_vlanid(self): - runner = CliRunner() - - with mock.patch('utilities_common.cli.run_command') as mock_run_command: - result = 
runner.invoke(config.config.commands["vlan"].commands["dhcp_relay"].commands["add"], - ["4096", "192.0.0.100"]) - print(result.exit_code) - print(result.output) - # traceback.print_tb(result.exc_info[2]) - assert result.exit_code != 0 - assert "Error: Vlan4096 doesn't exist" in result.output - assert mock_run_command.call_count == 0 - - def test_config_vlan_add_dhcp_relay_with_invalid_ip(self): - runner = CliRunner() - - with mock.patch('utilities_common.cli.run_command') as mock_run_command: - result = runner.invoke(config.config.commands["vlan"].commands["dhcp_relay"].commands["add"], - ["1000", "192.0.0.1000"]) - print(result.exit_code) - print(result.output) - # traceback.print_tb(result.exc_info[2]) - assert result.exit_code != 0 - assert "Error: 192.0.0.1000 is invalid IP address" in result.output - assert mock_run_command.call_count == 0 - - def test_config_vlan_add_dhcp_relay_with_exist_ip(self): - runner = CliRunner() - - with mock.patch('utilities_common.cli.run_command') as mock_run_command: - result = runner.invoke(config.config.commands["vlan"].commands["dhcp_relay"].commands["add"], - ["1000", "192.0.0.1"]) - print(result.exit_code) - print(result.output) - # traceback.print_tb(result.exc_info[2]) - assert result.exit_code == 0 - assert "192.0.0.1 is already a DHCP relay destination for Vlan1000" in result.output - assert mock_run_command.call_count == 0 - - def test_config_vlan_add_del_dhcp_relay_dest(self): - runner = CliRunner() - db = Db() - - # add new relay dest - with mock.patch("utilities_common.cli.run_command") as mock_run_command: - result = runner.invoke(config.config.commands["vlan"].commands["dhcp_relay"].commands["add"], - ["1000", "192.0.0.100"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - assert result.output == config_vlan_add_dhcp_relay_output - assert mock_run_command.call_count == 3 - - # show output - result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) - print(result.output) - assert result.output == show_vlan_brief_output_with_new_dhcp_relay_address - - # del relay dest - with mock.patch("utilities_common.cli.run_command") as mock_run_command: - result = runner.invoke(config.config.commands["vlan"].commands["dhcp_relay"].commands["del"], - ["1000", "192.0.0.100"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 - assert result.output == config_vlan_del_dhcp_relay_output - assert mock_run_command.call_count == 3 - - # show output - result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) - print(result.output) - assert result.output == show_vlan_brief_output - - def test_config_vlan_remove_nonexist_dhcp_relay_dest(self): - runner = CliRunner() - - with mock.patch('utilities_common.cli.run_command') as mock_run_command: - result = runner.invoke(config.config.commands["vlan"].commands["dhcp_relay"].commands["del"], - ["1000", "192.0.0.100"]) - print(result.exit_code) - print(result.output) - # traceback.print_tb(result.exc_info[2]) - assert result.exit_code != 0 - assert "Error: 192.0.0.100 is not a DHCP relay destination for Vlan1000" in result.output - assert mock_run_command.call_count == 0 - - def test_config_vlan_remove_dhcp_relay_dest_with_nonexist_vlanid(self): - runner = CliRunner() - - with mock.patch('utilities_common.cli.run_command') as mock_run_command: - result = runner.invoke(config.config.commands["vlan"].commands["dhcp_relay"].commands["del"], - ["1001", "192.0.0.1"]) - print(result.exit_code) - 
print(result.output)
-            # traceback.print_tb(result.exc_info[2])
-            assert result.exit_code != 0
-            assert "Error: Vlan1001 doesn't exist" in result.output
-            assert mock_run_command.call_count == 0
-
     def test_config_vlan_proxy_arp_with_nonexist_vlan_intf_table(self):
         modes = ["enabled", "disabled"]
         runner = CliRunner()

From a089e53f8b22f269da529c7de6672097c96812c0 Mon Sep 17 00:00:00 2001
From: Praveen Chaudhary
Date: Wed, 12 May 2021 11:30:04 -0700
Subject: [PATCH 39/41] [DPB]: Shut down interface before dynamic port breakout (#1303)

Changes:
-- Shut down the interfaces after config validation during Dynamic Port Breakout.
-- Validate the ports to be deleted before calling the breakOutPort API.

Signed-off-by: Praveen Chaudhary pchaudhary@linkedin.com

Fixes Azure/sonic-buildimage#6646, Azure/sonic-buildimage#6631
---
 config/config_mgmt.py     | 30 +++++++++++++++++++++++++++---
 config/main.py            | 37 ++++++-------------------------------
 tests/config_mgmt_test.py | 38 ++++++++++++++++++++++++++++++++++++--
 3 files changed, 69 insertions(+), 36 deletions(-)

diff --git a/config/config_mgmt.py b/config/config_mgmt.py
index cc64b35d97..9b2021bef0 100644
--- a/config/config_mgmt.py
+++ b/config/config_mgmt.py
@@ -369,9 +369,12 @@ def breakOutPort(self, delPorts=list(), portJson=dict(), force=False, \
             if_name_map, if_oid_map = port_util.get_interface_oid_map(dataBase)
             self.sysLog(syslog.LOG_DEBUG, 'if_name_map {}'.format(if_name_map))
 
-            # If we are here, then get ready to update the Config DB, Update
-            # deletion of Config first, then verify in Asic DB for port deletion,
-            # then update addition of ports in config DB.
+            # If we are here, then get ready to update the Config DB as below:
+            # -- shutdown the ports,
+            # -- Update deletion of ports in Config DB,
+            # -- verify Asic DB for port deletion,
+            # -- then update addition of ports in config DB.
+            self._shutdownIntf(delPorts)
             self.writeConfigDB(delConfigToLoad)
             # Verify in Asic DB,
             self._verifyAsicDB(db=dataBase, ports=delPorts, portMap=if_name_map, \
@@ -507,6 +510,27 @@ def _addPorts(self, portJson=dict(), loadDefConfig=True):
 
         return configToLoad, True
 
+    def _shutdownIntf(self, ports):
+        """
+        Based on the list of ports, create a dict to shut the ports down and update Config DB.
+        Shut down all the interfaces before deletion.
+
+        Parameters:
+            ports(list): list of ports, which are getting deleted due to DPB.
+
+        Returns:
+            void
+        """
+        shutDownConf = dict(); shutDownConf["PORT"] = dict()
+        for intf in ports:
+            shutDownConf["PORT"][intf] = {"admin_status": "down"}
+        self.sysLog(msg='shutdown Interfaces: {}'.format(shutDownConf))
+
+        if len(shutDownConf["PORT"]):
+            self.writeConfigDB(shutDownConf)
+
+        return
+
     def _mergeConfigs(self, D1, D2, uniqueKeys=True):
         '''
         Merge D2 dict in D1 dict, Note both first and second dict will change.
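For review convenience, here is a minimal standalone sketch of the CONFIG_DB payload that the new `_shutdownIntf()` composes before port deletion. The `build_shutdown_config` helper name is illustrative only; in the patch this logic is inline and the result is written via `writeConfigDB()`:

```python
# Illustrative sketch of the shutdown payload _shutdownIntf() builds for
# ports that are about to be deleted during Dynamic Port Breakout.
def build_shutdown_config(ports):
    conf = {"PORT": {}}
    for intf in ports:
        # each to-be-deleted port is first marked administratively down
        conf["PORT"][intf] = {"admin_status": "down"}
    return conf

assert build_shutdown_config(["Ethernet8", "Ethernet9"]) == {
    "PORT": {
        "Ethernet8": {"admin_status": "down"},
        "Ethernet9": {"admin_status": "down"},
    }
}
```

Writing this dict through `writeConfigDB()` is why `test_shutdownIntf_call` below expects three `writeConfigDB` calls instead of two: shutdown, deletion, then addition.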
diff --git a/config/main.py b/config/main.py index 969e58d594..cdf8b14b6e 100644 --- a/config/main.py +++ b/config/main.py @@ -114,32 +114,6 @@ def _get_breakout_options(ctx, args, incomplete): all_mode_options = [str(c) for c in breakout_mode_options if incomplete in c] return all_mode_options -def shutdown_interfaces(ctx, del_intf_dict): - """ shut down all the interfaces before deletion """ - for intf in del_intf_dict: - config_db = ctx.obj['config_db'] - if clicommon.get_interface_naming_mode() == "alias": - interface_name = interface_alias_to_name(config_db, intf) - if interface_name is None: - click.echo("[ERROR] interface name is None!") - return False - - if interface_name_is_valid(config_db, intf) is False: - click.echo("[ERROR] Interface name is invalid. Please enter a valid interface name!!") - return False - - port_dict = config_db.get_table('PORT') - if not port_dict: - click.echo("port_dict is None!") - return False - - if intf in port_dict: - config_db.mod_entry("PORT", intf, {"admin_status": "down"}) - else: - click.secho("[ERROR] Could not get the correct interface name, exiting", fg='red') - return False - return True - def _validate_interface_mode(ctx, breakout_cfg_file, interface_name, target_brkout_mode, cur_brkout_mode): """ Validate Parent interface and user selected mode before starting deletion or addition process """ breakout_file_input = readJsonFile(breakout_cfg_file)["interfaces"] @@ -3181,12 +3155,7 @@ def breakout(ctx, interface_name, mode, verbose, force_remove_dependencies, load del_intf_dict = {intf: del_ports[intf]["speed"] for intf in del_ports} if del_intf_dict: - """ shut down all the interface before deletion """ - ret = shutdown_interfaces(ctx, del_intf_dict) - if not ret: - raise click.Abort() click.echo("\nPorts to be deleted : \n {}".format(json.dumps(del_intf_dict, indent=4))) - else: click.secho("[ERROR] del_intf_dict is None! No interfaces are there to be deleted", fg='red') raise click.Abort() @@ -3213,6 +3182,12 @@ def breakout(ctx, interface_name, mode, verbose, force_remove_dependencies, load del_intf_dict.pop(item) add_intf_dict.pop(item) + # validate all del_ports before calling breakOutPort + for intf in del_intf_dict.keys(): + if not interface_name_is_valid(config_db, intf): + click.secho("[ERROR] Interface name {} is invalid".format(intf)) + raise click.Abort() + click.secho("\nFinal list of ports to be deleted : \n {} \nFinal list of ports to be added : \n {}".format(json.dumps(del_intf_dict, indent=4), json.dumps(add_intf_dict, indent=4), fg='green', blink=True)) if not add_intf_dict: click.secho("[ERROR] add_intf_dict is None or empty! 
No interfaces are there to be added", fg='red')
diff --git a/tests/config_mgmt_test.py b/tests/config_mgmt_test.py
index 39e3870990..a86ab9e1aa 100644
--- a/tests/config_mgmt_test.py
+++ b/tests/config_mgmt_test.py
@@ -78,6 +78,41 @@ def test_break_out(self):
         self.dpb_port4_4x25G_2x50G_f_l(curConfig)
         return
 
+    def test_shutdownIntf_call(self):
+        '''
+        Verify that _shutdownIntf() is called with deleted ports while calling
+        breakOutPort()
+        '''
+        curConfig = deepcopy(configDbJson)
+        cmdpb = self.config_mgmt_dpb(curConfig)
+
+        # create ARGS
+        dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \
+            curMode='1x50G(2)+2x25G(2)', newMode='2x50G')
+
+        # Try to breakout and see if _shutdownIntf is called
+        deps, ret = cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, \
+            force=True, loadDefConfig=False)
+
+        # verify correct function call to writeConfigDB after _shutdownIntf()
+        assert cmdpb.writeConfigDB.call_count == 3
+        print(cmdpb.writeConfigDB.call_args_list[0])
+        (args, kwargs) = cmdpb.writeConfigDB.call_args_list[0]
+        print(args)
+
+        # in case of tuple also, we should have only one element
+        if type(args) == tuple:
+            args = args[0]
+        assert "PORT" in args
+
+        # {"admin_status": "down"} should be set for all ports in dPorts
+        assert len(args["PORT"]) == len(dPorts)
+        # each port should have {"admin_status": "down"}
+        for port in args["PORT"].keys():
+            assert args["PORT"][port]['admin_status'] == 'down'
+
+        return
+
     def tearDown(self):
         try:
             os.remove(config_mgmt.CONFIG_DB_JSON_FILE)
@@ -229,7 +264,7 @@ def checkResult(self, cmdpb, delConfig, addConfig):
         void
         '''
         calls = [mock.call(delConfig), mock.call(addConfig)]
-        assert cmdpb.writeConfigDB.call_count == 2
+        assert cmdpb.writeConfigDB.call_count == 3
        cmdpb.writeConfigDB.assert_has_calls(calls, any_order=False)
         return
 
@@ -497,7 +532,6 @@ def dpb_port8_4x25G_2x50G_f_l(self, curConfig):
                     }
                 }
             }
-        assert cmdpb.writeConfigDB.call_count == 2
         self.checkResult(cmdpb, delConfig, addConfig)
         self.postUpdateConfig(curConfig, delConfig, addConfig)
         return

From ad801bfb81633812b4aa25f45bdd555a27121845 Mon Sep 17 00:00:00 2001
From: Dmytro
Date: Thu, 13 May 2021 04:57:27 +0300
Subject: [PATCH 40/41] [config]Static routes to config_db (#1534)

* Write static routes to config_db

* Update configuration with "ifname" as "null"

Signed-off-by: d-dashkov
---
 config/main.py              | 286 +++++++++++++++++++---------
 doc/Command-Reference.md    |  78 ++++++++
 tests/static_routes_test.py | 365 ++++++++++++++++++++++++++++++++++++
 3 files changed, 641 insertions(+), 88 deletions(-)
 mode change 100644 => 100755 config/main.py
 create mode 100644 tests/static_routes_test.py

diff --git a/config/main.py b/config/main.py
old mode 100644
new mode 100755
index cdf8b14b6e..3daf93c73d
--- a/config/main.py
+++ b/config/main.py
@@ -11,6 +11,7 @@ import subprocess
 import sys
 import time
+import itertools
 
 from generic_config_updater.generic_updater import GenericUpdater, ConfigFormat
 from socket import AF_INET, AF_INET6
@@ -761,6 +762,66 @@ def validate_mirror_session_config(config_db, session_name, dst_port, src_port,
     return True
 
+def cli_sroute_to_config(ctx, command_str, strict_nh = True):
+    if len(command_str) < 2 or len(command_str) > 9:
+        ctx.fail("argument is not in pattern prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>!")
+    if "prefix" not in command_str:
+        ctx.fail("argument is incomplete, prefix not found!")
+    if "nexthop" not in command_str and strict_nh:
+        ctx.fail("argument is incomplete, nexthop not found!")
+
+    nexthop_str = None
+    config_entry = {}
+    vrf_name = ""
+
+    if "nexthop" in command_str:
+        idx = command_str.index("nexthop")
+        prefix_str = command_str[:idx]
+        nexthop_str = command_str[idx:]
+    else:
+        prefix_str = command_str[:]
+
+    if prefix_str:
+        if 'prefix' in prefix_str and 'vrf' in prefix_str:
+            # prefix_str: ['prefix', 'vrf', Vrf-name, ip]
+            vrf_name = prefix_str[2]
+            ip_prefix = prefix_str[3]
+        elif 'prefix' in prefix_str:
+            # prefix_str: ['prefix', ip]
+            ip_prefix = prefix_str[1]
+        else:
+            ctx.fail("prefix is not in pattern!")
+
+    if nexthop_str:
+        if 'nexthop' in nexthop_str and 'vrf' in nexthop_str:
+            # nexthop_str: ['nexthop', 'vrf', Vrf-name, ip]
+            config_entry["nexthop"] = nexthop_str[3]
+            config_entry["nexthop-vrf"] = nexthop_str[2]
+        elif 'nexthop' in nexthop_str and 'dev' in nexthop_str:
+            # nexthop_str: ['nexthop', 'dev', ifname]
+            config_entry["ifname"] = nexthop_str[2]
+        elif 'nexthop' in nexthop_str:
+            # nexthop_str: ['nexthop', ip]
+            config_entry["nexthop"] = nexthop_str[1]
+        else:
+            ctx.fail("nexthop is not in pattern!")
+
+    try:
+        ipaddress.ip_network(ip_prefix)
+        if 'nexthop' in config_entry:
+            nh = config_entry['nexthop'].split(',')
+            for ip in nh:
+                ipaddress.ip_address(ip)
+    except ValueError:
+        ctx.fail("ip address is not valid.")
+
+    if not vrf_name == "":
+        key = vrf_name + "|" + ip_prefix
+    else:
+        key = ip_prefix
+
+    return key, config_entry
+
 def update_sonic_environment():
     """Prepare sonic environment variable using SONiC environment template file.
     """
@@ -3932,111 +3993,160 @@ def del_vrf_vni_map(ctx, vrfname):
 @click.pass_context
 def route(ctx):
     """route-related configuration tasks"""
-    pass
+    config_db = ConfigDBConnector()
+    config_db.connect()
+    ctx.obj = {}
+    ctx.obj['config_db'] = config_db
 
 @route.command('add', context_settings={"ignore_unknown_options":True})
 @click.argument('command_str', metavar='prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>', nargs=-1, type=click.Path())
 @click.pass_context
 def add_route(ctx, command_str):
     """Add route command"""
-    if len(command_str) < 4 or len(command_str) > 9:
-        ctx.fail("argument is not in pattern prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>!")
-    if "prefix" not in command_str:
-        ctx.fail("argument is incomplete, prefix not found!")
-    if "nexthop" not in command_str:
-        ctx.fail("argument is incomplete, nexthop not found!")
-    for i in range(0, len(command_str)):
-        if "nexthop" == command_str[i]:
-            prefix_str = command_str[:i]
-            nexthop_str = command_str[i:]
-    vrf_name = ""
-    cmd = 'sudo vtysh -c "configure terminal" -c "ip route'
-    if prefix_str:
-        if len(prefix_str) == 2:
-            prefix_mask = prefix_str[1]
-            cmd += ' {}'.format(prefix_mask)
-        elif len(prefix_str) == 4:
-            vrf_name = prefix_str[2]
-            prefix_mask = prefix_str[3]
-            cmd += ' {}'.format(prefix_mask)
+    config_db = ctx.obj['config_db']
+    key, route = cli_sroute_to_config(ctx, command_str)
+
+    # If defined intf name, check if it belongs to interface
+    if 'ifname' in route:
+        if (not route['ifname'] in config_db.get_keys('VLAN_INTERFACE') and
+            not route['ifname'] in config_db.get_keys('INTERFACE') and
+            not route['ifname'] in config_db.get_keys('PORTCHANNEL_INTERFACE') and
+            not route['ifname'] == 'null'):
+            ctx.fail('interface {} doesn`t exist'.format(route['ifname']))
+
+    entry_counter = 1
+    if 'nexthop' in route:
+        entry_counter = len(route['nexthop'].split(','))
+
+    # Alignment in case the command contains several nexthop ip
+    for i in range(entry_counter):
+        if 'nexthop-vrf' in route:
+            if i > 0:
+                vrf = route['nexthop-vrf'].split(',')[0]
+                route['nexthop-vrf'] += ',' + vrf
             else:
-            ctx.fail("prefix is not in pattern!")
-        if nexthop_str:
-            if len(nexthop_str) == 2:
-                ip = nexthop_str[1]
-                if vrf_name == "":
-                    cmd += ' {}'.format(ip)
-                else:
-                    cmd += ' {} vrf {}'.format(ip, vrf_name)
-            elif len(nexthop_str) == 3:
-                dev_name = nexthop_str[2]
-                if vrf_name == "":
-                    cmd += ' {}'.format(dev_name)
+                route['nexthop-vrf'] = ''
+
+            if not 'nexthop' in route:
+                route['nexthop'] = ''
+
+            if 'ifname' in route:
+                if i > 0:
+                    route['ifname'] += ','
+                else:
+                    route['ifname'] = ''
+
+            # Set default values for distance and blackhole because the command doesn't have such an option
+            if 'distance' in route:
+                route['distance'] += ',0'
+            else:
+                route['distance'] = '0'
+
+            if 'blackhole' in route:
+                route['blackhole'] += ',false'
+            else:
+                # If the user configure with "ifname" as "null", set 'blackhole' attribute as true.
+                if 'ifname' in route and route['ifname'] == 'null':
+                    route['blackhole'] = 'true'
             else:
-                cmd += ' {} vrf {}'.format(dev_name, vrf_name)
-            elif len(nexthop_str) == 4:
-                vrf_name_dst = nexthop_str[2]
-                ip = nexthop_str[3]
-                if vrf_name == "":
-                    cmd += ' {} nexthop-vrf {}'.format(ip, vrf_name_dst)
+                    route['blackhole'] = 'false'
+
+    # Check if exist entry with key
+    keys = config_db.get_keys('STATIC_ROUTE')
+    if key in keys:
+        # If exist update current entry
+        current_entry = config_db.get_entry('STATIC_ROUTE', key)
+
+        for entry in ['nexthop', 'nexthop-vrf', 'ifname', 'distance', 'blackhole']:
+            if not entry in current_entry:
+                current_entry[entry] = ''
+            if entry in route:
+                current_entry[entry] += ',' + route[entry]
             else:
-                cmd += ' {} vrf {} nexthop-vrf {}'.format(ip, vrf_name, vrf_name_dst)
-        else:
-            ctx.fail("nexthop is not in pattern!")
-    cmd += '"'
-    clicommon.run_command(cmd)
+                current_entry[entry] += ','
+
+        config_db.set_entry("STATIC_ROUTE", key, current_entry)
+    else:
+        config_db.set_entry("STATIC_ROUTE", key, route)
 
 @route.command('del', context_settings={"ignore_unknown_options":True})
 @click.argument('command_str', metavar='prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>', nargs=-1, type=click.Path())
 @click.pass_context
 def del_route(ctx, command_str):
     """Del route command"""
-    if len(command_str) < 4 or len(command_str) > 9:
-        ctx.fail("argument is not in pattern prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>!")
-    if "prefix" not in command_str:
-        ctx.fail("argument is incomplete, prefix not found!")
-    if "nexthop" not in command_str:
-        ctx.fail("argument is incomplete, nexthop not found!")
-    for i in range(0, len(command_str)):
-        if "nexthop" == command_str[i]:
-            prefix_str = command_str[:i]
-            nexthop_str = command_str[i:]
-    vrf_name = ""
-    cmd = 'sudo vtysh -c "configure terminal" -c "no ip route'
-    if prefix_str:
-        if len(prefix_str) == 2:
-            prefix_mask = prefix_str[1]
-            cmd += ' {}'.format(prefix_mask)
-        elif len(prefix_str) == 4:
-            vrf_name = prefix_str[2]
-            prefix_mask = prefix_str[3]
-            cmd += ' {}'.format(prefix_mask)
-        else:
-            ctx.fail("prefix is not in pattern!")
-        if nexthop_str:
-            if len(nexthop_str) == 2:
-                ip = nexthop_str[1]
-                if vrf_name == "":
-                    cmd += ' {}'.format(ip)
-                else:
-                    cmd += ' {} vrf {}'.format(ip, vrf_name)
-            elif len(nexthop_str) == 3:
-                dev_name = nexthop_str[2]
-                if vrf_name == "":
-                    cmd += ' {}'.format(dev_name)
-                else:
-                    cmd += ' {} vrf {}'.format(dev_name, vrf_name)
-            elif len(nexthop_str) == 4:
-                vrf_name_dst = nexthop_str[2]
-                ip = nexthop_str[3]
-                if vrf_name == "":
-                    cmd += ' {} nexthop-vrf {}'.format(ip, vrf_name_dst)
+    config_db = ctx.obj['config_db']
+    key, route = cli_sroute_to_config(ctx, command_str, strict_nh=False)
+    keys = config_db.get_keys('STATIC_ROUTE')
+    prefix_tuple = 
tuple(key.split('|')) + if not key in keys and not prefix_tuple in keys: + ctx.fail('Route {} doesnt exist'.format(key)) + else: + # If not defined nexthop or intf name remove entire route + if not 'nexthop' in route and not 'ifname' in route: + config_db.set_entry("STATIC_ROUTE", key, None) + return + + current_entry = config_db.get_entry('STATIC_ROUTE', key) + + nh = [''] + nh_vrf = [''] + ifname = [''] + distance = [''] + blackhole = [''] + if 'nexthop' in current_entry: + nh = current_entry['nexthop'].split(',') + if 'nexthop-vrf' in current_entry: + nh_vrf = current_entry['nexthop-vrf'].split(',') + if 'ifname' in current_entry: + ifname = current_entry['ifname'].split(',') + if 'distance' in current_entry: + distance = current_entry['distance'].split(',') + if 'blackhole' in current_entry: + blackhole = current_entry['blackhole'].split(',') + + # Zip data from config_db into tuples + # {'nexthop': '10.0.0.2,20.0.0.2', 'vrf-nexthop': ',Vrf-RED', 'ifname': ','} + # [('10.0.0.2', '', ''), ('20.0.0.2', 'Vrf-RED', '')] + nh_zip = list(itertools.zip_longest(nh, nh_vrf, ifname, fillvalue='')) + cli_tuple = () + + # Create tuple from CLI argument + # config route add prefix 1.4.3.4/32 nexthop vrf Vrf-RED 20.0.0.2 + # ('20.0.0.2', 'Vrf-RED', '') + for entry in ['nexthop', 'nexthop-vrf', 'ifname']: + if entry in route: + cli_tuple += (route[entry],) else: - cmd += ' {} vrf {} nexthop-vrf {}'.format(ip, vrf_name, vrf_name_dst) + cli_tuple += ('',) + + if cli_tuple in nh_zip: + # If cli tuple is in config_db find its index and delete from lists + idx = nh_zip.index(cli_tuple) + if len(nh) - 1 >= idx: + del nh[idx] + if len(nh_vrf) - 1 >= idx: + del nh_vrf[idx] + if len(ifname) - 1 >= idx: + del ifname[idx] + if len(distance) - 1 >= idx: + del distance[idx] + if len(blackhole) - 1 >= idx: + del blackhole[idx] else: - ctx.fail("nexthop is not in pattern!") - cmd += '"' - clicommon.run_command(cmd) + ctx.fail('Not found {} in {}'.format(cli_tuple, key)) + + if (len(nh) == 0 or (len(nh) == 1 and nh[0] == '')) and \ + (len(ifname) == 0 or (len(ifname) == 1 and ifname[0] == '')): + # If there are no nexthop and ifname fields in the current record, delete it + config_db.set_entry("STATIC_ROUTE", key, None) + else: + # Otherwise it still has ECMP nexthop or ifname fields, so compose it from the lists into db + current_entry['nexthop'] = ','.join((str(e)) for e in nh) + current_entry['nexthop-vrf'] = ','.join((str(e)) for e in nh_vrf) + current_entry['ifname'] = ','.join((str(e)) for e in ifname) + current_entry['distance'] = ','.join((str(e)) for e in distance) + current_entry['blackhole'] = ','.join((str(e)) for e in blackhole) + config_db.set_entry("STATIC_ROUTE", key, current_entry) # # 'acl' group ('config acl ...') diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index c72cc10f35..8a247d40e3 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -121,6 +121,7 @@ * [Startup & Running Configuration](#startup--running-configuration) * [Startup Configuration](#startup-configuration) * [Running Configuration](#running-configuration) +* [Static routing](#static-routing) * [Syslog](#syslog) * [Syslog config commands](#syslog-config-commands) * [System State](#system-state) @@ -7184,6 +7185,83 @@ This command displays the running configuration of the snmp module. 
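Before the documentation and test additions that follow, the nexthop bookkeeping in `del_route` above is easiest to follow with concrete data. Here is a minimal sketch of the alignment step, using the example values from the patch's own comments (`STATIC_ROUTE` fields are comma-separated, one position per nexthop):

```python
# How 'config route del' matches one CLI nexthop against the comma-separated
# STATIC_ROUTE fields: split each field, zip the lists into per-nexthop
# tuples, then look up the tuple built from the CLI arguments.
import itertools

entry = {'nexthop': '10.0.0.2,20.0.0.2', 'nexthop-vrf': ',Vrf-RED', 'ifname': ','}
nh = entry['nexthop'].split(',')
nh_vrf = entry['nexthop-vrf'].split(',')
ifname = entry['ifname'].split(',')

nh_zip = list(itertools.zip_longest(nh, nh_vrf, ifname, fillvalue=''))
assert nh_zip == [('10.0.0.2', '', ''), ('20.0.0.2', 'Vrf-RED', '')]

# config route del prefix 1.4.3.4/32 nexthop vrf Vrf-RED 20.0.0.2
cli_tuple = ('20.0.0.2', 'Vrf-RED', '')
assert nh_zip.index(cli_tuple) == 1  # the index removed from every field
```

Once the index is found, the same position is deleted from the `nexthop`, `nexthop-vrf`, `ifname`, `distance`, and `blackhole` lists, which keeps the comma-separated fields aligned.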
Go Back To [Beginning of the document](#) or [Beginning of this section](#Startup--Running-Configuration)
 
+## Static routing
+
+### Static routing Config Commands
+
+This sub-section explains the commands used to add or remove static routes.
+
+**config route add**
+
+This command is used to add a static route. Note that the prefix VRF, the nexthop VRF and the interface name are optional.
+
+- Usage:
+
+  ```
+  config route add prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop [vrf <vrf_name>] <A.B.C.D> dev <dev_name>
+  ```
+
+- Example:
+
+  ```
+  admin@sonic:~$ config route add prefix 2.2.3.4/32 nexthop 30.0.0.9
+  ```
+
+It also supports ECMP: adding a new nexthop to an existing prefix appends it to the already configured nexthops rather than overwriting them.
+
+- Example:
+
+  ```
+  admin@sonic:~$ sudo config route add prefix 2.2.3.4/32 nexthop vrf Vrf-RED 30.0.0.9
+  admin@sonic:~$ sudo config route add prefix 2.2.3.4/32 nexthop vrf Vrf-BLUE 30.0.0.10
+  ```
+
+**config route del**
+
+This command is used to remove a static route. Note that the prefix VRF, the nexthop VRF and the interface name are optional.
+
+- Usage:
+
+  ```
+  config route del prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop [vrf <vrf_name>] <A.B.C.D> dev <dev_name>
+  ```
+
+- Example:
+
+  ```
+  admin@sonic:~$ sudo config route del prefix 2.2.3.4/32 nexthop vrf Vrf-RED 30.0.0.9
+  admin@sonic:~$ sudo config route del prefix 2.2.3.4/32 nexthop vrf Vrf-BLUE 30.0.0.10
+  ```
+
+This sub-section also explains the command used to show current routes.
+
+**show ip route**
+
+- Usage:
+
+  ```
+  show ip route
+  ```
+
+- Example:
+
+  ```
+  admin@sonic:~$ show ip route
+  Codes: K - kernel route, C - connected, S - static, R - RIP,
+         O - OSPF, I - IS-IS, B - BGP, E - EIGRP, N - NHRP,
+         T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP,
+         F - PBR, f - OpenFabric,
+         > - selected route, * - FIB route, q - queued, r - rejected, b - backup
+
+  S>* 0.0.0.0/0 [200/0] via 192.168.111.3, eth0, weight 1, 3d03h58m
+  S> 1.2.3.4/32 [1/0] via 30.0.0.7, weight 1, 00:00:06
+  C>* 10.0.0.18/31 is directly connected, Ethernet36, 3d03h57m
+  C>* 10.0.0.20/31 is directly connected, Ethernet40, 3d03h57m
+  ```
+
+Go Back To [Beginning of the document](#) or [Beginning of this section](#static-routing)
+
+
 ## Syslog
 
 ### Syslog Config Commands
diff --git a/tests/static_routes_test.py b/tests/static_routes_test.py
new file mode 100644
index 0000000000..c354cb97c4
--- /dev/null
+++ b/tests/static_routes_test.py
@@ -0,0 +1,365 @@
+import os
+import traceback
+
+from click.testing import CliRunner
+
+import config.main as config
+import show.main as show
+from utilities_common.db import Db
+
+ERROR_STR = '''
+Error: argument is not in pattern prefix [vrf <vrf_name>] <A.B.C.D/M> nexthop <[vrf <vrf_name>] <A.B.C.D>>|<dev <dev_name>>!
+'''
+ERROR_STR_MISS_PREFIX = '''
+Error: argument is incomplete, prefix not found!
+'''
+ERROR_STR_MISS_NEXTHOP = '''
+Error: argument is incomplete, nexthop not found!
+'''
+ERROR_DEL_NONEXIST_KEY_STR = '''
+Error: Route {} doesnt exist
+'''
+ERROR_DEL_NONEXIST_ENTRY_STR = '''
+Error: Not found {} in {}
+'''
+ERROR_INVALID_IP = '''
+Error: ip address is not valid.
+''' + + +class TestStaticRoutes(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "1" + print("SETUP") + + def test_simple_static_route(self): + db = Db() + runner = CliRunner() + obj = {'config_db':db.cfgdb} + + # config route add prefix 1.2.3.4/32 nexthop 30.0.0.5 + result = runner.invoke(config.config.commands["route"].commands["add"], \ + ["prefix", "1.2.3.4/32", "nexthop", "30.0.0.5"], obj=obj) + print(result.exit_code, result.output) + assert ('1.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE') + assert db.cfgdb.get_entry('STATIC_ROUTE', '1.2.3.4/32') == {'nexthop': '30.0.0.5', 'blackhole': 'false', 'distance': '0', 'ifname': '', 'nexthop-vrf': ''} + + # config route del prefix 1.2.3.4/32 nexthop 30.0.0.5 + result = runner.invoke(config.config.commands["route"].commands["del"], \ + ["prefix", "1.2.3.4/32", "nexthop", "30.0.0.5"], obj=obj) + print(result.exit_code, result.output) + assert not '1.2.3.4/32' in db.cfgdb.get_table('STATIC_ROUTE') + + def test_static_route_invalid_prefix_ip(self): + db = Db() + runner = CliRunner() + obj = {'config_db':db.cfgdb} + + # config route add prefix 1.2.3/32 nexthop 30.0.0.5 + result = runner.invoke(config.config.commands["route"].commands["add"], \ + ["prefix", "1.2.3/32", "nexthop", "30.0.0.5"], obj=obj) + print(result.exit_code, result.output) + assert ERROR_INVALID_IP in result.output + + def test_static_route_invalid_nexthop_ip(self): + db = Db() + runner = CliRunner() + obj = {'config_db':db.cfgdb} + + # config route add prefix 1.2.3.4/32 nexthop 30.0.5 + result = runner.invoke(config.config.commands["route"].commands["add"], \ + ["prefix", "1.2.3.4/32", "nexthop", "30.0.5"], obj=obj) + print(result.exit_code, result.output) + assert ERROR_INVALID_IP in result.output + + def test_vrf_static_route(self): + db = Db() + runner = CliRunner() + obj = {'config_db':db.cfgdb} + + # config route add prefix vrf Vrf-BLUE 2.2.3.4/32 nexthop 30.0.0.6 + result = runner.invoke(config.config.commands["route"].commands["add"], \ + ["prefix", "vrf", "Vrf-BLUE", "2.2.3.4/32", "nexthop", "30.0.0.6"], obj=obj) + print(result.exit_code, result.output) + assert ('Vrf-BLUE', '2.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE') + assert db.cfgdb.get_entry('STATIC_ROUTE', 'Vrf-BLUE|2.2.3.4/32') == {'nexthop': '30.0.0.6', 'blackhole': 'false', 'distance': '0', 'ifname': '', 'nexthop-vrf': ''} + + # config route del prefix vrf Vrf-BLUE 2.2.3.4/32 nexthop 30.0.0.6 + result = runner.invoke(config.config.commands["route"].commands["del"], \ + ["prefix", "vrf", "Vrf-BLUE", "2.2.3.4/32", "nexthop", "30.0.0.6"], obj=obj) + print(result.exit_code, result.output) + assert not ('Vrf-BLUE', '2.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE') + + def test_dest_vrf_static_route(self): + db = Db() + runner = CliRunner() + obj = {'config_db':db.cfgdb} + + # config route add prefix 3.2.3.4/32 nexthop vrf Vrf-RED 30.0.0.6 + result = runner.invoke(config.config.commands["route"].commands["add"], \ + ["prefix", "3.2.3.4/32", "nexthop", "vrf", "Vrf-RED", "30.0.0.6"], obj=obj) + print(result.exit_code, result.output) + assert ('3.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE') + assert db.cfgdb.get_entry('STATIC_ROUTE', '3.2.3.4/32') == {'nexthop': '30.0.0.6', 'nexthop-vrf': 'Vrf-RED', 'blackhole': 'false', 'distance': '0', 'ifname': ''} + + # config route del prefix 3.2.3.4/32 nexthop vrf Vrf-RED 30.0.0.6 + result = runner.invoke(config.config.commands["route"].commands["del"], \ + ["prefix", "3.2.3.4/32", "nexthop", "vrf", "Vrf-RED", "30.0.0.6"], 
obj=obj) + print(result.exit_code, result.output) + assert not ('3.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE') + + def test_multiple_nexthops_with_vrf_static_route(self): + db = Db() + runner = CliRunner() + obj = {'config_db':db.cfgdb} + + ''' Add ''' + # config route add prefix 6.2.3.4/32 nexthop vrf Vrf-RED "30.0.0.6,30.0.0.7" + result = runner.invoke(config.config.commands["route"].commands["add"], \ + ["prefix", "6.2.3.4/32", "nexthop", "vrf", "Vrf-RED", "30.0.0.6,30.0.0.7"], obj=obj) + print(result.exit_code, result.output) + assert ('6.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE') + assert db.cfgdb.get_entry('STATIC_ROUTE', '6.2.3.4/32') == {'nexthop': '30.0.0.6,30.0.0.7', 'blackhole': 'false,false', 'distance': '0,0', 'ifname': ',', 'nexthop-vrf': 'Vrf-RED,Vrf-RED'} + + ''' Del ''' + # config route del prefix 6.2.3.4/32 nexthop vrf Vrf-RED 30.0.0.7 + result = runner.invoke(config.config.commands["route"].commands["del"], \ + ["prefix", "6.2.3.4/32", "nexthop", "vrf", "Vrf-RED", "30.0.0.7"], obj=obj) + print(result.exit_code, result.output) + assert ('6.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE') + assert db.cfgdb.get_entry('STATIC_ROUTE', '6.2.3.4/32') == {'nexthop': '30.0.0.6', 'blackhole': 'false', 'distance': '0', 'ifname': '', 'nexthop-vrf': 'Vrf-RED'} + + # config route del prefix 6.2.3.4/32 nexthop vrf Vrf-RED 30.0.0.6 + result = runner.invoke(config.config.commands["route"].commands["del"], \ + ["prefix", "6.2.3.4/32", "nexthop", "vrf", "Vrf-RED", "30.0.0.6"], obj=obj) + print(result.exit_code, result.output) + assert not ('6.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE') + + def test_multiple_nexthops_static_route(self): + db = Db() + runner = CliRunner() + obj = {'config_db':db.cfgdb} + + ''' Add ''' + # config route add prefix 6.2.3.4/32 nexthop "30.0.0.6,30.0.0.7" + result = runner.invoke(config.config.commands["route"].commands["add"], \ + ["prefix", "6.2.3.4/32", "nexthop", "30.0.0.6,30.0.0.7"], obj=obj) + print(result.exit_code, result.output) + assert ('6.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE') + assert db.cfgdb.get_entry('STATIC_ROUTE', '6.2.3.4/32') == {'nexthop': '30.0.0.6,30.0.0.7', 'blackhole': 'false,false', 'distance': '0,0', 'ifname': ',', 'nexthop-vrf': ','} + + # config route add prefix 6.2.3.4/32 nexthop 30.0.0.8 + result = runner.invoke(config.config.commands["route"].commands["add"], \ + ["prefix", "6.2.3.4/32", "nexthop", "30.0.0.8"], obj=obj) + print(result.exit_code, result.output) + assert ('6.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE') + assert db.cfgdb.get_entry('STATIC_ROUTE', '6.2.3.4/32') == {'nexthop': '30.0.0.6,30.0.0.7,30.0.0.8', 'blackhole': 'false,false,false', 'distance': '0,0,0', 'ifname': ',,', 'nexthop-vrf': ',,'} + + ''' Del ''' + # config route del prefix 6.2.3.4/32 nexthop 30.0.0.8 + result = runner.invoke(config.config.commands["route"].commands["del"], \ + ["prefix", "6.2.3.4/32", "nexthop", "30.0.0.8"], obj=obj) + print(result.exit_code, result.output) + assert ('6.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE') + assert db.cfgdb.get_entry('STATIC_ROUTE', '6.2.3.4/32') == {"nexthop": '30.0.0.6,30.0.0.7', 'blackhole': 'false,false', 'distance': '0,0', 'ifname': ',', 'nexthop-vrf': ','} + + # config route del prefix 6.2.3.4/32 nexthop 30.0.0.7 + result = runner.invoke(config.config.commands["route"].commands["del"], \ + ["prefix", "6.2.3.4/32", "nexthop", "30.0.0.7"], obj=obj) + print(result.exit_code, result.output) + assert ('6.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE') + assert 
db.cfgdb.get_entry('STATIC_ROUTE', '6.2.3.4/32') == {'nexthop': '30.0.0.6', 'blackhole': 'false', 'distance': '0', 'ifname': '', 'nexthop-vrf': ''}
+
+        # config route del prefix 6.2.3.4/32 nexthop 30.0.0.6
+        result = runner.invoke(config.config.commands["route"].commands["del"], \
+            ["prefix", "6.2.3.4/32", "nexthop", "30.0.0.6"], obj=obj)
+        print(result.exit_code, result.output)
+        assert not ('6.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE')
+
+    def test_static_route_miss_prefix(self):
+        db = Db()
+        runner = CliRunner()
+        obj = {'config_db':db.cfgdb}
+
+        # config route add nexthop 30.0.0.6
+        result = runner.invoke(config.config.commands["route"].commands["add"], ["nexthop", "30.0.0.6"], obj=obj)
+        print(result.exit_code, result.output)
+        assert ERROR_STR_MISS_PREFIX in result.output
+
+    def test_static_route_miss_nexthop(self):
+        db = Db()
+        runner = CliRunner()
+        obj = {'config_db':db.cfgdb}
+
+        # config route add prefix 7.2.3.4/32
+        result = runner.invoke(config.config.commands["route"].commands["add"], ["prefix", "7.2.3.4/32"], obj=obj)
+        print(result.exit_code, result.output)
+        assert ERROR_STR_MISS_NEXTHOP in result.output
+
+    def test_static_route_ECMP_nexthop(self):
+        db = Db()
+        runner = CliRunner()
+        obj = {'config_db':db.cfgdb}
+
+        ''' Add '''
+        # config route add prefix 10.2.3.4/32 nexthop 30.0.0.5
+        result = runner.invoke(config.config.commands["route"].commands["add"], \
+            ["prefix", "10.2.3.4/32", "nexthop", "30.0.0.5"], obj=obj)
+        print(result.exit_code, result.output)
+        assert ('10.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE')
+        assert db.cfgdb.get_entry('STATIC_ROUTE', '10.2.3.4/32') == {'nexthop': '30.0.0.5', 'blackhole': 'false', 'distance': '0', 'ifname': '', 'nexthop-vrf': ''}
+
+        # config route add prefix 10.2.3.4/32 nexthop 30.0.0.6
+        result = runner.invoke(config.config.commands["route"].commands["add"], \
+            ["prefix", "10.2.3.4/32", "nexthop", "30.0.0.6"], obj=obj)
+        print(result.exit_code, result.output)
+        assert ('10.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE')
+        assert db.cfgdb.get_entry('STATIC_ROUTE', '10.2.3.4/32') == {'nexthop': '30.0.0.5,30.0.0.6', 'blackhole': 'false,false', 'distance': '0,0', 'ifname': ',', 'nexthop-vrf': ','}
+
+        ''' Del '''
+        # config route del prefix 10.2.3.4/32 nexthop 30.0.0.5
+        result = runner.invoke(config.config.commands["route"].commands["del"], \
+            ["prefix", "10.2.3.4/32", "nexthop", "30.0.0.5"], obj=obj)
+        print(result.exit_code, result.output)
+        assert ('10.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE')
+        assert db.cfgdb.get_entry('STATIC_ROUTE', '10.2.3.4/32') == {'nexthop': '30.0.0.6', 'blackhole': 'false', 'distance': '0', 'ifname': '', 'nexthop-vrf': ''}
+
+        # config route del prefix 10.2.3.4/32 nexthop 30.0.0.6
+        result = runner.invoke(config.config.commands["route"].commands["del"], \
+            ["prefix", "10.2.3.4/32", "nexthop", "30.0.0.6"], obj=obj)
+        print(result.exit_code, result.output)
+        assert not ('10.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE')
+
+    def test_static_route_ECMP_nexthop_with_vrf(self):
+        db = Db()
+        runner = CliRunner()
+        obj = {'config_db':db.cfgdb}
+
+        ''' Add '''
+        # config route add prefix 11.2.3.4/32 nexthop vrf Vrf-RED 30.0.0.5
+        result = runner.invoke(config.config.commands["route"].commands["add"], \
+            ["prefix", "11.2.3.4/32", "nexthop", "vrf", "Vrf-RED", "30.0.0.5"], obj=obj)
+        print(result.exit_code, result.output)
+        assert ('11.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE')
+        assert db.cfgdb.get_entry('STATIC_ROUTE', '11.2.3.4/32') == {'nexthop': '30.0.0.5', 'nexthop-vrf': 'Vrf-RED', 'blackhole': 'false', 'distance': '0', 'ifname': ''}
+
+        # config route add prefix 11.2.3.4/32 nexthop vrf Vrf-BLUE 30.0.0.6
+        result = runner.invoke(config.config.commands["route"].commands["add"], \
+            ["prefix", "11.2.3.4/32", "nexthop", "vrf", "Vrf-BLUE", "30.0.0.6"], obj=obj)
+        print(result.exit_code, result.output)
+        assert ('11.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE')
+        assert db.cfgdb.get_entry('STATIC_ROUTE', '11.2.3.4/32') == {"nexthop": "30.0.0.5,30.0.0.6", "nexthop-vrf": "Vrf-RED,Vrf-BLUE", 'blackhole': 'false,false', 'distance': '0,0', 'ifname': ','}
+
+        ''' Del '''
+        # config route del prefix 11.2.3.4/32 nexthop vrf Vrf-RED 30.0.0.5
+        result = runner.invoke(config.config.commands["route"].commands["del"], \
+            ["prefix", "11.2.3.4/32", "nexthop", "vrf", "Vrf-RED", "30.0.0.5"], obj=obj)
+        print(result.exit_code, result.output)
+        assert ('11.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE')
+        assert db.cfgdb.get_entry('STATIC_ROUTE', '11.2.3.4/32') == {"nexthop": "30.0.0.6", "nexthop-vrf": "Vrf-BLUE", 'blackhole': 'false', 'distance': '0', 'ifname': ''}
+
+        # config route del prefix 11.2.3.4/32 nexthop vrf Vrf-BLUE 30.0.0.6
+        result = runner.invoke(config.config.commands["route"].commands["del"], \
+            ["prefix", "11.2.3.4/32", "nexthop", "vrf", "Vrf-BLUE", "30.0.0.6"], obj=obj)
+        print(result.exit_code, result.output)
+        assert not ('11.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE')
+
+    def test_static_route_ECMP_mixed_nexthop(self):
+        db = Db()
+        runner = CliRunner()
+        obj = {'config_db':db.cfgdb}
+
+        ''' Add '''
+        # config route add prefix 12.2.3.4/32 nexthop 30.0.0.6
+        result = runner.invoke(config.config.commands["route"].commands["add"], \
+            ["prefix", "12.2.3.4/32", "nexthop", "30.0.0.6"], obj=obj)
+        print(result.exit_code, result.output)
+        assert ('12.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE')
+        assert db.cfgdb.get_entry('STATIC_ROUTE', '12.2.3.4/32') == {'nexthop': '30.0.0.6', 'blackhole': 'false', 'distance': '0', 'ifname': '', 'nexthop-vrf': ''}
+
+        # config route add prefix 12.2.3.4/32 nexthop vrf Vrf-RED 30.0.0.7
+        result = runner.invoke(config.config.commands["route"].commands["add"], \
+            ["prefix", "12.2.3.4/32", "nexthop", "vrf", "Vrf-RED", "30.0.0.7"], obj=obj)
+        print(result.exit_code, result.output)
+        assert ('12.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE')
+        assert db.cfgdb.get_entry('STATIC_ROUTE', '12.2.3.4/32') == {'nexthop': '30.0.0.6,30.0.0.7', 'nexthop-vrf': ',Vrf-RED', 'blackhole': 'false,false', 'distance': '0,0', 'ifname': ','}
+
+        ''' Del '''
+        # config route del prefix 12.2.3.4/32 nexthop vrf Vrf-RED 30.0.0.7
+        result = runner.invoke(config.config.commands["route"].commands["del"], \
+            ["prefix", "12.2.3.4/32", "nexthop", "vrf", "Vrf-RED", "30.0.0.7"], obj=obj)
+        print(result.exit_code, result.output)
+        assert ('12.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE')
+        assert db.cfgdb.get_entry('STATIC_ROUTE', '12.2.3.4/32') == {'nexthop': '30.0.0.6', 'nexthop-vrf': '', 'ifname': '', 'blackhole': 'false', 'distance': '0'}
+
+        # config route del prefix 12.2.3.4/32 nexthop 30.0.0.6
+        result = runner.invoke(config.config.commands["route"].commands["del"], \
+            ["prefix", "12.2.3.4/32", "nexthop", "30.0.0.6"], obj=obj)
+        print(result.exit_code, result.output)
+        assert not ('12.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE')
+
+    def test_del_nonexist_key_static_route(self):
+        db = Db()
+        runner = CliRunner()
+        obj = {'config_db':db.cfgdb}
+
+        # config route del prefix 17.2.3.4/32 nexthop 30.0.0.6
+        result = 
runner.invoke(config.config.commands["route"].commands["del"], \
+            ["prefix", "17.2.3.4/32", "nexthop", "30.0.0.6"], obj=obj)
+        print(result.exit_code, result.output)
+        assert ERROR_DEL_NONEXIST_KEY_STR.format("17.2.3.4/32") in result.output
+
+    def test_del_nonexist_entry_static_route(self):
+        db = Db()
+        runner = CliRunner()
+        obj = {'config_db':db.cfgdb}
+
+        # config route add prefix 13.2.3.4/32 nexthop 30.0.0.5
+        result = runner.invoke(config.config.commands["route"].commands["add"], \
+            ["prefix", "13.2.3.4/32", "nexthop", "30.0.0.5"], obj=obj)
+        print(result.exit_code, result.output)
+        assert ('13.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE')
+        assert db.cfgdb.get_entry('STATIC_ROUTE', '13.2.3.4/32') == {'nexthop': '30.0.0.5', 'blackhole': 'false', 'distance': '0', 'ifname': '', 'nexthop-vrf': ''}
+
+        # config route del prefix 13.2.3.4/32 nexthop 30.0.0.6 <- nh ip that doesn't exist
+        result = runner.invoke(config.config.commands["route"].commands["del"], \
+            ["prefix", "13.2.3.4/32", "nexthop", "30.0.0.6"], obj=obj)
+        print(result.exit_code, result.output)
+        assert ERROR_DEL_NONEXIST_ENTRY_STR.format(('30.0.0.6', '', ''), "13.2.3.4/32") in result.output
+
+        # config route del prefix 13.2.3.4/32 nexthop 30.0.0.5
+        result = runner.invoke(config.config.commands["route"].commands["del"], \
+            ["prefix", "13.2.3.4/32", "nexthop", "30.0.0.5"], obj=obj)
+        print(result.exit_code, result.output)
+        assert not '13.2.3.4/32' in db.cfgdb.get_table('STATIC_ROUTE')
+
+    def test_del_entire_ECMP_static_route(self):
+        db = Db()
+        runner = CliRunner()
+        obj = {'config_db':db.cfgdb}
+
+        # config route add prefix 14.2.3.4/32 nexthop 30.0.0.5
+        result = runner.invoke(config.config.commands["route"].commands["add"], \
+            ["prefix", "14.2.3.4/32", "nexthop", "30.0.0.5"], obj=obj)
+        print(result.exit_code, result.output)
+        assert ('14.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE')
+        assert db.cfgdb.get_entry('STATIC_ROUTE', '14.2.3.4/32') == {'nexthop': '30.0.0.5', 'blackhole': 'false', 'distance': '0', 'ifname': '', 'nexthop-vrf': ''}
+
+        # config route add prefix 14.2.3.4/32 nexthop 30.0.0.6
+        result = runner.invoke(config.config.commands["route"].commands["add"], \
+            ["prefix", "14.2.3.4/32", "nexthop", "30.0.0.6"], obj=obj)
+        print(result.exit_code, result.output)
+        assert ('14.2.3.4/32') in db.cfgdb.get_table('STATIC_ROUTE')
+        assert db.cfgdb.get_entry('STATIC_ROUTE', '14.2.3.4/32') == {'nexthop': '30.0.0.5,30.0.0.6', 'nexthop-vrf': ',', 'ifname': ',', 'blackhole': 'false,false', 'distance': '0,0'}
+
+        # config route del prefix 14.2.3.4/32
+        result = runner.invoke(config.config.commands["route"].commands["del"], ["prefix", "14.2.3.4/32"], obj=obj)
+        print(result.exit_code, result.output)
+        assert not '14.2.3.4/32' in db.cfgdb.get_table('STATIC_ROUTE')
+
+    @classmethod
+    def teardown_class(cls):
+        os.environ['UTILITIES_UNIT_TESTING'] = "0"
+        print("TEARDOWN")
+

From 00bd0cea4f260ca8cecc382a6bcea05272dfd070 Mon Sep 17 00:00:00 2001
From: Qi Luo
Date: Tue, 18 May 2021 19:13:16 -0700
Subject: [PATCH 41/41] Limit pyroute2 version range due to upstream broken (#1622)

The latest version of pyroute2 introduces a breaking change
---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index 77a771650f..2ac87e4ad2 100644
--- a/setup.py
+++ b/setup.py
@@ -179,7 +179,7 @@
         'pexpect>=4.8.0',
         'poetry-semver>=0.1.0',
         'prettyprinter>=0.18.0',
-        'pyroute2>=0.5.14',
+        'pyroute2>=0.5.14, <0.6.1',
         'requests>=2.25.0',
         'sonic-config-engine',
         'sonic-platform-common',
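To close out the pin above, a small sketch of how the tightened requirement behaves. This uses the third-party `packaging` library purely for illustration; the assertion values follow from standard PEP 440 specifier semantics:

```python
# Behavior of the new 'pyroute2>=0.5.14, <0.6.1' version range.
from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=0.5.14, <0.6.1")
assert "0.5.19" in spec      # later 0.5.x releases remain installable
assert "0.6.1" not in spec   # the breaking upstream release is excluded
assert "0.5.13" not in spec  # versions below the floor are still rejected
```

Adding an upper bound like this is a stopgap: it keeps builds reproducible until the upstream regression is resolved, at which point the ceiling can be raised or removed.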