From 1d69f0916eccc3961cdaa2c680141f57207396a1 Mon Sep 17 00:00:00 2001 From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Date: Fri, 30 Sep 2022 14:38:05 +0800 Subject: [PATCH 001/174] [Mellanox] Provide dummy implementation for get_rx_los and get_tx_fault (#12231) - Why I did it get_rx_los and get_tx_fault is not supported via the exisitng interface used, need provide dummy implementation for them. NOTE: in later releases we will get them back via different interface. - How I did it Return False * lane_num for get_rx_los and get_tx_fault - How to verify it Added unit test --- .../mlnx-platform-api/sonic_platform/sfp.py | 32 +++++++++++++++++++ .../mlnx-platform-api/tests/test_sfp.py | 14 ++++++++ 2 files changed, 46 insertions(+) diff --git a/platform/mellanox/mlnx-platform-api/sonic_platform/sfp.py b/platform/mellanox/mlnx-platform-api/sonic_platform/sfp.py index 617b4f33d636..d35b869e9a29 100644 --- a/platform/mellanox/mlnx-platform-api/sonic_platform/sfp.py +++ b/platform/mellanox/mlnx-platform-api/sonic_platform/sfp.py @@ -755,6 +755,38 @@ def get_error_description(self): error_description = "Unknow SFP module status ({})".format(oper_status) return error_description + def get_rx_los(self): + """Accessing rx los is not supproted, return all False + + Returns: + list: [False] * channels + """ + api = self.get_xcvr_api() + return [False] * api.NUM_CHANNELS if api else None + + def get_tx_fault(self): + """Accessing tx fault is not supproted, return all False + + Returns: + list: [False] * channels + """ + api = self.get_xcvr_api() + return [False] * api.NUM_CHANNELS if api else None + + def get_xcvr_api(self): + """ + Retrieves the XcvrApi associated with this SFP + + Returns: + An object derived from XcvrApi that corresponds to the SFP + """ + if self._xcvr_api is None: + self.refresh_xcvr_api() + if self._xcvr_api is not None: + self._xcvr_api.get_rx_los = self.get_rx_los + self._xcvr_api.get_tx_fault = self.get_tx_fault + return 
self._xcvr_api + class RJ45Port(NvidiaSFPCommon): """class derived from SFP, representing RJ45 ports""" diff --git a/platform/mellanox/mlnx-platform-api/tests/test_sfp.py b/platform/mellanox/mlnx-platform-api/tests/test_sfp.py index f599e0241d25..b72a5f3ed4aa 100644 --- a/platform/mellanox/mlnx-platform-api/tests/test_sfp.py +++ b/platform/mellanox/mlnx-platform-api/tests/test_sfp.py @@ -119,3 +119,17 @@ def test_is_port_admin_status_up(self, mock_port_status): mock_port_status.return_value = (0, False) assert not SFP.is_port_admin_status_up(None, None) + + @mock.patch('sonic_platform.sfp.SFP.get_xcvr_api') + def test_dummy_apis(self, mock_get_xcvr_api): + mock_api = mock.MagicMock() + mock_api.NUM_CHANNELS = 4 + mock_get_xcvr_api.return_value = mock_api + + sfp = SFP(0) + assert sfp.get_rx_los() == [False] * 4 + assert sfp.get_tx_fault() == [False] * 4 + + mock_get_xcvr_api.return_value = None + assert sfp.get_rx_los() is None + assert sfp.get_tx_fault() is None From 92bd6dae281977c93bbf39e4456a62857a485dfb Mon Sep 17 00:00:00 2001 From: Volodymyr Samotiy Date: Fri, 30 Sep 2022 09:40:12 +0300 Subject: [PATCH 002/174] [Mellanox] Update SAI to v2205.22.1.19 and SDK/FW to v4.5.3168/v2010.3170 (#12205) - Why I did it To include latest fixes and new functionality SAI fixes and new features fix #3205239, incorrect object type returned for SG child list Fix VRF-VNI map entries remove issue ECC health event and logging [Port Buffers] restore default queue and pg configuration when all user pools are deleted Fix EVPN type3 error on removal of uc/bc flood group Fix EVPN type2 MAC move from local to remote results in SAI failure Fix Disable learning on VXLAN tunnel Fix error on VXLAN v6 tunnel removal Fix port cannot apply schedule group when it is a lag member Fix BFD add more detailed message on BFD packet not related to any existing session gcc10 compilation fixes Disable learning on VXLAN tunnel Support BFD remote-disc exchange in negotiation stage Tunnel Loopback packet 
action attribute implementation (for Dual TOR) Add KVD resources MIN/MAX functionality (pending CRM issue with MIN only) Support for CRC2 hash algorithm Bulk counter support for PGs, queues Support mirror sample rate attribute (SPC2+) [Functional] [QoS] | Unable to remove SCHEDULE profile table even if there is no object referencing it Next hop group optimized bulk API Reduce verbosity of shared database already exists print Span mirror policer (SPC2+), optimize pipeline for acl mirror action with policer on SPC2+ use same size descriptor pool for rx/tx fix bfd - notify Sonic for admin-down event 2201 - empty list for supported fec for RJ45 ports Fix don't disable used tunnel underlay interfaces SDK fixes 100GbE FCI DAC (10137628-4050LF/HPE PN: 845408-B21) was recognized by mistake as supporting "cable burning' which caused the switch firmware to read page 0x9f (which unsupported in the cable) and to report this cable as having "bad eeprom". Added remote peer UDP port information in BFD packet event. After editing an ECMP, the resilient ECMP next-hop counter may not count correctly. Fixed potential memory leaks in some APIs related to LPM If TTL_CMD_COPY is used in Encap direction for a packet with no TTL, then the value passed in the ttl data structure will be used if non-zero (default 255 if zero). In SN2201: When configuring Force mode, user should configure Speed and FEC on both sides In Flex Tunnel encapsulation flow, if the encapsulation is with an IPv6 header, the flow label field may not be updated as expected. In some cases, when changing speed to 400GbE over 8 lanes, the first few packets would be dropped. In some traffic patterns involving small packets, the PortRcvErrors counter may mistakenly count events of local physical errors due to an internal flow in the hardware that involves link packets. On Spectrum systems, sometimes during link failure, not all previous firmware indications cleared properly, potentially affecting the next link up attempt. 
On the NVIDIA Spectrum-2 switch, when receiving a packet with Symbol Errors on ports that are configured to cut-thought mode, a pipeline might get stuck. PCI calibration changes from a static to a dynamic mechanism. SDK debug dump shows "Unknown" Counter in RFC3635 Counter Group. SDK debug dump shows "Unknown" Counter in the PPCNT Traffic Class Counter Group. SDK Dump missing column headers in some GC tables may result in difficulty understanding the dump. SLL configuration is missing in SDK dump. Spectrum-2 systems, do no support 1GbE on supported 40GbE modules. When binding a UDP port which is already in use for BFD TX session, the error message appears incorrectly. When Flex Tunnel was used, Flex Modifier sometimes experienced a brief mis-configuration during ISSU. When many ports are active (e.g. 70 ports up), and the configuration of shared buffer is applied on the fly, occasionally, the firmware might get stuck. When running 1GbE speeds on SN4600 system, the port remained active while peer side was closed. When toggling many ports of the Spectrum devices while raising 10GbE link up and link maintenance is enabled, the switch may get stuck and may need to be rebooted. When trying to reconfigure the Flex Parser header and Flex transition parameters after ISSU, the switch will returned an error even if the configuration was identical to that done before performing the ISSU. While toggling the cable, and the low power mode is set to ON, an unexpected PMPE event error is received. - How I did it Updated SDK/SAI submodule and relevant makefiles with the required versions. - How to verify it Build an image and run tests from "sonic-mgmt". 
Signed-off-by: Volodymyr Samotiy --- platform/mellanox/fw.mk | 6 +++--- platform/mellanox/mlnx-sai.mk | 2 +- platform/mellanox/mlnx-sai/SAI-Implementation | 2 +- platform/mellanox/sdk-src/sx-kernel/Switch-SDK-drivers | 2 +- platform/mellanox/sdk.mk | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/platform/mellanox/fw.mk b/platform/mellanox/fw.mk index 962ea0ae31e1..92aed0663311 100644 --- a/platform/mellanox/fw.mk +++ b/platform/mellanox/fw.mk @@ -27,17 +27,17 @@ else FW_FROM_URL = n endif -MLNX_SPC_FW_VERSION = 13.2010.2320 +MLNX_SPC_FW_VERSION = 13.2010.3170 MLNX_SPC_FW_FILE = fw-SPC-rel-$(subst .,_,$(MLNX_SPC_FW_VERSION))-EVB.mfa $(MLNX_SPC_FW_FILE)_PATH = $(MLNX_FW_BASE_PATH) $(MLNX_SPC_FW_FILE)_URL = $(MLNX_FW_BASE_URL)/$(MLNX_SPC_FW_FILE) -MLNX_SPC2_FW_VERSION = 29.2010.2320 +MLNX_SPC2_FW_VERSION = 29.2010.3170 MLNX_SPC2_FW_FILE = fw-SPC2-rel-$(subst .,_,$(MLNX_SPC2_FW_VERSION))-EVB.mfa $(MLNX_SPC2_FW_FILE)_PATH = $(MLNX_FW_BASE_PATH) $(MLNX_SPC2_FW_FILE)_URL = $(MLNX_FW_BASE_URL)/$(MLNX_SPC2_FW_FILE) -MLNX_SPC3_FW_VERSION = 30.2010.2320 +MLNX_SPC3_FW_VERSION = 30.2010.3170 MLNX_SPC3_FW_FILE = fw-SPC3-rel-$(subst .,_,$(MLNX_SPC3_FW_VERSION))-EVB.mfa $(MLNX_SPC3_FW_FILE)_PATH = $(MLNX_FW_BASE_PATH) $(MLNX_SPC3_FW_FILE)_URL = $(MLNX_FW_BASE_URL)/$(MLNX_SPC3_FW_FILE) diff --git a/platform/mellanox/mlnx-sai.mk b/platform/mellanox/mlnx-sai.mk index 6eaa1dcf80b4..90ca7430b0ad 100644 --- a/platform/mellanox/mlnx-sai.mk +++ b/platform/mellanox/mlnx-sai.mk @@ -1,6 +1,6 @@ # Mellanox SAI -MLNX_SAI_VERSION = SAIRel1.21.2.0 +MLNX_SAI_VERSION = SAIBuild2205.22.1.19 export MLNX_SAI_VERSION diff --git a/platform/mellanox/mlnx-sai/SAI-Implementation b/platform/mellanox/mlnx-sai/SAI-Implementation index f9a21df71363..82274ffaef77 160000 --- a/platform/mellanox/mlnx-sai/SAI-Implementation +++ b/platform/mellanox/mlnx-sai/SAI-Implementation @@ -1 +1 @@ -Subproject commit f9a21df713636fe648b8bb190698e4494a0f5239 +Subproject commit 
82274ffaef7748120b7657362f7875fb7d6e6f5f diff --git a/platform/mellanox/sdk-src/sx-kernel/Switch-SDK-drivers b/platform/mellanox/sdk-src/sx-kernel/Switch-SDK-drivers index 5650c3519b55..8b1f1c0f1164 160000 --- a/platform/mellanox/sdk-src/sx-kernel/Switch-SDK-drivers +++ b/platform/mellanox/sdk-src/sx-kernel/Switch-SDK-drivers @@ -1 +1 @@ -Subproject commit 5650c3519b55051124810a4625f8269694b1e592 +Subproject commit 8b1f1c0f11647749f79ebc4e823c157513067412 diff --git a/platform/mellanox/sdk.mk b/platform/mellanox/sdk.mk index b620b07ee2f8..5a6864bc1e4a 100644 --- a/platform/mellanox/sdk.mk +++ b/platform/mellanox/sdk.mk @@ -16,7 +16,7 @@ # MLNX_SDK_BASE_PATH = $(PLATFORM_PATH)/sdk-src/sx-kernel/Switch-SDK-drivers/bin/ MLNX_SDK_PKG_BASE_PATH = $(MLNX_SDK_BASE_PATH)/$(BLDENV)/$(CONFIGURED_ARCH)/ -MLNX_SDK_VERSION = 4.5.2320 +MLNX_SDK_VERSION = 4.5.3168 MLNX_SDK_ISSU_VERSION = 101 MLNX_SDK_DEB_VERSION = $(subst -,.,$(subst _,.,$(MLNX_SDK_VERSION))) From eea8ebd0a9ee977765097666d1d89961a466d891 Mon Sep 17 00:00:00 2001 From: Volodymyr Samotiy Date: Fri, 30 Sep 2022 09:48:40 +0300 Subject: [PATCH 003/174] [Mellanox] Update MFT to v4.21.0-100 (#11758) - Why I did it To update MFT package to the latest version. - How I did it Updated MFT_VERSION & MFT_REVISION in platform/mellanox/mft.mk. 
- How to verify it Build an image and deploy to the switch Check MFT version by dpkg -l | grep mft Verify that all the SONiC services up and running Run regression testing using tests from sonic-mgmt Signed-off-by: Volodymyr Samotiy --- platform/mellanox/mft.mk | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/platform/mellanox/mft.mk b/platform/mellanox/mft.mk index eb702a85a1d1..047e87b09086 100644 --- a/platform/mellanox/mft.mk +++ b/platform/mellanox/mft.mk @@ -16,8 +16,8 @@ # # Mellanox SAI -MFT_VERSION = 4.20.0 -MFT_REVISION = 34 +MFT_VERSION = 4.21.0 +MFT_REVISION = 100 export MFT_VERSION MFT_REVISION From 004a8b6eae203cb1496beb8b192b087cc2a32a66 Mon Sep 17 00:00:00 2001 From: Hua Liu <58683130+liuh-80@users.noreply.github.com> Date: Fri, 30 Sep 2022 15:56:46 +0800 Subject: [PATCH 004/174] [AzurePipeline] Fix vstest step failed by libyang missing. (#12240) Why I did it Fix PR merge failed because 'vstest' step does not install libyang. How I did it Install libyang in azure pipeline. How to verify it Pass vstest step. 
--- azure-pipelines.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 26a86dffa01e..a9004af559ef 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -113,6 +113,8 @@ stages: - script: | set -x + sudo apt-get update + sudo apt-get install libyang0.16 -y sudo dpkg -i --force-confask,confnew ../libswsscommon_1.0.0_amd64.deb sudo dpkg -i ../python3-swsscommon_1.0.0_amd64.deb sudo docker load -i ../target/docker-sonic-vs.gz From 18850e4e28bb5d536abc9c455138c57aad69811d Mon Sep 17 00:00:00 2001 From: Samuel Angebault Date: Fri, 30 Sep 2022 10:03:40 +0200 Subject: [PATCH 005/174] [Arista] Update platform submodules (#12225) Implement input power psu API Report DC power output via API Add bootloader Component in API Fix issue where naming was not unique for Component --- platform/barefoot/sonic-platform-modules-arista | 2 +- platform/broadcom/sonic-platform-modules-arista | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/platform/barefoot/sonic-platform-modules-arista b/platform/barefoot/sonic-platform-modules-arista index e12a04b24c5f..11180c37fa17 160000 --- a/platform/barefoot/sonic-platform-modules-arista +++ b/platform/barefoot/sonic-platform-modules-arista @@ -1 +1 @@ -Subproject commit e12a04b24c5f752a9ca789d62bb7b94c563e1c4b +Subproject commit 11180c37fa17421afdeef346b3896552872a2721 diff --git a/platform/broadcom/sonic-platform-modules-arista b/platform/broadcom/sonic-platform-modules-arista index e12a04b24c5f..11180c37fa17 160000 --- a/platform/broadcom/sonic-platform-modules-arista +++ b/platform/broadcom/sonic-platform-modules-arista @@ -1 +1 @@ -Subproject commit e12a04b24c5f752a9ca789d62bb7b94c563e1c4b +Subproject commit 11180c37fa17421afdeef346b3896552872a2721 From 0e33bd8b8e43becd39d6f2a04988eaa713283e29 Mon Sep 17 00:00:00 2001 From: maipbui Date: Fri, 30 Sep 2022 16:20:35 +0000 Subject: [PATCH 006/174] Test semgrep Signed-off-by: maipbui --- src/test.py | 439 
++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 439 insertions(+) create mode 100644 src/test.py diff --git a/src/test.py b/src/test.py new file mode 100644 index 000000000000..240922b30e70 --- /dev/null +++ b/src/test.py @@ -0,0 +1,439 @@ +import argparse +import fcntl +import inspect +import json +import os +import shutil +import ssl +import subprocess +import sys +import syslog +import tempfile +import urllib.request +import base64 +from urllib.parse import urlparse + +import yaml +import requests +from sonic_py_common import device_info +from jinja2 import Template +from swsscommon import swsscommon + +KUBE_ADMIN_CONF = "/etc/sonic/kube_admin.conf" +KUBELET_YAML = "/var/lib/kubelet/config.yaml" +SERVER_ADMIN_URL = "https://{}/admin.conf" +LOCK_FILE = "/var/lock/kube_join.lock" +FLANNEL_CONF_FILE = "/usr/share/sonic/templates/kube_cni.10-flannel.conflist" +CNI_DIR = "/etc/cni/net.d" +K8S_CA_URL = "https://{}:{}/api/v1/namespaces/default/configmaps/kube-root-ca.crt" +AME_CRT = "/etc/sonic/credentials/restapiserver.crt" +AME_KEY = "/etc/sonic/credentials/restapiserver.key" + +def log_debug(m): + msg = "{}: {}".format(inspect.stack()[1][3], m) + print(msg) + syslog.syslog(syslog.LOG_DEBUG, msg) + + +def log_error(m): + msg = "{}: {}".format(inspect.stack()[1][3], m) + print(msg) + syslog.syslog(syslog.LOG_ERR, m) + + +def to_str(s): + if isinstance(s, str): + return s + + if isinstance(s, bytes): + return s.decode('utf-8') + + return str(s) + +def get_device_name(): + return str(device_info.get_hostname()).lower() + + +def _run_command(cmd, timeout=5): + """ Run shell command and return exit code, along with stdout. 
""" + ret = 0 + try: + proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + (o, e) = proc.communicate(timeout) + output = to_str(o) + err = to_str(e) + ret = proc.returncode + except subprocess.TimeoutExpired as error: + proc.kill() + output = "" + err = str(error) + ret = -1 + + log_debug("cmd:{}\nret={}".format(cmd, ret)) + if output: + log_debug("out:{}".format(output)) + if err: + log_debug("err:{}".format(err)) + + return (ret, output.strip(), err.strip()) + + +def kube_read_labels(): + """ Read current labels on node and return as dict. """ + KUBECTL_GET_CMD = "kubectl --kubeconfig {} get nodes {} --show-labels |tr -s ' ' | cut -f6 -d' '" + + labels = {} + ret, out, _ = _run_command(KUBECTL_GET_CMD.format( + KUBE_ADMIN_CONF, get_device_name())) + + if ret == 0: + lst = out.split(",") + + for label in lst: + tmp = label.split("=") + labels[tmp[0]] = tmp[1] + + # log_debug("{} kube labels {} ret={}".format( + # "Applied" if ret == 0 else "Failed to apply", + # json.dumps(labels, indent=4), ret)) + + return (ret, labels) + + +def kube_write_labels(set_labels): + """ Set given set_labels. + """ + KUBECTL_SET_CMD = "kubectl --kubeconfig {} label --overwrite nodes {} {}" + + ret, node_labels = kube_read_labels() + if ret != 0: + log_debug("Read before set failed. Hence skipping set {}". + format(str(set_labels))) + return ret + + del_label_str = "" + add_label_str = "" + for (name, val) in set_labels.items(): + skip = False + if name in node_labels: + if val != node_labels[name]: + # label value can't be modified. Remove it first + # and then add + del_label_str += "{}- ".format(name) + else: + # Already exists with same value. 
+ skip = True + if not skip: + # Add label + add_label_str += "{}={} ".format(name, val) + + + if add_label_str: + # First remove if any + if del_label_str: + (ret, _, _) = _run_command(KUBECTL_SET_CMD.format( + KUBE_ADMIN_CONF, get_device_name(), del_label_str.strip())) + (ret, _, _) = _run_command(KUBECTL_SET_CMD.format( + KUBE_ADMIN_CONF, get_device_name(), add_label_str.strip())) + + log_debug("{} kube labels {} ret={}".format( + "Applied" if ret == 0 else "Failed to apply", add_label_str, ret)) + else: + log_debug("Given labels are in sync with node labels. Hence no-op") + + return ret + + +def func_get_labels(args): + """ args parser default function for get labels""" + ret, node_labels = kube_read_labels() + if ret != 0: + log_debug("Labels read failed.") + return ret + + log_debug(json.dumps(node_labels, indent=4)) + return 0 + + +def is_connected(server=""): + """ Check if we are currently connected """ + + if (os.path.exists(KUBELET_YAML) and os.path.exists(KUBE_ADMIN_CONF)): + with open(KUBE_ADMIN_CONF, 'r') as s: + d = yaml.load(s, yaml.SafeLoader) + d = d['clusters'] if 'clusters' in d else [] + d = d[0] if len(d) > 0 else {} + d = d['cluster'] if 'cluster' in d else {} + d = d['server'] if 'server' in d else "" + if d: + o = urlparse(d) + if o.hostname: + return not server or server == o.hostname + return False + + +def func_is_connected(args): + """ Get connected state """ + connected = is_connected() + log_debug("Currently {} to Kube master".format( + "connected" if connected else "not connected")) + return 0 if connected else 1 + + +def _take_lock(): + """ Take a lock to block concurrent calls """ + lock_fd = None + try: + lock_fd = open(LOCK_FILE, "w") + fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB) + log_debug("Lock taken {}".format(LOCK_FILE)) + except IOError as e: + lock_fd = None + log_error("Lock {} failed: {}".format(LOCK_FILE, str(e))) + return lock_fd + + +def _download_file(server, port, insecure): + """ Download file from Kube 
master to assist join as node. """ + + if insecure: + r = urllib.request.urlopen(SERVER_ADMIN_URL.format(server), + context=ssl._create_unverified_context()) + else: + r = urllib.request.urlopen(SERVER_ADMIN_URL.format(server)) + + (h, fname) = tempfile.mkstemp(suffix="_kube_join") + data = r.read() + os.write(h, data) + os.close(h) + log_debug("Downloaded = {}".format(fname)) + + shutil.copyfile(fname, KUBE_ADMIN_CONF) + + log_debug("{} downloaded".format(KUBE_ADMIN_CONF)) + + +def _gen_cli_kubeconf(server, port, insecure): + """generate identity which can help authenticate and + authorization to k8s cluster + """ + client_kubeconfig_template = """ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: {{ k8s_ca }} + server: https://{{ vip }}:{{ port }} + name: kubernetes +contexts: +- context: + cluster: kubernetes + user: user + name: user@kubernetes +current-context: user@kubernetes +kind: Config +preferences: {} +users: +- name: user + user: + client-certificate-data: {{ ame_crt }} + client-key-data: {{ ame_key }} + """ + if insecure: + r = requests.get(K8S_CA_URL.format(server, port), cert=(AME_CRT, AME_KEY), verify=False) + else: + r = requests.get(K8S_CA_URL.format(server, port), cert=(AME_CRT, AME_KEY)) + if not r.ok: + raise requests.RequestException("Something wrong with AME cert or something wrong about sonic role in k8s cluster") + k8s_ca = r.json()["data"]["ca.crt"] + k8s_ca_b64 = base64.b64encode(k8s_ca.encode("utf-8")).decode("utf-8") + ame_crt_raw = open(AME_CRT, "rb") + ame_crt_b64 = base64.b64encode(ame_crt_raw.read()).decode("utf-8") + ame_key_raw = open(AME_KEY, "rb") + ame_key_b64 = base64.b64encode(ame_key_raw.read()).decode("utf-8") + client_kubeconfig_template_j2 = Template(client_kubeconfig_template) + client_kubeconfig = client_kubeconfig_template_j2.render( + k8s_ca=k8s_ca_b64, vip=server, port=port, ame_crt=ame_crt_b64, ame_key=ame_key_b64) + (h, fname) = tempfile.mkstemp(suffix="_kube_join") + os.write(h, 
client_kubeconfig.encode("utf-8")) + os.close(h) + log_debug("Downloaded = {}".format(fname)) + + shutil.copyfile(fname, KUBE_ADMIN_CONF) + + log_debug("{} downloaded".format(KUBE_ADMIN_CONF)) + + +def _get_local_ipv6(): + try: + config_db = swsscommon.DBConnector("CONFIG_DB", 0) + mgmt_ip_data = swsscommon.Table(config_db, 'MGMT_INTERFACE') + for key in mgmt_ip_data.getKeys(): + if key.find(":") >= 0: + return key.split("|")[1].split("/")[0] + raise IOError("IPV6 not find from MGMT_INTERFACE table") + except Exception as e: + raise IOError(str(e)) + + +def _troubleshoot_tips(): + """ log troubleshoot tips which could be handy, + when in trouble with join + """ + msg = """ +if join fails, check the following +a. Ensure both master & node run same or compatible k8s versions +b. Check if this node already exists in master + Use 'sudo kubectl --kubeconfig=${KUBE_ADMIN_CONF} get nodes' to list nodes at master. + If yes, delete it, as the node is attempting a new join. + 'kubectl --kubeconfig=${KUBE_ADMIN_CONF} drain --ignore-daemonsets' + 'kubectl --kubeconfig=${KUBE_ADMIN_CONF} delete node ' +c. In Master check if all system pods are running good. + 'kubectl get pods --namespace kube-system' + If any not running properly, say READY column has 0/1, decribe pod for more detail. + 'kubectl --namespace kube-system describe pod ' + For additional details, look into pod's logs. + @ node: /var/log/pods//... + @ master: 'kubectl logs -n kube-system ' + """ + + (h, fname) = tempfile.mkstemp(suffix="kube_hints_") + os.write(h, str.encode(msg)) + os.close(h) + + log_error("Refer file {} for troubleshooting tips".format(fname)) + + +def _do_reset(pending_join = False): + # Drain & delete self from cluster. If not, the next join would fail + # + if os.path.exists(KUBE_ADMIN_CONF): + _run_command( + "kubectl --kubeconfig {} --request-timeout 20s drain {} --ignore-daemonsets". 
+ format(KUBE_ADMIN_CONF, get_device_name())) + + _run_command("kubectl --kubeconfig {} --request-timeout 20s delete node {}". + format(KUBE_ADMIN_CONF, get_device_name())) + + _run_command("kubeadm reset -f", 10) + _run_command("rm -rf {}".format(CNI_DIR)) + if not pending_join: + _run_command("rm -f {}".format(KUBE_ADMIN_CONF)) + _run_command("systemctl stop kubelet") + + +def _do_join(server, port, insecure): + KUBEADM_JOIN_CMD = "kubeadm join --discovery-file {} --node-name {} --apiserver-advertise-address {}" + err = "" + out = "" + ret = 0 + try: + local_ipv6 = _get_local_ipv6() + #_download_file(server, port, insecure) + _gen_cli_kubeconf(server, port, insecure) + _do_reset(True) + _run_command("modprobe br_netfilter") + # Copy flannel.conf + _run_command("mkdir -p {}".format(CNI_DIR)) + _run_command("cp {} {}".format(FLANNEL_CONF_FILE, CNI_DIR)) + (ret, _, _) = _run_command("systemctl start kubelet") + + if ret == 0: + (ret, out, err) = _run_command(KUBEADM_JOIN_CMD.format( + KUBE_ADMIN_CONF, get_device_name(), local_ipv6), timeout=60) + log_debug("ret = {}".format(ret)) + + except IOError as e: + err = "Join failed: {}".format(str(e)) + ret = -1 + out = "" + + _troubleshoot_tips() + + if (ret != 0): + log_error(err) + + return (ret, out, err) + + +def kube_join_master(server, port, insecure, force=False): + """ The main function that initiates join to master """ + + out = "" + err = "" + ret = 0 + + log_debug("join: server:{} port:{} insecure:{} force:{}". + format(server, port, insecure, force)) + + lock_fd = _take_lock() + if not lock_fd: + log_error("Lock {} is active; Bail out".format(LOCK_FILE)) + return (-1, "", "") + + if ((not force) and is_connected(server)): + _run_command("systemctl start kubelet") + err = "Master {} is already connected. 
" + err += "Reset or join with force".format(server) + else: + (ret, out, err) = _do_join(server, port, insecure) + + log_debug("join: ret={} out:{} err:{}".format(ret, out, err)) + return (ret, out, err) + + +def kube_reset_master(force): + err = "" + ret = 0 + + lock_fd = _take_lock() + if not lock_fd: + log_error("Lock {} is active; Bail out".format(LOCK_FILE)) + return (-1, "") + + if not force: + if not is_connected(): + err = "Currently not connected to master. " + err += "Use force reset if needed" + log_debug("Not connected ... bailing out") + ret = -1 + + if ret == 0: + _do_reset() + else: + _run_command("systemctl stop kubelet") + + return (ret, err) + + + +def main(): + syslog.openlog("kube_commands") + parser=argparse.ArgumentParser(description= + "get-labels") + subparsers = parser.add_subparsers(title='actions') + + parser_get_labels = subparsers.add_parser("get-labels", + help="Get current labels on node") + parser_get_labels.set_defaults(func=func_get_labels) + + parser_is_connected = subparsers.add_parser("connected", + help="Get connnected status") + parser_is_connected.set_defaults(func=func_is_connected) + + if len(sys.argv) < 2: + parser.print_help() + return -1 + + args = parser.parse_args() + ret = args.func(args) + + syslog.closelog() + return ret + + +if __name__ == "__main__": + if os.geteuid() != 0: + exit("Please run as root. 
Exiting ...") + main() + sys.exit(0) From 0a2743d5e472d25209a846e49f032998ee093261 Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Sat, 1 Oct 2022 11:36:55 -0700 Subject: [PATCH 007/174] [submodule] update sonic-utilities (#12138) 0a7557bd9 [minigraph] add option to specify golden path in load_minigraph (#2350) 322aefc37 [GCU]Remove GCU unique lane check for duplicate lanes platforms (#2343) 7099fffa7 [fastboot] fastboot enhancement: Use warm-boot infrastructure for fast-boot (#2286) 09026edbb [warm-reboot] fix warm-reboot when /tmp/cache is missing (#2367) a3c404c74 Fix typo in platform_sfputil_helper.is_rj45_port (#2374) 637d834ce Vnet_route_check Vxlan tunnel route update. (#2281) 29a3e5180 Added support for tunnel route status in show vnet routes all. (#2341) 1ac584bb3 Use 'default' VRF when VRF name is not provided (#2368) 4d377a620 [subinterface]Added additional checks in portchannel and subinterface commands (#2345) bbcdf2ed7 disk_check: Publish event for RO state (#2320) 3fd537b0a Support the bandit check by GitHub Action (#2358) 491d3d380 [generate dump]Added error message when saisdkdump fails (#2356) 6830e01ec [counterpoll]Fixing counterpoll show for tunnel and acl stats (#2355) 3be2ad7de [fast-reboot]Avoid stopping masked services during fast-reboot (#2335) 0e1b0cf20 [GCU] Fix missing backend in dry run (#2347) 676c31bd0 Add verification for override (#2305) 48997c266 Add Password Hardening CLI support (#2338) 414e239ea update unit tests for swap allocator a91a4922f consider swap checking memory in installer f0ce58635 [route_check]: Ignore standalone tunnel routes (#2325) --- src/sonic-utilities | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-utilities b/src/sonic-utilities index 3af8ba4acc2b..0a7557bd9162 160000 --- a/src/sonic-utilities +++ b/src/sonic-utilities @@ -1 +1 @@ -Subproject commit 3af8ba4acc2bbc77d17be0d67943703021c7d1e1 +Subproject commit 0a7557bd9162eae40f5d4c4f6fbab92dbad7204b From 
8c10851c2aebb44099611248f91da350b16df1dc Mon Sep 17 00:00:00 2001 From: Muhammad Danish <88161975+mdanish-kh@users.noreply.github.com> Date: Sun, 2 Oct 2022 11:02:10 +0500 Subject: [PATCH 008/174] Update azure.github.io links to sonic-net.github.io (#12209) Why I did it azure.github.io/SONiC/ no longer works and returns 404 Not Found. Updated it to the correct sonic-net.github.io/SONiC/ --- files/image_config/environment/motd | 2 +- platform/vs/sonic-gns3a.sh | 4 ++-- src/sonic-device-data/README.md | 2 +- src/sonic-host-services-data/README.md | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/files/image_config/environment/motd b/files/image_config/environment/motd index 8562e330fe2c..0d857e5c5f94 100644 --- a/files/image_config/environment/motd +++ b/files/image_config/environment/motd @@ -10,5 +10,5 @@ You are on Unauthorized access and/or use are prohibited. All access and/or use are subject to monitoring. -Help: http://azure.github.io/SONiC/ +Help: https://sonic-net.github.io/SONiC/ diff --git a/platform/vs/sonic-gns3a.sh b/platform/vs/sonic-gns3a.sh index 41e39cd8686a..2a772ce5a332 100644 --- a/platform/vs/sonic-gns3a.sh +++ b/platform/vs/sonic-gns3a.sh @@ -41,9 +41,9 @@ echo " \"category\": \"router\", \"description\": \"SONiC Virtual Switch/Router\", \"vendor_name\": \"SONiC\", - \"vendor_url\": \"https://azure.github.io/SONiC/\", + \"vendor_url\": \"https://sonic-net.github.io/SONiC/\", \"product_name\": \"SONiC\", - \"product_url\": \"https://azure.github.io/SONiC/\", + \"product_url\": \"https://sonic-net.github.io/SONiC/\", \"registry_version\": 3, \"status\": \"experimental\", \"maintainer\": \"SONiC\", diff --git a/src/sonic-device-data/README.md b/src/sonic-device-data/README.md index e8ccad58b819..d9d403758a86 100644 --- a/src/sonic-device-data/README.md +++ b/src/sonic-device-data/README.md @@ -1,4 +1,4 @@ # sonic-device-data Device-specific data for the SONiC project -See the [SONiC Website](http://azure.github.io/SONiC/) for 
more information about the SONiC project. +See the [SONiC Website](https://sonic-net.github.io/SONiC/) for more information about the SONiC project. diff --git a/src/sonic-host-services-data/README.md b/src/sonic-host-services-data/README.md index 93af66a83d6b..0b9e714932d2 100644 --- a/src/sonic-host-services-data/README.md +++ b/src/sonic-host-services-data/README.md @@ -16,4 +16,4 @@ dpkg-buildpackage -rfakeroot -Tclean --- -See the [SONiC Website](http://azure.github.io/SONiC/) for more information about the SONiC project. +See the [SONiC Website](https://sonic-net.github.io/SONiC/) for more information about the SONiC project. From 44356fa8d758fffd6e023ebfb7e1c8646a81ede1 Mon Sep 17 00:00:00 2001 From: Dror Prital <76714716+dprital@users.noreply.github.com> Date: Sun, 2 Oct 2022 11:34:24 +0300 Subject: [PATCH 009/174] [Mellanox] Add NVIDIA copyright header for NVIDIA added files (#12130) - Why I did it Add NVIDIA Copyright header for new "NVIDIA" files - How I did it Add the copyright header as remark at the head of the file --- .../ACS-MSN2700/buffers_defaults_objects.j2 | 17 +++++++++++++++++ .../buffers_defaults_objects.j2 | 17 +++++++++++++++++ .../Mellanox-SN4700-C128/port_config.ini | 17 +++++++++++++++++ .../Mellanox-SN4700-C128/sai_4700_128x100g.xml | 17 +++++++++++++++++ .../tests/input_platform/__init__.py | 16 ++++++++++++++++ .../tests/input_platform/output_sfp.py | 17 +++++++++++++++++ platform/mellanox/zero_profiles.j2 | 17 +++++++++++++++++ 7 files changed, 118 insertions(+) diff --git a/device/mellanox/x86_64-mlnx_msn2700-r0/ACS-MSN2700/buffers_defaults_objects.j2 b/device/mellanox/x86_64-mlnx_msn2700-r0/ACS-MSN2700/buffers_defaults_objects.j2 index 29a3c74e5233..e8edeca556d1 100644 --- a/device/mellanox/x86_64-mlnx_msn2700-r0/ACS-MSN2700/buffers_defaults_objects.j2 +++ b/device/mellanox/x86_64-mlnx_msn2700-r0/ACS-MSN2700/buffers_defaults_objects.j2 @@ -1,3 +1,20 @@ +{# + Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. 
+ Apache-2.0 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +#} + {%- macro generate_buffer_pool_and_profiles_with_inactive_ports(port_names_inactive) %} "BUFFER_POOL": { {% if dynamic_mode is not defined and port_names_inactive|length > 0 -%} diff --git a/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D48C8/buffers_defaults_objects.j2 b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D48C8/buffers_defaults_objects.j2 index f0b0e3993bd4..6bf657d1fb7b 100644 --- a/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D48C8/buffers_defaults_objects.j2 +++ b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D48C8/buffers_defaults_objects.j2 @@ -1,3 +1,20 @@ +{# + Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. + Apache-2.0 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+#} + {%- macro generate_buffer_pool_and_profiles_with_inactive_ports(port_names_inactive) %} "BUFFER_POOL": { {% if dynamic_mode is not defined and port_names_inactive|length > 0 -%} diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-C128/port_config.ini b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-C128/port_config.ini index d64b66b0b691..0d67f9b366fc 100644 --- a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-C128/port_config.ini +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-C128/port_config.ini @@ -1,3 +1,20 @@ +## +## Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. +## Apache-2.0 +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## + # name lanes alias index speed Ethernet0 0,1 etp1a 1 100000 Ethernet2 2,3 etp1b 1 100000 diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-C128/sai_4700_128x100g.xml b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-C128/sai_4700_128x100g.xml index f5d49f8b86ab..2575b49f3fa0 100644 --- a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-C128/sai_4700_128x100g.xml +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-C128/sai_4700_128x100g.xml @@ -1,3 +1,20 @@ + + diff --git a/platform/mellanox/mlnx-platform-api/tests/input_platform/__init__.py b/platform/mellanox/mlnx-platform-api/tests/input_platform/__init__.py index e69de29bb2d1..07ebf17a113e 100644 --- a/platform/mellanox/mlnx-platform-api/tests/input_platform/__init__.py +++ b/platform/mellanox/mlnx-platform-api/tests/input_platform/__init__.py @@ -0,0 +1,16 @@ +# +# Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. +# Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/platform/mellanox/mlnx-platform-api/tests/input_platform/output_sfp.py b/platform/mellanox/mlnx-platform-api/tests/input_platform/output_sfp.py index 20a09d1b54f6..170b0246430f 100644 --- a/platform/mellanox/mlnx-platform-api/tests/input_platform/output_sfp.py +++ b/platform/mellanox/mlnx-platform-api/tests/input_platform/output_sfp.py @@ -1,3 +1,20 @@ +# +# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. 
+# Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + """ module holding the correct values for the sfp_test.py """ diff --git a/platform/mellanox/zero_profiles.j2 b/platform/mellanox/zero_profiles.j2 index a953c18409b2..007f19c83a0a 100644 --- a/platform/mellanox/zero_profiles.j2 +++ b/platform/mellanox/zero_profiles.j2 @@ -1,3 +1,20 @@ +{# + Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. + Apache-2.0 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+#} + [ { "BUFFER_POOL_TABLE:ingress_zero_pool": { From 2f46689a059d3d7146b8ab27a0293e4b42383ec7 Mon Sep 17 00:00:00 2001 From: andywongarista <78833093+andywongarista@users.noreply.github.com> Date: Sun, 2 Oct 2022 22:53:34 -0700 Subject: [PATCH 010/174] [Arista] Add components for 720DT-48S (#12217) Why I did it Add components data for sonic-mgmt testing How I did it Update platform.json and add platform_components.json How to verify it Ran sonic-mgmt tests (test_chassis and test_component) --- device/arista/x86_64-arista_720dt_48s/platform.json | 9 ++++++++- .../x86_64-arista_720dt_48s/platform_components.json | 10 ++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 device/arista/x86_64-arista_720dt_48s/platform_components.json diff --git a/device/arista/x86_64-arista_720dt_48s/platform.json b/device/arista/x86_64-arista_720dt_48s/platform.json index ce405b82f955..c560f8d739f1 100644 --- a/device/arista/x86_64-arista_720dt_48s/platform.json +++ b/device/arista/x86_64-arista_720dt_48s/platform.json @@ -1,7 +1,14 @@ { "chassis": { "name": "CCS-720DT-48S", - "components": [], + "components": [ + { + "name": "Aboot()" + }, + { + "name": "Scd(addr=0000:00:18.7)" + } + ], "fan_drawers": [ { "name": "fixed1", diff --git a/device/arista/x86_64-arista_720dt_48s/platform_components.json b/device/arista/x86_64-arista_720dt_48s/platform_components.json new file mode 100644 index 000000000000..ea8bbb5e3346 --- /dev/null +++ b/device/arista/x86_64-arista_720dt_48s/platform_components.json @@ -0,0 +1,10 @@ +{ + "chassis": { + "CCS-720DT-48S": { + "component": { + "Aboot()": {}, + "Scd(addr=0000:00:18.7)": {} + } + } + } +} From 95f4af3407d32cdec98a3d0d487ab0997ef5ff3c Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Mon, 3 Oct 2022 11:38:55 -0700 Subject: [PATCH 011/174] [actions] Support Semgrep by Github Actions (#12249) Signed-off-by: maipbui #### Why I did it [Semgrep](https://github.com/returntocorp/semgrep) is a static analysis tool to find 
security vulnerabilities. When opening a PR or committing to PR, Semgrep performs a diff-aware scanning, which scans changed files in PRs. When merging PR, Semgrep performs a full scan on master branch and reports all findings. Ref: - [Supported Language](https://semgrep.dev/docs/supported-languages/#language-maturity) - [Semgrep Rules](https://registry.semgrep.dev/rule) #### How I did it Integrate Semgrep into this repository by committing a job configuration file #### How to verify it PR: https://github.com/maipbui/sonic-buildimage/pull/2 Master branch full scan findings: [Master branch findings results](https://github.com/maipbui/sonic-buildimage/actions/runs/3160181876/jobs/5144332404) PR https://github.com/maipbui/sonic-buildimage/pull/2 scan findings: [Pull request findings results](https://github.com/maipbui/sonic-buildimage/actions/runs/3160193505/jobs/5144357859) --- .github/workflows/semgrep.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/workflows/semgrep.yml diff --git a/.github/workflows/semgrep.yml b/.github/workflows/semgrep.yml new file mode 100644 index 000000000000..8ebe082f50a4 --- /dev/null +++ b/.github/workflows/semgrep.yml @@ -0,0 +1,21 @@ +name: Semgrep + +on: + pull_request: {} + push: + branches: + - master + - '201[7-9][0-1][0-9]' + - '202[0-9][0-1][0-9]' + +jobs: + semgrep: + name: Semgrep + runs-on: ubuntu-latest + container: + image: returntocorp/semgrep + steps: + - uses: actions/checkout@v3 + - run: semgrep ci + env: + SEMGREP_RULES: p/default From c691b739591b9b5bcd0c1ba338d04aa64aa60549 Mon Sep 17 00:00:00 2001 From: Kalimuthu-Velappan <53821802+Kalimuthu-Velappan@users.noreply.github.com> Date: Wed, 5 Oct 2022 02:43:40 +0530 Subject: [PATCH 012/174] 01.Version-cache - restructuring of Makefile.work (#12000) - The Makefile.work becomes complex and it is very difficult to manage the changes across branches. - Restructured the Makefile.work and it becomes more readable.
- Added $(QUIET) option to turn on command echo mode through command line option. - Exported the SONIC_BUILD_VARS variable, through which make options can be set dynamically. Eg: make SONIC_BUILD_VARS='INCLUDE_NAT=y' --- Makefile | 34 +++++--- Makefile.work | 213 +++++++++++++++++++++++++++++++++----------------- slave.mk | 66 ++++++++-------- 3 files changed, 196 insertions(+), 117 deletions(-) diff --git a/Makefile b/Makefile index af3d7086ec8e..ccfefc6c183c 100644 --- a/Makefile +++ b/Makefile @@ -5,6 +5,14 @@ NOSTRETCH ?= 0 NOBUSTER ?= 0 NOBULLSEYE ?= 0 +override Q := @ +ifeq ($(QUIET),n) + override Q := +endif +override SONIC_OVERRIDE_BUILD_VARS += $(SONIC_BUILD_VARS) +override SONIC_OVERRIDE_BUILD_VARS += Q=$(Q) +export Q SONIC_OVERRIDE_BUILD_VARS + ifeq ($(NOJESSIE),0) BUILD_JESSIE=1 endif @@ -29,50 +37,50 @@ PLATFORM_CHECKOUT_CMD := $(shell if [ -f $(PLATFORM_CHECKOUT_FILE) ]; then PLATF %:: @echo "+++ --- Making $@ --- +++" ifeq ($(NOJESSIE), 0) - EXTRA_DOCKER_TARGETS=$(notdir $@) make -f Makefile.work jessie + EXTRA_DOCKER_TARGETS=$(notdir $@) $(MAKE) -f Makefile.work jessie endif ifeq ($(NOSTRETCH), 0) - EXTRA_DOCKER_TARGETS=$(notdir $@) BLDENV=stretch make -f Makefile.work stretch + EXTRA_DOCKER_TARGETS=$(notdir $@) BLDENV=stretch $(MAKE) -f Makefile.work stretch endif ifeq ($(NOBUSTER), 0) - EXTRA_DOCKER_TARGETS=$(notdir $@) BLDENV=buster make -f Makefile.work buster + EXTRA_DOCKER_TARGETS=$(notdir $@) BLDENV=buster $(MAKE) -f Makefile.work buster endif ifeq ($(NOBULLSEYE), 0) - BLDENV=bullseye make -f Makefile.work $@ + BLDENV=bullseye $(MAKE) -f Makefile.work $@ endif - BLDENV=bullseye make -f Makefile.work docker-cleanup + BLDENV=bullseye $(MAKE) -f Makefile.work docker-cleanup jessie: @echo "+++ Making $@ +++" ifeq ($(NOJESSIE), 0) - make -f Makefile.work jessie + $(MAKE) -f Makefile.work jessie endif stretch: @echo "+++ Making $@ +++" ifeq ($(NOSTRETCH), 0) - make -f Makefile.work stretch + $(MAKE) -f Makefile.work stretch endif buster: @echo "+++ 
Making $@ +++" ifeq ($(NOBUSTER), 0) - make -f Makefile.work buster + $(MAKE) -f Makefile.work buster endif init: @echo "+++ Making $@ +++" - make -f Makefile.work $@ + $(MAKE) -f Makefile.work $@ # # Function to invoke target $@ in Makefile.work with proper BLDENV # define make_work @echo "+++ Making $@ +++" - $(if $(BUILD_JESSIE),make -f Makefile.work $@,) - $(if $(BUILD_STRETCH),BLDENV=stretch make -f Makefile.work $@,) - $(if $(BUILD_BUSTER),BLDENV=buster make -f Makefile.work $@,) - $(if $(BUILD_BULLSEYE),BLDENV=bullseye make -f Makefile.work $@,) + $(if $(BUILD_JESSIE),$(MAKE) -f Makefile.work $@,) + $(if $(BUILD_STRETCH),BLDENV=stretch $(MAKE) -f Makefile.work $@,) + $(if $(BUILD_BUSTER),BLDENV=buster $(MAKE) -f Makefile.work $@,) + $(if $(BUILD_BULLSEYE),BLDENV=bullseye $(MAKE) -f Makefile.work $@,) endef .PHONY: $(PLATFORM_PATH) diff --git a/Makefile.work b/Makefile.work index 6171a05c5192..a6cacafce834 100644 --- a/Makefile.work +++ b/Makefile.work @@ -124,7 +124,7 @@ endif # Define a do-nothing target for rules/config.user so that when # the file is missing, make won't try to rebuld everything. 
rules/config.user: - @echo -n "" + $(Q)echo -n "" include rules/config -include rules/config.user @@ -173,21 +173,59 @@ endif endif # Generate the version control build info -$(shell SONIC_VERSION_CONTROL_COMPONENTS=$(SONIC_VERSION_CONTROL_COMPONENTS) \ - TRUSTED_GPG_URLS=$(TRUSTED_GPG_URLS) PACKAGE_URL_PREFIX=$(PACKAGE_URL_PREFIX) \ - scripts/generate_buildinfo_config.sh) +$(shell \ + SONIC_VERSION_CONTROL_COMPONENTS=$(SONIC_VERSION_CONTROL_COMPONENTS) \ + TRUSTED_GPG_URLS=$(TRUSTED_GPG_URLS) \ + PACKAGE_URL_PREFIX=$(PACKAGE_URL_PREFIX) \ + scripts/generate_buildinfo_config.sh) # Generate the slave Dockerfile, and prepare build info for it -$(shell CONFIGURED_ARCH=$(CONFIGURED_ARCH) MULTIARCH_QEMU_ENVIRON=$(MULTIARCH_QEMU_ENVIRON) CROSS_BUILD_ENVIRON=$(CROSS_BUILD_ENVIRON) ENABLE_FIPS_FEATURE=$(ENABLE_FIPS_FEATURE) DOCKER_EXTRA_OPTS=$(DOCKER_EXTRA_OPTS) DEFAULT_CONTAINER_REGISTRY=$(DEFAULT_CONTAINER_REGISTRY) j2 $(SLAVE_DIR)/Dockerfile.j2 > $(SLAVE_DIR)/Dockerfile) -$(shell CONFIGURED_ARCH=$(CONFIGURED_ARCH) MULTIARCH_QEMU_ENVIRON=$(MULTIARCH_QEMU_ENVIRON) CROSS_BUILD_ENVIRON=$(CROSS_BUILD_ENVIRON) j2 $(SLAVE_DIR)/Dockerfile.user.j2 > $(SLAVE_DIR)/Dockerfile.user) -$(shell BUILD_SLAVE=y DEFAULT_CONTAINER_REGISTRY=$(DEFAULT_CONTAINER_REGISTRY) scripts/prepare_docker_buildinfo.sh $(SLAVE_BASE_IMAGE) $(SLAVE_DIR)/Dockerfile $(CONFIGURED_ARCH) "" $(BLDENV)) +$(shell CONFIGURED_ARCH=$(CONFIGURED_ARCH) \ + MULTIARCH_QEMU_ENVIRON=$(MULTIARCH_QEMU_ENVIRON) \ + CROSS_BUILD_ENVIRON=$(CROSS_BUILD_ENVIRON) \ + ENABLE_FIPS_FEATURE=$(ENABLE_FIPS_FEATURE) \ + DOCKER_EXTRA_OPTS=$(DOCKER_EXTRA_OPTS) \ + DEFAULT_CONTAINER_REGISTRY=$(DEFAULT_CONTAINER_REGISTRY) \ + j2 $(SLAVE_DIR)/Dockerfile.j2 > $(SLAVE_DIR)/Dockerfile) + +$(shell CONFIGURED_ARCH=$(CONFIGURED_ARCH) \ + MULTIARCH_QEMU_ENVIRON=$(MULTIARCH_QEMU_ENVIRON) \ + CROSS_BUILD_ENVIRON=$(CROSS_BUILD_ENVIRON) \ + j2 $(SLAVE_DIR)/Dockerfile.user.j2 > $(SLAVE_DIR)/Dockerfile.user) + +PREPARE_DOCKER=BUILD_SLAVE=y \ + 
DEFAULT_CONTAINER_REGISTRY=$(DEFAULT_CONTAINER_REGISTRY) \ + scripts/prepare_docker_buildinfo.sh \ + $(SLAVE_BASE_IMAGE) \ + $(SLAVE_DIR)/Dockerfile \ + $(CONFIGURED_ARCH) \ + "" \ + $(BLDENV) + +$(shell $(PREPARE_DOCKER) ) # Add the versions in the tag, if the version change, need to rebuild the slave -SLAVE_BASE_TAG = $(shell cat $(SLAVE_DIR)/Dockerfile $(SLAVE_DIR)/buildinfo/versions/versions-* src/sonic-build-hooks/hooks/* | sha1sum | awk '{print substr($$1,0,11);}') -# Calculate the slave TAG based on $(USER)/$(PWD)/$(CONFIGURED_PLATFORM) to get unique SHA ID -SLAVE_TAG = $(shell (cat $(SLAVE_DIR)/Dockerfile.user $(SLAVE_DIR)/Dockerfile $(SLAVE_DIR)/buildinfo/versions/versions-* .git/HEAD && echo $(USER)/$(PWD)/$(CONFIGURED_PLATFORM)) \ - | sha1sum | awk '{print substr($$1,0,11);}') +SLAVE_BASE_TAG = $(shell \ + cat $(SLAVE_DIR)/Dockerfile \ + $(SLAVE_DIR)/buildinfo/versions/versions-* \ + src/sonic-build-hooks/hooks/* 2>/dev/null \ + | sha1sum \ + | awk '{print substr($$1,0,11);}') +# Calculate the slave TAG based on $(USER)/$(PWD)/$(CONFIGURED_PLATFORM) to get unique SHA ID +SLAVE_TAG = $(shell \ + (cat $(SLAVE_DIR)/Dockerfile.user \ + $(SLAVE_DIR)/Dockerfile \ + $(SLAVE_DIR)/buildinfo/versions/versions-* \ + .git/HEAD \ + && echo $(USER)/$(PWD)/$(CONFIGURED_PLATFORM)) \ + | sha1sum \ + | awk '{print substr($$1,0,11);}') + +COLLECT_DOCKER=DEFAULT_CONTAINER_REGISTRY=$(DEFAULT_CONTAINER_REGISTRY) \ + scripts/collect_docker_version_files.sh \ + $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) \ + target OVERLAY_MODULE_CHECK := \ lsmod | grep -q "^overlay " &>/dev/null || \ zgrep -q 'CONFIG_OVERLAY_FS=y' /proc/config.gz &>/dev/null || \ @@ -329,7 +367,7 @@ DOCKER_BASE_LOG = $(SLAVE_DIR)/$(SLAVE_BASE_IMAGE)_$(SLAVE_BASE_TAG).log DOCKER_LOG = $(SLAVE_DIR)/$(SLAVE_IMAGE)_$(SLAVE_TAG).log -DOCKER_BASE_BUILD = docker build --no-cache \ +DOCKER_SLAVE_BASE_BUILD = docker build --no-cache \ -t $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) \ --build-arg http_proxy=$(http_proxy) \ 
--build-arg https_proxy=$(https_proxy) \ @@ -339,7 +377,7 @@ DOCKER_BASE_BUILD = docker build --no-cache \ DOCKER_BASE_PULL = docker pull \ $(REGISTRY_SERVER):$(REGISTRY_PORT)/$(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) -DOCKER_BUILD = docker build --no-cache \ +DOCKER_USER_BUILD = docker build --no-cache \ --build-arg user=$(USER) \ --build-arg uid=$(shell id -u) \ --build-arg guid=$(shell id -g) \ @@ -349,7 +387,52 @@ DOCKER_BUILD = docker build --no-cache \ -f $(SLAVE_DIR)/Dockerfile.user \ $(SLAVE_DIR) $(SPLIT_LOG) $(DOCKER_LOG) -SONIC_BUILD_INSTRUCTION := make \ + +DOCKER_SLAVE_BASE_INSPECT = \ + { \ + echo Checking sonic-slave-base image: $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG); \ + docker inspect --type image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) &> /dev/null; \ + } + +DOCKER_SLAVE_BASE_PULL_REGISTRY = \ + [ $(ENABLE_DOCKER_BASE_PULL) == y ] && \ + { \ + echo Image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) not found. Pulling...; \ + $(DOCKER_BASE_PULL); \ + } && \ + { \ + docker tag $(REGISTRY_SERVER):$(REGISTRY_PORT)/$(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) && \ + $(COLLECT_DOCKER); \ + }\ + +SONIC_SLAVE_BASE_BUILD = \ + { \ + $(DOCKER_SLAVE_BASE_INSPECT); \ + } || \ + { \ + $(DOCKER_SLAVE_BASE_PULL_REGISTRY); \ + } || \ + { \ + echo Image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) not found. Building... ; \ + $(PREPARE_DOCKER) ; \ + $(DOCKER_SLAVE_BASE_BUILD) ; \ + $(COLLECT_DOCKER) ; \ + } + +DOCKER_SLAVE_USER_INSPECT = \ + { \ + echo Checking sonic-slave-user image: $(SLAVE_IMAGE):$(SLAVE_TAG); \ + docker inspect --type image $(SLAVE_IMAGE):$(SLAVE_TAG) &> /dev/null; \ + } + +SONIC_SLAVE_USER_BUILD = \ + { $(DOCKER_SLAVE_USER_INSPECT) } || \ + { \ + echo Image $(SLAVE_IMAGE):$(SLAVE_TAG) not found. Building... 
; \ + $(DOCKER_USER_BUILD) ; \ + } + +SONIC_BUILD_INSTRUCTION := $(MAKE) \ -f slave.mk \ PLATFORM=$(PLATFORM) \ PLATFORM_ARCH=$(PLATFORM_ARCH) \ @@ -415,87 +498,75 @@ SONIC_BUILD_INSTRUCTION := make \ .PHONY: sonic-slave-build sonic-slave-bash init reset + +ifeq ($(filter clean,$(MAKECMDGOALS)),) +COLLECT_BUILD_VERSION = { DBGOPT='$(DBGOPT)' scripts/collect_build_version_files.sh $$?; } +endif + +ifdef SOURCE_FOLDER + DOCKER_RUN += -v $(SOURCE_FOLDER):/var/$(USER)/src +endif + +ifeq "$(KEEP_SLAVE_ON)" "yes" +SLAVE_SHELL={ /bin/bash; } +endif + .DEFAULT_GOAL := all -%:: +%:: | sonic-build-hooks ifneq ($(filter y, $(MULTIARCH_QEMU_ENVIRON) $(CROSS_BUILD_ENVIRON)),) - @$(DOCKER_MULTIARCH_CHECK) + $(Q)$(DOCKER_MULTIARCH_CHECK) ifneq ($(BLDENV), ) - @$(DOCKER_SERVICE_MULTIARCH_CHECK) - @$(DOCKER_SERVICE_DOCKERFS_CHECK) -endif -endif - @$(OVERLAY_MODULE_CHECK) - - @pushd src/sonic-build-hooks; TRUSTED_GPG_URLS=$(TRUSTED_GPG_URLS) make all; popd - @cp src/sonic-build-hooks/buildinfo/sonic-build-hooks* $(SLAVE_DIR)/buildinfo - @docker inspect --type image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) &> /dev/null || \ - { [ $(ENABLE_DOCKER_BASE_PULL) == y ] && { echo Image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) not found. Pulling...; } && \ - $(DOCKER_BASE_PULL) && \ - { docker tag $(REGISTRY_SERVER):$(REGISTRY_PORT)/$(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) && \ - scripts/collect_docker_version_files.sh $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) target ; } } || \ - { echo Image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) not found. Building... ; \ - $(DOCKER_BASE_BUILD) ; \ - scripts/collect_docker_version_files.sh $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) target ; } - @docker inspect --type image $(SLAVE_IMAGE):$(SLAVE_TAG) &> /dev/null || \ - { echo Image $(SLAVE_IMAGE):$(SLAVE_TAG) not found. Building... 
; \ - $(DOCKER_BUILD) ; } -ifeq "$(KEEP_SLAVE_ON)" "yes" - ifdef SOURCE_FOLDER - @$(DOCKER_RUN) -v $(SOURCE_FOLDER):/var/$(USER)/src $(SLAVE_IMAGE):$(SLAVE_TAG) bash -c "$(SONIC_BUILD_INSTRUCTION) $@; scripts/collect_build_version_files.sh \$$?; /bin/bash" - else - @$(DOCKER_RUN) $(SLAVE_IMAGE):$(SLAVE_TAG) bash -c "$(SONIC_BUILD_INSTRUCTION) $@; scripts/collect_build_version_files.sh \$$?; /bin/bash" - endif -else - @$(DOCKER_RUN) $(SLAVE_IMAGE):$(SLAVE_TAG) bash -c "$(SONIC_BUILD_INSTRUCTION) $@; scripts/collect_build_version_files.sh \$$?" + $(Q)$(DOCKER_SERVICE_MULTIARCH_CHECK) + $(Q)$(DOCKER_SERVICE_DOCKERFS_CHECK) +endif endif + $(Q)$(OVERLAY_MODULE_CHECK) + $(Q)$(SONIC_SLAVE_BASE_BUILD) + $(Q)$(SONIC_SLAVE_USER_BUILD) + + $(Q)$(DOCKER_RUN) \ + $(SLAVE_IMAGE):$(SLAVE_TAG) \ + bash -c "$(SONIC_BUILD_INSTRUCTION) $@;$(COLLECT_BUILD_VERSION); $(SLAVE_SHELL)" + $(Q)$(docker-image-cleanup) docker-cleanup: - $(docker-image-cleanup) + $(Q)$(docker-image-cleanup) +.PHONY: sonic-build-hooks sonic-build-hooks: - @pushd src/sonic-build-hooks; TRUSTED_GPG_URLS=$(TRUSTED_GPG_URLS) make all; popd - @cp src/sonic-build-hooks/buildinfo/sonic-build-hooks* $(SLAVE_DIR)/buildinfo + $(Q)pushd src/sonic-build-hooks; TRUSTED_GPG_URLS=$(TRUSTED_GPG_URLS) $(MAKE) all; popd + $(Q)mkdir -p $(SLAVE_DIR)/buildinfo + $(Q)cp src/sonic-build-hooks/buildinfo/sonic-build-hooks* $(SLAVE_DIR)/buildinfo -sonic-slave-base-build : sonic-build-hooks +sonic-slave-base-build : | sonic-build-hooks ifeq ($(MULTIARCH_QEMU_ENVIRON), y) - @$(DOCKER_MULTIARCH_CHECK) -endif - @$(OVERLAY_MODULE_CHECK) - @echo Checking sonic-slave-base image: $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) - @docker inspect --type image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) &> /dev/null || \ - { [ $(ENABLE_DOCKER_BASE_PULL) == y ] && { echo Image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) not found. 
Pulling...; } && \ - $(DOCKER_BASE_PULL) && \ - { docker tag $(REGISTRY_SERVER):$(REGISTRY_PORT)/$(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) && \ - scripts/collect_docker_version_files.sh $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) target ; } } || \ - { echo Image $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) not found. Building... ; \ - $(DOCKER_BASE_BUILD) ; \ - scripts/collect_docker_version_files.sh $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) target ; } + $(Q)$(DOCKER_MULTIARCH_CHECK) +endif + $(Q)$(OVERLAY_MODULE_CHECK) + $(Q)$(SONIC_SLAVE_BASE_BUILD) sonic-slave-build : sonic-slave-base-build - @echo Checking sonic-slave image: $(SLAVE_IMAGE):$(SLAVE_TAG) - @docker inspect --type image $(SLAVE_IMAGE):$(SLAVE_TAG) &> /dev/null || \ - { echo Image $(SLAVE_IMAGE):$(SLAVE_TAG) not found. Building... ; \ - $(DOCKER_BUILD) ; } + $(Q)$(SONIC_SLAVE_USER_BUILD) sonic-slave-bash : sonic-slave-build - @$(DOCKER_RUN) -t $(SLAVE_IMAGE):$(SLAVE_TAG) bash + $(Q)$(DOCKER_RUN) -t $(SLAVE_IMAGE):$(SLAVE_TAG) bash sonic-slave-run : sonic-slave-build - @$(DOCKER_RUN) $(SLAVE_IMAGE):$(SLAVE_TAG) bash -c "$(SONIC_RUN_CMDS)" + $(Q)$(DOCKER_RUN) $(SLAVE_IMAGE):$(SLAVE_TAG) bash -c "$(SONIC_RUN_CMDS)" showtag: - @echo $(SLAVE_IMAGE):$(SLAVE_TAG) - @echo $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) + $(Q)echo $(SLAVE_IMAGE):$(SLAVE_TAG) + $(Q)echo $(SLAVE_BASE_IMAGE):$(SLAVE_BASE_TAG) init : - @git submodule update --init --recursive - @git submodule foreach --recursive '[ -f .git ] && echo "gitdir: $$(realpath --relative-to=. $$(cut -d" " -f2 .git))" > .git' + $(Q)git submodule update --init --recursive + $(Q)git submodule foreach --recursive '[ -f .git ] && echo "gitdir: $$(realpath --relative-to=. $$(cut -d" " -f2 .git))" > .git' .ONESHELL : reset reset : - @echo && echo -n "Warning! All local changes will be lost. Proceed? [y/N]: " - @read ans && ( + $(Q)echo && echo -n "Warning! All local changes will be lost. Proceed? 
[y/N]: " + $(Q)read ans && ( if [ $$ans == y ]; then echo "Resetting local repository. Please wait..."; sudo rm -rf fsroot*; diff --git a/slave.mk b/slave.mk index c521b3807356..28efed3dfc11 100644 --- a/slave.mk +++ b/slave.mk @@ -92,32 +92,32 @@ export BLDENV .platform : ifneq ($(CONFIGURED_PLATFORM),generic) - @echo Build system is not configured, please run make configure - @exit 1 + $(Q)echo Build system is not configured, please run make configure + $(Q)exit 1 endif configure : - @mkdir -p $(JESSIE_DEBS_PATH) - @mkdir -p $(STRETCH_DEBS_PATH) - @mkdir -p $(BUSTER_DEBS_PATH) - @mkdir -p $(BULLSEYE_DEBS_PATH) - @mkdir -p $(FILES_PATH) - @mkdir -p $(JESSIE_FILES_PATH) - @mkdir -p $(STRETCH_FILES_PATH) - @mkdir -p $(BUSTER_FILES_PATH) - @mkdir -p $(BULLSEYE_FILES_PATH) - @mkdir -p $(PYTHON_DEBS_PATH) - @mkdir -p $(PYTHON_WHEELS_PATH) - @mkdir -p $(DPKG_ADMINDIR_PATH) - @echo $(PLATFORM) > .platform - @echo $(PLATFORM_ARCH) > .arch + $(Q)mkdir -p $(JESSIE_DEBS_PATH) + $(Q)mkdir -p $(STRETCH_DEBS_PATH) + $(Q)mkdir -p $(BUSTER_DEBS_PATH) + $(Q)mkdir -p $(BULLSEYE_DEBS_PATH) + $(Q)mkdir -p $(FILES_PATH) + $(Q)mkdir -p $(JESSIE_FILES_PATH) + $(Q)mkdir -p $(STRETCH_FILES_PATH) + $(Q)mkdir -p $(BUSTER_FILES_PATH) + $(Q)mkdir -p $(BULLSEYE_FILES_PATH) + $(Q)mkdir -p $(PYTHON_DEBS_PATH) + $(Q)mkdir -p $(PYTHON_WHEELS_PATH) + $(Q)mkdir -p $(DPKG_ADMINDIR_PATH) + $(Q)echo $(PLATFORM) > .platform + $(Q)echo $(PLATFORM_ARCH) > .arch distclean : .platform clean - @rm -f .platform - @rm -f .arch + $(Q)rm -f .platform + $(Q)rm -f .arch list : - @$(foreach target,$(SONIC_TARGET_LIST),echo $(target);) + $(Q)$(foreach target,$(SONIC_TARGET_LIST),echo $(target);) ############################################################################### ## Include other rules @@ -177,7 +177,7 @@ endif # TODO(PINS): Remove when Bazel binaries are available for armhf ifeq ($(CONFIGURED_ARCH),armhf) ifeq ($(INCLUDE_P4RT),y) - @echo "Disabling P4RT due to incompatible CPU architecture: 
$(CONFIGURED_ARCH)" + $(Q)echo "Disabling P4RT due to incompatible CPU architecture: $(CONFIGURED_ARCH)" endif override INCLUDE_P4RT = n endif @@ -205,7 +205,7 @@ endif ifeq ($(ENABLE_ASAN),y) ifneq ($(CONFIGURED_ARCH),amd64) - @echo "Disabling SWSS address sanitizer due to incompatible CPU architecture: $(CONFIGURED_ARCH)" + $(Q)echo "Disabling SWSS address sanitizer due to incompatible CPU architecture: $(CONFIGURED_ARCH)" override ENABLE_ASAN = n endif endif @@ -870,12 +870,12 @@ endif # start docker daemon docker-start : - @sudo sed -i 's/--storage-driver=vfs/--storage-driver=$(SONIC_SLAVE_DOCKER_DRIVER)/' /etc/default/docker - @sudo sed -i -e '/http_proxy/d' -e '/https_proxy/d' /etc/default/docker - @sudo bash -c "{ echo \"export http_proxy=$$http_proxy\"; \ + $(Q)sudo sed -i 's/--storage-driver=vfs/--storage-driver=$(SONIC_SLAVE_DOCKER_DRIVER)/' /etc/default/docker + $(Q)sudo sed -i -e '/http_proxy/d' -e '/https_proxy/d' /etc/default/docker + $(Q)sudo bash -c "{ echo \"export http_proxy=$$http_proxy\"; \ echo \"export https_proxy=$$https_proxy\"; \ echo \"export no_proxy=$$no_proxy\"; } >> /etc/default/docker" - @test x$(SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD) != x"y" && sudo service docker status &> /dev/null || ( sudo service docker start &> /dev/null && ./scripts/wait_for_docker.sh 60 ) + $(Q)test x$(SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD) != x"y" && sudo service docker status &> /dev/null || ( sudo service docker start &> /dev/null && ./scripts/wait_for_docker.sh 60 ) # targets for building simple docker images that do not depend on any debian packages $(addprefix $(TARGET_PATH)/, $(SONIC_SIMPLE_DOCKER_IMAGES)) : $(TARGET_PATH)/%.gz : .platform docker-start $$(addsuffix -load,$$(addprefix $(TARGET_PATH)/,$$($$*.gz_LOAD_DOCKERS))) @@ -1414,12 +1414,12 @@ SONIC_CLEAN_FILES = $(addsuffix -clean,$(addprefix $(FILES_PATH)/, \ $(SONIC_MAKE_FILES))) $(SONIC_CLEAN_DEBS) :: $(DEBS_PATH)/%-clean : .platform $$(addsuffix -clean,$$(addprefix 
$(DEBS_PATH)/,$$($$*_MAIN_DEB))) - @# remove derived or extra targets if main one is removed, because we treat them - @# as part of one package - @rm -f $(addprefix $(DEBS_PATH)/, $* $($*_DERIVED_DEBS) $($*_EXTRA_DEBS)) + $(Q)# remove derived or extra targets if main one is removed, because we treat them + $(Q)# as part of one package + $(Q)rm -f $(addprefix $(DEBS_PATH)/, $* $($*_DERIVED_DEBS) $($*_EXTRA_DEBS)) $(SONIC_CLEAN_FILES) :: $(FILES_PATH)/%-clean : .platform - @rm -f $(FILES_PATH)/$* + $(Q)rm -f $(FILES_PATH)/$* SONIC_CLEAN_TARGETS += $(addsuffix -clean,$(addprefix $(TARGET_PATH)/, \ $(SONIC_DOCKER_IMAGES) \ @@ -1427,20 +1427,20 @@ SONIC_CLEAN_TARGETS += $(addsuffix -clean,$(addprefix $(TARGET_PATH)/, \ $(SONIC_SIMPLE_DOCKER_IMAGES) \ $(SONIC_INSTALLERS))) $(SONIC_CLEAN_TARGETS) :: $(TARGET_PATH)/%-clean : .platform - @rm -f $(TARGET_PATH)/$* + $(Q)rm -f $(TARGET_PATH)/$* SONIC_CLEAN_STDEB_DEBS = $(addsuffix -clean,$(addprefix $(PYTHON_DEBS_PATH)/, \ $(SONIC_PYTHON_STDEB_DEBS))) $(SONIC_CLEAN_STDEB_DEBS) :: $(PYTHON_DEBS_PATH)/%-clean : .platform - @rm -f $(PYTHON_DEBS_PATH)/$* + $(Q)rm -f $(PYTHON_DEBS_PATH)/$* SONIC_CLEAN_WHEELS = $(addsuffix -clean,$(addprefix $(PYTHON_WHEELS_PATH)/, \ $(SONIC_PYTHON_WHEELS))) $(SONIC_CLEAN_WHEELS) :: $(PYTHON_WHEELS_PATH)/%-clean : .platform - @rm -f $(PYTHON_WHEELS_PATH)/$* + $(Q)rm -f $(PYTHON_WHEELS_PATH)/$* clean-logs :: .platform - @rm -f $(TARGET_PATH)/*.log $(DEBS_PATH)/*.log $(FILES_PATH)/*.log $(PYTHON_DEBS_PATH)/*.log $(PYTHON_WHEELS_PATH)/*.log + $(Q)rm -f $(TARGET_PATH)/*.log $(DEBS_PATH)/*.log $(FILES_PATH)/*.log $(PYTHON_DEBS_PATH)/*.log $(PYTHON_WHEELS_PATH)/*.log clean :: .platform clean-logs $$(SONIC_CLEAN_DEBS) $$(SONIC_CLEAN_FILES) $$(SONIC_CLEAN_TARGETS) $$(SONIC_CLEAN_STDEB_DEBS) $$(SONIC_CLEAN_WHEELS) From 1f0699f51e51cf9ca6e36d85bc632ee0e31792e4 Mon Sep 17 00:00:00 2001 From: xumia <59720581+xumia@users.noreply.github.com> Date: Wed, 5 Oct 2022 08:10:54 +0800 Subject: [PATCH 013/174] Fix 
sonic-config low dpkg hit rate issue (#12244) Why I did it When sending a PR only CI change, as expected, the target target/python-wheels/buster/sonic_config_engine-1.0-py2-none-any.whl should be from the cache, because the depended files were not changed, but it rebuilt. How I did it Sort the files by name. --- rules/sonic-config.dep | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rules/sonic-config.dep b/rules/sonic-config.dep index 65aabe74d76f..2b8b98fcd42b 100644 --- a/rules/sonic-config.dep +++ b/rules/sonic-config.dep @@ -4,7 +4,7 @@ SPATH := $($(SONIC_CONFIG_ENGINE_PY3)_SRC_PATH) DEP_FILES := $(SONIC_COMMON_FILES_LIST) rules/sonic-config.mk rules/sonic-config.dep DEP_FILES += $(SONIC_COMMON_BASE_FILES_LIST) DEP_FILES += $(shell git ls-files $(SPATH)) -DEP_FILES += files/image_config/interfaces/interfaces.j2 dockers/docker-orchagent/ports.json.j2 dockers/docker-dhcp-relay/wait_for_intf.sh.j2 dockers/docker-dhcp-relay/docker-dhcp-relay.supervisord.conf.j2 dockers/docker-lldp/lldpd.conf.j2 dockers/docker-orchagent/ipinip.json.j2 $(shell find device -type f) files/build_templates/qos_config.j2 dockers/docker-orchagent/switch.json.j2 dockers/docker-orchagent/vxlan.json.j2 files/image_config/constants/constants.yml +DEP_FILES += files/image_config/interfaces/interfaces.j2 dockers/docker-orchagent/ports.json.j2 dockers/docker-dhcp-relay/wait_for_intf.sh.j2 dockers/docker-dhcp-relay/docker-dhcp-relay.supervisord.conf.j2 dockers/docker-lldp/lldpd.conf.j2 dockers/docker-orchagent/ipinip.json.j2 $(shell find device -type f | sort) files/build_templates/qos_config.j2 dockers/docker-orchagent/switch.json.j2 dockers/docker-orchagent/vxlan.json.j2 files/image_config/constants/constants.yml ifeq ($(ENABLE_PY2_MODULES), y) $(SONIC_CONFIG_ENGINE_PY2)_CACHE_MODE := GIT_CONTENT_SHA From 3686454c6ed56500e50df7c4fade62bd315edfc4 Mon Sep 17 00:00:00 2001 From: kannankvs Date: Thu, 6 Oct 2022 06:21:42 +0530 Subject: [PATCH 014/174] Updated the template with 
comment recieved (#12276) Updated the PR template with comment received on removing the reference link on GCU. Hence added text to show reference for GCU PR. --- .github/pull_request_template.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index d553f7a4d0c4..85645d8ff7ee 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -39,7 +39,7 @@ Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> -#### Ensure to add label/tag for the feature raised. example - [PR#2174](https://github.com/sonic-net/sonic-utilities/pull/2174) where, Generic Config and Update feature has been labelled as GCU. +#### Ensure to add label/tag for the feature raised. example - PR#2174 under sonic-utilities repo. where, Generic Config and Update feature has been labelled as GCU. #### Link to config_db schema for YANG module changes A{merged?} +A -- NO --> STOP +A -- YES --> A1{Approved
for 202205
Branch?} +A1 -- NO --> STOP +A1 -- YES --> A2(pr_cherrypick_prestep) +B(pr_cherrypick_prestep) +B --> B1{cherry pick
conflict?} +B1 -- YES --> B2(Add tag:
Cherry Pick Conflict_202205) --> B3(Add comment:
refer author code conflict) --> STOP1(STOP) +B1 -- NO --> B4(Create New PR) -- success --> B5(New PR add tag:
automerge) --> B6(New PR add comment:
Origin PR link) --> B7(Origin PR add tag:
Created PR to 202205 Branch) --> B8(Origin PR add comment:
New PR link) +B4 -- fail --> STOP1 +``` + +2. automerge: +```mermaid +graph +Start(PR azp finished successfully) --> A{author:
mssonicbld?} +A -- NO --> STOP +A -- YES --> B{tag:
automerge?} -- YES --> C(Merge PR) +B -- NO --> STOP +``` + +3. pr_cherrypick_poststep: +```mermaid +graph +A(PR is Merged) --> B{tag:
automerge?} +B -- YES --> B1{author:
mssonicbld?} +B1 -- YES --> B2{"title starts:
[action] [PR:123]"} +B2 -- YES --> C(Origin PR remove tag:
Created PR to 202205 Branch) --> D(Origin PR add tag:
Included in 202205 Branch) +B -- NO --> STOP +B1 -- NO --> STOP +B2 -- NO --> STOP +``` diff --git a/.github/workflows/pr_cherrypick_poststep.yml b/.github/workflows/pr_cherrypick_poststep.yml new file mode 100644 index 000000000000..1e9e497075d7 --- /dev/null +++ b/.github/workflows/pr_cherrypick_poststep.yml @@ -0,0 +1,49 @@ +name: PostCherryPick +on: + pull_request_target: + types: + - closed + branches: + - '20*' + +jobs: + post_cherry_pick: + if: github.event.pull_request.merged == true && contains(github.event.pull_request.labels.*.name, 'automerge') && github.event.pull_request.head.user.login == 'mssonicbld' && startsWith(github.event.pull_request.title, '[action]') + runs-on: ubuntu-latest + steps: + - name: Debug + env: + GITHUB_CONTEXT: ${{ toJson(github) }} + run: echo $GITHUB_CONTEXT | jq + - name: Checkout + uses: actions/checkout@v3 + with: + persist-credentials: false + - name: Main + env: + GITHUB_CONTEXT: ${{ toJson(github) }} + TOKEN: ${{ secrets.TOKEN }} + run: | + set -e + pr_url=$(echo $GITHUB_CONTEXT | jq -r ".event.pull_request._links.html.href") + pr_id=$(echo $GITHUB_CONTEXT | jq -r ".event.number") + base_ref=$(echo $GITHUB_CONTEXT | jq -r ".base_ref") + echo ${TOKEN} | gh auth login --with-token + title=$(echo $GITHUB_CONTEXT | jq -r ".event.pull_request.title") + origin_pr_id=$(echo $title | grep -Eo "\[action\] \[PR:[0-9]*\]" | grep -Eo "[0-9]*") + origin_pr_url=$(echo $pr_url | sed "s/$pr_id/$origin_pr_id/") + echo ============================= + echo pr_url: $pr_url + echo pr_id: $pr_id + echo base_ref: $base_ref + echo title: $title + echo origin_pr_id: $origin_pr_id + echo origin_pr_url: $origin_pr_url + echo ============================= + # Add label + if [[ "$origin_pr_id" == "" ]];then + echo "original PR didn't found." 
+ exit 1 + fi + gh pr edit $origin_pr_url --add-label "Included in ${base_ref} Branch" + gh pr edit $origin_pr_url --remove-label "Created PR to ${base_ref} Branch,Request for ${base_ref} Branch,Approved for ${base_ref} Branch" diff --git a/.github/workflows/pr_cherrypick_prestep.yml b/.github/workflows/pr_cherrypick_prestep.yml new file mode 100644 index 000000000000..3caf3f940843 --- /dev/null +++ b/.github/workflows/pr_cherrypick_prestep.yml @@ -0,0 +1,136 @@ +name: PreCherryPick +on: + pull_request_target: + types: + - labeled + - closed + branches: + - master-test + +jobs: + pre_cherry_pick: + if: github.event.pull_request.merged == true && ( (github.event.action == 'closed' && contains(join(github.event.pull_request.labels.*.name, ','), 'Approved for 20')) || (github.event.action == 'labeled' && startsWith(github.event.label.name, 'Approved for 20')) ) + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + persist-credentials: false + - name: Debug + env: + GITHUB_CONTEXT: ${{ toJson(github) }} + run: echo $GITHUB_CONTEXT | jq + - name: Main + env: + GITHUB_CONTEXT: ${{ toJson(github) }} + TOKEN: ${{ secrets.TOKEN }} + run: | + set -e + + sha=$(echo $GITHUB_CONTEXT | jq -r ".event.pull_request.merge_commit_sha") + pr_id=$(echo $GITHUB_CONTEXT | jq -r ".event.number") + pr_url=$(echo $GITHUB_CONTEXT | jq -r ".event.pull_request._links.html.href") + repository=$(echo $GITHUB_CONTEXT | jq -r ".repository") + labels=$(echo $GITHUB_CONTEXT | jq -r ".event.pull_request.labels[].name") + author=$(echo $GITHUB_CONTEXT | jq -r ".event.pull_request.base.user.login") + branches=$(git branch -a --list 'origin/20????' 
| awk -F/ '{print$3}' | grep -E "202[0-9]{3}") + if [[ $(echo $GITHUB_CONTEXT | jq -r ".event.action") == "labeled" ]];then + labels=$(echo $GITHUB_CONTEXT | jq -r ".event.label.name") + fi + title=$(echo $GITHUB_CONTEXT | jq -r ".event.pull_request.title") + echo ============================= + echo SHA: $sha + echo PRID: $pr_id + echo pr_url: $pr_url + echo repository: $repository + echo branches: $branches + echo labels: + echo "$labels" + echo ${TOKEN} | gh auth login --with-token + echo author: $author + echo title: $title + echo ============================= + + git config user.name mssonicbld + git config user.email sonicbld@microsoft.com + git config credential.https://github.com.username mssonicbld + git remote add mssonicbld https://mssonicbld:${TOKEN}@github.com/mssonicbld/sonic-buildimage + git fetch mssonicbld + git remote -vv + + cherry_pick(){ + set -e + local create_pr='' + while read label + do + echo label: $label + if [[ "$label" == "Approved for $branch Branch" ]];then + create_pr=1 + fi + if [[ "$label" == "Created PR to $branch Branch" ]];then + echo "already has tag: Created PR to $branch Branch, return" + return 0 + fi + if [[ "$label" == "Included in $branch Branch" ]];then + echo "already has tag: Included in $branch Branch, return" + return 0 + fi + if [[ "$label" == "Cherry Pick Conflict_$branch" ]];then + echo "already has tag: Cherry Pick Conflict_$branch, return" + return 0 + fi + done <<< "$labels" + + if [[ "$create_pr" != "1" ]];then + echo "Didn't find 'Approved for $branch Branch' tag." + return 0 + fi + # Begin to cherry-pick PR + git cherry-pick --abort 2>/dev/null || true + git clean -xdff 2>/dev/null || true + git reset HEAD --hard || true + git checkout -b $branch --track origin/$branch + git status | grep "working tree clean" + + if ! git cherry-pick $sha;then + echo 'cherry-pick failed.' 
+ git cherry-pick --abort + git status | grep "working tree clean" + # Add label + gh pr edit $pr_url --add-label "Cherry Pick Conflict_$branch" + echo 'Add label "Cherry Pick Conflict_$branch" success' + gh pr comment $pr_url --body "@${author} PR conflicts with $branch branch" + echo 'Add commnet "@${author} PR conflicts with $branch branch"' + else + # Create PR to release branch + git push mssonicbld HEAD:$branch-${pr_id} -f + result=$(gh pr create -R ${repository} -H mssonicbld:$branch-${pr_id} -B $branch -t "[action] [PR:$pr_id] $title" -b '' 2>&1) + echo $result | grep "already exists" && { echo $result; return 0; } + echo $result | grep github.com || { echo $result; return 1; } + new_pr_rul=$(echo $result | grep github.com) + echo new_pr_rul: $new_pr_rul + + # Add label to old PR + gh pr edit $pr_url --add-label "Created PR to $branch Branch" + echo Add label Created PR to $branch Branch + # Add comment to old PR + gh pr comment $pr_url --body "Cherry-pick PR to $branch: ${new_pr_rul}" + echo Add comment to old PR + + # Add label to new PR + gh pr edit $new_pr_rul --add-label "automerge" + echo Add label automerge to new PR + # Add comment to new PR + gh pr comment $new_pr_rul --body "Original PR: ${pr_url}" + echo Add comment to new PR + fi + } + + for branch in $branches + do + echo ------------------------------------------- + echo Begin to parse Branch: $branch + cherry_pick + done + From 09d4d3e6e704134b08f1bd62e151b1b640c7aa64 Mon Sep 17 00:00:00 2001 From: Liu Shilong Date: Mon, 10 Oct 2022 17:11:31 +0800 Subject: [PATCH 030/174] [action] Fix trigger issue in PR pre-cherry-pick action. 
(#12333) --- .github/workflows/pr_cherrypick_prestep.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr_cherrypick_prestep.yml b/.github/workflows/pr_cherrypick_prestep.yml index 3caf3f940843..9d2ddc6893af 100644 --- a/.github/workflows/pr_cherrypick_prestep.yml +++ b/.github/workflows/pr_cherrypick_prestep.yml @@ -5,7 +5,7 @@ on: - labeled - closed branches: - - master-test + - master jobs: pre_cherry_pick: From 94c998965c387023deef4c86cc2dfc78f6561984 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Mon, 10 Oct 2022 10:12:26 -0400 Subject: [PATCH 031/174] [broadcom] Replace popen function (#12106) Signed-off-by: maipbui #### Why I did it `os` - not secure against maliciously constructed input and dangerous if used to evaluate dynamic content. #### How I did it `os` - use with `subprocess` #### How to verify it --- .../utils/brcm-xlr-gts-create-eeprom-file.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/platform/broadcom/sonic-platform-modules-brcm-xlr-gts/utils/brcm-xlr-gts-create-eeprom-file.py b/platform/broadcom/sonic-platform-modules-brcm-xlr-gts/utils/brcm-xlr-gts-create-eeprom-file.py index d8e7ec0ee138..e977a95f7477 100755 --- a/platform/broadcom/sonic-platform-modules-brcm-xlr-gts/utils/brcm-xlr-gts-create-eeprom-file.py +++ b/platform/broadcom/sonic-platform-modules-brcm-xlr-gts/utils/brcm-xlr-gts-create-eeprom-file.py @@ -7,6 +7,7 @@ import struct from ctypes import * import os +from sonic_py_common.general import getstatusoutput_noshell_pipe TLV_CODE_PRODUCT_NAME = 0x21 TLV_CODE_SERIAL_NUMBER = 0x23 @@ -71,7 +72,7 @@ def main(): tlvinfo_data = TLVINFO_DATA() tlvinfo_data.add_tlv_str(TLV_CODE_SERIAL_NUMBER, 'S/N') - onie_machine = os.popen("cat /host/machine.conf | grep 'onie_machine=' | sed 's/onie_machine=//'").read().strip() + _, onie_machine = getstatusoutput_noshell_pipe(["cat", "/host/machine.conf"], ["grep", 'onie_machine='], ["sed", 's/onie_machine=//']) if onie_machine == 
'bcm_xlr': tlvinfo_data.add_tlv_str(TLV_CODE_PRODUCT_NAME, 'BCM9COMX2XMC') else: @@ -83,11 +84,11 @@ def main(): eth0_mac = eth0_mac_str.split(':') tlvinfo_data.add_tlv_mac(TLV_CODE_MAC_BASE, eth0_mac) - brcm_dev = os.popen("lspci | grep -m1 'Ethernet controller: Broadcom ' | grep 'Device' | sed 's/(.*//' | awk '{print $NF}'").read().strip() + _, brcm_dev = getstatusoutput_noshell_pipe(["lspci"], ["grep", "-m1", 'Ethernet controller: Broadcom '], ["grep", 'Device'], ["sed", 's/(.*//'], ["awk", '{print $NF}']) if brcm_dev == 'b960': tlvinfo_data.add_tlv_str(TLV_CODE_PLATFORM_NAME, 'BCM956960K') - onie_version = os.popen("cat /host/machine.conf | grep 'onie_version' | sed 's/onie_version=//'").read().strip() + onie_version = getstatusoutput_noshell_pipe(["cat", "/host/machine.conf"], ["grep", 'onie_version'], ["sed", 's/onie_version=//']) tlvinfo_data.add_tlv_str(TLV_CODE_ONIE_VERSION, onie_version) tlvinfo_header.totallen = len(tlvinfo_data.dump())+4; From 9b2b8e3e86d5ae26a5f1332dcbd50d5a08b28028 Mon Sep 17 00:00:00 2001 From: byu343 Date: Mon, 10 Oct 2022 13:35:06 -0700 Subject: [PATCH 032/174] Add gearbox taps to vs gearbox_config.json (#11480) Why I did it For the change to support gearbox taps by gearbox_config.json (sonic-net/sonic-swss#2158), I need to add tests to sonic-swss/tests/test_gearbox.py to satisfy the test coverage of the change. The existing code in test_gearbox.py has already used brcm_gearbox_vs and here is to add some gearbox tap value to its gearbox_config.json, for the added tests in sonic-swss/tests/test_gearbox.py. How I did it How to verify it This change itself will not affect existing code. 
--- .../brcm_gearbox_vs/gearbox_config.json | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/device/virtual/x86_64-kvm_x86_64-r0/brcm_gearbox_vs/gearbox_config.json b/device/virtual/x86_64-kvm_x86_64-r0/brcm_gearbox_vs/gearbox_config.json index f40be4f3eb62..40b8cd29ef15 100644 --- a/device/virtual/x86_64-kvm_x86_64-r0/brcm_gearbox_vs/gearbox_config.json +++ b/device/virtual/x86_64-kvm_x86_64-r0/brcm_gearbox_vs/gearbox_config.json @@ -20,7 +20,17 @@ "index": 0, "phy_id" : 1, "system_lanes": [200,201], - "line_lanes": [206] + "line_lanes": [206], + "system_tx_fir_pre2": [1,1], + "system_tx_fir_pre1": [-5,-5], + "system_tx_fir_main": [14,14], + "system_tx_fir_post1": [0,0], + "system_tx_fir_post2": [0,0], + "line_tx_fir_pre2": [0], + "line_tx_fir_pre1": [-1], + "line_tx_fir_main": [13], + "line_tx_fir_post1": [-5], + "line_tx_fir_post2": [0] }, { "name": "Ethernet4", From df93a1be546f2b4294f8dad26b568a33f41af699 Mon Sep 17 00:00:00 2001 From: xumia <59720581+xumia@users.noreply.github.com> Date: Tue, 11 Oct 2022 07:59:14 +0800 Subject: [PATCH 033/174] [Build][Bug] Fix apt-get remove version not lock issue (#12193) Why I did it Fix apt-get remove/purge version not locked issue when the apt-get options not specified. How I did it Add a space character before and after the command line parameters. 
--- src/sonic-build-hooks/hooks/apt-get | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-build-hooks/hooks/apt-get b/src/sonic-build-hooks/hooks/apt-get index 068293a3e352..3f099375c074 100755 --- a/src/sonic-build-hooks/hooks/apt-get +++ b/src/sonic-build-hooks/hooks/apt-get @@ -20,7 +20,7 @@ if [ "$INSTALL" == y ]; then [ "$lock_result" == y ] && release_apt_installation_lock exit $command_result else - if [[ "$1" == "purge" || "$@" == *" purge "* || "$@" == *" remove "* ]]; then + if [[ " $@ " == *" purge "* || " $@ " == *" remove "* ]]; then # When running the purge command, collect the debian versions dpkg-query -W -f '${Package}==${Version}\n' >> $POST_VERSION_PATH/purge-versions-deb chmod a+wr $POST_VERSION_PATH/purge-versions-deb From 304c6c80c42b808166f1fb19e6c5e163c9a8f2e4 Mon Sep 17 00:00:00 2001 From: Andriy Kokhan Date: Tue, 11 Oct 2022 04:12:28 +0300 Subject: [PATCH 034/174] [BFN] Reworked BFN platform thermals plugin (#11723) * [BFN] Updated platform.json for wedge100bf_65x Signed-off-by: Andriy Kokhan * Reworked BFN platform thermal logic * Implemented PSU thermal APIs * Updated platform.json for accton_wedge100bf_32x Signed-off-by: Andriy Kokhan * Updated BFN platform plugins initialization flow Signed-off-by: Andriy Kokhan Signed-off-by: Andriy Kokhan --- .../x86_64-accton_as9516_32d-r0/platform.json | 42 +- .../platform.json | 132 +++-- .../platform.json | 471 ++++++++++++++++-- .../platform_components.json | 4 +- .../thermal_thresholds.json | 79 +++ .../sonic_platform/chassis.py | 44 +- .../sonic_platform/component.py | 10 +- .../sonic_platform/psu.py | 31 +- .../sonic_platform/sfp.py | 41 +- .../sonic_platform/thermal.py | 172 ++++--- 10 files changed, 851 insertions(+), 175 deletions(-) create mode 100644 device/barefoot/x86_64-accton_wedge100bf_65x-r0/thermal_thresholds.json diff --git a/device/barefoot/x86_64-accton_as9516_32d-r0/platform.json b/device/barefoot/x86_64-accton_as9516_32d-r0/platform.json index 
abd2fec0a303..191f0c4834e4 100644 --- a/device/barefoot/x86_64-accton_as9516_32d-r0/platform.json +++ b/device/barefoot/x86_64-accton_as9516_32d-r0/platform.json @@ -133,11 +133,31 @@ "psus": [ { "name": "psu-1", - "temperature": false + "thermals": [ + { + "name": "psu_driver-i2c-7-5a:psu1-temp1" + }, + { + "name": "psu_driver-i2c-7-5a:psu1-temp2" + }, + { + "name": "psu_driver-i2c-7-5a:psu1-temp3" + } + ] }, { "name": "psu-2", - "temperature": false + "thermals": [ + { + "name": "psu_driver-i2c-7-59:psu2-temp1" + }, + { + "name": "psu_driver-i2c-7-59:psu2-temp2" + }, + { + "name": "psu_driver-i2c-7-59:psu2-temp3" + } + ] } ], "thermals": [ @@ -147,24 +167,6 @@ { "name": "com_e_driver-i2c-4-33:memory-temp" }, - { - "name": "psu_driver-i2c-7-59:psu2-temp1" - }, - { - "name": "psu_driver-i2c-7-59:psu2-temp2" - }, - { - "name": "psu_driver-i2c-7-59:psu2-temp3" - }, - { - "name": "psu_driver-i2c-7-5a:psu1-temp1" - }, - { - "name": "psu_driver-i2c-7-5a:psu1-temp2" - }, - { - "name": "psu_driver-i2c-7-5a:psu1-temp3" - }, { "name": "tmp75-i2c-3-48:chip-temp" }, diff --git a/device/barefoot/x86_64-accton_wedge100bf_32x-r0/platform.json b/device/barefoot/x86_64-accton_wedge100bf_32x-r0/platform.json index 5fee4f0eebaf..e788dbeff208 100644 --- a/device/barefoot/x86_64-accton_wedge100bf_32x-r0/platform.json +++ b/device/barefoot/x86_64-accton_wedge100bf_32x-r0/platform.json @@ -9,51 +9,131 @@ "name": "BMC" } ], + "thermal_manager": false, "fans": [ { - "name": "counter-rotating-fan-1" - }, - { - "name": "counter-rotating-fan-2" - }, - { - "name": "counter-rotating-fan-3" - }, - { - "name": "counter-rotating-fan-4" - }, - { - "name": "counter-rotating-fan-5" + "name": "counter-rotating-fan-1", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } + }, + { + "name": "counter-rotating-fan-2", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } + }, + { + "name": "counter-rotating-fan-3", + "status_led": { + 
"controllable": false + }, + "speed": { + "controllable": false + } + }, + { + "name": "counter-rotating-fan-4", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } + }, + { + "name": "counter-rotating-fan-5", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } } ], "fan_drawers": [ { "name": "fantray-1", + "status_led": { + "controllable": false + }, "fans": [ { - "name": "counter-rotating-fan-1" + "name": "counter-rotating-fan-1", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-2" + "name": "counter-rotating-fan-2", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-3" + "name": "counter-rotating-fan-3", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-4" + "name": "counter-rotating-fan-4", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-5" + "name": "counter-rotating-fan-5", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } } ] } ], "psus": [ { - "name": "psu-1" + "name": "psu-1", + "thermals": [ + { + "name": "psu_driver-i2c-7-5a:psu1-temp1" + }, + { + "name": "psu_driver-i2c-7-5a:psu1-temp2" + } + ] }, { - "name": "psu-2" + "name": "psu-2", + "thermals": [ + { + "name": "psu_driver-i2c-7-59:psu2-temp1" + }, + { + "name": "psu_driver-i2c-7-59:psu2-temp2" + } + ] } ], "thermals": [ @@ -63,18 +143,6 @@ { "name": "com_e_driver-i2c-4-33:memory-temp" }, - { - "name": "psu_driver-i2c-7-59:psu2-temp1" - }, - { - "name": "psu_driver-i2c-7-59:psu2-temp2" - }, - { - "name": "psu_driver-i2c-7-5a:psu1-temp1" - }, - { - "name": "psu_driver-i2c-7-5a:psu1-temp2" - }, { "name": "tmp75-i2c-3-48:chip-temp" }, diff --git 
a/device/barefoot/x86_64-accton_wedge100bf_65x-r0/platform.json b/device/barefoot/x86_64-accton_wedge100bf_65x-r0/platform.json index 73bb6008dbba..f302883f409d 100644 --- a/device/barefoot/x86_64-accton_wedge100bf_65x-r0/platform.json +++ b/device/barefoot/x86_64-accton_wedge100bf_65x-r0/platform.json @@ -1,89 +1,502 @@ { "chassis": { - "name": "Mavericks", + "name": "Wedge100BF-65X-O-AC-F-BF", + "components": [ + { + "name": "BIOS" + }, + { + "name": "BMC" + } + ], + "thermal_manager": false, "fans": [ { - "name": "counter-rotating-fan-1" + "name": "counter-rotating-fan-1", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-2" + "name": "counter-rotating-fan-2", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-3" + "name": "counter-rotating-fan-3", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-4" + "name": "counter-rotating-fan-4", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-5" + "name": "counter-rotating-fan-5", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-6" + "name": "counter-rotating-fan-6", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-7" + "name": "counter-rotating-fan-7", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-8" + "name": "counter-rotating-fan-8", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-9" + "name": "counter-rotating-fan-9", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": 
"counter-rotating-fan-10" + "name": "counter-rotating-fan-10", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } } ], "fan_drawers": [ { "name": "fantray-1", + "status_led": { + "controllable": false + }, "fans": [ { - "name": "counter-rotating-fan-1" + "name": "counter-rotating-fan-1", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-2" + "name": "counter-rotating-fan-2", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-3" + "name": "counter-rotating-fan-3", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-4" + "name": "counter-rotating-fan-4", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-5" + "name": "counter-rotating-fan-5", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } } ] }, { "name": "fantray-2", + "status_led": { + "controllable": false + }, "fans": [ { - "name": "counter-rotating-fan-6" + "name": "counter-rotating-fan-6", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } + }, + { + "name": "counter-rotating-fan-7", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-7" + "name": "counter-rotating-fan-8", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-8" + "name": "counter-rotating-fan-9", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } }, { - "name": "counter-rotating-fan-9" + "name": "counter-rotating-fan-10", + "status_led": { + "controllable": false + }, + "speed": { + "controllable": false + } + } + ] + } + ], + "psus": [ + { + "name": "psu-1", + 
"thermals": [ + { + "name": "psu_driver-i2c-7-5a:psu1-temp1" }, { - "name": "counter-rotating-fan-10" + "name": "psu_driver-i2c-7-5a:psu1-temp2" } ] + }, + { + "name": "psu-2", + "thermals": [ + { + "name": "psu_driver-i2c-7-59:psu2-temp1" + }, + { + "name": "psu_driver-i2c-7-59:psu2-temp2" + } + ] + } + ], + "thermals": [ + { + "name": "com_e_driver-i2c-4-33:cpu-temp" + }, + { + "name": "com_e_driver-i2c-4-33:memory-temp" + }, + { + "name": "coretemp-isa-0000:core-0" + }, + { + "name": "coretemp-isa-0000:core-1" + }, + { + "name": "coretemp-isa-0000:core-2" + }, + { + "name": "coretemp-isa-0000:core-3" + }, + { + "name": "coretemp-isa-0000:package-id-0" + }, + { + "name": "max6658-i2c-9-4c:come-board-temp" + }, + { + "name": "max6658-i2c-9-4c:max6658-chip-temp" + }, + { + "name": "pch_haswell-virtual-0:temp1" + }, + { + "name": "tmp75-i2c-3-4a:exhaust-temp" + }, + { + "name": "tmp75-i2c-3-4b:intake-temp" + }, + { + "name": "tmp75-i2c-3-4c:intake2-temp" + }, + { + "name": "tmp75-i2c-3-48:chip-temp" + }, + { + "name": "tmp75-i2c-3-49:exhaust2-temp" + }, + { + "name": "tmp75-i2c-8-48:fan-board-outlet-right-temp" + }, + { + "name": "tmp75-i2c-8-49:fan-board-outlet-left-temp" + }, + { + "name": "tmp75-i2c-9-4a:upper-board-intake-temp" + }, + { + "name": "tmp75-i2c-9-4b:upper-board-tofino-temp" + }, + { + "name": "tmp75-i2c-9-48:upper-board-intake2-temp" + }, + { + "name": "tmp75-i2c-9-49:server-board-temp" + } + ], + "sfps": [ + { + "name": "sfp1" + }, + { + "name": "sfp2" + }, + { + "name": "sfp3" + }, + { + "name": "sfp4" + }, + { + "name": "sfp5" + }, + { + "name": "sfp6" + }, + { + "name": "sfp7" + }, + { + "name": "sfp8" + }, + { + "name": "sfp9" + }, + { + "name": "sfp10" + }, + { + "name": "sfp11" + }, + { + "name": "sfp12" + }, + { + "name": "sfp13" + }, + { + "name": "sfp14" + }, + { + "name": "sfp15" + }, + { + "name": "sfp16" + }, + { + "name": "sfp17" + }, + { + "name": "sfp18" + }, + { + "name": "sfp19" + }, + { + "name": "sfp20" + }, + { + "name": "sfp21" 
+ }, + { + "name": "sfp22" + }, + { + "name": "sfp23" + }, + { + "name": "sfp24" + }, + { + "name": "sfp25" + }, + { + "name": "sfp26" + }, + { + "name": "sfp27" + }, + { + "name": "sfp28" + }, + { + "name": "sfp29" + }, + { + "name": "sfp30" + }, + { + "name": "sfp31" + }, + { + "name": "sfp32" + }, + { + "name": "sfp33" + }, + { + "name": "sfp34" + }, + { + "name": "sfp35" + }, + { + "name": "sfp36" + }, + { + "name": "sfp37" + }, + { + "name": "sfp38" + }, + { + "name": "sfp39" + }, + { + "name": "sfp40" + }, + { + "name": "sfp41" + }, + { + "name": "sfp42" + }, + { + "name": "sfp43" + }, + { + "name": "sfp44" + }, + { + "name": "sfp45" + }, + { + "name": "sfp46" + }, + { + "name": "sfp47" + }, + { + "name": "sfp48" + }, + { + "name": "sfp49" + }, + { + "name": "sfp50" + }, + { + "name": "sfp51" + }, + { + "name": "sfp52" + }, + { + "name": "sfp53" + }, + { + "name": "sfp54" + }, + { + "name": "sfp55" + }, + { + "name": "sfp56" + }, + { + "name": "sfp57" + }, + { + "name": "sfp58" + }, + { + "name": "sfp59" + }, + { + "name": "sfp60" + }, + { + "name": "sfp61" + }, + { + "name": "sfp62" + }, + { + "name": "sfp63" + }, + { + "name": "sfp64" + }, + { + "name": "sfp65" } ] }, - "psus": [ - { - "name": "psu-1" - }, - { - "name": "psu-2" - } - ], "interfaces": { "Ethernet0": { "index": "1,1,1,1", diff --git a/device/barefoot/x86_64-accton_wedge100bf_65x-r0/platform_components.json b/device/barefoot/x86_64-accton_wedge100bf_65x-r0/platform_components.json index 74e851a7c4b6..693d21643f23 100644 --- a/device/barefoot/x86_64-accton_wedge100bf_65x-r0/platform_components.json +++ b/device/barefoot/x86_64-accton_wedge100bf_65x-r0/platform_components.json @@ -2,7 +2,9 @@ "chassis": { "Wedge100BF-65X-O-AC-F-BF": { "component": { + "BIOS": { }, + "BMC": { } } } } -} \ No newline at end of file +} diff --git a/device/barefoot/x86_64-accton_wedge100bf_65x-r0/thermal_thresholds.json b/device/barefoot/x86_64-accton_wedge100bf_65x-r0/thermal_thresholds.json new file mode 100644 
index 000000000000..e53dc0443c24 --- /dev/null +++ b/device/barefoot/x86_64-accton_wedge100bf_65x-r0/thermal_thresholds.json @@ -0,0 +1,79 @@ +{ + "thermals": [ + { + "com_e_driver-i2c-4-33:cpu-temp" : [99.0, 89.0, 11.0, 1.0] + }, + { + "com_e_driver-i2c-4-33:memory-temp" : [85.0, 75.0, 11.0, 1.0] + }, + { + "coretemp-isa-0000:core-0" : [99.0, 89.0, 11.0, 1.0] + }, + { + "coretemp-isa-0000:core-1" : [99.0, 89.0, 11.0, 1.0] + }, + { + "coretemp-isa-0000:core-2" : [99.0, 89.0, 11.0, 1.0] + }, + { + "coretemp-isa-0000:core-3" : [99.0, 89.0, 11.0, 1.0] + }, + { + "coretemp-isa-0000:package-id-0" : [80.0, 70.0, 11.0, 1.0] + }, + { + "max6658-i2c-9-4c:come-board-temp" : [85.0, 70.0, 11.0, 1.0] + }, + { + "max6658-i2c-9-4c:max6658-chip-temp" : [85.0, 70.0, 11.0, 1.0] + }, + { + "pch_haswell-virtual-0:temp1" : [80.0, 70.0, 11.0, 1.0] + }, + { + "psu_driver-i2c-7-5a:psu1-temp1" : [60.0, 40.0, 11.0, 1.0] + }, + { + "psu_driver-i2c-7-5a:psu1-temp2" : [80.0, 60.0, 11.0, 1.0] + }, + { + "psu_driver-i2c-7-59:psu2-temp1" : [60.0, 40.0, 11.0, 1.0] + }, + { + "psu_driver-i2c-7-59:psu2-temp2" : [80.0, 60.0, 11.0, 1.0] + }, + { + "tmp75-i2c-3-4a:exhaust-temp" : [90.0, 80.0, 11.0, 1.0] + }, + { + "tmp75-i2c-3-4b:intake-temp" : [90.0, 70.0, 11.0, 1.0] + }, + { + "tmp75-i2c-3-4c:intake2-temp" : [90.0, 70.0, 11.0, 1.0] + }, + { + "tmp75-i2c-3-48:chip-temp" : [90.0, 70.0, 11.0, 1.0] + }, + { + "tmp75-i2c-3-49:exhaust2-temp" : [90.0, 70.0, 11.0, 1.0] + }, + { + "tmp75-i2c-8-48:fan-board-outlet-right-temp" : [90.0, 70.0, 11.0, 1.0] + }, + { + "tmp75-i2c-8-49:fan-board-outlet-left-temp" : [90.0, 70.0, 11.0, 1.0] + }, + { + "tmp75-i2c-9-4a:upper-board-intake-temp" : [90.0, 70.0, 11.0, 1.0] + }, + { + "tmp75-i2c-9-4b:upper-board-tofino-temp" : [90.0, 70.0, 11.0, 1.0] + }, + { + "tmp75-i2c-9-48:upper-board-intake2-temp" : [90.0, 70.0, 11.0, 1.0] + }, + { + "tmp75-i2c-9-49:server-board-temp" : [90.0, 70.0, 11.0, 1.0] + } + ] +} diff --git 
a/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/chassis.py b/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/chassis.py index 1041561db423..5c60e49d91aa 100644 --- a/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/chassis.py +++ b/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/chassis.py @@ -12,7 +12,7 @@ from sonic_platform.sfp import Sfp from sonic_platform.psu import psu_list_get from sonic_platform.fan_drawer import fan_drawer_list_get - from sonic_platform.thermal import thermal_list_get + from sonic_platform.thermal import chassis_thermals_list_get from sonic_platform.platform_utils import file_create from sonic_platform.eeprom import Eeprom @@ -40,9 +40,9 @@ class Chassis(ChassisBase): def __init__(self): ChassisBase.__init__(self) - self._eeprom = Eeprom() - self.__tlv_bin_eeprom = self._eeprom.get_raw_data() - self.__tlv_dict_eeprom = self._eeprom.get_data() + self.__eeprom = None + self.__tlv_bin_eeprom = None + self.__tlv_dict_eeprom = None self.__fan_drawers = None self.__fan_list = None @@ -60,6 +60,28 @@ def __init__(self): file_create(config_dict['handlers']['file']['filename'], '646') logging.config.dictConfig(config_dict) + @property + def _eeprom(self): + if self.__eeprom is None: + self.__eeprom = Eeprom() + return self.__eeprom + + @_eeprom.setter + def _eeprom(self, value): + pass + + @property + def _tlv_bin_eeprom(self): + if self.__tlv_bin_eeprom is None: + self.__tlv_bin_eeprom = self._eeprom.get_raw_data() + return self.__tlv_bin_eeprom + + @property + def _tlv_dict_eeprom(self): + if self.__tlv_dict_eeprom is None: + self.__tlv_dict_eeprom = self._eeprom.get_data() + return self.__tlv_dict_eeprom + @property def _fan_drawer_list(self): if self.__fan_drawers is None: @@ -85,7 +107,7 @@ def _fan_list(self, value): @property def _thermal_list(self): if self.__thermals is None: - self.__thermals = thermal_list_get() + self.__thermals = 
chassis_thermals_list_get() return self.__thermals @_thermal_list.setter @@ -145,7 +167,7 @@ def get_name(self): Returns: string: The name of the chassis """ - return self._eeprom.modelstr(self.__tlv_bin_eeprom) + return self._eeprom.modelstr(self._tlv_bin_eeprom) def get_presence(self): """ @@ -161,7 +183,7 @@ def get_model(self): Returns: string: Model/part number of chassis """ - return self._eeprom.part_number_str(self.__tlv_bin_eeprom) + return self._eeprom.part_number_str(self._tlv_bin_eeprom) def get_serial(self): """ @@ -169,7 +191,7 @@ def get_serial(self): Returns: string: Serial number of chassis """ - return self._eeprom.serial_number_str(self.__tlv_bin_eeprom) + return self._eeprom.serial_number_str(self._tlv_bin_eeprom) def get_revision(self): """ @@ -177,7 +199,7 @@ def get_revision(self): Returns: string: Revision number of chassis """ - return self.__tlv_dict_eeprom.get( + return self._tlv_dict_eeprom.get( "0x{:X}".format(Eeprom._TLV_CODE_LABEL_REVISION), 'N/A') def get_sfp(self, index): @@ -219,7 +241,7 @@ def get_base_mac(self): A string containing the MAC address in the format 'XX:XX:XX:XX:XX:XX' """ - return self._eeprom.base_mac_addr(self.__tlv_bin_eeprom) + return self._eeprom.base_mac_addr(self._tlv_bin_eeprom) def get_system_eeprom_info(self): """ @@ -230,7 +252,7 @@ def get_system_eeprom_info(self): OCP ONIE TlvInfo EEPROM format and values are their corresponding values. 
""" - return self.__tlv_dict_eeprom + return self._tlv_dict_eeprom def __get_transceiver_change_event(self, timeout=0): forever = False diff --git a/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/component.py b/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/component.py index a7f236cb42a4..13f8171f9705 100644 --- a/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/component.py +++ b/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/component.py @@ -182,12 +182,10 @@ def __init__(self, component_index=0): self.name = self.bpcp.get_components_list()[self.index] except IndexError as e: print("Error: No components found in plaform_components.json") - + if (self.name == "BMC"): - self.version = get_bmc_version() self.description = "Chassis BMC" elif (self.name == "BIOS"): - self.version = get_bios_version() self.description = "Chassis BIOS" def get_name(self): @@ -214,6 +212,12 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component """ + if self.version == "N/A": + if (self.name == "BMC"): + self.version = get_bmc_version() + elif (self.name == "BIOS"): + self.version = get_bios_version() + return self.version def install_firmware(self, image_path): diff --git a/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/psu.py b/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/psu.py index fbd83d6496ae..8f7a431b82ef 100644 --- a/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/psu.py +++ b/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/psu.py @@ -12,8 +12,8 @@ from .platform_thrift_client import thrift_try from sonic_platform_base.psu_base import PsuBase + from sonic_platform.thermal import psu_thermals_list_get from platform_utils import cancel_on_sigterm - except ImportError as e: raise ImportError (str(e) + "- required module not found") @@ -27,6 +27,7 @@ class Psu(PsuBase): 
def __init__(self, index): PsuBase.__init__(self) self.__index = index + self.__thermals = None self.__info = None self.__ts = 0 # STUB IMPLEMENTATION @@ -225,6 +226,34 @@ def get_position_in_parent(self): """ return self.__index + def get_temperature(self): + """ + Retrieves current temperature reading from PSU + Returns: + A float number of current temperature in Celsius up to nearest thousandth + of one degree Celsius, e.g. 30.125 + """ + return self.get_thermal(0).get_temperature() + + def get_temperature_high_threshold(self): + """ + Retrieves the high threshold temperature of PSU + Returns: + A float number, the high threshold temperature of PSU in Celsius + up to nearest thousandth of one degree Celsius, e.g. 30.125 + """ + return self.get_thermal(0).get_high_threshold() + + @property + def _thermal_list(self): + if self.__thermals is None: + self.__thermals = psu_thermals_list_get(self.get_name()) + return self.__thermals + + @_thermal_list.setter + def _thermal_list(self, value): + pass + def psu_list_get(): psu_list = [] for i in range(1, Psu.get_num_psus() + 1): diff --git a/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/sfp.py b/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/sfp.py index 6a5534d8ba6b..d63c03014c16 100644 --- a/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/sfp.py +++ b/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/sfp.py @@ -13,15 +13,6 @@ QSFP_DD_TYPE = "QSFP_DD" EEPROM_PAGE_SIZE = 128 -try: - from thrift.Thrift import TApplicationException - - def cached_num_bytes_get(client): - return client.pltfm_mgr.pltfm_mgr_qsfp_cached_num_bytes_get(1, 0, 0, 0) - thrift_try(cached_num_bytes_get, 1) - EEPROM_CACHED_API_SUPPORT = True -except TApplicationException as e: - EEPROM_CACHED_API_SUPPORT = False class Sfp(SfpOptoeBase): """ @@ -34,15 +25,27 @@ def __init__(self, port_num): self.port_num = port_num self.sfp_type = QSFP_TYPE self.SFP_EEPROM_PATH = 
"/var/run/platform/sfp/" - - if not EEPROM_CACHED_API_SUPPORT: - if not os.path.exists(self.SFP_EEPROM_PATH): - try: - os.makedirs(self.SFP_EEPROM_PATH) - except OSError as e: - if e.errno != errno.EEXIST: - raise - self.eeprom_path = self.SFP_EEPROM_PATH + "sfp{}-eeprom-cache".format(self.index) + self.eeprom_path = None + self.__cached_api_supported = None + + @property + def _cached_api_supported(self): + def cached_num_bytes_get(client): + return client.pltfm_mgr.pltfm_mgr_qsfp_cached_num_bytes_get(1, 0, 0, 0) + if self.__cached_api_supported is None: + try: + thrift_try(cached_num_bytes_get, 1) + self.__cached_api_supported = True + except Exception as e: + self.__cached_api_supported = False + if not os.path.exists(self.SFP_EEPROM_PATH): + try: + os.makedirs(self.SFP_EEPROM_PATH) + except OSError as e: + if e.errno != errno.EEXIST: + raise + self.eeprom_path = self.SFP_EEPROM_PATH + "sfp{}-eeprom-cache".format(self.index) + return self.__cached_api_supported def get_presence(self): """ @@ -94,7 +97,7 @@ def read_eeprom(self, offset, num_bytes): if not self.get_presence(): return None - if not EEPROM_CACHED_API_SUPPORT: + if not self._cached_api_supported: return super().read_eeprom(offset, num_bytes) def cached_num_bytes_get(page, offset, num_bytes): diff --git a/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/thermal.py b/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/thermal.py index e5034f09f558..43c51c32a2d9 100644 --- a/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/thermal.py +++ b/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/thermal.py @@ -1,5 +1,7 @@ try: import subprocess + import time + import threading from collections import namedtuple import json from bfn_extensions.platform_sensors import platform_sensors_get @@ -23,56 +25,12 @@ ''' Threshold = namedtuple('Threshold', ['crit', 'max', 'min', 'alarm']) -def _sensors_chip_parsed(data: str): - def kv(line): - k, v, 
*_ = [t.strip(': ') for t in line.split(':') if t] + [''] - return k, v - - chip, *data = data.strip().split('\n') - chip = chip.strip(': ') - - sensors = [] - for line in data: - if not line.startswith(' '): - sensor_label = line.strip(': ') - sensors.append((sensor_label, {})) - continue - - if len(sensors) == 0: - raise RuntimeError(f'invalid data to parse: {data}') - - attr, value = kv(line) - sensor_label, sensor_data = sensors[-1] - sensor_data.update({attr: value}) - - return chip, dict(sensors) - -''' -Example of returned dict: -{ - 'coretemp-isa-0000': { - 'Core 1': { "temp1_input": 40, ... }, - 'Core 2': { ... } - } -} -''' -def _sensors_get() -> dict: - data = platform_sensors_get(['-A', '-u']) or '' - data += subprocess.check_output("/usr/bin/sensors -A -u", - shell=True, text=True) - data = data.split('\n\n') - data = [_sensors_chip_parsed(chip_data) for chip_data in data if chip_data] - data = dict(data) - return data - -def _value_get(d: dict, key_prefix, key_suffix=''): - for k, v in d.items(): - if k.startswith(key_prefix) and k.endswith(key_suffix): - return v - return None # Thermal -> ThermalBase -> DeviceBase class Thermal(ThermalBase): + __sensors_info = None + __timestamp = 0 + __lock = threading.Lock() _thresholds = dict() _max_temperature = 100.0 _min_temperature = 0.0 @@ -96,6 +54,84 @@ def __init__(self, chip, label, index = 0): if f is not None: self.__get_thresholds(f) + @staticmethod + def __sensors_chip_parsed(data: str): + def kv(line): + k, v, *_ = [t.strip(': ') for t in line.split(':') if t] + [''] + return k, v + + chip, *data = data.strip().split('\n') + chip = chip.strip(': ') + + sensors = [] + for line in data: + if not line.startswith(' '): + sensor_label = line.strip(': ') + sensors.append((sensor_label, {})) + continue + + if len(sensors) == 0: + raise RuntimeError(f'invalid data to parse: {data}') + + attr, value = kv(line) + sensor_label, sensor_data = sensors[-1] + sensor_data.update({attr: value}) + + return chip, 
dict(sensors) + + @classmethod + def __sensors_get(cls, cached=True) -> dict: + cls.__lock.acquire() + if time.time() > cls.__timestamp + 15: + # Update cache once per 15 seconds + try: + data = platform_sensors_get(['-A', '-u']) or '' + data += subprocess.check_output("/usr/bin/sensors -A -u", + shell=True, text=True) + data = data.split('\n\n') + data = [cls.__sensors_chip_parsed(chip_data) for chip_data in data if chip_data] + cls.__sensors_info = dict(data) + cls.__timestamp = time.time() + except Exception as e: + logging.warning("Failed to update sensors cache: " + str(e)) + info = cls.__sensors_info + cls.__lock.release() + return info + + @staticmethod + def __sensor_value_get(d: dict, key_prefix, key_suffix=''): + for k, v in d.items(): + if k.startswith(key_prefix) and k.endswith(key_suffix): + return v + return None + + @staticmethod + def __get_platform_json(): + hwsku_path = device_info.get_path_to_platform_dir() + platform_json_path = "/".join([hwsku_path, "platform.json"]) + f = open(platform_json_path) + return json.load(f) + + @staticmethod + def get_chassis_thermals(): + try: + platform_json = Thermal.__get_platform_json() + return platform_json["chassis"]["thermals"] + except Exception as e: + logging.exception("Failed to collect chassis thermals: " + str(e)) + return None + + @staticmethod + def get_psu_thermals(psu_name): + try: + platform_json = Thermal.__get_platform_json() + for psu in platform_json["chassis"]["psus"]: + if psu["name"] == psu_name: + return psu["thermals"] + except Exception as e: + logging.exception("Failed to collect chassis thermals: " + str(e)) + return None + def __get_thresholds(self, f): def_threshold_json = json.load(f) all_data = def_threshold_json["thermals"] @@ -119,8 +155,18 @@ def check_high_threshold(self, temperature, attr_suffix): return check_range def __get(self, attr_prefix, attr_suffix): - sensor_data = _sensors_get().get(self.__chip, {}).get(self.__label, {}) - value = _value_get(sensor_data, 
attr_prefix, attr_suffix) + chip_data = Thermal.__sensors_get().get(self.__chip, {}) + sensor_data = {} + for sensor, data in chip_data.items(): + if sensor.lower().replace(' ', '-') == self.__label: + sensor_data = data + break + value = Thermal.__sensor_value_get(sensor_data, attr_prefix, attr_suffix) + + # Can be float value or None + if attr_prefix == 'temp' and attr_suffix == 'input': + return value + if value is not None and self.check_in_range(value) and self.check_high_threshold(value, attr_suffix): return value elif self.__name in self._thresholds and attr_prefix == 'temp': @@ -146,6 +192,8 @@ def __get(self, attr_prefix, attr_suffix): # ThermalBase interface methods: def get_temperature(self) -> float: temp = self.__get('temp', 'input') + if temp is None: + return None self.__collect_temp.append(float(temp)) self.__collect_temp.sort() if len(self.__collect_temp) == 3: @@ -214,13 +262,19 @@ def set_low_threshold(self, temperature): return True return False -def thermal_list_get(): - l = [] - index = 0 - for chip, chip_data in _sensors_get().items(): - for sensor, sensor_data in chip_data.items(): - # add only temperature sensors - if _value_get(sensor_data, "temp") is not None: - l.append(Thermal(chip, sensor, index)) - index += 1 - return l + +def chassis_thermals_list_get(): + thermal_list = [] + thermals = Thermal.get_chassis_thermals() + for index, thermal in enumerate(thermals): + thermal = thermal["name"].split(':') + thermal_list.append(Thermal(thermal[0], thermal[1], index)) + return thermal_list + +def psu_thermals_list_get(psu_name): + thermal_list = [] + thermals = Thermal.get_psu_thermals(psu_name) + for index, thermal in enumerate(thermals): + thermal = thermal["name"].split(':') + thermal_list.append(Thermal(thermal[0], thermal[1], index)) + return thermal_list From 9d37b63824d9af4002773513dbe12672063e3f2f Mon Sep 17 00:00:00 2001 From: "Marty Y. 
Lok" <76118573+mlok-nokia@users.noreply.github.com> Date: Mon, 10 Oct 2022 21:49:26 -0400 Subject: [PATCH 035/174] [Nokia] Update Nokia platform IXR7250E device data (#11611) Signed-off-by: mlok --- .../platform_components.template | 19 +++++ .../platform_ndk.json | 8 +- .../platform_reboot | 19 +++++ .../Nokia-IXR7250E-SUP-10/platform.json | 3 + .../platform_components.json | 81 +++++++++++++++++++ .../platform_ndk.json | 4 + 6 files changed, 132 insertions(+), 2 deletions(-) create mode 100755 device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/platform_components.template create mode 100644 device/nokia/x86_64-nokia_ixr7250e_sup-r0/Nokia-IXR7250E-SUP-10/platform_components.json diff --git a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/platform_components.template b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/platform_components.template new file mode 100755 index 000000000000..7b05296ae13e --- /dev/null +++ b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/platform_components.template @@ -0,0 +1,19 @@ + { + "module": { + "LINE-CARD": { + "component": {} + }, + "SUPERVISOR0": { + "component": {} + } + }, + "chassis": { + "Nokia-IXR7250E-36x400G": { + "component": { + "FPGA2": {}, + "FPGA1": {}, + "BIOS": {} + } + } + } + } diff --git a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/platform_ndk.json b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/platform_ndk.json index 2220f8747894..f7be1409804d 100644 --- a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/platform_ndk.json +++ b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/platform_ndk.json @@ -1,8 +1,8 @@ { "options": [ { - "key": "sfp_init_tx_en", - "stringval": "yes" + "key": "module_direct_ipc_ue", + "stringval": "no" }, { "key": "midplane_subnet", @@ -39,6 +39,10 @@ { "key": "enable_firmware_update", "intval": 0 + }, + { + "key": "sonic_log_level", + "stringval": "error" } ] } diff --git a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/platform_reboot b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/platform_reboot 
index a32a3549bc35..b086d09bddcd 100755 --- a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/platform_reboot +++ b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/platform_reboot @@ -1,5 +1,24 @@ #!/bin/bash +update_reboot_cause_for_supervisor_reboot() +{ + DEVICE_MGR_REBOOT_FILE=/tmp/device_mgr_reboot + REBOOT_CAUSE_FILE=/host/reboot-cause/reboot-cause.txt + TMP_REBOOT_CAUSE_FILE=/tmp/tmp-reboot-cause.txt + if [ -f $DEVICE_MGR_REBOOT_FILE ]; then + if [ -f $REBOOT_CAUSE_FILE ]; then + t1=`sudo grep "User: ," $REBOOT_CAUSE_FILE` + if [ ! -z "$t1" ]; then + echo $t1 | sed 's/reboot/reboot from Supervisor/g' | sed 's/User: /User: admin/g' > $TMP_REBOOT_CAUSE_FILE + cp $TMP_REBOOT_CAUSE_FILE $REBOOT_CAUSE_FILE + fi + fi + fi +} + +# update the reboot_cuase file when reboot is trigger by device-mgr +update_reboot_cause_for_supervisor_reboot + systemctl stop nokia-watchdog.service sleep 2 echo "w" > /dev/watchdog diff --git a/device/nokia/x86_64-nokia_ixr7250e_sup-r0/Nokia-IXR7250E-SUP-10/platform.json b/device/nokia/x86_64-nokia_ixr7250e_sup-r0/Nokia-IXR7250E-SUP-10/platform.json index e41b6fc8e5e4..4d7b66229f34 100644 --- a/device/nokia/x86_64-nokia_ixr7250e_sup-r0/Nokia-IXR7250E-SUP-10/platform.json +++ b/device/nokia/x86_64-nokia_ixr7250e_sup-r0/Nokia-IXR7250E-SUP-10/platform.json @@ -7,6 +7,9 @@ }, { "name": "FPGA1" + }, + { + "name": "SFM-FPGA" } ], "watchdog": { diff --git a/device/nokia/x86_64-nokia_ixr7250e_sup-r0/Nokia-IXR7250E-SUP-10/platform_components.json b/device/nokia/x86_64-nokia_ixr7250e_sup-r0/Nokia-IXR7250E-SUP-10/platform_components.json new file mode 100644 index 000000000000..6acb33f5bff5 --- /dev/null +++ b/device/nokia/x86_64-nokia_ixr7250e_sup-r0/Nokia-IXR7250E-SUP-10/platform_components.json @@ -0,0 +1,81 @@ +{ + "chassis": { + "Nokia-IXR7250E-SUP-10": { + "component": { + "BIOS": { }, + "FPGA1": { }, + "SFM-FPGA": { } + } + } + }, + "module": { + "SUPERVISOR0": { + "component": { + } + }, + "LINE-CARD0": { + "component": { + } + }, + 
"LINE-CARD1": { + "component": { + } + }, + "LINE-CARD2": { + "component": { + } + }, + "LINE-CARD3": { + "component": { + } + }, + "LINE-CARD4": { + "component": { + } + }, + "LINE-CARD5": { + "component": { + } + }, + "LINE-CARD6": { + "component": { + } + }, + "LINE-CARD7": { + "component": { + } + }, + "FABRIC-CARD0": { + "component": { + } + }, + "FABRIC-CARD1": { + "component": { + } + }, + "FABRIC-CARD2": { + "component": { + } + }, + "FABRIC-CARD3": { + "component": { + } + }, + "FABRIC-CARD4": { + "component": { + } + }, + "FABRIC-CARD5": { + "component": { + } + }, + "FABRIC-CARD6": { + "component": { + } + }, + "FABRIC-CARD7": { + "component": { + } + } + } +} diff --git a/device/nokia/x86_64-nokia_ixr7250e_sup-r0/platform_ndk.json b/device/nokia/x86_64-nokia_ixr7250e_sup-r0/platform_ndk.json index a6d6bbf8a45a..e40ac505747d 100644 --- a/device/nokia/x86_64-nokia_ixr7250e_sup-r0/platform_ndk.json +++ b/device/nokia/x86_64-nokia_ixr7250e_sup-r0/platform_ndk.json @@ -27,6 +27,10 @@ { "key": "enable_firmware_update", "intval": 0 + }, + { + "key": "sonic_log_level", + "stringval": "error" } ] } From fc99265fd27e319719b6ef552be17edd8c395c63 Mon Sep 17 00:00:00 2001 From: "Marty Y. 
Lok" <76118573+mlok-nokia@users.noreply.github.com> Date: Mon, 10 Oct 2022 21:49:51 -0400 Subject: [PATCH 036/174] [Nokia] Update the nokia platform submodule for Nokia-IXR7250E platform (#12305) Signed-off-by: mlok --- platform/broadcom/sonic-platform-modules-nokia | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/platform/broadcom/sonic-platform-modules-nokia b/platform/broadcom/sonic-platform-modules-nokia index 0f27485ea43b..5ebea4ba7e14 160000 --- a/platform/broadcom/sonic-platform-modules-nokia +++ b/platform/broadcom/sonic-platform-modules-nokia @@ -1 +1 @@ -Subproject commit 0f27485ea43b98e4382aae12f3c4780b44e02c05 +Subproject commit 5ebea4ba7e14b44cfef7943443ce297ea996cba9 From 247bd78da31381173029905a8b526129b9ab31a0 Mon Sep 17 00:00:00 2001 From: Liu Shilong Date: Tue, 11 Oct 2022 09:56:48 +0800 Subject: [PATCH 037/174] [action] Fix PR pre-cherry-pick action wrong author issue. (#12339) --- .github/workflows/pr_cherrypick_prestep.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr_cherrypick_prestep.yml b/.github/workflows/pr_cherrypick_prestep.yml index 9d2ddc6893af..cdfe6b682b45 100644 --- a/.github/workflows/pr_cherrypick_prestep.yml +++ b/.github/workflows/pr_cherrypick_prestep.yml @@ -33,7 +33,7 @@ jobs: pr_url=$(echo $GITHUB_CONTEXT | jq -r ".event.pull_request._links.html.href") repository=$(echo $GITHUB_CONTEXT | jq -r ".repository") labels=$(echo $GITHUB_CONTEXT | jq -r ".event.pull_request.labels[].name") - author=$(echo $GITHUB_CONTEXT | jq -r ".event.pull_request.base.user.login") + author=$(echo $GITHUB_CONTEXT | jq -r ".event.pull_request.user.login") branches=$(git branch -a --list 'origin/20????' 
| awk -F/ '{print$3}' | grep -E "202[0-9]{3}") if [[ $(echo $GITHUB_CONTEXT | jq -r ".event.action") == "labeled" ]];then labels=$(echo $GITHUB_CONTEXT | jq -r ".event.label.name") From c75dfe84ed5c4249dfa8116a0ebbfdd5a4eea94e Mon Sep 17 00:00:00 2001 From: Liu Shilong Date: Tue, 11 Oct 2022 11:00:51 +0800 Subject: [PATCH 038/174] [build] Fix dpkg front lock issue with apt-get (#12332) --- src/sonic-build-hooks/hooks/dpkg | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 src/sonic-build-hooks/hooks/dpkg diff --git a/src/sonic-build-hooks/hooks/dpkg b/src/sonic-build-hooks/hooks/dpkg new file mode 100644 index 000000000000..7fbc90f9fe5c --- /dev/null +++ b/src/sonic-build-hooks/hooks/dpkg @@ -0,0 +1,10 @@ +#!/bin/bash + +. /usr/local/share/buildinfo/scripts/buildinfo_base.sh +REAL_COMMAND=$(get_command dpkg) +COMMAND_INFO="Locked by command: $REAL_COMMAND $@" +lock_result=$(acquire_apt_installation_lock "$COMMAND_INFO" ) +$REAL_COMMAND "$@" +command_result=$? +[ "$lock_result" == y ] && release_apt_installation_lock +exit $command_result From cb707b7969c8f3f34d7134a661c7a6ca3f076d3b Mon Sep 17 00:00:00 2001 From: henry huang <110922668+hehuang-nokia@users.noreply.github.com> Date: Tue, 11 Oct 2022 03:04:07 -0400 Subject: [PATCH 039/174] fixed nokia platform m0 asic mismatch (#12148) changed the platform device name under nokia directory; we now need to specify marvell armhf/arm64 to provide more accurate platform identity. otherwise onie discovery won't recognize the asic being installed. 
Why I did it when we load images using onie discovery, the process was failing because of marvell ASIC mismatch How I did it replace the platform asic with marvell-armhf under 7215 How to verify it load a new image using http server and verify that the image can be loaded successfully --- device/nokia/armhf-nokia_ixs7215_52x-r0/platform_asic | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/device/nokia/armhf-nokia_ixs7215_52x-r0/platform_asic b/device/nokia/armhf-nokia_ixs7215_52x-r0/platform_asic index a554752878b7..64b1d7dbb42b 100644 --- a/device/nokia/armhf-nokia_ixs7215_52x-r0/platform_asic +++ b/device/nokia/armhf-nokia_ixs7215_52x-r0/platform_asic @@ -1 +1 @@ -marvell +marvell-armhf From f1826586b01ffada91e99d93d4c2d28c148d8239 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Tue, 11 Oct 2022 10:17:09 -0400 Subject: [PATCH 040/174] Replace eval (#12103) Signed-off-by: maipbui #### Why I did it `eval()` - not secure against maliciously constructed input, can be dangerous if used to evaluate dynamic content. This may be a code injection vulnerability. 
#### How I did it `eval()` - use `literal_eval()` --- device/common/pddf/plugins/fanutil.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/device/common/pddf/plugins/fanutil.py b/device/common/pddf/plugins/fanutil.py index f34c260035e8..c2944e7b5b89 100755 --- a/device/common/pddf/plugins/fanutil.py +++ b/device/common/pddf/plugins/fanutil.py @@ -11,6 +11,7 @@ import os.path import sys +import ast sys.path.append('/usr/share/sonic/platform/plugins') import pddfparse import json @@ -170,7 +171,7 @@ def set_speed(self, val): print("Setting fan speed is not allowed !") return False else: - duty_cycle_to_pwm = eval(plugin_data['FAN']['duty_cycle_to_pwm']) + duty_cycle_to_pwm = ast.literal_eval(plugin_data['FAN']['duty_cycle_to_pwm']) pwm = duty_cycle_to_pwm(val) print("New Speed: %d%% - PWM value to be set is %d\n" % (val, pwm)) From e1765121b2fa3a5481e80c7db15da597964ab30f Mon Sep 17 00:00:00 2001 From: Prince Sunny Date: Tue, 11 Oct 2022 10:53:52 -0700 Subject: [PATCH 041/174] [Yang model] add Restapi yang file (#12287) * add Restapi Yang model --- src/sonic-restapi | 2 +- src/sonic-yang-models/doc/Configuration.md | 13 ++++ src/sonic-yang-models/setup.py | 1 + .../tests/files/sample_config_db.json | 8 +++ .../tests/yang_model_tests/tests/restapi.json | 9 +++ .../tests_config/restapi.json | 26 ++++++++ .../yang-models/sonic-restapi.yang | 63 +++++++++++++++++++ 7 files changed, 121 insertions(+), 1 deletion(-) create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests/restapi.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests_config/restapi.json create mode 100644 src/sonic-yang-models/yang-models/sonic-restapi.yang diff --git a/src/sonic-restapi b/src/sonic-restapi index bcc6f704a544..86543d0db544 160000 --- a/src/sonic-restapi +++ b/src/sonic-restapi @@ -1 +1 @@ -Subproject commit bcc6f704a54454f326f069501b01759dbb732bb3 +Subproject commit 86543d0db544362bb27912ed40449920c3d7c0d1 diff --git 
a/src/sonic-yang-models/doc/Configuration.md b/src/sonic-yang-models/doc/Configuration.md index bd7b51ce5ae2..d36a6b637497 100644 --- a/src/sonic-yang-models/doc/Configuration.md +++ b/src/sonic-yang-models/doc/Configuration.md @@ -46,6 +46,7 @@ Table of Contents * [Scheduler](#scheduler) * [Port QoS Map](#port-qos-map) * [Queue](#queue) + * [Restapi](#restapi) * [Tacplus Server](#tacplus-server) * [TC to Priority group map](#tc-to-priority-group-map) * [TC to Queue map](#tc-to-queue-map) @@ -1411,6 +1412,18 @@ name as object key and member list as attribute. } ``` +### Restapi +``` +{ +"RESTAPI": { + "certs": { + "ca_crt": "/etc/sonic/credentials/ame_root.pem", + "server_key": "/etc/sonic/credentials/restapiserver.key", + "server_crt": "/etc/sonic/credentials/restapiserver.crt", + "client_crt_cname": "client.sonic.net" + } +} +``` ### Tacplus Server diff --git a/src/sonic-yang-models/setup.py b/src/sonic-yang-models/setup.py index aee9d4650c7a..74c081ef145b 100644 --- a/src/sonic-yang-models/setup.py +++ b/src/sonic-yang-models/setup.py @@ -148,6 +148,7 @@ def run(self): './yang-models/sonic-scheduler.yang', './yang-models/sonic-wred-profile.yang', './yang-models/sonic-queue.yang', + './yang-models/sonic-restapi.yang', './yang-models/sonic-dscp-fc-map.yang', './yang-models/sonic-exp-fc-map.yang', './yang-models/sonic-dscp-tc-map.yang', diff --git a/src/sonic-yang-models/tests/files/sample_config_db.json b/src/sonic-yang-models/tests/files/sample_config_db.json index 6c1f47d80f51..7207d4337a64 100644 --- a/src/sonic-yang-models/tests/files/sample_config_db.json +++ b/src/sonic-yang-models/tests/files/sample_config_db.json @@ -1118,6 +1118,14 @@ "port": "50051" } }, + "RESTAPI": { + "certs": { + "ca_crt": "/etc/sonic/credentials/ame_root.pem", + "server_key": "/etc/sonic/credentials/restapiserver.key", + "server_crt": "/etc/sonic/credentials/restapiserver.crt", + "client_crt_cname": "client.sonic.net" + } + }, "FLEX_COUNTER_TABLE": { "PFCWD": { 
"FLEX_COUNTER_STATUS": "enable" diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/restapi.json b/src/sonic-yang-models/tests/yang_model_tests/tests/restapi.json new file mode 100644 index 000000000000..0804ceac58ac --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/restapi.json @@ -0,0 +1,9 @@ +{ + "RESTAPI_TABLE_WITH_INCORRECT_CERT": { + "desc": "RESTAPI TABLE_WITH_INCORRECT_CERT failure.", + "eStr": ["server_crt"] + }, + "RESTAPI_TABLE_WITH_VALID_CONFIG": { + "desc": "RESTAPI TABLE WITH VALID CONFIG." + } +} diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/restapi.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/restapi.json new file mode 100644 index 000000000000..48505a0e0c97 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/restapi.json @@ -0,0 +1,26 @@ +{ + "RESTAPI_TABLE_WITH_INCORRECT_CERT": { + "sonic-restapi:sonic-restapi": { + "sonic-restapi:RESTAPI": { + "certs": { + "ca_crt": "abcd.config", + "server_crt": "a/b/c", + "server_key": "123", + "client_crt_cname": "client" + } + } + } + }, + "RESTAPI_TABLE_WITH_VALID_CONFIG": { + "sonic-restapi:sonic-restapi": { + "sonic-restapi:RESTAPI": { + "certs": { + "ca_crt": "/etc/sonic/credentials/ame_root.pem", + "server_crt": "/etc/sonic/credentials/restapiserver.crt", + "server_key": "/etc/sonic/credentials/restapiserver.key", + "client_crt_cname": "client.sonic.net" + } + } + } + } +} diff --git a/src/sonic-yang-models/yang-models/sonic-restapi.yang b/src/sonic-yang-models/yang-models/sonic-restapi.yang new file mode 100644 index 000000000000..d42db486480a --- /dev/null +++ b/src/sonic-yang-models/yang-models/sonic-restapi.yang @@ -0,0 +1,63 @@ +module sonic-restapi { + + yang-version 1.1; + + namespace "http://github.com/Azure/sonic-restapi"; + prefix restapi; + + import ietf-inet-types { + prefix inet; + } + + organization + "SONiC"; + + contact + "SONiC"; + + description "RESTAPI YANG Module for SONiC OS"; + + 
revision 2022-10-05 { + description "First Revision"; + } + + container sonic-restapi { + + container RESTAPI { + + description "RESTAPI TABLE part of config_db.json"; + + container certs { + + leaf ca_crt { + type string { + pattern '(/[a-zA-Z0-9_-]+)*/([a-zA-Z0-9_-]+).pem'; + } + description "Local path for ca_crt."; + } + + leaf server_crt { + type string { + pattern '(/[a-zA-Z0-9_-]+)*/([a-zA-Z0-9_-]+).crt'; + } + description "Local path for server_crt."; + } + + leaf client_crt_cname { + type string { + pattern '(/[a-zA-Z0-9_-.]+)*/([a-zA-Z0-9_-.]+)./[a-z]{3}'; + } + description "Client cert name."; + } + + leaf server_key { + type string { + pattern '(/[a-zA-Z0-9_-]+)*/([a-zA-Z0-9_-]+).key'; + } + description "Local path for server_key."; + } + + } + } + } +} From aee1466a83b43e0db53cb5880d80e5a45880a1dc Mon Sep 17 00:00:00 2001 From: Renuka Manavalan <47282725+renukamanavalan@users.noreply.github.com> Date: Tue, 11 Oct 2022 20:35:42 -0700 Subject: [PATCH 042/174] sonic-swss-common submodule update (#12356) | * cb707b7 fixed nokia platform m0 asic mismatch (fixed nokia platform m0 asic mismatch #12148) | * c75dfe8 [build] Fix dpkg front lock issue with apt-get ([build] Fix dpkg front lock issue caused by apt-get install #12332) | * 247bd78 [action] Fix PR pre-cherry-pick action wrong author issue. ([action] Fix PR pre-cherry-pick action wrong author issue. 
#12339) | * fc99265 [Nokia] Update the nokia platform submodule for Nokia-IXR7250E platform ([Nokia] Update the nokia platform submodule for Nokia-IXR7250E platform #12305) | * 9d37b63 [Nokia] Update Nokia platform IXR7250E device data ([Nokia] Update Nokia platform IXR7250E device data #11611) | * 304c6c8 [BFN] Reworked BFN platform thermals plugin ([BFN] Reworked BFN platform thermals plugin #11723) | * df93a1b [Build][Bug] Fix apt-get remove version not lock issue ([Build][Bug] Fix apt-get remove version not locked issue #12193) | * 9b2b8e3 Add gearbox taps to vs gearbox_config.json (Add gearbox taps to vs gearbox_config.json #11480) --- src/sonic-swss-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-swss-common b/src/sonic-swss-common index 651f52b8e511..bcf48b26361f 160000 --- a/src/sonic-swss-common +++ b/src/sonic-swss-common @@ -1 +1 @@ -Subproject commit 651f52b8e51107112c8205d12608723357ecbe5e +Subproject commit bcf48b26361f94e10a0eafc2c49c0bf0f440b2d5 From 257cc96d7c167879a44955bdd3f933afed6e2dcf Mon Sep 17 00:00:00 2001 From: Hua Liu <58683130+liuh-80@users.noreply.github.com> Date: Wed, 12 Oct 2022 13:04:14 +0800 Subject: [PATCH 043/174] Remove swsssdk from sonic OS image and docker container image (#12323) Remove swsssdk from sonic OS image and docker image #### Why I did it swsssdk is deprecated, so need remove from image. #### How I did it Update config file to remove swsssdk from image. #### How to verify it Pass all test case. #### Which release branch to backport (provide reason below if selected) - [ ] 201811 - [ ] 201911 - [ ] 202006 - [ ] 202012 - [ ] 202106 - [ ] 202111 - [ ] 202205 #### Description for the changelog Remove swsssdk from sonic OS image and docker image #### Ensure to add label/tag for the feature raised. example - PR#2174 under sonic-utilities repo. where, Generic Config and Update feature has been labelled as GCU. 
#### Link to config_db schema for YANG module changes #### A picture of a cute animal (not mandatory but encouraged) --- files/build_templates/sonic_debian_extension.j2 | 7 ------- platform/vs/docker-sonic-vs.mk | 5 +---- slave.mk | 2 -- 3 files changed, 1 insertion(+), 13 deletions(-) diff --git a/files/build_templates/sonic_debian_extension.j2 b/files/build_templates/sonic_debian_extension.j2 index 972ebf7bdff8..0e219e9fa2c7 100644 --- a/files/build_templates/sonic_debian_extension.j2 +++ b/files/build_templates/sonic_debian_extension.j2 @@ -130,13 +130,6 @@ if [[ $CONFIGURED_ARCH == amd64 ]]; then sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip3 install "grpcio-tools==1.39.0" fi -# Install SwSS SDK Python 3 package -# Note: the scripts will be overwritten by corresponding Python 2 package -SWSSSDK_PY3_WHEEL_NAME=$(basename {{swsssdk_py3_wheel_path}}) -sudo cp {{swsssdk_py3_wheel_path}} $FILESYSTEM_ROOT/$SWSSSDK_PY3_WHEEL_NAME -sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip3 install $SWSSSDK_PY3_WHEEL_NAME -sudo rm -rf $FILESYSTEM_ROOT/$SWSSSDK_PY3_WHEEL_NAME - # Install sonic-py-common Python 3 package SONIC_PY_COMMON_PY3_WHEEL_NAME=$(basename {{sonic_py_common_py3_wheel_path}}) sudo cp {{sonic_py_common_py3_wheel_path}} $FILESYSTEM_ROOT/$SONIC_PY_COMMON_PY3_WHEEL_NAME diff --git a/platform/vs/docker-sonic-vs.mk b/platform/vs/docker-sonic-vs.mk index 400d57c76d8d..4adc9c10ac25 100644 --- a/platform/vs/docker-sonic-vs.mk +++ b/platform/vs/docker-sonic-vs.mk @@ -16,10 +16,7 @@ $(DOCKER_SONIC_VS)_DEPENDS += $(SWSS) \ $(SONIC_HOST_SERVICES_DATA) \ $(IPROUTE2) -# swsssdk is a dependency of sonic-py-common -# TODO: sonic-py-common should depend on swsscommon instead -$(DOCKER_SONIC_VS)_PYTHON_WHEELS += $(SWSSSDK_PY3) \ - $(SONIC_PY_COMMON_PY3) \ +$(DOCKER_SONIC_VS)_PYTHON_WHEELS += $(SONIC_PY_COMMON_PY3) \ $(SONIC_PLATFORM_COMMON_PY3) \ $(SONIC_YANG_MODELS_PY3) \ $(SONIC_YANG_MGMT_PY3) \ diff --git a/slave.mk b/slave.mk index 
28efed3dfc11..b91dcd349629 100644 --- a/slave.mk +++ b/slave.mk @@ -1214,8 +1214,6 @@ $(addprefix $(TARGET_PATH)/, $(SONIC_INSTALLERS)) : $(TARGET_PATH)/% : \ export sonic_py_common_py3_wheel_path="$(addprefix $(PYTHON_WHEELS_PATH)/,$(SONIC_PY_COMMON_PY3))" export config_engine_py2_wheel_path="$(addprefix $(PYTHON_WHEELS_PATH)/,$(SONIC_CONFIG_ENGINE_PY2))" export config_engine_py3_wheel_path="$(addprefix $(PYTHON_WHEELS_PATH)/,$(SONIC_CONFIG_ENGINE_PY3))" - export swsssdk_py2_wheel_path="$(addprefix $(PYTHON_WHEELS_PATH)/,$(SWSSSDK_PY2))" - export swsssdk_py3_wheel_path="$(addprefix $(PYTHON_WHEELS_PATH)/,$(SWSSSDK_PY3))" export platform_common_py3_wheel_path="$(addprefix $(PYTHON_WHEELS_PATH)/,$(SONIC_PLATFORM_COMMON_PY3))" export redis_dump_load_py2_wheel_path="$(addprefix $(PYTHON_WHEELS_PATH)/,$(REDIS_DUMP_LOAD_PY2))" export redis_dump_load_py3_wheel_path="$(addprefix $(PYTHON_WHEELS_PATH)/,$(REDIS_DUMP_LOAD_PY3))" From 34f9a642dd03540795c66afb91bea10858e1568a Mon Sep 17 00:00:00 2001 From: Vivek Date: Wed, 12 Oct 2022 01:46:20 -0700 Subject: [PATCH 044/174] [DHCP_RELAY] Updated wait_for_intf.sh to wait for ipv6 global and link local addr (#12273) - Why I did it Fixes #11431 - How I did it dhcp6relay binds to ipv6 addresses configured on these vlan interfaces Thus check if they are ready before launching dhcp6relay - How to verify it Unit Tests Tested on a live device Signed-off-by: Vivek Reddy Karri --- dockers/docker-dhcp-relay/wait_for_intf.sh.j2 | 26 +++++++++++++++++++ .../tests/dhcp-relay-sample.json | 5 ++++ .../tests/sample_output/py2/wait_for_intf.sh | 22 ++++++++++++++++ .../tests/sample_output/py3/wait_for_intf.sh | 22 ++++++++++++++++ src/sonic-config-engine/tests/test_j2files.py | 3 ++- 5 files changed, 77 insertions(+), 1 deletion(-) create mode 100644 src/sonic-config-engine/tests/dhcp-relay-sample.json diff --git a/dockers/docker-dhcp-relay/wait_for_intf.sh.j2 b/dockers/docker-dhcp-relay/wait_for_intf.sh.j2 index b224a697b5ba..13499a6e6c54 
100644 --- a/dockers/docker-dhcp-relay/wait_for_intf.sh.j2 +++ b/dockers/docker-dhcp-relay/wait_for_intf.sh.j2 @@ -21,8 +21,28 @@ function wait_until_iface_ready echo "Interface ${IFACE_NAME} is ready!" } +function check_for_ipv6_link_local +{ + IFACE_NAME=$1 + echo "Waiting until interface ${IFACE_NAME} has a link-local ipv6 address configured...." + + # Status of link local address is not populated in STATE_DB + while true; do + HAS_LL=$(ip -6 addr show ${IFACE_NAME} scope link 2> /dev/null) + RC=$? + if [[ ${RC} == "0" ]] && [[ ! -z ${HAS_LL} ]]; then + break + fi + + sleep 1 + done + + echo "Link-Local address is configured on ${IFACE_NAME}" +} # Wait for all interfaces with IPv4 addresses to be up and ready +# dhcp6relay binds to ipv6 addresses configured on these vlan ifaces +# Thus check if they are ready before launching dhcp6relay {% for (name, prefix) in INTERFACE|pfx_filter %} {% if prefix | ipv4 %} wait_until_iface_ready {{ name }} {{ prefix }} @@ -32,6 +52,12 @@ wait_until_iface_ready {{ name }} {{ prefix }} {% if prefix | ipv4 %} wait_until_iface_ready {{ name }} {{ prefix }} {% endif %} +{% if prefix | ipv6 %} +{% if DHCP_RELAY and name in DHCP_RELAY %} +wait_until_iface_ready {{ name }} {{ prefix }} +check_for_ipv6_link_local {{ name }} +{% endif %} +{% endif %} {% endfor %} {% for (name, prefix) in PORTCHANNEL_INTERFACE|pfx_filter %} {% if prefix | ipv4 %} diff --git a/src/sonic-config-engine/tests/dhcp-relay-sample.json b/src/sonic-config-engine/tests/dhcp-relay-sample.json new file mode 100644 index 000000000000..dfd29ed80f43 --- /dev/null +++ b/src/sonic-config-engine/tests/dhcp-relay-sample.json @@ -0,0 +1,5 @@ +{ + "VLAN_INTERFACE": { + "Vlan1000|fc02:2000::2/24": {} + } +} diff --git a/src/sonic-config-engine/tests/sample_output/py2/wait_for_intf.sh b/src/sonic-config-engine/tests/sample_output/py2/wait_for_intf.sh index 8ba15b1c8355..1d58bc956bd1 100644 --- a/src/sonic-config-engine/tests/sample_output/py2/wait_for_intf.sh +++ 
b/src/sonic-config-engine/tests/sample_output/py2/wait_for_intf.sh @@ -21,10 +21,32 @@ function wait_until_iface_ready echo "Interface ${IFACE_NAME} is ready!" } +function check_for_ipv6_link_local +{ + IFACE_NAME=$1 + echo "Waiting until interface ${IFACE_NAME} has a link-local ipv6 address configured...." + + # Status of link local address is not populated in STATE_DB + while true; do + HAS_LL=$(ip -6 addr show ${IFACE_NAME} scope link 2> /dev/null) + RC=$? + if [[ ${RC} == "0" ]] && [[ ! -z ${HAS_LL} ]]; then + break + fi + + sleep 1 + done + + echo "Link-Local address is configured on ${IFACE_NAME}" +} # Wait for all interfaces with IPv4 addresses to be up and ready +# dhcp6relay binds to ipv6 addresses configured on these vlan ifaces +# Thus check if they are ready before launching dhcp6relay wait_until_iface_ready Vlan2000 192.168.200.1/27 wait_until_iface_ready Vlan1000 192.168.0.1/27 +wait_until_iface_ready Vlan1000 fc02:2000::2/24 +check_for_ipv6_link_local Vlan1000 wait_until_iface_ready PortChannel02 10.0.0.58/31 wait_until_iface_ready PortChannel03 10.0.0.60/31 wait_until_iface_ready PortChannel04 10.0.0.62/31 diff --git a/src/sonic-config-engine/tests/sample_output/py3/wait_for_intf.sh b/src/sonic-config-engine/tests/sample_output/py3/wait_for_intf.sh index 6e5012d5939c..5f0f46a59147 100644 --- a/src/sonic-config-engine/tests/sample_output/py3/wait_for_intf.sh +++ b/src/sonic-config-engine/tests/sample_output/py3/wait_for_intf.sh @@ -21,8 +21,30 @@ function wait_until_iface_ready echo "Interface ${IFACE_NAME} is ready!" } +function check_for_ipv6_link_local +{ + IFACE_NAME=$1 + echo "Waiting until interface ${IFACE_NAME} has a link-local ipv6 address configured...." + + # Status of link local address is not populated in STATE_DB + while true; do + HAS_LL=$(ip -6 addr show ${IFACE_NAME} scope link 2> /dev/null) + RC=$? + if [[ ${RC} == "0" ]] && [[ ! 
-z ${HAS_LL} ]]; then + break + fi + + sleep 1 + done + + echo "Link-Local address is configured on ${IFACE_NAME}" +} # Wait for all interfaces with IPv4 addresses to be up and ready +# dhcp6relay binds to ipv6 addresses configured on these vlan ifaces +# Thus check if they are ready before launching dhcp6relay +wait_until_iface_ready Vlan1000 fc02:2000::2/24 +check_for_ipv6_link_local Vlan1000 wait_until_iface_ready Vlan1000 192.168.0.1/27 wait_until_iface_ready Vlan2000 192.168.200.1/27 wait_until_iface_ready PortChannel01 10.0.0.56/31 diff --git a/src/sonic-config-engine/tests/test_j2files.py b/src/sonic-config-engine/tests/test_j2files.py index fef27be40b7e..3ac219468e43 100644 --- a/src/sonic-config-engine/tests/test_j2files.py +++ b/src/sonic-config-engine/tests/test_j2files.py @@ -142,8 +142,9 @@ def test_ports_json(self): def test_dhcp_relay(self): # Test generation of wait_for_intf.sh + dhc_sample_data = os.path.join(self.test_dir, "dhcp-relay-sample.json") template_path = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-dhcp-relay', 'wait_for_intf.sh.j2') - argument = '-m ' + self.t0_minigraph + ' -p ' + self.t0_port_config + ' -t ' + template_path + ' > ' + self.output_file + argument = '-m ' + self.t0_minigraph + ' -j ' + dhc_sample_data + ' -p ' + self.t0_port_config + ' -t ' + template_path + ' > ' + self.output_file self.run_script(argument) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'wait_for_intf.sh'), self.output_file)) From 7087763af4edae764d08196f410c19470a58db9a Mon Sep 17 00:00:00 2001 From: Saikrishna Arcot Date: Wed, 12 Oct 2022 08:44:50 -0700 Subject: [PATCH 045/174] Revert "[Yang model] add Restapi yang file (#12287)" (#12374) This is causing a build failure for all builds. The PR build was incorrectly marked as passing due to a different build issue. 
libyang[0]: Regular expression "(/[a-zA-Z0-9_-.]+)*/([a-zA-Z0-9_-.]+)./[a-z]{3}" is not valid (".]+)*/([a-zA-Z0-9_-.]+)./[a-z]{3})$": range out of order in character class). libyang[0]: Module "sonic-restapi" parsing failed. ERROR:YANG-TEST: Exception >Module "sonic-restapi" parsing failed.< in /sonic/src/sonic-yang-models/tests/yang_model_tests/test_yang_model.py:114 ERROR:YANG-TEST: Exception >Module "sonic-restapi" parsing failed.< in /sonic/src/sonic-yang-models/tests/yang_model_test This reverts commit e1765121b2fa3a5481e80c7db15da597964ab30f. --- src/sonic-restapi | 2 +- src/sonic-yang-models/doc/Configuration.md | 13 ---- src/sonic-yang-models/setup.py | 1 - .../tests/files/sample_config_db.json | 8 --- .../tests/yang_model_tests/tests/restapi.json | 9 --- .../tests_config/restapi.json | 26 -------- .../yang-models/sonic-restapi.yang | 63 ------------------- 7 files changed, 1 insertion(+), 121 deletions(-) delete mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests/restapi.json delete mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests_config/restapi.json delete mode 100644 src/sonic-yang-models/yang-models/sonic-restapi.yang diff --git a/src/sonic-restapi b/src/sonic-restapi index 86543d0db544..bcc6f704a544 160000 --- a/src/sonic-restapi +++ b/src/sonic-restapi @@ -1 +1 @@ -Subproject commit 86543d0db544362bb27912ed40449920c3d7c0d1 +Subproject commit bcc6f704a54454f326f069501b01759dbb732bb3 diff --git a/src/sonic-yang-models/doc/Configuration.md b/src/sonic-yang-models/doc/Configuration.md index d36a6b637497..bd7b51ce5ae2 100644 --- a/src/sonic-yang-models/doc/Configuration.md +++ b/src/sonic-yang-models/doc/Configuration.md @@ -46,7 +46,6 @@ Table of Contents * [Scheduler](#scheduler) * [Port QoS Map](#port-qos-map) * [Queue](#queue) - * [Restapi](#restapi) * [Tacplus Server](#tacplus-server) * [TC to Priority group map](#tc-to-priority-group-map) * [TC to Queue map](#tc-to-queue-map) @@ -1412,18 +1411,6 @@ name as object key and 
member list as attribute. } ``` -### Restapi -``` -{ -"RESTAPI": { - "certs": { - "ca_crt": "/etc/sonic/credentials/ame_root.pem", - "server_key": "/etc/sonic/credentials/restapiserver.key", - "server_crt": "/etc/sonic/credentials/restapiserver.crt", - "client_crt_cname": "client.sonic.net" - } -} -``` ### Tacplus Server diff --git a/src/sonic-yang-models/setup.py b/src/sonic-yang-models/setup.py index 74c081ef145b..aee9d4650c7a 100644 --- a/src/sonic-yang-models/setup.py +++ b/src/sonic-yang-models/setup.py @@ -148,7 +148,6 @@ def run(self): './yang-models/sonic-scheduler.yang', './yang-models/sonic-wred-profile.yang', './yang-models/sonic-queue.yang', - './yang-models/sonic-restapi.yang', './yang-models/sonic-dscp-fc-map.yang', './yang-models/sonic-exp-fc-map.yang', './yang-models/sonic-dscp-tc-map.yang', diff --git a/src/sonic-yang-models/tests/files/sample_config_db.json b/src/sonic-yang-models/tests/files/sample_config_db.json index 7207d4337a64..6c1f47d80f51 100644 --- a/src/sonic-yang-models/tests/files/sample_config_db.json +++ b/src/sonic-yang-models/tests/files/sample_config_db.json @@ -1118,14 +1118,6 @@ "port": "50051" } }, - "RESTAPI": { - "certs": { - "ca_crt": "/etc/sonic/credentials/ame_root.pem", - "server_key": "/etc/sonic/credentials/restapiserver.key", - "server_crt": "/etc/sonic/credentials/restapiserver.crt", - "client_crt_cname": "client.sonic.net" - } - }, "FLEX_COUNTER_TABLE": { "PFCWD": { "FLEX_COUNTER_STATUS": "enable" diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/restapi.json b/src/sonic-yang-models/tests/yang_model_tests/tests/restapi.json deleted file mode 100644 index 0804ceac58ac..000000000000 --- a/src/sonic-yang-models/tests/yang_model_tests/tests/restapi.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "RESTAPI_TABLE_WITH_INCORRECT_CERT": { - "desc": "RESTAPI TABLE_WITH_INCORRECT_CERT failure.", - "eStr": ["server_crt"] - }, - "RESTAPI_TABLE_WITH_VALID_CONFIG": { - "desc": "RESTAPI TABLE WITH VALID CONFIG." 
- } -} diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/restapi.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/restapi.json deleted file mode 100644 index 48505a0e0c97..000000000000 --- a/src/sonic-yang-models/tests/yang_model_tests/tests_config/restapi.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "RESTAPI_TABLE_WITH_INCORRECT_CERT": { - "sonic-restapi:sonic-restapi": { - "sonic-restapi:RESTAPI": { - "certs": { - "ca_crt": "abcd.config", - "server_crt": "a/b/c", - "server_key": "123", - "client_crt_cname": "client" - } - } - } - }, - "RESTAPI_TABLE_WITH_VALID_CONFIG": { - "sonic-restapi:sonic-restapi": { - "sonic-restapi:RESTAPI": { - "certs": { - "ca_crt": "/etc/sonic/credentials/ame_root.pem", - "server_crt": "/etc/sonic/credentials/restapiserver.crt", - "server_key": "/etc/sonic/credentials/restapiserver.key", - "client_crt_cname": "client.sonic.net" - } - } - } - } -} diff --git a/src/sonic-yang-models/yang-models/sonic-restapi.yang b/src/sonic-yang-models/yang-models/sonic-restapi.yang deleted file mode 100644 index d42db486480a..000000000000 --- a/src/sonic-yang-models/yang-models/sonic-restapi.yang +++ /dev/null @@ -1,63 +0,0 @@ -module sonic-restapi { - - yang-version 1.1; - - namespace "http://github.com/Azure/sonic-restapi"; - prefix restapi; - - import ietf-inet-types { - prefix inet; - } - - organization - "SONiC"; - - contact - "SONiC"; - - description "RESTAPI YANG Module for SONiC OS"; - - revision 2022-10-05 { - description "First Revision"; - } - - container sonic-restapi { - - container RESTAPI { - - description "RESTAPI TABLE part of config_db.json"; - - container certs { - - leaf ca_crt { - type string { - pattern '(/[a-zA-Z0-9_-]+)*/([a-zA-Z0-9_-]+).pem'; - } - description "Local path for ca_crt."; - } - - leaf server_crt { - type string { - pattern '(/[a-zA-Z0-9_-]+)*/([a-zA-Z0-9_-]+).crt'; - } - description "Local path for server_crt."; - } - - leaf client_crt_cname { - type string { - pattern 
'(/[a-zA-Z0-9_-.]+)*/([a-zA-Z0-9_-.]+)./[a-z]{3}'; - } - description "Client cert name."; - } - - leaf server_key { - type string { - pattern '(/[a-zA-Z0-9_-]+)*/([a-zA-Z0-9_-]+).key'; - } - description "Local path for server_key."; - } - - } - } - } -} From 50b77a5d441d0f6ceb7e6ef0190aa4ea02197c92 Mon Sep 17 00:00:00 2001 From: Kalimuthu-Velappan <53821802+Kalimuthu-Velappan@users.noreply.github.com> Date: Thu, 13 Oct 2022 20:29:09 +0530 Subject: [PATCH 046/174] Fixes the exit status of slave build. (#12369) This PR fixes the issue reported in PR#12367 https://github.com/sonic-net/sonic-buildimage/pull/12367 The issue is that exit code always being 0 for the builds that are failed. Fix is added in the Makefile.work to return the error code when the slave build is failed with an error. --- Makefile.work | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Makefile.work b/Makefile.work index a6cacafce834..56f0cde6c331 100644 --- a/Makefile.work +++ b/Makefile.work @@ -500,7 +500,7 @@ SONIC_BUILD_INSTRUCTION := $(MAKE) \ ifeq ($(filter clean,$(MAKECMDGOALS)),) -COLLECT_BUILD_VERSION = { DBGOPT='$(DBGOPT)' scripts/collect_build_version_files.sh $$?; } +COLLECT_BUILD_VERSION = { DBGOPT='$(DBGOPT)' scripts/collect_build_version_files.sh \$$?; } endif ifdef SOURCE_FOLDER @@ -508,10 +508,11 @@ ifdef SOURCE_FOLDER endif ifeq "$(KEEP_SLAVE_ON)" "yes" -SLAVE_SHELL={ /bin/bash; } +SLAVE_SHELL={ ret=\$$?; /bin/bash; exit \$$ret; } endif .DEFAULT_GOAL := all +.SHELLFLAGS += -e %:: | sonic-build-hooks ifneq ($(filter y, $(MULTIARCH_QEMU_ENVIRON) $(CROSS_BUILD_ENVIRON)),) @@ -527,7 +528,7 @@ endif $(Q)$(DOCKER_RUN) \ $(SLAVE_IMAGE):$(SLAVE_TAG) \ - bash -c "$(SONIC_BUILD_INSTRUCTION) $@;$(COLLECT_BUILD_VERSION); $(SLAVE_SHELL)" + bash -c "$(SONIC_BUILD_INSTRUCTION) $@; $(COLLECT_BUILD_VERSION); $(SLAVE_SHELL)" $(Q)$(docker-image-cleanup) docker-cleanup: From bc684fef0bb79cb8cead1374ea826144609f2a94 Mon Sep 17 00:00:00 2001 From: Ying Xie Date: Thu, 13 Oct 2022 
09:24:06 -0700 Subject: [PATCH 047/174] [BGP] starting BGP service after swss (#12381) Why I did it BGP service has always been starting after interface-config. However, recently we discovered an issue where some BGP sessions are unable to establish due to BGP daemon not able to read the interface IP. This issue was clearly observed after upgrading to FRR 8.2.2. See more details in #12380. How I did it Delaying starting BGP seems to be a workaround for this issue. However, caution is that this delay might impact warm reboot timing and other timing sequences. This workaround is reducing the probability of hitting the issue by close to 100X. However, this workaround is not bulletproof as test shows. It is still preferrable to have a proper FRR fix and revert this change in the future. How to verify it Continuously issuing config reload and check BGP session status afterwards. Signed-off-by: Ying Xie --- files/build_templates/per_namespace/bgp.service.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/files/build_templates/per_namespace/bgp.service.j2 b/files/build_templates/per_namespace/bgp.service.j2 index 61eb71c4db9a..e9c1a923109b 100644 --- a/files/build_templates/per_namespace/bgp.service.j2 +++ b/files/build_templates/per_namespace/bgp.service.j2 @@ -7,6 +7,7 @@ After=updategraph.service BindsTo=sonic.target After=sonic.target Before=ntp-config.service +After=swss{% if multi_instance == 'true' %}@%i{% endif %}.service After=interfaces-config.service StartLimitIntervalSec=1200 StartLimitBurst=3 From 86fec0892838eb65decda9030e0dd71242d82497 Mon Sep 17 00:00:00 2001 From: Praveen Chaudhary Date: Thu, 13 Oct 2022 10:12:35 -0700 Subject: [PATCH 048/174] [sonic-mpls-tc-map.yang]: yang model for mpls_tc_to_tc_map table. (#12176) changes: -- yang model for mpls_tc_to_tc_map table. -- tests. #### Why I did it yang model for mpls_tc_to_tc_map table. #### How I did it -- yang model for mpls_tc_to_tc_map table. -- yang model tests. 
#### How to verify it -- yang model build time tests. --- src/sonic-yang-mgmt/sonic_yang_ext.py | 5 +- src/sonic-yang-models/setup.py | 1 + .../tests/files/sample_config_db.json | 15 +++- .../tests/mpls_tc_to_tc_map.json | 11 +++ .../tests_config/mpls_tc_to_tc_map.json | 36 ++++++++++ .../yang-models/sonic-mpls-tc-map.yang | 70 +++++++++++++++++++ 6 files changed, 134 insertions(+), 4 deletions(-) create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests/mpls_tc_to_tc_map.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests_config/mpls_tc_to_tc_map.json create mode 100644 src/sonic-yang-models/yang-models/sonic-mpls-tc-map.yang diff --git a/src/sonic-yang-mgmt/sonic_yang_ext.py b/src/sonic-yang-mgmt/sonic_yang_ext.py index 8f4279091882..6fd4f4ef4ef5 100644 --- a/src/sonic-yang-mgmt/sonic_yang_ext.py +++ b/src/sonic-yang-mgmt/sonic_yang_ext.py @@ -17,7 +17,8 @@ 'PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_LIST', 'DSCP_TO_FC_MAP_LIST', 'EXP_TO_FC_MAP_LIST', - 'CABLE_LENGTH_LIST' + 'CABLE_LENGTH_LIST', + 'MPLS_TC_TO_TC_MAP_LIST' ] # Workaround for those fields who is defined as leaf-list in YANG model but have string value in config DB. @@ -422,7 +423,7 @@ def _yangConvert(val): vValue = list() if isinstance(value, str) and (self.elementPath[0], self.elementPath[-1]) in LEAF_LIST_WITH_STRING_VALUE_DICT: # For field defined as leaf-list but has string value in CONFIG DB, need do special handling here. For exampe: - # port.adv_speeds in CONFIG DB has value "100,1000,10000", it shall be transferred to [100,1000,10000] as YANG value here to + # port.adv_speeds in CONFIG DB has value "100,1000,10000", it shall be transferred to [100,1000,10000] as YANG value here to # make it align with its YANG definition. 
value = (x.strip() for x in value.split(LEAF_LIST_WITH_STRING_VALUE_DICT[(self.elementPath[0], self.elementPath[-1])])) for v in value: diff --git a/src/sonic-yang-models/setup.py b/src/sonic-yang-models/setup.py index aee9d4650c7a..076ce59b554d 100644 --- a/src/sonic-yang-models/setup.py +++ b/src/sonic-yang-models/setup.py @@ -117,6 +117,7 @@ def run(self): './yang-models/sonic-mgmt_port.yang', './yang-models/sonic-mgmt_vrf.yang', './yang-models/sonic-mirror-session.yang', + './yang-models/sonic-mpls-tc-map.yang', './yang-models/sonic-mux-cable.yang', './yang-models/sonic-ntp.yang', './yang-models/sonic-nat.yang', diff --git a/src/sonic-yang-models/tests/files/sample_config_db.json b/src/sonic-yang-models/tests/files/sample_config_db.json index 6c1f47d80f51..fe31a650a08d 100644 --- a/src/sonic-yang-models/tests/files/sample_config_db.json +++ b/src/sonic-yang-models/tests/files/sample_config_db.json @@ -830,7 +830,7 @@ "role": "Int", "speed": "40000", "tpid": "0x8100" - } + } }, "ACL_TABLE": { "V4-ACL-TABLE": { @@ -1680,6 +1680,17 @@ } }, + "MPLS_TC_TO_TC_MAP": { + "Mpls_tc_to_tc_map1": { + "1": "1", + "3": "2" + }, + "Mpls_tc_to_tc_map2": { + "2": "3", + "4": "4" + } + }, + "DOT1P_TO_TC_MAP": { "Dot1p_to_tc_map1": { "1": "1", @@ -1831,7 +1842,7 @@ "server_ipv6": "fc02:1000::30/128", "soc_ipv4": "192.168.0.3/32", "state": "auto" - }, + }, "Ethernet0": { "server_ipv4": "192.168.0.2/32", "server_ipv6": "fc02:1000::30/128", diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/mpls_tc_to_tc_map.json b/src/sonic-yang-models/tests/yang_model_tests/tests/mpls_tc_to_tc_map.json new file mode 100644 index 000000000000..1362a8424e16 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/mpls_tc_to_tc_map.json @@ -0,0 +1,11 @@ +{ + "MPLS_TCP_MAP_INCORRECT_MPLS_PATTERN": { + "desc": "Configure INCORRECT_MPLS_PATTERN in MPLS_TCP_MAP Table.", + "eStr": ["Invalid MPLS"] + + }, + "MPLS_TCP_MAP_INCORRECT_TC_PATTERN": { + "desc": "Configure 
INCORRECT_TC_PATTERN in MPLS_TCP_MAP Table.", + "eStr": ["Invalid Traffic Class"] + } +} \ No newline at end of file diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/mpls_tc_to_tc_map.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/mpls_tc_to_tc_map.json new file mode 100644 index 000000000000..dc11e184424e --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/mpls_tc_to_tc_map.json @@ -0,0 +1,36 @@ +{ + "MPLS_TCP_MAP_INCORRECT_MPLS_PATTERN": { + "sonic-mpls-tc-map:sonic-mpls-tc-map": { + "sonic-mpls-tc-map:MPLS_TC_TO_TC_MAP": { + "MPLS_TC_TO_TC_MAP_LIST": [ + { + "name": "Mpls_tc_to_tc_map1", + "MPLS_TC_TO_TC_MAP": [ + { + "mpls": "8", + "tc": "1" + } + ] + } + ] + } + } + }, + "MPLS_TCP_MAP_INCORRECT_TC_PATTERN": { + "sonic-mpls-tc-map:sonic-mpls-tc-map": { + "sonic-mpls-tc-map:MPLS_TC_TO_TC_MAP": { + "MPLS_TC_TO_TC_MAP_LIST": [ + { + "name": "Mpls_tc_to_tc_map2", + "MPLS_TC_TO_TC_MAP": [ + { + "mpls": "1", + "tc": "16" + } + ] + } + ] + } + } + } +} diff --git a/src/sonic-yang-models/yang-models/sonic-mpls-tc-map.yang b/src/sonic-yang-models/yang-models/sonic-mpls-tc-map.yang new file mode 100644 index 000000000000..064e448d305c --- /dev/null +++ b/src/sonic-yang-models/yang-models/sonic-mpls-tc-map.yang @@ -0,0 +1,70 @@ +module sonic-mpls-tc-map { + + yang-version 1.1; + + namespace "http://github.com/Azure/sonic-mpls-tc-map"; + + prefix mpls_tc_map; + + import sonic-types { + prefix stypes; + } + + organization + "SONiC"; + + contact + "SONiC"; + + description + "MPLS_TC_TO_TC_MAP yang Module for SONiC OS"; + + revision 2021-04-15 { + description + "Initial revision."; + } + + container sonic-mpls-tc-map { + + container MPLS_TC_TO_TC_MAP { + + description "MPLS_TC_TO_TC_MAP part of config_db.json"; + + list MPLS_TC_TO_TC_MAP_LIST { + + key "name"; + + leaf name { + description "Name of MPLS TC Mpping List"; + type string { + pattern '[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,31})'; + length 1..32 { + 
error-message "Invalid length for map name."; + error-app-tag map-name-invalid-length; + } + } + } + + list MPLS_TC_TO_TC_MAP { //this is list inside list for storing mapping between two fields + + key "mpls"; + + leaf mpls { + description "MPLS TC Value"; + type string { + pattern "[0-7]?" { + error-message "Invalid MPLS"; + error-app-tag mpls-invalid; + } + } + } + + leaf tc { + description "Matching TC Value"; + type stypes:tc_type; + } + } + } + } + } +} From 56d2c81f26df59a6c369ac55daa2fbccc709dfda Mon Sep 17 00:00:00 2001 From: Praveen Chaudhary Date: Thu, 13 Oct 2022 10:14:11 -0700 Subject: [PATCH 049/174] [sonic-dhcp-server.yang]: yang model for dhcp_server table. (#12175) changes: -- yang model for dhcp_server table. -- tests. Why I did it yang model for dhcp_server table. How I did it -- yang model for dhcp_server table. -- yang model tests. How to verify it -- yang model build time tests. --- src/sonic-yang-models/setup.py | 1 + .../tests/files/sample_config_db.json | 4 ++ .../yang_model_tests/tests/dhcp_server.json | 5 ++ .../tests_config/dhcp_server.json | 16 ++++++ .../yang-models/sonic-dhcp-server.yang | 52 +++++++++++++++++++ 5 files changed, 78 insertions(+) create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests/dhcp_server.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests_config/dhcp_server.json create mode 100644 src/sonic-yang-models/yang-models/sonic-dhcp-server.yang diff --git a/src/sonic-yang-models/setup.py b/src/sonic-yang-models/setup.py index 076ce59b554d..9ccee07404cc 100644 --- a/src/sonic-yang-models/setup.py +++ b/src/sonic-yang-models/setup.py @@ -104,6 +104,7 @@ def run(self): './yang-models/sonic-device_metadata.yang', './yang-models/sonic-device_neighbor.yang', './yang-models/sonic-device_neighbor_metadata.yang', + './yang-models/sonic-dhcp-server.yang', './yang-models/sonic-dhcpv6-relay.yang', './yang-models/sonic-extension.yang', './yang-models/sonic-flex_counter.yang', diff --git 
a/src/sonic-yang-models/tests/files/sample_config_db.json b/src/sonic-yang-models/tests/files/sample_config_db.json index fe31a650a08d..4981275d685b 100644 --- a/src/sonic-yang-models/tests/files/sample_config_db.json +++ b/src/sonic-yang-models/tests/files/sample_config_db.json @@ -4,6 +4,10 @@ "Vrf_blue": { } }, + "DHCP_SERVER": { + "192.0.0.8": {}, + "192.0.0.8": {} + }, "BUFFER_POOL": { "ingress_lossy_pool": { "mode": "static", diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/dhcp_server.json b/src/sonic-yang-models/tests/yang_model_tests/tests/dhcp_server.json new file mode 100644 index 000000000000..bf9242f51bee --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/dhcp_server.json @@ -0,0 +1,5 @@ +{ + "DHCP_SERVER_POSITIVE_CONFIG": { + "desc": "Configure DHCP_SERVER POSITIVE_CONFIGs." + } +} diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/dhcp_server.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/dhcp_server.json new file mode 100644 index 000000000000..9fb7ccce99f8 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/dhcp_server.json @@ -0,0 +1,16 @@ +{ + "DHCP_SERVER_POSITIVE_CONFIG": { + "sonic-dhcp-server:sonic-dhcp-server": { + "sonic-dhcp-server:DHCP_SERVER": { + "DHCP_SERVER_LIST": [ + { + "ip": "10.1.1.2" + }, + { + "ip": "10.1.9.2" + } + ] + } + } + } +} diff --git a/src/sonic-yang-models/yang-models/sonic-dhcp-server.yang b/src/sonic-yang-models/yang-models/sonic-dhcp-server.yang new file mode 100644 index 000000000000..8dc88c541423 --- /dev/null +++ b/src/sonic-yang-models/yang-models/sonic-dhcp-server.yang @@ -0,0 +1,52 @@ +module sonic-dhcp-server { + + yang-version 1.1; + + namespace "http://github.com/Azure/sonic-dhcp-server"; + + prefix dhcp-server; + + import ietf-inet-types { + prefix inet; + } + + import sonic-port { + prefix port; + } + + organization + "SONiC"; + + contact + "SONiC"; + + description "DHCP SERVER YANG module for SONiC OS"; + 
+ revision 2022-09-23 { + description "Initial version"; + } + + container sonic-dhcp-server { + + container DHCP_SERVER { + + description "DHCP_SERVER part of config_db.json"; + + list DHCP_SERVER_LIST { + + description "List of IPs in DHCP_SERVER Table"; + + key "ip"; + + leaf ip { + description "IP as DHCP_SERVER"; + type inet:ipv4-address; + } + + } /* end of list IPS_LIST */ + + } /* end of container DHCP_SERVER */ + + } /* end of container sonic-dhcp-server */ + +} /* end of module sonic-dhcp-server */ From 0c68a43712cc385b1d0ea565caf238d19e8f1240 Mon Sep 17 00:00:00 2001 From: tjchadaga <85581939+tjchadaga@users.noreply.github.com> Date: Thu, 13 Oct 2022 10:18:53 -0700 Subject: [PATCH 050/174] Add yang model definition for VOQ_INBAND_INTERFACE (#12306) --- .../tests/sample-voq-graph.xml | 5 - src/sonic-config-engine/tests/test_cfggen.py | 13 -- src/sonic-yang-models/doc/Configuration.md | 17 ++- src/sonic-yang-models/setup.py | 1 + .../tests/files/sample_config_db.json | 8 ++ .../tests/voq-inband-interface.json | 25 ++++ .../tests_config/voq-inband-interface.json | 121 ++++++++++++++++++ .../sonic-voq-inband-interface.yang | 56 ++++++++ 8 files changed, 227 insertions(+), 19 deletions(-) create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests/voq-inband-interface.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests_config/voq-inband-interface.json create mode 100644 src/sonic-yang-models/yang-models/sonic-voq-inband-interface.yang diff --git a/src/sonic-config-engine/tests/sample-voq-graph.xml b/src/sonic-config-engine/tests/sample-voq-graph.xml index a43e64cf6599..b152d74df8a8 100644 --- a/src/sonic-config-engine/tests/sample-voq-graph.xml +++ b/src/sonic-config-engine/tests/sample-voq-graph.xml @@ -50,11 +50,6 @@ - - Vlan3094 - Vlan - 1.1.1.1/24 - Ethernet-IB0 port diff --git a/src/sonic-config-engine/tests/test_cfggen.py b/src/sonic-config-engine/tests/test_cfggen.py index ca5c83e88c79..bcc0625ec8a8 100644 --- 
a/src/sonic-config-engine/tests/test_cfggen.py +++ b/src/sonic-config-engine/tests/test_cfggen.py @@ -913,19 +913,6 @@ def test_minigraph_voq_port_macsec_enabled(self): utils.to_dict("{'lanes': '6,7', 'fec': 'rs', 'alias': 'Ethernet1/1', 'index': '1', 'role': 'Ext', 'speed': '100000', 'macsec': 'macsec-profile', 'description': 'Ethernet1/1', 'mtu': '9100', 'tpid': '0x8100', 'pfc_asym': 'off'}") ) - def test_minigraph_voq_inband_interface_vlan(self): - argument = "-j {} -m {} -p {} --var-json VOQ_INBAND_INTERFACE".format(self.macsec_profile, self.sample_graph_voq, self.voq_port_config) - output = self.run_script(argument) - output_dict = utils.to_dict(output.strip()) - self.assertDictEqual( - output_dict['Vlan3094'], - {'inband_type': 'Vlan'} - ) - self.assertDictEqual( - output_dict['Vlan3094|1.1.1.1/24'], - {} - ) - def test_minigraph_voq_inband_interface_port(self): argument = "-j {} -m {} -p {} --var-json VOQ_INBAND_INTERFACE".format(self.macsec_profile, self.sample_graph_voq, self.voq_port_config) output = self.run_script(argument) diff --git a/src/sonic-yang-models/doc/Configuration.md b/src/sonic-yang-models/doc/Configuration.md index bd7b51ce5ae2..ff18aff97970 100644 --- a/src/sonic-yang-models/doc/Configuration.md +++ b/src/sonic-yang-models/doc/Configuration.md @@ -52,7 +52,8 @@ Table of Contents * [Telemetry](#telemetry) * [Versions](#versions) * [VLAN](#vlan) - * [VLAN_MEMBER](#vlan_member) + * [VLAN_MEMBER](#vlan_member) + * [VOQ Inband Interface](#voq-inband-interface) * [VXLAN](#vxlan) * [Virtual router](#virtual-router) * [WRED_PROFILE](#wred_profile) @@ -1551,6 +1552,20 @@ channel name as object key, and tagging mode as attributes. } ``` +### VOQ INBAND INTERFACE + +VOQ_INBAND_INTERFACE holds the name of the inband system port dedicated for cpu communication. 
At this time, only inband_type of "port" is supported + +``` +"VOQ_INBAND_INTERFACE": { + "Ethernet-IB0": { + "inband_type": "port" + }, + "Ethernet-IB0|3.3.3.1/32": {}, + "Ethernet-IB0|3333::3:5/128": {} +} +``` + ### VXLAN VXLAN_TUNNEL holds the VTEP source ip configuration. diff --git a/src/sonic-yang-models/setup.py b/src/sonic-yang-models/setup.py index 9ccee07404cc..208321754141 100644 --- a/src/sonic-yang-models/setup.py +++ b/src/sonic-yang-models/setup.py @@ -141,6 +141,7 @@ def run(self): './yang-models/sonic-types.yang', './yang-models/sonic-versions.yang', './yang-models/sonic-vlan.yang', + './yang-models/sonic-voq-inband-interface.yang', './yang-models/sonic-vxlan.yang', './yang-models/sonic-vrf.yang', './yang-models/sonic-mclag.yang', diff --git a/src/sonic-yang-models/tests/files/sample_config_db.json b/src/sonic-yang-models/tests/files/sample_config_db.json index 4981275d685b..7beca6d2af07 100644 --- a/src/sonic-yang-models/tests/files/sample_config_db.json +++ b/src/sonic-yang-models/tests/files/sample_config_db.json @@ -1902,6 +1902,14 @@ } }, + "VOQ_INBAND_INTERFACE": { + "Ethernet-IB0": { + "inband_type": "port" + }, + "Ethernet-IB0|3.3.3.1/32": {}, + "Ethernet-IB0|3333::3:5/128": {} + }, + "PASSW_HARDENING": { "POLICIES": { "state": "enabled", diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/voq-inband-interface.json b/src/sonic-yang-models/tests/yang_model_tests/tests/voq-inband-interface.json new file mode 100644 index 000000000000..44531e73a8c4 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/voq-inband-interface.json @@ -0,0 +1,25 @@ +{ + "VOQ_INBAND_INTERFACE_TEST": { + "desc": "Configure voq inband interface" + }, + "VOQ_INBAND_INTERFACE_WRONG_PATTERN_FOR_INTERFACE_NAME_TEST": { + "desc": "Configure incorrect interface name in VOQ_INBAND_INTERFACE table", + "eStrKey" : "Pattern", + "eStr": ["Ethernet-IB[0-9]+"] + }, + "VOQ_INBAND_INTERFACE_DEFAULT_INBAND_TYPE_TEST": { + "desc": "Configure 
voq_inband_interface with default inband type" + }, + "VOQ_INBAND_INTERFACE_INVALID_INBAND_TYPE_TEST": { + "desc": "Configure incorrect inband type", + "eStrKey": "Pattern" + }, + "VOQ_INBAND_INTERFACE_IP_PREFIX_PORT_NON_EXISTING_LEAF_TEST": { + "desc": "Configure ip prefix on voq-inband-interface with non-existing reference", + "eStrKey": "LeafRef" + }, + "VOQ_INBAND_INTERFACE_IP_PREFIX_EMPTY_STRING_TEST": { + "desc": "Configure ip prefix voq-inband-interface with empty ip prefix", + "eStrKey": "InvalidValue" + } +} diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/voq-inband-interface.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/voq-inband-interface.json new file mode 100644 index 000000000000..7fdb07bb2ba3 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/voq-inband-interface.json @@ -0,0 +1,121 @@ +{ + "VOQ_INBAND_INTERFACE_TEST": { + "sonic-voq-inband-interface:sonic-voq-inband-interface" : { + "sonic-voq-inband-interface:VOQ_INBAND_INTERFACE": { + "VOQ_INBAND_INTERFACE_LIST": [ + { + "name": "Ethernet-IB0", + "inband_type": "port" + } + ], + "VOQ_INBAND_INTERFACE_IPPREFIX_LIST": [ + { + "name": "Ethernet-IB0", + "ip-prefix": "3.3.3.1/32" + }, + { + "name": "Ethernet-IB0", + "ip-prefix": "3333::3:5/128" + } + ] + } + } + }, + "VOQ_INBAND_INTERFACE_WRONG_PATTERN_FOR_INTERFACE_NAME_TEST": { + "sonic-voq-inband-interface:sonic-voq-inband-interface" : { + "sonic-voq-inband-interface:VOQ_INBAND_INTERFACE": { + "VOQ_INBAND_INTERFACE_LIST": [ + { + "name": "Ethernet-0IB", + "inband_type": "port" + } + ], + "VOQ_INBAND_INTERFACE_IPPREFIX_LIST": [ + { + "name": "Ethernet-0IB", + "ip-prefix": "3.3.3.1/32" + }, + { + "name": "Ethernet-0IB", + "ip-prefix": "3333::3:5/128" + } + ] + } + } + }, + "VOQ_INBAND_INTERFACE_DEFAULT_INBAND_TYPE_TEST": { + "sonic-voq-inband-interface:sonic-voq-inband-interface" : { + "sonic-voq-inband-interface:VOQ_INBAND_INTERFACE": { + "VOQ_INBAND_INTERFACE_LIST": [ + { + 
"name": "Ethernet-IB0" + } + ], + "VOQ_INBAND_INTERFACE_IPPREFIX_LIST": [ + { + "name": "Ethernet-IB0", + "ip-prefix": "3.3.3.1/32" + } + ] + } + } + }, + "VOQ_INBAND_INTERFACE_INVALID_INBAND_TYPE_TEST": { + "sonic-voq-inband-interface:sonic-voq-inband-interface" : { + "sonic-voq-inband-interface:VOQ_INBAND_INTERFACE": { + "VOQ_INBAND_INTERFACE_LIST": [ + { + "name": "Ethernet-IB0", + "inband_type": "System-port" + } + ], + "VOQ_INBAND_INTERFACE_IPPREFIX_LIST": [ + { + "name": "Ethernet-IB0", + "ip-prefix": "3.3.3.1/32" + } + ] + } + } + }, + "VOQ_INBAND_INTERFACE_IP_PREFIX_PORT_NON_EXISTING_LEAF_TEST": { + "sonic-voq-inband-interface:sonic-voq-inband-interface" : { + "sonic-voq-inband-interface:VOQ_INBAND_INTERFACE": { + "VOQ_INBAND_INTERFACE_LIST": [ + { + "name": "Ethernet-IB0", + "inband_type": "port" + } + ], + "VOQ_INBAND_INTERFACE_IPPREFIX_LIST": [ + { + "name": "Ethernet-IB0", + "ip-prefix": "3.3.3.1/32" + }, + { + "name": "Ethernet-IB1", + "ip-prefix": "3333::3:5/128" + } + ] + } + } + }, + "VOQ_INBAND_INTERFACE_IP_PREFIX_EMPTY_STRING_TEST": { + "sonic-voq-inband-interface:sonic-voq-inband-interface" : { + "sonic-voq-inband-interface:VOQ_INBAND_INTERFACE": { + "VOQ_INBAND_INTERFACE_LIST": [ + { + "name": "Ethernet-IB0", + "inband_type": "port" + } + ], + "VOQ_INBAND_INTERFACE_IPPREFIX_LIST": [ + { + "name": "Ethernet-IB0", + "ip-prefix": "" + } + ] + } + } + } +} diff --git a/src/sonic-yang-models/yang-models/sonic-voq-inband-interface.yang b/src/sonic-yang-models/yang-models/sonic-voq-inband-interface.yang new file mode 100644 index 000000000000..d197a91f80ec --- /dev/null +++ b/src/sonic-yang-models/yang-models/sonic-voq-inband-interface.yang @@ -0,0 +1,56 @@ +module sonic-voq-inband-interface { + namespace "http://github.com/sonic-net/sonic-voq-inband-interface"; + prefix voq-inband-intf; + yang-version 1.1; + + import sonic-types { + prefix stypes; + } + + organization + "SONiC"; + + contact + "SONiC"; + + description + "SONIC BGP Internal Neighbor for 
voq chassis platforms"; + + revision 2022-10-06 { + description + "Initial revision."; + } + + container sonic-voq-inband-interface { + container VOQ_INBAND_INTERFACE { + description "VOQ inband interface in VOQ Chassis"; + list VOQ_INBAND_INTERFACE_LIST { + key "name"; + leaf name { + type string { + pattern "Ethernet-IB[0-9]+"; + } + } + leaf inband_type { + description "Type of inband interface"; + type string { + pattern "port|Port"; + } + default "port"; + } + } + list VOQ_INBAND_INTERFACE_IPPREFIX_LIST { + description "Prefix for VOQ_INBAND_INTERFACE"; + key "name ip-prefix"; + leaf name { + type leafref { + path "../../VOQ_INBAND_INTERFACE_LIST/name"; + } + } + leaf ip-prefix { + type stypes:sonic-ip-prefix; + } + } + } + } +} From fad4034000c4175a7477e1a2da4f1d2249472cff Mon Sep 17 00:00:00 2001 From: Bohan Yang <105526392+abohanyang@users.noreply.github.com> Date: Thu, 13 Oct 2022 20:36:44 -0700 Subject: [PATCH 051/174] Add 36 port 400g SKU for x86_64-arista_7800r3a_36d series of Linecards. (#11872) Add 36 port 400g SKU for x86_64-arista_7800r3a_36d series of Linecards. 
--- .../Arista-7800R3A-36D-D36 | 1 + .../Arista-7800R3A-36D2-D36/0/buffers.json.j2 | 2 + .../0/buffers_defaults_t2.j2 | 37 + .../0/context_config.json | 25 + .../0/j2p-a7800r3a-36d-36x400G.config.bcm | 1022 +++++++++++++++++ .../0/pg_profile_lookup.ini | 17 + .../Arista-7800R3A-36D2-D36/0/port_config.ini | 21 + .../Arista-7800R3A-36D2-D36/0/qos.json.j2 | 1 + .../Arista-7800R3A-36D2-D36/0/sai.profile | 2 + .../Arista-7800R3A-36D2-D36/1/buffers.json.j2 | 2 + .../1/buffers_defaults_t2.j2 | 37 + .../1/context_config.json | 1 + .../1/j2p-a7800r3a-36d-36x400G.config.bcm | 1022 +++++++++++++++++ .../1/pg_profile_lookup.ini | 17 + .../Arista-7800R3A-36D2-D36/1/port_config.ini | 21 + .../Arista-7800R3A-36D2-D36/1/qos.json.j2 | 1 + .../Arista-7800R3A-36D2-D36/1/sai.profile | 1 + .../Arista-7800R3A-36DM2-D36 | 1 + .../Arista-7800R3A-36P-P36 | 1 + .../Arista-7800R3AK-36D2-D36 | 1 + .../Arista-7800R3AK-36DM2-D36 | 1 + 21 files changed, 2234 insertions(+) create mode 120000 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D-D36 create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/buffers.json.j2 create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/buffers_defaults_t2.j2 create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/context_config.json create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/j2p-a7800r3a-36d-36x400G.config.bcm create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/pg_profile_lookup.ini create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/port_config.ini create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/qos.json.j2 create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/sai.profile create mode 100644 
device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/buffers.json.j2 create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/buffers_defaults_t2.j2 create mode 120000 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/context_config.json create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/j2p-a7800r3a-36d-36x400G.config.bcm create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/pg_profile_lookup.ini create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/port_config.ini create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/qos.json.j2 create mode 120000 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/sai.profile create mode 120000 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36DM2-D36 create mode 120000 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36P-P36 create mode 120000 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3AK-36D2-D36 create mode 120000 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3AK-36DM2-D36 diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D-D36 b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D-D36 new file mode 120000 index 000000000000..e029ef37b78a --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D-D36 @@ -0,0 +1 @@ +Arista-7800R3A-36D2-D36 \ No newline at end of file diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/buffers.json.j2 b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/buffers.json.j2 new file mode 100644 index 000000000000..f34a844f4a87 --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/buffers.json.j2 @@ -0,0 +1,2 @@ +{%- set default_topo = 't2' %} +{%- include 'buffers_config.j2' %} diff --git 
a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/buffers_defaults_t2.j2 b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/buffers_defaults_t2.j2 new file mode 100644 index 000000000000..e7af0aff4934 --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/buffers_defaults_t2.j2 @@ -0,0 +1,37 @@ +{%- set default_cable = '300m' %} + +{%- macro generate_port_lists(PORT_ALL) %} + {# Generate list of ports #} + {%- for port_idx in range(0,144,8) %} + {%- if PORT_ALL.append("Ethernet%d" % (port_idx)) %}{%- endif %} + {%- endfor %} +{%- endmacro %} + +{%- macro generate_buffer_pool_and_profiles() %} + "BUFFER_POOL": { + "ingress_lossless_pool": { + "size": "6441610000", + "type": "both", + "mode": "dynamic", + "xoff": "11354112" + } + }, + "BUFFER_PROFILE": { + "ingress_lossy_profile": { + "pool":"ingress_lossless_pool", + "size":"1280", + "xon_offset": "2560", + "dynamic_th":"0" + }, + "egress_lossless_profile": { + "pool":"ingress_lossless_pool", + "size":"0", + "static_th":"33030144" + }, + "egress_lossy_profile": { + "pool":"ingress_lossless_pool", + "size":"0", + "dynamic_th":"-1" + } + }, +{%- endmacro %} diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/context_config.json b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/context_config.json new file mode 100644 index 000000000000..2c126e71899e --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/context_config.json @@ -0,0 +1,25 @@ +{ + "CONTEXTS": [ + { + "guid" : 0, + "name" : "syncd", + "dbAsic" : "ASIC_DB", + "dbCounters" : "COUNTERS_DB", + "dbFlex": "FLEX_COUNTER_DB", + "dbState" : "STATE_DB", + "zmq_enable": false, + "zmq_endpoint": "tcp://127.0.0.1:5555", + "zmq_ntf_endpoint": "tcp://127.0.0.1:5556", + "switches": [ + { + "index" : 0, + "hwinfo" : "06:00.0" + }, + { + "index" : 1, + "hwinfo" : "07:00.0" + } + ] + } + ] +} diff --git 
a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/j2p-a7800r3a-36d-36x400G.config.bcm b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/j2p-a7800r3a-36d-36x400G.config.bcm new file mode 100644 index 000000000000..62fe61d49c9c --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/j2p-a7800r3a-36d-36x400G.config.bcm @@ -0,0 +1,1022 @@ +soc_family=BCM8885X +system_ref_core_clock_khz=1600000 + +dpp_db_path=/usr/share/bcm/db + +#################################################### +##Reference applications related properties - Start +#################################################### + +## PMF small EXEM connected stage: +# Options: IPMF2 - Ingress PMF 2 stage can perform small EXEM lookups. +# IPMF3 - Ingress PMF 3 stage can perform small EXEM lookups. +## PMF small EXEM connected stage: +# Options: IPMF2 - Ingress PMF 2 stage can perform small EXEM lookups. +# IPMF3 - Ingress PMF 3 stage can perform small EXEM lookups. 
+pmf_sexem3_stage=IPMF2 + +#################################################### +##Reference applications related properties - End +#################################################### + +# Jericho2-mode (description 0x1 used for Jericho 2 mode) +system_headers_mode=1 + +# HW mode to support 1024 16-member system wide LAGs +trunk_group_max_members=16 + +# Disable link-training +port_init_cl72=0 + +###Default interfaces for Jericho2Plus +#CPU interfaces +ucode_port_0=CPU.0:core_0.0 +ucode_port_200=CPU.8:core_1.200 +ucode_port_201=CPU.16:core_0.201 +ucode_port_202=CPU.24:core_1.202 +ucode_port_203=CPU.32:core_0.203 + +#NIF ETH interfaces on device +ucode_port_1=CDGE9:core_1.1 +ucode_port_2=CDGE10:core_1.2 +ucode_port_3=CDGE11:core_1.3 +ucode_port_4=CDGE12:core_1.4 +ucode_port_5=CDGE13:core_1.5 +ucode_port_6=CDGE14:core_1.6 +ucode_port_7=CDGE15:core_1.7 +ucode_port_8=CDGE16:core_1.8 +ucode_port_9=CDGE17:core_1.9 + +ucode_port_10=CDGE8:core_0.10 +ucode_port_11=CDGE7:core_0.11 +ucode_port_12=CDGE6:core_0.12 +ucode_port_13=CDGE5:core_0.13 +ucode_port_14=CDGE4:core_0.14 +ucode_port_15=CDGE3:core_0.15 +ucode_port_16=CDGE2:core_0.16 +ucode_port_17=CDGE1:core_0.17 +ucode_port_18=CDGE0:core_0.18 + +#NIF default speeds +port_init_speed_xe=10000 +port_init_speed_xl=40000 +port_init_speed_le=50000 +port_init_speed_ce=100000 +port_init_speed_cc=200000 +port_init_speed_cd=400000 +port_init_speed_il=10312 + +port_priorities=8 + +#special ports +ucode_port_240=OLP:core_0.240 + +# NIF lane mapping +lane_to_serdes_map_nif_lane0=rx3:tx4 +lane_to_serdes_map_nif_lane1=rx6:tx1 +lane_to_serdes_map_nif_lane2=rx7:tx5 +lane_to_serdes_map_nif_lane3=rx4:tx7 +lane_to_serdes_map_nif_lane4=rx1:tx2 +lane_to_serdes_map_nif_lane5=rx0:tx0 +lane_to_serdes_map_nif_lane6=rx5:tx3 +lane_to_serdes_map_nif_lane7=rx2:tx6 +lane_to_serdes_map_nif_lane8=rx10:tx11 +lane_to_serdes_map_nif_lane9=rx8:tx8 +lane_to_serdes_map_nif_lane10=rx14:tx12 +lane_to_serdes_map_nif_lane11=rx15:tx15 
+lane_to_serdes_map_nif_lane12=rx13:tx10 +lane_to_serdes_map_nif_lane13=rx9:tx9 +lane_to_serdes_map_nif_lane14=rx11:tx13 +lane_to_serdes_map_nif_lane15=rx12:tx14 +lane_to_serdes_map_nif_lane16=rx16:tx17 +lane_to_serdes_map_nif_lane17=rx19:tx21 +lane_to_serdes_map_nif_lane18=rx21:tx18 +lane_to_serdes_map_nif_lane19=rx18:tx16 +lane_to_serdes_map_nif_lane20=rx17:tx23 +lane_to_serdes_map_nif_lane21=rx20:tx22 +lane_to_serdes_map_nif_lane22=rx22:tx20 +lane_to_serdes_map_nif_lane23=rx23:tx19 +lane_to_serdes_map_nif_lane24=rx26:tx28 +lane_to_serdes_map_nif_lane25=rx29:tx31 +lane_to_serdes_map_nif_lane26=rx31:tx29 +lane_to_serdes_map_nif_lane27=rx28:tx27 +lane_to_serdes_map_nif_lane28=rx25:tx25 +lane_to_serdes_map_nif_lane29=rx24:tx30 +lane_to_serdes_map_nif_lane30=rx30:tx24 +lane_to_serdes_map_nif_lane31=rx27:tx26 +lane_to_serdes_map_nif_lane32=rx32:tx39 +lane_to_serdes_map_nif_lane33=rx33:tx38 +lane_to_serdes_map_nif_lane34=rx38:tx32 +lane_to_serdes_map_nif_lane35=rx39:tx33 +lane_to_serdes_map_nif_lane36=rx35:tx37 +lane_to_serdes_map_nif_lane37=rx34:tx36 +lane_to_serdes_map_nif_lane38=rx36:tx34 +lane_to_serdes_map_nif_lane39=rx37:tx35 +lane_to_serdes_map_nif_lane40=rx40:tx41 +lane_to_serdes_map_nif_lane41=rx43:tx45 +lane_to_serdes_map_nif_lane42=rx45:tx42 +lane_to_serdes_map_nif_lane43=rx42:tx40 +lane_to_serdes_map_nif_lane44=rx41:tx47 +lane_to_serdes_map_nif_lane45=rx44:tx46 +lane_to_serdes_map_nif_lane46=rx46:tx44 +lane_to_serdes_map_nif_lane47=rx47:tx43 +lane_to_serdes_map_nif_lane48=rx50:tx52 +lane_to_serdes_map_nif_lane49=rx53:tx55 +lane_to_serdes_map_nif_lane50=rx55:tx53 +lane_to_serdes_map_nif_lane51=rx52:tx51 +lane_to_serdes_map_nif_lane52=rx49:tx49 +lane_to_serdes_map_nif_lane53=rx48:tx54 +lane_to_serdes_map_nif_lane54=rx54:tx48 +lane_to_serdes_map_nif_lane55=rx51:tx50 +lane_to_serdes_map_nif_lane56=rx56:tx63 +lane_to_serdes_map_nif_lane57=rx57:tx62 +lane_to_serdes_map_nif_lane58=rx62:tx56 +lane_to_serdes_map_nif_lane59=rx63:tx57 
+lane_to_serdes_map_nif_lane60=rx59:tx61 +lane_to_serdes_map_nif_lane61=rx58:tx60 +lane_to_serdes_map_nif_lane62=rx60:tx58 +lane_to_serdes_map_nif_lane63=rx61:tx59 +lane_to_serdes_map_nif_lane64=rx64:tx65 +lane_to_serdes_map_nif_lane65=rx67:tx69 +lane_to_serdes_map_nif_lane66=rx69:tx66 +lane_to_serdes_map_nif_lane67=rx66:tx64 +lane_to_serdes_map_nif_lane68=rx65:tx71 +lane_to_serdes_map_nif_lane69=rx68:tx70 +lane_to_serdes_map_nif_lane70=rx70:tx68 +lane_to_serdes_map_nif_lane71=rx71:tx67 +lane_to_serdes_map_nif_lane72=rx79:tx74 +lane_to_serdes_map_nif_lane73=rx76:tx75 +lane_to_serdes_map_nif_lane74=rx72:tx76 +lane_to_serdes_map_nif_lane75=rx74:tx73 +lane_to_serdes_map_nif_lane76=rx77:tx79 +lane_to_serdes_map_nif_lane77=rx78:tx78 +lane_to_serdes_map_nif_lane78=rx73:tx77 +lane_to_serdes_map_nif_lane79=rx75:tx72 +lane_to_serdes_map_nif_lane80=rx86:tx86 +lane_to_serdes_map_nif_lane81=rx83:tx87 +lane_to_serdes_map_nif_lane82=rx82:tx81 +lane_to_serdes_map_nif_lane83=rx85:tx80 +lane_to_serdes_map_nif_lane84=rx87:tx85 +lane_to_serdes_map_nif_lane85=rx84:tx84 +lane_to_serdes_map_nif_lane86=rx80:tx82 +lane_to_serdes_map_nif_lane87=rx81:tx83 +lane_to_serdes_map_nif_lane88=rx95:tx90 +lane_to_serdes_map_nif_lane89=rx92:tx88 +lane_to_serdes_map_nif_lane90=rx88:tx92 +lane_to_serdes_map_nif_lane91=rx91:tx95 +lane_to_serdes_map_nif_lane92=rx94:tx89 +lane_to_serdes_map_nif_lane93=rx93:tx91 +lane_to_serdes_map_nif_lane94=rx89:tx93 +lane_to_serdes_map_nif_lane95=rx90:tx94 +lane_to_serdes_map_nif_lane96=rx103:tx97 +lane_to_serdes_map_nif_lane97=rx100:tx96 +lane_to_serdes_map_nif_lane98=rx96:tx100 +lane_to_serdes_map_nif_lane99=rx99:tx103 +lane_to_serdes_map_nif_lane100=rx102:tx99 +lane_to_serdes_map_nif_lane101=rx101:tx98 +lane_to_serdes_map_nif_lane102=rx97:tx101 +lane_to_serdes_map_nif_lane103=rx98:tx102 +lane_to_serdes_map_nif_lane104=rx110:tx107 +lane_to_serdes_map_nif_lane105=rx108:tx105 +lane_to_serdes_map_nif_lane106=rx104:tx108 +lane_to_serdes_map_nif_lane107=rx107:tx110 
+lane_to_serdes_map_nif_lane108=rx111:tx106 +lane_to_serdes_map_nif_lane109=rx109:tx104 +lane_to_serdes_map_nif_lane110=rx105:tx109 +lane_to_serdes_map_nif_lane111=rx106:tx111 +lane_to_serdes_map_nif_lane112=rx119:tx114 +lane_to_serdes_map_nif_lane113=rx116:tx112 +lane_to_serdes_map_nif_lane114=rx112:tx116 +lane_to_serdes_map_nif_lane115=rx115:tx119 +lane_to_serdes_map_nif_lane116=rx118:tx113 +lane_to_serdes_map_nif_lane117=rx117:tx115 +lane_to_serdes_map_nif_lane118=rx113:tx117 +lane_to_serdes_map_nif_lane119=rx114:tx118 +lane_to_serdes_map_nif_lane120=rx127:tx121 +lane_to_serdes_map_nif_lane121=rx124:tx120 +lane_to_serdes_map_nif_lane122=rx120:tx124 +lane_to_serdes_map_nif_lane123=rx123:tx127 +lane_to_serdes_map_nif_lane124=rx126:tx123 +lane_to_serdes_map_nif_lane125=rx125:tx122 +lane_to_serdes_map_nif_lane126=rx121:tx125 +lane_to_serdes_map_nif_lane127=rx122:tx126 +lane_to_serdes_map_nif_lane128=rx134:tx131 +lane_to_serdes_map_nif_lane129=rx132:tx129 +lane_to_serdes_map_nif_lane130=rx128:tx132 +lane_to_serdes_map_nif_lane131=rx131:tx134 +lane_to_serdes_map_nif_lane132=rx135:tx130 +lane_to_serdes_map_nif_lane133=rx133:tx128 +lane_to_serdes_map_nif_lane134=rx129:tx133 +lane_to_serdes_map_nif_lane135=rx130:tx135 +lane_to_serdes_map_nif_lane136=rx143:tx138 +lane_to_serdes_map_nif_lane137=rx140:tx136 +lane_to_serdes_map_nif_lane138=rx136:tx140 +lane_to_serdes_map_nif_lane139=rx139:tx143 +lane_to_serdes_map_nif_lane140=rx142:tx137 +lane_to_serdes_map_nif_lane141=rx141:tx139 +lane_to_serdes_map_nif_lane142=rx137:tx141 +lane_to_serdes_map_nif_lane143=rx138:tx142 + +######################### +### High Availability ### +######################### + +sw_state_max_size=750000000 + +#location of warmboot NV memory +#Allowed options for dnx are - 3:external storage in filesystem 4:driver will save the state directly in shared memory +stable_location=4 + +# Note that each unit should have a unique filename and that adapter does not play well with tmp and dev/shm folders. 
+stable_filename=/dev/shm/warmboot_data_0 +stable_filename.1=/dev/shm/warmboot_data_1 +stable_filename.2=/dev/shm/warmboot_data_2 + +#Maximum size for NVM used for WB storage, must be larger than sw_state_max_size.BCM8885X +stable_size=800000000 + +######################### +######################### +######################### + +tm_port_header_type_in_0=INJECTED_2_PP +tm_port_header_type_out_0=CPU + +tm_port_header_type_in_200=INJECTED_2_PP +tm_port_header_type_out_200=ETH +tm_port_header_type_in_201=INJECTED_2_PP +tm_port_header_type_out_201=ETH +tm_port_header_type_in_202=INJECTED_2_PP +tm_port_header_type_out_202=ETH +tm_port_header_type_in_203=INJECTED_2_PP +tm_port_header_type_out_203=ETH + +### SAT +## Enable SAT Interface. 0 - Disable, 1 - Enable (Default) +sat_enable=1 +ucode_port_218=SAT:core_0.218 +tm_port_header_type_out_218=CPU +tm_port_header_type_in_218=INJECTED_2 +ucode_port_219=SAT:core_1.219 +tm_port_header_type_out_219=CPU +tm_port_header_type_in_219=INJECTED_2 +port_init_speed_sat=400000 + +### RCY +sai_recycle_port_lane_base=0 +ucode_port_221=RCY.21:core_0.221 +ucode_port_222=RCY.22:core_1.222 +tm_port_header_type_out_221=ETH +tm_port_header_type_in_221=ETH +tm_port_header_type_out_222=ETH +tm_port_header_type_in_222=ETH +port_init_speed_221=400000 +port_init_speed_222=400000 + +#OLP port +tm_port_header_type_in_240=INJECTED_2 +tm_port_header_type_out_240=RAW + +# Set statically the region mode per region id +dtm_flow_mapping_mode_region_257=3 +dtm_flow_mapping_mode_region_258=3 +dtm_flow_mapping_mode_region_259=3 +dtm_flow_mapping_mode_region_260=3 +dtm_flow_mapping_mode_region_261=3 +dtm_flow_mapping_mode_region_262=3 +dtm_flow_mapping_mode_region_263=3 +dtm_flow_mapping_mode_region_264=3 +dtm_flow_mapping_mode_region_265=3 +dtm_flow_mapping_mode_region_266=7 +dtm_flow_mapping_mode_region_267=3 +dtm_flow_mapping_mode_region_268=3 +dtm_flow_mapping_mode_region_269=3 +dtm_flow_mapping_mode_region_270=3 +dtm_flow_mapping_mode_region_271=3 
+dtm_flow_mapping_mode_region_272=3 +dtm_flow_mapping_mode_region_273=3 +dtm_flow_mapping_mode_region_274=3 +dtm_flow_mapping_mode_region_275=3 +dtm_flow_mapping_mode_region_276=3 +dtm_flow_mapping_mode_region_277=3 +dtm_flow_mapping_mode_region_278=3 +dtm_flow_mapping_mode_region_279=3 +dtm_flow_mapping_mode_region_280=3 +dtm_flow_mapping_mode_region_281=3 +dtm_flow_mapping_mode_region_282=3 +dtm_flow_mapping_mode_region_283=3 +dtm_flow_mapping_mode_region_284=3 +dtm_flow_mapping_mode_region_285=3 +dtm_flow_mapping_mode_region_286=3 +dtm_flow_mapping_mode_region_287=3 + +## Configure number of symmetric cores each region supports ## +dtm_flow_nof_remote_cores_region_1=2 +dtm_flow_nof_remote_cores_region_2=2 +dtm_flow_nof_remote_cores_region_3=2 +dtm_flow_nof_remote_cores_region_4=2 +dtm_flow_nof_remote_cores_region_5=2 +dtm_flow_nof_remote_cores_region_6=2 +dtm_flow_nof_remote_cores_region_7=2 +dtm_flow_nof_remote_cores_region_8=2 +dtm_flow_nof_remote_cores_region_9=2 +dtm_flow_nof_remote_cores_region_10=2 +dtm_flow_nof_remote_cores_region_11=2 +dtm_flow_nof_remote_cores_region_12=2 +dtm_flow_nof_remote_cores_region_13=2 +dtm_flow_nof_remote_cores_region_14=2 +dtm_flow_nof_remote_cores_region_15=2 +dtm_flow_nof_remote_cores_region_16=2 +dtm_flow_nof_remote_cores_region_17=2 +dtm_flow_nof_remote_cores_region_18=2 +dtm_flow_nof_remote_cores_region_19=2 +dtm_flow_nof_remote_cores_region_20=2 +dtm_flow_nof_remote_cores_region_21=2 +dtm_flow_nof_remote_cores_region_22=2 +dtm_flow_nof_remote_cores_region_23=2 +dtm_flow_nof_remote_cores_region_24=2 +dtm_flow_nof_remote_cores_region_25=2 +dtm_flow_nof_remote_cores_region_26=2 +dtm_flow_nof_remote_cores_region_27=2 +dtm_flow_nof_remote_cores_region_28=2 +dtm_flow_nof_remote_cores_region_29=2 +dtm_flow_nof_remote_cores_region_30=2 +dtm_flow_nof_remote_cores_region_31=2 +dtm_flow_nof_remote_cores_region_32=2 +dtm_flow_nof_remote_cores_region_33=2 +dtm_flow_nof_remote_cores_region_34=2 +dtm_flow_nof_remote_cores_region_35=2 
+dtm_flow_nof_remote_cores_region_36=2 +dtm_flow_nof_remote_cores_region_37=2 +dtm_flow_nof_remote_cores_region_38=2 +dtm_flow_nof_remote_cores_region_39=2 +dtm_flow_nof_remote_cores_region_40=2 +dtm_flow_nof_remote_cores_region_41=2 +dtm_flow_nof_remote_cores_region_42=2 +dtm_flow_nof_remote_cores_region_43=2 +dtm_flow_nof_remote_cores_region_44=2 +dtm_flow_nof_remote_cores_region_45=2 +dtm_flow_nof_remote_cores_region_46=2 +dtm_flow_nof_remote_cores_region_47=2 +dtm_flow_nof_remote_cores_region_48=2 +dtm_flow_nof_remote_cores_region_49=2 +dtm_flow_nof_remote_cores_region_50=2 +dtm_flow_nof_remote_cores_region_51=2 +dtm_flow_nof_remote_cores_region_52=2 +dtm_flow_nof_remote_cores_region_53=2 +dtm_flow_nof_remote_cores_region_54=2 +dtm_flow_nof_remote_cores_region_55=2 +dtm_flow_nof_remote_cores_region_56=2 +dtm_flow_nof_remote_cores_region_57=2 +dtm_flow_nof_remote_cores_region_58=2 +dtm_flow_nof_remote_cores_region_59=2 +dtm_flow_nof_remote_cores_region_60=2 + +### MDB configuration ### +mdb_profile=balanced-exem + +### Descriptor-DMA configuration ### +dma_desc_aggregator_chain_length_max=1000 +dma_desc_aggregator_buff_size_kb=100 +dma_desc_aggregator_timeout_usec=1000 +dma_desc_aggregator_enable_specific_MDB_LPM=1 +dma_desc_aggregator_enable_specific_MDB_FEC=1 + +### Outlif configuarion ### +outlif_logical_to_physical_phase_map_1=S1 +outlif_logical_to_physical_phase_map_2=L1 +outlif_logical_to_physical_phase_map_3=XL +outlif_logical_to_physical_phase_map_4=L2 +outlif_logical_to_physical_phase_map_5=M1 +outlif_logical_to_physical_phase_map_6=M2 +outlif_logical_to_physical_phase_map_7=M3 +outlif_logical_to_physical_phase_map_8=S2 + +### Outlif data granularity configuration ### +outlif_physical_phase_data_granularity_S1=60 +outlif_physical_phase_data_granularity_S2=60 +outlif_physical_phase_data_granularity_M1=60 +outlif_physical_phase_data_granularity_M2=60 +outlif_physical_phase_data_granularity_M3=60 +outlif_physical_phase_data_granularity_L1=60 
+outlif_physical_phase_data_granularity_L2=60 +outlif_physical_phase_data_granularity_XL=60 + +### Fabric configuration ### +# Enable link-training +port_init_cl72_sfi=1 +serdes_lane_config_cl72_auto_polarity_en=0 +serdes_lane_config_cl72_auto_polarity_en_sfi=1 +serdes_lane_config_cl72_restart_timeout_en=0 + +#SFI speed rate +port_init_speed_fabric=53125 + +## Fabric transmission mode +# Set the Connect mode to the Fabric +# Options: FE - presence of a Fabric device (single stage) +# SINGLE_FAP - stand-alone device +# MESH - devices in Mesh +# Note: If 'diag_chassis' is on, value will be override in dnx.soc +# to be FE instead of SINGLE_FAP. +fabric_connect_mode=FE + +fabric_logical_port_base=512 + +# Fabric lane mapping +lane_to_serdes_map_fabric_lane0=rx0:tx0 +lane_to_serdes_map_fabric_lane1=rx1:tx1 +lane_to_serdes_map_fabric_lane2=rx2:tx2 +lane_to_serdes_map_fabric_lane3=rx3:tx3 +lane_to_serdes_map_fabric_lane4=rx4:tx4 +lane_to_serdes_map_fabric_lane5=rx5:tx5 +lane_to_serdes_map_fabric_lane6=rx6:tx6 +lane_to_serdes_map_fabric_lane7=rx7:tx7 +lane_to_serdes_map_fabric_lane8=rx8:tx10 +lane_to_serdes_map_fabric_lane9=rx9:tx11 +lane_to_serdes_map_fabric_lane10=rx10:tx9 +lane_to_serdes_map_fabric_lane11=rx11:tx8 +lane_to_serdes_map_fabric_lane12=rx12:tx12 +lane_to_serdes_map_fabric_lane13=rx13:tx15 +lane_to_serdes_map_fabric_lane14=rx14:tx14 +lane_to_serdes_map_fabric_lane15=rx15:tx13 +lane_to_serdes_map_fabric_lane16=rx16:tx17 +lane_to_serdes_map_fabric_lane17=rx17:tx18 +lane_to_serdes_map_fabric_lane18=rx18:tx16 +lane_to_serdes_map_fabric_lane19=rx19:tx19 +lane_to_serdes_map_fabric_lane20=rx20:tx21 +lane_to_serdes_map_fabric_lane21=rx21:tx23 +lane_to_serdes_map_fabric_lane22=rx22:tx20 +lane_to_serdes_map_fabric_lane23=rx23:tx22 +lane_to_serdes_map_fabric_lane24=rx24:tx26 +lane_to_serdes_map_fabric_lane25=rx25:tx24 +lane_to_serdes_map_fabric_lane26=rx26:tx25 +lane_to_serdes_map_fabric_lane27=rx27:tx27 +lane_to_serdes_map_fabric_lane28=rx28:tx31 
+lane_to_serdes_map_fabric_lane29=rx29:tx30 +lane_to_serdes_map_fabric_lane30=rx30:tx29 +lane_to_serdes_map_fabric_lane31=rx31:tx28 +lane_to_serdes_map_fabric_lane32=rx32:tx32 +lane_to_serdes_map_fabric_lane33=rx33:tx33 +lane_to_serdes_map_fabric_lane34=rx34:tx34 +lane_to_serdes_map_fabric_lane35=rx35:tx35 +lane_to_serdes_map_fabric_lane36=rx36:tx36 +lane_to_serdes_map_fabric_lane37=rx37:tx37 +lane_to_serdes_map_fabric_lane38=rx38:tx38 +lane_to_serdes_map_fabric_lane39=rx39:tx39 +lane_to_serdes_map_fabric_lane40=rx40:tx43 +lane_to_serdes_map_fabric_lane41=rx41:tx42 +lane_to_serdes_map_fabric_lane42=rx42:tx41 +lane_to_serdes_map_fabric_lane43=rx43:tx40 +lane_to_serdes_map_fabric_lane44=rx44:tx47 +lane_to_serdes_map_fabric_lane45=rx45:tx46 +lane_to_serdes_map_fabric_lane46=rx46:tx45 +lane_to_serdes_map_fabric_lane47=rx47:tx44 +lane_to_serdes_map_fabric_lane48=rx48:tx48 +lane_to_serdes_map_fabric_lane49=rx49:tx49 +lane_to_serdes_map_fabric_lane50=rx50:tx50 +lane_to_serdes_map_fabric_lane51=rx51:tx51 +lane_to_serdes_map_fabric_lane52=rx52:tx52 +lane_to_serdes_map_fabric_lane53=rx53:tx53 +lane_to_serdes_map_fabric_lane54=rx54:tx54 +lane_to_serdes_map_fabric_lane55=rx55:tx55 +lane_to_serdes_map_fabric_lane56=rx56:tx59 +lane_to_serdes_map_fabric_lane57=rx57:tx58 +lane_to_serdes_map_fabric_lane58=rx58:tx57 +lane_to_serdes_map_fabric_lane59=rx59:tx56 +lane_to_serdes_map_fabric_lane60=rx60:tx63 +lane_to_serdes_map_fabric_lane61=rx61:tx62 +lane_to_serdes_map_fabric_lane62=rx62:tx61 +lane_to_serdes_map_fabric_lane63=rx63:tx60 +lane_to_serdes_map_fabric_lane64=rx64:tx64 +lane_to_serdes_map_fabric_lane65=rx65:tx65 +lane_to_serdes_map_fabric_lane66=rx66:tx66 +lane_to_serdes_map_fabric_lane67=rx67:tx67 +lane_to_serdes_map_fabric_lane68=rx68:tx68 +lane_to_serdes_map_fabric_lane69=rx69:tx69 +lane_to_serdes_map_fabric_lane70=rx70:tx70 +lane_to_serdes_map_fabric_lane71=rx71:tx71 +lane_to_serdes_map_fabric_lane72=rx72:tx75 +lane_to_serdes_map_fabric_lane73=rx73:tx74 
+lane_to_serdes_map_fabric_lane74=rx74:tx73 +lane_to_serdes_map_fabric_lane75=rx75:tx72 +lane_to_serdes_map_fabric_lane76=rx76:tx79 +lane_to_serdes_map_fabric_lane77=rx77:tx78 +lane_to_serdes_map_fabric_lane78=rx78:tx77 +lane_to_serdes_map_fabric_lane79=rx79:tx76 +lane_to_serdes_map_fabric_lane80=rx80:tx80 +lane_to_serdes_map_fabric_lane81=rx81:tx81 +lane_to_serdes_map_fabric_lane82=rx82:tx83 +lane_to_serdes_map_fabric_lane83=rx83:tx82 +lane_to_serdes_map_fabric_lane84=rx84:tx85 +lane_to_serdes_map_fabric_lane85=rx85:tx86 +lane_to_serdes_map_fabric_lane86=rx86:tx84 +lane_to_serdes_map_fabric_lane87=rx87:tx87 +lane_to_serdes_map_fabric_lane88=rx88:tx90 +lane_to_serdes_map_fabric_lane89=rx89:tx88 +lane_to_serdes_map_fabric_lane90=rx90:tx91 +lane_to_serdes_map_fabric_lane91=rx91:tx89 +lane_to_serdes_map_fabric_lane92=rx92:tx93 +lane_to_serdes_map_fabric_lane93=rx93:tx92 +lane_to_serdes_map_fabric_lane94=rx94:tx94 +lane_to_serdes_map_fabric_lane95=rx95:tx95 +lane_to_serdes_map_fabric_lane96=rx96:tx96 +lane_to_serdes_map_fabric_lane97=rx97:tx97 +lane_to_serdes_map_fabric_lane98=rx98:tx98 +lane_to_serdes_map_fabric_lane99=rx99:tx99 +lane_to_serdes_map_fabric_lane100=rx100:tx100 +lane_to_serdes_map_fabric_lane101=rx101:tx101 +lane_to_serdes_map_fabric_lane102=rx102:tx102 +lane_to_serdes_map_fabric_lane103=rx103:tx103 +lane_to_serdes_map_fabric_lane104=rx104:tx105 +lane_to_serdes_map_fabric_lane105=rx105:tx106 +lane_to_serdes_map_fabric_lane106=rx106:tx107 +lane_to_serdes_map_fabric_lane107=rx107:tx104 +lane_to_serdes_map_fabric_lane108=rx108:tx111 +lane_to_serdes_map_fabric_lane109=rx109:tx109 +lane_to_serdes_map_fabric_lane110=rx110:tx110 +lane_to_serdes_map_fabric_lane111=rx111:tx108 +lane_to_serdes_map_fabric_lane112=rx112:tx114 +lane_to_serdes_map_fabric_lane113=rx113:tx113 +lane_to_serdes_map_fabric_lane114=rx114:tx112 +lane_to_serdes_map_fabric_lane115=rx115:tx115 +lane_to_serdes_map_fabric_lane116=rx116:tx117 +lane_to_serdes_map_fabric_lane117=rx117:tx116 
+lane_to_serdes_map_fabric_lane118=rx118:tx119 +lane_to_serdes_map_fabric_lane119=rx119:tx118 +lane_to_serdes_map_fabric_lane120=rx120:tx123 +lane_to_serdes_map_fabric_lane121=rx121:tx120 +lane_to_serdes_map_fabric_lane122=rx122:tx122 +lane_to_serdes_map_fabric_lane123=rx123:tx121 +lane_to_serdes_map_fabric_lane124=rx124:tx127 +lane_to_serdes_map_fabric_lane125=rx125:tx125 +lane_to_serdes_map_fabric_lane126=rx126:tx124 +lane_to_serdes_map_fabric_lane127=rx127:tx126 +lane_to_serdes_map_fabric_lane128=rx128:tx128 +lane_to_serdes_map_fabric_lane129=rx129:tx129 +lane_to_serdes_map_fabric_lane130=rx130:tx130 +lane_to_serdes_map_fabric_lane131=rx131:tx131 +lane_to_serdes_map_fabric_lane132=rx132:tx132 +lane_to_serdes_map_fabric_lane133=rx133:tx133 +lane_to_serdes_map_fabric_lane134=rx134:tx134 +lane_to_serdes_map_fabric_lane135=rx135:tx135 +lane_to_serdes_map_fabric_lane136=rx136:tx139 +lane_to_serdes_map_fabric_lane137=rx137:tx138 +lane_to_serdes_map_fabric_lane138=rx138:tx137 +lane_to_serdes_map_fabric_lane139=rx139:tx136 +lane_to_serdes_map_fabric_lane140=rx140:tx140 +lane_to_serdes_map_fabric_lane141=rx141:tx142 +lane_to_serdes_map_fabric_lane142=rx142:tx141 +lane_to_serdes_map_fabric_lane143=rx143:tx143 +lane_to_serdes_map_fabric_lane144=rx144:tx144 +lane_to_serdes_map_fabric_lane145=rx145:tx145 +lane_to_serdes_map_fabric_lane146=rx146:tx146 +lane_to_serdes_map_fabric_lane147=rx147:tx147 +lane_to_serdes_map_fabric_lane148=rx148:tx148 +lane_to_serdes_map_fabric_lane149=rx149:tx149 +lane_to_serdes_map_fabric_lane150=rx150:tx150 +lane_to_serdes_map_fabric_lane151=rx151:tx151 +lane_to_serdes_map_fabric_lane152=rx152:tx155 +lane_to_serdes_map_fabric_lane153=rx153:tx154 +lane_to_serdes_map_fabric_lane154=rx154:tx153 +lane_to_serdes_map_fabric_lane155=rx155:tx152 +lane_to_serdes_map_fabric_lane156=rx156:tx159 +lane_to_serdes_map_fabric_lane157=rx157:tx158 +lane_to_serdes_map_fabric_lane158=rx158:tx157 +lane_to_serdes_map_fabric_lane159=rx159:tx156 
+lane_to_serdes_map_fabric_lane160=rx160:tx160 +lane_to_serdes_map_fabric_lane161=rx161:tx161 +lane_to_serdes_map_fabric_lane162=rx162:tx162 +lane_to_serdes_map_fabric_lane163=rx163:tx163 +lane_to_serdes_map_fabric_lane164=rx164:tx164 +lane_to_serdes_map_fabric_lane165=rx165:tx165 +lane_to_serdes_map_fabric_lane166=rx166:tx166 +lane_to_serdes_map_fabric_lane167=rx167:tx167 +lane_to_serdes_map_fabric_lane168=rx168:tx171 +lane_to_serdes_map_fabric_lane169=rx169:tx170 +lane_to_serdes_map_fabric_lane170=rx170:tx169 +lane_to_serdes_map_fabric_lane171=rx171:tx168 +lane_to_serdes_map_fabric_lane172=rx172:tx175 +lane_to_serdes_map_fabric_lane173=rx173:tx174 +lane_to_serdes_map_fabric_lane174=rx174:tx173 +lane_to_serdes_map_fabric_lane175=rx175:tx172 +lane_to_serdes_map_fabric_lane176=rx176:tx176 +lane_to_serdes_map_fabric_lane177=rx177:tx177 +lane_to_serdes_map_fabric_lane178=rx178:tx179 +lane_to_serdes_map_fabric_lane179=rx179:tx178 +lane_to_serdes_map_fabric_lane180=rx180:tx181 +lane_to_serdes_map_fabric_lane181=rx181:tx182 +lane_to_serdes_map_fabric_lane182=rx182:tx180 +lane_to_serdes_map_fabric_lane183=rx183:tx183 +lane_to_serdes_map_fabric_lane184=rx184:tx186 +lane_to_serdes_map_fabric_lane185=rx185:tx184 +lane_to_serdes_map_fabric_lane186=rx186:tx185 +lane_to_serdes_map_fabric_lane187=rx187:tx187 +lane_to_serdes_map_fabric_lane188=rx188:tx188 +lane_to_serdes_map_fabric_lane189=rx189:tx189 +lane_to_serdes_map_fabric_lane190=rx190:tx190 +lane_to_serdes_map_fabric_lane191=rx191:tx191 + +# +##Protocol trap look-up mode: +# Options: IN_LIF - Look-ups in the profile table are done by IN-LIF +# IN_PORT - Look-ups in the profile table are done by IN-PORT +protocol_traps_mode=IN_LIF + +# access definitions +schan_intr_enable=0 +tdma_intr_enable=0 +tslam_intr_enable=0 +miim_intr_enable=0 +schan_timeout_usec=300000 +tdma_timeout_usec=1000000 +tslam_timeout_usec=1000000 + +### Interrupts +appl_enable_intr_init=1 +polled_irq_mode=1 +# reduce CPU load, configure delay 100ms 
+polled_irq_delay=1000 + +# reduce the CPU load over adapter (caused by counter thread) +bcm_stat_interval=1000 + +# shadow memory +mem_cache_enable_ecc=1 +mem_cache_enable_parity=1 + +# serdes_nif/fabric_clk_freq_in/out configuration +serdes_nif_clk_freq_in=2 +serdes_nif_clk_freq_out=1 +serdes_fabric_clk_freq_in=2 +serdes_fabric_clk_freq_out=1 + +dport_map_direct=1 + +rif_id_max=0x6000 + +phy_rx_polarity_flip_phy0=0 +phy_rx_polarity_flip_phy1=0 +phy_rx_polarity_flip_phy2=0 +phy_rx_polarity_flip_phy3=0 +phy_rx_polarity_flip_phy4=0 +phy_rx_polarity_flip_phy5=0 +phy_rx_polarity_flip_phy6=0 +phy_rx_polarity_flip_phy7=0 +phy_rx_polarity_flip_phy8=1 +phy_rx_polarity_flip_phy9=1 +phy_rx_polarity_flip_phy10=0 +phy_rx_polarity_flip_phy11=1 +phy_rx_polarity_flip_phy12=1 +phy_rx_polarity_flip_phy13=1 +phy_rx_polarity_flip_phy14=1 +phy_rx_polarity_flip_phy15=1 +phy_rx_polarity_flip_phy16=0 +phy_rx_polarity_flip_phy17=0 +phy_rx_polarity_flip_phy18=0 +phy_rx_polarity_flip_phy19=0 +phy_rx_polarity_flip_phy20=0 +phy_rx_polarity_flip_phy21=0 +phy_rx_polarity_flip_phy22=0 +phy_rx_polarity_flip_phy23=0 +phy_rx_polarity_flip_phy24=0 +phy_rx_polarity_flip_phy25=0 +phy_rx_polarity_flip_phy26=0 +phy_rx_polarity_flip_phy27=0 +phy_rx_polarity_flip_phy28=0 +phy_rx_polarity_flip_phy29=0 +phy_rx_polarity_flip_phy30=0 +phy_rx_polarity_flip_phy31=0 +phy_rx_polarity_flip_phy32=0 +phy_rx_polarity_flip_phy33=0 +phy_rx_polarity_flip_phy34=0 +phy_rx_polarity_flip_phy35=0 +phy_rx_polarity_flip_phy36=0 +phy_rx_polarity_flip_phy37=0 +phy_rx_polarity_flip_phy38=0 +phy_rx_polarity_flip_phy39=0 +phy_rx_polarity_flip_phy40=0 +phy_rx_polarity_flip_phy41=0 +phy_rx_polarity_flip_phy42=0 +phy_rx_polarity_flip_phy43=0 +phy_rx_polarity_flip_phy44=0 +phy_rx_polarity_flip_phy45=0 +phy_rx_polarity_flip_phy46=0 +phy_rx_polarity_flip_phy47=0 +phy_rx_polarity_flip_phy48=0 +phy_rx_polarity_flip_phy49=0 +phy_rx_polarity_flip_phy50=0 +phy_rx_polarity_flip_phy51=0 +phy_rx_polarity_flip_phy52=0 
+phy_rx_polarity_flip_phy53=0 +phy_rx_polarity_flip_phy54=0 +phy_rx_polarity_flip_phy55=0 +phy_rx_polarity_flip_phy56=0 +phy_rx_polarity_flip_phy57=0 +phy_rx_polarity_flip_phy58=0 +phy_rx_polarity_flip_phy59=0 +phy_rx_polarity_flip_phy60=0 +phy_rx_polarity_flip_phy61=0 +phy_rx_polarity_flip_phy62=0 +phy_rx_polarity_flip_phy63=0 +phy_rx_polarity_flip_phy64=0 +phy_rx_polarity_flip_phy65=0 +phy_rx_polarity_flip_phy66=0 +phy_rx_polarity_flip_phy67=0 +phy_rx_polarity_flip_phy68=0 +phy_rx_polarity_flip_phy69=0 +phy_rx_polarity_flip_phy70=0 +phy_rx_polarity_flip_phy71=0 +phy_rx_polarity_flip_phy72=1 +phy_rx_polarity_flip_phy73=1 +phy_rx_polarity_flip_phy74=1 +phy_rx_polarity_flip_phy75=1 +phy_rx_polarity_flip_phy76=1 +phy_rx_polarity_flip_phy77=1 +phy_rx_polarity_flip_phy78=1 +phy_rx_polarity_flip_phy79=1 +phy_rx_polarity_flip_phy80=0 +phy_rx_polarity_flip_phy81=0 +phy_rx_polarity_flip_phy82=0 +phy_rx_polarity_flip_phy83=0 +phy_rx_polarity_flip_phy84=0 +phy_rx_polarity_flip_phy85=0 +phy_rx_polarity_flip_phy86=0 +phy_rx_polarity_flip_phy87=0 +phy_rx_polarity_flip_phy88=0 +phy_rx_polarity_flip_phy89=0 +phy_rx_polarity_flip_phy90=1 +phy_rx_polarity_flip_phy91=0 +phy_rx_polarity_flip_phy92=0 +phy_rx_polarity_flip_phy93=0 +phy_rx_polarity_flip_phy94=0 +phy_rx_polarity_flip_phy95=0 +phy_rx_polarity_flip_phy96=0 +phy_rx_polarity_flip_phy97=0 +phy_rx_polarity_flip_phy98=0 +phy_rx_polarity_flip_phy99=0 +phy_rx_polarity_flip_phy100=0 +phy_rx_polarity_flip_phy101=0 +phy_rx_polarity_flip_phy102=0 +phy_rx_polarity_flip_phy103=0 +phy_rx_polarity_flip_phy104=0 +phy_rx_polarity_flip_phy105=0 +phy_rx_polarity_flip_phy106=0 +phy_rx_polarity_flip_phy107=0 +phy_rx_polarity_flip_phy108=0 +phy_rx_polarity_flip_phy109=0 +phy_rx_polarity_flip_phy110=0 +phy_rx_polarity_flip_phy111=0 +phy_rx_polarity_flip_phy112=0 +phy_rx_polarity_flip_phy113=0 +phy_rx_polarity_flip_phy114=0 +phy_rx_polarity_flip_phy115=0 +phy_rx_polarity_flip_phy116=0 +phy_rx_polarity_flip_phy117=0 +phy_rx_polarity_flip_phy118=0 
+phy_rx_polarity_flip_phy119=0 +phy_rx_polarity_flip_phy120=0 +phy_rx_polarity_flip_phy121=0 +phy_rx_polarity_flip_phy122=0 +phy_rx_polarity_flip_phy123=0 +phy_rx_polarity_flip_phy124=0 +phy_rx_polarity_flip_phy125=0 +phy_rx_polarity_flip_phy126=0 +phy_rx_polarity_flip_phy127=0 +phy_rx_polarity_flip_phy128=0 +phy_rx_polarity_flip_phy129=0 +phy_rx_polarity_flip_phy130=0 +phy_rx_polarity_flip_phy131=0 +phy_rx_polarity_flip_phy132=0 +phy_rx_polarity_flip_phy133=0 +phy_rx_polarity_flip_phy134=0 +phy_rx_polarity_flip_phy135=0 +phy_rx_polarity_flip_phy136=0 +phy_rx_polarity_flip_phy137=0 +phy_rx_polarity_flip_phy138=0 +phy_rx_polarity_flip_phy139=0 +phy_rx_polarity_flip_phy140=0 +phy_rx_polarity_flip_phy141=0 +phy_rx_polarity_flip_phy142=0 +phy_rx_polarity_flip_phy143=0 +phy_tx_polarity_flip_phy0=1 +phy_tx_polarity_flip_phy1=1 +phy_tx_polarity_flip_phy2=1 +phy_tx_polarity_flip_phy3=1 +phy_tx_polarity_flip_phy4=1 +phy_tx_polarity_flip_phy5=1 +phy_tx_polarity_flip_phy6=1 +phy_tx_polarity_flip_phy7=1 +phy_tx_polarity_flip_phy8=1 +phy_tx_polarity_flip_phy9=0 +phy_tx_polarity_flip_phy10=1 +phy_tx_polarity_flip_phy11=1 +phy_tx_polarity_flip_phy12=1 +phy_tx_polarity_flip_phy13=1 +phy_tx_polarity_flip_phy14=1 +phy_tx_polarity_flip_phy15=1 +phy_tx_polarity_flip_phy16=1 +phy_tx_polarity_flip_phy17=1 +phy_tx_polarity_flip_phy18=1 +phy_tx_polarity_flip_phy19=1 +phy_tx_polarity_flip_phy20=1 +phy_tx_polarity_flip_phy21=1 +phy_tx_polarity_flip_phy22=1 +phy_tx_polarity_flip_phy23=1 +phy_tx_polarity_flip_phy24=1 +phy_tx_polarity_flip_phy25=1 +phy_tx_polarity_flip_phy26=1 +phy_tx_polarity_flip_phy27=1 +phy_tx_polarity_flip_phy28=1 +phy_tx_polarity_flip_phy29=1 +phy_tx_polarity_flip_phy30=1 +phy_tx_polarity_flip_phy31=1 +phy_tx_polarity_flip_phy32=1 +phy_tx_polarity_flip_phy33=1 +phy_tx_polarity_flip_phy34=1 +phy_tx_polarity_flip_phy35=1 +phy_tx_polarity_flip_phy36=1 +phy_tx_polarity_flip_phy37=1 +phy_tx_polarity_flip_phy38=1 +phy_tx_polarity_flip_phy39=1 +phy_tx_polarity_flip_phy40=1 
+phy_tx_polarity_flip_phy41=1 +phy_tx_polarity_flip_phy42=1 +phy_tx_polarity_flip_phy43=1 +phy_tx_polarity_flip_phy44=1 +phy_tx_polarity_flip_phy45=1 +phy_tx_polarity_flip_phy46=1 +phy_tx_polarity_flip_phy47=1 +phy_tx_polarity_flip_phy48=1 +phy_tx_polarity_flip_phy49=1 +phy_tx_polarity_flip_phy50=1 +phy_tx_polarity_flip_phy51=1 +phy_tx_polarity_flip_phy52=1 +phy_tx_polarity_flip_phy53=1 +phy_tx_polarity_flip_phy54=1 +phy_tx_polarity_flip_phy55=1 +phy_tx_polarity_flip_phy56=1 +phy_tx_polarity_flip_phy57=1 +phy_tx_polarity_flip_phy58=1 +phy_tx_polarity_flip_phy59=1 +phy_tx_polarity_flip_phy60=1 +phy_tx_polarity_flip_phy61=1 +phy_tx_polarity_flip_phy62=1 +phy_tx_polarity_flip_phy63=1 +phy_tx_polarity_flip_phy64=1 +phy_tx_polarity_flip_phy65=1 +phy_tx_polarity_flip_phy66=1 +phy_tx_polarity_flip_phy67=1 +phy_tx_polarity_flip_phy68=1 +phy_tx_polarity_flip_phy69=1 +phy_tx_polarity_flip_phy70=1 +phy_tx_polarity_flip_phy71=1 +phy_tx_polarity_flip_phy72=0 +phy_tx_polarity_flip_phy73=0 +phy_tx_polarity_flip_phy74=0 +phy_tx_polarity_flip_phy75=0 +phy_tx_polarity_flip_phy76=0 +phy_tx_polarity_flip_phy77=0 +phy_tx_polarity_flip_phy78=0 +phy_tx_polarity_flip_phy79=0 +phy_tx_polarity_flip_phy80=0 +phy_tx_polarity_flip_phy81=0 +phy_tx_polarity_flip_phy82=0 +phy_tx_polarity_flip_phy83=0 +phy_tx_polarity_flip_phy84=0 +phy_tx_polarity_flip_phy85=0 +phy_tx_polarity_flip_phy86=0 +phy_tx_polarity_flip_phy87=0 +phy_tx_polarity_flip_phy88=1 +phy_tx_polarity_flip_phy89=1 +phy_tx_polarity_flip_phy90=1 +phy_tx_polarity_flip_phy91=1 +phy_tx_polarity_flip_phy92=1 +phy_tx_polarity_flip_phy93=1 +phy_tx_polarity_flip_phy94=1 +phy_tx_polarity_flip_phy95=1 +phy_tx_polarity_flip_phy96=1 +phy_tx_polarity_flip_phy97=1 +phy_tx_polarity_flip_phy98=1 +phy_tx_polarity_flip_phy99=1 +phy_tx_polarity_flip_phy100=1 +phy_tx_polarity_flip_phy101=1 +phy_tx_polarity_flip_phy102=1 +phy_tx_polarity_flip_phy103=1 +phy_tx_polarity_flip_phy104=1 +phy_tx_polarity_flip_phy105=1 +phy_tx_polarity_flip_phy106=1 
+phy_tx_polarity_flip_phy107=1 +phy_tx_polarity_flip_phy108=1 +phy_tx_polarity_flip_phy109=1 +phy_tx_polarity_flip_phy110=1 +phy_tx_polarity_flip_phy111=1 +phy_tx_polarity_flip_phy112=1 +phy_tx_polarity_flip_phy113=1 +phy_tx_polarity_flip_phy114=1 +phy_tx_polarity_flip_phy115=1 +phy_tx_polarity_flip_phy116=1 +phy_tx_polarity_flip_phy117=1 +phy_tx_polarity_flip_phy118=1 +phy_tx_polarity_flip_phy119=1 +phy_tx_polarity_flip_phy120=1 +phy_tx_polarity_flip_phy121=1 +phy_tx_polarity_flip_phy122=1 +phy_tx_polarity_flip_phy123=1 +phy_tx_polarity_flip_phy124=1 +phy_tx_polarity_flip_phy125=1 +phy_tx_polarity_flip_phy126=1 +phy_tx_polarity_flip_phy127=1 +phy_tx_polarity_flip_phy128=1 +phy_tx_polarity_flip_phy129=1 +phy_tx_polarity_flip_phy130=1 +phy_tx_polarity_flip_phy131=1 +phy_tx_polarity_flip_phy132=1 +phy_tx_polarity_flip_phy133=1 +phy_tx_polarity_flip_phy134=1 +phy_tx_polarity_flip_phy135=1 +phy_tx_polarity_flip_phy136=1 +phy_tx_polarity_flip_phy137=1 +phy_tx_polarity_flip_phy138=1 +phy_tx_polarity_flip_phy139=1 +phy_tx_polarity_flip_phy140=1 +phy_tx_polarity_flip_phy141=1 +phy_tx_polarity_flip_phy142=1 +phy_tx_polarity_flip_phy143=1 + +serdes_tx_taps_1=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_2=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_3=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_4=nrz:-5:78:-22:0:0:0 +serdes_tx_taps_5=nrz:-5:80:-23:0:0:0 +serdes_tx_taps_6=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_7=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_8=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_9=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_10=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_11=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_12=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_13=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_14=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_15=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_16=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_17=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_18=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_19=nrz:-6:85:-21:0:0:0 +serdes_tx_taps_20=nrz:-5:83:-22:0:0:0 +serdes_tx_taps_21=nrz:-4:75:-21:0:0:0 +serdes_tx_taps_22=nrz:-8:89:-29:0:0:0 
+serdes_tx_taps_23=nrz:-6:85:-21:0:0:0 +serdes_tx_taps_24=nrz:-5:83:-22:0:0:0 +serdes_tx_taps_25=nrz:-5:78:-22:0:0:0 +serdes_tx_taps_26=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_27=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_28=nrz:-5:80:-23:0:0:0 +serdes_tx_taps_29=nrz:-5:78:-22:0:0:0 +serdes_tx_taps_30=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_31=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_32=nrz:-5:80:-23:0:0:0 +serdes_tx_taps_33=nrz:-5:83:-22:0:0:0 +serdes_tx_taps_34=nrz:-5:83:-22:0:0:0 +serdes_tx_taps_35=nrz:-4:75:-21:0:0:0 +serdes_tx_taps_36=nrz:-8:89:-29:0:0:0 + +serdes_tx_taps_1=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_2=pam4:-16:141:-5:3:-2:-3 +serdes_tx_taps_3=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_4=pam4:-16:141:-5:3:-2:-3 +serdes_tx_taps_5=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_6=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_7=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_8=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_9=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_10=pam4:-14:136:-14:2:0:-4 +serdes_tx_taps_11=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_12=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_13=pam4:-14:136:-14:2:0:-4 +serdes_tx_taps_14=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_15=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_16=pam4:-14:136:-14:2:0:-4 +serdes_tx_taps_17=pam4:-14:136:-14:2:0:-4 +serdes_tx_taps_18=pam4:-16:137:-12:2:0:-3 +serdes_tx_taps_19=pam4:-17:144:-1:2:-3:-3 +serdes_tx_taps_20=pam4:-16:141:-5:3:-2:-3 +serdes_tx_taps_21=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_22=pam4:-16:141:-5:3:-2:-3 +serdes_tx_taps_23=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_24=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_25=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_26=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_27=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_28=pam4:-14:136:-14:2:0:-4 +serdes_tx_taps_29=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_30=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_31=pam4:-14:136:-14:2:0:-4 +serdes_tx_taps_32=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_33=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_34=pam4:-14:136:-14:2:0:-4 
+serdes_tx_taps_35=pam4:-16:141:-5:3:-2:-3 +serdes_tx_taps_36=pam4:-16:137:-12:2:0:-3 + +xflow_macsec_secure_chan_to_num_secure_assoc_encrypt=2 +xflow_macsec_secure_chan_to_num_secure_assoc_decrypt=4 diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/pg_profile_lookup.ini b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/pg_profile_lookup.ini new file mode 100644 index 000000000000..e8289ab03112 --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/pg_profile_lookup.ini @@ -0,0 +1,17 @@ +# PG lossless profiles. +# speed cable size xon xoff threshold xon_offset + 40000 300m 1280 2560 69632 0 1280 + 100000 300m 1280 2560 110592 0 1280 + 400000 300m 1280 2560 315392 0 1280 + 40000 1000m 1280 2560 114688 0 1280 + 100000 1000m 1280 2560 225280 0 1280 + 400000 1000m 1280 2560 778240 0 1280 + 40000 2000m 1280 2560 184320 0 1280 + 100000 2000m 1280 2560 393216 0 1280 + 400000 2000m 1280 2560 1445888 0 1280 + 40000 80000m 1280 2560 5369856 0 1280 + 100000 80000m 1280 2560 13357056 0 1280 + 400000 80000m 1280 2560 53305344 0 1280 + 40000 120000m 1280 2560 8028160 0 1280 + 100000 120000m 1280 2560 20004864 0 1280 + 400000 120000m 1280 2560 79900672 0 1280 diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/port_config.ini b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/port_config.ini new file mode 100644 index 000000000000..4d20baee09fa --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/port_config.ini @@ -0,0 +1,21 @@ +# name lanes alias index role speed asic_port_name coreId corePortId numVoq +Ethernet0 72,73,74,75,76,77,78,79 Ethernet1/1 1 Ext 400000 Eth0-ASIC0 1 1 8 +Ethernet8 80,81,82,83,84,85,86,87 Ethernet2/1 2 Ext 400000 Eth8-ASIC0 1 2 8 +Ethernet16 88,89,90,91,92,93,94,95 Ethernet3/1 3 Ext 400000 Eth16-ASIC0 1 3 8 +Ethernet24 96,97,98,99,100,101,102,103 Ethernet4/1 4 Ext 400000 
Eth24-ASIC0 1 4 8 +Ethernet32 104,105,106,107,108,109,110,111 Ethernet5/1 5 Ext 400000 Eth32-ASIC0 1 5 8 +Ethernet40 112,113,114,115,116,117,118,119 Ethernet6/1 6 Ext 400000 Eth40-ASIC0 1 6 8 +Ethernet48 120,121,122,123,124,125,126,127 Ethernet7/1 7 Ext 400000 Eth48-ASIC0 1 7 8 +Ethernet56 128,129,130,131,132,133,134,135 Ethernet8/1 8 Ext 400000 Eth56-ASIC0 1 8 8 +Ethernet64 136,137,138,139,140,141,142,143 Ethernet9/1 9 Ext 400000 Eth64-ASIC0 1 9 8 +Ethernet72 64,65,66,67,68,69,70,71 Ethernet10/1 10 Ext 400000 Eth72-ASIC0 0 10 8 +Ethernet80 56,57,58,59,60,61,62,63 Ethernet11/1 11 Ext 400000 Eth80-ASIC0 0 11 8 +Ethernet88 48,49,50,51,52,53,54,55 Ethernet12/1 12 Ext 400000 Eth88-ASIC0 0 12 8 +Ethernet96 40,41,42,43,44,45,46,47 Ethernet13/1 13 Ext 400000 Eth96-ASIC0 0 13 8 +Ethernet104 32,33,34,35,36,37,38,39 Ethernet14/1 14 Ext 400000 Eth104-ASIC0 0 14 8 +Ethernet112 24,25,26,27,28,29,30,31 Ethernet15/1 15 Ext 400000 Eth112-ASIC0 0 15 8 +Ethernet120 16,17,18,19,20,21,22,23 Ethernet16/1 16 Ext 400000 Eth120-ASIC0 0 16 8 +Ethernet128 8,9,10,11,12,13,14,15 Ethernet17/1 17 Ext 400000 Eth128-ASIC0 0 17 8 +Ethernet136 0,1,2,3,4,5,6,7 Ethernet18/1 18 Ext 400000 Eth136-ASIC0 0 18 8 +Ethernet-Rec0 221 Recirc0/0 19 Rec 400000 Rcy0-ASIC0 0 221 8 +Ethernet-IB0 222 Recirc0/1 20 Inb 400000 Rcy1-ASIC0 1 222 8 diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/qos.json.j2 b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/qos.json.j2 new file mode 100644 index 000000000000..3e548325ea30 --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/qos.json.j2 @@ -0,0 +1 @@ +{%- include 'qos_config.j2' %} diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/sai.profile b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/sai.profile new file mode 100644 index 000000000000..894b300ad733 --- /dev/null +++ 
b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/0/sai.profile @@ -0,0 +1,2 @@ +SAI_INIT_CONFIG_FILE=/usr/share/sonic/hwsku/j2p-a7800r3a-36d-36x400G.config.bcm + diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/buffers.json.j2 b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/buffers.json.j2 new file mode 100644 index 000000000000..f34a844f4a87 --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/buffers.json.j2 @@ -0,0 +1,2 @@ +{%- set default_topo = 't2' %} +{%- include 'buffers_config.j2' %} diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/buffers_defaults_t2.j2 b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/buffers_defaults_t2.j2 new file mode 100644 index 000000000000..3555175244d4 --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/buffers_defaults_t2.j2 @@ -0,0 +1,37 @@ +{%- set default_cable = '300m' %} + +{%- macro generate_port_lists(PORT_ALL) %} + {# Generate list of ports #} + {%- for port_idx in range(144,288,8) %} + {%- if PORT_ALL.append("Ethernet%d" % (port_idx)) %}{%- endif %} + {%- endfor %} +{%- endmacro %} + +{%- macro generate_buffer_pool_and_profiles() %} + "BUFFER_POOL": { + "ingress_lossless_pool": { + "size": "6441610000", + "type": "both", + "mode": "dynamic", + "xoff": "11354112" + } + }, + "BUFFER_PROFILE": { + "ingress_lossy_profile": { + "pool":"ingress_lossless_pool", + "size":"1280", + "xon_offset": "2560", + "dynamic_th":"0" + }, + "egress_lossless_profile": { + "pool":"ingress_lossless_pool", + "size":"0", + "static_th":"33030144" + }, + "egress_lossy_profile": { + "pool":"ingress_lossless_pool", + "size":"0", + "dynamic_th":"-1" + } + }, +{%- endmacro %} diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/context_config.json 
b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/context_config.json new file mode 120000 index 000000000000..3db0e8ed3d9b --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/context_config.json @@ -0,0 +1 @@ +../0/context_config.json \ No newline at end of file diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/j2p-a7800r3a-36d-36x400G.config.bcm b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/j2p-a7800r3a-36d-36x400G.config.bcm new file mode 100644 index 000000000000..56d425f9f9c2 --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/j2p-a7800r3a-36d-36x400G.config.bcm @@ -0,0 +1,1022 @@ +soc_family=BCM8885X +system_ref_core_clock_khz=1600000 + +dpp_db_path=/usr/share/bcm/db + +#################################################### +##Reference applications related properties - Start +#################################################### + +## PMF small EXEM connected stage: +# Options: IPMF2 - Ingress PMF 2 stage can perform small EXEM lookups. +# IPMF3 - Ingress PMF 3 stage can perform small EXEM lookups. +## PMF small EXEM connected stage: +# Options: IPMF2 - Ingress PMF 2 stage can perform small EXEM lookups. +# IPMF3 - Ingress PMF 3 stage can perform small EXEM lookups. 
+pmf_sexem3_stage=IPMF2 + +#################################################### +##Reference applications related properties - End +#################################################### + +# Jericho2-mode (description 0x1 used for Jericho 2 mode) +system_headers_mode=1 + +# HW mode to support 1024 16-member system wide LAGs +trunk_group_max_members=16 + +# Disable link-training +port_init_cl72=0 + +###Default interfaces for Jericho2Plus +#CPU interfaces +ucode_port_0=CPU.0:core_0.0 +ucode_port_200=CPU.8:core_1.200 +ucode_port_201=CPU.16:core_0.201 +ucode_port_202=CPU.24:core_1.202 +ucode_port_203=CPU.32:core_0.203 + +#NIF ETH interfaces on device +ucode_port_1=CDGE9:core_1.1 +ucode_port_2=CDGE10:core_1.2 +ucode_port_3=CDGE11:core_1.3 +ucode_port_4=CDGE12:core_1.4 +ucode_port_5=CDGE13:core_1.5 +ucode_port_6=CDGE14:core_1.6 +ucode_port_7=CDGE15:core_1.7 +ucode_port_8=CDGE16:core_1.8 +ucode_port_9=CDGE17:core_1.9 + +ucode_port_10=CDGE8:core_0.10 +ucode_port_11=CDGE7:core_0.11 +ucode_port_12=CDGE6:core_0.12 +ucode_port_13=CDGE5:core_0.13 +ucode_port_14=CDGE4:core_0.14 +ucode_port_15=CDGE3:core_0.15 +ucode_port_16=CDGE2:core_0.16 +ucode_port_17=CDGE1:core_0.17 +ucode_port_18=CDGE0:core_0.18 + +#NIF default speeds +port_init_speed_xe=10000 +port_init_speed_xl=40000 +port_init_speed_le=50000 +port_init_speed_ce=100000 +port_init_speed_cc=200000 +port_init_speed_cd=400000 +port_init_speed_il=10312 + +port_priorities=8 + +#special ports +ucode_port_240=OLP:core_0.240 + +# NIF lane mapping +lane_to_serdes_map_nif_lane0=rx3:tx4 +lane_to_serdes_map_nif_lane1=rx6:tx1 +lane_to_serdes_map_nif_lane2=rx7:tx5 +lane_to_serdes_map_nif_lane3=rx4:tx7 +lane_to_serdes_map_nif_lane4=rx1:tx2 +lane_to_serdes_map_nif_lane5=rx0:tx0 +lane_to_serdes_map_nif_lane6=rx5:tx3 +lane_to_serdes_map_nif_lane7=rx2:tx6 +lane_to_serdes_map_nif_lane8=rx10:tx11 +lane_to_serdes_map_nif_lane9=rx8:tx8 +lane_to_serdes_map_nif_lane10=rx14:tx12 +lane_to_serdes_map_nif_lane11=rx15:tx15 
+lane_to_serdes_map_nif_lane12=rx13:tx10 +lane_to_serdes_map_nif_lane13=rx9:tx9 +lane_to_serdes_map_nif_lane14=rx11:tx13 +lane_to_serdes_map_nif_lane15=rx12:tx14 +lane_to_serdes_map_nif_lane16=rx16:tx17 +lane_to_serdes_map_nif_lane17=rx19:tx21 +lane_to_serdes_map_nif_lane18=rx21:tx18 +lane_to_serdes_map_nif_lane19=rx18:tx16 +lane_to_serdes_map_nif_lane20=rx17:tx23 +lane_to_serdes_map_nif_lane21=rx20:tx22 +lane_to_serdes_map_nif_lane22=rx22:tx20 +lane_to_serdes_map_nif_lane23=rx23:tx19 +lane_to_serdes_map_nif_lane24=rx26:tx28 +lane_to_serdes_map_nif_lane25=rx29:tx31 +lane_to_serdes_map_nif_lane26=rx31:tx29 +lane_to_serdes_map_nif_lane27=rx28:tx27 +lane_to_serdes_map_nif_lane28=rx25:tx25 +lane_to_serdes_map_nif_lane29=rx24:tx30 +lane_to_serdes_map_nif_lane30=rx30:tx24 +lane_to_serdes_map_nif_lane31=rx27:tx26 +lane_to_serdes_map_nif_lane32=rx32:tx39 +lane_to_serdes_map_nif_lane33=rx33:tx38 +lane_to_serdes_map_nif_lane34=rx38:tx32 +lane_to_serdes_map_nif_lane35=rx39:tx33 +lane_to_serdes_map_nif_lane36=rx35:tx37 +lane_to_serdes_map_nif_lane37=rx34:tx36 +lane_to_serdes_map_nif_lane38=rx36:tx34 +lane_to_serdes_map_nif_lane39=rx37:tx35 +lane_to_serdes_map_nif_lane40=rx40:tx41 +lane_to_serdes_map_nif_lane41=rx43:tx45 +lane_to_serdes_map_nif_lane42=rx45:tx42 +lane_to_serdes_map_nif_lane43=rx42:tx40 +lane_to_serdes_map_nif_lane44=rx41:tx47 +lane_to_serdes_map_nif_lane45=rx44:tx46 +lane_to_serdes_map_nif_lane46=rx46:tx44 +lane_to_serdes_map_nif_lane47=rx47:tx43 +lane_to_serdes_map_nif_lane48=rx50:tx52 +lane_to_serdes_map_nif_lane49=rx53:tx55 +lane_to_serdes_map_nif_lane50=rx55:tx53 +lane_to_serdes_map_nif_lane51=rx52:tx51 +lane_to_serdes_map_nif_lane52=rx49:tx49 +lane_to_serdes_map_nif_lane53=rx48:tx54 +lane_to_serdes_map_nif_lane54=rx54:tx48 +lane_to_serdes_map_nif_lane55=rx51:tx50 +lane_to_serdes_map_nif_lane56=rx56:tx63 +lane_to_serdes_map_nif_lane57=rx57:tx62 +lane_to_serdes_map_nif_lane58=rx62:tx56 +lane_to_serdes_map_nif_lane59=rx63:tx57 
+lane_to_serdes_map_nif_lane60=rx59:tx61 +lane_to_serdes_map_nif_lane61=rx58:tx60 +lane_to_serdes_map_nif_lane62=rx60:tx58 +lane_to_serdes_map_nif_lane63=rx61:tx59 +lane_to_serdes_map_nif_lane64=rx64:tx65 +lane_to_serdes_map_nif_lane65=rx67:tx69 +lane_to_serdes_map_nif_lane66=rx69:tx66 +lane_to_serdes_map_nif_lane67=rx66:tx64 +lane_to_serdes_map_nif_lane68=rx65:tx71 +lane_to_serdes_map_nif_lane69=rx68:tx70 +lane_to_serdes_map_nif_lane70=rx70:tx68 +lane_to_serdes_map_nif_lane71=rx71:tx67 +lane_to_serdes_map_nif_lane72=rx79:tx74 +lane_to_serdes_map_nif_lane73=rx76:tx75 +lane_to_serdes_map_nif_lane74=rx72:tx76 +lane_to_serdes_map_nif_lane75=rx74:tx73 +lane_to_serdes_map_nif_lane76=rx77:tx79 +lane_to_serdes_map_nif_lane77=rx78:tx78 +lane_to_serdes_map_nif_lane78=rx73:tx77 +lane_to_serdes_map_nif_lane79=rx75:tx72 +lane_to_serdes_map_nif_lane80=rx86:tx86 +lane_to_serdes_map_nif_lane81=rx83:tx87 +lane_to_serdes_map_nif_lane82=rx82:tx81 +lane_to_serdes_map_nif_lane83=rx85:tx80 +lane_to_serdes_map_nif_lane84=rx87:tx85 +lane_to_serdes_map_nif_lane85=rx84:tx84 +lane_to_serdes_map_nif_lane86=rx80:tx82 +lane_to_serdes_map_nif_lane87=rx81:tx83 +lane_to_serdes_map_nif_lane88=rx95:tx90 +lane_to_serdes_map_nif_lane89=rx92:tx88 +lane_to_serdes_map_nif_lane90=rx88:tx92 +lane_to_serdes_map_nif_lane91=rx91:tx95 +lane_to_serdes_map_nif_lane92=rx94:tx89 +lane_to_serdes_map_nif_lane93=rx93:tx91 +lane_to_serdes_map_nif_lane94=rx89:tx93 +lane_to_serdes_map_nif_lane95=rx90:tx94 +lane_to_serdes_map_nif_lane96=rx103:tx97 +lane_to_serdes_map_nif_lane97=rx100:tx96 +lane_to_serdes_map_nif_lane98=rx96:tx100 +lane_to_serdes_map_nif_lane99=rx99:tx103 +lane_to_serdes_map_nif_lane100=rx102:tx99 +lane_to_serdes_map_nif_lane101=rx101:tx98 +lane_to_serdes_map_nif_lane102=rx97:tx101 +lane_to_serdes_map_nif_lane103=rx98:tx102 +lane_to_serdes_map_nif_lane104=rx110:tx107 +lane_to_serdes_map_nif_lane105=rx108:tx105 +lane_to_serdes_map_nif_lane106=rx104:tx108 +lane_to_serdes_map_nif_lane107=rx107:tx110 
+lane_to_serdes_map_nif_lane108=rx111:tx106 +lane_to_serdes_map_nif_lane109=rx109:tx104 +lane_to_serdes_map_nif_lane110=rx105:tx109 +lane_to_serdes_map_nif_lane111=rx106:tx111 +lane_to_serdes_map_nif_lane112=rx119:tx114 +lane_to_serdes_map_nif_lane113=rx116:tx112 +lane_to_serdes_map_nif_lane114=rx112:tx116 +lane_to_serdes_map_nif_lane115=rx115:tx119 +lane_to_serdes_map_nif_lane116=rx118:tx113 +lane_to_serdes_map_nif_lane117=rx117:tx115 +lane_to_serdes_map_nif_lane118=rx113:tx117 +lane_to_serdes_map_nif_lane119=rx114:tx118 +lane_to_serdes_map_nif_lane120=rx127:tx121 +lane_to_serdes_map_nif_lane121=rx124:tx120 +lane_to_serdes_map_nif_lane122=rx120:tx124 +lane_to_serdes_map_nif_lane123=rx123:tx127 +lane_to_serdes_map_nif_lane124=rx126:tx123 +lane_to_serdes_map_nif_lane125=rx125:tx122 +lane_to_serdes_map_nif_lane126=rx121:tx125 +lane_to_serdes_map_nif_lane127=rx122:tx126 +lane_to_serdes_map_nif_lane128=rx134:tx131 +lane_to_serdes_map_nif_lane129=rx132:tx129 +lane_to_serdes_map_nif_lane130=rx128:tx132 +lane_to_serdes_map_nif_lane131=rx131:tx134 +lane_to_serdes_map_nif_lane132=rx135:tx130 +lane_to_serdes_map_nif_lane133=rx133:tx128 +lane_to_serdes_map_nif_lane134=rx129:tx133 +lane_to_serdes_map_nif_lane135=rx130:tx135 +lane_to_serdes_map_nif_lane136=rx143:tx138 +lane_to_serdes_map_nif_lane137=rx140:tx136 +lane_to_serdes_map_nif_lane138=rx136:tx140 +lane_to_serdes_map_nif_lane139=rx139:tx143 +lane_to_serdes_map_nif_lane140=rx142:tx137 +lane_to_serdes_map_nif_lane141=rx141:tx139 +lane_to_serdes_map_nif_lane142=rx137:tx141 +lane_to_serdes_map_nif_lane143=rx138:tx142 + +######################### +### High Availability ### +######################### + +sw_state_max_size=750000000 + +#location of warmboot NV memory +#Allowed options for dnx are - 3:external storage in filesystem 4:driver will save the state directly in shared memory +stable_location=4 + +# Note that each unit should have a unique filename and that adapter does not play well with tmp and dev/shm folders. 
+stable_filename=/dev/shm/warmboot_data_0 +stable_filename.1=/dev/shm/warmboot_data_1 +stable_filename.2=/dev/shm/warmboot_data_2 + +#Maximum size for NVM used for WB storage, must be larger than sw_state_max_size.BCM8885X +stable_size=800000000 + +######################### +######################### +######################### + +tm_port_header_type_in_0=INJECTED_2_PP +tm_port_header_type_out_0=CPU + +tm_port_header_type_in_200=INJECTED_2_PP +tm_port_header_type_out_200=ETH +tm_port_header_type_in_201=INJECTED_2_PP +tm_port_header_type_out_201=ETH +tm_port_header_type_in_202=INJECTED_2_PP +tm_port_header_type_out_202=ETH +tm_port_header_type_in_203=INJECTED_2_PP +tm_port_header_type_out_203=ETH + +### SAT +## Enable SAT Interface. 0 - Disable, 1 - Enable (Default) +sat_enable=1 +ucode_port_218=SAT:core_0.218 +tm_port_header_type_out_218=CPU +tm_port_header_type_in_218=INJECTED_2 +ucode_port_219=SAT:core_1.219 +tm_port_header_type_out_219=CPU +tm_port_header_type_in_219=INJECTED_2 +port_init_speed_sat=400000 + +### RCY +sai_recycle_port_lane_base=0 +ucode_port_221=RCY.21:core_0.221 +ucode_port_222=RCY.22:core_1.222 +tm_port_header_type_out_221=ETH +tm_port_header_type_in_221=ETH +tm_port_header_type_out_222=ETH +tm_port_header_type_in_222=ETH +port_init_speed_221=400000 +port_init_speed_222=400000 + +#OLP port +tm_port_header_type_in_240=INJECTED_2 +tm_port_header_type_out_240=RAW + +# Set statically the region mode per region id +dtm_flow_mapping_mode_region_257=3 +dtm_flow_mapping_mode_region_258=3 +dtm_flow_mapping_mode_region_259=3 +dtm_flow_mapping_mode_region_260=3 +dtm_flow_mapping_mode_region_261=3 +dtm_flow_mapping_mode_region_262=3 +dtm_flow_mapping_mode_region_263=3 +dtm_flow_mapping_mode_region_264=3 +dtm_flow_mapping_mode_region_265=3 +dtm_flow_mapping_mode_region_266=7 +dtm_flow_mapping_mode_region_267=3 +dtm_flow_mapping_mode_region_268=3 +dtm_flow_mapping_mode_region_269=3 +dtm_flow_mapping_mode_region_270=3 +dtm_flow_mapping_mode_region_271=3 
+dtm_flow_mapping_mode_region_272=3 +dtm_flow_mapping_mode_region_273=3 +dtm_flow_mapping_mode_region_274=3 +dtm_flow_mapping_mode_region_275=3 +dtm_flow_mapping_mode_region_276=3 +dtm_flow_mapping_mode_region_277=3 +dtm_flow_mapping_mode_region_278=3 +dtm_flow_mapping_mode_region_279=3 +dtm_flow_mapping_mode_region_280=3 +dtm_flow_mapping_mode_region_281=3 +dtm_flow_mapping_mode_region_282=3 +dtm_flow_mapping_mode_region_283=3 +dtm_flow_mapping_mode_region_284=3 +dtm_flow_mapping_mode_region_285=3 +dtm_flow_mapping_mode_region_286=3 +dtm_flow_mapping_mode_region_287=3 + +## Configure number of symmetric cores each region supports ## +dtm_flow_nof_remote_cores_region_1=2 +dtm_flow_nof_remote_cores_region_2=2 +dtm_flow_nof_remote_cores_region_3=2 +dtm_flow_nof_remote_cores_region_4=2 +dtm_flow_nof_remote_cores_region_5=2 +dtm_flow_nof_remote_cores_region_6=2 +dtm_flow_nof_remote_cores_region_7=2 +dtm_flow_nof_remote_cores_region_8=2 +dtm_flow_nof_remote_cores_region_9=2 +dtm_flow_nof_remote_cores_region_10=2 +dtm_flow_nof_remote_cores_region_11=2 +dtm_flow_nof_remote_cores_region_12=2 +dtm_flow_nof_remote_cores_region_13=2 +dtm_flow_nof_remote_cores_region_14=2 +dtm_flow_nof_remote_cores_region_15=2 +dtm_flow_nof_remote_cores_region_16=2 +dtm_flow_nof_remote_cores_region_17=2 +dtm_flow_nof_remote_cores_region_18=2 +dtm_flow_nof_remote_cores_region_19=2 +dtm_flow_nof_remote_cores_region_20=2 +dtm_flow_nof_remote_cores_region_21=2 +dtm_flow_nof_remote_cores_region_22=2 +dtm_flow_nof_remote_cores_region_23=2 +dtm_flow_nof_remote_cores_region_24=2 +dtm_flow_nof_remote_cores_region_25=2 +dtm_flow_nof_remote_cores_region_26=2 +dtm_flow_nof_remote_cores_region_27=2 +dtm_flow_nof_remote_cores_region_28=2 +dtm_flow_nof_remote_cores_region_29=2 +dtm_flow_nof_remote_cores_region_30=2 +dtm_flow_nof_remote_cores_region_31=2 +dtm_flow_nof_remote_cores_region_32=2 +dtm_flow_nof_remote_cores_region_33=2 +dtm_flow_nof_remote_cores_region_34=2 +dtm_flow_nof_remote_cores_region_35=2 
+dtm_flow_nof_remote_cores_region_36=2
+dtm_flow_nof_remote_cores_region_37=2
+dtm_flow_nof_remote_cores_region_38=2
+dtm_flow_nof_remote_cores_region_39=2
+dtm_flow_nof_remote_cores_region_40=2
+dtm_flow_nof_remote_cores_region_41=2
+dtm_flow_nof_remote_cores_region_42=2
+dtm_flow_nof_remote_cores_region_43=2
+dtm_flow_nof_remote_cores_region_44=2
+dtm_flow_nof_remote_cores_region_45=2
+dtm_flow_nof_remote_cores_region_46=2
+dtm_flow_nof_remote_cores_region_47=2
+dtm_flow_nof_remote_cores_region_48=2
+dtm_flow_nof_remote_cores_region_49=2
+dtm_flow_nof_remote_cores_region_50=2
+dtm_flow_nof_remote_cores_region_51=2
+dtm_flow_nof_remote_cores_region_52=2
+dtm_flow_nof_remote_cores_region_53=2
+dtm_flow_nof_remote_cores_region_54=2
+dtm_flow_nof_remote_cores_region_55=2
+dtm_flow_nof_remote_cores_region_56=2
+dtm_flow_nof_remote_cores_region_57=2
+dtm_flow_nof_remote_cores_region_58=2
+dtm_flow_nof_remote_cores_region_59=2
+dtm_flow_nof_remote_cores_region_60=2
+
+### MDB configuration ###
+mdb_profile=balanced-exem
+
+### Descriptor-DMA configuration ###
+dma_desc_aggregator_chain_length_max=1000
+dma_desc_aggregator_buff_size_kb=100
+dma_desc_aggregator_timeout_usec=1000
+dma_desc_aggregator_enable_specific_MDB_LPM=1
+dma_desc_aggregator_enable_specific_MDB_FEC=1
+
+### Outlif configuration ###
+outlif_logical_to_physical_phase_map_1=S1
+outlif_logical_to_physical_phase_map_2=L1
+outlif_logical_to_physical_phase_map_3=XL
+outlif_logical_to_physical_phase_map_4=L2
+outlif_logical_to_physical_phase_map_5=M1
+outlif_logical_to_physical_phase_map_6=M2
+outlif_logical_to_physical_phase_map_7=M3
+outlif_logical_to_physical_phase_map_8=S2
+
+### Outlif data granularity configuration ###
+outlif_physical_phase_data_granularity_S1=60
+outlif_physical_phase_data_granularity_S2=60
+outlif_physical_phase_data_granularity_M1=60
+outlif_physical_phase_data_granularity_M2=60
+outlif_physical_phase_data_granularity_M3=60
+outlif_physical_phase_data_granularity_L1=60
+outlif_physical_phase_data_granularity_L2=60
+outlif_physical_phase_data_granularity_XL=60
+
+### Fabric configuration ###
+# Enable link-training
+port_init_cl72_sfi=1
+serdes_lane_config_cl72_auto_polarity_en=0
+serdes_lane_config_cl72_auto_polarity_en_sfi=1
+serdes_lane_config_cl72_restart_timeout_en=0
+
+#SFI speed rate
+port_init_speed_fabric=53125
+
+## Fabric transmission mode
+# Set the Connect mode to the Fabric
+# Options: FE - presence of a Fabric device (single stage)
+# SINGLE_FAP - stand-alone device
+# MESH - devices in Mesh
+# Note: If 'diag_chassis' is on, value will be overridden in dnx.soc
+# to be FE instead of SINGLE_FAP.
+fabric_connect_mode=FE
+
+fabric_logical_port_base=512
+
+# Fabric lane mapping
+lane_to_serdes_map_fabric_lane0=rx0:tx0
+lane_to_serdes_map_fabric_lane1=rx1:tx1
+lane_to_serdes_map_fabric_lane2=rx2:tx2
+lane_to_serdes_map_fabric_lane3=rx3:tx3
+lane_to_serdes_map_fabric_lane4=rx4:tx4
+lane_to_serdes_map_fabric_lane5=rx5:tx5
+lane_to_serdes_map_fabric_lane6=rx6:tx6
+lane_to_serdes_map_fabric_lane7=rx7:tx7
+lane_to_serdes_map_fabric_lane8=rx8:tx10
+lane_to_serdes_map_fabric_lane9=rx9:tx11
+lane_to_serdes_map_fabric_lane10=rx10:tx9
+lane_to_serdes_map_fabric_lane11=rx11:tx8
+lane_to_serdes_map_fabric_lane12=rx12:tx12
+lane_to_serdes_map_fabric_lane13=rx13:tx15
+lane_to_serdes_map_fabric_lane14=rx14:tx14
+lane_to_serdes_map_fabric_lane15=rx15:tx13
+lane_to_serdes_map_fabric_lane16=rx16:tx17
+lane_to_serdes_map_fabric_lane17=rx17:tx18
+lane_to_serdes_map_fabric_lane18=rx18:tx16
+lane_to_serdes_map_fabric_lane19=rx19:tx19
+lane_to_serdes_map_fabric_lane20=rx20:tx21
+lane_to_serdes_map_fabric_lane21=rx21:tx23
+lane_to_serdes_map_fabric_lane22=rx22:tx20
+lane_to_serdes_map_fabric_lane23=rx23:tx22
+lane_to_serdes_map_fabric_lane24=rx24:tx26
+lane_to_serdes_map_fabric_lane25=rx25:tx24
+lane_to_serdes_map_fabric_lane26=rx26:tx25
+lane_to_serdes_map_fabric_lane27=rx27:tx27
+lane_to_serdes_map_fabric_lane28=rx28:tx31
+lane_to_serdes_map_fabric_lane29=rx29:tx30 +lane_to_serdes_map_fabric_lane30=rx30:tx29 +lane_to_serdes_map_fabric_lane31=rx31:tx28 +lane_to_serdes_map_fabric_lane32=rx32:tx32 +lane_to_serdes_map_fabric_lane33=rx33:tx33 +lane_to_serdes_map_fabric_lane34=rx34:tx34 +lane_to_serdes_map_fabric_lane35=rx35:tx35 +lane_to_serdes_map_fabric_lane36=rx36:tx36 +lane_to_serdes_map_fabric_lane37=rx37:tx37 +lane_to_serdes_map_fabric_lane38=rx38:tx38 +lane_to_serdes_map_fabric_lane39=rx39:tx39 +lane_to_serdes_map_fabric_lane40=rx40:tx43 +lane_to_serdes_map_fabric_lane41=rx41:tx42 +lane_to_serdes_map_fabric_lane42=rx42:tx41 +lane_to_serdes_map_fabric_lane43=rx43:tx40 +lane_to_serdes_map_fabric_lane44=rx44:tx47 +lane_to_serdes_map_fabric_lane45=rx45:tx46 +lane_to_serdes_map_fabric_lane46=rx46:tx45 +lane_to_serdes_map_fabric_lane47=rx47:tx44 +lane_to_serdes_map_fabric_lane48=rx48:tx48 +lane_to_serdes_map_fabric_lane49=rx49:tx49 +lane_to_serdes_map_fabric_lane50=rx50:tx50 +lane_to_serdes_map_fabric_lane51=rx51:tx51 +lane_to_serdes_map_fabric_lane52=rx52:tx52 +lane_to_serdes_map_fabric_lane53=rx53:tx53 +lane_to_serdes_map_fabric_lane54=rx54:tx54 +lane_to_serdes_map_fabric_lane55=rx55:tx55 +lane_to_serdes_map_fabric_lane56=rx56:tx59 +lane_to_serdes_map_fabric_lane57=rx57:tx58 +lane_to_serdes_map_fabric_lane58=rx58:tx57 +lane_to_serdes_map_fabric_lane59=rx59:tx56 +lane_to_serdes_map_fabric_lane60=rx60:tx63 +lane_to_serdes_map_fabric_lane61=rx61:tx62 +lane_to_serdes_map_fabric_lane62=rx62:tx61 +lane_to_serdes_map_fabric_lane63=rx63:tx60 +lane_to_serdes_map_fabric_lane64=rx64:tx64 +lane_to_serdes_map_fabric_lane65=rx65:tx65 +lane_to_serdes_map_fabric_lane66=rx66:tx66 +lane_to_serdes_map_fabric_lane67=rx67:tx67 +lane_to_serdes_map_fabric_lane68=rx68:tx68 +lane_to_serdes_map_fabric_lane69=rx69:tx69 +lane_to_serdes_map_fabric_lane70=rx70:tx70 +lane_to_serdes_map_fabric_lane71=rx71:tx71 +lane_to_serdes_map_fabric_lane72=rx72:tx75 +lane_to_serdes_map_fabric_lane73=rx73:tx74 
+lane_to_serdes_map_fabric_lane74=rx74:tx73 +lane_to_serdes_map_fabric_lane75=rx75:tx72 +lane_to_serdes_map_fabric_lane76=rx76:tx79 +lane_to_serdes_map_fabric_lane77=rx77:tx78 +lane_to_serdes_map_fabric_lane78=rx78:tx77 +lane_to_serdes_map_fabric_lane79=rx79:tx76 +lane_to_serdes_map_fabric_lane80=rx80:tx80 +lane_to_serdes_map_fabric_lane81=rx81:tx81 +lane_to_serdes_map_fabric_lane82=rx82:tx83 +lane_to_serdes_map_fabric_lane83=rx83:tx82 +lane_to_serdes_map_fabric_lane84=rx84:tx85 +lane_to_serdes_map_fabric_lane85=rx85:tx86 +lane_to_serdes_map_fabric_lane86=rx86:tx84 +lane_to_serdes_map_fabric_lane87=rx87:tx87 +lane_to_serdes_map_fabric_lane88=rx88:tx90 +lane_to_serdes_map_fabric_lane89=rx89:tx88 +lane_to_serdes_map_fabric_lane90=rx90:tx91 +lane_to_serdes_map_fabric_lane91=rx91:tx89 +lane_to_serdes_map_fabric_lane92=rx92:tx93 +lane_to_serdes_map_fabric_lane93=rx93:tx92 +lane_to_serdes_map_fabric_lane94=rx94:tx94 +lane_to_serdes_map_fabric_lane95=rx95:tx95 +lane_to_serdes_map_fabric_lane96=rx96:tx96 +lane_to_serdes_map_fabric_lane97=rx97:tx97 +lane_to_serdes_map_fabric_lane98=rx98:tx98 +lane_to_serdes_map_fabric_lane99=rx99:tx99 +lane_to_serdes_map_fabric_lane100=rx100:tx100 +lane_to_serdes_map_fabric_lane101=rx101:tx101 +lane_to_serdes_map_fabric_lane102=rx102:tx102 +lane_to_serdes_map_fabric_lane103=rx103:tx103 +lane_to_serdes_map_fabric_lane104=rx104:tx105 +lane_to_serdes_map_fabric_lane105=rx105:tx106 +lane_to_serdes_map_fabric_lane106=rx106:tx107 +lane_to_serdes_map_fabric_lane107=rx107:tx104 +lane_to_serdes_map_fabric_lane108=rx108:tx111 +lane_to_serdes_map_fabric_lane109=rx109:tx109 +lane_to_serdes_map_fabric_lane110=rx110:tx110 +lane_to_serdes_map_fabric_lane111=rx111:tx108 +lane_to_serdes_map_fabric_lane112=rx112:tx114 +lane_to_serdes_map_fabric_lane113=rx113:tx113 +lane_to_serdes_map_fabric_lane114=rx114:tx112 +lane_to_serdes_map_fabric_lane115=rx115:tx115 +lane_to_serdes_map_fabric_lane116=rx116:tx117 +lane_to_serdes_map_fabric_lane117=rx117:tx116 
+lane_to_serdes_map_fabric_lane118=rx118:tx119 +lane_to_serdes_map_fabric_lane119=rx119:tx118 +lane_to_serdes_map_fabric_lane120=rx120:tx123 +lane_to_serdes_map_fabric_lane121=rx121:tx120 +lane_to_serdes_map_fabric_lane122=rx122:tx122 +lane_to_serdes_map_fabric_lane123=rx123:tx121 +lane_to_serdes_map_fabric_lane124=rx124:tx127 +lane_to_serdes_map_fabric_lane125=rx125:tx125 +lane_to_serdes_map_fabric_lane126=rx126:tx124 +lane_to_serdes_map_fabric_lane127=rx127:tx126 +lane_to_serdes_map_fabric_lane128=rx128:tx128 +lane_to_serdes_map_fabric_lane129=rx129:tx129 +lane_to_serdes_map_fabric_lane130=rx130:tx130 +lane_to_serdes_map_fabric_lane131=rx131:tx131 +lane_to_serdes_map_fabric_lane132=rx132:tx132 +lane_to_serdes_map_fabric_lane133=rx133:tx133 +lane_to_serdes_map_fabric_lane134=rx134:tx134 +lane_to_serdes_map_fabric_lane135=rx135:tx135 +lane_to_serdes_map_fabric_lane136=rx136:tx139 +lane_to_serdes_map_fabric_lane137=rx137:tx138 +lane_to_serdes_map_fabric_lane138=rx138:tx137 +lane_to_serdes_map_fabric_lane139=rx139:tx136 +lane_to_serdes_map_fabric_lane140=rx140:tx140 +lane_to_serdes_map_fabric_lane141=rx141:tx142 +lane_to_serdes_map_fabric_lane142=rx142:tx141 +lane_to_serdes_map_fabric_lane143=rx143:tx143 +lane_to_serdes_map_fabric_lane144=rx144:tx144 +lane_to_serdes_map_fabric_lane145=rx145:tx145 +lane_to_serdes_map_fabric_lane146=rx146:tx146 +lane_to_serdes_map_fabric_lane147=rx147:tx147 +lane_to_serdes_map_fabric_lane148=rx148:tx148 +lane_to_serdes_map_fabric_lane149=rx149:tx149 +lane_to_serdes_map_fabric_lane150=rx150:tx150 +lane_to_serdes_map_fabric_lane151=rx151:tx151 +lane_to_serdes_map_fabric_lane152=rx152:tx155 +lane_to_serdes_map_fabric_lane153=rx153:tx154 +lane_to_serdes_map_fabric_lane154=rx154:tx153 +lane_to_serdes_map_fabric_lane155=rx155:tx152 +lane_to_serdes_map_fabric_lane156=rx156:tx159 +lane_to_serdes_map_fabric_lane157=rx157:tx158 +lane_to_serdes_map_fabric_lane158=rx158:tx157 +lane_to_serdes_map_fabric_lane159=rx159:tx156 
+lane_to_serdes_map_fabric_lane160=rx160:tx160 +lane_to_serdes_map_fabric_lane161=rx161:tx161 +lane_to_serdes_map_fabric_lane162=rx162:tx162 +lane_to_serdes_map_fabric_lane163=rx163:tx163 +lane_to_serdes_map_fabric_lane164=rx164:tx164 +lane_to_serdes_map_fabric_lane165=rx165:tx165 +lane_to_serdes_map_fabric_lane166=rx166:tx166 +lane_to_serdes_map_fabric_lane167=rx167:tx167 +lane_to_serdes_map_fabric_lane168=rx168:tx171 +lane_to_serdes_map_fabric_lane169=rx169:tx170 +lane_to_serdes_map_fabric_lane170=rx170:tx169 +lane_to_serdes_map_fabric_lane171=rx171:tx168 +lane_to_serdes_map_fabric_lane172=rx172:tx175 +lane_to_serdes_map_fabric_lane173=rx173:tx174 +lane_to_serdes_map_fabric_lane174=rx174:tx173 +lane_to_serdes_map_fabric_lane175=rx175:tx172 +lane_to_serdes_map_fabric_lane176=rx176:tx176 +lane_to_serdes_map_fabric_lane177=rx177:tx177 +lane_to_serdes_map_fabric_lane178=rx178:tx179 +lane_to_serdes_map_fabric_lane179=rx179:tx178 +lane_to_serdes_map_fabric_lane180=rx180:tx181 +lane_to_serdes_map_fabric_lane181=rx181:tx182 +lane_to_serdes_map_fabric_lane182=rx182:tx180 +lane_to_serdes_map_fabric_lane183=rx183:tx183 +lane_to_serdes_map_fabric_lane184=rx184:tx186 +lane_to_serdes_map_fabric_lane185=rx185:tx184 +lane_to_serdes_map_fabric_lane186=rx186:tx185 +lane_to_serdes_map_fabric_lane187=rx187:tx187 +lane_to_serdes_map_fabric_lane188=rx188:tx188 +lane_to_serdes_map_fabric_lane189=rx189:tx189 +lane_to_serdes_map_fabric_lane190=rx190:tx190 +lane_to_serdes_map_fabric_lane191=rx191:tx191 + +# +##Protocol trap look-up mode: +# Options: IN_LIF - Look-ups in the profile table are done by IN-LIF +# IN_PORT - Look-ups in the profile table are done by IN-PORT +protocol_traps_mode=IN_LIF + +# access definitions +schan_intr_enable=0 +tdma_intr_enable=0 +tslam_intr_enable=0 +miim_intr_enable=0 +schan_timeout_usec=300000 +tdma_timeout_usec=1000000 +tslam_timeout_usec=1000000 + +### Interrupts +appl_enable_intr_init=1 +polled_irq_mode=1 +# reduce CPU load, configure delay 100ms 
+polled_irq_delay=1000 + +# reduce the CPU load over adapter (caused by counter thread) +bcm_stat_interval=1000 + +# shadow memory +mem_cache_enable_ecc=1 +mem_cache_enable_parity=1 + +# serdes_nif/fabric_clk_freq_in/out configuration +serdes_nif_clk_freq_in=2 +serdes_nif_clk_freq_out=1 +serdes_fabric_clk_freq_in=2 +serdes_fabric_clk_freq_out=1 + +dport_map_direct=1 + +rif_id_max=0x6000 + +phy_rx_polarity_flip_phy0=0 +phy_rx_polarity_flip_phy1=0 +phy_rx_polarity_flip_phy2=0 +phy_rx_polarity_flip_phy3=0 +phy_rx_polarity_flip_phy4=0 +phy_rx_polarity_flip_phy5=0 +phy_rx_polarity_flip_phy6=0 +phy_rx_polarity_flip_phy7=0 +phy_rx_polarity_flip_phy8=1 +phy_rx_polarity_flip_phy9=1 +phy_rx_polarity_flip_phy10=0 +phy_rx_polarity_flip_phy11=0 +phy_rx_polarity_flip_phy12=1 +phy_rx_polarity_flip_phy13=1 +phy_rx_polarity_flip_phy14=0 +phy_rx_polarity_flip_phy15=1 +phy_rx_polarity_flip_phy16=0 +phy_rx_polarity_flip_phy17=0 +phy_rx_polarity_flip_phy18=0 +phy_rx_polarity_flip_phy19=0 +phy_rx_polarity_flip_phy20=0 +phy_rx_polarity_flip_phy21=0 +phy_rx_polarity_flip_phy22=0 +phy_rx_polarity_flip_phy23=0 +phy_rx_polarity_flip_phy24=0 +phy_rx_polarity_flip_phy25=0 +phy_rx_polarity_flip_phy26=0 +phy_rx_polarity_flip_phy27=0 +phy_rx_polarity_flip_phy28=0 +phy_rx_polarity_flip_phy29=0 +phy_rx_polarity_flip_phy30=0 +phy_rx_polarity_flip_phy31=0 +phy_rx_polarity_flip_phy32=0 +phy_rx_polarity_flip_phy33=0 +phy_rx_polarity_flip_phy34=0 +phy_rx_polarity_flip_phy35=0 +phy_rx_polarity_flip_phy36=0 +phy_rx_polarity_flip_phy37=0 +phy_rx_polarity_flip_phy38=0 +phy_rx_polarity_flip_phy39=0 +phy_rx_polarity_flip_phy40=0 +phy_rx_polarity_flip_phy41=0 +phy_rx_polarity_flip_phy42=0 +phy_rx_polarity_flip_phy43=0 +phy_rx_polarity_flip_phy44=0 +phy_rx_polarity_flip_phy45=0 +phy_rx_polarity_flip_phy46=0 +phy_rx_polarity_flip_phy47=0 +phy_rx_polarity_flip_phy48=0 +phy_rx_polarity_flip_phy49=0 +phy_rx_polarity_flip_phy50=0 +phy_rx_polarity_flip_phy51=0 +phy_rx_polarity_flip_phy52=0 
+phy_rx_polarity_flip_phy53=0 +phy_rx_polarity_flip_phy54=0 +phy_rx_polarity_flip_phy55=0 +phy_rx_polarity_flip_phy56=0 +phy_rx_polarity_flip_phy57=0 +phy_rx_polarity_flip_phy58=0 +phy_rx_polarity_flip_phy59=0 +phy_rx_polarity_flip_phy60=0 +phy_rx_polarity_flip_phy61=0 +phy_rx_polarity_flip_phy62=0 +phy_rx_polarity_flip_phy63=0 +phy_rx_polarity_flip_phy64=0 +phy_rx_polarity_flip_phy65=0 +phy_rx_polarity_flip_phy66=0 +phy_rx_polarity_flip_phy67=0 +phy_rx_polarity_flip_phy68=0 +phy_rx_polarity_flip_phy69=0 +phy_rx_polarity_flip_phy70=0 +phy_rx_polarity_flip_phy71=0 +phy_rx_polarity_flip_phy72=1 +phy_rx_polarity_flip_phy73=1 +phy_rx_polarity_flip_phy74=0 +phy_rx_polarity_flip_phy75=1 +phy_rx_polarity_flip_phy76=1 +phy_rx_polarity_flip_phy77=1 +phy_rx_polarity_flip_phy78=1 +phy_rx_polarity_flip_phy79=1 +phy_rx_polarity_flip_phy80=0 +phy_rx_polarity_flip_phy81=0 +phy_rx_polarity_flip_phy82=0 +phy_rx_polarity_flip_phy83=0 +phy_rx_polarity_flip_phy84=0 +phy_rx_polarity_flip_phy85=0 +phy_rx_polarity_flip_phy86=0 +phy_rx_polarity_flip_phy87=0 +phy_rx_polarity_flip_phy88=0 +phy_rx_polarity_flip_phy89=0 +phy_rx_polarity_flip_phy90=0 +phy_rx_polarity_flip_phy91=0 +phy_rx_polarity_flip_phy92=0 +phy_rx_polarity_flip_phy93=0 +phy_rx_polarity_flip_phy94=0 +phy_rx_polarity_flip_phy95=0 +phy_rx_polarity_flip_phy96=0 +phy_rx_polarity_flip_phy97=0 +phy_rx_polarity_flip_phy98=0 +phy_rx_polarity_flip_phy99=0 +phy_rx_polarity_flip_phy100=0 +phy_rx_polarity_flip_phy101=0 +phy_rx_polarity_flip_phy102=0 +phy_rx_polarity_flip_phy103=0 +phy_rx_polarity_flip_phy104=0 +phy_rx_polarity_flip_phy105=0 +phy_rx_polarity_flip_phy106=0 +phy_rx_polarity_flip_phy107=0 +phy_rx_polarity_flip_phy108=0 +phy_rx_polarity_flip_phy109=0 +phy_rx_polarity_flip_phy110=0 +phy_rx_polarity_flip_phy111=0 +phy_rx_polarity_flip_phy112=0 +phy_rx_polarity_flip_phy113=0 +phy_rx_polarity_flip_phy114=0 +phy_rx_polarity_flip_phy115=0 +phy_rx_polarity_flip_phy116=0 +phy_rx_polarity_flip_phy117=0 +phy_rx_polarity_flip_phy118=0 
+phy_rx_polarity_flip_phy119=0 +phy_rx_polarity_flip_phy120=0 +phy_rx_polarity_flip_phy121=0 +phy_rx_polarity_flip_phy122=0 +phy_rx_polarity_flip_phy123=0 +phy_rx_polarity_flip_phy124=0 +phy_rx_polarity_flip_phy125=0 +phy_rx_polarity_flip_phy126=0 +phy_rx_polarity_flip_phy127=0 +phy_rx_polarity_flip_phy128=0 +phy_rx_polarity_flip_phy129=0 +phy_rx_polarity_flip_phy130=0 +phy_rx_polarity_flip_phy131=0 +phy_rx_polarity_flip_phy132=0 +phy_rx_polarity_flip_phy133=0 +phy_rx_polarity_flip_phy134=0 +phy_rx_polarity_flip_phy135=0 +phy_rx_polarity_flip_phy136=0 +phy_rx_polarity_flip_phy137=0 +phy_rx_polarity_flip_phy138=0 +phy_rx_polarity_flip_phy139=0 +phy_rx_polarity_flip_phy140=0 +phy_rx_polarity_flip_phy141=0 +phy_rx_polarity_flip_phy142=0 +phy_rx_polarity_flip_phy143=0 +phy_tx_polarity_flip_phy0=1 +phy_tx_polarity_flip_phy1=1 +phy_tx_polarity_flip_phy2=1 +phy_tx_polarity_flip_phy3=1 +phy_tx_polarity_flip_phy4=1 +phy_tx_polarity_flip_phy5=1 +phy_tx_polarity_flip_phy6=1 +phy_tx_polarity_flip_phy7=1 +phy_tx_polarity_flip_phy8=1 +phy_tx_polarity_flip_phy9=1 +phy_tx_polarity_flip_phy10=1 +phy_tx_polarity_flip_phy11=1 +phy_tx_polarity_flip_phy12=1 +phy_tx_polarity_flip_phy13=1 +phy_tx_polarity_flip_phy14=1 +phy_tx_polarity_flip_phy15=1 +phy_tx_polarity_flip_phy16=1 +phy_tx_polarity_flip_phy17=1 +phy_tx_polarity_flip_phy18=1 +phy_tx_polarity_flip_phy19=1 +phy_tx_polarity_flip_phy20=1 +phy_tx_polarity_flip_phy21=1 +phy_tx_polarity_flip_phy22=1 +phy_tx_polarity_flip_phy23=1 +phy_tx_polarity_flip_phy24=1 +phy_tx_polarity_flip_phy25=1 +phy_tx_polarity_flip_phy26=1 +phy_tx_polarity_flip_phy27=1 +phy_tx_polarity_flip_phy28=1 +phy_tx_polarity_flip_phy29=1 +phy_tx_polarity_flip_phy30=1 +phy_tx_polarity_flip_phy31=1 +phy_tx_polarity_flip_phy32=1 +phy_tx_polarity_flip_phy33=1 +phy_tx_polarity_flip_phy34=1 +phy_tx_polarity_flip_phy35=1 +phy_tx_polarity_flip_phy36=1 +phy_tx_polarity_flip_phy37=1 +phy_tx_polarity_flip_phy38=1 +phy_tx_polarity_flip_phy39=1 +phy_tx_polarity_flip_phy40=1 
+phy_tx_polarity_flip_phy41=1 +phy_tx_polarity_flip_phy42=1 +phy_tx_polarity_flip_phy43=1 +phy_tx_polarity_flip_phy44=1 +phy_tx_polarity_flip_phy45=1 +phy_tx_polarity_flip_phy46=1 +phy_tx_polarity_flip_phy47=1 +phy_tx_polarity_flip_phy48=1 +phy_tx_polarity_flip_phy49=1 +phy_tx_polarity_flip_phy50=1 +phy_tx_polarity_flip_phy51=1 +phy_tx_polarity_flip_phy52=1 +phy_tx_polarity_flip_phy53=1 +phy_tx_polarity_flip_phy54=1 +phy_tx_polarity_flip_phy55=1 +phy_tx_polarity_flip_phy56=1 +phy_tx_polarity_flip_phy57=1 +phy_tx_polarity_flip_phy58=1 +phy_tx_polarity_flip_phy59=1 +phy_tx_polarity_flip_phy60=1 +phy_tx_polarity_flip_phy61=1 +phy_tx_polarity_flip_phy62=1 +phy_tx_polarity_flip_phy63=1 +phy_tx_polarity_flip_phy64=1 +phy_tx_polarity_flip_phy65=1 +phy_tx_polarity_flip_phy66=1 +phy_tx_polarity_flip_phy67=1 +phy_tx_polarity_flip_phy68=1 +phy_tx_polarity_flip_phy69=1 +phy_tx_polarity_flip_phy70=1 +phy_tx_polarity_flip_phy71=1 +phy_tx_polarity_flip_phy72=0 +phy_tx_polarity_flip_phy73=0 +phy_tx_polarity_flip_phy74=0 +phy_tx_polarity_flip_phy75=0 +phy_tx_polarity_flip_phy76=0 +phy_tx_polarity_flip_phy77=0 +phy_tx_polarity_flip_phy78=0 +phy_tx_polarity_flip_phy79=0 +phy_tx_polarity_flip_phy80=0 +phy_tx_polarity_flip_phy81=0 +phy_tx_polarity_flip_phy82=0 +phy_tx_polarity_flip_phy83=0 +phy_tx_polarity_flip_phy84=0 +phy_tx_polarity_flip_phy85=0 +phy_tx_polarity_flip_phy86=0 +phy_tx_polarity_flip_phy87=0 +phy_tx_polarity_flip_phy88=1 +phy_tx_polarity_flip_phy89=1 +phy_tx_polarity_flip_phy90=1 +phy_tx_polarity_flip_phy91=1 +phy_tx_polarity_flip_phy92=1 +phy_tx_polarity_flip_phy93=1 +phy_tx_polarity_flip_phy94=1 +phy_tx_polarity_flip_phy95=1 +phy_tx_polarity_flip_phy96=1 +phy_tx_polarity_flip_phy97=1 +phy_tx_polarity_flip_phy98=1 +phy_tx_polarity_flip_phy99=1 +phy_tx_polarity_flip_phy100=1 +phy_tx_polarity_flip_phy101=1 +phy_tx_polarity_flip_phy102=1 +phy_tx_polarity_flip_phy103=1 +phy_tx_polarity_flip_phy104=1 +phy_tx_polarity_flip_phy105=1 +phy_tx_polarity_flip_phy106=1 
+phy_tx_polarity_flip_phy107=1 +phy_tx_polarity_flip_phy108=1 +phy_tx_polarity_flip_phy109=1 +phy_tx_polarity_flip_phy110=1 +phy_tx_polarity_flip_phy111=1 +phy_tx_polarity_flip_phy112=1 +phy_tx_polarity_flip_phy113=1 +phy_tx_polarity_flip_phy114=1 +phy_tx_polarity_flip_phy115=1 +phy_tx_polarity_flip_phy116=1 +phy_tx_polarity_flip_phy117=1 +phy_tx_polarity_flip_phy118=1 +phy_tx_polarity_flip_phy119=1 +phy_tx_polarity_flip_phy120=1 +phy_tx_polarity_flip_phy121=1 +phy_tx_polarity_flip_phy122=1 +phy_tx_polarity_flip_phy123=1 +phy_tx_polarity_flip_phy124=1 +phy_tx_polarity_flip_phy125=1 +phy_tx_polarity_flip_phy126=1 +phy_tx_polarity_flip_phy127=1 +phy_tx_polarity_flip_phy128=1 +phy_tx_polarity_flip_phy129=1 +phy_tx_polarity_flip_phy130=1 +phy_tx_polarity_flip_phy131=1 +phy_tx_polarity_flip_phy132=1 +phy_tx_polarity_flip_phy133=1 +phy_tx_polarity_flip_phy134=1 +phy_tx_polarity_flip_phy135=1 +phy_tx_polarity_flip_phy136=1 +phy_tx_polarity_flip_phy137=1 +phy_tx_polarity_flip_phy138=1 +phy_tx_polarity_flip_phy139=1 +phy_tx_polarity_flip_phy140=1 +phy_tx_polarity_flip_phy141=1 +phy_tx_polarity_flip_phy142=1 +phy_tx_polarity_flip_phy143=1 + +serdes_tx_taps_1=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_2=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_3=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_4=nrz:-5:78:-22:0:0:0 +serdes_tx_taps_5=nrz:-5:80:-23:0:0:0 +serdes_tx_taps_6=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_7=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_8=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_9=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_10=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_11=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_12=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_13=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_14=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_15=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_16=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_17=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_18=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_19=nrz:-5:78:-22:0:0:0 +serdes_tx_taps_20=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_21=nrz:-4:75:-21:0:0:0 +serdes_tx_taps_22=nrz:-7:85:-25:0:0:0 
+serdes_tx_taps_23=nrz:-5:78:-22:0:0:0 +serdes_tx_taps_24=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_25=nrz:-5:78:-22:0:0:0 +serdes_tx_taps_26=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_27=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_28=nrz:-5:80:-23:0:0:0 +serdes_tx_taps_29=nrz:-5:78:-22:0:0:0 +serdes_tx_taps_30=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_31=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_32=nrz:-5:80:-23:0:0:0 +serdes_tx_taps_33=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_34=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_35=nrz:-5:80:-23:0:0:0 +serdes_tx_taps_36=nrz:-7:85:-25:0:0:0 + +serdes_tx_taps_1=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_2=pam4:-16:141:-5:3:-2:-3 +serdes_tx_taps_3=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_4=pam4:-16:141:-5:3:-2:-3 +serdes_tx_taps_5=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_6=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_7=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_8=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_9=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_10=pam4:-14:136:-14:2:0:-4 +serdes_tx_taps_11=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_12=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_13=pam4:-14:136:-14:2:0:-4 +serdes_tx_taps_14=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_15=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_16=pam4:-14:136:-14:2:0:-4 +serdes_tx_taps_17=pam4:-14:136:-14:2:0:-4 +serdes_tx_taps_18=pam4:-16:137:-12:2:0:-3 +serdes_tx_taps_19=pam4:-17:144:-1:2:-3:-3 +serdes_tx_taps_20=pam4:-16:141:-5:3:-2:-3 +serdes_tx_taps_21=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_22=pam4:-16:141:-5:3:-2:-3 +serdes_tx_taps_23=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_24=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_25=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_26=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_27=pam4:-16:139:-7:3:-2:-3 +serdes_tx_taps_28=pam4:-14:136:-14:2:0:-4 +serdes_tx_taps_29=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_30=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_31=pam4:-14:136:-14:2:0:-4 +serdes_tx_taps_32=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_33=pam4:-16:148:-3:2:0:-1 +serdes_tx_taps_34=pam4:-14:136:-14:2:0:-4 
+serdes_tx_taps_35=pam4:-16:141:-5:3:-2:-3 +serdes_tx_taps_36=pam4:-16:137:-12:2:0:-3 + +xflow_macsec_secure_chan_to_num_secure_assoc_encrypt=2 +xflow_macsec_secure_chan_to_num_secure_assoc_decrypt=4 diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/pg_profile_lookup.ini b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/pg_profile_lookup.ini new file mode 100644 index 000000000000..e8289ab03112 --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/pg_profile_lookup.ini @@ -0,0 +1,17 @@ +# PG lossless profiles. +# speed cable size xon xoff threshold xon_offset + 40000 300m 1280 2560 69632 0 1280 + 100000 300m 1280 2560 110592 0 1280 + 400000 300m 1280 2560 315392 0 1280 + 40000 1000m 1280 2560 114688 0 1280 + 100000 1000m 1280 2560 225280 0 1280 + 400000 1000m 1280 2560 778240 0 1280 + 40000 2000m 1280 2560 184320 0 1280 + 100000 2000m 1280 2560 393216 0 1280 + 400000 2000m 1280 2560 1445888 0 1280 + 40000 80000m 1280 2560 5369856 0 1280 + 100000 80000m 1280 2560 13357056 0 1280 + 400000 80000m 1280 2560 53305344 0 1280 + 40000 120000m 1280 2560 8028160 0 1280 + 100000 120000m 1280 2560 20004864 0 1280 + 400000 120000m 1280 2560 79900672 0 1280 diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/port_config.ini b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/port_config.ini new file mode 100644 index 000000000000..bfdbc47fcf25 --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/port_config.ini @@ -0,0 +1,21 @@ +# name lanes alias index role speed asic_port_name coreId corePortId numVoq +Ethernet144 72,73,74,75,76,77,78,79 Ethernet19/1 21 Ext 400000 Eth0-ASIC1 1 1 8 +Ethernet152 80,81,82,83,84,85,86,87 Ethernet20/1 22 Ext 400000 Eth8-ASIC1 1 2 8 +Ethernet160 88,89,90,91,92,93,94,95 Ethernet21/1 23 Ext 400000 Eth16-ASIC1 1 3 8 +Ethernet168 96,97,98,99,100,101,102,103 Ethernet22/1 24 Ext 
400000 Eth24-ASIC1 1 4 8 +Ethernet176 104,105,106,107,108,109,110,111 Ethernet23/1 25 Ext 400000 Eth32-ASIC1 1 5 8 +Ethernet184 112,113,114,115,116,117,118,119 Ethernet24/1 26 Ext 400000 Eth40-ASIC1 1 6 8 +Ethernet192 120,121,122,123,124,125,126,127 Ethernet25/1 27 Ext 400000 Eth48-ASIC1 1 7 8 +Ethernet200 128,129,130,131,132,133,134,135 Ethernet26/1 28 Ext 400000 Eth56-ASIC1 1 8 8 +Ethernet208 136,137,138,139,140,141,142,143 Ethernet27/1 29 Ext 400000 Eth64-ASIC1 1 9 8 +Ethernet216 64,65,66,67,68,69,70,71 Ethernet28/1 30 Ext 400000 Eth72-ASIC1 0 10 8 +Ethernet224 56,57,58,59,60,61,62,63 Ethernet29/1 31 Ext 400000 Eth80-ASIC1 0 11 8 +Ethernet232 48,49,50,51,52,53,54,55 Ethernet30/1 32 Ext 400000 Eth88-ASIC1 0 12 8 +Ethernet240 40,41,42,43,44,45,46,47 Ethernet31/1 33 Ext 400000 Eth96-ASIC1 0 13 8 +Ethernet248 32,33,34,35,36,37,38,39 Ethernet32/1 34 Ext 400000 Eth104-ASIC1 0 14 8 +Ethernet256 24,25,26,27,28,29,30,31 Ethernet33/1 35 Ext 400000 Eth112-ASIC1 0 15 8 +Ethernet264 16,17,18,19,20,21,22,23 Ethernet34/1 36 Ext 400000 Eth120-ASIC1 0 16 8 +Ethernet272 8,9,10,11,12,13,14,15 Ethernet35/1 37 Ext 400000 Eth128-ASIC1 0 17 8 +Ethernet280 0,1,2,3,4,5,6,7 Ethernet36/1 38 Ext 400000 Eth136-ASIC1 0 18 8 +Ethernet-Rec1 221 Recirc0/0 39 Rec 400000 Rcy0-ASIC1 0 221 8 +Ethernet-IB1 222 Recirc0/1 40 Inb 400000 Rcy1-ASIC1 1 222 8 diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/qos.json.j2 b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/qos.json.j2 new file mode 100644 index 000000000000..3e548325ea30 --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/qos.json.j2 @@ -0,0 +1 @@ +{%- include 'qos_config.j2' %} diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/sai.profile b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/sai.profile new file mode 120000 index 000000000000..1e172f3e0765 --- /dev/null +++ 
b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-D36/1/sai.profile @@ -0,0 +1 @@ +../0/sai.profile \ No newline at end of file diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36DM2-D36 b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36DM2-D36 new file mode 120000 index 000000000000..e029ef37b78a --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36DM2-D36 @@ -0,0 +1 @@ +Arista-7800R3A-36D2-D36 \ No newline at end of file diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36P-P36 b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36P-P36 new file mode 120000 index 000000000000..e029ef37b78a --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36P-P36 @@ -0,0 +1 @@ +Arista-7800R3A-36D2-D36 \ No newline at end of file diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3AK-36D2-D36 b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3AK-36D2-D36 new file mode 120000 index 000000000000..e029ef37b78a --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3AK-36D2-D36 @@ -0,0 +1 @@ +Arista-7800R3A-36D2-D36 \ No newline at end of file diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3AK-36DM2-D36 b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3AK-36DM2-D36 new file mode 120000 index 000000000000..e029ef37b78a --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3AK-36DM2-D36 @@ -0,0 +1 @@ +Arista-7800R3A-36D2-D36 \ No newline at end of file From ea101a90d5db7e3558756d0820e53538456b7bc7 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Fri, 14 Oct 2022 00:11:51 -0400 Subject: [PATCH 052/174] [device/delta] Mitigation for command injection vulnerability (#11865) #### Why I did it `os` execution functions are not secure against maliciously constructed input. 
#### How I did it Use `subprocess` module --- .../delta/x86_64-delta_ag5648-r0/plugins/psuutil.py | 9 +++++---- .../x86_64-delta_ag9032v1-r0/plugins/psuutil.py | 9 +++++---- .../x86_64-delta_ag9032v2a-r0/plugins/psuutil.py | 11 ++++++----- .../delta/x86_64-delta_ag9064-r0/plugins/psuutil.py | 11 ++++++----- .../delta/x86_64-delta_agc032-r0/plugins/psuutil.py | 9 +++++---- .../x86_64-delta_et-c032if-r0/plugins/psuutil.py | 13 +++++++------ 6 files changed, 34 insertions(+), 28 deletions(-) diff --git a/device/delta/x86_64-delta_ag5648-r0/plugins/psuutil.py b/device/delta/x86_64-delta_ag5648-r0/plugins/psuutil.py index 8aa080166f8a..98112753a0aa 100644 --- a/device/delta/x86_64-delta_ag5648-r0/plugins/psuutil.py +++ b/device/delta/x86_64-delta_ag5648-r0/plugins/psuutil.py @@ -1,4 +1,4 @@ -import os.path +import subprocess try: from sonic_psu.psu_base import PsuBase @@ -14,7 +14,7 @@ def __init__(self): self.psu_path = "/sys/bus/i2c/devices/6-00{}/" self.psu_oper_status = "in1_input" - self.psu_presence = "i2cget -y 6 0x{} 0x00" + self.psu_presence = ["i2cget", "-y", "6", "", "0x00"] def get_num_psus(self): """ @@ -44,9 +44,10 @@ def get_psu_presence(self, index): return False Base_bus_number = 49 status = 0 + self.psu_presence[3] = "0x" + str(index + Base_bus_number) try: - p = os.popen(self.psu_presence.format(index + Base_bus_number) + "> /dev/null 2>&1") - if p.readline() != None: + p = subprocess.Popen(self.psu_presence, stdout=subprocess.PIPE, universal_newlines=True) + if p.stdout.readline() != None: status = 1 p.close() except IOError: diff --git a/device/delta/x86_64-delta_ag9032v1-r0/plugins/psuutil.py b/device/delta/x86_64-delta_ag9032v1-r0/plugins/psuutil.py index 391efbd2401f..0659d051de01 100644 --- a/device/delta/x86_64-delta_ag9032v1-r0/plugins/psuutil.py +++ b/device/delta/x86_64-delta_ag9032v1-r0/plugins/psuutil.py @@ -1,4 +1,4 @@ -import os.path +import subprocess try: from sonic_psu.psu_base import PsuBase @@ -14,7 +14,7 @@ def __init__(self): 
self.psu_path = "/sys/bus/i2c/devices/{}-0058/" self.psu_oper_status = "in1_input" - self.psu_presence = "i2cget -y {} 0x50 0x00" + self.psu_presence = ["i2cget", "-y", "", "0x50", "0x00"] def get_num_psus(self): """ @@ -45,9 +45,10 @@ def get_psu_presence(self, index): return False Base_bus_number = 39 status = 0 + self.psu_presence[2] = str(index + Base_bus_number) try: - p = os.popen(self.psu_presence.format(index + Base_bus_number) + "> /dev/null 2>&1") - if p.readline() != None: + p = subprocess.Popen(self.psu_presence, stdout=subprocess.PIPE, universal_newlines=True) + if p.stdout.readline() != None: status = 1 p.close() except IOError: diff --git a/device/delta/x86_64-delta_ag9032v2a-r0/plugins/psuutil.py b/device/delta/x86_64-delta_ag9032v2a-r0/plugins/psuutil.py index 06655c07a416..abce79d09240 100644 --- a/device/delta/x86_64-delta_ag9032v2a-r0/plugins/psuutil.py +++ b/device/delta/x86_64-delta_ag9032v2a-r0/plugins/psuutil.py @@ -3,7 +3,6 @@ # provides the PSUs status which are available in the platform # -import os.path import subprocess try: @@ -38,8 +37,9 @@ def get_psu_status(self, index): return False status = 0 try: - p = os.popen("ipmitool raw 0x38 0x2 3 0x6a 0x3 1") - content = p.readline().rstrip() + cmd = ["ipmitool", "raw", "0x38", "0x2", "3", "0x6a", "0x3", "1"] + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True) + content = p.stdout.readline().rstrip() reg_value = int(content, 16) if index == 1: mask = (1 << 6) @@ -66,8 +66,9 @@ def get_psu_presence(self, index): status = 0 try: - p = os.popen("ipmitool raw 0x38 0x2 3 0x6a 0x3 1") - content = p.readline().rstrip() + cmd = ["ipmitool", "raw", "0x38", "0x2", "3", "0x6a", "0x3", "1"] + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True) + content = p.stdout.readline().rstrip() reg_value = int(content, 16) if index == 1: mask = (1 << 7) diff --git a/device/delta/x86_64-delta_ag9064-r0/plugins/psuutil.py 
b/device/delta/x86_64-delta_ag9064-r0/plugins/psuutil.py index 43f7bb6acb3c..aef4534f2e76 100644 --- a/device/delta/x86_64-delta_ag9064-r0/plugins/psuutil.py +++ b/device/delta/x86_64-delta_ag9064-r0/plugins/psuutil.py @@ -3,7 +3,6 @@ # provides the PSUs status which are available in the platform # -import os.path import subprocess try: @@ -17,8 +16,8 @@ class PsuUtil(PsuBase): def __init__(self): PsuBase.__init__(self) - self.psu_presence = "cat /sys/devices/platform/delta-ag9064-cpld.0/psu{}_scan" - self.psu_status = "cat /sys/devices/platform/delta-ag9064-swpld1.0/psu{}_pwr_ok" + self.psu_presence = "/sys/devices/platform/delta-ag9064-cpld.0/psu{}_scan" + self.psu_status = "/sys/devices/platform/delta-ag9064-swpld1.0/psu{}_pwr_ok" def get_num_psus(self): """ @@ -40,8 +39,9 @@ def get_psu_status(self, index): return False status = 0 + self.psu_status = self.psu_status.format(index) try: - p = os.popen(self.psu_status.format(index)) + p = open(self.psu_status, 'r') content = p.readline().rstrip() reg_value = int(content) if reg_value != 0: @@ -63,8 +63,9 @@ def get_psu_presence(self, index): if index is None: return False status = 0 + self.psu_presence = self.psu_presence.format(index) try: - p = os.popen(self.psu_presence.format(index)) + p = open(self.psu_presence, 'r') content = p.readline().rstrip() reg_value = int(content, 16) if reg_value != 0: diff --git a/device/delta/x86_64-delta_agc032-r0/plugins/psuutil.py b/device/delta/x86_64-delta_agc032-r0/plugins/psuutil.py index 0cb47cad1bf9..3cee96c1f6a3 100644 --- a/device/delta/x86_64-delta_agc032-r0/plugins/psuutil.py +++ b/device/delta/x86_64-delta_agc032-r0/plugins/psuutil.py @@ -1,4 +1,4 @@ -import os.path +import subprocess try: from sonic_psu.psu_base import PsuBase @@ -15,7 +15,7 @@ def __init__(self): self.psu_path = "/sys/bus/i2c/devices/{}-0058/" self.psu_oper_status = "in1_input" self.psu_oper_status2 = "in2_input" - self.psu_presence = "i2cget -y {} 0x50 0x00" + self.psu_presence = ["i2cget", "-y", 
"", "0x50", "0x00"] def get_num_psus(self): """ @@ -50,9 +50,10 @@ def get_psu_presence(self, index): return False Base_bus_number = 0 status = 0 + self.psu_presence[2] = str(index + Base_bus_number) try: - p = os.popen(self.psu_presence.format(index + Base_bus_number) + "> /dev/null 2>&1") - if p.readline() != None: + p = subprocess.Popen(self.psu_presence, stdout=subprocess.PIPE, universal_newlines=True) + if p.stdout.readline() != None: status = 1 p.close() except IOError: diff --git a/device/delta/x86_64-delta_et-c032if-r0/plugins/psuutil.py b/device/delta/x86_64-delta_et-c032if-r0/plugins/psuutil.py index 4eb3bb87db80..6d1a50847678 100644 --- a/device/delta/x86_64-delta_et-c032if-r0/plugins/psuutil.py +++ b/device/delta/x86_64-delta_et-c032if-r0/plugins/psuutil.py @@ -3,7 +3,6 @@ # provides the PSUs status which are available in the platform # -import os.path import subprocess try: @@ -17,7 +16,7 @@ class PsuUtil(PsuBase): def __init__(self): PsuBase.__init__(self) - self.psu_status = "ipmitool raw 0x38 0x1 {} 0x50" + self.psu_status = ["ipmitool", "raw", "0x38", "0x1", "", "0x50"] def get_num_psus(self): """ @@ -39,9 +38,10 @@ def get_psu_status(self, index): return False status = 0 + cmd = ["ipmitool", "raw", "0x38", "0x2", "7", "0x32", "0x28", "1"] try: - p = os.popen("ipmitool raw 0x38 0x2 7 0x32 0x28 1") - content = p.readline().rstrip() + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True) + content = p.stdout.readline().rstrip() reg_value = int(content, 16) mask = (1 << (8 - index)) if reg_value & mask == 0: @@ -63,9 +63,10 @@ def get_psu_presence(self, index): if index is None: return False status = 0 + self.psu_status[4] = str(index-1) try: - p = os.popen(self.psu_status.format(index - 1)) - content = p.readline().rstrip() + p = subprocess.Popen(self.psu_status, stdout=subprocess.PIPE, universal_newlines=True) + content = p.stdout.readline().rstrip() reg_value = int(content, 16) if reg_value != 0: return False From 
92d25be08f3866b4ae37f8eeba7ef53369851803 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Fri, 14 Oct 2022 10:21:44 -0400 Subject: [PATCH 053/174] [inventec] Replace os.system and remove subprocess with shell=True (#12108) Signed-off-by: maipbui Dependency: [https://github.com/sonic-net/sonic-buildimage/pull/12065](https://github.com/sonic-net/sonic-buildimage/pull/12065) #### Why I did it 1. `getstatusoutput` is used without a static string and it uses `shell=True` 2. `subprocess()` - when using with `shell=True` is dangerous. Using subprocess function without a static string can lead to command injection. 3. `os` - not secure against maliciously constructed input and dangerous if used to evaluate dynamic content. #### How I did it 1. use `getstatusoutput` without shell=True 2. `subprocess()` - use `shell=False` instead. use an array string. Ref: [https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation](https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation) 3. 
`os` - use with `subprocess` --- .../common/utils/asic_monitor.py | 24 ++++++++++--------- .../common/utils/led_proc.py | 5 ++-- .../common/utils/platform_status.py | 3 ++- .../common/utils/transceiver_monitor.py | 3 ++- .../d6332/sonic_platform/qsfp.py | 3 ++- .../d6332/utils/inventec_d6332_util.py | 6 +++-- .../d6356/sonic_platform/qsfp.py | 3 ++- .../d6356/sonic_platform/sfp.py | 4 ++-- .../d7054q28b/sonic_platform/sfp.py | 4 ++-- 9 files changed, 32 insertions(+), 23 deletions(-) diff --git a/platform/broadcom/sonic-platform-modules-inventec/common/utils/asic_monitor.py b/platform/broadcom/sonic-platform-modules-inventec/common/utils/asic_monitor.py index bcb05b9cb845..9c994c572dd4 100755 --- a/platform/broadcom/sonic-platform-modules-inventec/common/utils/asic_monitor.py +++ b/platform/broadcom/sonic-platform-modules-inventec/common/utils/asic_monitor.py @@ -17,13 +17,11 @@ try: import os - import commands - import sys, getopt - import logging import re import time import syslog from sonic_sfp.bcmshell import bcmshell + from sonic_py_common.general import getstatusoutput_noshell except ImportError as e: raise ImportError("%s - required module not found" % str(e)) @@ -53,10 +51,10 @@ def initialLoop(): bcm_obj = BCMUtil() bcm_obj.execute_command("echo") initialNotOK = False - print bcm_obj + print(bcm_obj) log_message( syslog.LOG_INFO, "BCMUtil Object initialed successfully" ) - except Exception, e: - print "Exception. The warning is {0}".format(str(e)) + except Exception as e: + print("Exception. 
The warning is {0}".format(str(e))) time.sleep(10) class BCMUtil(bcmshell): @@ -66,7 +64,7 @@ class BCMUtil(bcmshell): def get_platform(self): if self.platform is None: - self.platform = os.popen("uname -n").read().strip() + _, self.platform = getstatusoutput_noshell(["uname", "-n"]).strip() return self.platform def get_asic_temperature( self ): @@ -102,14 +100,18 @@ def main(): content = readPtr.read().strip() if bcm_obj.get_platform() == INV_SEQUOIA_PLATFORM : if content == "inv_bmc" and SWITCH_TEMP_FILE_NAME in file_list : - os.system("echo {0} > {1}/{2}/device/{3}".format( ( bcm_obj.get_asic_temperature() * 1000 ), HWMON_PATH, index, SWITCH_TEMP_FILE_NAME )) + file = "{0}/{1}/device/{2}".format(HWMON_PATH, index, SWITCH_TEMP_FILE_NAME) + with open(file, 'w') as f: + f.write(str(bcm_obj.get_asic_temperature() * 1000) + '\n') break else : if content == "inv_psoc" and SWITCH_TEMP_FILE_NAME in file_list : - print "echo {0} > {1}/{2}/device/{3}".format( ( bcm_obj.get_asic_temperature() * 1000 ), HWMON_PATH, index, SWITCH_TEMP_FILE_NAME ) - os.system("echo {0} > {1}/{2}/device/{3}".format( ( bcm_obj.get_asic_temperature() * 1000 ), HWMON_PATH, index, SWITCH_TEMP_FILE_NAME )) + print("echo {0} > {1}/{2}/device/{3}".format( ( bcm_obj.get_asic_temperature() * 1000 ), HWMON_PATH, index, SWITCH_TEMP_FILE_NAME)) + file = "{0}/{1}/device/{2}".format(HWMON_PATH, index, SWITCH_TEMP_FILE_NAME) + with open(file, 'w') as f: + f.write(str(bcm_obj.get_asic_temperature() * 1000) + '\n') break - except Exception, e: + except Exception as e: log_message( syslog.LOG_WARNING, "Exception. 
The warning is {0}".format(str(e)) ) initialLoop() time.sleep(5) diff --git a/platform/broadcom/sonic-platform-modules-inventec/common/utils/led_proc.py b/platform/broadcom/sonic-platform-modules-inventec/common/utils/led_proc.py index bab0e2dafe76..8f4564287dde 100755 --- a/platform/broadcom/sonic-platform-modules-inventec/common/utils/led_proc.py +++ b/platform/broadcom/sonic-platform-modules-inventec/common/utils/led_proc.py @@ -20,6 +20,7 @@ import syslog import re from sonic_sfp.bcmshell import bcmshell +from sonic_py_common.general import getstatusoutput_noshell # ===================================================================== @@ -120,8 +121,8 @@ def _board_init(): global SYNC_S global SYNC_P - cmd = "uname -n" - platform = os.popen(cmd).read() + cmd = ["uname", "-n"] + _, platform = getstatusoutput_noshell(cmd) if platform.rstrip() == INV_MAGNOLIA: BOARD_TPYE = "inventec_d6254qs" diff --git a/platform/broadcom/sonic-platform-modules-inventec/common/utils/platform_status.py b/platform/broadcom/sonic-platform-modules-inventec/common/utils/platform_status.py index f1e7f7fece77..d73687e025ae 100755 --- a/platform/broadcom/sonic-platform-modules-inventec/common/utils/platform_status.py +++ b/platform/broadcom/sonic-platform-modules-inventec/common/utils/platform_status.py @@ -2,6 +2,7 @@ import os import socket +import subprocess from collections import OrderedDict # Purpose: Shutdown DUT upon receiving thermaltrip event from kernel (inv_pthread) @@ -59,6 +60,6 @@ def next_events(self): # Receive thermaltrip event if event['ACTION'] == 'remove' and event['DEVPATH'] == '/kernel/platform_status/fan': - os.system("shutdown -h now") + subprocess.call(["shutdown", "-h", "now"]) diff --git a/platform/broadcom/sonic-platform-modules-inventec/common/utils/transceiver_monitor.py b/platform/broadcom/sonic-platform-modules-inventec/common/utils/transceiver_monitor.py index 9e4a44c167fe..453c33deeaf3 100755 --- 
a/platform/broadcom/sonic-platform-modules-inventec/common/utils/transceiver_monitor.py +++ b/platform/broadcom/sonic-platform-modules-inventec/common/utils/transceiver_monitor.py @@ -28,6 +28,7 @@ import syslog from sfputil import SfpUtil from sonic_sfp.bcmshell import bcmshell + from sonic_py_common.general import getstatusoutput_noshell except ImportError as e: raise ImportError("%s - required module not found" % str(e)) @@ -127,7 +128,7 @@ class BCMUtil(bcmshell): def get_platform(self): if self.platform is None: - self.platform = os.popen("uname -n").read().strip() + _, self.platform = getstatusoutput_noshell(["uname", "-n"]) return self.platform def get_port_to_bcm_mapping(self): diff --git a/platform/broadcom/sonic-platform-modules-inventec/d6332/sonic_platform/qsfp.py b/platform/broadcom/sonic-platform-modules-inventec/d6332/sonic_platform/qsfp.py index b1f8799ab593..f71a19d00e62 100644 --- a/platform/broadcom/sonic-platform-modules-inventec/d6332/sonic_platform/qsfp.py +++ b/platform/broadcom/sonic-platform-modules-inventec/d6332/sonic_platform/qsfp.py @@ -8,6 +8,7 @@ try: import os import logging + import subprocess from ctypes import create_string_buffer from sonic_platform_base.sfp_base import SfpBase from sonic_platform_base.sonic_sfp.sff8436 import sff8436Dom @@ -120,7 +121,7 @@ def __set_attr_value(self, attr_path, value): return True def __is_host(self): - return os.system("docker > /dev/null 2>&1") == 0 + return subprocess.call(["docker"]) == 0 def __get_path_to_port_config_file(self): host_platform_root_path = '/usr/share/sonic/device' diff --git a/platform/broadcom/sonic-platform-modules-inventec/d6332/utils/inventec_d6332_util.py b/platform/broadcom/sonic-platform-modules-inventec/d6332/utils/inventec_d6332_util.py index a468a323d6fc..d467fd8fb5df 100755 --- a/platform/broadcom/sonic-platform-modules-inventec/d6332/utils/inventec_d6332_util.py +++ b/platform/broadcom/sonic-platform-modules-inventec/d6332/utils/inventec_d6332_util.py @@ -33,6 
+33,7 @@ import logging import syslog import time +from sonic_py_common.general import getstatusoutput_noshell_pipe DEBUG = False args = [] @@ -236,8 +237,9 @@ def system_install(boot_option): return status for addr_offset in range (0,FAN_NUM): addr=FAN_VPD_ADDR_BASE+addr_offset - cmd = "i2cdetect -y "+str(FAN_VPD_CHANNEL)+" "+str(addr)+" "+str(addr)+" | grep "+str(hex(addr)).replace('0x','') - result=os.system(cmd) + cmd1 = ["i2cdetect", "-y", str(FAN_VPD_CHANNEL), str(addr), str(addr)] + cmd2 = ["grep", f'{addr:x}'] + result, _ = getstatusoutput_noshell_pipe(cmd1, cmd2) if( result==0 ): cmd="echo inv_eeprom "+str(addr)+" > /sys/bus/i2c/devices/i2c-"+FAN_VPD_CHANNEL status, output = exec_cmd(cmd,1) diff --git a/platform/broadcom/sonic-platform-modules-inventec/d6356/sonic_platform/qsfp.py b/platform/broadcom/sonic-platform-modules-inventec/d6356/sonic_platform/qsfp.py index fccde46df4c5..22eae317f8ce 100644 --- a/platform/broadcom/sonic-platform-modules-inventec/d6356/sonic_platform/qsfp.py +++ b/platform/broadcom/sonic-platform-modules-inventec/d6356/sonic_platform/qsfp.py @@ -9,6 +9,7 @@ import os import sys import time + import subprocess from sonic_platform_base.sfp_base import SfpBase from sonic_platform_base.sonic_sfp.sff8436 import sff8436Dom from sonic_platform_base.sonic_sfp.sff8436 import sff8436InterfaceId @@ -108,7 +109,7 @@ def __get_attr_value(self, attr_path): return retval def __is_host(self): - return os.system("docker > /dev/null 2>&1") == 0 + return subprocess.call(["docker"]) == 0 def __get_path_to_port_config_file(self): host_platform_root_path = '/usr/share/sonic/device' diff --git a/platform/broadcom/sonic-platform-modules-inventec/d6356/sonic_platform/sfp.py b/platform/broadcom/sonic-platform-modules-inventec/d6356/sonic_platform/sfp.py index 5c75a8749a46..de8d50b7abda 100644 --- a/platform/broadcom/sonic-platform-modules-inventec/d6356/sonic_platform/sfp.py +++ b/platform/broadcom/sonic-platform-modules-inventec/d6356/sonic_platform/sfp.py 
@@ -7,7 +7,7 @@ try: import os - import sys + import subprocess from sonic_platform_base.sfp_base import SfpBase from sonic_platform_base.sonic_sfp.sff8472 import sff8472Dom from sonic_platform_base.sonic_sfp.sff8472 import sff8472InterfaceId @@ -95,7 +95,7 @@ def __get_attr_value(self, attr_path): return retval def __is_host(self): - return os.system("docker > /dev/null 2>&1") == 0 + return subprocess.call(["docker"]) == 0 def __get_path_to_port_config_file(self): host_platform_root_path = '/usr/share/sonic/device' diff --git a/platform/broadcom/sonic-platform-modules-inventec/d7054q28b/sonic_platform/sfp.py b/platform/broadcom/sonic-platform-modules-inventec/d7054q28b/sonic_platform/sfp.py index 9d6337195d5d..ee5768a640f7 100644 --- a/platform/broadcom/sonic-platform-modules-inventec/d7054q28b/sonic_platform/sfp.py +++ b/platform/broadcom/sonic-platform-modules-inventec/d7054q28b/sonic_platform/sfp.py @@ -153,7 +153,7 @@ class Sfp(SfpBase): # Path to QSFP sysfs PLATFORM_ROOT_PATH = "/usr/share/sonic/device" PMON_HWSKU_PATH = "/usr/share/sonic/hwsku" - HOST_CHK_CMD = "docker > /dev/null 2>&1" + HOST_CHK_CMD = ["docker"] PLATFORM = "x86_64-inventec_d7054q28b-r0" HWSKU = "INVENTEC-D7054Q28B-S48-Q6" @@ -285,7 +285,7 @@ def __read_txt_file(self, file_path): return "" def __is_host(self): - return os.system(self.HOST_CHK_CMD) == 0 + return subprocess.call(self.HOST_CHK_CMD) == 0 def __get_path_to_port_config_file(self): platform_path = "/".join([self.PLATFORM_ROOT_PATH, self.PLATFORM]) From 629343e0b7c909e5a00e708c70b4d92efc752216 Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Fri, 14 Oct 2022 22:12:28 -0700 Subject: [PATCH 054/174] [Mellanox]Adding SKU Mellanox-SN2700-D44C10 (#12396) #### Why I did it To add new SKU Mellanox-SN2700-D44C10 with following requirements: | Port configuration | Value | | ------ |--------- | | Breakout mode for each port |**Defined in port mapping** | | Speed of the port | **Defined in Port mapping** | | 
Auto-negotiation enable/disable | **No setting required** | | FEC mode | **No setting required** | |Type of transceiver used | **Not needed**| Buffer configuration | Value ------ |--------- Shared headroom | **Enabled** Shared headroom pool factor | **2** Dynamic Buffer | **Disable** In static buffer scenario how many uplinks and downlinks? | **44 x50G and 2x100G Downlinks 8x100G uplinks** 2km cable support required? | **No** Switch configuration | Value ------ |--------- Warmboot enabled? | **yes** Should warmboot be added to SAI profile when enabled? | **yes** Is VxLAN source port range set? | **No** Should Vxlan source port range be added to SAI profile when set. | **No** Is Static Policy Based Hashing enabled? | **No** Port Mapping | Ports | Mode | | ------ |--------- | | 1,2 | 1x100G | | 3-6 | 2x50G | | 7-10 | 1x100G | | 11-22 | 2x50G | | 23-26 | 1x100G | | 27-32 | 2x50G | Number of Uplinks / Downlinks: TO topology: **44 x50G and 2x100G Downlinks 8x100G uplinks**. #### How I did it Defined the SKU as per requirements #### How to verify it Load the SKU and verify if all links come up and traffic passes. 
--- .../Mellanox-SN2700-D44C10/buffers.json.j2 | 15 + .../buffers_defaults_objects.j2 | 1 + .../buffers_defaults_t0.j2 | 36 +++ .../buffers_defaults_t1.j2 | 36 +++ .../buffers_dynamic.json.j2 | 16 ++ .../Mellanox-SN2700-D44C10/hwsku.json | 166 +++++++++++ .../pg_profile_lookup.ini | 1 + .../Mellanox-SN2700-D44C10/port_config.ini | 55 ++++ .../Mellanox-SN2700-D44C10/qos.json.j2 | 1 + .../Mellanox-SN2700-D44C10/sai.profile | 3 + .../sai_2700_44x50g_10x100g.xml | 269 ++++++++++++++++++ 11 files changed, 599 insertions(+) create mode 100644 device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers.json.j2 create mode 120000 device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers_defaults_objects.j2 create mode 100644 device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers_defaults_t0.j2 create mode 100644 device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers_defaults_t1.j2 create mode 100644 device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers_dynamic.json.j2 create mode 100644 device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/hwsku.json create mode 120000 device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/pg_profile_lookup.ini create mode 100644 device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/port_config.ini create mode 120000 device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/qos.json.j2 create mode 100644 device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/sai.profile create mode 100644 device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/sai_2700_44x50g_10x100g.xml diff --git a/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers.json.j2 b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers.json.j2 new file mode 100644 index 000000000000..ad3266249133 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers.json.j2 @@ -0,0 +1,15 @@ +{# + Copyright (c) 2022 
NVIDIA CORPORATION & AFFILIATES. + Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +#} +{%- set default_topo = 't0' %} +{%- include 'buffers_config.j2' %} diff --git a/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers_defaults_objects.j2 b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers_defaults_objects.j2 new file mode 120000 index 000000000000..c01aebb7ae12 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers_defaults_objects.j2 @@ -0,0 +1 @@ +../../x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D48C8/buffers_defaults_objects.j2 \ No newline at end of file diff --git a/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers_defaults_t0.j2 b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers_defaults_t0.j2 new file mode 100644 index 000000000000..920730c15ecd --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers_defaults_t0.j2 @@ -0,0 +1,36 @@ +{# + Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. + Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +#} +{% set default_cable = '5m' %} +{% set ingress_lossless_pool_size = '6822912' %} +{% set ingress_lossless_pool_xoff = '999424' %} +{% set egress_lossless_pool_size = '13945824' %} +{% set egress_lossy_pool_size = '6822912' %} + +{% import 'buffers_defaults_objects.j2' as defs with context %} + +{%- macro generate_buffer_pool_and_profiles_with_inactive_ports(port_names_inactive) %} +{{ defs.generate_buffer_pool_and_profiles_with_inactive_ports(port_names_inactive) }} +{%- endmacro %} + +{%- macro generate_profile_lists_with_inactive_ports(port_names_active, port_names_inactive) %} +{{ defs.generate_profile_lists(port_names_active, port_names_inactive) }} +{%- endmacro %} + +{%- macro generate_queue_buffers_with_inactive_ports(port_names_active, port_names_inactive) %} +{{ defs.generate_queue_buffers(port_names_active, port_names_inactive) }} +{%- endmacro %} + +{%- macro generate_pg_profiles_with_inactive_ports(port_names_active, port_names_inactive) %} +{{ defs.generate_pg_profiles(port_names_active, port_names_inactive) }} +{%- endmacro %} diff --git a/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers_defaults_t1.j2 b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers_defaults_t1.j2 new file mode 100644 index 000000000000..18a57073f823 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers_defaults_t1.j2 @@ -0,0 +1,36 @@ +{# + Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. + Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +#} +{% set default_cable = '300m' %} +{% set ingress_lossless_pool_size = '6282240' %} +{% set ingress_lossless_pool_xoff = '1540096' %} +{% set egress_lossless_pool_size = '13945824' %} +{% set egress_lossy_pool_size = '6282240' %} + +{% import 'buffers_defaults_objects.j2' as defs with context %} + +{%- macro generate_buffer_pool_and_profiles_with_inactive_ports(port_names_inactive) %} +{{ defs.generate_buffer_pool_and_profiles_with_inactive_ports(port_names_inactive) }} +{%- endmacro %} + +{%- macro generate_profile_lists_with_inactive_ports(port_names_active, port_names_inactive) %} +{{ defs.generate_profile_lists(port_names_active, port_names_inactive) }} +{%- endmacro %} + +{%- macro generate_queue_buffers_with_inactive_ports(port_names_active, port_names_inactive) %} +{{ defs.generate_queue_buffers(port_names_active, port_names_inactive) }} +{%- endmacro %} + +{%- macro generate_pg_profiles_with_inactive_ports(port_names_active, port_names_inactive) %} +{{ defs.generate_pg_profiles(port_names_active, port_names_inactive) }} +{%- endmacro %} diff --git a/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers_dynamic.json.j2 b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers_dynamic.json.j2 new file mode 100644 index 000000000000..cea77067d2a6 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/buffers_dynamic.json.j2 @@ -0,0 +1,16 @@ +{# + Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. 
+ Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +#} +{%- set default_topo = 't0' %} +{%- set dynamic_mode = 'true' %} +{%- include 'buffers_config.j2' %} diff --git a/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/hwsku.json b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/hwsku.json new file mode 100644 index 000000000000..b74b2d5ad570 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/hwsku.json @@ -0,0 +1,166 @@ +{ + "interfaces": { + "Ethernet0": { + "default_brkout_mode": "1x100G[50G,40G,25G,10G]" + }, + "Ethernet4": { + "default_brkout_mode": "1x100G[50G,40G,25G,10G]" + }, + "Ethernet8": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet10": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet12": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet14": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet16": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet18": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet20": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet22": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet24": { + "default_brkout_mode": "1x100G[50G,40G,25G,10G]" + }, + "Ethernet28": { + "default_brkout_mode": "1x100G[50G,40G,25G,10G]" + }, + "Ethernet32": { + "default_brkout_mode": "1x100G[50G,40G,25G,10G]" + }, + "Ethernet36": { + "default_brkout_mode": "1x100G[50G,40G,25G,10G]" + }, + "Ethernet40": { + 
"default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet42": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet44": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet46": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet48": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet50": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet52": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet54": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet56": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet58": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet60": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet62": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet64": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet66": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet68": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet70": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet72": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet74": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet76": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet78": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet80": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet82": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet84": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet86": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet88": { + "default_brkout_mode": "1x100G[50G,40G,25G,10G]" + }, + "Ethernet92": { + "default_brkout_mode": "1x100G[50G,40G,25G,10G]" + }, + "Ethernet96": { + "default_brkout_mode": "1x100G[50G,40G,25G,10G]" + }, + "Ethernet100": { + "default_brkout_mode": "1x100G[50G,40G,25G,10G]" + }, + "Ethernet104": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet106": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet108": { + 
"default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet110": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet112": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet114": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet116": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet118": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet120": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet122": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet124": { + "default_brkout_mode": "2x50G[25G,10G]" + }, + "Ethernet126": { + "default_brkout_mode": "2x50G[25G,10G]" + } + } +} diff --git a/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/pg_profile_lookup.ini b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/pg_profile_lookup.ini new file mode 120000 index 000000000000..b1f8524ff2e5 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/pg_profile_lookup.ini @@ -0,0 +1 @@ +../Mellanox-SN2700-D40C8S8/pg_profile_lookup.ini \ No newline at end of file diff --git a/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/port_config.ini b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/port_config.ini new file mode 100644 index 000000000000..1320b267d8c4 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/port_config.ini @@ -0,0 +1,55 @@ +# name lanes alias index speed +Ethernet0 0,1,2,3 etp1 1 100000 +Ethernet4 4,5,6,7 etp2 2 100000 +Ethernet8 8,9 etp3a 3 50000 +Ethernet10 10,11 etp3b 3 50000 +Ethernet12 12,13 etp4a 4 50000 +Ethernet14 14,15 etp4b 4 50000 +Ethernet16 16,17 etp5a 5 50000 +Ethernet18 18,19 etp5b 5 50000 +Ethernet20 20,21 etp6a 6 50000 +Ethernet22 22,23 etp6b 6 50000 +Ethernet24 24,25,26,27 etp7 7 100000 +Ethernet28 28,29,30,31 etp8 8 100000 +Ethernet32 32,33,34,35 etp9 9 100000 +Ethernet36 36,37,38,39 etp10 10 100000 +Ethernet40 40,41 etp11a 11 50000 +Ethernet42 42,43 etp11b 11 50000 
+Ethernet44 44,45 etp12a 12 50000 +Ethernet46 46,47 etp12b 12 50000 +Ethernet48 48,49 etp13a 13 50000 +Ethernet50 50,51 etp13b 13 50000 +Ethernet52 52,53 etp14a 14 50000 +Ethernet54 54,55 etp14b 14 50000 +Ethernet56 56,57 etp15a 15 50000 +Ethernet58 58,59 etp15b 15 50000 +Ethernet60 60,61 etp16a 16 50000 +Ethernet62 62,63 etp16b 16 50000 +Ethernet64 64,65 etp17a 17 50000 +Ethernet66 66,67 etp17b 17 50000 +Ethernet68 68,69 etp18a 18 50000 +Ethernet70 70,71 etp18b 18 50000 +Ethernet72 72,73 etp19a 19 50000 +Ethernet74 74,75 etp19b 19 50000 +Ethernet76 76,77 etp20a 20 50000 +Ethernet78 78,79 etp20b 20 50000 +Ethernet80 80,81 etp21a 21 50000 +Ethernet82 82,83 etp21b 21 50000 +Ethernet84 84,85 etp22a 22 50000 +Ethernet86 86,87 etp22b 22 50000 +Ethernet88 88,89,90,91 etp23 23 100000 +Ethernet92 92,93,94,95 etp24 24 100000 +Ethernet96 96,97,98,99 etp25 25 100000 +Ethernet100 100,101,102,103 etp26 26 100000 +Ethernet104 104,105 etp27a 27 50000 +Ethernet106 106,107 etp27b 27 50000 +Ethernet108 108,109 etp28a 28 50000 +Ethernet110 110,111 etp28b 28 50000 +Ethernet112 112,113 etp29a 29 50000 +Ethernet114 114,115 etp29b 29 50000 +Ethernet116 116,117 etp30a 30 50000 +Ethernet118 118,119 etp30b 30 50000 +Ethernet120 120,121 etp31a 31 50000 +Ethernet122 122,123 etp31b 31 50000 +Ethernet124 124,125 etp32a 32 50000 +Ethernet126 126,127 etp32b 32 50000 diff --git a/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/qos.json.j2 b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/qos.json.j2 new file mode 120000 index 000000000000..8bd2d26567b8 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/qos.json.j2 @@ -0,0 +1 @@ +../ACS-MSN2700/qos.json.j2 \ No newline at end of file diff --git a/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/sai.profile b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/sai.profile new file mode 100644 index 000000000000..f25a70d39743 --- /dev/null +++ 
b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/sai.profile @@ -0,0 +1,3 @@ +SAI_INIT_CONFIG_FILE=/usr/share/sonic/hwsku/sai_2700_44x50g_10x100g.xml +SAI_DUMP_STORE_PATH=/var/log/mellanox/sdk-dumps +SAI_DUMP_STORE_AMOUNT=10 diff --git a/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/sai_2700_44x50g_10x100g.xml b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/sai_2700_44x50g_10x100g.xml new file mode 100644 index 000000000000..c505f0c449e6 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D44C10/sai_2700_44x50g_10x100g.xml @@ -0,0 +1,269 @@ + + + + + + 00:02:03:04:05:00 + + + 1 + + + 32 + + + + + 1 + 2 + 4 + 16 + + + 3 + + + 3221487616 + + + 3 + 2 + 4 + 17 + 1 + 3221487616 + + + 5 + 2 + 4 + 18 + 3 + 3221487616 + + + 7 + 2 + 4 + 19 + 1 + 3221487616 + + + 9 + 2 + 4 + 20 + 3 + 3221487616 + + + 11 + 2 + 4 + 21 + 1 + 3221487616 + + + 13 + 4 + 22 + 3 + 11534336 + + + 15 + 4 + 23 + 1 + 11534336 + + + 17 + 4 + 24 + 3 + 11534336 + + + 19 + 4 + 25 + 1 + 11534336 + + + 21 + 2 + 4 + 26 + 3 + 3221487616 + + + 23 + 2 + 4 + 27 + 1 + 3221487616 + + + 25 + 2 + 4 + 28 + 3 + 3221487616 + + + 27 + 2 + 4 + 29 + 1 + 3221487616 + + + 29 + 2 + 4 + 30 + 3 + 3221487616 + + + 31 + 2 + 4 + 31 + 1 + 3221487616 + + + 33 + 2 + 4 + 14 + 3 + 3221487616 + + + 35 + 2 + 4 + 15 + 1 + 3221487616 + + + 37 + 2 + 4 + 12 + 3 + 3221487616 + + + 39 + 2 + 4 + 13 + 1 + 3221487616 + + + 41 + 2 + 4 + 10 + 3 + 3221487616 + + + 43 + 2 + 4 + 11 + 1 + 3221487616 + + + 45 + 4 + 8 + 3 + 11534336 + + + 47 + 4 + 9 + 1 + 11534336 + + + 49 + 2 + 4 + 6 + 3 + 3221487616 + + + 51 + 4 + 7 + 1 + 11534336 + + + 53 + 2 + 4 + 4 + 3 + 3221487616 + + + 55 + 2 + 4 + 5 + 1 + 3221487616 + + + 57 + 2 + 4 + 2 + 3 + 3221487616 + + + 59 + 2 + 4 + 3 + 1 + 3221487616 + + + 61 + 4 + 0 + 3 + 11534336 + + + 63 + 4 + 1 + 1 + 11534336 + + + + From 1f2ee7b729d8afb98b871bcad502f5b5ae2b7b51 Mon Sep 17 00:00:00 2001 From: Prince Sunny Date: Fri, 14 Oct 2022 22:27:06 -0700 Subject: 
[PATCH 055/174] add Restapi Yang model (#12379) #### Why I did it Reverting PR https://github.com/sonic-net/sonic-buildimage/pull/12374 Fix build issues This PR fixed https://github.com/sonic-net/sonic-buildimage/issues/10548 --- src/sonic-yang-models/doc/Configuration.md | 18 ++++ src/sonic-yang-models/setup.py | 1 + .../tests/files/sample_config_db.json | 13 +++ .../tests/yang_model_tests/tests/restapi.json | 13 +++ .../tests_config/restapi.json | 38 +++++++++ .../yang-models/sonic-restapi.yang | 85 +++++++++++++++++++ 6 files changed, 168 insertions(+) create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests/restapi.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests_config/restapi.json create mode 100644 src/sonic-yang-models/yang-models/sonic-restapi.yang diff --git a/src/sonic-yang-models/doc/Configuration.md b/src/sonic-yang-models/doc/Configuration.md index ff18aff97970..060751ab4d70 100644 --- a/src/sonic-yang-models/doc/Configuration.md +++ b/src/sonic-yang-models/doc/Configuration.md @@ -46,6 +46,7 @@ Table of Contents * [Scheduler](#scheduler) * [Port QoS Map](#port-qos-map) * [Queue](#queue) + * [Restapi](#restapi) * [Tacplus Server](#tacplus-server) * [TC to Priority group map](#tc-to-priority-group-map) * [TC to Queue map](#tc-to-queue-map) @@ -1412,6 +1413,23 @@ name as object key and member list as attribute. 
} ``` +### Restapi +``` +{ +"RESTAPI": { + "certs": { + "ca_crt": "/etc/sonic/credentials/ame_root.pem", + "server_key": "/etc/sonic/credentials/restapiserver.key", + "server_crt": "/etc/sonic/credentials/restapiserver.crt", + "client_crt_cname": "client.sonic.net" + }, + "config": { + "client_auth": "true", + "log_level": "trace", + "allow_insecure": "false" + } +} +``` ### Tacplus Server diff --git a/src/sonic-yang-models/setup.py b/src/sonic-yang-models/setup.py index 208321754141..db9fb9d4f56b 100644 --- a/src/sonic-yang-models/setup.py +++ b/src/sonic-yang-models/setup.py @@ -151,6 +151,7 @@ def run(self): './yang-models/sonic-scheduler.yang', './yang-models/sonic-wred-profile.yang', './yang-models/sonic-queue.yang', + './yang-models/sonic-restapi.yang', './yang-models/sonic-dscp-fc-map.yang', './yang-models/sonic-exp-fc-map.yang', './yang-models/sonic-dscp-tc-map.yang', diff --git a/src/sonic-yang-models/tests/files/sample_config_db.json b/src/sonic-yang-models/tests/files/sample_config_db.json index 7beca6d2af07..79110704046a 100644 --- a/src/sonic-yang-models/tests/files/sample_config_db.json +++ b/src/sonic-yang-models/tests/files/sample_config_db.json @@ -1122,6 +1122,19 @@ "port": "50051" } }, + "RESTAPI": { + "certs": { + "ca_crt": "/etc/sonic/credentials/ame_root.pem", + "server_key": "/etc/sonic/credentials/restapiserver.key", + "server_crt": "/etc/sonic/credentials/restapiserver.crt", + "client_crt_cname": "client.sonic.net" + }, + "config": { + "client_auth": "true", + "log_level": "trace", + "allow_insecure": "false" + } + }, "FLEX_COUNTER_TABLE": { "PFCWD": { "FLEX_COUNTER_STATUS": "enable" diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/restapi.json b/src/sonic-yang-models/tests/yang_model_tests/tests/restapi.json new file mode 100644 index 000000000000..e18d6e163ce4 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/restapi.json @@ -0,0 +1,13 @@ +{ + "RESTAPI_TABLE_WITH_INCORRECT_CERT": { + "desc": "RESTAPI 
TABLE_WITH_INCORRECT_CERT failure.", + "eStr": ["Value", "does not satisfy the constraint"] + }, + "RESTAPI_TABLE_WITH_INCORRECT_CLIENT": { + "desc": "RESTAPI TABLE_WITH_INCORRECT_CLIENT failure.", + "eStr": ["Value", "does not satisfy the constraint"] + }, + "RESTAPI_TABLE_WITH_VALID_CONFIG": { + "desc": "RESTAPI TABLE WITH VALID CONFIG." + } +} diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/restapi.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/restapi.json new file mode 100644 index 000000000000..8ae212ed7026 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/restapi.json @@ -0,0 +1,38 @@ +{ + "RESTAPI_TABLE_WITH_INCORRECT_CERT": { + "sonic-restapi:sonic-restapi": { + "sonic-restapi:RESTAPI": { + "certs": { + "ca_crt": "/etc/sonic/credentials/ame_root.pem", + "server_crt": "a/b/c", + "server_key": "/etc/sonic/credentials/restapiserver.key", + "client_crt_cname": "client" + } + } + } + }, + "RESTAPI_TABLE_WITH_INCORRECT_CLIENT": { + "sonic-restapi:sonic-restapi": { + "sonic-restapi:RESTAPI": { + "certs": { + "ca_crt": "/etc/sonic/credentials/ame_root.pem", + "server_crt": "/etc/sonic/credentials/restapiserver.crt", + "server_key": "/etc/sonic/credentials/restapiserver.key", + "client_crt_cname": "/client" + } + } + } + }, + "RESTAPI_TABLE_WITH_VALID_CONFIG": { + "sonic-restapi:sonic-restapi": { + "sonic-restapi:RESTAPI": { + "certs": { + "ca_crt": "/etc/sonic/credentials/ame_root.pem", + "server_crt": "/etc/sonic/credentials/restapiserver.crt", + "server_key": "/etc/sonic/credentials/restapiserver.key", + "client_crt_cname": "client.sonic.net" + } + } + } + } +} diff --git a/src/sonic-yang-models/yang-models/sonic-restapi.yang b/src/sonic-yang-models/yang-models/sonic-restapi.yang new file mode 100644 index 000000000000..e049e2fa5c23 --- /dev/null +++ b/src/sonic-yang-models/yang-models/sonic-restapi.yang @@ -0,0 +1,85 @@ +module sonic-restapi { + + yang-version 1.1; + + namespace 
"http://github.com/Azure/sonic-restapi"; + prefix restapi; + + import ietf-inet-types { + prefix inet; + } + + organization + "SONiC"; + + contact + "SONiC"; + + description "RESTAPI YANG Module for SONiC OS"; + + revision 2022-10-05 { + description "First Revision"; + } + + container sonic-restapi { + + container RESTAPI { + + description "RESTAPI TABLE part of config_db.json"; + + container certs { + + leaf ca_crt { + type string { + pattern '(/[a-zA-Z0-9_-]+)*/([a-zA-Z0-9_-]+).([a-z]+)'; + } + description "Local path for ca_crt."; + } + + leaf server_crt { + type string { + pattern '(/[a-zA-Z0-9_-]+)*/([a-zA-Z0-9_-]+).crt'; + } + description "Local path for server_crt."; + } + + leaf client_crt_cname { + type string { + pattern '([a-zA-Z0-9_\-\.]+)'; + } + description "Client cert name."; + } + + leaf server_key { + type string { + pattern '(/[a-zA-Z0-9_-]+)*/([a-zA-Z0-9_-]+).key'; + } + description "Local path for server_key."; + } + + } + + container config { + + leaf client_auth { + type boolean; + default true; + description "Enable client authentication"; + } + + leaf log_level { + type string { + pattern "trace|info"; + } + description "container log level for restapi"; + } + + leaf allow_insecure { + type boolean; + default false; + description "Allow insecure connection"; + } + } + } + } +} From e2ae965fddb1f1ae4909a32822c237ae83beda63 Mon Sep 17 00:00:00 2001 From: Ying Xie Date: Sun, 16 Oct 2022 09:37:45 -0700 Subject: [PATCH 056/174] [FRR] import FRR patch: zebra: Note when the netlink DUMP command is interrupted (#12412) Why I did it There is an outstanding FRR issue #12380. This seems to be a known issue but without good fix so far. The root cause is around zebra and kernel netlink interaction. The failure was previously not noticed by zebra. How I did it Port the patch that would make the issue obvious. 
Signed-off-by: Ying Xie ying.xie@microsoft.com --- ...the-netlink-DUMP-command-is-interrup.patch | 45 +++++++++++++++++++ src/sonic-frr/patch/series | 1 + 2 files changed, 46 insertions(+) create mode 100644 src/sonic-frr/patch/0010-zebra-Note-when-the-netlink-DUMP-command-is-interrup.patch diff --git a/src/sonic-frr/patch/0010-zebra-Note-when-the-netlink-DUMP-command-is-interrup.patch b/src/sonic-frr/patch/0010-zebra-Note-when-the-netlink-DUMP-command-is-interrup.patch new file mode 100644 index 000000000000..3da1ab318d1c --- /dev/null +++ b/src/sonic-frr/patch/0010-zebra-Note-when-the-netlink-DUMP-command-is-interrup.patch @@ -0,0 +1,45 @@ +From 8f10590a85669f300d2706d5ef1e560cdbaaf0f8 Mon Sep 17 00:00:00 2001 +From: Donald Sharp +Date: Fri, 25 Mar 2022 19:08:14 -0400 +Subject: [PATCH 10/10] zebra: Note when the netlink DUMP command is + interrupted + +There exists code paths in the linux kernel where a dump command +will be interrupted( I am not sure I understand what this really +means ) and the data sent back from the kernel is wrong or incomplete. + +At this point in time I am not 100% certain what should be done, but +let's start noticing that this has happened so we can formulate a plan +or allow the end operator to know bad stuff is a foot at the circle K. + +Signed-off-by: Donald Sharp +--- + zebra/kernel_netlink.c | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +diff --git a/zebra/kernel_netlink.c b/zebra/kernel_netlink.c +index ccafc8f0c..9dc597fad 100644 +--- a/zebra/kernel_netlink.c ++++ b/zebra/kernel_netlink.c +@@ -972,6 +972,18 @@ int netlink_parse_info(int (*filter)(struct nlmsghdr *, ns_id_t, int), + return err; + } + ++ /* ++ * What is the right thing to do? The kernel ++ * is telling us that the dump request was interrupted ++ * and we more than likely are out of luck and have ++ * missed data from the kernel. At this point in time ++ * lets just note that this is happening. 
++ */ ++ if (h->nlmsg_flags & NLM_F_DUMP_INTR) ++ flog_err( ++ EC_ZEBRA_NETLINK_BAD_SEQUENCE, ++ "netlink recvmsg: The Dump request was interrupted"); ++ + /* OK we got netlink message. */ + if (IS_ZEBRA_DEBUG_KERNEL) + zlog_debug( +-- +2.17.1 + diff --git a/src/sonic-frr/patch/series b/src/sonic-frr/patch/series index a474b918a8cc..34ec5d510a3c 100644 --- a/src/sonic-frr/patch/series +++ b/src/sonic-frr/patch/series @@ -9,3 +9,4 @@ Disable-ipv6-src-address-test-in-pceplib.patch cross-compile-changes.patch 0009-ignore-route-from-default-table.patch +0010-zebra-Note-when-the-netlink-DUMP-command-is-interrup.patch From 094646c6bcee5a24e13d6a2f794d25d35bf20a08 Mon Sep 17 00:00:00 2001 From: pettershao-ragilenetworks <81281940+pettershao-ragilenetworks@users.noreply.github.com> Date: Mon, 17 Oct 2022 08:02:31 +0800 Subject: [PATCH 057/174] [yang]Update port yang model, change MTU range to [68,9216] (#12223) What I did Filter port invalid MTU configuration How I did it Adjust the MTU value to the range of [68,9216] How to verify it Use "config interface mtu Ethernet1 40" command to configure the port MTU. The following error will occur in SWSS. --- .../tests/yang_model_tests/tests/port.json | 6 ++++ .../yang_model_tests/tests_config/port.json | 30 +++++++++++++++++++ .../yang-models/sonic-port.yang | 2 +- 3 files changed, 37 insertions(+), 1 deletion(-) diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/port.json b/src/sonic-yang-models/tests/yang_model_tests/tests/port.json index 3b94cedab4d9..6f7af54c4ad8 100644 --- a/src/sonic-yang-models/tests/yang_model_tests/tests/port.json +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/port.json @@ -13,6 +13,12 @@ "eStrKey" : "Pattern", "eStr": ["rc"] }, + "PORT_VALID_MTU_TEST_1": { + "desc": "PORT_VALID_MTU_TEST_1 no failure." + }, + "PORT_VALID_MTU_TEST_2": { + "desc": "PORT_VALID_MTU_TEST_2 no failure." + }, "PORT_VALID_AUTONEG_TEST_1": { "desc": "PORT_VALID_AUTONEG_TEST_1 no failure." 
}, diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/port.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/port.json index b6ccf9ec8780..684caa2432b9 100644 --- a/src/sonic-yang-models/tests/yang_model_tests/tests_config/port.json +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/port.json @@ -39,6 +39,36 @@ } } }, + "PORT_VALID_MTU_TEST_1": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "name": "Ethernet8", + "alias": "eth8", + "lanes": "65", + "speed": 25000, + "mtu": 68 + } + ] + } + } + }, + "PORT_VALID_MTU_TEST_2": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "name": "Ethernet8", + "alias": "eth8", + "lanes": "65", + "speed": 25000, + "mtu": 9216 + } + ] + } + } + }, "PORT_VALID_AUTONEG_TEST_1": { "sonic-port:sonic-port": { "sonic-port:PORT": { diff --git a/src/sonic-yang-models/yang-models/sonic-port.yang b/src/sonic-yang-models/yang-models/sonic-port.yang index 6e03f777a859..a60e6b1624a3 100644 --- a/src/sonic-yang-models/yang-models/sonic-port.yang +++ b/src/sonic-yang-models/yang-models/sonic-port.yang @@ -117,7 +117,7 @@ module sonic-port{ leaf mtu { type uint16 { - range 1..9216; + range 68..9216; } } From 51eac0b335bf3ba1941a49241a2aa81d05abb07b Mon Sep 17 00:00:00 2001 From: lixiaoyuner <35456895+lixiaoyuner@users.noreply.github.com> Date: Mon, 17 Oct 2022 15:58:09 +0800 Subject: [PATCH 058/174] Add k8s master table yang model (#12331) * Add k8s master table yang model Signed-off-by: Yun Li --- src/sonic-yang-models/doc/Configuration.md | 22 ++++++ src/sonic-yang-models/setup.py | 2 + .../tests/files/sample_config_db.json | 8 ++ .../tests/kubernetes_master.json | 27 +++++++ .../tests_config/kubernetes_master.json | 74 +++++++++++++++++++ .../yang-models/sonic-kubernetes_master.yang | 62 ++++++++++++++++ 6 files changed, 195 insertions(+) create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests/kubernetes_master.json create mode 
100644 src/sonic-yang-models/tests/yang_model_tests/tests_config/kubernetes_master.json create mode 100644 src/sonic-yang-models/yang-models/sonic-kubernetes_master.yang diff --git a/src/sonic-yang-models/doc/Configuration.md b/src/sonic-yang-models/doc/Configuration.md index 060751ab4d70..f30a2c8c5f23 100644 --- a/src/sonic-yang-models/doc/Configuration.md +++ b/src/sonic-yang-models/doc/Configuration.md @@ -28,6 +28,7 @@ Table of Contents * [DSCP_TO_TC_MAP](#dscp_to_tc_map) * [FLEX_COUNTER_TABLE](#flex_counter_table) * [KDUMP](#kdump) + * [Kubernetes Master](#kubernetes-master) * [L2 Neighbors](#l2-neighbors) * [Loopback Interface](#loopback-interface) * [LOSSLESS_TRAFFIC_PATTERN](#LOSSLESS_TRAFFIC_PATTERN) @@ -936,6 +937,27 @@ instance is supported in SONiC. ``` +### Kubernetes Master + +Kubernetes Master related configurations are stored in +**KUBERNETES_MASTER** table. These configurations are used mainly +for CTRMGR service. CTRMGR service will interactive with +kubernetes master according to these configurations. 
+ +``` +{ + "KUBERNETES_MASTER": { + "SERVER": { + "disable": "False", + "insecure": "True", + "ip": "k8s.apiserver.com", + "port": "6443" + } + } +} + +``` + ### L2 Neighbors The L2 neighbor and connection information can be configured in diff --git a/src/sonic-yang-models/setup.py b/src/sonic-yang-models/setup.py index db9fb9d4f56b..b620438451ac 100644 --- a/src/sonic-yang-models/setup.py +++ b/src/sonic-yang-models/setup.py @@ -112,6 +112,7 @@ def run(self): './yang-models/sonic-system-defaults.yang', './yang-models/sonic-interface.yang', './yang-models/sonic-kdump.yang', + './yang-models/sonic-kubernetes_master.yang', './yang-models/sonic-loopback-interface.yang', './yang-models/sonic-lossless-traffic-pattern.yang', './yang-models/sonic-mgmt_interface.yang', @@ -185,6 +186,7 @@ def run(self): './cvlyang-models/sonic-system-defaults.yang', './cvlyang-models/sonic-interface.yang', './cvlyang-models/sonic-kdump.yang', + './cvlyang-models/sonic-kubernetes_master.yang', './cvlyang-models/sonic-loopback-interface.yang', './cvlyang-models/sonic-mgmt_interface.yang', './cvlyang-models/sonic-mgmt_port.yang', diff --git a/src/sonic-yang-models/tests/files/sample_config_db.json b/src/sonic-yang-models/tests/files/sample_config_db.json index 79110704046a..9dec93b52037 100644 --- a/src/sonic-yang-models/tests/files/sample_config_db.json +++ b/src/sonic-yang-models/tests/files/sample_config_db.json @@ -2062,6 +2062,14 @@ "nexthop": "10.184.229.212", "nexthop-vrf": "default" } + }, + "KUBERNETES_MASTER": { + "SERVER": { + "disable": "True", + "insecure": "False", + "ip": "kubernetes.apiserver.com", + "port": "6443" + } } }, "SAMPLE_CONFIG_DB_UNKNOWN": { diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/kubernetes_master.json b/src/sonic-yang-models/tests/yang_model_tests/tests/kubernetes_master.json new file mode 100644 index 000000000000..6518f425da16 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/kubernetes_master.json @@ -0,0 +1,27 @@ 
+{ + "KUBERNETES_MASTER_WITH_CORRECT_VALUES_IP": { + "desc": "CONFIG KUBERNETES_MASTER TABLE WITH ALL THE CORRECT VALUES" + }, + "KUBERNETES_MASTER_WITH_CORRECT_VALUES_DNS": { + "desc": "CONFIG KUBERNETES_MASTER TABLE WITH ALL THE CORRECT VALUES" + }, + "KUBERNETES_MASTER_WITH_INVALID_BOOLEAN_TYPE" : { + "desc": "Referring invalid kubernetes_master boolean types.", + "eStrKey": "Pattern", + "eStr": ["false|true|False|True"] + }, + "KUBERNETES_MASTER_INVALID_PORT": { + "desc": "Configure invalid PORT in kubernetes_master.", + "eStrKey": "InvalidValue" + }, + "KUBERNETES_MASTER_INVALID_IP" : { + "desc": "Configure invalid IP in kubernetes_master.", + "eStrKey": "InvalidValue", + "eStr": ["ip"] + }, + "KUBERNETES_MASTER_INVALID_DNS" : { + "desc": "Configure invalid DNS in kubernetes_master.", + "eStrKey": "InvalidValue", + "eStr": ["ip"] + } +} diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/kubernetes_master.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/kubernetes_master.json new file mode 100644 index 000000000000..096cea0ee365 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/kubernetes_master.json @@ -0,0 +1,74 @@ +{ + "KUBERNETES_MASTER_WITH_CORRECT_VALUES_IP": { + "sonic-kubernetes_master:sonic-kubernetes_master": { + "sonic-kubernetes_master:KUBERNETES_MASTER": { + "sonic-kubernetes_master:SERVER": { + "ip": "192.168.122.11", + "port": "6443", + "disable": "False", + "insecure": "True" + } + } + } + }, + "KUBERNETES_MASTER_WITH_CORRECT_VALUES_DNS": { + "sonic-kubernetes_master:sonic-kubernetes_master": { + "sonic-kubernetes_master:KUBERNETES_MASTER": { + "sonic-kubernetes_master:SERVER": { + "ip": "k8s.apiserver.com", + "port": "6443", + "disable": "False", + "insecure": "True" + } + } + } + }, + "KUBERNETES_MASTER_WITH_INVALID_BOOLEAN_TYPE": { + "sonic-kubernetes_master:sonic-kubernetes_master": { + "sonic-kubernetes_master:KUBERNETES_MASTER": { + "sonic-kubernetes_master:SERVER": { + "ip": 
"192.168.122.11", + "port": "6443", + "disable": "FAlse", + "insecure": "True" + } + } + } + }, + "KUBERNETES_MASTER_INVALID_PORT": { + "sonic-kubernetes_master:sonic-kubernetes_master": { + "sonic-kubernetes_master:KUBERNETES_MASTER": { + "sonic-kubernetes_master:SERVER": { + "ip": "192.168.122.11", + "port": "65536", + "disable": "False", + "insecure": "True" + } + } + } + }, + "KUBERNETES_MASTER_INVALID_IP": { + "sonic-kubernetes_master:sonic-kubernetes_master": { + "sonic-kubernetes_master:KUBERNETES_MASTER": { + "sonic-kubernetes_master:SERVER": { + "ip": "2001:aa:aa:aa", + "port": "6443", + "disable": "False", + "insecure": "True" + } + } + } + }, + "KUBERNETES_MASTER_INVALID_DNS": { + "sonic-kubernetes_master:sonic-kubernetes_master": { + "sonic-kubernetes_master:KUBERNETES_MASTER": { + "sonic-kubernetes_master:SERVER": { + "ip": "k8s@.apiserver.com", + "port": "6443", + "disable": "False", + "insecure": "True" + } + } + } + } +} diff --git a/src/sonic-yang-models/yang-models/sonic-kubernetes_master.yang b/src/sonic-yang-models/yang-models/sonic-kubernetes_master.yang new file mode 100644 index 000000000000..6ac153c606d0 --- /dev/null +++ b/src/sonic-yang-models/yang-models/sonic-kubernetes_master.yang @@ -0,0 +1,62 @@ +module sonic-kubernetes_master { + + yang-version 1.1; + + namespace "http://github.com/Azure/sonic-kubernetes_master"; + prefix kubernetes_master; + + import ietf-inet-types { + prefix inet; + } + + import sonic-types { + prefix stypes; + } + + description "KUBERNETES_MASTER YANG Module for SONiC OS"; + + revision 2022-10-09 { + description "First Revision"; + } + + container sonic-kubernetes_master { + + container KUBERNETES_MASTER { + + description "KUBERNETES_MASTER part of config_db.json"; + + container SERVER { + + leaf ip { + description "Kubernetes api server endpoint(an IP address or a DNS + domain name.)"; + type inet:host; + } + + leaf port { + description "Kubernetes api server port"; + type inet:port-number; + default 6443; + } + 
+ leaf disable { + description "This configuration identicates whether disable kubernetes"; + type stypes:boolean_type; + default "false"; + } + + leaf insecure { + description "This configuration identicates it will download kubernetes + CA by http other than https"; + type stypes:boolean_type; + default "true"; + } + + } + /* end of container SERVER */ + } + /* end of container KUBERNETES_MASTER */ + } + /* end of top level container */ +} +/* end of module sonic-kubernetes_master */ From a750930ba90ded08b6ef0838a681b2376e4fc4ff Mon Sep 17 00:00:00 2001 From: Renuka Manavalan <47282725+renukamanavalan@users.noreply.github.com> Date: Mon, 17 Oct 2022 09:01:38 -0700 Subject: [PATCH 059/174] * 176c385 (HEAD, origin/master, origin/HEAD, master) Streaming events URL support "not to use cac (#12394) he" (#45) * 4f45e3a Update gnmi_cli (#5) (#44) --- src/sonic-gnmi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-gnmi b/src/sonic-gnmi index 194ecd0896a0..176c385b885b 160000 --- a/src/sonic-gnmi +++ b/src/sonic-gnmi @@ -1 +1 @@ -Subproject commit 194ecd0896a0e698e22dc2320e5087e9b7de3d06 +Subproject commit 176c385b885bd27eb865f8e9f8023f9a11e9efc6 From 7ba1d25757eec2150f16ff90a40e8155a115827c Mon Sep 17 00:00:00 2001 From: Sambath Kumar Balasubramanian <63021927+skbarista@users.noreply.github.com> Date: Mon, 17 Oct 2022 11:15:19 -0700 Subject: [PATCH 060/174] Add 36 port 100g sku for x86_64-arista_7800r3a_36d series of linecards. (#11813) Add 36 port 100g sku for x86_64-arista_7800r3a_36d series of linecards. 
--- .../Arista-7800R3A-36D-C36 | 1 + .../0/context_config.json | 25 + .../0/j2p-a7800r3a-36d-36x400G.config.bcm | 985 ++++++++++++++++++ .../Arista-7800R3A-36D2-C36/0/port_config.ini | 21 + .../Arista-7800R3A-36D2-C36/0/sai.profile | 2 + .../1/context_config.json | 1 + .../1/j2p-a7800r3a-36d-36x400G.config.bcm | 984 +++++++++++++++++ .../Arista-7800R3A-36D2-C36/1/port_config.ini | 21 + .../Arista-7800R3A-36D2-C36/1/sai.profile | 1 + .../Arista-7800R3A-36DM2-C36 | 1 + .../Arista-7800R3A-36P-C36 | 1 + .../Arista-7800R3AK-36D2-C36 | 1 + .../Arista-7800R3AK-36DM2-C36 | 1 + 13 files changed, 2045 insertions(+) create mode 120000 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D-C36 create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/0/context_config.json create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/0/j2p-a7800r3a-36d-36x400G.config.bcm create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/0/port_config.ini create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/0/sai.profile create mode 120000 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/1/context_config.json create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/1/j2p-a7800r3a-36d-36x400G.config.bcm create mode 100644 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/1/port_config.ini create mode 120000 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/1/sai.profile create mode 120000 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36DM2-C36 create mode 120000 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36P-C36 create mode 120000 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3AK-36D2-C36 create mode 120000 device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3AK-36DM2-C36 diff --git 
a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D-C36 b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D-C36 new file mode 120000 index 000000000000..41ebc98e1c4e --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D-C36 @@ -0,0 +1 @@ +Arista-7800R3A-36D2-C36 \ No newline at end of file diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/0/context_config.json b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/0/context_config.json new file mode 100644 index 000000000000..2c126e71899e --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/0/context_config.json @@ -0,0 +1,25 @@ +{ + "CONTEXTS": [ + { + "guid" : 0, + "name" : "syncd", + "dbAsic" : "ASIC_DB", + "dbCounters" : "COUNTERS_DB", + "dbFlex": "FLEX_COUNTER_DB", + "dbState" : "STATE_DB", + "zmq_enable": false, + "zmq_endpoint": "tcp://127.0.0.1:5555", + "zmq_ntf_endpoint": "tcp://127.0.0.1:5556", + "switches": [ + { + "index" : 0, + "hwinfo" : "06:00.0" + }, + { + "index" : 1, + "hwinfo" : "07:00.0" + } + ] + } + ] +} diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/0/j2p-a7800r3a-36d-36x400G.config.bcm b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/0/j2p-a7800r3a-36d-36x400G.config.bcm new file mode 100644 index 000000000000..9be5d8e836f3 --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/0/j2p-a7800r3a-36d-36x400G.config.bcm @@ -0,0 +1,985 @@ +soc_family=BCM8885X +system_ref_core_clock_khz=1600000 + +dpp_db_path=/usr/share/bcm/db + +#################################################### +##Reference applications related properties - Start +#################################################### + +## PMF small EXEM connected stage: +# Options: IPMF2 - Ingress PMF 2 stage can perform small EXEM lookups. +# IPMF3 - Ingress PMF 3 stage can perform small EXEM lookups. 
+## PMF small EXEM connected stage: +# Options: IPMF2 - Ingress PMF 2 stage can perform small EXEM lookups. +# IPMF3 - Ingress PMF 3 stage can perform small EXEM lookups. +pmf_sexem3_stage=IPMF2 + +#################################################### +##Reference applications related properties - End +#################################################### + +# Jericho2-mode (description 0x1 used for Jericho 2 mode) +system_headers_mode=1 + +# HW mode to support 1024 16-member system wide LAGs +trunk_group_max_members=16 + +# Disable link-training +port_init_cl72=0 + +###Default interfaces for Jericho2Plus +#CPU interfaces +ucode_port_0=CPU.0:core_0.0 +ucode_port_200=CPU.8:core_1.200 +ucode_port_201=CPU.16:core_0.201 +ucode_port_202=CPU.24:core_1.202 +ucode_port_203=CPU.32:core_0.203 + +#NIF ETH interfaces on device +ucode_port_1=CGE18:core_1.1 +ucode_port_2=CGE20:core_1.2 +ucode_port_3=CGE22:core_1.3 +ucode_port_4=CGE24:core_1.4 +ucode_port_5=CGE26:core_1.5 +ucode_port_6=CGE28:core_1.6 +ucode_port_7=CGE30:core_1.7 +ucode_port_8=CGE32:core_1.8 +ucode_port_9=CGE34:core_1.9 + +ucode_port_10=CGE16:core_0.10 +ucode_port_11=CGE14:core_0.11 +ucode_port_12=CGE12:core_0.12 +ucode_port_13=CGE10:core_0.13 +ucode_port_14=CGE8:core_0.14 +ucode_port_15=CGE6:core_0.15 +ucode_port_16=CGE4:core_0.16 +ucode_port_17=CGE2:core_0.17 +ucode_port_18=CGE0:core_0.18 + +#NIF default speeds +port_init_speed_xe=10000 +port_init_speed_xl=40000 +port_init_speed_le=50000 +port_init_speed_ce=100000 +port_init_speed_cc=200000 +port_init_speed_cd=400000 +port_init_speed_il=10312 + +port_priorities=8 + +#special ports +ucode_port_240=OLP:core_0.240 + +# NIF lane mapping +lane_to_serdes_map_nif_lane0=rx3:tx4 +lane_to_serdes_map_nif_lane1=rx6:tx1 +lane_to_serdes_map_nif_lane2=rx7:tx5 +lane_to_serdes_map_nif_lane3=rx4:tx7 +lane_to_serdes_map_nif_lane4=rx1:tx2 +lane_to_serdes_map_nif_lane5=rx0:tx0 +lane_to_serdes_map_nif_lane6=rx5:tx3 +lane_to_serdes_map_nif_lane7=rx2:tx6 
+lane_to_serdes_map_nif_lane8=rx10:tx11 +lane_to_serdes_map_nif_lane9=rx8:tx8 +lane_to_serdes_map_nif_lane10=rx14:tx12 +lane_to_serdes_map_nif_lane11=rx15:tx15 +lane_to_serdes_map_nif_lane12=rx13:tx10 +lane_to_serdes_map_nif_lane13=rx9:tx9 +lane_to_serdes_map_nif_lane14=rx11:tx13 +lane_to_serdes_map_nif_lane15=rx12:tx14 +lane_to_serdes_map_nif_lane16=rx16:tx17 +lane_to_serdes_map_nif_lane17=rx19:tx21 +lane_to_serdes_map_nif_lane18=rx21:tx18 +lane_to_serdes_map_nif_lane19=rx18:tx16 +lane_to_serdes_map_nif_lane20=rx17:tx23 +lane_to_serdes_map_nif_lane21=rx20:tx22 +lane_to_serdes_map_nif_lane22=rx22:tx20 +lane_to_serdes_map_nif_lane23=rx23:tx19 +lane_to_serdes_map_nif_lane24=rx26:tx28 +lane_to_serdes_map_nif_lane25=rx29:tx31 +lane_to_serdes_map_nif_lane26=rx31:tx29 +lane_to_serdes_map_nif_lane27=rx28:tx27 +lane_to_serdes_map_nif_lane28=rx25:tx25 +lane_to_serdes_map_nif_lane29=rx24:tx30 +lane_to_serdes_map_nif_lane30=rx30:tx24 +lane_to_serdes_map_nif_lane31=rx27:tx26 +lane_to_serdes_map_nif_lane32=rx32:tx39 +lane_to_serdes_map_nif_lane33=rx33:tx38 +lane_to_serdes_map_nif_lane34=rx38:tx32 +lane_to_serdes_map_nif_lane35=rx39:tx33 +lane_to_serdes_map_nif_lane36=rx35:tx37 +lane_to_serdes_map_nif_lane37=rx34:tx36 +lane_to_serdes_map_nif_lane38=rx36:tx34 +lane_to_serdes_map_nif_lane39=rx37:tx35 +lane_to_serdes_map_nif_lane40=rx40:tx41 +lane_to_serdes_map_nif_lane41=rx43:tx45 +lane_to_serdes_map_nif_lane42=rx45:tx42 +lane_to_serdes_map_nif_lane43=rx42:tx40 +lane_to_serdes_map_nif_lane44=rx41:tx47 +lane_to_serdes_map_nif_lane45=rx44:tx46 +lane_to_serdes_map_nif_lane46=rx46:tx44 +lane_to_serdes_map_nif_lane47=rx47:tx43 +lane_to_serdes_map_nif_lane48=rx50:tx52 +lane_to_serdes_map_nif_lane49=rx53:tx55 +lane_to_serdes_map_nif_lane50=rx55:tx53 +lane_to_serdes_map_nif_lane51=rx52:tx51 +lane_to_serdes_map_nif_lane52=rx49:tx49 +lane_to_serdes_map_nif_lane53=rx48:tx54 +lane_to_serdes_map_nif_lane54=rx54:tx48 +lane_to_serdes_map_nif_lane55=rx51:tx50 
+lane_to_serdes_map_nif_lane56=rx56:tx63 +lane_to_serdes_map_nif_lane57=rx57:tx62 +lane_to_serdes_map_nif_lane58=rx62:tx56 +lane_to_serdes_map_nif_lane59=rx63:tx57 +lane_to_serdes_map_nif_lane60=rx59:tx61 +lane_to_serdes_map_nif_lane61=rx58:tx60 +lane_to_serdes_map_nif_lane62=rx60:tx58 +lane_to_serdes_map_nif_lane63=rx61:tx59 +lane_to_serdes_map_nif_lane64=rx64:tx65 +lane_to_serdes_map_nif_lane65=rx67:tx69 +lane_to_serdes_map_nif_lane66=rx69:tx66 +lane_to_serdes_map_nif_lane67=rx66:tx64 +lane_to_serdes_map_nif_lane68=rx65:tx71 +lane_to_serdes_map_nif_lane69=rx68:tx70 +lane_to_serdes_map_nif_lane70=rx70:tx68 +lane_to_serdes_map_nif_lane71=rx71:tx67 +lane_to_serdes_map_nif_lane72=rx79:tx74 +lane_to_serdes_map_nif_lane73=rx76:tx75 +lane_to_serdes_map_nif_lane74=rx72:tx76 +lane_to_serdes_map_nif_lane75=rx74:tx73 +lane_to_serdes_map_nif_lane76=rx77:tx79 +lane_to_serdes_map_nif_lane77=rx78:tx78 +lane_to_serdes_map_nif_lane78=rx73:tx77 +lane_to_serdes_map_nif_lane79=rx75:tx72 +lane_to_serdes_map_nif_lane80=rx86:tx86 +lane_to_serdes_map_nif_lane81=rx83:tx87 +lane_to_serdes_map_nif_lane82=rx82:tx81 +lane_to_serdes_map_nif_lane83=rx85:tx80 +lane_to_serdes_map_nif_lane84=rx87:tx85 +lane_to_serdes_map_nif_lane85=rx84:tx84 +lane_to_serdes_map_nif_lane86=rx80:tx82 +lane_to_serdes_map_nif_lane87=rx81:tx83 +lane_to_serdes_map_nif_lane88=rx95:tx90 +lane_to_serdes_map_nif_lane89=rx92:tx88 +lane_to_serdes_map_nif_lane90=rx88:tx92 +lane_to_serdes_map_nif_lane91=rx91:tx95 +lane_to_serdes_map_nif_lane92=rx94:tx89 +lane_to_serdes_map_nif_lane93=rx93:tx91 +lane_to_serdes_map_nif_lane94=rx89:tx93 +lane_to_serdes_map_nif_lane95=rx90:tx94 +lane_to_serdes_map_nif_lane96=rx103:tx97 +lane_to_serdes_map_nif_lane97=rx100:tx96 +lane_to_serdes_map_nif_lane98=rx96:tx100 +lane_to_serdes_map_nif_lane99=rx99:tx103 +lane_to_serdes_map_nif_lane100=rx102:tx99 +lane_to_serdes_map_nif_lane101=rx101:tx98 +lane_to_serdes_map_nif_lane102=rx97:tx101 +lane_to_serdes_map_nif_lane103=rx98:tx102 
+lane_to_serdes_map_nif_lane104=rx110:tx107 +lane_to_serdes_map_nif_lane105=rx108:tx105 +lane_to_serdes_map_nif_lane106=rx104:tx108 +lane_to_serdes_map_nif_lane107=rx107:tx110 +lane_to_serdes_map_nif_lane108=rx111:tx106 +lane_to_serdes_map_nif_lane109=rx109:tx104 +lane_to_serdes_map_nif_lane110=rx105:tx109 +lane_to_serdes_map_nif_lane111=rx106:tx111 +lane_to_serdes_map_nif_lane112=rx119:tx114 +lane_to_serdes_map_nif_lane113=rx116:tx112 +lane_to_serdes_map_nif_lane114=rx112:tx116 +lane_to_serdes_map_nif_lane115=rx115:tx119 +lane_to_serdes_map_nif_lane116=rx118:tx113 +lane_to_serdes_map_nif_lane117=rx117:tx115 +lane_to_serdes_map_nif_lane118=rx113:tx117 +lane_to_serdes_map_nif_lane119=rx114:tx118 +lane_to_serdes_map_nif_lane120=rx127:tx121 +lane_to_serdes_map_nif_lane121=rx124:tx120 +lane_to_serdes_map_nif_lane122=rx120:tx124 +lane_to_serdes_map_nif_lane123=rx123:tx127 +lane_to_serdes_map_nif_lane124=rx126:tx123 +lane_to_serdes_map_nif_lane125=rx125:tx122 +lane_to_serdes_map_nif_lane126=rx121:tx125 +lane_to_serdes_map_nif_lane127=rx122:tx126 +lane_to_serdes_map_nif_lane128=rx134:tx131 +lane_to_serdes_map_nif_lane129=rx132:tx129 +lane_to_serdes_map_nif_lane130=rx128:tx132 +lane_to_serdes_map_nif_lane131=rx131:tx134 +lane_to_serdes_map_nif_lane132=rx135:tx130 +lane_to_serdes_map_nif_lane133=rx133:tx128 +lane_to_serdes_map_nif_lane134=rx129:tx133 +lane_to_serdes_map_nif_lane135=rx130:tx135 +lane_to_serdes_map_nif_lane136=rx143:tx138 +lane_to_serdes_map_nif_lane137=rx140:tx136 +lane_to_serdes_map_nif_lane138=rx136:tx140 +lane_to_serdes_map_nif_lane139=rx139:tx143 +lane_to_serdes_map_nif_lane140=rx142:tx137 +lane_to_serdes_map_nif_lane141=rx141:tx139 +lane_to_serdes_map_nif_lane142=rx137:tx141 +lane_to_serdes_map_nif_lane143=rx138:tx142 + +######################### +### High Availability ### +######################### + +sw_state_max_size=750000000 + +#location of warmboot NV memory +#Allowed options for dnx are - 3:external storage in filesystem 4:driver will save the 
state directly in shared memory +stable_location=4 + +# Note that each unit should have a unique filename and that adapter does not play well with tmp and dev/shm folders. +stable_filename=/dev/shm/warmboot_data_0 +stable_filename.1=/dev/shm/warmboot_data_1 +stable_filename.2=/dev/shm/warmboot_data_2 + +#Maximum size for NVM used for WB storage, must be larger than sw_state_max_size.BCM8885X +stable_size=800000000 + +######################### +######################### +######################### + +tm_port_header_type_in_0=INJECTED_2_PP +tm_port_header_type_out_0=CPU + +tm_port_header_type_in_200=INJECTED_2_PP +tm_port_header_type_out_200=ETH +tm_port_header_type_in_201=INJECTED_2_PP +tm_port_header_type_out_201=ETH +tm_port_header_type_in_202=INJECTED_2_PP +tm_port_header_type_out_202=ETH +tm_port_header_type_in_203=INJECTED_2_PP +tm_port_header_type_out_203=ETH + +### SAT +## Enable SAT Interface. 0 - Disable, 1 - Enable (Default) +sat_enable=1 +ucode_port_218=SAT:core_0.218 +tm_port_header_type_out_218=CPU +tm_port_header_type_in_218=INJECTED_2 +ucode_port_219=SAT:core_1.219 +tm_port_header_type_out_219=CPU +tm_port_header_type_in_219=INJECTED_2 +port_init_speed_sat=400000 + +### RCY +sai_recycle_port_lane_base=0 +ucode_port_221=RCY.21:core_0.221 +ucode_port_222=RCY.22:core_1.222 +tm_port_header_type_out_221=ETH +tm_port_header_type_in_221=ETH +tm_port_header_type_out_222=ETH +tm_port_header_type_in_222=ETH +port_init_speed_221=400000 +port_init_speed_222=400000 + +#OLP port +tm_port_header_type_in_240=INJECTED_2 +tm_port_header_type_out_240=RAW + +# Set statically the region mode per region id +dtm_flow_mapping_mode_region_257=3 +dtm_flow_mapping_mode_region_258=3 +dtm_flow_mapping_mode_region_259=3 +dtm_flow_mapping_mode_region_260=3 +dtm_flow_mapping_mode_region_261=3 +dtm_flow_mapping_mode_region_262=3 +dtm_flow_mapping_mode_region_263=3 +dtm_flow_mapping_mode_region_264=3 +dtm_flow_mapping_mode_region_265=3 +dtm_flow_mapping_mode_region_266=7 
+dtm_flow_mapping_mode_region_267=3 +dtm_flow_mapping_mode_region_268=3 +dtm_flow_mapping_mode_region_269=3 +dtm_flow_mapping_mode_region_270=3 +dtm_flow_mapping_mode_region_271=3 +dtm_flow_mapping_mode_region_272=3 +dtm_flow_mapping_mode_region_273=3 +dtm_flow_mapping_mode_region_274=3 +dtm_flow_mapping_mode_region_275=3 +dtm_flow_mapping_mode_region_276=3 +dtm_flow_mapping_mode_region_277=3 +dtm_flow_mapping_mode_region_278=3 +dtm_flow_mapping_mode_region_279=3 +dtm_flow_mapping_mode_region_280=3 +dtm_flow_mapping_mode_region_281=3 +dtm_flow_mapping_mode_region_282=3 +dtm_flow_mapping_mode_region_283=3 +dtm_flow_mapping_mode_region_284=3 +dtm_flow_mapping_mode_region_285=3 +dtm_flow_mapping_mode_region_286=3 +dtm_flow_mapping_mode_region_287=3 + +## Configure number of symmetric cores each region supports ## +dtm_flow_nof_remote_cores_region_1=2 +dtm_flow_nof_remote_cores_region_2=2 +dtm_flow_nof_remote_cores_region_3=2 +dtm_flow_nof_remote_cores_region_4=2 +dtm_flow_nof_remote_cores_region_5=2 +dtm_flow_nof_remote_cores_region_6=2 +dtm_flow_nof_remote_cores_region_7=2 +dtm_flow_nof_remote_cores_region_8=2 +dtm_flow_nof_remote_cores_region_9=2 +dtm_flow_nof_remote_cores_region_10=2 +dtm_flow_nof_remote_cores_region_11=2 +dtm_flow_nof_remote_cores_region_12=2 +dtm_flow_nof_remote_cores_region_13=2 +dtm_flow_nof_remote_cores_region_14=2 +dtm_flow_nof_remote_cores_region_15=2 +dtm_flow_nof_remote_cores_region_16=2 +dtm_flow_nof_remote_cores_region_17=2 +dtm_flow_nof_remote_cores_region_18=2 +dtm_flow_nof_remote_cores_region_19=2 +dtm_flow_nof_remote_cores_region_20=2 +dtm_flow_nof_remote_cores_region_21=2 +dtm_flow_nof_remote_cores_region_22=2 +dtm_flow_nof_remote_cores_region_23=2 +dtm_flow_nof_remote_cores_region_24=2 +dtm_flow_nof_remote_cores_region_25=2 +dtm_flow_nof_remote_cores_region_26=2 +dtm_flow_nof_remote_cores_region_27=2 +dtm_flow_nof_remote_cores_region_28=2 +dtm_flow_nof_remote_cores_region_29=2 +dtm_flow_nof_remote_cores_region_30=2 
+dtm_flow_nof_remote_cores_region_31=2
+dtm_flow_nof_remote_cores_region_32=2
+dtm_flow_nof_remote_cores_region_33=2
+dtm_flow_nof_remote_cores_region_34=2
+dtm_flow_nof_remote_cores_region_35=2
+dtm_flow_nof_remote_cores_region_36=2
+dtm_flow_nof_remote_cores_region_37=2
+dtm_flow_nof_remote_cores_region_38=2
+dtm_flow_nof_remote_cores_region_39=2
+dtm_flow_nof_remote_cores_region_40=2
+dtm_flow_nof_remote_cores_region_41=2
+dtm_flow_nof_remote_cores_region_42=2
+dtm_flow_nof_remote_cores_region_43=2
+dtm_flow_nof_remote_cores_region_44=2
+dtm_flow_nof_remote_cores_region_45=2
+dtm_flow_nof_remote_cores_region_46=2
+dtm_flow_nof_remote_cores_region_47=2
+dtm_flow_nof_remote_cores_region_48=2
+dtm_flow_nof_remote_cores_region_49=2
+dtm_flow_nof_remote_cores_region_50=2
+dtm_flow_nof_remote_cores_region_51=2
+dtm_flow_nof_remote_cores_region_52=2
+dtm_flow_nof_remote_cores_region_53=2
+dtm_flow_nof_remote_cores_region_54=2
+dtm_flow_nof_remote_cores_region_55=2
+dtm_flow_nof_remote_cores_region_56=2
+dtm_flow_nof_remote_cores_region_57=2
+dtm_flow_nof_remote_cores_region_58=2
+dtm_flow_nof_remote_cores_region_59=2
+dtm_flow_nof_remote_cores_region_60=2
+
+### MDB configuration ###
+mdb_profile=balanced-exem
+
+### Descriptor-DMA configuration ###
+dma_desc_aggregator_chain_length_max=1000
+dma_desc_aggregator_buff_size_kb=100
+dma_desc_aggregator_timeout_usec=1000
+dma_desc_aggregator_enable_specific_MDB_LPM=1
+dma_desc_aggregator_enable_specific_MDB_FEC=1
+
+### Outlif configuration ###
+outlif_logical_to_physical_phase_map_1=S1
+outlif_logical_to_physical_phase_map_2=L1
+outlif_logical_to_physical_phase_map_3=XL
+outlif_logical_to_physical_phase_map_4=L2
+outlif_logical_to_physical_phase_map_5=M1
+outlif_logical_to_physical_phase_map_6=M2
+outlif_logical_to_physical_phase_map_7=M3
+outlif_logical_to_physical_phase_map_8=S2
+
+### Outlif data granularity configuration ###
+outlif_physical_phase_data_granularity_S1=60
+outlif_physical_phase_data_granularity_S2=60
+outlif_physical_phase_data_granularity_M1=60 +outlif_physical_phase_data_granularity_M2=60 +outlif_physical_phase_data_granularity_M3=60 +outlif_physical_phase_data_granularity_L1=60 +outlif_physical_phase_data_granularity_L2=60 +outlif_physical_phase_data_granularity_XL=60 + +### Fabric configuration ### +# Enable link-training +port_init_cl72_sfi=1 +serdes_lane_config_cl72_auto_polarity_en=0 +serdes_lane_config_cl72_auto_polarity_en_sfi=1 +serdes_lane_config_cl72_restart_timeout_en=0 + +#SFI speed rate +port_init_speed_fabric=53125 + +## Fabric transmission mode +# Set the Connect mode to the Fabric +# Options: FE - presence of a Fabric device (single stage) +# SINGLE_FAP - stand-alone device +# MESH - devices in Mesh +# Note: If 'diag_chassis' is on, value will be override in dnx.soc +# to be FE instead of SINGLE_FAP. +fabric_connect_mode=FE + +fabric_logical_port_base=512 + +# Fabric lane mapping +lane_to_serdes_map_fabric_lane0=rx0:tx0 +lane_to_serdes_map_fabric_lane1=rx1:tx1 +lane_to_serdes_map_fabric_lane2=rx2:tx2 +lane_to_serdes_map_fabric_lane3=rx3:tx3 +lane_to_serdes_map_fabric_lane4=rx4:tx4 +lane_to_serdes_map_fabric_lane5=rx5:tx5 +lane_to_serdes_map_fabric_lane6=rx6:tx6 +lane_to_serdes_map_fabric_lane7=rx7:tx7 +lane_to_serdes_map_fabric_lane8=rx8:tx10 +lane_to_serdes_map_fabric_lane9=rx9:tx11 +lane_to_serdes_map_fabric_lane10=rx10:tx9 +lane_to_serdes_map_fabric_lane11=rx11:tx8 +lane_to_serdes_map_fabric_lane12=rx12:tx12 +lane_to_serdes_map_fabric_lane13=rx13:tx15 +lane_to_serdes_map_fabric_lane14=rx14:tx14 +lane_to_serdes_map_fabric_lane15=rx15:tx13 +lane_to_serdes_map_fabric_lane16=rx16:tx17 +lane_to_serdes_map_fabric_lane17=rx17:tx18 +lane_to_serdes_map_fabric_lane18=rx18:tx16 +lane_to_serdes_map_fabric_lane19=rx19:tx19 +lane_to_serdes_map_fabric_lane20=rx20:tx21 +lane_to_serdes_map_fabric_lane21=rx21:tx23 +lane_to_serdes_map_fabric_lane22=rx22:tx20 +lane_to_serdes_map_fabric_lane23=rx23:tx22 +lane_to_serdes_map_fabric_lane24=rx24:tx26 
+lane_to_serdes_map_fabric_lane25=rx25:tx24 +lane_to_serdes_map_fabric_lane26=rx26:tx25 +lane_to_serdes_map_fabric_lane27=rx27:tx27 +lane_to_serdes_map_fabric_lane28=rx28:tx31 +lane_to_serdes_map_fabric_lane29=rx29:tx30 +lane_to_serdes_map_fabric_lane30=rx30:tx29 +lane_to_serdes_map_fabric_lane31=rx31:tx28 +lane_to_serdes_map_fabric_lane32=rx32:tx32 +lane_to_serdes_map_fabric_lane33=rx33:tx33 +lane_to_serdes_map_fabric_lane34=rx34:tx34 +lane_to_serdes_map_fabric_lane35=rx35:tx35 +lane_to_serdes_map_fabric_lane36=rx36:tx36 +lane_to_serdes_map_fabric_lane37=rx37:tx37 +lane_to_serdes_map_fabric_lane38=rx38:tx38 +lane_to_serdes_map_fabric_lane39=rx39:tx39 +lane_to_serdes_map_fabric_lane40=rx40:tx43 +lane_to_serdes_map_fabric_lane41=rx41:tx42 +lane_to_serdes_map_fabric_lane42=rx42:tx41 +lane_to_serdes_map_fabric_lane43=rx43:tx40 +lane_to_serdes_map_fabric_lane44=rx44:tx47 +lane_to_serdes_map_fabric_lane45=rx45:tx46 +lane_to_serdes_map_fabric_lane46=rx46:tx45 +lane_to_serdes_map_fabric_lane47=rx47:tx44 +lane_to_serdes_map_fabric_lane48=rx48:tx48 +lane_to_serdes_map_fabric_lane49=rx49:tx49 +lane_to_serdes_map_fabric_lane50=rx50:tx50 +lane_to_serdes_map_fabric_lane51=rx51:tx51 +lane_to_serdes_map_fabric_lane52=rx52:tx52 +lane_to_serdes_map_fabric_lane53=rx53:tx53 +lane_to_serdes_map_fabric_lane54=rx54:tx54 +lane_to_serdes_map_fabric_lane55=rx55:tx55 +lane_to_serdes_map_fabric_lane56=rx56:tx59 +lane_to_serdes_map_fabric_lane57=rx57:tx58 +lane_to_serdes_map_fabric_lane58=rx58:tx57 +lane_to_serdes_map_fabric_lane59=rx59:tx56 +lane_to_serdes_map_fabric_lane60=rx60:tx63 +lane_to_serdes_map_fabric_lane61=rx61:tx62 +lane_to_serdes_map_fabric_lane62=rx62:tx61 +lane_to_serdes_map_fabric_lane63=rx63:tx60 +lane_to_serdes_map_fabric_lane64=rx64:tx64 +lane_to_serdes_map_fabric_lane65=rx65:tx65 +lane_to_serdes_map_fabric_lane66=rx66:tx66 +lane_to_serdes_map_fabric_lane67=rx67:tx67 +lane_to_serdes_map_fabric_lane68=rx68:tx68 +lane_to_serdes_map_fabric_lane69=rx69:tx69 
+lane_to_serdes_map_fabric_lane70=rx70:tx70 +lane_to_serdes_map_fabric_lane71=rx71:tx71 +lane_to_serdes_map_fabric_lane72=rx72:tx75 +lane_to_serdes_map_fabric_lane73=rx73:tx74 +lane_to_serdes_map_fabric_lane74=rx74:tx73 +lane_to_serdes_map_fabric_lane75=rx75:tx72 +lane_to_serdes_map_fabric_lane76=rx76:tx79 +lane_to_serdes_map_fabric_lane77=rx77:tx78 +lane_to_serdes_map_fabric_lane78=rx78:tx77 +lane_to_serdes_map_fabric_lane79=rx79:tx76 +lane_to_serdes_map_fabric_lane80=rx80:tx80 +lane_to_serdes_map_fabric_lane81=rx81:tx81 +lane_to_serdes_map_fabric_lane82=rx82:tx83 +lane_to_serdes_map_fabric_lane83=rx83:tx82 +lane_to_serdes_map_fabric_lane84=rx84:tx85 +lane_to_serdes_map_fabric_lane85=rx85:tx86 +lane_to_serdes_map_fabric_lane86=rx86:tx84 +lane_to_serdes_map_fabric_lane87=rx87:tx87 +lane_to_serdes_map_fabric_lane88=rx88:tx90 +lane_to_serdes_map_fabric_lane89=rx89:tx88 +lane_to_serdes_map_fabric_lane90=rx90:tx91 +lane_to_serdes_map_fabric_lane91=rx91:tx89 +lane_to_serdes_map_fabric_lane92=rx92:tx93 +lane_to_serdes_map_fabric_lane93=rx93:tx92 +lane_to_serdes_map_fabric_lane94=rx94:tx94 +lane_to_serdes_map_fabric_lane95=rx95:tx95 +lane_to_serdes_map_fabric_lane96=rx96:tx96 +lane_to_serdes_map_fabric_lane97=rx97:tx97 +lane_to_serdes_map_fabric_lane98=rx98:tx98 +lane_to_serdes_map_fabric_lane99=rx99:tx99 +lane_to_serdes_map_fabric_lane100=rx100:tx100 +lane_to_serdes_map_fabric_lane101=rx101:tx101 +lane_to_serdes_map_fabric_lane102=rx102:tx102 +lane_to_serdes_map_fabric_lane103=rx103:tx103 +lane_to_serdes_map_fabric_lane104=rx104:tx105 +lane_to_serdes_map_fabric_lane105=rx105:tx106 +lane_to_serdes_map_fabric_lane106=rx106:tx107 +lane_to_serdes_map_fabric_lane107=rx107:tx104 +lane_to_serdes_map_fabric_lane108=rx108:tx111 +lane_to_serdes_map_fabric_lane109=rx109:tx109 +lane_to_serdes_map_fabric_lane110=rx110:tx110 +lane_to_serdes_map_fabric_lane111=rx111:tx108 +lane_to_serdes_map_fabric_lane112=rx112:tx114 +lane_to_serdes_map_fabric_lane113=rx113:tx113 
+lane_to_serdes_map_fabric_lane114=rx114:tx112 +lane_to_serdes_map_fabric_lane115=rx115:tx115 +lane_to_serdes_map_fabric_lane116=rx116:tx117 +lane_to_serdes_map_fabric_lane117=rx117:tx116 +lane_to_serdes_map_fabric_lane118=rx118:tx119 +lane_to_serdes_map_fabric_lane119=rx119:tx118 +lane_to_serdes_map_fabric_lane120=rx120:tx123 +lane_to_serdes_map_fabric_lane121=rx121:tx120 +lane_to_serdes_map_fabric_lane122=rx122:tx122 +lane_to_serdes_map_fabric_lane123=rx123:tx121 +lane_to_serdes_map_fabric_lane124=rx124:tx127 +lane_to_serdes_map_fabric_lane125=rx125:tx125 +lane_to_serdes_map_fabric_lane126=rx126:tx124 +lane_to_serdes_map_fabric_lane127=rx127:tx126 +lane_to_serdes_map_fabric_lane128=rx128:tx128 +lane_to_serdes_map_fabric_lane129=rx129:tx129 +lane_to_serdes_map_fabric_lane130=rx130:tx130 +lane_to_serdes_map_fabric_lane131=rx131:tx131 +lane_to_serdes_map_fabric_lane132=rx132:tx132 +lane_to_serdes_map_fabric_lane133=rx133:tx133 +lane_to_serdes_map_fabric_lane134=rx134:tx134 +lane_to_serdes_map_fabric_lane135=rx135:tx135 +lane_to_serdes_map_fabric_lane136=rx136:tx139 +lane_to_serdes_map_fabric_lane137=rx137:tx138 +lane_to_serdes_map_fabric_lane138=rx138:tx137 +lane_to_serdes_map_fabric_lane139=rx139:tx136 +lane_to_serdes_map_fabric_lane140=rx140:tx140 +lane_to_serdes_map_fabric_lane141=rx141:tx142 +lane_to_serdes_map_fabric_lane142=rx142:tx141 +lane_to_serdes_map_fabric_lane143=rx143:tx143 +lane_to_serdes_map_fabric_lane144=rx144:tx144 +lane_to_serdes_map_fabric_lane145=rx145:tx145 +lane_to_serdes_map_fabric_lane146=rx146:tx146 +lane_to_serdes_map_fabric_lane147=rx147:tx147 +lane_to_serdes_map_fabric_lane148=rx148:tx148 +lane_to_serdes_map_fabric_lane149=rx149:tx149 +lane_to_serdes_map_fabric_lane150=rx150:tx150 +lane_to_serdes_map_fabric_lane151=rx151:tx151 +lane_to_serdes_map_fabric_lane152=rx152:tx155 +lane_to_serdes_map_fabric_lane153=rx153:tx154 +lane_to_serdes_map_fabric_lane154=rx154:tx153 +lane_to_serdes_map_fabric_lane155=rx155:tx152 
+lane_to_serdes_map_fabric_lane156=rx156:tx159 +lane_to_serdes_map_fabric_lane157=rx157:tx158 +lane_to_serdes_map_fabric_lane158=rx158:tx157 +lane_to_serdes_map_fabric_lane159=rx159:tx156 +lane_to_serdes_map_fabric_lane160=rx160:tx160 +lane_to_serdes_map_fabric_lane161=rx161:tx161 +lane_to_serdes_map_fabric_lane162=rx162:tx162 +lane_to_serdes_map_fabric_lane163=rx163:tx163 +lane_to_serdes_map_fabric_lane164=rx164:tx164 +lane_to_serdes_map_fabric_lane165=rx165:tx165 +lane_to_serdes_map_fabric_lane166=rx166:tx166 +lane_to_serdes_map_fabric_lane167=rx167:tx167 +lane_to_serdes_map_fabric_lane168=rx168:tx171 +lane_to_serdes_map_fabric_lane169=rx169:tx170 +lane_to_serdes_map_fabric_lane170=rx170:tx169 +lane_to_serdes_map_fabric_lane171=rx171:tx168 +lane_to_serdes_map_fabric_lane172=rx172:tx175 +lane_to_serdes_map_fabric_lane173=rx173:tx174 +lane_to_serdes_map_fabric_lane174=rx174:tx173 +lane_to_serdes_map_fabric_lane175=rx175:tx172 +lane_to_serdes_map_fabric_lane176=rx176:tx176 +lane_to_serdes_map_fabric_lane177=rx177:tx177 +lane_to_serdes_map_fabric_lane178=rx178:tx179 +lane_to_serdes_map_fabric_lane179=rx179:tx178 +lane_to_serdes_map_fabric_lane180=rx180:tx181 +lane_to_serdes_map_fabric_lane181=rx181:tx182 +lane_to_serdes_map_fabric_lane182=rx182:tx180 +lane_to_serdes_map_fabric_lane183=rx183:tx183 +lane_to_serdes_map_fabric_lane184=rx184:tx186 +lane_to_serdes_map_fabric_lane185=rx185:tx184 +lane_to_serdes_map_fabric_lane186=rx186:tx185 +lane_to_serdes_map_fabric_lane187=rx187:tx187 +lane_to_serdes_map_fabric_lane188=rx188:tx188 +lane_to_serdes_map_fabric_lane189=rx189:tx189 +lane_to_serdes_map_fabric_lane190=rx190:tx190 +lane_to_serdes_map_fabric_lane191=rx191:tx191 + +# +##Protocol trap look-up mode: +# Options: IN_LIF - Look-ups in the profile table are done by IN-LIF +# IN_PORT - Look-ups in the profile table are done by IN-PORT +protocol_traps_mode=IN_LIF + +# access definitions +schan_intr_enable=0 +tdma_intr_enable=0 +tslam_intr_enable=0 +miim_intr_enable=0 
+schan_timeout_usec=300000 +tdma_timeout_usec=1000000 +tslam_timeout_usec=1000000 + +### Interrupts +appl_enable_intr_init=1 +polled_irq_mode=1 +# reduce CPU load, configure delay 100ms +polled_irq_delay=1000 + +# reduce the CPU load over adapter (caused by counter thread) +bcm_stat_interval=1000 + +# shadow memory +mem_cache_enable_ecc=1 +mem_cache_enable_parity=1 + +# serdes_nif/fabric_clk_freq_in/out configuration +serdes_nif_clk_freq_in=2 +serdes_nif_clk_freq_out=1 +serdes_fabric_clk_freq_in=2 +serdes_fabric_clk_freq_out=1 + +dport_map_direct=1 + +rif_id_max=0x6000 + +phy_rx_polarity_flip_phy0=0 +phy_rx_polarity_flip_phy1=0 +phy_rx_polarity_flip_phy2=0 +phy_rx_polarity_flip_phy3=0 +phy_rx_polarity_flip_phy4=0 +phy_rx_polarity_flip_phy5=0 +phy_rx_polarity_flip_phy6=0 +phy_rx_polarity_flip_phy7=0 +phy_rx_polarity_flip_phy8=1 +phy_rx_polarity_flip_phy9=1 +phy_rx_polarity_flip_phy10=0 +phy_rx_polarity_flip_phy11=1 +phy_rx_polarity_flip_phy12=1 +phy_rx_polarity_flip_phy13=1 +phy_rx_polarity_flip_phy14=1 +phy_rx_polarity_flip_phy15=1 +phy_rx_polarity_flip_phy16=0 +phy_rx_polarity_flip_phy17=0 +phy_rx_polarity_flip_phy18=0 +phy_rx_polarity_flip_phy19=0 +phy_rx_polarity_flip_phy20=0 +phy_rx_polarity_flip_phy21=0 +phy_rx_polarity_flip_phy22=0 +phy_rx_polarity_flip_phy23=0 +phy_rx_polarity_flip_phy24=0 +phy_rx_polarity_flip_phy25=0 +phy_rx_polarity_flip_phy26=0 +phy_rx_polarity_flip_phy27=0 +phy_rx_polarity_flip_phy28=0 +phy_rx_polarity_flip_phy29=0 +phy_rx_polarity_flip_phy30=0 +phy_rx_polarity_flip_phy31=0 +phy_rx_polarity_flip_phy32=0 +phy_rx_polarity_flip_phy33=0 +phy_rx_polarity_flip_phy34=0 +phy_rx_polarity_flip_phy35=0 +phy_rx_polarity_flip_phy36=0 +phy_rx_polarity_flip_phy37=0 +phy_rx_polarity_flip_phy38=0 +phy_rx_polarity_flip_phy39=0 +phy_rx_polarity_flip_phy40=0 +phy_rx_polarity_flip_phy41=0 +phy_rx_polarity_flip_phy42=0 +phy_rx_polarity_flip_phy43=0 +phy_rx_polarity_flip_phy44=0 +phy_rx_polarity_flip_phy45=0 +phy_rx_polarity_flip_phy46=0 
+phy_rx_polarity_flip_phy47=0 +phy_rx_polarity_flip_phy48=0 +phy_rx_polarity_flip_phy49=0 +phy_rx_polarity_flip_phy50=0 +phy_rx_polarity_flip_phy51=0 +phy_rx_polarity_flip_phy52=0 +phy_rx_polarity_flip_phy53=0 +phy_rx_polarity_flip_phy54=0 +phy_rx_polarity_flip_phy55=0 +phy_rx_polarity_flip_phy56=0 +phy_rx_polarity_flip_phy57=0 +phy_rx_polarity_flip_phy58=0 +phy_rx_polarity_flip_phy59=0 +phy_rx_polarity_flip_phy60=0 +phy_rx_polarity_flip_phy61=0 +phy_rx_polarity_flip_phy62=0 +phy_rx_polarity_flip_phy63=0 +phy_rx_polarity_flip_phy64=0 +phy_rx_polarity_flip_phy65=0 +phy_rx_polarity_flip_phy66=0 +phy_rx_polarity_flip_phy67=0 +phy_rx_polarity_flip_phy68=0 +phy_rx_polarity_flip_phy69=0 +phy_rx_polarity_flip_phy70=0 +phy_rx_polarity_flip_phy71=0 +phy_rx_polarity_flip_phy72=1 +phy_rx_polarity_flip_phy73=1 +phy_rx_polarity_flip_phy74=1 +phy_rx_polarity_flip_phy75=1 +phy_rx_polarity_flip_phy76=1 +phy_rx_polarity_flip_phy77=1 +phy_rx_polarity_flip_phy78=1 +phy_rx_polarity_flip_phy79=1 +phy_rx_polarity_flip_phy80=0 +phy_rx_polarity_flip_phy81=0 +phy_rx_polarity_flip_phy82=0 +phy_rx_polarity_flip_phy83=0 +phy_rx_polarity_flip_phy84=0 +phy_rx_polarity_flip_phy85=0 +phy_rx_polarity_flip_phy86=0 +phy_rx_polarity_flip_phy87=0 +phy_rx_polarity_flip_phy88=0 +phy_rx_polarity_flip_phy89=0 +phy_rx_polarity_flip_phy90=1 +phy_rx_polarity_flip_phy91=0 +phy_rx_polarity_flip_phy92=0 +phy_rx_polarity_flip_phy93=0 +phy_rx_polarity_flip_phy94=0 +phy_rx_polarity_flip_phy95=0 +phy_rx_polarity_flip_phy96=0 +phy_rx_polarity_flip_phy97=0 +phy_rx_polarity_flip_phy98=0 +phy_rx_polarity_flip_phy99=0 +phy_rx_polarity_flip_phy100=0 +phy_rx_polarity_flip_phy101=0 +phy_rx_polarity_flip_phy102=0 +phy_rx_polarity_flip_phy103=0 +phy_rx_polarity_flip_phy104=0 +phy_rx_polarity_flip_phy105=0 +phy_rx_polarity_flip_phy106=0 +phy_rx_polarity_flip_phy107=0 +phy_rx_polarity_flip_phy108=0 +phy_rx_polarity_flip_phy109=0 +phy_rx_polarity_flip_phy110=0 +phy_rx_polarity_flip_phy111=0 +phy_rx_polarity_flip_phy112=0 
+phy_rx_polarity_flip_phy113=0 +phy_rx_polarity_flip_phy114=0 +phy_rx_polarity_flip_phy115=0 +phy_rx_polarity_flip_phy116=0 +phy_rx_polarity_flip_phy117=0 +phy_rx_polarity_flip_phy118=0 +phy_rx_polarity_flip_phy119=0 +phy_rx_polarity_flip_phy120=0 +phy_rx_polarity_flip_phy121=0 +phy_rx_polarity_flip_phy122=0 +phy_rx_polarity_flip_phy123=0 +phy_rx_polarity_flip_phy124=0 +phy_rx_polarity_flip_phy125=0 +phy_rx_polarity_flip_phy126=0 +phy_rx_polarity_flip_phy127=0 +phy_rx_polarity_flip_phy128=0 +phy_rx_polarity_flip_phy129=0 +phy_rx_polarity_flip_phy130=0 +phy_rx_polarity_flip_phy131=0 +phy_rx_polarity_flip_phy132=0 +phy_rx_polarity_flip_phy133=0 +phy_rx_polarity_flip_phy134=0 +phy_rx_polarity_flip_phy135=0 +phy_rx_polarity_flip_phy136=0 +phy_rx_polarity_flip_phy137=0 +phy_rx_polarity_flip_phy138=0 +phy_rx_polarity_flip_phy139=0 +phy_rx_polarity_flip_phy140=0 +phy_rx_polarity_flip_phy141=0 +phy_rx_polarity_flip_phy142=0 +phy_rx_polarity_flip_phy143=0 +phy_tx_polarity_flip_phy0=1 +phy_tx_polarity_flip_phy1=1 +phy_tx_polarity_flip_phy2=1 +phy_tx_polarity_flip_phy3=1 +phy_tx_polarity_flip_phy4=1 +phy_tx_polarity_flip_phy5=1 +phy_tx_polarity_flip_phy6=1 +phy_tx_polarity_flip_phy7=1 +phy_tx_polarity_flip_phy8=1 +phy_tx_polarity_flip_phy9=0 +phy_tx_polarity_flip_phy10=1 +phy_tx_polarity_flip_phy11=1 +phy_tx_polarity_flip_phy12=1 +phy_tx_polarity_flip_phy13=1 +phy_tx_polarity_flip_phy14=1 +phy_tx_polarity_flip_phy15=1 +phy_tx_polarity_flip_phy16=1 +phy_tx_polarity_flip_phy17=1 +phy_tx_polarity_flip_phy18=1 +phy_tx_polarity_flip_phy19=1 +phy_tx_polarity_flip_phy20=1 +phy_tx_polarity_flip_phy21=1 +phy_tx_polarity_flip_phy22=1 +phy_tx_polarity_flip_phy23=1 +phy_tx_polarity_flip_phy24=1 +phy_tx_polarity_flip_phy25=1 +phy_tx_polarity_flip_phy26=1 +phy_tx_polarity_flip_phy27=1 +phy_tx_polarity_flip_phy28=1 +phy_tx_polarity_flip_phy29=1 +phy_tx_polarity_flip_phy30=1 +phy_tx_polarity_flip_phy31=1 +phy_tx_polarity_flip_phy32=1 +phy_tx_polarity_flip_phy33=1 
+phy_tx_polarity_flip_phy34=1 +phy_tx_polarity_flip_phy35=1 +phy_tx_polarity_flip_phy36=1 +phy_tx_polarity_flip_phy37=1 +phy_tx_polarity_flip_phy38=1 +phy_tx_polarity_flip_phy39=1 +phy_tx_polarity_flip_phy40=1 +phy_tx_polarity_flip_phy41=1 +phy_tx_polarity_flip_phy42=1 +phy_tx_polarity_flip_phy43=1 +phy_tx_polarity_flip_phy44=1 +phy_tx_polarity_flip_phy45=1 +phy_tx_polarity_flip_phy46=1 +phy_tx_polarity_flip_phy47=1 +phy_tx_polarity_flip_phy48=1 +phy_tx_polarity_flip_phy49=1 +phy_tx_polarity_flip_phy50=1 +phy_tx_polarity_flip_phy51=1 +phy_tx_polarity_flip_phy52=1 +phy_tx_polarity_flip_phy53=1 +phy_tx_polarity_flip_phy54=1 +phy_tx_polarity_flip_phy55=1 +phy_tx_polarity_flip_phy56=1 +phy_tx_polarity_flip_phy57=1 +phy_tx_polarity_flip_phy58=1 +phy_tx_polarity_flip_phy59=1 +phy_tx_polarity_flip_phy60=1 +phy_tx_polarity_flip_phy61=1 +phy_tx_polarity_flip_phy62=1 +phy_tx_polarity_flip_phy63=1 +phy_tx_polarity_flip_phy64=1 +phy_tx_polarity_flip_phy65=1 +phy_tx_polarity_flip_phy66=1 +phy_tx_polarity_flip_phy67=1 +phy_tx_polarity_flip_phy68=1 +phy_tx_polarity_flip_phy69=1 +phy_tx_polarity_flip_phy70=1 +phy_tx_polarity_flip_phy71=1 +phy_tx_polarity_flip_phy72=0 +phy_tx_polarity_flip_phy73=0 +phy_tx_polarity_flip_phy74=0 +phy_tx_polarity_flip_phy75=0 +phy_tx_polarity_flip_phy76=0 +phy_tx_polarity_flip_phy77=0 +phy_tx_polarity_flip_phy78=0 +phy_tx_polarity_flip_phy79=0 +phy_tx_polarity_flip_phy80=0 +phy_tx_polarity_flip_phy81=0 +phy_tx_polarity_flip_phy82=0 +phy_tx_polarity_flip_phy83=0 +phy_tx_polarity_flip_phy84=0 +phy_tx_polarity_flip_phy85=0 +phy_tx_polarity_flip_phy86=0 +phy_tx_polarity_flip_phy87=0 +phy_tx_polarity_flip_phy88=1 +phy_tx_polarity_flip_phy89=1 +phy_tx_polarity_flip_phy90=1 +phy_tx_polarity_flip_phy91=1 +phy_tx_polarity_flip_phy92=1 +phy_tx_polarity_flip_phy93=1 +phy_tx_polarity_flip_phy94=1 +phy_tx_polarity_flip_phy95=1 +phy_tx_polarity_flip_phy96=1 +phy_tx_polarity_flip_phy97=1 +phy_tx_polarity_flip_phy98=1 +phy_tx_polarity_flip_phy99=1 
+phy_tx_polarity_flip_phy100=1 +phy_tx_polarity_flip_phy101=1 +phy_tx_polarity_flip_phy102=1 +phy_tx_polarity_flip_phy103=1 +phy_tx_polarity_flip_phy104=1 +phy_tx_polarity_flip_phy105=1 +phy_tx_polarity_flip_phy106=1 +phy_tx_polarity_flip_phy107=1 +phy_tx_polarity_flip_phy108=1 +phy_tx_polarity_flip_phy109=1 +phy_tx_polarity_flip_phy110=1 +phy_tx_polarity_flip_phy111=1 +phy_tx_polarity_flip_phy112=1 +phy_tx_polarity_flip_phy113=1 +phy_tx_polarity_flip_phy114=1 +phy_tx_polarity_flip_phy115=1 +phy_tx_polarity_flip_phy116=1 +phy_tx_polarity_flip_phy117=1 +phy_tx_polarity_flip_phy118=1 +phy_tx_polarity_flip_phy119=1 +phy_tx_polarity_flip_phy120=1 +phy_tx_polarity_flip_phy121=1 +phy_tx_polarity_flip_phy122=1 +phy_tx_polarity_flip_phy123=1 +phy_tx_polarity_flip_phy124=1 +phy_tx_polarity_flip_phy125=1 +phy_tx_polarity_flip_phy126=1 +phy_tx_polarity_flip_phy127=1 +phy_tx_polarity_flip_phy128=1 +phy_tx_polarity_flip_phy129=1 +phy_tx_polarity_flip_phy130=1 +phy_tx_polarity_flip_phy131=1 +phy_tx_polarity_flip_phy132=1 +phy_tx_polarity_flip_phy133=1 +phy_tx_polarity_flip_phy134=1 +phy_tx_polarity_flip_phy135=1 +phy_tx_polarity_flip_phy136=1 +phy_tx_polarity_flip_phy137=1 +phy_tx_polarity_flip_phy138=1 +phy_tx_polarity_flip_phy139=1 +phy_tx_polarity_flip_phy140=1 +phy_tx_polarity_flip_phy141=1 +phy_tx_polarity_flip_phy142=1 +phy_tx_polarity_flip_phy143=1 + +serdes_tx_taps_1=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_2=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_3=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_4=nrz:-5:78:-22:0:0:0 +serdes_tx_taps_5=nrz:-5:80:-23:0:0:0 +serdes_tx_taps_6=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_7=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_8=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_9=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_10=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_11=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_12=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_13=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_14=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_15=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_16=nrz:-8:89:-29:0:0:0 
+serdes_tx_taps_17=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_18=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_19=nrz:-6:85:-21:0:0:0 +serdes_tx_taps_20=nrz:-5:83:-22:0:0:0 +serdes_tx_taps_21=nrz:-4:75:-21:0:0:0 +serdes_tx_taps_22=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_23=nrz:-6:85:-21:0:0:0 +serdes_tx_taps_24=nrz:-5:83:-22:0:0:0 +serdes_tx_taps_25=nrz:-5:78:-22:0:0:0 +serdes_tx_taps_26=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_27=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_28=nrz:-5:80:-23:0:0:0 +serdes_tx_taps_29=nrz:-5:78:-22:0:0:0 +serdes_tx_taps_30=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_31=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_32=nrz:-5:80:-23:0:0:0 +serdes_tx_taps_33=nrz:-5:83:-22:0:0:0 +serdes_tx_taps_34=nrz:-5:83:-22:0:0:0 +serdes_tx_taps_35=nrz:-4:75:-21:0:0:0 +serdes_tx_taps_36=nrz:-8:89:-29:0:0:0 + +xflow_macsec_secure_chan_to_num_secure_assoc_encrypt=2 +xflow_macsec_secure_chan_to_num_secure_assoc_decrypt=2 diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/0/port_config.ini b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/0/port_config.ini new file mode 100644 index 000000000000..4756e475658e --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/0/port_config.ini @@ -0,0 +1,21 @@ +# name lanes alias index role speed asic_port_name coreId corePortId numVoq +Ethernet0 72,73,74,75 Ethernet1/1 1 Ext 100000 Eth0-ASIC0 1 1 8 +Ethernet8 80,81,82,83 Ethernet2/1 2 Ext 100000 Eth8-ASIC0 1 2 8 +Ethernet16 88,89,90,91 Ethernet3/1 3 Ext 100000 Eth16-ASIC0 1 3 8 +Ethernet24 96,97,98,99 Ethernet4/1 4 Ext 100000 Eth24-ASIC0 1 4 8 +Ethernet32 104,105,106,107 Ethernet5/1 5 Ext 100000 Eth32-ASIC0 1 5 8 +Ethernet40 112,113,114,115 Ethernet6/1 6 Ext 100000 Eth40-ASIC0 1 6 8 +Ethernet48 120,121,122,123 Ethernet7/1 7 Ext 100000 Eth48-ASIC0 1 7 8 +Ethernet56 128,129,130,131 Ethernet8/1 8 Ext 100000 Eth56-ASIC0 1 8 8 +Ethernet64 136,137,138,139 Ethernet9/1 9 Ext 100000 Eth64-ASIC0 1 9 8 +Ethernet72 64,65,66,67 Ethernet10/1 10 Ext 
100000 Eth72-ASIC0 0 10 8 +Ethernet80 56,57,58,59 Ethernet11/1 11 Ext 100000 Eth80-ASIC0 0 11 8 +Ethernet88 48,49,50,51 Ethernet12/1 12 Ext 100000 Eth88-ASIC0 0 12 8 +Ethernet96 40,41,42,43 Ethernet13/1 13 Ext 100000 Eth96-ASIC0 0 13 8 +Ethernet104 32,33,34,35 Ethernet14/1 14 Ext 100000 Eth104-ASIC0 0 14 8 +Ethernet112 24,25,26,27 Ethernet15/1 15 Ext 100000 Eth112-ASIC0 0 15 8 +Ethernet120 16,17,18,19 Ethernet16/1 16 Ext 100000 Eth120-ASIC0 0 16 8 +Ethernet128 8,9,10,11 Ethernet17/1 17 Ext 100000 Eth128-ASIC0 0 17 8 +Ethernet136 0,1,2,3 Ethernet18/1 18 Ext 100000 Eth136-ASIC0 0 18 8 +Ethernet-Rec0 221 Recirc0/0 19 Rec 400000 Rcy0-ASIC0 0 221 8 +Ethernet-IB0 222 Recirc0/1 20 Inb 400000 Rcy1-ASIC0 1 222 8 diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/0/sai.profile b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/0/sai.profile new file mode 100644 index 000000000000..894b300ad733 --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/0/sai.profile @@ -0,0 +1,2 @@ +SAI_INIT_CONFIG_FILE=/usr/share/sonic/hwsku/j2p-a7800r3a-36d-36x400G.config.bcm + diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/1/context_config.json b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/1/context_config.json new file mode 120000 index 000000000000..3db0e8ed3d9b --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/1/context_config.json @@ -0,0 +1 @@ +../0/context_config.json \ No newline at end of file diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/1/j2p-a7800r3a-36d-36x400G.config.bcm b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/1/j2p-a7800r3a-36d-36x400G.config.bcm new file mode 100644 index 000000000000..b30d9d238b29 --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/1/j2p-a7800r3a-36d-36x400G.config.bcm @@ -0,0 +1,984 @@ 
+soc_family=BCM8885X + +dpp_db_path=/usr/share/bcm/db + +#################################################### +##Reference applications related properties - Start +#################################################### + +## PMF small EXEM connected stage: +# Options: IPMF2 - Ingress PMF 2 stage can perform small EXEM lookups. +# IPMF3 - Ingress PMF 3 stage can perform small EXEM lookups. +## PMF small EXEM connected stage: +# Options: IPMF2 - Ingress PMF 2 stage can perform small EXEM lookups. +# IPMF3 - Ingress PMF 3 stage can perform small EXEM lookups. +pmf_sexem3_stage=IPMF2 + +#################################################### +##Reference applications related properties - End +#################################################### + +# Jericho2-mode (description 0x1 used for Jericho 2 mode) +system_headers_mode=1 + +# HW mode to support 1024 16-member system wide LAGs +trunk_group_max_members=16 + +# Disable link-training +port_init_cl72=0 + +###Default interfaces for Jericho2Plus +#CPU interfaces +ucode_port_0=CPU.0:core_0.0 +ucode_port_200=CPU.8:core_1.200 +ucode_port_201=CPU.16:core_0.201 +ucode_port_202=CPU.24:core_1.202 +ucode_port_203=CPU.32:core_0.203 + +#NIF ETH interfaces on device +ucode_port_1=CGE18:core_1.1 +ucode_port_2=CGE20:core_1.2 +ucode_port_3=CGE22:core_1.3 +ucode_port_4=CGE24:core_1.4 +ucode_port_5=CGE26:core_1.5 +ucode_port_6=CGE28:core_1.6 +ucode_port_7=CGE30:core_1.7 +ucode_port_8=CGE32:core_1.8 +ucode_port_9=CGE34:core_1.9 + +ucode_port_10=CGE16:core_0.10 +ucode_port_11=CGE14:core_0.11 +ucode_port_12=CGE12:core_0.12 +ucode_port_13=CGE10:core_0.13 +ucode_port_14=CGE8:core_0.14 +ucode_port_15=CGE6:core_0.15 +ucode_port_16=CGE4:core_0.16 +ucode_port_17=CGE2:core_0.17 +ucode_port_18=CGE0:core_0.18 + +#NIF default speeds +port_init_speed_xe=10000 +port_init_speed_xl=40000 +port_init_speed_le=50000 +port_init_speed_ce=100000 +port_init_speed_cc=200000 +port_init_speed_cd=400000 +port_init_speed_il=10312 + +port_priorities=8 + +#special ports 
+ucode_port_240=OLP:core_0.240 + +# NIF lane mapping +lane_to_serdes_map_nif_lane0=rx3:tx4 +lane_to_serdes_map_nif_lane1=rx6:tx1 +lane_to_serdes_map_nif_lane2=rx7:tx5 +lane_to_serdes_map_nif_lane3=rx4:tx7 +lane_to_serdes_map_nif_lane4=rx1:tx2 +lane_to_serdes_map_nif_lane5=rx0:tx0 +lane_to_serdes_map_nif_lane6=rx5:tx3 +lane_to_serdes_map_nif_lane7=rx2:tx6 +lane_to_serdes_map_nif_lane8=rx10:tx11 +lane_to_serdes_map_nif_lane9=rx8:tx8 +lane_to_serdes_map_nif_lane10=rx14:tx12 +lane_to_serdes_map_nif_lane11=rx15:tx15 +lane_to_serdes_map_nif_lane12=rx13:tx10 +lane_to_serdes_map_nif_lane13=rx9:tx9 +lane_to_serdes_map_nif_lane14=rx11:tx13 +lane_to_serdes_map_nif_lane15=rx12:tx14 +lane_to_serdes_map_nif_lane16=rx16:tx17 +lane_to_serdes_map_nif_lane17=rx19:tx21 +lane_to_serdes_map_nif_lane18=rx21:tx18 +lane_to_serdes_map_nif_lane19=rx18:tx16 +lane_to_serdes_map_nif_lane20=rx17:tx23 +lane_to_serdes_map_nif_lane21=rx20:tx22 +lane_to_serdes_map_nif_lane22=rx22:tx20 +lane_to_serdes_map_nif_lane23=rx23:tx19 +lane_to_serdes_map_nif_lane24=rx26:tx28 +lane_to_serdes_map_nif_lane25=rx29:tx31 +lane_to_serdes_map_nif_lane26=rx31:tx29 +lane_to_serdes_map_nif_lane27=rx28:tx27 +lane_to_serdes_map_nif_lane28=rx25:tx25 +lane_to_serdes_map_nif_lane29=rx24:tx30 +lane_to_serdes_map_nif_lane30=rx30:tx24 +lane_to_serdes_map_nif_lane31=rx27:tx26 +lane_to_serdes_map_nif_lane32=rx32:tx39 +lane_to_serdes_map_nif_lane33=rx33:tx38 +lane_to_serdes_map_nif_lane34=rx38:tx32 +lane_to_serdes_map_nif_lane35=rx39:tx33 +lane_to_serdes_map_nif_lane36=rx35:tx37 +lane_to_serdes_map_nif_lane37=rx34:tx36 +lane_to_serdes_map_nif_lane38=rx36:tx34 +lane_to_serdes_map_nif_lane39=rx37:tx35 +lane_to_serdes_map_nif_lane40=rx40:tx41 +lane_to_serdes_map_nif_lane41=rx43:tx45 +lane_to_serdes_map_nif_lane42=rx45:tx42 +lane_to_serdes_map_nif_lane43=rx42:tx40 +lane_to_serdes_map_nif_lane44=rx41:tx47 +lane_to_serdes_map_nif_lane45=rx44:tx46 +lane_to_serdes_map_nif_lane46=rx46:tx44 +lane_to_serdes_map_nif_lane47=rx47:tx43 
+lane_to_serdes_map_nif_lane48=rx50:tx52 +lane_to_serdes_map_nif_lane49=rx53:tx55 +lane_to_serdes_map_nif_lane50=rx55:tx53 +lane_to_serdes_map_nif_lane51=rx52:tx51 +lane_to_serdes_map_nif_lane52=rx49:tx49 +lane_to_serdes_map_nif_lane53=rx48:tx54 +lane_to_serdes_map_nif_lane54=rx54:tx48 +lane_to_serdes_map_nif_lane55=rx51:tx50 +lane_to_serdes_map_nif_lane56=rx56:tx63 +lane_to_serdes_map_nif_lane57=rx57:tx62 +lane_to_serdes_map_nif_lane58=rx62:tx56 +lane_to_serdes_map_nif_lane59=rx63:tx57 +lane_to_serdes_map_nif_lane60=rx59:tx61 +lane_to_serdes_map_nif_lane61=rx58:tx60 +lane_to_serdes_map_nif_lane62=rx60:tx58 +lane_to_serdes_map_nif_lane63=rx61:tx59 +lane_to_serdes_map_nif_lane64=rx64:tx65 +lane_to_serdes_map_nif_lane65=rx67:tx69 +lane_to_serdes_map_nif_lane66=rx69:tx66 +lane_to_serdes_map_nif_lane67=rx66:tx64 +lane_to_serdes_map_nif_lane68=rx65:tx71 +lane_to_serdes_map_nif_lane69=rx68:tx70 +lane_to_serdes_map_nif_lane70=rx70:tx68 +lane_to_serdes_map_nif_lane71=rx71:tx67 +lane_to_serdes_map_nif_lane72=rx79:tx74 +lane_to_serdes_map_nif_lane73=rx76:tx75 +lane_to_serdes_map_nif_lane74=rx72:tx76 +lane_to_serdes_map_nif_lane75=rx74:tx73 +lane_to_serdes_map_nif_lane76=rx77:tx79 +lane_to_serdes_map_nif_lane77=rx78:tx78 +lane_to_serdes_map_nif_lane78=rx73:tx77 +lane_to_serdes_map_nif_lane79=rx75:tx72 +lane_to_serdes_map_nif_lane80=rx86:tx86 +lane_to_serdes_map_nif_lane81=rx83:tx87 +lane_to_serdes_map_nif_lane82=rx82:tx81 +lane_to_serdes_map_nif_lane83=rx85:tx80 +lane_to_serdes_map_nif_lane84=rx87:tx85 +lane_to_serdes_map_nif_lane85=rx84:tx84 +lane_to_serdes_map_nif_lane86=rx80:tx82 +lane_to_serdes_map_nif_lane87=rx81:tx83 +lane_to_serdes_map_nif_lane88=rx95:tx90 +lane_to_serdes_map_nif_lane89=rx92:tx88 +lane_to_serdes_map_nif_lane90=rx88:tx92 +lane_to_serdes_map_nif_lane91=rx91:tx95 +lane_to_serdes_map_nif_lane92=rx94:tx89 +lane_to_serdes_map_nif_lane93=rx93:tx91 +lane_to_serdes_map_nif_lane94=rx89:tx93 +lane_to_serdes_map_nif_lane95=rx90:tx94 
+lane_to_serdes_map_nif_lane96=rx103:tx97 +lane_to_serdes_map_nif_lane97=rx100:tx96 +lane_to_serdes_map_nif_lane98=rx96:tx100 +lane_to_serdes_map_nif_lane99=rx99:tx103 +lane_to_serdes_map_nif_lane100=rx102:tx99 +lane_to_serdes_map_nif_lane101=rx101:tx98 +lane_to_serdes_map_nif_lane102=rx97:tx101 +lane_to_serdes_map_nif_lane103=rx98:tx102 +lane_to_serdes_map_nif_lane104=rx110:tx107 +lane_to_serdes_map_nif_lane105=rx108:tx105 +lane_to_serdes_map_nif_lane106=rx104:tx108 +lane_to_serdes_map_nif_lane107=rx107:tx110 +lane_to_serdes_map_nif_lane108=rx111:tx106 +lane_to_serdes_map_nif_lane109=rx109:tx104 +lane_to_serdes_map_nif_lane110=rx105:tx109 +lane_to_serdes_map_nif_lane111=rx106:tx111 +lane_to_serdes_map_nif_lane112=rx119:tx114 +lane_to_serdes_map_nif_lane113=rx116:tx112 +lane_to_serdes_map_nif_lane114=rx112:tx116 +lane_to_serdes_map_nif_lane115=rx115:tx119 +lane_to_serdes_map_nif_lane116=rx118:tx113 +lane_to_serdes_map_nif_lane117=rx117:tx115 +lane_to_serdes_map_nif_lane118=rx113:tx117 +lane_to_serdes_map_nif_lane119=rx114:tx118 +lane_to_serdes_map_nif_lane120=rx127:tx121 +lane_to_serdes_map_nif_lane121=rx124:tx120 +lane_to_serdes_map_nif_lane122=rx120:tx124 +lane_to_serdes_map_nif_lane123=rx123:tx127 +lane_to_serdes_map_nif_lane124=rx126:tx123 +lane_to_serdes_map_nif_lane125=rx125:tx122 +lane_to_serdes_map_nif_lane126=rx121:tx125 +lane_to_serdes_map_nif_lane127=rx122:tx126 +lane_to_serdes_map_nif_lane128=rx134:tx131 +lane_to_serdes_map_nif_lane129=rx132:tx129 +lane_to_serdes_map_nif_lane130=rx128:tx132 +lane_to_serdes_map_nif_lane131=rx131:tx134 +lane_to_serdes_map_nif_lane132=rx135:tx130 +lane_to_serdes_map_nif_lane133=rx133:tx128 +lane_to_serdes_map_nif_lane134=rx129:tx133 +lane_to_serdes_map_nif_lane135=rx130:tx135 +lane_to_serdes_map_nif_lane136=rx143:tx138 +lane_to_serdes_map_nif_lane137=rx140:tx136 +lane_to_serdes_map_nif_lane138=rx136:tx140 +lane_to_serdes_map_nif_lane139=rx139:tx143 +lane_to_serdes_map_nif_lane140=rx142:tx137 
+lane_to_serdes_map_nif_lane141=rx141:tx139 +lane_to_serdes_map_nif_lane142=rx137:tx141 +lane_to_serdes_map_nif_lane143=rx138:tx142 + +######################### +### High Availability ### +######################### + +sw_state_max_size=750000000 + +#location of warmboot NV memory +#Allowed options for dnx are - 3:external storage in filesystem 4:driver will save the state directly in shared memory +stable_location=4 + +# Note that each unit should have a unique filename and that adapter does not play well with tmp and dev/shm folders. +stable_filename=/dev/shm/warmboot_data_0 +stable_filename.1=/dev/shm/warmboot_data_1 +stable_filename.2=/dev/shm/warmboot_data_2 + +#Maximum size for NVM used for WB storage, must be larger than sw_state_max_size.BCM8885X +stable_size=800000000 + +######################### +######################### +######################### + +tm_port_header_type_in_0=INJECTED_2_PP +tm_port_header_type_out_0=CPU + +tm_port_header_type_in_200=INJECTED_2_PP +tm_port_header_type_out_200=ETH +tm_port_header_type_in_201=INJECTED_2_PP +tm_port_header_type_out_201=ETH +tm_port_header_type_in_202=INJECTED_2_PP +tm_port_header_type_out_202=ETH +tm_port_header_type_in_203=INJECTED_2_PP +tm_port_header_type_out_203=ETH + +### SAT +## Enable SAT Interface. 
0 - Disable, 1 - Enable (Default) +sat_enable=1 +ucode_port_218=SAT:core_0.218 +tm_port_header_type_out_218=CPU +tm_port_header_type_in_218=INJECTED_2 +ucode_port_219=SAT:core_1.219 +tm_port_header_type_out_219=CPU +tm_port_header_type_in_219=INJECTED_2 +port_init_speed_sat=400000 + +### RCY +sai_recycle_port_lane_base=0 +ucode_port_221=RCY.21:core_0.221 +ucode_port_222=RCY.22:core_1.222 +tm_port_header_type_out_221=ETH +tm_port_header_type_in_221=ETH +tm_port_header_type_out_222=ETH +tm_port_header_type_in_222=ETH +port_init_speed_221=400000 +port_init_speed_222=400000 + +#OLP port +tm_port_header_type_in_240=INJECTED_2 +tm_port_header_type_out_240=RAW + +# Set statically the region mode per region id +dtm_flow_mapping_mode_region_257=3 +dtm_flow_mapping_mode_region_258=3 +dtm_flow_mapping_mode_region_259=3 +dtm_flow_mapping_mode_region_260=3 +dtm_flow_mapping_mode_region_261=3 +dtm_flow_mapping_mode_region_262=3 +dtm_flow_mapping_mode_region_263=3 +dtm_flow_mapping_mode_region_264=3 +dtm_flow_mapping_mode_region_265=3 +dtm_flow_mapping_mode_region_266=7 +dtm_flow_mapping_mode_region_267=3 +dtm_flow_mapping_mode_region_268=3 +dtm_flow_mapping_mode_region_269=3 +dtm_flow_mapping_mode_region_270=3 +dtm_flow_mapping_mode_region_271=3 +dtm_flow_mapping_mode_region_272=3 +dtm_flow_mapping_mode_region_273=3 +dtm_flow_mapping_mode_region_274=3 +dtm_flow_mapping_mode_region_275=3 +dtm_flow_mapping_mode_region_276=3 +dtm_flow_mapping_mode_region_277=3 +dtm_flow_mapping_mode_region_278=3 +dtm_flow_mapping_mode_region_279=3 +dtm_flow_mapping_mode_region_280=3 +dtm_flow_mapping_mode_region_281=3 +dtm_flow_mapping_mode_region_282=3 +dtm_flow_mapping_mode_region_283=3 +dtm_flow_mapping_mode_region_284=3 +dtm_flow_mapping_mode_region_285=3 +dtm_flow_mapping_mode_region_286=3 +dtm_flow_mapping_mode_region_287=3 + +## Configure number of symmetric cores each region supports ## +dtm_flow_nof_remote_cores_region_1=2 +dtm_flow_nof_remote_cores_region_2=2 
+dtm_flow_nof_remote_cores_region_3=2 +dtm_flow_nof_remote_cores_region_4=2 +dtm_flow_nof_remote_cores_region_5=2 +dtm_flow_nof_remote_cores_region_6=2 +dtm_flow_nof_remote_cores_region_7=2 +dtm_flow_nof_remote_cores_region_8=2 +dtm_flow_nof_remote_cores_region_9=2 +dtm_flow_nof_remote_cores_region_10=2 +dtm_flow_nof_remote_cores_region_11=2 +dtm_flow_nof_remote_cores_region_12=2 +dtm_flow_nof_remote_cores_region_13=2 +dtm_flow_nof_remote_cores_region_14=2 +dtm_flow_nof_remote_cores_region_15=2 +dtm_flow_nof_remote_cores_region_16=2 +dtm_flow_nof_remote_cores_region_17=2 +dtm_flow_nof_remote_cores_region_18=2 +dtm_flow_nof_remote_cores_region_19=2 +dtm_flow_nof_remote_cores_region_20=2 +dtm_flow_nof_remote_cores_region_21=2 +dtm_flow_nof_remote_cores_region_22=2 +dtm_flow_nof_remote_cores_region_23=2 +dtm_flow_nof_remote_cores_region_24=2 +dtm_flow_nof_remote_cores_region_25=2 +dtm_flow_nof_remote_cores_region_26=2 +dtm_flow_nof_remote_cores_region_27=2 +dtm_flow_nof_remote_cores_region_28=2 +dtm_flow_nof_remote_cores_region_29=2 +dtm_flow_nof_remote_cores_region_30=2 +dtm_flow_nof_remote_cores_region_31=2 +dtm_flow_nof_remote_cores_region_32=2 +dtm_flow_nof_remote_cores_region_33=2 +dtm_flow_nof_remote_cores_region_34=2 +dtm_flow_nof_remote_cores_region_35=2 +dtm_flow_nof_remote_cores_region_36=2 +dtm_flow_nof_remote_cores_region_37=2 +dtm_flow_nof_remote_cores_region_38=2 +dtm_flow_nof_remote_cores_region_39=2 +dtm_flow_nof_remote_cores_region_40=2 +dtm_flow_nof_remote_cores_region_41=2 +dtm_flow_nof_remote_cores_region_42=2 +dtm_flow_nof_remote_cores_region_43=2 +dtm_flow_nof_remote_cores_region_44=2 +dtm_flow_nof_remote_cores_region_45=2 +dtm_flow_nof_remote_cores_region_46=2 +dtm_flow_nof_remote_cores_region_47=2 +dtm_flow_nof_remote_cores_region_48=2 +dtm_flow_nof_remote_cores_region_49=2 +dtm_flow_nof_remote_cores_region_50=2 +dtm_flow_nof_remote_cores_region_51=2 +dtm_flow_nof_remote_cores_region_52=2 +dtm_flow_nof_remote_cores_region_53=2 
+dtm_flow_nof_remote_cores_region_54=2 +dtm_flow_nof_remote_cores_region_55=2 +dtm_flow_nof_remote_cores_region_56=2 +dtm_flow_nof_remote_cores_region_57=2 +dtm_flow_nof_remote_cores_region_58=2 +dtm_flow_nof_remote_cores_region_59=2 +dtm_flow_nof_remote_cores_region_60=2 + +### MDB configuration ### +mdb_profile=balanced-exem + +### Descriptor-DMA configuration ### +dma_desc_aggregator_chain_length_max=1000 +dma_desc_aggregator_buff_size_kb=100 +dma_desc_aggregator_timeout_usec=1000 +dma_desc_aggregator_enable_specific_MDB_LPM=1 +dma_desc_aggregator_enable_specific_MDB_FEC=1 + +### Outlif configuration ### +outlif_logical_to_physical_phase_map_1=S1 +outlif_logical_to_physical_phase_map_2=L1 +outlif_logical_to_physical_phase_map_3=XL +outlif_logical_to_physical_phase_map_4=L2 +outlif_logical_to_physical_phase_map_5=M1 +outlif_logical_to_physical_phase_map_6=M2 +outlif_logical_to_physical_phase_map_7=M3 +outlif_logical_to_physical_phase_map_8=S2 + +### Outlif data granularity configuration ### +outlif_physical_phase_data_granularity_S1=60 +outlif_physical_phase_data_granularity_S2=60 +outlif_physical_phase_data_granularity_M1=60 +outlif_physical_phase_data_granularity_M2=60 +outlif_physical_phase_data_granularity_M3=60 +outlif_physical_phase_data_granularity_L1=60 +outlif_physical_phase_data_granularity_L2=60 +outlif_physical_phase_data_granularity_XL=60 + +### Fabric configuration ### +# Enable link-training +port_init_cl72_sfi=1 +serdes_lane_config_cl72_auto_polarity_en=0 +serdes_lane_config_cl72_auto_polarity_en_sfi=1 +serdes_lane_config_cl72_restart_timeout_en=0 + +#SFI speed rate +port_init_speed_fabric=53125 + +## Fabric transmission mode +# Set the Connect mode to the Fabric +# Options: FE - presence of a Fabric device (single stage) +# SINGLE_FAP - stand-alone device +# MESH - devices in Mesh +# Note: If 'diag_chassis' is on, value will be overridden in dnx.soc +# to be FE instead of SINGLE_FAP.
+fabric_connect_mode=FE + +fabric_logical_port_base=512 + +# Fabric lane mapping +lane_to_serdes_map_fabric_lane0=rx0:tx0 +lane_to_serdes_map_fabric_lane1=rx1:tx1 +lane_to_serdes_map_fabric_lane2=rx2:tx2 +lane_to_serdes_map_fabric_lane3=rx3:tx3 +lane_to_serdes_map_fabric_lane4=rx4:tx4 +lane_to_serdes_map_fabric_lane5=rx5:tx5 +lane_to_serdes_map_fabric_lane6=rx6:tx6 +lane_to_serdes_map_fabric_lane7=rx7:tx7 +lane_to_serdes_map_fabric_lane8=rx8:tx10 +lane_to_serdes_map_fabric_lane9=rx9:tx11 +lane_to_serdes_map_fabric_lane10=rx10:tx9 +lane_to_serdes_map_fabric_lane11=rx11:tx8 +lane_to_serdes_map_fabric_lane12=rx12:tx12 +lane_to_serdes_map_fabric_lane13=rx13:tx15 +lane_to_serdes_map_fabric_lane14=rx14:tx14 +lane_to_serdes_map_fabric_lane15=rx15:tx13 +lane_to_serdes_map_fabric_lane16=rx16:tx17 +lane_to_serdes_map_fabric_lane17=rx17:tx18 +lane_to_serdes_map_fabric_lane18=rx18:tx16 +lane_to_serdes_map_fabric_lane19=rx19:tx19 +lane_to_serdes_map_fabric_lane20=rx20:tx21 +lane_to_serdes_map_fabric_lane21=rx21:tx23 +lane_to_serdes_map_fabric_lane22=rx22:tx20 +lane_to_serdes_map_fabric_lane23=rx23:tx22 +lane_to_serdes_map_fabric_lane24=rx24:tx26 +lane_to_serdes_map_fabric_lane25=rx25:tx24 +lane_to_serdes_map_fabric_lane26=rx26:tx25 +lane_to_serdes_map_fabric_lane27=rx27:tx27 +lane_to_serdes_map_fabric_lane28=rx28:tx31 +lane_to_serdes_map_fabric_lane29=rx29:tx30 +lane_to_serdes_map_fabric_lane30=rx30:tx29 +lane_to_serdes_map_fabric_lane31=rx31:tx28 +lane_to_serdes_map_fabric_lane32=rx32:tx32 +lane_to_serdes_map_fabric_lane33=rx33:tx33 +lane_to_serdes_map_fabric_lane34=rx34:tx34 +lane_to_serdes_map_fabric_lane35=rx35:tx35 +lane_to_serdes_map_fabric_lane36=rx36:tx36 +lane_to_serdes_map_fabric_lane37=rx37:tx37 +lane_to_serdes_map_fabric_lane38=rx38:tx38 +lane_to_serdes_map_fabric_lane39=rx39:tx39 +lane_to_serdes_map_fabric_lane40=rx40:tx43 +lane_to_serdes_map_fabric_lane41=rx41:tx42 +lane_to_serdes_map_fabric_lane42=rx42:tx41 +lane_to_serdes_map_fabric_lane43=rx43:tx40 
+lane_to_serdes_map_fabric_lane44=rx44:tx47 +lane_to_serdes_map_fabric_lane45=rx45:tx46 +lane_to_serdes_map_fabric_lane46=rx46:tx45 +lane_to_serdes_map_fabric_lane47=rx47:tx44 +lane_to_serdes_map_fabric_lane48=rx48:tx48 +lane_to_serdes_map_fabric_lane49=rx49:tx49 +lane_to_serdes_map_fabric_lane50=rx50:tx50 +lane_to_serdes_map_fabric_lane51=rx51:tx51 +lane_to_serdes_map_fabric_lane52=rx52:tx52 +lane_to_serdes_map_fabric_lane53=rx53:tx53 +lane_to_serdes_map_fabric_lane54=rx54:tx54 +lane_to_serdes_map_fabric_lane55=rx55:tx55 +lane_to_serdes_map_fabric_lane56=rx56:tx59 +lane_to_serdes_map_fabric_lane57=rx57:tx58 +lane_to_serdes_map_fabric_lane58=rx58:tx57 +lane_to_serdes_map_fabric_lane59=rx59:tx56 +lane_to_serdes_map_fabric_lane60=rx60:tx63 +lane_to_serdes_map_fabric_lane61=rx61:tx62 +lane_to_serdes_map_fabric_lane62=rx62:tx61 +lane_to_serdes_map_fabric_lane63=rx63:tx60 +lane_to_serdes_map_fabric_lane64=rx64:tx64 +lane_to_serdes_map_fabric_lane65=rx65:tx65 +lane_to_serdes_map_fabric_lane66=rx66:tx66 +lane_to_serdes_map_fabric_lane67=rx67:tx67 +lane_to_serdes_map_fabric_lane68=rx68:tx68 +lane_to_serdes_map_fabric_lane69=rx69:tx69 +lane_to_serdes_map_fabric_lane70=rx70:tx70 +lane_to_serdes_map_fabric_lane71=rx71:tx71 +lane_to_serdes_map_fabric_lane72=rx72:tx75 +lane_to_serdes_map_fabric_lane73=rx73:tx74 +lane_to_serdes_map_fabric_lane74=rx74:tx73 +lane_to_serdes_map_fabric_lane75=rx75:tx72 +lane_to_serdes_map_fabric_lane76=rx76:tx79 +lane_to_serdes_map_fabric_lane77=rx77:tx78 +lane_to_serdes_map_fabric_lane78=rx78:tx77 +lane_to_serdes_map_fabric_lane79=rx79:tx76 +lane_to_serdes_map_fabric_lane80=rx80:tx80 +lane_to_serdes_map_fabric_lane81=rx81:tx81 +lane_to_serdes_map_fabric_lane82=rx82:tx83 +lane_to_serdes_map_fabric_lane83=rx83:tx82 +lane_to_serdes_map_fabric_lane84=rx84:tx85 +lane_to_serdes_map_fabric_lane85=rx85:tx86 +lane_to_serdes_map_fabric_lane86=rx86:tx84 +lane_to_serdes_map_fabric_lane87=rx87:tx87 +lane_to_serdes_map_fabric_lane88=rx88:tx90 
+lane_to_serdes_map_fabric_lane89=rx89:tx88 +lane_to_serdes_map_fabric_lane90=rx90:tx91 +lane_to_serdes_map_fabric_lane91=rx91:tx89 +lane_to_serdes_map_fabric_lane92=rx92:tx93 +lane_to_serdes_map_fabric_lane93=rx93:tx92 +lane_to_serdes_map_fabric_lane94=rx94:tx94 +lane_to_serdes_map_fabric_lane95=rx95:tx95 +lane_to_serdes_map_fabric_lane96=rx96:tx96 +lane_to_serdes_map_fabric_lane97=rx97:tx97 +lane_to_serdes_map_fabric_lane98=rx98:tx98 +lane_to_serdes_map_fabric_lane99=rx99:tx99 +lane_to_serdes_map_fabric_lane100=rx100:tx100 +lane_to_serdes_map_fabric_lane101=rx101:tx101 +lane_to_serdes_map_fabric_lane102=rx102:tx102 +lane_to_serdes_map_fabric_lane103=rx103:tx103 +lane_to_serdes_map_fabric_lane104=rx104:tx105 +lane_to_serdes_map_fabric_lane105=rx105:tx106 +lane_to_serdes_map_fabric_lane106=rx106:tx107 +lane_to_serdes_map_fabric_lane107=rx107:tx104 +lane_to_serdes_map_fabric_lane108=rx108:tx111 +lane_to_serdes_map_fabric_lane109=rx109:tx109 +lane_to_serdes_map_fabric_lane110=rx110:tx110 +lane_to_serdes_map_fabric_lane111=rx111:tx108 +lane_to_serdes_map_fabric_lane112=rx112:tx114 +lane_to_serdes_map_fabric_lane113=rx113:tx113 +lane_to_serdes_map_fabric_lane114=rx114:tx112 +lane_to_serdes_map_fabric_lane115=rx115:tx115 +lane_to_serdes_map_fabric_lane116=rx116:tx117 +lane_to_serdes_map_fabric_lane117=rx117:tx116 +lane_to_serdes_map_fabric_lane118=rx118:tx119 +lane_to_serdes_map_fabric_lane119=rx119:tx118 +lane_to_serdes_map_fabric_lane120=rx120:tx123 +lane_to_serdes_map_fabric_lane121=rx121:tx120 +lane_to_serdes_map_fabric_lane122=rx122:tx122 +lane_to_serdes_map_fabric_lane123=rx123:tx121 +lane_to_serdes_map_fabric_lane124=rx124:tx127 +lane_to_serdes_map_fabric_lane125=rx125:tx125 +lane_to_serdes_map_fabric_lane126=rx126:tx124 +lane_to_serdes_map_fabric_lane127=rx127:tx126 +lane_to_serdes_map_fabric_lane128=rx128:tx128 +lane_to_serdes_map_fabric_lane129=rx129:tx129 +lane_to_serdes_map_fabric_lane130=rx130:tx130 +lane_to_serdes_map_fabric_lane131=rx131:tx131 
+lane_to_serdes_map_fabric_lane132=rx132:tx132 +lane_to_serdes_map_fabric_lane133=rx133:tx133 +lane_to_serdes_map_fabric_lane134=rx134:tx134 +lane_to_serdes_map_fabric_lane135=rx135:tx135 +lane_to_serdes_map_fabric_lane136=rx136:tx139 +lane_to_serdes_map_fabric_lane137=rx137:tx138 +lane_to_serdes_map_fabric_lane138=rx138:tx137 +lane_to_serdes_map_fabric_lane139=rx139:tx136 +lane_to_serdes_map_fabric_lane140=rx140:tx140 +lane_to_serdes_map_fabric_lane141=rx141:tx142 +lane_to_serdes_map_fabric_lane142=rx142:tx141 +lane_to_serdes_map_fabric_lane143=rx143:tx143 +lane_to_serdes_map_fabric_lane144=rx144:tx144 +lane_to_serdes_map_fabric_lane145=rx145:tx145 +lane_to_serdes_map_fabric_lane146=rx146:tx146 +lane_to_serdes_map_fabric_lane147=rx147:tx147 +lane_to_serdes_map_fabric_lane148=rx148:tx148 +lane_to_serdes_map_fabric_lane149=rx149:tx149 +lane_to_serdes_map_fabric_lane150=rx150:tx150 +lane_to_serdes_map_fabric_lane151=rx151:tx151 +lane_to_serdes_map_fabric_lane152=rx152:tx155 +lane_to_serdes_map_fabric_lane153=rx153:tx154 +lane_to_serdes_map_fabric_lane154=rx154:tx153 +lane_to_serdes_map_fabric_lane155=rx155:tx152 +lane_to_serdes_map_fabric_lane156=rx156:tx159 +lane_to_serdes_map_fabric_lane157=rx157:tx158 +lane_to_serdes_map_fabric_lane158=rx158:tx157 +lane_to_serdes_map_fabric_lane159=rx159:tx156 +lane_to_serdes_map_fabric_lane160=rx160:tx160 +lane_to_serdes_map_fabric_lane161=rx161:tx161 +lane_to_serdes_map_fabric_lane162=rx162:tx162 +lane_to_serdes_map_fabric_lane163=rx163:tx163 +lane_to_serdes_map_fabric_lane164=rx164:tx164 +lane_to_serdes_map_fabric_lane165=rx165:tx165 +lane_to_serdes_map_fabric_lane166=rx166:tx166 +lane_to_serdes_map_fabric_lane167=rx167:tx167 +lane_to_serdes_map_fabric_lane168=rx168:tx171 +lane_to_serdes_map_fabric_lane169=rx169:tx170 +lane_to_serdes_map_fabric_lane170=rx170:tx169 +lane_to_serdes_map_fabric_lane171=rx171:tx168 +lane_to_serdes_map_fabric_lane172=rx172:tx175 +lane_to_serdes_map_fabric_lane173=rx173:tx174 
+lane_to_serdes_map_fabric_lane174=rx174:tx173 +lane_to_serdes_map_fabric_lane175=rx175:tx172 +lane_to_serdes_map_fabric_lane176=rx176:tx176 +lane_to_serdes_map_fabric_lane177=rx177:tx177 +lane_to_serdes_map_fabric_lane178=rx178:tx179 +lane_to_serdes_map_fabric_lane179=rx179:tx178 +lane_to_serdes_map_fabric_lane180=rx180:tx181 +lane_to_serdes_map_fabric_lane181=rx181:tx182 +lane_to_serdes_map_fabric_lane182=rx182:tx180 +lane_to_serdes_map_fabric_lane183=rx183:tx183 +lane_to_serdes_map_fabric_lane184=rx184:tx186 +lane_to_serdes_map_fabric_lane185=rx185:tx184 +lane_to_serdes_map_fabric_lane186=rx186:tx185 +lane_to_serdes_map_fabric_lane187=rx187:tx187 +lane_to_serdes_map_fabric_lane188=rx188:tx188 +lane_to_serdes_map_fabric_lane189=rx189:tx189 +lane_to_serdes_map_fabric_lane190=rx190:tx190 +lane_to_serdes_map_fabric_lane191=rx191:tx191 + +# +##Protocol trap look-up mode: +# Options: IN_LIF - Look-ups in the profile table are done by IN-LIF +# IN_PORT - Look-ups in the profile table are done by IN-PORT +protocol_traps_mode=IN_LIF + +# access definitions +schan_intr_enable=0 +tdma_intr_enable=0 +tslam_intr_enable=0 +miim_intr_enable=0 +schan_timeout_usec=300000 +tdma_timeout_usec=1000000 +tslam_timeout_usec=1000000 + +### Interrupts +appl_enable_intr_init=1 +polled_irq_mode=1 +# reduce CPU load, configure delay 100ms +polled_irq_delay=1000 + +# reduce the CPU load over adapter (caused by counter thread) +bcm_stat_interval=1000 + +# shadow memory +mem_cache_enable_ecc=1 +mem_cache_enable_parity=1 + +# serdes_nif/fabric_clk_freq_in/out configuration +serdes_nif_clk_freq_in=2 +serdes_nif_clk_freq_out=1 +serdes_fabric_clk_freq_in=2 +serdes_fabric_clk_freq_out=1 + +dport_map_direct=1 + +rif_id_max=0x6000 + +phy_rx_polarity_flip_phy0=0 +phy_rx_polarity_flip_phy1=0 +phy_rx_polarity_flip_phy2=0 +phy_rx_polarity_flip_phy3=0 +phy_rx_polarity_flip_phy4=0 +phy_rx_polarity_flip_phy5=0 +phy_rx_polarity_flip_phy6=0 +phy_rx_polarity_flip_phy7=0 +phy_rx_polarity_flip_phy8=1 
+phy_rx_polarity_flip_phy9=1 +phy_rx_polarity_flip_phy10=0 +phy_rx_polarity_flip_phy11=0 +phy_rx_polarity_flip_phy12=1 +phy_rx_polarity_flip_phy13=1 +phy_rx_polarity_flip_phy14=0 +phy_rx_polarity_flip_phy15=1 +phy_rx_polarity_flip_phy16=0 +phy_rx_polarity_flip_phy17=0 +phy_rx_polarity_flip_phy18=0 +phy_rx_polarity_flip_phy19=0 +phy_rx_polarity_flip_phy20=0 +phy_rx_polarity_flip_phy21=0 +phy_rx_polarity_flip_phy22=0 +phy_rx_polarity_flip_phy23=0 +phy_rx_polarity_flip_phy24=0 +phy_rx_polarity_flip_phy25=0 +phy_rx_polarity_flip_phy26=0 +phy_rx_polarity_flip_phy27=0 +phy_rx_polarity_flip_phy28=0 +phy_rx_polarity_flip_phy29=0 +phy_rx_polarity_flip_phy30=0 +phy_rx_polarity_flip_phy31=0 +phy_rx_polarity_flip_phy32=0 +phy_rx_polarity_flip_phy33=0 +phy_rx_polarity_flip_phy34=0 +phy_rx_polarity_flip_phy35=0 +phy_rx_polarity_flip_phy36=0 +phy_rx_polarity_flip_phy37=0 +phy_rx_polarity_flip_phy38=0 +phy_rx_polarity_flip_phy39=0 +phy_rx_polarity_flip_phy40=0 +phy_rx_polarity_flip_phy41=0 +phy_rx_polarity_flip_phy42=0 +phy_rx_polarity_flip_phy43=0 +phy_rx_polarity_flip_phy44=0 +phy_rx_polarity_flip_phy45=0 +phy_rx_polarity_flip_phy46=0 +phy_rx_polarity_flip_phy47=0 +phy_rx_polarity_flip_phy48=0 +phy_rx_polarity_flip_phy49=0 +phy_rx_polarity_flip_phy50=0 +phy_rx_polarity_flip_phy51=0 +phy_rx_polarity_flip_phy52=0 +phy_rx_polarity_flip_phy53=0 +phy_rx_polarity_flip_phy54=0 +phy_rx_polarity_flip_phy55=0 +phy_rx_polarity_flip_phy56=0 +phy_rx_polarity_flip_phy57=0 +phy_rx_polarity_flip_phy58=0 +phy_rx_polarity_flip_phy59=0 +phy_rx_polarity_flip_phy60=0 +phy_rx_polarity_flip_phy61=0 +phy_rx_polarity_flip_phy62=0 +phy_rx_polarity_flip_phy63=0 +phy_rx_polarity_flip_phy64=0 +phy_rx_polarity_flip_phy65=0 +phy_rx_polarity_flip_phy66=0 +phy_rx_polarity_flip_phy67=0 +phy_rx_polarity_flip_phy68=0 +phy_rx_polarity_flip_phy69=0 +phy_rx_polarity_flip_phy70=0 +phy_rx_polarity_flip_phy71=0 +phy_rx_polarity_flip_phy72=1 +phy_rx_polarity_flip_phy73=1 +phy_rx_polarity_flip_phy74=0 
+phy_rx_polarity_flip_phy75=1 +phy_rx_polarity_flip_phy76=1 +phy_rx_polarity_flip_phy77=1 +phy_rx_polarity_flip_phy78=1 +phy_rx_polarity_flip_phy79=1 +phy_rx_polarity_flip_phy80=0 +phy_rx_polarity_flip_phy81=0 +phy_rx_polarity_flip_phy82=0 +phy_rx_polarity_flip_phy83=0 +phy_rx_polarity_flip_phy84=0 +phy_rx_polarity_flip_phy85=0 +phy_rx_polarity_flip_phy86=0 +phy_rx_polarity_flip_phy87=0 +phy_rx_polarity_flip_phy88=0 +phy_rx_polarity_flip_phy89=0 +phy_rx_polarity_flip_phy90=0 +phy_rx_polarity_flip_phy91=0 +phy_rx_polarity_flip_phy92=0 +phy_rx_polarity_flip_phy93=0 +phy_rx_polarity_flip_phy94=0 +phy_rx_polarity_flip_phy95=0 +phy_rx_polarity_flip_phy96=0 +phy_rx_polarity_flip_phy97=0 +phy_rx_polarity_flip_phy98=0 +phy_rx_polarity_flip_phy99=0 +phy_rx_polarity_flip_phy100=0 +phy_rx_polarity_flip_phy101=0 +phy_rx_polarity_flip_phy102=0 +phy_rx_polarity_flip_phy103=0 +phy_rx_polarity_flip_phy104=0 +phy_rx_polarity_flip_phy105=0 +phy_rx_polarity_flip_phy106=0 +phy_rx_polarity_flip_phy107=0 +phy_rx_polarity_flip_phy108=0 +phy_rx_polarity_flip_phy109=0 +phy_rx_polarity_flip_phy110=0 +phy_rx_polarity_flip_phy111=0 +phy_rx_polarity_flip_phy112=0 +phy_rx_polarity_flip_phy113=0 +phy_rx_polarity_flip_phy114=0 +phy_rx_polarity_flip_phy115=0 +phy_rx_polarity_flip_phy116=0 +phy_rx_polarity_flip_phy117=0 +phy_rx_polarity_flip_phy118=0 +phy_rx_polarity_flip_phy119=0 +phy_rx_polarity_flip_phy120=0 +phy_rx_polarity_flip_phy121=0 +phy_rx_polarity_flip_phy122=0 +phy_rx_polarity_flip_phy123=0 +phy_rx_polarity_flip_phy124=0 +phy_rx_polarity_flip_phy125=0 +phy_rx_polarity_flip_phy126=0 +phy_rx_polarity_flip_phy127=0 +phy_rx_polarity_flip_phy128=0 +phy_rx_polarity_flip_phy129=0 +phy_rx_polarity_flip_phy130=0 +phy_rx_polarity_flip_phy131=0 +phy_rx_polarity_flip_phy132=0 +phy_rx_polarity_flip_phy133=0 +phy_rx_polarity_flip_phy134=0 +phy_rx_polarity_flip_phy135=0 +phy_rx_polarity_flip_phy136=0 +phy_rx_polarity_flip_phy137=0 +phy_rx_polarity_flip_phy138=0 +phy_rx_polarity_flip_phy139=0 
+phy_rx_polarity_flip_phy140=0 +phy_rx_polarity_flip_phy141=0 +phy_rx_polarity_flip_phy142=0 +phy_rx_polarity_flip_phy143=0 +phy_tx_polarity_flip_phy0=1 +phy_tx_polarity_flip_phy1=1 +phy_tx_polarity_flip_phy2=1 +phy_tx_polarity_flip_phy3=1 +phy_tx_polarity_flip_phy4=1 +phy_tx_polarity_flip_phy5=1 +phy_tx_polarity_flip_phy6=1 +phy_tx_polarity_flip_phy7=1 +phy_tx_polarity_flip_phy8=1 +phy_tx_polarity_flip_phy9=1 +phy_tx_polarity_flip_phy10=1 +phy_tx_polarity_flip_phy11=1 +phy_tx_polarity_flip_phy12=1 +phy_tx_polarity_flip_phy13=1 +phy_tx_polarity_flip_phy14=1 +phy_tx_polarity_flip_phy15=1 +phy_tx_polarity_flip_phy16=1 +phy_tx_polarity_flip_phy17=1 +phy_tx_polarity_flip_phy18=1 +phy_tx_polarity_flip_phy19=1 +phy_tx_polarity_flip_phy20=1 +phy_tx_polarity_flip_phy21=1 +phy_tx_polarity_flip_phy22=1 +phy_tx_polarity_flip_phy23=1 +phy_tx_polarity_flip_phy24=1 +phy_tx_polarity_flip_phy25=1 +phy_tx_polarity_flip_phy26=1 +phy_tx_polarity_flip_phy27=1 +phy_tx_polarity_flip_phy28=1 +phy_tx_polarity_flip_phy29=1 +phy_tx_polarity_flip_phy30=1 +phy_tx_polarity_flip_phy31=1 +phy_tx_polarity_flip_phy32=1 +phy_tx_polarity_flip_phy33=1 +phy_tx_polarity_flip_phy34=1 +phy_tx_polarity_flip_phy35=1 +phy_tx_polarity_flip_phy36=1 +phy_tx_polarity_flip_phy37=1 +phy_tx_polarity_flip_phy38=1 +phy_tx_polarity_flip_phy39=1 +phy_tx_polarity_flip_phy40=1 +phy_tx_polarity_flip_phy41=1 +phy_tx_polarity_flip_phy42=1 +phy_tx_polarity_flip_phy43=1 +phy_tx_polarity_flip_phy44=1 +phy_tx_polarity_flip_phy45=1 +phy_tx_polarity_flip_phy46=1 +phy_tx_polarity_flip_phy47=1 +phy_tx_polarity_flip_phy48=1 +phy_tx_polarity_flip_phy49=1 +phy_tx_polarity_flip_phy50=1 +phy_tx_polarity_flip_phy51=1 +phy_tx_polarity_flip_phy52=1 +phy_tx_polarity_flip_phy53=1 +phy_tx_polarity_flip_phy54=1 +phy_tx_polarity_flip_phy55=1 +phy_tx_polarity_flip_phy56=1 +phy_tx_polarity_flip_phy57=1 +phy_tx_polarity_flip_phy58=1 +phy_tx_polarity_flip_phy59=1 +phy_tx_polarity_flip_phy60=1 +phy_tx_polarity_flip_phy61=1 
+phy_tx_polarity_flip_phy62=1 +phy_tx_polarity_flip_phy63=1 +phy_tx_polarity_flip_phy64=1 +phy_tx_polarity_flip_phy65=1 +phy_tx_polarity_flip_phy66=1 +phy_tx_polarity_flip_phy67=1 +phy_tx_polarity_flip_phy68=1 +phy_tx_polarity_flip_phy69=1 +phy_tx_polarity_flip_phy70=1 +phy_tx_polarity_flip_phy71=1 +phy_tx_polarity_flip_phy72=0 +phy_tx_polarity_flip_phy73=0 +phy_tx_polarity_flip_phy74=0 +phy_tx_polarity_flip_phy75=0 +phy_tx_polarity_flip_phy76=0 +phy_tx_polarity_flip_phy77=0 +phy_tx_polarity_flip_phy78=0 +phy_tx_polarity_flip_phy79=0 +phy_tx_polarity_flip_phy80=0 +phy_tx_polarity_flip_phy81=0 +phy_tx_polarity_flip_phy82=0 +phy_tx_polarity_flip_phy83=0 +phy_tx_polarity_flip_phy84=0 +phy_tx_polarity_flip_phy85=0 +phy_tx_polarity_flip_phy86=0 +phy_tx_polarity_flip_phy87=0 +phy_tx_polarity_flip_phy88=1 +phy_tx_polarity_flip_phy89=1 +phy_tx_polarity_flip_phy90=1 +phy_tx_polarity_flip_phy91=1 +phy_tx_polarity_flip_phy92=1 +phy_tx_polarity_flip_phy93=1 +phy_tx_polarity_flip_phy94=1 +phy_tx_polarity_flip_phy95=1 +phy_tx_polarity_flip_phy96=1 +phy_tx_polarity_flip_phy97=1 +phy_tx_polarity_flip_phy98=1 +phy_tx_polarity_flip_phy99=1 +phy_tx_polarity_flip_phy100=1 +phy_tx_polarity_flip_phy101=1 +phy_tx_polarity_flip_phy102=1 +phy_tx_polarity_flip_phy103=1 +phy_tx_polarity_flip_phy104=1 +phy_tx_polarity_flip_phy105=1 +phy_tx_polarity_flip_phy106=1 +phy_tx_polarity_flip_phy107=1 +phy_tx_polarity_flip_phy108=1 +phy_tx_polarity_flip_phy109=1 +phy_tx_polarity_flip_phy110=1 +phy_tx_polarity_flip_phy111=1 +phy_tx_polarity_flip_phy112=1 +phy_tx_polarity_flip_phy113=1 +phy_tx_polarity_flip_phy114=1 +phy_tx_polarity_flip_phy115=1 +phy_tx_polarity_flip_phy116=1 +phy_tx_polarity_flip_phy117=1 +phy_tx_polarity_flip_phy118=1 +phy_tx_polarity_flip_phy119=1 +phy_tx_polarity_flip_phy120=1 +phy_tx_polarity_flip_phy121=1 +phy_tx_polarity_flip_phy122=1 +phy_tx_polarity_flip_phy123=1 +phy_tx_polarity_flip_phy124=1 +phy_tx_polarity_flip_phy125=1 +phy_tx_polarity_flip_phy126=1 
+phy_tx_polarity_flip_phy127=1 +phy_tx_polarity_flip_phy128=1 +phy_tx_polarity_flip_phy129=1 +phy_tx_polarity_flip_phy130=1 +phy_tx_polarity_flip_phy131=1 +phy_tx_polarity_flip_phy132=1 +phy_tx_polarity_flip_phy133=1 +phy_tx_polarity_flip_phy134=1 +phy_tx_polarity_flip_phy135=1 +phy_tx_polarity_flip_phy136=1 +phy_tx_polarity_flip_phy137=1 +phy_tx_polarity_flip_phy138=1 +phy_tx_polarity_flip_phy139=1 +phy_tx_polarity_flip_phy140=1 +phy_tx_polarity_flip_phy141=1 +phy_tx_polarity_flip_phy142=1 +phy_tx_polarity_flip_phy143=1 + +serdes_tx_taps_1=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_2=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_3=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_4=nrz:-5:78:-22:0:0:0 +serdes_tx_taps_5=nrz:-5:80:-23:0:0:0 +serdes_tx_taps_6=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_7=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_8=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_9=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_10=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_11=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_12=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_13=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_14=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_15=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_16=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_17=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_18=nrz:-8:89:-29:0:0:0 +serdes_tx_taps_19=nrz:-5:78:-22:0:0:0 +serdes_tx_taps_20=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_21=nrz:-4:75:-21:0:0:0 +serdes_tx_taps_22=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_23=nrz:-5:78:-22:0:0:0 +serdes_tx_taps_24=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_25=nrz:-5:78:-22:0:0:0 +serdes_tx_taps_26=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_27=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_28=nrz:-5:80:-23:0:0:0 +serdes_tx_taps_29=nrz:-5:78:-22:0:0:0 +serdes_tx_taps_30=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_31=nrz:-7:85:-25:0:0:0 +serdes_tx_taps_32=nrz:-5:80:-23:0:0:0 +serdes_tx_taps_33=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_34=nrz:-5:75:-20:0:0:0 +serdes_tx_taps_35=nrz:-5:80:-23:0:0:0 +serdes_tx_taps_36=nrz:-7:85:-25:0:0:0 + +xflow_macsec_secure_chan_to_num_secure_assoc_encrypt=2 
+xflow_macsec_secure_chan_to_num_secure_assoc_decrypt=2 diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/1/port_config.ini b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/1/port_config.ini new file mode 100644 index 000000000000..f77f5b35172c --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/1/port_config.ini @@ -0,0 +1,21 @@ +# name lanes alias index role speed asic_port_name coreId corePortId numVoq +Ethernet144 72,73,74,75 Ethernet19/1 21 Ext 100000 Eth0-ASIC1 1 1 8 +Ethernet152 80,81,82,83 Ethernet20/1 22 Ext 100000 Eth8-ASIC1 1 2 8 +Ethernet160 88,89,90,91 Ethernet21/1 23 Ext 100000 Eth16-ASIC1 1 3 8 +Ethernet168 96,97,98,99 Ethernet22/1 24 Ext 100000 Eth24-ASIC1 1 4 8 +Ethernet176 104,105,106,107 Ethernet23/1 25 Ext 100000 Eth32-ASIC1 1 5 8 +Ethernet184 112,113,114,115 Ethernet24/1 26 Ext 100000 Eth40-ASIC1 1 6 8 +Ethernet192 120,121,122,123 Ethernet25/1 27 Ext 100000 Eth48-ASIC1 1 7 8 +Ethernet200 128,129,130,131 Ethernet26/1 28 Ext 100000 Eth56-ASIC1 1 8 8 +Ethernet208 136,137,138,139 Ethernet27/1 29 Ext 100000 Eth64-ASIC1 1 9 8 +Ethernet216 64,65,66,67 Ethernet28/1 30 Ext 100000 Eth72-ASIC1 0 10 8 +Ethernet224 56,57,58,59 Ethernet29/1 31 Ext 100000 Eth80-ASIC1 0 11 8 +Ethernet232 48,49,50,51 Ethernet30/1 32 Ext 100000 Eth88-ASIC1 0 12 8 +Ethernet240 40,41,42,43 Ethernet31/1 33 Ext 100000 Eth96-ASIC1 0 13 8 +Ethernet248 32,33,34,35 Ethernet32/1 34 Ext 100000 Eth104-ASIC1 0 14 8 +Ethernet256 24,25,26,27 Ethernet33/1 35 Ext 100000 Eth112-ASIC1 0 15 8 +Ethernet264 16,17,18,19 Ethernet34/1 36 Ext 100000 Eth120-ASIC1 0 16 8 +Ethernet272 8,9,10,11 Ethernet35/1 37 Ext 100000 Eth128-ASIC1 0 17 8 +Ethernet280 0,1,2,3 Ethernet36/1 38 Ext 100000 Eth136-ASIC1 0 18 8 +Ethernet-Rec1 221 Recirc0/0 39 Rec 400000 Rcy0-ASIC1 0 221 8 +Ethernet-IB1 222 Recirc0/1 40 Inb 400000 Rcy1-ASIC1 1 222 8 diff --git 
a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/1/sai.profile b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/1/sai.profile new file mode 120000 index 000000000000..1e172f3e0765 --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C36/1/sai.profile @@ -0,0 +1 @@ +../0/sai.profile \ No newline at end of file diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36DM2-C36 b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36DM2-C36 new file mode 120000 index 000000000000..41ebc98e1c4e --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36DM2-C36 @@ -0,0 +1 @@ +Arista-7800R3A-36D2-C36 \ No newline at end of file diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36P-C36 b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36P-C36 new file mode 120000 index 000000000000..41ebc98e1c4e --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36P-C36 @@ -0,0 +1 @@ +Arista-7800R3A-36D2-C36 \ No newline at end of file diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3AK-36D2-C36 b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3AK-36D2-C36 new file mode 120000 index 000000000000..41ebc98e1c4e --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3AK-36D2-C36 @@ -0,0 +1 @@ +Arista-7800R3A-36D2-C36 \ No newline at end of file diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3AK-36DM2-C36 b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3AK-36DM2-C36 new file mode 120000 index 000000000000..41ebc98e1c4e --- /dev/null +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3AK-36DM2-C36 @@ -0,0 +1 @@ +Arista-7800R3A-36D2-C36 \ No newline at end of file From 9ef846496423ab7b37c324f5615e944047f6990b Mon Sep 17 00:00:00 2001 From: cytsao1 <111393130+cytsao1@users.noreply.github.com> Date: Mon, 17 Oct 2022 13:26:31 
-0700 Subject: [PATCH 061/174] [pmon] Add smartmontools to pmon docker (#11837) * Add smartmontools to pmon docker * Set smartmontools to install version 7.2-1 in pmon to match host; clean up smartmontools build files * Add comments on smartmontools version for both host and pmon --- dockers/docker-platform-monitor/Dockerfile.j2 | 4 +++- files/build_templates/sonic_debian_extension.j2 | 2 +- rules/smartmontools.dep | 10 ---------- rules/smartmontools.mk | 12 ------------ 4 files changed, 4 insertions(+), 24 deletions(-) delete mode 100644 rules/smartmontools.dep delete mode 100644 rules/smartmontools.mk diff --git a/dockers/docker-platform-monitor/Dockerfile.j2 b/dockers/docker-platform-monitor/Dockerfile.j2 index 5c34246ca8be..51d944d170dd 100755 --- a/dockers/docker-platform-monitor/Dockerfile.j2 +++ b/dockers/docker-platform-monitor/Dockerfile.j2 @@ -26,7 +26,9 @@ RUN apt-get update && \ psmisc \ python3-jsonschema \ libpci3 \ - iputils-ping + iputils-ping \ +# smartmontools version should match the installed smartmontools in sonic_debian_extension build template + smartmontools=7.2-1 # On Arista devices, the sonic_platform wheel is not installed in the container. # Instead, the installation directory is mounted from the host OS. 
However, this method diff --git a/files/build_templates/sonic_debian_extension.j2 b/files/build_templates/sonic_debian_extension.j2 index 0e219e9fa2c7..57438ef785f0 100644 --- a/files/build_templates/sonic_debian_extension.j2 +++ b/files/build_templates/sonic_debian_extension.j2 @@ -344,7 +344,7 @@ sudo chmod 755 $FILESYSTEM_ROOT/usr/bin/memory_checker sudo cp $IMAGE_CONFIGS/monit/restart_service $FILESYSTEM_ROOT/usr/bin/ sudo chmod 755 $FILESYSTEM_ROOT/usr/bin/restart_service -# Install custom-built smartmontools +# Installed smartmontools version should match installed smartmontools in docker-platform-monitor Dockerfile sudo LANG=C DEBIAN_FRONTEND=noninteractive chroot $FILESYSTEM_ROOT apt-get -y install smartmontools=7.2-1 # Install custom-built openssh sshd diff --git a/rules/smartmontools.dep b/rules/smartmontools.dep deleted file mode 100644 index 0ca63f5f1fac..000000000000 --- a/rules/smartmontools.dep +++ /dev/null @@ -1,10 +0,0 @@ - -SPATH := $($(SMARTMONTOOLS)_SRC_PATH) -DEP_FILES := $(SONIC_COMMON_FILES_LIST) rules/smartmontools.mk rules/smartmontools.dep -DEP_FILES += $(SONIC_COMMON_BASE_FILES_LIST) -DEP_FILES += $(shell git ls-files $(SPATH)) - -$(SMARTMONTOOLS)_CACHE_MODE := GIT_CONTENT_SHA -$(SMARTMONTOOLS)_DEP_FLAGS := $(SONIC_COMMON_FLAGS_LIST) -$(SMARTMONTOOLS)_DEP_FILES := $(DEP_FILES) - diff --git a/rules/smartmontools.mk b/rules/smartmontools.mk deleted file mode 100644 index 7cc61eee6feb..000000000000 --- a/rules/smartmontools.mk +++ /dev/null @@ -1,12 +0,0 @@ -# smartmontools package -# - -SMARTMONTOOLS_VERSION_MAJOR = 6.6 -SMARTMONTOOLS_VERSION_FULL = $(SMARTMONTOOLS_VERSION_MAJOR)-1 - -export SMARTMONTOOLS_VERSION_MAJOR SMARTMONTOOLS_VERSION_FULL - -SMARTMONTOOLS = smartmontools_$(SMARTMONTOOLS_VERSION_FULL)_$(CONFIGURED_ARCH).deb -$(SMARTMONTOOLS)_SRC_PATH = $(SRC_PATH)/smartmontools - -SONIC_MAKE_DEBS += $(SMARTMONTOOLS) From dc2cc9d507dc9a66e569c7f5ed0cdf902160f79d Mon Sep 17 00:00:00 2001 From: vmittal-msft 
<46945843+vmittal-msft@users.noreply.github.com> Date: Mon, 17 Oct 2022 15:22:53 -0700 Subject: [PATCH 062/174] Updated BRCM SAI to version 7.1.10.4 (#12423) --- platform/broadcom/sai.mk | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/platform/broadcom/sai.mk b/platform/broadcom/sai.mk index 6de969a662bc..ab8d47148b03 100644 --- a/platform/broadcom/sai.mk +++ b/platform/broadcom/sai.mk @@ -1,5 +1,5 @@ -LIBSAIBCM_XGS_VERSION = 7.1.7.2 -LIBSAIBCM_DNX_VERSION = 7.1.7.2 +LIBSAIBCM_XGS_VERSION = 7.1.10.4 +LIBSAIBCM_DNX_VERSION = 7.1.10.4 LIBSAIBCM_BRANCH_NAME = REL_7.0 LIBSAIBCM_XGS_URL_PREFIX = "https://sonicstorage.blob.core.windows.net/public/sai/bcmsai/$(LIBSAIBCM_BRANCH_NAME)/$(LIBSAIBCM_XGS_VERSION)" LIBSAIBCM_DNX_URL_PREFIX = "https://sonicstorage.blob.core.windows.net/public/sai/bcmsai/$(LIBSAIBCM_BRANCH_NAME)/$(LIBSAIBCM_DNX_VERSION)" From a07aaca831c588067d87f9f9823acd1b9a7daa91 Mon Sep 17 00:00:00 2001 From: Xin Wang Date: Tue, 18 Oct 2022 10:02:30 +0800 Subject: [PATCH 063/174] [docker-sonic-mgmt] Cleanup and upgrade some packages (#12218) Why I did it The Dockerfile of docker-sonic-mgmt became a little bit messy over time. Some packages are also a little bit too old. It would be better to do some cleanup and upgrade some important packages. How I did it Updated the dockerfile template for building docker-sonic-mgmt. How to verify it Locally built the docker-sonic-mgmt image and used it to run some test scripts. Description for the changelog: The build-essential package contains gcc and make. It's unnecessary to install them again. The python-is-python2 package is included in the python package for Ubuntu 20.04. It's unnecessary to install it again. Sort the apt and pip packages by alphabetic order. Cleanup get-pip.py after installation. Cleanup the python-scapy deb package after installation. Ensure that the python pip, setuptools and wheel packages are up to date. Install pytest-ansible from pip instead of from source code. 
While installing docker-ce-cli, it's unnecessary to install curl and software-properties-common again. Merged some pip install steps into one step. Upgrade ansible from 2.8.12 to 2.9.27 for env-python3. Upgrade pytest to 7.1.3 for env-python3. Add ncclient package to evn-python3. --- dockers/docker-sonic-mgmt/Dockerfile.j2 | 196 +++++++++++------------- 1 file changed, 90 insertions(+), 106 deletions(-) diff --git a/dockers/docker-sonic-mgmt/Dockerfile.j2 b/dockers/docker-sonic-mgmt/Dockerfile.j2 index 627fbaa65c8f..77b30ef784f8 100755 --- a/dockers/docker-sonic-mgmt/Dockerfile.j2 +++ b/dockers/docker-sonic-mgmt/Dockerfile.j2 @@ -7,7 +7,6 @@ RUN apt-get update && apt-get install -y build-essential \ cmake \ curl \ default-jre \ - gcc \ git \ inetutils-ping \ iproute2 \ @@ -16,7 +15,6 @@ RUN apt-get update && apt-get install -y build-essential \ libssl-dev \ libxml2 \ libxslt1-dev \ - make \ openssh-server \ psmisc \ python \ @@ -25,36 +23,43 @@ RUN apt-get update && apt-get install -y build-essential \ python3-venv \ rsyslog \ snmp \ + software-properties-common \ sshpass \ sudo \ tcpdump \ telnet \ - vim \ - python-is-python2 \ - software-properties-common + vim -RUN add-apt-repository -y universe -RUN curl https://bootstrap.pypa.io/pip/2.7/get-pip.py --output get-pip.py \ - && python2 get-pip.py - -RUN curl -L http://archive.ubuntu.com/ubuntu/pool/universe/s/scapy/python-scapy_2.3.3-3_all.deb \ +RUN curl -fsSL http://archive.ubuntu.com/ubuntu/pool/universe/s/scapy/python-scapy_2.3.3-3_all.deb \ --output python-scapy_2.3.3-3_all.deb \ - && dpkg -i python-scapy_2.3.3-3_all.deb + && dpkg -i python-scapy_2.3.3-3_all.deb \ + && rm -f python-scapy_2.3.3-3_all.deb + +RUN curl -fsSL https://bootstrap.pypa.io/pip/2.7/get-pip.py --output get-pip.py \ + && python2 get-pip.py \ + && rm -f get-pip.py \ + && ln -sf `which pip2` /usr/bin/pip -RUN pip install setuptools==44.1.1 -RUN pip install cffi==1.12.0 \ +RUN pip install --upgrade pip setuptools wheel +RUN pip install 
allure-pytest==2.8.22 \ + ansible==2.8.12 \ + azure-storage-blob==12.9.0 \ + celery[redis]==4.4.7 \ + cffi==1.12.0 \ contextlib2==0.6.0.post1 \ cryptography==3.3.2 \ + dpkt \ "future>=0.16.0" \ gitpython \ ipaddr \ ipython==5.4.1 \ ixnetwork-restpy==1.0.64 \ ixnetwork-open-traffic-generator==0.0.79 \ - snappi[ixnetwork,convergence]==0.7.44 \ jinja2==2.7.2 \ jsonpatch \ + lazy-object-proxy==1.6.0 \ lxml \ + msrest==0.6.21 \ natsort \ netaddr \ netmiko==2.4.2 \ @@ -64,30 +69,29 @@ RUN pip install cffi==1.12.0 \ prettytable \ psutil \ pyasn1==0.1.9 \ + pycryptodome==3.9.8 \ pyfiglet \ - lazy-object-proxy==1.6.0 \ pylint==1.8.1 \ pyro4 \ pysnmp==4.2.5 \ + pysubnettree \ + pytest==4.6.11 \ + pytest-ansible \ pytest-repeat \ pytest-html \ pytest-xdist==1.28.0 \ - pytest==4.6.5 \ + python-dateutil \ redis \ requests \ + retry \ rpyc \ six \ - tabulate \ + snappi[ixnetwork,convergence]==0.7.44 \ statistics \ - textfsm==1.1.2 \ - virtualenv \ - retry \ + tabulate \ + textfsm==1.1.3 \ thrift==0.11.0 \ - allure-pytest==2.8.22 \ - celery[redis]==4.4.7 \ - msrest==0.6.21 \ - python-dateutil \ - azure-storage-blob==12.9.0 \ + virtualenv \ && git clone https://github.com/p4lang/scapy-vxlan.git \ && cd scapy-vxlan \ && python setup.py install \ @@ -104,8 +108,7 @@ RUN pip install cffi==1.12.0 \ && cd ../.. 
\ && rm -fr nanomsg-1.0.0 \ && rm -f 1.0.0.tar.gz \ - && pip install nnpy \ - && pip install dpkt \ + && pip install nnpy \ && pip install scapy==2.4.5 --upgrade --ignore-installed # Install docker-ce-cli @@ -113,9 +116,7 @@ RUN apt-get update \ && apt-get install -y \ apt-transport-https \ ca-certificates \ - curl \ gnupg-agent \ - software-properties-common \ && curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \ && add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" \ && apt-get update \ @@ -124,8 +125,6 @@ RUN apt-get update \ # Install Azure CLI RUN curl -sL https://aka.ms/InstallAzureCLIDeb | bash -RUN pip install wheel==0.33.6 - ## Copy and install sonic-mgmt docker dependencies COPY \ {% for deb in docker_sonic_mgmt_debs.split(' ') -%} @@ -138,18 +137,6 @@ RUN dpkg -i \ debs/{{ deb }}{{' '}} {%- endfor %} -RUN pip install ansible==2.8.12 - -RUN pip install pysubnettree - -# Install pytest-ansible module with 'become', 'become_user' parameters support -RUN git clone https://github.com/ansible/pytest-ansible.git \ - && cd pytest-ansible \ - && git checkout d33c025f070a9c870220a157cc5a999fda68de44 \ - && python setup.py install \ - && cd .. \ - && rm -fr pytest-ansible - RUN mkdir /var/run/sshd EXPOSE 22 @@ -188,10 +175,6 @@ RUN env-201811/bin/pip install cryptography==3.3.2 ansible==2.0.0.2 RUN python3 -m venv env-python3 -# NOTE: There is an ordering dependency for pycryptodome. Leaving this at -# the end until we figure that out. -RUN pip install pycryptodome==3.9.8 - # Activating a virtualenv. The virtualenv automatically works for RUN, ENV and CMD. 
ENV VIRTUAL_ENV=env-python3 ARG BACKUP_OF_PATH="$PATH" @@ -199,65 +182,66 @@ ENV PATH="$VIRTUAL_ENV/bin:$PATH" ENV LANG=C.UTF-8 LC_ALL=C.UTF-8 PYTHONIOENCODING=UTF-8 -RUN python3 -m pip install --upgrade --ignore-installed pip setuptools==58.4.0 wheel==0.33.6 -RUN python3 -m pip install setuptools-rust \ - aiohttp \ - defusedxml \ - azure-kusto-ingest \ - azure-kusto-data \ - cffi \ - contextlib2==0.6.0.post1 \ - cryptography==3.3.2 \ - "future>=0.16.0" \ - gitpython \ - ipaddr \ - ipython==5.4.1 \ - ixnetwork-restpy==1.0.64 \ - ixnetwork-open-traffic-generator==0.0.79 \ - snappi[ixnetwork,convergence]==0.7.44 \ - markupsafe==2.0.1 \ - jinja2==2.7.2 \ - jsonpatch \ - lxml \ - natsort \ - netaddr \ - netmiko==2.4.2 \ - paramiko==2.7.1 \ - passlib \ - pexpect \ - prettytable \ - psutil \ - pyasn1==0.4.8 \ - pyfiglet \ - pylint==1.8.1 \ - pyro4 \ - pysnmp==4.4.12 \ - pytest-repeat \ - pytest-html \ - pytest-xdist==1.28.0 \ - pytest \ - redis \ - requests \ - rpyc \ - six \ - tabulate \ - textfsm==1.1.2 \ - virtualenv \ - pysubnettree \ - nnpy \ - dpkt \ - pycryptodome==3.9.8 \ - ansible==2.8.12 \ - pytest-ansible \ - allure-pytest==2.8.22 \ - retry \ - thrift==0.11.0 \ - ptf \ - scapy==2.4.5 \ - celery[redis]==4.4.7 \ - msrest==0.6.21 \ - python-dateutil \ - azure-storage-blob==12.9.0 +RUN python3 -m pip install --upgrade pip setuptools wheel +RUN python3 -m pip install aiohttp \ + allure-pytest==2.8.22 \ + ansible==2.9.27 \ + azure-storage-blob==12.9.0 \ + azure-kusto-data \ + azure-kusto-ingest \ + defusedxml \ + celery[redis]==4.4.7 \ + cffi \ + contextlib2==0.6.0.post1 \ + cryptography==3.3.2 \ + dpkt \ + "future>=0.16.0" \ + gitpython \ + ipaddr \ + ipython==5.4.1 \ + ixnetwork-restpy==1.0.64 \ + ixnetwork-open-traffic-generator==0.0.79 \ + jinja2==2.7.2 \ + jsonpatch \ + lxml \ + markupsafe==2.0.1 \ + msrest==0.6.21 \ + natsort \ + ncclient \ + netaddr \ + netmiko==2.4.2 \ + nnpy \ + paramiko==2.7.1 \ + passlib \ + pexpect \ + prettytable \ + psutil \ + ptf \ + 
pyasn1==0.4.8 \ + pycryptodome==3.9.8 \ + pyfiglet \ + pylint==1.8.1 \ + pyro4 \ + pysnmp==4.4.12 \ + pysubnettree \ + pytest-ansible \ + pytest-html \ + pytest-repeat \ + pytest-xdist==1.28.0 \ + python-dateutil \ + pytest==7.1.3 \ + redis \ + requests \ + retry \ + rpyc \ + scapy==2.4.5 \ + setuptools-rust \ + six \ + snappi[ixnetwork,convergence]==0.7.44 \ + tabulate \ + textfsm==1.1.2 \ + thrift==0.11.0 \ + virtualenv # Deactivating a virtualenv ENV PATH="$BACKUP_OF_PATH" From 05b1e06012ff42f0b0854612b1910676ba10ade3 Mon Sep 17 00:00:00 2001 From: Liu Shilong Date: Tue, 18 Oct 2022 13:53:29 +0800 Subject: [PATCH 064/174] [action] Add debug info for automerge github action. (#12389) --- .github/workflows/automerge.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/automerge.yml b/.github/workflows/automerge.yml index ee27244bfb1f..2bef87072404 100644 --- a/.github/workflows/automerge.yml +++ b/.github/workflows/automerge.yml @@ -14,6 +14,8 @@ jobs: steps: - name: automerge uses: 'pascalgn/automerge-action@v0.13.1' + with: + args: "--trace" env: GITHUB_TOKEN: '${{ secrets.TOKEN }}' MERGE_LABELS: 'automerge' From bc8ee7a105b0f79cadf713ac952c1d84d6f6dad9 Mon Sep 17 00:00:00 2001 From: Vivek Date: Tue, 18 Oct 2022 02:11:02 -0700 Subject: [PATCH 065/174] [Mellanox] [SKU] Mellanox-SN4700-V48C32 SKU added (#12250) A new SKU for MSN4700 Platform i.e. 
Mellanox-SN4700-V48C32 Requirements: Breakout: Port 1-24: 2x200G Port 25-32: 4x100G Downlinks: 48 (1-24) Uplinks: 32 (25-32) Shared Headroom: Enabled Over Subscribe Ratio: 1:8 Default Topology: T1 Default Cable Length for T1: 300m VxLAN source port range set: No Static Policy Based Hashing Supported: No Additional Details: QoS params: The default ones defined in qos_config.j2 will be applied Small Packet Percentage: Used 50% for traditional buffer model Note: For dynamic model, the value defined in LOSSLESS_TRAFFIC_PATTERN|AZURE|small_packet_percentage is used Cable Lengths used for generating buffer_defaults_{t0,t1}.j2 values Signed-off-by: Vivek Reddy Karri --- .../Mellanox-SN4700-V48C32/buffers.json.j2 | 16 + .../buffers_defaults_objects.j2 | 1 + .../buffers_defaults_t0.j2 | 40 +++ .../buffers_defaults_t1.j2 | 46 +++ .../buffers_dynamic.json.j2 | 17 ++ .../Mellanox-SN4700-V48C32/hwsku.json | 244 +++++++++++++++ .../pg_profile_lookup.ini | 1 + .../Mellanox-SN4700-V48C32/port_config.ini | 81 +++++ .../Mellanox-SN4700-V48C32/qos.json.j2 | 1 + .../Mellanox-SN4700-V48C32/sai.profile | 3 + .../sai_4700_32x100g_48x200g.xml | 277 ++++++++++++++++++ 11 files changed, 727 insertions(+) create mode 100644 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers.json.j2 create mode 120000 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers_defaults_objects.j2 create mode 100644 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers_defaults_t0.j2 create mode 100644 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers_defaults_t1.j2 create mode 100644 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers_dynamic.json.j2 create mode 100644 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/hwsku.json create mode 120000 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/pg_profile_lookup.ini create mode 100644 
device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/port_config.ini create mode 120000 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/qos.json.j2 create mode 100644 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/sai.profile create mode 100644 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/sai_4700_32x100g_48x200g.xml diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers.json.j2 b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers.json.j2 new file mode 100644 index 000000000000..44f0d97ce37d --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers.json.j2 @@ -0,0 +1,16 @@ + +{# + Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. + Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+#} +{%- set default_topo = 't1' %} +{%- include 'buffers_config.j2' %} diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers_defaults_objects.j2 b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers_defaults_objects.j2 new file mode 120000 index 000000000000..c01aebb7ae12 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers_defaults_objects.j2 @@ -0,0 +1 @@ +../../x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D48C8/buffers_defaults_objects.j2 \ No newline at end of file diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers_defaults_t0.j2 b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers_defaults_t0.j2 new file mode 100644 index 000000000000..841444e9aa94 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers_defaults_t0.j2 @@ -0,0 +1,40 @@ + +{# + Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. + Apache-2.0 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+#} +{% set default_cable = '5m' %} +{% set ingress_lossless_pool_size = '50208768' %} +{% set ingress_lossless_pool_xoff = '1662976' %} +{% set egress_lossless_pool_size = '60817392' %} +{% set egress_lossy_pool_size = '50208768' %} + +{% import 'buffers_defaults_objects.j2' as defs with context %} + +{%- macro generate_buffer_pool_and_profiles_with_inactive_ports(port_names_inactive) %} +{{ defs.generate_buffer_pool_and_profiles_with_inactive_ports(port_names_inactive) }} +{%- endmacro %} + +{%- macro generate_profile_lists_with_inactive_ports(port_names_active, port_names_inactive) %} +{{ defs.generate_profile_lists(port_names_active, port_names_inactive) }} +{%- endmacro %} + +{%- macro generate_queue_buffers_with_inactive_ports(port_names_active, port_names_inactive) %} +{{ defs.generate_queue_buffers(port_names_active, port_names_inactive) }} +{%- endmacro %} + +{%- macro generate_pg_profiles_with_inactive_ports(port_names_active, port_names_inactive) %} +{{ defs.generate_pg_profiles(port_names_active, port_names_inactive) }} +{%- endmacro %} diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers_defaults_t1.j2 b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers_defaults_t1.j2 new file mode 100644 index 000000000000..03f41430d73b --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers_defaults_t1.j2 @@ -0,0 +1,46 @@ + +{# + Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. + Apache-2.0 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +#} +{% set default_cable = '300m' %} +{%-set ports2cable = { + 'torrouter_server' : '5m', + 'leafrouter_torrouter' : '300m', + 'spinerouter_leafrouter' : '1500m' + } +-%} +{% set ingress_lossless_pool_size = '46780416' %} +{% set ingress_lossless_pool_xoff = '5091328' %} +{% set egress_lossless_pool_size = '60817392' %} +{% set egress_lossy_pool_size = '46780416' %} + +{% import 'buffers_defaults_objects.j2' as defs with context %} + +{%- macro generate_buffer_pool_and_profiles_with_inactive_ports(port_names_inactive) %} +{{ defs.generate_buffer_pool_and_profiles_with_inactive_ports(port_names_inactive) }} +{%- endmacro %} + +{%- macro generate_profile_lists_with_inactive_ports(port_names_active, port_names_inactive) %} +{{ defs.generate_profile_lists(port_names_active, port_names_inactive) }} +{%- endmacro %} + +{%- macro generate_queue_buffers_with_inactive_ports(port_names_active, port_names_inactive) %} +{{ defs.generate_queue_buffers(port_names_active, port_names_inactive) }} +{%- endmacro %} + +{%- macro generate_pg_profiles_with_inactive_ports(port_names_active, port_names_inactive) %} +{{ defs.generate_pg_profiles(port_names_active, port_names_inactive) }} +{%- endmacro %} diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers_dynamic.json.j2 b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers_dynamic.json.j2 new file mode 100644 index 000000000000..0829b960de18 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/buffers_dynamic.json.j2 @@ -0,0 +1,17 @@ + +{# + Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. + Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +#} +{%- set default_topo = 't1' %} +{%- set dynamic_mode = 'true' %} +{%- include 'buffers_config.j2' %} diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/hwsku.json b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/hwsku.json new file mode 100644 index 000000000000..50d2faec2899 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/hwsku.json @@ -0,0 +1,244 @@ +{ + "interfaces": { + "Ethernet0": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet4": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet8": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet12": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet16": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet20": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet24": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet28": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet32": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet36": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet40": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet44": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet48": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet52": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + 
}, + "Ethernet56": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet60": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet64": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet68": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet72": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet76": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet80": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet84": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet88": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet92": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet96": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet100": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet104": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet108": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet112": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet116": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet120": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet124": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet128": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet132": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet136": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet140": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet144": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet148": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + 
"Ethernet152": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet156": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet160": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet164": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet168": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet172": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet176": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet180": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet184": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet188": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet192": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet194": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet196": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet198": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet200": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet202": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet204": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet206": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet208": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet210": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet212": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet214": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet216": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet218": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet220": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet222": { + "default_brkout_mode": 
"4x100G[50G,25G,10G,1G]" + }, + "Ethernet224": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet226": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet228": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet230": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet232": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet234": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet236": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet238": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet240": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet242": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet244": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet246": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet248": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet250": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet252": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + }, + "Ethernet254": { + "default_brkout_mode": "4x100G[50G,25G,10G,1G]" + } + } +} diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/pg_profile_lookup.ini b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/pg_profile_lookup.ini new file mode 120000 index 000000000000..66cab04d2c42 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/pg_profile_lookup.ini @@ -0,0 +1 @@ +../Mellanox-SN4700-C128/pg_profile_lookup.ini \ No newline at end of file diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/port_config.ini b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/port_config.ini new file mode 100644 index 000000000000..8fdb867ef848 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/port_config.ini @@ -0,0 +1,81 @@ +# name 
lanes alias index speed +Ethernet0 0,1,2,3 etp1a 1 200000 +Ethernet4 4,5,6,7 etp1b 1 200000 +Ethernet8 8,9,10,11 etp2a 2 200000 +Ethernet12 12,13,14,15 etp2b 2 200000 +Ethernet16 16,17,18,19 etp3a 3 200000 +Ethernet20 20,21,22,23 etp3b 3 200000 +Ethernet24 24,25,26,27 etp4a 4 200000 +Ethernet28 28,29,30,31 etp4b 4 200000 +Ethernet32 32,33,34,35 etp5a 5 200000 +Ethernet36 36,37,38,39 etp5b 5 200000 +Ethernet40 40,41,42,43 etp6a 6 200000 +Ethernet44 44,45,46,47 etp6b 6 200000 +Ethernet48 48,49,50,51 etp7a 7 200000 +Ethernet52 52,53,54,55 etp7b 7 200000 +Ethernet56 56,57,58,59 etp8a 8 200000 +Ethernet60 60,61,62,63 etp8b 8 200000 +Ethernet64 64,65,66,67 etp9a 9 200000 +Ethernet68 68,69,70,71 etp9b 9 200000 +Ethernet72 72,73,74,75 etp10a 10 200000 +Ethernet76 76,77,78,79 etp10b 10 200000 +Ethernet80 80,81,82,83 etp11a 11 200000 +Ethernet84 84,85,86,87 etp11b 11 200000 +Ethernet88 88,89,90,91 etp12a 12 200000 +Ethernet92 92,93,94,95 etp12b 12 200000 +Ethernet96 96,97,98,99 etp13a 13 200000 +Ethernet100 100,101,102,103 etp13b 13 200000 +Ethernet104 104,105,106,107 etp14a 14 200000 +Ethernet108 108,109,110,111 etp14b 14 200000 +Ethernet112 112,113,114,115 etp15a 15 200000 +Ethernet116 116,117,118,119 etp15b 15 200000 +Ethernet120 120,121,122,123 etp16a 16 200000 +Ethernet124 124,125,126,127 etp16b 16 200000 +Ethernet128 128,129,130,131 etp17a 17 200000 +Ethernet132 132,133,134,135 etp17b 17 200000 +Ethernet136 136,137,138,139 etp18a 18 200000 +Ethernet140 140,141,142,143 etp18b 18 200000 +Ethernet144 144,145,146,147 etp19a 19 200000 +Ethernet148 148,149,150,151 etp19b 19 200000 +Ethernet152 152,153,154,155 etp20a 20 200000 +Ethernet156 156,157,158,159 etp20b 20 200000 +Ethernet160 160,161,162,163 etp21a 21 200000 +Ethernet164 164,165,166,167 etp21b 21 200000 +Ethernet168 168,169,170,171 etp22a 22 200000 +Ethernet172 172,173,174,175 etp22b 22 200000 +Ethernet176 176,177,178,179 etp23a 23 200000 +Ethernet180 180,181,182,183 etp23b 23 200000 +Ethernet184 184,185,186,187 
etp24a 24 200000 +Ethernet188 188,189,190,191 etp24b 24 200000 +Ethernet192 192,193 etp25a 25 100000 +Ethernet194 194,195 etp25b 25 100000 +Ethernet196 196,197 etp25c 25 100000 +Ethernet198 198,199 etp25d 25 100000 +Ethernet200 200,201 etp26a 26 100000 +Ethernet202 202,203 etp26b 26 100000 +Ethernet204 204,205 etp26c 26 100000 +Ethernet206 206,207 etp26d 26 100000 +Ethernet208 208,209 etp27a 27 100000 +Ethernet210 210,211 etp27b 27 100000 +Ethernet212 212,213 etp27c 27 100000 +Ethernet214 214,215 etp27d 27 100000 +Ethernet216 216,217 etp28a 28 100000 +Ethernet218 218,219 etp28b 28 100000 +Ethernet220 220,221 etp28c 28 100000 +Ethernet222 222,223 etp28d 28 100000 +Ethernet224 224,225 etp29a 29 100000 +Ethernet226 226,227 etp29b 29 100000 +Ethernet228 228,229 etp29c 29 100000 +Ethernet230 230,231 etp29d 29 100000 +Ethernet232 232,233 etp30a 30 100000 +Ethernet234 234,235 etp30b 30 100000 +Ethernet236 236,237 etp30c 30 100000 +Ethernet238 238,239 etp30d 30 100000 +Ethernet240 240,241 etp31a 31 100000 +Ethernet242 242,243 etp31b 31 100000 +Ethernet244 244,245 etp31c 31 100000 +Ethernet246 246,247 etp31d 31 100000 +Ethernet248 248,249 etp32a 32 100000 +Ethernet250 250,251 etp32b 32 100000 +Ethernet252 252,253 etp32c 32 100000 +Ethernet254 254,255 etp32d 32 100000 diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/qos.json.j2 b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/qos.json.j2 new file mode 120000 index 000000000000..eccf286dc879 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/qos.json.j2 @@ -0,0 +1 @@ +../../x86_64-mlnx_msn2700-r0/ACS-MSN2700/qos.json.j2 \ No newline at end of file diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/sai.profile b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/sai.profile new file mode 100644 index 000000000000..42518df9d9e2 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/sai.profile @@ 
-0,0 +1,3 @@ +SAI_INIT_CONFIG_FILE=/usr/share/sonic/hwsku/sai_4700_32x100g_48x200g.xml +SAI_DUMP_STORE_PATH=/var/log/mellanox/sdk-dumps +SAI_DUMP_STORE_AMOUNT=10 diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/sai_4700_32x100g_48x200g.xml b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/sai_4700_32x100g_48x200g.xml new file mode 100644 index 000000000000..fd382f8bd93e --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-V48C32/sai_4700_32x100g_48x200g.xml @@ -0,0 +1,277 @@ + + + + + 00:02:03:04:05:00 + + + 1 + + + 32 + + + + + 1 + 8 + 17 + 1 + 4096 + 2 + + + 5 + 8 + 16 + 1 + 4096 + 2 + + + 9 + 8 + 19 + 1 + 4096 + 2 + + + 13 + 8 + 18 + 1 + 4096 + 2 + + + 17 + 8 + 21 + 1 + 4096 + 2 + + + 21 + 8 + 20 + 1 + 4096 + 2 + + + 25 + 8 + 23 + 1 + 4096 + 2 + + + 29 + 8 + 22 + 1 + 4096 + 2 + + + 33 + 8 + 29 + 3 + 1536 + 4 + + + 37 + 8 + 28 + 3 + 1536 + 4 + + + 41 + 8 + 31 + 3 + 1536 + 4 + + + 45 + 8 + 30 + 3 + 1536 + 4 + + + 49 + 8 + 25 + 3 + 1536 + 4 + + + 53 + 8 + 24 + 3 + 1536 + 4 + + + 57 + 8 + 27 + 3 + 1536 + 4 + + + 61 + 8 + 26 + 3 + 1536 + 4 + + + 65 + 8 + 14 + 1 + 4096 + 2 + + + 69 + 8 + 15 + 1 + 4096 + 2 + + + 73 + 8 + 12 + 1 + 4096 + 2 + + + 77 + 8 + 13 + 1 + 4096 + 2 + + + 81 + 8 + 10 + 1 + 4096 + 2 + + + 85 + 8 + 11 + 1 + 4096 + 2 + + + 89 + 8 + 8 + 1 + 4096 + 2 + + + 93 + 8 + 9 + 1 + 4096 + 2 + + + 97 + 8 + 2 + 1 + 4096 + 2 + + + 101 + 8 + 3 + 1 + 4096 + 2 + + + 105 + 8 + 0 + + + 1 + + + 4096 + 2 + + + 109 + 8 + 1 + 1 + 4096 + 2 + + + 113 + 8 + 6 + 1 + 4096 + 2 + + + 117 + 8 + 7 + 1 + 4096 + 2 + + + 121 + 8 + 4 + 1 + 4096 + 2 + + + 125 + 8 + 5 + 1 + 4096 + 2 + + + + From cca17ce1047032c616a74f48e7ad711eb4d51ac7 Mon Sep 17 00:00:00 2001 From: vmittal-msft <46945843+vmittal-msft@users.noreply.github.com> Date: Tue, 18 Oct 2022 10:13:07 -0700 Subject: [PATCH 066/174] Updated config files to disable DLR_INIT capability (#12401) --- .../Arista-7800R3-48CQ2-C48/jr2-a7280cr3-32d4-40x100G.config.bcm | 1 + 
.../0/j2p-a7800r3a-36d-36x400G.config.bcm | 1 + .../1/j2p-a7800r3a-36d-36x400G.config.bcm | 1 + .../0/jr2cp-nokia-18x100g-4x25g-config.bcm | 1 + .../1/jr2cp-nokia-18x100g-4x25g-config.bcm | 1 + .../Nokia-IXR7250E-36x400G/0/jr2cp-nokia-18x400g-config.bcm | 1 + .../Nokia-IXR7250E-36x400G/1/jr2cp-nokia-18x400g-config.bcm | 1 + 7 files changed, 7 insertions(+) diff --git a/device/arista/x86_64-arista_7800r3_48cq2_lc/Arista-7800R3-48CQ2-C48/jr2-a7280cr3-32d4-40x100G.config.bcm b/device/arista/x86_64-arista_7800r3_48cq2_lc/Arista-7800R3-48CQ2-C48/jr2-a7280cr3-32d4-40x100G.config.bcm index f20ea81049fb..3db7f3a5147b 100644 --- a/device/arista/x86_64-arista_7800r3_48cq2_lc/Arista-7800R3-48CQ2-C48/jr2-a7280cr3-32d4-40x100G.config.bcm +++ b/device/arista/x86_64-arista_7800r3_48cq2_lc/Arista-7800R3-48CQ2-C48/jr2-a7280cr3-32d4-40x100G.config.bcm @@ -793,3 +793,4 @@ dma_desc_aggregator_buff_size_kb.BCM8869X=100 dma_desc_aggregator_timeout_usec.BCM8869X=1000 dma_desc_aggregator_enable_specific_MDB_LPM.BCM8869X=1 dma_desc_aggregator_enable_specific_MDB_FEC.BCM8869X=1 +sai_pfc_dlr_init_capability=0 diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C72/0/j2p-a7800r3a-36d-36x400G.config.bcm b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C72/0/j2p-a7800r3a-36d-36x400G.config.bcm index e32603e1da0c..99dfb9e3e264 100644 --- a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C72/0/j2p-a7800r3a-36d-36x400G.config.bcm +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C72/0/j2p-a7800r3a-36d-36x400G.config.bcm @@ -1001,3 +1001,4 @@ serdes_tx_taps_36=nrz:-8:89:-29:0:0:0 xflow_macsec_secure_chan_to_num_secure_assoc_encrypt=2 xflow_macsec_secure_chan_to_num_secure_assoc_decrypt=4 +sai_pfc_dlr_init_capability=0 diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C72/1/j2p-a7800r3a-36d-36x400G.config.bcm 
b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C72/1/j2p-a7800r3a-36d-36x400G.config.bcm index c2a1d4229f24..bccc2d5be9f1 100644 --- a/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C72/1/j2p-a7800r3a-36d-36x400G.config.bcm +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/Arista-7800R3A-36D2-C72/1/j2p-a7800r3a-36d-36x400G.config.bcm @@ -1000,3 +1000,4 @@ serdes_tx_taps_36=nrz:-7:85:-25:0:0:0 xflow_macsec_secure_chan_to_num_secure_assoc_encrypt=2 xflow_macsec_secure_chan_to_num_secure_assoc_decrypt=4 +sai_pfc_dlr_init_capability=0 diff --git a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/0/jr2cp-nokia-18x100g-4x25g-config.bcm b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/0/jr2cp-nokia-18x100g-4x25g-config.bcm index 3eb3ba20a424..856d429a7e6e 100644 --- a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/0/jr2cp-nokia-18x100g-4x25g-config.bcm +++ b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/0/jr2cp-nokia-18x100g-4x25g-config.bcm @@ -2092,3 +2092,4 @@ modreg IPS_FORCE_LOCAL_OR_FABRIC FORCE_FABRIC=1 xflow_macsec_secure_chan_to_num_secure_assoc_encrypt=2 xflow_macsec_secure_chan_to_num_secure_assoc_decrypt=4 cmic_dma_abort_in_cold_boot=0 +sai_pfc_dlr_init_capability=0 diff --git a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/1/jr2cp-nokia-18x100g-4x25g-config.bcm b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/1/jr2cp-nokia-18x100g-4x25g-config.bcm index 57e966b35315..0d1b3972a5cf 100644 --- a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/1/jr2cp-nokia-18x100g-4x25g-config.bcm +++ b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/1/jr2cp-nokia-18x100g-4x25g-config.bcm @@ -2093,3 +2093,4 @@ modreg IPS_FORCE_LOCAL_OR_FABRIC FORCE_FABRIC=1 xflow_macsec_secure_chan_to_num_secure_assoc_encrypt=2 xflow_macsec_secure_chan_to_num_secure_assoc_decrypt=4 
cmic_dma_abort_in_cold_boot=0 +sai_pfc_dlr_init_capability=0 diff --git a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/0/jr2cp-nokia-18x400g-config.bcm b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/0/jr2cp-nokia-18x400g-config.bcm index 1da65733155a..32fe9ef43709 100644 --- a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/0/jr2cp-nokia-18x400g-config.bcm +++ b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/0/jr2cp-nokia-18x400g-config.bcm @@ -2093,3 +2093,4 @@ modreg IPS_FORCE_LOCAL_OR_FABRIC FORCE_FABRIC=1 xflow_macsec_secure_chan_to_num_secure_assoc_encrypt=2 xflow_macsec_secure_chan_to_num_secure_assoc_decrypt=4 cmic_dma_abort_in_cold_boot=0 +sai_pfc_dlr_init_capability=0 diff --git a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/1/jr2cp-nokia-18x400g-config.bcm b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/1/jr2cp-nokia-18x400g-config.bcm index 4d6790d5398b..8dec8b48477e 100644 --- a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/1/jr2cp-nokia-18x400g-config.bcm +++ b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/1/jr2cp-nokia-18x400g-config.bcm @@ -2095,3 +2095,4 @@ modreg IPS_FORCE_LOCAL_OR_FABRIC FORCE_FABRIC=1 xflow_macsec_secure_chan_to_num_secure_assoc_encrypt=2 xflow_macsec_secure_chan_to_num_secure_assoc_decrypt=4 cmic_dma_abort_in_cold_boot=0 +sai_pfc_dlr_init_capability=0 From 34b6cc0de2318807f2014f58ace3bbce8975d63f Mon Sep 17 00:00:00 2001 From: andywongarista <78833093+andywongarista@users.noreply.github.com> Date: Tue, 18 Oct 2022 18:38:28 -0700 Subject: [PATCH 067/174] [Arista] Fix content of platform.json for DCS-7050CX3-32S (#12082) * Fix platform.json for 7050cx3 * Add platform_components.json * Mark thermals as not controllable --- .../x86_64-arista_7050cx3_32s/platform.json | 71 ++++++++++++------- .../platform_components.json | 13 ++++ 2 files changed, 57 
insertions(+), 27 deletions(-) create mode 100644 device/arista/x86_64-arista_7050cx3_32s/platform_components.json diff --git a/device/arista/x86_64-arista_7050cx3_32s/platform.json b/device/arista/x86_64-arista_7050cx3_32s/platform.json index 6a417e320e6c..274f44615fb5 100644 --- a/device/arista/x86_64-arista_7050cx3_32s/platform.json +++ b/device/arista/x86_64-arista_7050cx3_32s/platform.json @@ -1,7 +1,23 @@ { "chassis": { "name": "DCS-7050CX3-32S", - "components": [], + "components": [ + { + "name": "Aboot()" + }, + { + "name": "Scd(addr=0000:02:00.0)" + }, + { + "name": "Ucd90120A(addr=3-004e)" + }, + { + "name": "Ucd90120A(addr=16-004e)" + }, + { + "name": "CrowSysCpld(addr=2-0023)" + } + ], "fans": [], "fan_drawers": [ { @@ -40,46 +56,47 @@ "psus": [ { "name": "psu1", - "fans": [] + "fans": [ + { + "name": "psu1/1", + "speed": { + "controllable": false + } + } + ] }, { "name": "psu2", - "fans": [] + "fans": [ + { + "name": "psu2/1", + "speed": { + "controllable": false + } + } + ] } ], "thermals": [ { - "name": "Cpu temp sensor" - }, - { - "name": "Cpu board temp sensor" - }, - { - "name": "Back-panel temp sensor" - }, - { - "name": "Board temp sensor" - }, - { - "name": "Front-panel temp sensor" - }, - { - "name": "Power supply 1 hotspot sensor" - }, - { - "name": "Power supply 1 inlet temp sensor" + "name": "Cpu temp sensor", + "controllable": false }, { - "name": "Power supply 1 exhaust temp sensor" + "name": "Cpu board temp sensor", + "controllable": false }, { - "name": "Power supply 2 hotspot sensor" + "name": "Back-panel temp sensor", + "controllable": false }, { - "name": "Power supply 2 inlet temp sensor" + "name": "Board temp sensor", + "controllable": false }, { - "name": "Power supply 2 exhaust temp sensor" + "name": "Front-panel temp sensor", + "controllable": false } ], "sfps": [ @@ -815,4 +832,4 @@ } } } -} \ No newline at end of file +} diff --git a/device/arista/x86_64-arista_7050cx3_32s/platform_components.json 
b/device/arista/x86_64-arista_7050cx3_32s/platform_components.json new file mode 100644 index 000000000000..0d30d1b13e1b --- /dev/null +++ b/device/arista/x86_64-arista_7050cx3_32s/platform_components.json @@ -0,0 +1,13 @@ +{ + "chassis": { + "DCS-7050CX3-32S": { + "component": { + "Aboot()": {}, + "Scd(addr=0000:00:18.7)": {}, + "Ucd90120A(addr=3-004e)": {}, + "Ucd90120A(addr=16-004e)": {}, + "CrowSysCpld(addr=2-0023)": {} + } + } + } +} From 2bf2e02719dea59facc5f23eba25f713133da747 Mon Sep 17 00:00:00 2001 From: Ye Jianquan Date: Wed, 19 Oct 2022 12:26:50 +0800 Subject: [PATCH 068/174] Enable to cancel pipeline jobs during checkout code and tests (#12436) co-authorized by: jianquanye@microsoft.com Why I did it Now, checkout code step and KVM test job can't be cancelled even though the whole build is cancelled. That's because by using Azure Pipeline Conditions, we customized the running condition, and we need to react to the Cancel action explicitly by asserting 'succeeded' https://learn.microsoft.com/en-us/azure/devops/pipelines/process/expressions?view=azure-devops#succeeded https://learn.microsoft.com/en-us/azure/devops/pipelines/process/conditions?view=azure-devops&tabs=yaml#ive-got-a-conditional-step-that-runs-even-when-a-job-is-canceled-how-do-i-manage-to-cancel-all-jobs-at-once How I did it Assert 'succeeded' condition explicitly. How to verify it Verified by cancelling and rerunning the azure pipeline. 
--- .azure-pipelines/azure-pipelines-image-template.yml | 2 +- azure-pipelines.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.azure-pipelines/azure-pipelines-image-template.yml b/.azure-pipelines/azure-pipelines-image-template.yml index 6a9c2790572f..f055414e137d 100644 --- a/.azure-pipelines/azure-pipelines-image-template.yml +++ b/.azure-pipelines/azure-pipelines-image-template.yml @@ -36,7 +36,7 @@ jobs: displayName: "Set cache options" - checkout: self submodules: recursive - condition: eq(variables.SKIP_CHECKOUT, '') + condition: and(succeeded(), eq(variables.SKIP_CHECKOUT, '')) displayName: 'Checkout code' - script: | BRANCH_NAME=$(Build.SourceBranchName) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index a9004af559ef..d82050cd5fb7 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -74,7 +74,7 @@ stages: - stage: Test dependsOn: BuildVS - condition: and(ne(stageDependencies.BuildVS.outputs['vs.SetVar.SKIP_VSTEST'], 'YES'), in(dependencies.BuildVS.result, 'Succeeded', 'SucceededWithIssues')) + condition: and(succeeded(), and(ne(stageDependencies.BuildVS.outputs['vs.SetVar.SKIP_VSTEST'], 'YES'), in(dependencies.BuildVS.result, 'Succeeded', 'SucceededWithIssues'))) variables: - name: inventory value: veos_vtb From ef0559c0306e309a5c3da374f2809498fd4d469e Mon Sep 17 00:00:00 2001 From: Ye Jianquan Date: Wed, 19 Oct 2022 13:33:17 +0800 Subject: [PATCH 069/174] [TestbedV2]Migrate t0 and t1-lag to TestbedV2 (#12383) co-authorized by: jianquanye@microsoft.com Migrate the t0 and t1-lag test jobs in buildimage repo to TestbedV2. Why I did it Migrate the t0 and t1-lag test jobs in buildimage repo to TestbedV2. How I did it Migrate the t0 and t1-lag test jobs in buildimage repo to TestbedV2. 
--- .../run-test-scheduler-template.yml | 114 ++++++++++++++++++ azure-pipelines.yml | 104 ++++++++++++++-- 2 files changed, 207 insertions(+), 11 deletions(-) create mode 100644 .azure-pipelines/run-test-scheduler-template.yml diff --git a/.azure-pipelines/run-test-scheduler-template.yml b/.azure-pipelines/run-test-scheduler-template.yml new file mode 100644 index 000000000000..41956381fbaf --- /dev/null +++ b/.azure-pipelines/run-test-scheduler-template.yml @@ -0,0 +1,114 @@ +parameters: +- name: TOPOLOGY + type: string + +- name: POLL_INTERVAL + type: number + default: 10 + +- name: POLL_TIMEOUT + type: number + default: 36000 + +- name: MIN_WORKER + type: number + default: 1 + +- name: MAX_WORKER + type: number + default: 2 + +- name: TEST_SET + type: string + default: "" + +- name: DEPLOY_MG_EXTRA_PARAMS + type: string + default: "" + +steps: + - script: | + set -ex + wget -O ./.azure-pipelines/test_plan.py https://raw.githubusercontent.com/sonic-net/sonic-mgmt/master/.azure-pipelines/test_plan.py + wget -O ./.azure-pipelines/pr_test_scripts.yaml https://raw.githubusercontent.com/sonic-net/sonic-mgmt/master/.azure-pipelines/pr_test_scripts.yaml + displayName: Download TestbedV2 scripts + + - script: | + set -ex + pip install PyYAML + rm -f new_test_plan_id.txt + python ./.azure-pipelines/test_plan.py create -t ${{ parameters.TOPOLOGY }} -o new_test_plan_id.txt --min-worker ${{ parameters.MIN_WORKER }} --max-worker ${{ parameters.MAX_WORKER }} --test-set ${{ parameters.TEST_SET }} --kvm-build-id $(KVM_BUILD_ID) --deploy-mg-extra-params "${{ parameters.DEPLOY_MG_EXTRA_PARAMS }}" + TEST_PLAN_ID=`cat new_test_plan_id.txt` + + echo "Created test plan $TEST_PLAN_ID" + echo "Check https://www.testbed-tools.org/scheduler/testplan/$TEST_PLAN_ID for test plan status" + echo "##vso[task.setvariable variable=TEST_PLAN_ID]$TEST_PLAN_ID" + env: + TESTBED_TOOLS_URL: $(TESTBED_TOOLS_URL) + TENANT_ID: $(TESTBED_TOOLS_MSAL_TENANT_ID) + CLIENT_ID: $(TESTBED_TOOLS_MSAL_CLIENT_ID) 
+ CLIENT_SECRET: $(TESTBED_TOOLS_MSAL_CLIENT_SECRET) + displayName: Trigger test + + - script: | + set -ex + echo "Lock testbed" + echo "TestbedV2 is just online and might not be stable enough, for any issue, please send email to sonictestbedtools@microsoft.com" + echo "Runtime detailed progress at https://www.testbed-tools.org/scheduler/testplan/$TEST_PLAN_ID" + # When "LOCK_TESTBED" finish, it changes into "PREPARE_TESTBED" + python ./.azure-pipelines/test_plan.py poll -i "$(TEST_PLAN_ID)" --timeout 43200 --expected-states PREPARE_TESTBED EXECUTING KVMDUMP FINISHED CANCELLED FAILED + env: + TESTBED_TOOLS_URL: $(TESTBED_TOOLS_URL) + displayName: Lock testbed + timeoutInMinutes: 240 + + - script: | + set -ex + echo "Prepare testbed" + echo "Preparing the testbed(add-topo, deploy-mg) may take 15-30 minutes. Before the testbed is ready, the progress of the test plan keeps displayed as 0, please be patient(We will improve the indication in a short time)" + echo "If the progress keeps as 0 for more than 1 hour, please cancel and retry this pipeline" + echo "TestbedV2 is just online and might not be stable enough, for any issue, please send email to sonictestbedtools@microsoft.com" + echo "Runtime detailed progress at https://www.testbed-tools.org/scheduler/testplan/$TEST_PLAN_ID" + # When "PREPARE_TESTBED" finish, it changes into "EXECUTING" + python ./.azure-pipelines/test_plan.py poll -i "$(TEST_PLAN_ID)" --timeout 2400 --expected-states EXECUTING KVMDUMP FINISHED CANCELLED FAILED + env: + TESTBED_TOOLS_URL: $(TESTBED_TOOLS_URL) + displayName: Prepare testbed + timeoutInMinutes: 40 + + - script: | + set -ex + echo "Run test" + echo "TestbedV2 is just online and might not be stable enough, for any issue, please send email to sonictestbedtools@microsoft.com" + echo "Runtime detailed progress at https://www.testbed-tools.org/scheduler/testplan/$TEST_PLAN_ID" + # When "EXECUTING" finish, it changes into "KVMDUMP", "FAILED", "CANCELLED" or "FINISHED" + python 
./.azure-pipelines/test_plan.py poll -i "$(TEST_PLAN_ID)" --timeout 18000 --expected-states KVMDUMP FINISHED CANCELLED FAILED + env: + TESTBED_TOOLS_URL: $(TESTBED_TOOLS_URL) + displayName: Run test + timeoutInMinutes: 300 + + - script: | + set -ex + echo "KVM dump" + echo "TestbedV2 is just online and might not be stable enough, for any issue, please send email to sonictestbedtools@microsoft.com" + echo "Runtime detailed progress at https://www.testbed-tools.org/scheduler/testplan/$TEST_PLAN_ID" + # When "KVMDUMP" finish, it changes into "FAILED", "CANCELLED" or "FINISHED" + python ./.azure-pipelines/test_plan.py poll -i "$(TEST_PLAN_ID)" --timeout 43200 --expected-states FINISHED CANCELLED FAILED + condition: always() + env: + TESTBED_TOOLS_URL: $(TESTBED_TOOLS_URL) + displayName: KVM dump + timeoutInMinutes: 20 + + - script: | + set -ex + echo "Try to cancel test plan $TEST_PLAN_ID, cancelling finished test plan has no effect." + python ./.azure-pipelines/test_plan.py cancel -i "$(TEST_PLAN_ID)" + condition: always() + env: + TESTBED_TOOLS_URL: $(TESTBED_TOOLS_URL) + TENANT_ID: $(TESTBED_TOOLS_MSAL_TENANT_ID) + CLIENT_ID: $(TESTBED_TOOLS_MSAL_CLIENT_ID) + CLIENT_SECRET: $(TESTBED_TOOLS_MSAL_CLIENT_SECRET) + displayName: Finalize running test plan diff --git a/azure-pipelines.yml b/azure-pipelines.yml index d82050cd5fb7..80c9d36dae71 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -76,6 +76,7 @@ stages: dependsOn: BuildVS condition: and(succeeded(), and(ne(stageDependencies.BuildVS.outputs['vs.SetVar.SKIP_VSTEST'], 'YES'), in(dependencies.BuildVS.result, 'Succeeded', 'SucceededWithIssues'))) variables: + - group: Testbed-Tools - name: inventory value: veos_vtb - name: testbed_file @@ -138,7 +139,8 @@ stages: - job: t0_part1 pool: sonictest displayName: "kvmtest-t0-part1" - timeoutInMinutes: 360 + timeoutInMinutes: 400 + condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_CLASSICAL_TEST, 'YES')) continueOnError: false steps: - template: 
.azure-pipelines/run-test-template.yml @@ -147,12 +149,14 @@ stages: tbname: vms-kvm-t0 ptf_name: ptf_vms6-1 tbtype: t0 + vmtype: ceos section: part-1 - job: t0_part2 pool: sonictest displayName: "kvmtest-t0-part2" - timeoutInMinutes: 360 + timeoutInMinutes: 400 + condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_CLASSICAL_TEST, 'YES')) continueOnError: false steps: - template: .azure-pipelines/run-test-template.yml @@ -161,35 +165,76 @@ stages: tbname: vms-kvm-t0 ptf_name: ptf_vms6-1 tbtype: t0 + vmtype: ceos section: part-2 + - job: t0_testbedv2 + pool: + vmImage: 'ubuntu-20.04' + displayName: "kvmtest-t0 by TestbedV2" + timeoutInMinutes: 1080 + condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_TESTBEDV2_TEST, 'YES')) + continueOnError: false + steps: + - template: .azure-pipelines/run-test-scheduler-template.yml + parameters: + TOPOLOGY: t0 + MIN_WORKER: 2 + MAX_WORKER: 3 + + - job: t0_2vlans_testbedv2 + pool: + vmImage: 'ubuntu-20.04' + displayName: "kvmtest-t0-2vlans by TestbedV2" + timeoutInMinutes: 1080 + condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_TESTBEDV2_TEST, 'YES')) + continueOnError: false + steps: + - template: .azure-pipelines/run-test-scheduler-template.yml + parameters: + TOPOLOGY: t0 + TEST_SET: t0-2vlans + MAX_WORKER: 1 + DEPLOY_MG_EXTRA_PARAMS: "-e vlan_config=two_vlan_a" + - job: - pool: sonictest + pool: + vmImage: 'ubuntu-20.04' displayName: "kvmtest-t0" - timeoutInMinutes: 360 dependsOn: - t0_part1 - t0_part2 + - t0_testbedv2 + - t0_2vlans_testbedv2 condition: always() continueOnError: false variables: resultOfPart1: $[ dependencies.t0_part1.result ] resultOfPart2: $[ dependencies.t0_part2.result ] + resultOfT0TestbedV2: $[ dependencies.t0_testbedv2.result ] + resultOfT02VlansTestbedV2: $[ dependencies.t0_2vlans_testbedv2.result ] steps: - script: | + if [ $(resultOfT0TestbedV2) == "Succeeded" ] && [ $(resultOfT02VlansTestbedV2) == "Succeeded" ]; then + echo "TestbedV2 t0 passed." 
+ exit 0 + fi + if [ $(resultOfPart1) == "Succeeded" ] && [ $(resultOfPart2) == "Succeeded" ]; then - echo "Both job kvmtest-t0-part1 and kvmtest-t0-part2 are passed." + echo "Classic t0 jobs(both part1 and part2) passed." exit 0 - else - echo "Either job kvmtest-t0-part1 or job kvmtest-t0-part2 failed! Please check the detailed information." - exit 1 fi - - job: + echo "Both classic and TestbedV2 t0 jobs failed! Please check the detailed information. (Any of them passed, t0 will be considered as passed)" + exit 1 + + + - job: t1_lag_classic pool: sonictest-t1-lag - displayName: "kvmtest-t1-lag" - timeoutInMinutes: 360 + displayName: "kvmtest-t1-lag classic" + timeoutInMinutes: 400 + condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_CLASSICAL_TEST, 'YES')) continueOnError: false steps: - template: .azure-pipelines/run-test-template.yml @@ -198,6 +243,43 @@ stages: tbname: vms-kvm-t1-lag ptf_name: ptf_vms6-2 tbtype: t1-lag + vmtype: ceos + + - job: t1_lag_testbedv2 + pool: + vmImage: 'ubuntu-20.04' + displayName: "kvmtest-t1-lag by TestbedV2" + timeoutInMinutes: 600 + condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_TESTBEDV2_TEST, 'YES')) + continueOnError: false + steps: + - template: .azure-pipelines/run-test-scheduler-template.yml + parameters: + TOPOLOGY: t1-lag + MIN_WORKER: 2 + MAX_WORKER: 3 + + - job: + pool: + vmImage: 'ubuntu-20.04' + displayName: "kvmtest-t1-lag" + dependsOn: + - t1_lag_classic + - t1_lag_testbedv2 + condition: always() + continueOnError: false + variables: + resultOfClassic: $[ dependencies.t1_lag_classic.result ] + resultOfTestbedV2: $[ dependencies.t1_lag_testbedv2.result ] + steps: + - script: | + if [ $(resultOfClassic) == "Succeeded" ] || [ $(resultOfTestbedV2) == "Succeeded" ]; then + echo "One or both of t1_lag_classic and t1_lag_testbedv2 passed." + exit 0 + else + echo "Both t1_lag_classic and t1_lag_testbedv2 failed! Please check the detailed information." 
+ exit 1 + fi - job: pool: sonictest-sonic-t0 From 6f67a3ac6aad6e4f70cb9c125ef9ec7b6de69adb Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Wed, 19 Oct 2022 07:05:36 -0700 Subject: [PATCH 070/174] [device/quanta] Mitigation for security vulnerability (#11867) Signed-off-by: maipbui Dependency: [https://github.com/sonic-net/sonic-buildimage/pull/12065](https://github.com/sonic-net/sonic-buildimage/pull/12065) #### Why I did it `shell=True` is dangerous because this call will spawn the command using a shell process `os` - not secure against maliciously constructed input and dangerous if used to evaluate dynamic content. #### How I did it `os` - use with `subprocess` Use `shell=False` with shell features - redirection: [https://stackoverflow.com/questions/4965159/how-to-redirect-output-with-subprocess-in-python/6482200#6482200?newreg=53afb91b3ebd47c5930be627fcdf2930](https://stackoverflow.com/questions/4965159/how-to-redirect-output-with-subprocess-in-python/6482200#6482200?newreg=53afb91b3ebd47c5930be627fcdf2930) - `|` operator: [https://docs.python.org/2/library/subprocess.html#replacing-shell-pipeline](https://docs.python.org/2/library/subprocess.html#replacing-shell-pipeline) --- .../plugins/psuutil.py | 38 ++++++++++--------- .../ix1b-32x/utils/quanta_ix1b_util.py | 37 +++++++++--------- .../ix7-32x/sonic_platform/component.py | 29 ++++++++++---- .../ix7-32x/sonic_platform/sfp.py | 18 ++++----- .../ix7-32x/utils/quanta_ix7_util.py | 9 +++-- .../ix7-bwde-32x/sonic_platform/component.py | 33 +++++++++++----- .../ix7-bwde-32x/sonic_platform/sfp.py | 18 ++++----- .../utils/quanta_ix7_bwde_util.py | 9 +++-- .../ix8-56x/sonic_platform/component.py | 29 ++++++++++---- .../ix8-56x/sonic_platform/sfp.py | 18 ++++----- .../ix8-56x/utils/quanta_ix8_util.py | 9 +++-- .../ix8a-bwde-56x/sonic_platform/component.py | 33 +++++++++++----- .../ix8a-bwde-56x/sonic_platform/sfp.py | 18 ++++----- .../utils/quanta_ix8a_bwde_util.py | 9 +++-- .../ix8c-56x/sonic_platform/component.py | 35 
++++++++++++----- .../ix8c-56x/sonic_platform/sfp.py | 16 ++++---- .../ix8c-56x/utils/quanta_ix8c_util.py | 9 +++-- .../ix9-32x/sonic_platform/component.py | 33 +++++++++++----- .../ix9-32x/sonic_platform/sfp.py | 24 ++++++------ .../ix9-32x/utils/quanta_ix9_util.py | 9 +++-- 20 files changed, 266 insertions(+), 167 deletions(-) diff --git a/device/quanta/x86_64-quanta_ix1b_rglbmc-r0/plugins/psuutil.py b/device/quanta/x86_64-quanta_ix1b_rglbmc-r0/plugins/psuutil.py index 0b2027afdda4..2cdcf10f8d41 100755 --- a/device/quanta/x86_64-quanta_ix1b_rglbmc-r0/plugins/psuutil.py +++ b/device/quanta/x86_64-quanta_ix1b_rglbmc-r0/plugins/psuutil.py @@ -7,6 +7,7 @@ import os.path import subprocess import logging +from sonic_py_common.general import check_output_pipe try: from sonic_psu.psu_base import PsuBase @@ -22,11 +23,13 @@ def show_log(txt): return -def exec_cmd(cmd, show): +def exec_cmd(cmd_args, out_file, show): + cmd = ' '.join(cmd_args) + ' > ' + out_file logging.info('Run :'+cmd) try: - output = subprocess.check_output(cmd, shell=True, universal_newlines=True) - show_log(cmd + "output:"+str(output)) + with open(out_file, 'w') as f: + output = subprocess.check_output(cmd_args, stdout=f, universal_newlines=True) + show_log(cmd + "output:"+str(output)) except subprocess.CalledProcessError as e: logging.info("Failed :"+cmd) if show: @@ -40,12 +43,13 @@ def my_log(txt): return -def log_os_system(cmd, show): +def log_os_system(cmd1_args, cmd2_args, show): + cmd = ' '.join(cmd1_args) + ' | ' + ' '.join(cmd2_args) logging.info('Run :'+cmd) status = 1 output = "" try: - output = subprocess.check_output(cmd, shell=True, universal_newlines=True) + output = check_output_pipe(cmd1_args, cmd2_args) my_log(cmd + "output:"+str(output)) except subprocess.CalledProcessError as e: logging.info('Failed :'+cmd) @@ -55,28 +59,28 @@ def log_os_system(cmd, show): def gpio16_exist(): - ls = log_os_system("ls /sys/class/gpio/ | grep gpio16", 0) + ls = log_os_system(["ls", 
"/sys/class/gpio/"], ["grep", "gpio16"], 0) logging.info('mods:'+ls) if len(ls) == 0: return False def gpio17_exist(): - ls = log_os_system("ls /sys/class/gpio/ | grep gpio17", 0) + ls = log_os_system(["ls", "/sys/class/gpio/"], ["grep", "gpio17"], 0) logging.info('mods:'+ls) if len(ls) == 0: return False def gpio19_exist(): - ls = log_os_system("ls /sys/class/gpio/ | grep gpio19", 0) + ls = log_os_system(["ls", "/sys/class/gpio/"], ["grep", "gpio19"], 0) logging.info('mods:'+ls) if len(ls) == 0: return False def gpio20_exist(): - ls = log_os_system("ls /sys/class/gpio/ | grep gpio20", 0) + ls = log_os_system(["ls", "/sys/class/gpio/"], ["grep", "gpio20"], 0) logging.info('mods:'+ls) if len(ls) == 0: return False @@ -95,20 +99,20 @@ def __init__(self): PsuBase.__init__(self) if gpio16_exist() == False: - output = exec_cmd("echo 16 > /sys/class/gpio/export ", 1) - output = exec_cmd("echo in > /sys/class/gpio/gpio16/direction ", 1) + output = exec_cmd(["echo", "16"], "/sys/class/gpio/export", 1) + output = exec_cmd(["echo", "in"], "/sys/class/gpio/gpio16/direction", 1) if gpio17_exist() == False: - output = exec_cmd("echo 17 > /sys/class/gpio/export ", 1) - output = exec_cmd("echo in > /sys/class/gpio/gpio17/direction ", 1) + output = exec_cmd(["echo", "17"], "/sys/class/gpio/export", 1) + output = exec_cmd(["echo", "in"], "/sys/class/gpio/gpio17/direction", 1) if gpio19_exist() == False: - output = exec_cmd("echo 19 > /sys/class/gpio/export ", 1) - output = exec_cmd("echo in > /sys/class/gpio/gpio19/direction ", 1) + output = exec_cmd(["echo", "19"], "/sys/class/gpio/export", 1) + output = exec_cmd(["echo", "in"], "/sys/class/gpio/gpio19/direction", 1) if gpio20_exist() == False: - output = exec_cmd("echo 20 > /sys/class/gpio/export ", 1) - output = exec_cmd("echo in > /sys/class/gpio/gpio20/direction ", 1) + output = exec_cmd(["echo", "20"], "/sys/class/gpio/export", 1) + output = exec_cmd(["echo", "in"], "/sys/class/gpio/gpio20/direction", 1) # Get sysfs attribute 
def get_attr_value(self, attr_path): diff --git a/platform/broadcom/sonic-platform-modules-quanta/ix1b-32x/utils/quanta_ix1b_util.py b/platform/broadcom/sonic-platform-modules-quanta/ix1b-32x/utils/quanta_ix1b_util.py index 2135932938eb..b92c5cf828c2 100755 --- a/platform/broadcom/sonic-platform-modules-quanta/ix1b-32x/utils/quanta_ix1b_util.py +++ b/platform/broadcom/sonic-platform-modules-quanta/ix1b-32x/utils/quanta_ix1b_util.py @@ -27,13 +27,10 @@ clean : uninstall drivers and remove related sysfs nodes """ -import os import commands import sys, getopt import logging -import re import time -from collections import namedtuple DEBUG = False args = [] @@ -41,8 +38,8 @@ i2c_prefix = '/sys/bus/i2c/devices/' if DEBUG == True: - print sys.argv[0] - print 'ARGV :', sys.argv[1:] + print(sys.argv[0]) + print('ARGV :', sys.argv[1:]) def main(): global DEBUG @@ -56,10 +53,10 @@ def main(): 'debug', 'force', ]) - if DEBUG == True: - print options - print args - print len(sys.argv) + if DEBUG is True: + print(options) + print(args) + print(len(sys.argv)) for opt, arg in options: if opt in ('-h', '--help'): @@ -83,12 +80,12 @@ def main(): return 0 def show_help(): - print __doc__ % {'scriptName' : sys.argv[0].split("/")[-1]} + print(__doc__ % {'scriptName' : sys.argv[0].split("/")[-1]}) sys.exit(0) def show_log(txt): - if DEBUG == True: - print "[IX1B-32X]" + txt + if DEBUG is True: + print("[IX1B-32X]" + txt) return @@ -165,7 +162,7 @@ def system_install(): status, output = exec_cmd("modprobe " + drivers[i], 1) if status: - print output + print(output) if FORCE == 0: return status @@ -174,7 +171,7 @@ def system_install(): status, output = exec_cmd(instantiate[i], 1) if status: - print output + print(output) if FORCE == 0: return status @@ -184,7 +181,9 @@ def system_install(): #QSFP for 1~32 port for port_number in range(1, 33): bus_number = port_number + 31 - os.system("echo %d >/sys/bus/i2c/devices/%d-0050/port_name" % (port_number, bus_number)) + file = 
"/sys/bus/i2c/devices/%d-0050/port_name" % bus_number + with open(file, 'w') as f: + f.write(str(port_number) + '\n') #Set system LED to green status, output = exec_cmd("echo 1 > /sys/class/leds/sysled_green/brightness", 1) @@ -199,14 +198,14 @@ def system_ready(): def install(): if not device_found(): - print "No device, installing...." + print("No device, installing....") status = system_install() if status: if FORCE == 0: return status else: - print " ix1b driver already installed...." + print(" ix1b driver already installed....") return @@ -215,10 +214,10 @@ def uninstall(): #uninstall drivers for i in range(len(drivers) - 1, -1, -1): - status, output = exec_cmd("rmmod " + drivers[i], 1) + status, output = exec_cmd("rmmod " + drivers[i], 1) if status: - print output + print(output) if FORCE == 0: return status diff --git a/platform/broadcom/sonic-platform-modules-quanta/ix7-32x/sonic_platform/component.py b/platform/broadcom/sonic-platform-modules-quanta/ix7-32x/sonic_platform/component.py index 9f5f69f422a9..bbaf23533f20 100644 --- a/platform/broadcom/sonic-platform-modules-quanta/ix7-32x/sonic_platform/component.py +++ b/platform/broadcom/sonic-platform-modules-quanta/ix7-32x/sonic_platform/component.py @@ -14,6 +14,7 @@ import subprocess from sonic_platform_base.component_base import ComponentBase from collections import namedtuple + from sonic_py_common.general import getstatusoutput_noshell_pipe except ImportError as e: raise ImportError(str(e) + "- required module not found") @@ -47,7 +48,7 @@ def _get_command_result(cmdline): try: proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, - shell=True, stderr=subprocess.STDOUT, + stderr=subprocess.STDOUT, universal_newlines=True) stdout = proc.communicate()[0] rc = proc.wait() @@ -60,12 +61,24 @@ def _get_command_result(cmdline): return result + @staticmethod + def _get_command_result_pipe(cmd1, cmd2): + try: + rc, result = getstatusoutput_noshell_pipe(cmd1, cmd2) + if rc != [0, 0]: + raise 
RuntimeError("Failed to execute command {} {}, return code {}, {}".format(cmd1, cmd2, rc, result)) + + except OSError as e: + raise RuntimeError("Failed to execute command {} {} due to {}".format(cmd1, cmd2, repr(e))) + + return result + class ComponentBIOS(Component): COMPONENT_NAME = 'BIOS' COMPONENT_DESCRIPTION = 'BIOS - Basic Input/Output System' - BIOS_QUERY_VERSION_COMMAND = "dmidecode -s bios-version" + BIOS_QUERY_VERSION_COMMAND = ["dmidecode", "-s", "bios-version"] def __init__(self): super(ComponentBIOS, self).__init__() @@ -90,7 +103,8 @@ def get_firmware_version(self): class ComponentBMC(Component): COMPONENT_NAME = 'BMC' COMPONENT_DESCRIPTION = 'BMC - Board Management Controller' - BMC_QUERY_VERSION_COMMAND = "ipmitool mc info | grep 'Firmware Revision'" + BMC_QUERY_VERSION_COMMAND1 = ["ipmitool", "mc", "info"] + BMC_QUERY_VERSION_COMMAND2 = ["grep", 'Firmware Revision'] def __init__(self): super(ComponentBMC, self).__init__() @@ -105,7 +119,7 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component """ - bmc_ver = self._get_command_result(self.BMC_QUERY_VERSION_COMMAND) + bmc_ver = self._get_command_result_pipe(self.BMC_QUERY_VERSION_COMMAND1, self.BMC_QUERY_VERSION_COMMAND2) if not bmc_ver: return 'ERR' else: @@ -159,7 +173,7 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component """ - res = self._get_command_result("ipmitool raw 0x32 0xff 0x02 {}".format(self.cplds[self.index].cmd_index)) + res = self._get_command_result(["ipmitool", "raw", "0x32", "0xff", "0x02", str(self.cplds[self.index].cmd_index)]) if not res: return 'ERR' else: @@ -179,7 +193,8 @@ def get_component_list(cls): class ComponentPCIE(Component): COMPONENT_NAME = 'PCIe' COMPONENT_DESCRIPTION = 'ASIC PCIe Firmware' - PCIE_QUERY_VERSION_COMMAND = "bcmcmd 'pciephy fw version' | grep 'FW version'" + PCIE_QUERY_VERSION_COMMAND1 = ["bcmcmd", 'pciephy fw version'] + PCIE_QUERY_VERSION_COMMAND2 = 
["grep", 'FW version'] def __init__(self): super(ComponentPCIE, self).__init__() @@ -194,7 +209,7 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component """ - version = self._get_command_result(self.PCIE_QUERY_VERSION_COMMAND) + version = self._get_command_result_pipe(self.PCIE_QUERY_VERSION_COMMAND1, self.PCIE_QUERY_VERSION_COMMAND2) if not version: return 'ERR' else: diff --git a/platform/broadcom/sonic-platform-modules-quanta/ix7-32x/sonic_platform/sfp.py b/platform/broadcom/sonic-platform-modules-quanta/ix7-32x/sonic_platform/sfp.py index c8336473051e..a2da83d290c5 100644 --- a/platform/broadcom/sonic-platform-modules-quanta/ix7-32x/sonic_platform/sfp.py +++ b/platform/broadcom/sonic-platform-modules-quanta/ix7-32x/sonic_platform/sfp.py @@ -8,17 +8,17 @@ # ############################################################################# -import os import time +import subprocess from ctypes import create_string_buffer try: - from sonic_platform_base.sfp_base import SfpBase - from sonic_platform_base.sonic_sfp.sff8472 import sff8472InterfaceId - from sonic_platform_base.sonic_sfp.sff8472 import sff8472Dom - from sonic_platform_base.sonic_sfp.sff8436 import sff8436InterfaceId - from sonic_platform_base.sonic_sfp.sff8436 import sff8436Dom - from sonic_platform_base.sonic_sfp.sfputilhelper import SfpUtilHelper + from sonic_platform_base.sfp_base import SfpBase + from sonic_platform_base.sonic_sfp.sff8472 import sff8472InterfaceId + from sonic_platform_base.sonic_sfp.sff8472 import sff8472Dom + from sonic_platform_base.sonic_sfp.sff8436 import sff8436InterfaceId + from sonic_platform_base.sonic_sfp.sff8436 import sff8436Dom + from sonic_platform_base.sonic_sfp.sfputilhelper import SfpUtilHelper except ImportError as e: raise ImportError(str(e) + "- required module not found") @@ -163,7 +163,7 @@ class Sfp(SfpBase): # Path to QSFP sysfs PLATFORM_ROOT_PATH = "/usr/share/sonic/device" PMON_HWSKU_PATH = "/usr/share/sonic/hwsku" 
- HOST_CHK_CMD = "docker > /dev/null 2>&1" + HOST_CHK_CMD = ["docker"] PLATFORM = "x86_64-quanta_ix7_rglbmc-r0" HWSKU = "Quanta-IX7-32X" @@ -259,7 +259,7 @@ def _convert_string_to_num(self, value_str): return 'N/A' def __is_host(self): - return os.system(self.HOST_CHK_CMD) == 0 + return subprocess.call(self.HOST_CHK_CMD) == 0 def __get_path_to_port_config_file(self): platform_path = "/".join([self.PLATFORM_ROOT_PATH, self.PLATFORM]) diff --git a/platform/broadcom/sonic-platform-modules-quanta/ix7-32x/utils/quanta_ix7_util.py b/platform/broadcom/sonic-platform-modules-quanta/ix7-32x/utils/quanta_ix7_util.py index 4196e24f4625..9b679cacd916 100755 --- a/platform/broadcom/sonic-platform-modules-quanta/ix7-32x/utils/quanta_ix7_util.py +++ b/platform/broadcom/sonic-platform-modules-quanta/ix7-32x/utils/quanta_ix7_util.py @@ -27,7 +27,6 @@ clean : uninstall drivers and remove related sysfs nodes """ -import os import subprocess import sys, getopt import logging @@ -54,7 +53,7 @@ def main(): 'debug', 'force', ]) - if DEBUG == True: + if DEBUG is True: print(options) print(args) print(len(sys.argv)) @@ -84,7 +83,7 @@ def show_help(): sys.exit(0) def show_log(txt): - if DEBUG == True: + if DEBUG is True: print("[IX7-32X]" + txt) return @@ -204,7 +203,9 @@ def system_install(): #QSFP for 1~32 port for port_number in range(1, 33): bus_number = port_number + 16 - os.system("echo %d >/sys/bus/i2c/devices/%d-0050/port_name" % (port_number, bus_number)) + file = "/sys/bus/i2c/devices/%d-0050/port_name" % bus_number + with open(file, 'w') as f: + f.write(str(port_number) + '\n') return diff --git a/platform/broadcom/sonic-platform-modules-quanta/ix7-bwde-32x/sonic_platform/component.py b/platform/broadcom/sonic-platform-modules-quanta/ix7-bwde-32x/sonic_platform/component.py index ae0c25228930..e5228829f5e2 100644 --- a/platform/broadcom/sonic-platform-modules-quanta/ix7-bwde-32x/sonic_platform/component.py +++ 
b/platform/broadcom/sonic-platform-modules-quanta/ix7-bwde-32x/sonic_platform/component.py @@ -14,6 +14,7 @@ import subprocess from sonic_platform_base.component_base import ComponentBase from collections import namedtuple + from sonic_py_common.general import getstatusoutput_noshell_pipe except ImportError as e: raise ImportError(str(e) + "- required module not found") @@ -47,7 +48,7 @@ def _get_command_result(cmdline): try: proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, - shell=True, stderr=subprocess.STDOUT, + stderr=subprocess.STDOUT, universal_newlines=True) stdout = proc.communicate()[0] rc = proc.wait() @@ -60,12 +61,24 @@ def _get_command_result(cmdline): return result + @staticmethod + def _get_command_result_pipe(cmd1, cmd2): + try: + rc, result = getstatusoutput_noshell_pipe(cmd1, cmd2) + if rc != [0, 0]: + raise RuntimeError("Failed to execute command {} {}, return code {}, {}".format(cmd1, cmd2, rc, result)) + + except OSError as e: + raise RuntimeError("Failed to execute command {} {} due to {}".format(cmd1, cmd2, repr(e))) + + return result + class ComponentBIOS(Component): COMPONENT_NAME = 'BIOS' COMPONENT_DESCRIPTION = 'BIOS - Basic Input/Output System' - BIOS_QUERY_VERSION_COMMAND = "dmidecode -s bios-version" + BIOS_QUERY_VERSION_COMMAND = ["dmidecode", "-s", "bios-version"] def __init__(self): super(ComponentBIOS, self).__init__() @@ -90,7 +103,8 @@ def get_firmware_version(self): class ComponentBMC(Component): COMPONENT_NAME = 'BMC' COMPONENT_DESCRIPTION = 'BMC - Board Management Controller' - BMC_QUERY_VERSION_COMMAND = "ipmitool mc info | grep 'Firmware Revision'" + BMC_QUERY_VERSION_COMMAND1 = ["ipmitool", "mc", "info"] + BMC_QUERY_VERSION_COMMAND2 = ["grep", 'Firmware Revision'] def __init__(self): super(ComponentBMC, self).__init__() @@ -105,7 +119,7 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component """ - bmc_ver = self._get_command_result(self.BMC_QUERY_VERSION_COMMAND) + 
bmc_ver = self._get_command_result_pipe(self.BMC_QUERY_VERSION_COMMAND1, self.BMC_QUERY_VERSION_COMMAND2) if not bmc_ver: return 'ERR' else: @@ -158,9 +172,9 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component """ - self._get_command_result("ipmitool raw 0x30 0xe0 0xd0 0x01 0x01") - res = self._get_command_result("ipmitool raw 0x32 0xff 0x02 {}".format(self.cplds[self.index].cmd_index)) - self._get_command_result("ipmitool raw 0x30 0xe0 0xd0 0x01 0x00") + self._get_command_result(["ipmitool", "raw", "0x30", "0xe0", "0xd0", "0x01", "0x01"]) + res = self._get_command_result(["ipmitool", "raw", "0x32", "0xff", "0x02", str(self.cplds[self.index].cmd_index)]) + self._get_command_result(["ipmitool", "raw", "0x30", "0xe0", "0xd0", "0x01", "0x00"]) if not res: return 'ERR' else: @@ -180,7 +194,8 @@ def get_component_list(cls): class ComponentPCIE(Component): COMPONENT_NAME = 'PCIe' COMPONENT_DESCRIPTION = 'ASIC PCIe Firmware' - PCIE_QUERY_VERSION_COMMAND = "bcmcmd 'pciephy fw version' | grep 'FW version'" + PCIE_QUERY_VERSION_COMMAND1 = ["bcmcmd", 'pciephy fw version'] + PCIE_QUERY_VERSION_COMMAND2 = ["grep", 'FW version'] def __init__(self): super(ComponentPCIE, self).__init__() @@ -195,7 +210,7 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component """ - version = self._get_command_result(self.PCIE_QUERY_VERSION_COMMAND) + version = self._get_command_result_pipe(self.PCIE_QUERY_VERSION_COMMAND1, self.PCIE_QUERY_VERSION_COMMAND2) if not version: return 'ERR' else: diff --git a/platform/broadcom/sonic-platform-modules-quanta/ix7-bwde-32x/sonic_platform/sfp.py b/platform/broadcom/sonic-platform-modules-quanta/ix7-bwde-32x/sonic_platform/sfp.py index 1faf552c7b56..942074bb7266 100644 --- a/platform/broadcom/sonic-platform-modules-quanta/ix7-bwde-32x/sonic_platform/sfp.py +++ b/platform/broadcom/sonic-platform-modules-quanta/ix7-bwde-32x/sonic_platform/sfp.py @@ -8,17 +8,17 @@ # 
############################################################################# -import os import time +import subprocess from ctypes import create_string_buffer try: - from sonic_platform_base.sfp_base import SfpBase - from sonic_platform_base.sonic_sfp.sff8472 import sff8472InterfaceId - from sonic_platform_base.sonic_sfp.sff8472 import sff8472Dom - from sonic_platform_base.sonic_sfp.sff8436 import sff8436InterfaceId - from sonic_platform_base.sonic_sfp.sff8436 import sff8436Dom - from sonic_platform_base.sonic_sfp.sfputilhelper import SfpUtilHelper + from sonic_platform_base.sfp_base import SfpBase + from sonic_platform_base.sonic_sfp.sff8472 import sff8472InterfaceId + from sonic_platform_base.sonic_sfp.sff8472 import sff8472Dom + from sonic_platform_base.sonic_sfp.sff8436 import sff8436InterfaceId + from sonic_platform_base.sonic_sfp.sff8436 import sff8436Dom + from sonic_platform_base.sonic_sfp.sfputilhelper import SfpUtilHelper except ImportError as e: raise ImportError(str(e) + "- required module not found") @@ -163,7 +163,7 @@ class Sfp(SfpBase): # Path to QSFP sysfs PLATFORM_ROOT_PATH = "/usr/share/sonic/device" PMON_HWSKU_PATH = "/usr/share/sonic/hwsku" - HOST_CHK_CMD = "docker > /dev/null 2>&1" + HOST_CHK_CMD = ["docker"] PLATFORM = "x86_64-quanta_ix7_bwde-r0" HWSKU = "Quanta-IX7-BWDE-32X" @@ -259,7 +259,7 @@ def _convert_string_to_num(self, value_str): return 'N/A' def __is_host(self): - return os.system(self.HOST_CHK_CMD) == 0 + return subprocess.call(self.HOST_CHK_CMD) == 0 def __get_path_to_port_config_file(self): platform_path = "/".join([self.PLATFORM_ROOT_PATH, self.PLATFORM]) diff --git a/platform/broadcom/sonic-platform-modules-quanta/ix7-bwde-32x/utils/quanta_ix7_bwde_util.py b/platform/broadcom/sonic-platform-modules-quanta/ix7-bwde-32x/utils/quanta_ix7_bwde_util.py index a20aa33828e4..8a3c896de5c0 100755 --- a/platform/broadcom/sonic-platform-modules-quanta/ix7-bwde-32x/utils/quanta_ix7_bwde_util.py +++ 
b/platform/broadcom/sonic-platform-modules-quanta/ix7-bwde-32x/utils/quanta_ix7_bwde_util.py @@ -27,7 +27,6 @@ clean : uninstall drivers and remove related sysfs nodes """ -import os import subprocess import sys, getopt import logging @@ -54,7 +53,7 @@ def main(): 'debug', 'force', ]) - if DEBUG == True: + if DEBUG is True: print(options) print(args) print(len(sys.argv)) @@ -84,7 +83,7 @@ def show_help(): sys.exit(0) def show_log(txt): - if DEBUG == True: + if DEBUG is True: print("[IX7-BWDE-32X]"+txt) return @@ -203,7 +202,9 @@ def system_install(): #QSFP for 1~32 port for port_number in range(1, 33): bus_number = port_number + 12 - os.system("echo %d >/sys/bus/i2c/devices/%d-0050/port_name" % (port_number, bus_number)) + file = "/sys/bus/i2c/devices/%d-0050/port_name" % bus_number + with open(file, 'w') as f: + f.write(str(port_number) + '\n') status, output = exec_cmd("pip3 install /usr/share/sonic/device/x86_64-quanta_ix7_bwde-r0/sonic_platform-1.0-py3-none-any.whl",1) if status: diff --git a/platform/broadcom/sonic-platform-modules-quanta/ix8-56x/sonic_platform/component.py b/platform/broadcom/sonic-platform-modules-quanta/ix8-56x/sonic_platform/component.py index 95f275014f5b..03b2b1c1745c 100644 --- a/platform/broadcom/sonic-platform-modules-quanta/ix8-56x/sonic_platform/component.py +++ b/platform/broadcom/sonic-platform-modules-quanta/ix8-56x/sonic_platform/component.py @@ -14,6 +14,7 @@ import subprocess from sonic_platform_base.component_base import ComponentBase from collections import namedtuple + from sonic_py_common.general import getstatusoutput_noshell_pipe except ImportError as e: raise ImportError(str(e) + "- required module not found") @@ -47,7 +48,7 @@ def _get_command_result(cmdline): try: proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, - shell=True, stderr=subprocess.STDOUT, + stderr=subprocess.STDOUT, universal_newlines=True) stdout = proc.communicate()[0] rc = proc.wait() @@ -60,12 +61,24 @@ def _get_command_result(cmdline): return 
result + @staticmethod + def _get_command_result_pipe(cmd1, cmd2): + try: + rc, result = getstatusoutput_noshell_pipe(cmd1, cmd2) + if rc != [0, 0]: + raise RuntimeError("Failed to execute command {} {}, return code {}, {}".format(cmd1, cmd2, rc, result)) + + except OSError as e: + raise RuntimeError("Failed to execute command {} {} due to {}".format(cmd1, cmd2, repr(e))) + + return result + class ComponentBIOS(Component): COMPONENT_NAME = 'BIOS' COMPONENT_DESCRIPTION = 'BIOS - Basic Input/Output System' - BIOS_QUERY_VERSION_COMMAND = "dmidecode -s bios-version" + BIOS_QUERY_VERSION_COMMAND = ["dmidecode", "-s", "bios-version"] def __init__(self): super(ComponentBIOS, self).__init__() @@ -90,7 +103,8 @@ def get_firmware_version(self): class ComponentBMC(Component): COMPONENT_NAME = 'BMC' COMPONENT_DESCRIPTION = 'BMC - Board Management Controller' - BMC_QUERY_VERSION_COMMAND = "ipmitool mc info | grep 'Firmware Revision'" + BMC_QUERY_VERSION_COMMAND1 = ["ipmitool", "mc", "info"] + BMC_QUERY_VERSION_COMMAND2 = ["grep", 'Firmware Revision'] def __init__(self): super(ComponentBMC, self).__init__() @@ -105,7 +119,7 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component """ - bmc_ver = self._get_command_result(self.BMC_QUERY_VERSION_COMMAND) + bmc_ver = self._get_command_result_pipe(self.BMC_QUERY_VERSION_COMMAND1, self.BMC_QUERY_VERSION_COMMAND2) if not bmc_ver: return 'ERR' else: @@ -160,7 +174,7 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component """ - res = self._get_command_result("ipmitool raw 0x32 0xff 0x02 {}".format(self.cplds[self.index].cmd_index)) + res = self._get_command_result(["ipmitool", "raw", "0x32", "0xff", "0x02", str(self.cplds[self.index].cmd_index)]) if not res: return 'ERR' else: @@ -180,7 +194,8 @@ def get_component_list(cls): class ComponentPCIE(Component): COMPONENT_NAME = 'PCIe' COMPONENT_DESCRIPTION = 'ASIC PCIe Firmware' - 
PCIE_QUERY_VERSION_COMMAND = "bcmcmd 'pciephy fw version' | grep 'FW version'" + PCIE_QUERY_VERSION_COMMAND1 = ["bcmcmd", 'pciephy fw version'] + PCIE_QUERY_VERSION_COMMAND2 = ["grep", 'FW version'] def __init__(self): super(ComponentPCIE, self).__init__() @@ -195,7 +210,7 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component """ - version = self._get_command_result(self.PCIE_QUERY_VERSION_COMMAND) + version = self._get_command_result_pipe(self.PCIE_QUERY_VERSION_COMMAND1, self.PCIE_QUERY_VERSION_COMMAND2) if not version: return 'ERR' else: diff --git a/platform/broadcom/sonic-platform-modules-quanta/ix8-56x/sonic_platform/sfp.py b/platform/broadcom/sonic-platform-modules-quanta/ix8-56x/sonic_platform/sfp.py index f7a9a105a5c7..45b14d64273c 100644 --- a/platform/broadcom/sonic-platform-modules-quanta/ix8-56x/sonic_platform/sfp.py +++ b/platform/broadcom/sonic-platform-modules-quanta/ix8-56x/sonic_platform/sfp.py @@ -8,17 +8,17 @@ # ############################################################################# -import os import time +import subprocess from ctypes import create_string_buffer try: - from sonic_platform_base.sfp_base import SfpBase - from sonic_platform_base.sonic_sfp.sff8472 import sff8472InterfaceId - from sonic_platform_base.sonic_sfp.sff8472 import sff8472Dom - from sonic_platform_base.sonic_sfp.sff8436 import sff8436InterfaceId - from sonic_platform_base.sonic_sfp.sff8436 import sff8436Dom - from sonic_platform_base.sonic_sfp.sfputilhelper import SfpUtilHelper + from sonic_platform_base.sfp_base import SfpBase + from sonic_platform_base.sonic_sfp.sff8472 import sff8472InterfaceId + from sonic_platform_base.sonic_sfp.sff8472 import sff8472Dom + from sonic_platform_base.sonic_sfp.sff8436 import sff8436InterfaceId + from sonic_platform_base.sonic_sfp.sff8436 import sff8436Dom + from sonic_platform_base.sonic_sfp.sfputilhelper import SfpUtilHelper except ImportError as e: raise ImportError(str(e) + "- 
required module not found") @@ -163,7 +163,7 @@ class Sfp(SfpBase): # Path to QSFP sysfs PLATFORM_ROOT_PATH = "/usr/share/sonic/device" PMON_HWSKU_PATH = "/usr/share/sonic/hwsku" - HOST_CHK_CMD = "docker > /dev/null 2>&1" + HOST_CHK_CMD = ["docker"] PLATFORM = "x86_64-quanta_ix8_rglbmc-r0" HWSKU = "Quanta-IX8-56X" @@ -281,7 +281,7 @@ def _convert_string_to_num(self, value_str): return 'N/A' def __is_host(self): - return os.system(self.HOST_CHK_CMD) == 0 + return subprocess.call(self.HOST_CHK_CMD) == 0 def __get_path_to_port_config_file(self): platform_path = "/".join([self.PLATFORM_ROOT_PATH, self.PLATFORM]) diff --git a/platform/broadcom/sonic-platform-modules-quanta/ix8-56x/utils/quanta_ix8_util.py b/platform/broadcom/sonic-platform-modules-quanta/ix8-56x/utils/quanta_ix8_util.py index 833bfe27c420..7875eec59a0e 100755 --- a/platform/broadcom/sonic-platform-modules-quanta/ix8-56x/utils/quanta_ix8_util.py +++ b/platform/broadcom/sonic-platform-modules-quanta/ix8-56x/utils/quanta_ix8_util.py @@ -27,7 +27,6 @@ clean : uninstall drivers and remove related sysfs nodes """ -import os import subprocess import sys, getopt import logging @@ -54,7 +53,7 @@ def main(): 'debug', 'force', ]) - if DEBUG == True: + if DEBUG is True: print(options) print(args) print(len(sys.argv)) @@ -84,7 +83,7 @@ def show_help(): sys.exit(0) def show_log(txt): - if DEBUG == True: + if DEBUG is True: print("[IX8-56X]" + txt) return @@ -301,7 +300,9 @@ def system_install(): #QSFP for 1~56 port for port_number in range(1, 57): bus_number = port_number + 16 - os.system("echo %d >/sys/bus/i2c/devices/%d-0050/port_name" % (port_number, bus_number)) + file = "/sys/bus/i2c/devices/%d-0050/port_name" % bus_number + with open(file, 'w') as f: + f.write(str(port_number) + '\n') status, output = exec_cmd("pip3 install /usr/share/sonic/device/x86_64-quanta_ix8_rglbmc-r0/sonic_platform-1.0-py3-none-any.whl",1) if status: diff --git 
a/platform/broadcom/sonic-platform-modules-quanta/ix8a-bwde-56x/sonic_platform/component.py b/platform/broadcom/sonic-platform-modules-quanta/ix8a-bwde-56x/sonic_platform/component.py index f697f9bbe53b..414512fbf1bb 100644 --- a/platform/broadcom/sonic-platform-modules-quanta/ix8a-bwde-56x/sonic_platform/component.py +++ b/platform/broadcom/sonic-platform-modules-quanta/ix8a-bwde-56x/sonic_platform/component.py @@ -14,6 +14,7 @@ import subprocess from sonic_platform_base.component_base import ComponentBase from collections import namedtuple + from sonic_py_common.general import getstatusoutput_noshell_pipe except ImportError as e: raise ImportError(str(e) + "- required module not found") @@ -47,7 +48,7 @@ def _get_command_result(cmdline): try: proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, - shell=True, stderr=subprocess.STDOUT, + stderr=subprocess.STDOUT, universal_newlines=True) stdout = proc.communicate()[0] rc = proc.wait() @@ -60,12 +61,24 @@ def _get_command_result(cmdline): return result + @staticmethod + def _get_command_result_pipe(cmd1, cmd2): + try: + rc, result = getstatusoutput_noshell_pipe(cmd1, cmd2) + if rc != [0, 0]: + raise RuntimeError("Failed to execute command {} {}, return code {}, {}".format(cmd1, cmd2, rc, result)) + + except OSError as e: + raise RuntimeError("Failed to execute command {} {} due to {}".format(cmd1, cmd2, repr(e))) + + return result + class ComponentBIOS(Component): COMPONENT_NAME = 'BIOS' COMPONENT_DESCRIPTION = 'BIOS - Basic Input/Output System' - BIOS_QUERY_VERSION_COMMAND = "dmidecode -s bios-version" + BIOS_QUERY_VERSION_COMMAND = ["dmidecode", "-s", "bios-version"] def __init__(self): super(ComponentBIOS, self).__init__() @@ -90,7 +103,8 @@ def get_firmware_version(self): class ComponentBMC(Component): COMPONENT_NAME = 'BMC' COMPONENT_DESCRIPTION = 'BMC - Board Management Controller' - BMC_QUERY_VERSION_COMMAND = "ipmitool mc info | grep 'Firmware Revision'" + BMC_QUERY_VERSION_COMMAND1 = ["ipmitool", "mc", 
"info"] + BMC_QUERY_VERSION_COMMAND2 = ["grep", 'Firmware Revision'] def __init__(self): super(ComponentBMC, self).__init__() @@ -105,7 +119,7 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component """ - bmc_ver = self._get_command_result(self.BMC_QUERY_VERSION_COMMAND) + bmc_ver = self._get_command_result_pipe(self.BMC_QUERY_VERSION_COMMAND1, self.BMC_QUERY_VERSION_COMMAND2) if not bmc_ver: return 'ERR' else: @@ -159,9 +173,9 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component """ - self._get_command_result("ipmitool raw 0x30 0xe0 0xd0 0x01 0x01") - res = self._get_command_result("ipmitool raw 0x32 0xff 0x02 {}".format(self.cplds[self.index].cmd_index)) - self._get_command_result("ipmitool raw 0x30 0xe0 0xd0 0x01 0x00") + self._get_command_result(["ipmitool", "raw", "0x30", "0xe0", "0xd0", "0x01", "0x01"]) + res = self._get_command_result(["ipmitool", "raw", "0x32", "0xff", "0x02", str(self.cplds[self.index].cmd_index)]) + self._get_command_result(["ipmitool", "raw", "0x30", "0xe0", "0xd0", "0x01", "0x00"]) if not res: return 'ERR' else: @@ -181,7 +195,8 @@ def get_component_list(cls): class ComponentPCIE(Component): COMPONENT_NAME = 'PCIe' COMPONENT_DESCRIPTION = 'ASIC PCIe Firmware' - PCIE_QUERY_VERSION_COMMAND = "bcmcmd 'pciephy fw version' | grep 'FW version'" + PCIE_QUERY_VERSION_COMMAND1 = ["bcmcmd", 'pciephy fw version'] + PCIE_QUERY_VERSION_COMMAND2 = ["grep", 'FW version'] def __init__(self): super(ComponentPCIE, self).__init__() @@ -196,7 +211,7 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component """ - version = self._get_command_result(self.PCIE_QUERY_VERSION_COMMAND) + version = self._get_command_result_pipe(self.PCIE_QUERY_VERSION_COMMAND1, self.PCIE_QUERY_VERSION_COMMAND2) if not version: return 'ERR' else: diff --git a/platform/broadcom/sonic-platform-modules-quanta/ix8a-bwde-56x/sonic_platform/sfp.py 
b/platform/broadcom/sonic-platform-modules-quanta/ix8a-bwde-56x/sonic_platform/sfp.py index abbebb03434a..7d76740ca762 100644 --- a/platform/broadcom/sonic-platform-modules-quanta/ix8a-bwde-56x/sonic_platform/sfp.py +++ b/platform/broadcom/sonic-platform-modules-quanta/ix8a-bwde-56x/sonic_platform/sfp.py @@ -8,17 +8,17 @@ # ############################################################################# -import os import time +import subprocess from ctypes import create_string_buffer try: - from sonic_platform_base.sfp_base import SfpBase - from sonic_platform_base.sonic_sfp.sff8472 import sff8472InterfaceId - from sonic_platform_base.sonic_sfp.sff8472 import sff8472Dom - from sonic_platform_base.sonic_sfp.sff8436 import sff8436InterfaceId - from sonic_platform_base.sonic_sfp.sff8436 import sff8436Dom - from sonic_platform_base.sonic_sfp.sfputilhelper import SfpUtilHelper + from sonic_platform_base.sfp_base import SfpBase + from sonic_platform_base.sonic_sfp.sff8472 import sff8472InterfaceId + from sonic_platform_base.sonic_sfp.sff8472 import sff8472Dom + from sonic_platform_base.sonic_sfp.sff8436 import sff8436InterfaceId + from sonic_platform_base.sonic_sfp.sff8436 import sff8436Dom + from sonic_platform_base.sonic_sfp.sfputilhelper import SfpUtilHelper except ImportError as e: raise ImportError(str(e) + "- required module not found") @@ -163,7 +163,7 @@ class Sfp(SfpBase): # Path to QSFP sysfs PLATFORM_ROOT_PATH = "/usr/share/sonic/device" PMON_HWSKU_PATH = "/usr/share/sonic/hwsku" - HOST_CHK_CMD = "docker > /dev/null 2>&1" + HOST_CHK_CMD = ["docker"] PLATFORM = "x86_64-quanta_ix8a_bwde-r0" HWSKU = "Quanta-IX8A-BWDE-56X" @@ -281,7 +281,7 @@ def _convert_string_to_num(self, value_str): return 'N/A' def __is_host(self): - return os.system(self.HOST_CHK_CMD) == 0 + return subprocess.call(self.HOST_CHK_CMD) == 0 def __get_path_to_port_config_file(self): platform_path = "/".join([self.PLATFORM_ROOT_PATH, self.PLATFORM]) diff --git 
a/platform/broadcom/sonic-platform-modules-quanta/ix8a-bwde-56x/utils/quanta_ix8a_bwde_util.py b/platform/broadcom/sonic-platform-modules-quanta/ix8a-bwde-56x/utils/quanta_ix8a_bwde_util.py index c23824779373..ff6141a51141 100755 --- a/platform/broadcom/sonic-platform-modules-quanta/ix8a-bwde-56x/utils/quanta_ix8a_bwde_util.py +++ b/platform/broadcom/sonic-platform-modules-quanta/ix8a-bwde-56x/utils/quanta_ix8a_bwde_util.py @@ -27,7 +27,6 @@ clean : uninstall drivers and remove related sysfs nodes """ -import os import subprocess import sys, getopt import logging @@ -54,7 +53,7 @@ def main(): 'debug', 'force', ]) - if DEBUG == True: + if DEBUG is True: print(options) print(args) print(len(sys.argv)) @@ -84,7 +83,7 @@ def show_help(): sys.exit(0) def show_log(txt): - if DEBUG == True: + if DEBUG is True: print("[IX8A-BWDE-56X]" + txt) return @@ -301,7 +300,9 @@ def system_install(): #QSFP for 1~56 port for port_number in range(1, 57): bus_number = port_number + 12 - os.system("echo %d >/sys/bus/i2c/devices/%d-0050/port_name" % (port_number, bus_number)) + file = "/sys/bus/i2c/devices/%d-0050/port_name" % bus_number + with open(file, 'w') as f: + f.write(str(port_number) + '\n') #Enable front-ports LED decoding exec_cmd('echo 1 > /sys/class/cpld-led/CPLDLED-1/led_decode', 1) diff --git a/platform/broadcom/sonic-platform-modules-quanta/ix8c-56x/sonic_platform/component.py b/platform/broadcom/sonic-platform-modules-quanta/ix8c-56x/sonic_platform/component.py index 75066275d2c3..15076314b3f2 100644 --- a/platform/broadcom/sonic-platform-modules-quanta/ix8c-56x/sonic_platform/component.py +++ b/platform/broadcom/sonic-platform-modules-quanta/ix8c-56x/sonic_platform/component.py @@ -14,6 +14,7 @@ import subprocess from sonic_platform_base.component_base import ComponentBase from collections import namedtuple + from sonic_py_common.general import getstatusoutput_noshell_pipe except ImportError as e: raise ImportError(str(e) + "- required module not found") @@ -47,7 +48,7 
@@ def _get_command_result(cmdline): try: proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, - shell=True, stderr=subprocess.STDOUT, + stderr=subprocess.STDOUT, universal_newlines=True) stdout = proc.communicate()[0] rc = proc.wait() @@ -60,12 +61,24 @@ def _get_command_result(cmdline): return result + @staticmethod + def _get_command_result_pipe(cmd1, cmd2): + try: + rc, result = getstatusoutput_noshell_pipe(cmd1, cmd2) + if rc != [0, 0]: + raise RuntimeError("Failed to execute command {} {}, return code {}, {}".format(cmd1, cmd2, rc, result)) + + except OSError as e: + raise RuntimeError("Failed to execute command {} {} due to {}".format(cmd1, cmd2, repr(e))) + + return result + class ComponentBIOS(Component): COMPONENT_NAME = 'BIOS' COMPONENT_DESCRIPTION = 'BIOS - Basic Input/Output System' - BIOS_QUERY_VERSION_COMMAND = "dmidecode -s bios-version" + BIOS_QUERY_VERSION_COMMAND = ["dmidecode", "-s", "bios-version"] def __init__(self): super(ComponentBIOS, self).__init__() @@ -90,7 +103,8 @@ def get_firmware_version(self): class ComponentBMC(Component): COMPONENT_NAME = 'BMC' COMPONENT_DESCRIPTION = 'BMC - Board Management Controller' - BMC_QUERY_VERSION_COMMAND = "ipmitool mc info | grep 'Firmware Revision'" + BMC_QUERY_VERSION_COMMAND1 = ["ipmitool", "mc", "info"] + BMC_QUERY_VERSION_COMMAND2 = ["grep", 'Firmware Revision'] def __init__(self): super(ComponentBMC, self).__init__() @@ -105,7 +119,7 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component """ - bmc_ver = self._get_command_result(self.BMC_QUERY_VERSION_COMMAND) + bmc_ver = self._get_command_result_pipe(self.BMC_QUERY_VERSION_COMMAND1, self.BMC_QUERY_VERSION_COMMAND2) if not bmc_ver: return 'ERR' else: @@ -158,10 +172,10 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component - """ - self._get_command_result("ipmitool raw 0x30 0xe0 0xd0 0x01 0x01") - res = self._get_command_result("ipmitool raw 0x32 0xff 
0x02 {}".format(self.cplds[self.index].cmd_index)) - self._get_command_result("ipmitool raw 0x30 0xe0 0xd0 0x01 0x00") + """ + self._get_command_result(["ipmitool", "raw", "0x30", "0xe0", "0xd0", "0x01", "0x01"]) + res = self._get_command_result(["ipmitool", "raw", "0x32", "0xff", "0x02", str(self.cplds[self.index].cmd_index)]) + self._get_command_result(["ipmitool", "raw", "0x30", "0xe0", "0xd0", "0x01", "0x00"]) if not res: return 'ERR' else: @@ -181,7 +195,8 @@ def get_component_list(cls): class ComponentPCIE(Component): COMPONENT_NAME = 'PCIe' COMPONENT_DESCRIPTION = 'ASIC PCIe Firmware' - PCIE_QUERY_VERSION_COMMAND = "bcmcmd 'pciephy fw version' | grep 'FW version'" + PCIE_QUERY_VERSION_COMMAND1 = ["bcmcmd", 'pciephy fw version'] + PCIE_QUERY_VERSION_COMMAND2 = ["grep", 'FW version'] def __init__(self): super(ComponentPCIE, self).__init__() @@ -196,7 +211,7 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component """ - version = self._get_command_result(self.PCIE_QUERY_VERSION_COMMAND) + version = self._get_command_result_pipe(self.PCIE_QUERY_VERSION_COMMAND1, self.PCIE_QUERY_VERSION_COMMAND2) if not version: return 'ERR' else: diff --git a/platform/broadcom/sonic-platform-modules-quanta/ix8c-56x/sonic_platform/sfp.py b/platform/broadcom/sonic-platform-modules-quanta/ix8c-56x/sonic_platform/sfp.py index 305d78aba4cc..a9baef8eab36 100644 --- a/platform/broadcom/sonic-platform-modules-quanta/ix8c-56x/sonic_platform/sfp.py +++ b/platform/broadcom/sonic-platform-modules-quanta/ix8c-56x/sonic_platform/sfp.py @@ -8,17 +8,17 @@ # ############################################################################# -import os +import subprocess import time from ctypes import create_string_buffer try: - from sonic_platform_base.sfp_base import SfpBase - from sonic_platform_base.sonic_sfp.sff8472 import sff8472InterfaceId - from sonic_platform_base.sonic_sfp.sff8472 import sff8472Dom - from sonic_platform_base.sonic_sfp.sff8436 
import sff8436InterfaceId - from sonic_platform_base.sonic_sfp.sff8436 import sff8436Dom - from sonic_platform_base.sonic_sfp.sfputilhelper import SfpUtilHelper + from sonic_platform_base.sfp_base import SfpBase + from sonic_platform_base.sonic_sfp.sff8472 import sff8472InterfaceId + from sonic_platform_base.sonic_sfp.sff8472 import sff8472Dom + from sonic_platform_base.sonic_sfp.sff8436 import sff8436InterfaceId + from sonic_platform_base.sonic_sfp.sff8436 import sff8436Dom + from sonic_platform_base.sonic_sfp.sfputilhelper import SfpUtilHelper except ImportError as e: raise ImportError(str(e) + "- required module not found") @@ -281,7 +281,7 @@ def _convert_string_to_num(self, value_str): return 'N/A' def __is_host(self): - return os.system(self.HOST_CHK_CMD) == 0 + return subprocess.call(self.HOST_CHK_CMD) == 0 def __get_path_to_port_config_file(self): platform_path = "/".join([self.PLATFORM_ROOT_PATH, self.PLATFORM]) diff --git a/platform/broadcom/sonic-platform-modules-quanta/ix8c-56x/utils/quanta_ix8c_util.py b/platform/broadcom/sonic-platform-modules-quanta/ix8c-56x/utils/quanta_ix8c_util.py index a7e50d908fe4..ffa80e613a62 100755 --- a/platform/broadcom/sonic-platform-modules-quanta/ix8c-56x/utils/quanta_ix8c_util.py +++ b/platform/broadcom/sonic-platform-modules-quanta/ix8c-56x/utils/quanta_ix8c_util.py @@ -27,7 +27,6 @@ clean : uninstall drivers and remove related sysfs nodes """ -import os import subprocess import sys, getopt import logging @@ -54,7 +53,7 @@ def main(): 'debug', 'force', ]) - if DEBUG == True: + if DEBUG is True: print(options) print(args) print(len(sys.argv)) @@ -84,7 +83,7 @@ def show_help(): sys.exit(0) def show_log(txt): - if DEBUG == True: + if DEBUG is True: print("[IX8C-56X]" + txt) return @@ -297,7 +296,9 @@ def system_install(): #QSFP for 1~56 port for port_number in range(1, 57): bus_number = port_number + 12 - os.system("echo %d >/sys/bus/i2c/devices/%d-0050/port_name" % (port_number, bus_number)) + file = 
"/sys/bus/i2c/devices/%d-0050/port_name" % bus_number + with open(file, 'w') as f: + f.write(str(port_number) + '\n') return diff --git a/platform/broadcom/sonic-platform-modules-quanta/ix9-32x/sonic_platform/component.py b/platform/broadcom/sonic-platform-modules-quanta/ix9-32x/sonic_platform/component.py index 49d25f31cbcc..c1b94b62d31a 100644 --- a/platform/broadcom/sonic-platform-modules-quanta/ix9-32x/sonic_platform/component.py +++ b/platform/broadcom/sonic-platform-modules-quanta/ix9-32x/sonic_platform/component.py @@ -14,6 +14,7 @@ import subprocess from sonic_platform_base.component_base import ComponentBase from collections import namedtuple + from sonic_py_common.general import getstatusoutput_noshell_pipe except ImportError as e: raise ImportError(str(e) + "- required module not found") @@ -47,7 +48,7 @@ def _get_command_result(cmdline): try: proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, - shell=True, stderr=subprocess.STDOUT, + stderr=subprocess.STDOUT, universal_newlines=True) stdout = proc.communicate()[0] rc = proc.wait() @@ -60,12 +61,24 @@ def _get_command_result(cmdline): return result + @staticmethod + def _get_command_result_pipe(cmd1, cmd2): + try: + rc, result = getstatusoutput_noshell_pipe(cmd1, cmd2) + if rc != [0, 0]: + raise RuntimeError("Failed to execute command {} {}, return code {}, {}".format(cmd1, cmd2, rc, result)) + + except OSError as e: + raise RuntimeError("Failed to execute command {} {} due to {}".format(cmd1, cmd2, repr(e))) + + return result + class ComponentBIOS(Component): COMPONENT_NAME = 'BIOS' COMPONENT_DESCRIPTION = 'BIOS - Basic Input/Output System' - BIOS_QUERY_VERSION_COMMAND = "dmidecode -s bios-version" + BIOS_QUERY_VERSION_COMMAND = ["dmidecode", "-s", "bios-version"] def __init__(self): super(ComponentBIOS, self).__init__() @@ -90,7 +103,8 @@ def get_firmware_version(self): class ComponentBMC(Component): COMPONENT_NAME = 'BMC' COMPONENT_DESCRIPTION = 'BMC - Board Management Controller' - 
BMC_QUERY_VERSION_COMMAND = "ipmitool mc info | grep 'Firmware Revision'" + BMC_QUERY_VERSION_COMMAND1 = ["ipmitool", "mc", "info"] + BMC_QUERY_VERSION_COMMAND2 = ["grep", 'Firmware Revision'] def __init__(self): super(ComponentBMC, self).__init__() @@ -105,7 +119,7 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component """ - bmc_ver = self._get_command_result(self.BMC_QUERY_VERSION_COMMAND) + bmc_ver = self._get_command_result_pipe(self.BMC_QUERY_VERSION_COMMAND1, self.BMC_QUERY_VERSION_COMMAND2) if not bmc_ver: return 'ERR' else: @@ -158,9 +172,9 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component """ - self._get_command_result("ipmitool raw 0x30 0xe0 0xd0 0x01 0x01") - res = self._get_command_result("ipmitool raw 0x32 0xff 0x02 {}".format(self.cplds[self.index].cmd_index)) - self._get_command_result("ipmitool raw 0x30 0xe0 0xd0 0x01 0x00") + self._get_command_result(["ipmitool", "raw", "0x30", "0xe0", "0xd0", "0x01", "0x01"]) + res = self._get_command_result(["ipmitool", "raw", "0x32", "0xff", "0x02", str(self.cplds[self.index].cmd_index)]) + self._get_command_result(["ipmitool", "raw", "0x30", "0xe0", "0xd0", "0x01", "0x00"]) if not res: return 'ERR' else: @@ -180,7 +194,8 @@ def get_component_list(cls): class ComponentPCIE(Component): COMPONENT_NAME = 'PCIe' COMPONENT_DESCRIPTION = 'ASIC PCIe Firmware' - PCIE_QUERY_VERSION_COMMAND = "bcmcmd 'pciephy fw version' | grep 'FW version'" + PCIE_QUERY_VERSION_COMMAND1 = ["bcmcmd", 'pciephy fw version'] + PCIE_QUERY_VERSION_COMMAND2 = ["grep", 'FW version'] def __init__(self): super(ComponentPCIE, self).__init__() @@ -195,7 +210,7 @@ def get_firmware_version(self): Returns: A string containing the firmware version of the component """ - version = self._get_command_result(self.PCIE_QUERY_VERSION_COMMAND) + version = self._get_command_result_pipe(self.PCIE_QUERY_VERSION_COMMAND1, self.PCIE_QUERY_VERSION_COMMAND2) if not 
version: return 'ERR' else: diff --git a/platform/broadcom/sonic-platform-modules-quanta/ix9-32x/sonic_platform/sfp.py b/platform/broadcom/sonic-platform-modules-quanta/ix9-32x/sonic_platform/sfp.py index 77176ad083f5..30997115cb27 100644 --- a/platform/broadcom/sonic-platform-modules-quanta/ix9-32x/sonic_platform/sfp.py +++ b/platform/broadcom/sonic-platform-modules-quanta/ix9-32x/sonic_platform/sfp.py @@ -8,21 +8,21 @@ # ############################################################################# -import os import time +import subprocess from ctypes import create_string_buffer try: - from sonic_platform_base.sfp_base import SfpBase - from sonic_platform_base.sonic_sfp.sff8472 import sff8472InterfaceId - from sonic_platform_base.sonic_sfp.sff8472 import sff8472Dom - from sonic_platform_base.sonic_sfp.sff8436 import sff8436InterfaceId - from sonic_platform_base.sonic_sfp.sff8436 import sff8436Dom - from sonic_platform_base.sonic_sfp.inf8628 import inf8628InterfaceId - from sonic_platform_base.sonic_sfp.qsfp_dd import qsfp_dd_InterfaceId - from sonic_platform_base.sonic_sfp.qsfp_dd import qsfp_dd_Dom - from sonic_py_common.logger import Logger - from sonic_platform_base.sonic_sfp.sfputilhelper import SfpUtilHelper + from sonic_platform_base.sfp_base import SfpBase + from sonic_platform_base.sonic_sfp.sff8472 import sff8472InterfaceId + from sonic_platform_base.sonic_sfp.sff8472 import sff8472Dom + from sonic_platform_base.sonic_sfp.sff8436 import sff8436InterfaceId + from sonic_platform_base.sonic_sfp.sff8436 import sff8436Dom + from sonic_platform_base.sonic_sfp.inf8628 import inf8628InterfaceId + from sonic_platform_base.sonic_sfp.qsfp_dd import qsfp_dd_InterfaceId + from sonic_platform_base.sonic_sfp.qsfp_dd import qsfp_dd_Dom + from sonic_py_common.logger import Logger + from sonic_platform_base.sonic_sfp.sfputilhelper import SfpUtilHelper except ImportError as e: raise ImportError (str(e) + "- required module not found") @@ -354,7 +354,7 @@ def 
_convert_string_to_num(self, value_str): return 'N/A' def __is_host(self): - return os.system(self.HOST_CHK_CMD) == 0 + return subprocess.call(self.HOST_CHK_CMD) == 0 def __get_path_to_port_config_file(self): platform_path = "/".join([self.PLATFORM_ROOT_PATH, self.PLATFORM]) diff --git a/platform/broadcom/sonic-platform-modules-quanta/ix9-32x/utils/quanta_ix9_util.py b/platform/broadcom/sonic-platform-modules-quanta/ix9-32x/utils/quanta_ix9_util.py index ec14e10ad6d1..fa6466da3d33 100755 --- a/platform/broadcom/sonic-platform-modules-quanta/ix9-32x/utils/quanta_ix9_util.py +++ b/platform/broadcom/sonic-platform-modules-quanta/ix9-32x/utils/quanta_ix9_util.py @@ -27,7 +27,6 @@ clean : uninstall drivers and remove related sysfs nodes """ -import os import subprocess import sys, getopt import logging @@ -54,7 +53,7 @@ def main(): 'debug', 'force', ]) - if DEBUG == True: + if DEBUG is True: print(options) print(args) print(len(sys.argv)) @@ -84,7 +83,7 @@ def show_help(): sys.exit(0) def show_log(txt): - if DEBUG == True: + if DEBUG is True: print("[IX9-32X]" + txt) return @@ -237,7 +236,9 @@ def system_install(): #QSFPDD for 1~32 port for port_number in range(1, 33): bus_number = port_number + 12 - os.system("echo %d >/sys/bus/i2c/devices/%d-0050/port_name" % (port_number, bus_number)) + file = "/sys/bus/i2c/devices/%d-0050/port_name" % bus_number + with open(file, 'w') as f: + f.write(str(port_number) + '\n') return From 9f88d03c2b520c2abfddcc6b28254deb5bdf1d20 Mon Sep 17 00:00:00 2001 From: Mariusz Stachura Date: Wed, 19 Oct 2022 18:36:56 +0200 Subject: [PATCH 071/174] [QoS] Support dynamic headroom calculation for Barefoot platforms (#11708) Signed-off-by: Mariusz Stachura What I did Adding the dynamic headroom calculation support for Barefoot platforms. Why I did it Enabling dynamic mode for barefoot case. How I verified it The community tests are adjusted and pass. 
--- .../newport/buffers_defaults_t0.j2 | 13 ++++++---- .../newport/buffers_defaults_t1.j2 | 13 ++++++---- .../newport/buffers_dynamic.json.j2 | 4 ++++ .../newport/switch-tna-sai.conf | 4 ++-- .../port_peripheral_config.j2 | 10 ++++++++ files/build_templates/buffers_config.j2 | 5 ++++ .../build_templates/sonic_debian_extension.j2 | 2 +- platform/barefoot/asic_table.j2 | 24 +++++++++++++++++++ platform/barefoot/peripheral_table.j2 | 18 ++++++++++++++ platform/barefoot/rules.mk | 2 ++ 10 files changed, 82 insertions(+), 13 deletions(-) create mode 100644 device/barefoot/x86_64-accton_as9516_32d-r0/newport/buffers_dynamic.json.j2 create mode 100644 device/barefoot/x86_64-accton_as9516_32d-r0/port_peripheral_config.j2 create mode 100644 platform/barefoot/asic_table.j2 create mode 100644 platform/barefoot/peripheral_table.j2 diff --git a/device/barefoot/x86_64-accton_as9516_32d-r0/newport/buffers_defaults_t0.j2 b/device/barefoot/x86_64-accton_as9516_32d-r0/newport/buffers_defaults_t0.j2 index a06f62f733ae..89485794926e 100644 --- a/device/barefoot/x86_64-accton_as9516_32d-r0/newport/buffers_defaults_t0.j2 +++ b/device/barefoot/x86_64-accton_as9516_32d-r0/newport/buffers_defaults_t0.j2 @@ -1,12 +1,12 @@ {% set default_cable = '5m' %} -{% set ingress_lossless_pool_size = '23850816' %} -{% set ingress_lossy_pool_size = '36222208' %} -{% set egress_lossless_pool_size = '29482816' %} -{% set egress_lossy_pool_size = '26400000' %} +{% set ingress_lossless_pool_size = '43067728' %} +{% set ingress_lossy_pool_size = '3520000' %} +{% set egress_lossless_pool_size = '46749824' %} +{% set egress_lossy_pool_size = '2463824' %} {%- macro generate_port_lists(PORT_ALL) %} {# Generate list of ports #} - {%- for port_idx in range(0,32) %} + {%- for port_idx in range(0, 32) %} {%- if PORT_ALL.append("Ethernet%d" % (port_idx * 4)) %}{%- endif %} {%- endfor %} {%- endmacro %} @@ -15,6 +15,9 @@ "BUFFER_POOL": { "ingress_lossless_pool": { "size": "{{ ingress_lossless_pool_size }}", + {%- if 
dynamic_mode is defined %} + "xoff": "3153920", + {%- endif %} "type": "ingress", "mode": "dynamic" }, diff --git a/device/barefoot/x86_64-accton_as9516_32d-r0/newport/buffers_defaults_t1.j2 b/device/barefoot/x86_64-accton_as9516_32d-r0/newport/buffers_defaults_t1.j2 index a06f62f733ae..89485794926e 100644 --- a/device/barefoot/x86_64-accton_as9516_32d-r0/newport/buffers_defaults_t1.j2 +++ b/device/barefoot/x86_64-accton_as9516_32d-r0/newport/buffers_defaults_t1.j2 @@ -1,12 +1,12 @@ {% set default_cable = '5m' %} -{% set ingress_lossless_pool_size = '23850816' %} -{% set ingress_lossy_pool_size = '36222208' %} -{% set egress_lossless_pool_size = '29482816' %} -{% set egress_lossy_pool_size = '26400000' %} +{% set ingress_lossless_pool_size = '43067728' %} +{% set ingress_lossy_pool_size = '3520000' %} +{% set egress_lossless_pool_size = '46749824' %} +{% set egress_lossy_pool_size = '2463824' %} {%- macro generate_port_lists(PORT_ALL) %} {# Generate list of ports #} - {%- for port_idx in range(0,32) %} + {%- for port_idx in range(0, 32) %} {%- if PORT_ALL.append("Ethernet%d" % (port_idx * 4)) %}{%- endif %} {%- endfor %} {%- endmacro %} @@ -15,6 +15,9 @@ "BUFFER_POOL": { "ingress_lossless_pool": { "size": "{{ ingress_lossless_pool_size }}", + {%- if dynamic_mode is defined %} + "xoff": "3153920", + {%- endif %} "type": "ingress", "mode": "dynamic" }, diff --git a/device/barefoot/x86_64-accton_as9516_32d-r0/newport/buffers_dynamic.json.j2 b/device/barefoot/x86_64-accton_as9516_32d-r0/newport/buffers_dynamic.json.j2 new file mode 100644 index 000000000000..426aee9cd8dd --- /dev/null +++ b/device/barefoot/x86_64-accton_as9516_32d-r0/newport/buffers_dynamic.json.j2 @@ -0,0 +1,4 @@ +{%- set default_topo = 't0' %} +{%- set dynamic_mode = 'true' %} +{%- set shp = 'true' %} +{%- include 'buffers_config.j2' %} diff --git a/device/barefoot/x86_64-accton_as9516_32d-r0/newport/switch-tna-sai.conf b/device/barefoot/x86_64-accton_as9516_32d-r0/newport/switch-tna-sai.conf index 
2789eb0e2c0d..9a6ae5b120f5 100644 --- a/device/barefoot/x86_64-accton_as9516_32d-r0/newport/switch-tna-sai.conf +++ b/device/barefoot/x86_64-accton_as9516_32d-r0/newport/switch-tna-sai.conf @@ -23,7 +23,7 @@ ], "model_json_path" : "share/switch/aug_model.json", "switchapi_port_add": false, - "non_default_port_ppgs": 5 + "non_default_port_ppgs": 6 } ] } @@ -32,7 +32,7 @@ { "device-id": 0, "model_json_path": "share/switch/aug_model.json", - "non_default_port_ppgs": 5, + "non_default_port_ppgs": 6, "switchapi_port_add": false } ] diff --git a/device/barefoot/x86_64-accton_as9516_32d-r0/port_peripheral_config.j2 b/device/barefoot/x86_64-accton_as9516_32d-r0/port_peripheral_config.j2 new file mode 100644 index 000000000000..c1322ec2cc68 --- /dev/null +++ b/device/barefoot/x86_64-accton_as9516_32d-r0/port_peripheral_config.j2 @@ -0,0 +1,10 @@ +[ + {%- include 'peripheral_table.j2' %} + , + { + "PORT_PERIPHERAL_TABLE:global": { + "gearbox_model": "barefoot" + }, + "OP": "SET" + } +] diff --git a/files/build_templates/buffers_config.j2 b/files/build_templates/buffers_config.j2 index 21d730c74b2a..f15e54b55136 100644 --- a/files/build_templates/buffers_config.j2 +++ b/files/build_templates/buffers_config.j2 @@ -226,6 +226,11 @@ def "DEFAULT_LOSSLESS_BUFFER_PARAMETER": { "AZURE": { "default_dynamic_th": "0" + {%- if shp is defined -%} + , + "max_headroom_size" : "0", + "over_subscribe_ratio" : "1" + {%- endif -%} } }, "LOSSLESS_TRAFFIC_PATTERN": { diff --git a/files/build_templates/sonic_debian_extension.j2 b/files/build_templates/sonic_debian_extension.j2 index 57438ef785f0..c5e76d3c47cc 100644 --- a/files/build_templates/sonic_debian_extension.j2 +++ b/files/build_templates/sonic_debian_extension.j2 @@ -530,7 +530,7 @@ sudo cp $BUILD_TEMPLATES/buffers_config.j2 $FILESYSTEM_ROOT_USR_SHARE_SONIC_TEMP sudo cp $BUILD_TEMPLATES/qos_config.j2 $FILESYSTEM_ROOT_USR_SHARE_SONIC_TEMPLATES/ # Copy the templates for dynamically buffer calculation -{% if sonic_asic_platform == 
"mellanox" or sonic_asic_platform == "vs" %} +{% if sonic_asic_platform == "mellanox" or sonic_asic_platform == "vs" or sonic_asic_platform == "barefoot" %} if [ -f platform/{{ sonic_asic_platform }}/asic_table.j2 ] then sudo cp platform/{{ sonic_asic_platform }}/asic_table.j2 $FILESYSTEM_ROOT/usr/share/sonic/templates/asic_table.j2 diff --git a/platform/barefoot/asic_table.j2 b/platform/barefoot/asic_table.j2 new file mode 100644 index 000000000000..04670dd28e70 --- /dev/null +++ b/platform/barefoot/asic_table.j2 @@ -0,0 +1,24 @@ +{%- if DEVICE_METADATA is defined and DEVICE_METADATA['localhost']['platform'] is defined %} +{%- set platform = DEVICE_METADATA['localhost']['platform'] %} +{%- endif -%} + + +[ +{% set platform2asic = { + 'x86_64-accton_as9516_32d-r0':'BAREFOOT-TOFINO-2', + 'x86_64-accton_as9516bf_32d-r0':'BAREFOOT-TOFINO-2' + } +%} +{% set asic_type = platform2asic[platform] %} +{% if asic_type == 'BAREFOOT-TOFINO-2' %} + { + "ASIC_TABLE:BAREFOOT-TOFINO-2": { + "cell_size": "176", + "pipeline_latency": "68", + "mac_phy_delay": "0.8", + "peer_response_time": "9" + }, + "OP": "SET" + } +{% endif %} +] diff --git a/platform/barefoot/peripheral_table.j2 b/platform/barefoot/peripheral_table.j2 new file mode 100644 index 000000000000..06b33810fb29 --- /dev/null +++ b/platform/barefoot/peripheral_table.j2 @@ -0,0 +1,18 @@ +{%- if DEVICE_METADATA is defined and DEVICE_METADATA['localhost']['platform'] is defined %} +{%- set platform = DEVICE_METADATA['localhost']['platform'] %} +{%- endif -%} + +{% set platform2gearbox = { + 'x86_64-accton_as9516_32d-r0':'barefoot', + 'x86_64-accton_as9516bf_32d-r0':'barefoot' + } +%} +{% set gearbox_type = platform2gearbox[platform] %} +{% if gearbox_type == 'barefoot' %} + { + "PERIPHERAL TABLE:barefoot": { + "gearbox_delay" : "365" + }, + "OP": "SET" + } +{% endif %} diff --git a/platform/barefoot/rules.mk b/platform/barefoot/rules.mk index 7f159da63fc7..3eaa35fdb2c3 100644 --- a/platform/barefoot/rules.mk +++ 
b/platform/barefoot/rules.mk @@ -33,3 +33,5 @@ endif # Runtime dependency on sai is set only for syncd #$(SYNCD)_RDEPENDS += $(BFN_SAI) $(WNC_OSW1800_PLATFORM) $(BFN_INGRASYS_PLATFORM) $(BFN_PLATFORM) $(SYNCD)_RDEPENDS += $(BFN_SAI) $(BFN_INGRASYS_PLATFORM) $(BFN_PLATFORM) + +export SONIC_BUFFER_MODEL=dynamic From 041d50224e05dd203869f0e36c931b4e3341924b Mon Sep 17 00:00:00 2001 From: kellyyeh <42761586+kellyyeh@users.noreply.github.com> Date: Wed, 19 Oct 2022 14:18:43 -0700 Subject: [PATCH 072/174] Advance dhcprelay submodule head (#12214) --- src/dhcprelay | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dhcprelay b/src/dhcprelay index 6f94c2ededb3..9c3b73837f76 160000 --- a/src/dhcprelay +++ b/src/dhcprelay @@ -1 +1 @@ -Subproject commit 6f94c2ededb39ef4cdab788e295a041b2aec12b4 +Subproject commit 9c3b73837f768b3220d68ed01030d204c650d476 From 7c5e4e2b87b416cf2ed9c765f34745f4390b7f3a Mon Sep 17 00:00:00 2001 From: Zain Budhwani <99770260+zbud-msft@users.noreply.github.com> Date: Wed, 19 Oct 2022 19:23:11 -0700 Subject: [PATCH 073/174] Revert syslog change in dhcp_mon disparity (#12425) Why I did it Revert change in syslog such that it does not utilize c++ string How I did it Code change How to verify it Which release branch to backport (provide reason below if selected) 201811 201911 202006 202012 202106 202111 202205 Description for the changelog Ensure to add label/tag for the feature raised. example - PR#2174 under sonic-utilities repo. where, Generic Config and Update feature has been labelled as GCU. 
Link to config_db schema for YANG module changes A picture of a cute animal (not mandatory but encouraged) --- src/dhcpmon/src/dhcp_mon.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dhcpmon/src/dhcp_mon.cpp b/src/dhcpmon/src/dhcp_mon.cpp index dd850d00d280..4860b2b06a55 100644 --- a/src/dhcpmon/src/dhcp_mon.cpp +++ b/src/dhcpmon/src/dhcp_mon.cpp @@ -100,7 +100,7 @@ static void check_dhcp_relay_health(dhcp_mon_state_t *state_data) if (++state_data->count > dhcp_unhealthy_max_count) { auto duration = state_data->count * window_interval_sec; std::string vlan(context->intf); - syslog(LOG_ALERT, state_data->msg, duration, vlan); + syslog(LOG_ALERT, state_data->msg, duration, context->intf); if (state_data->check_type == DHCP_MON_CHECK_POSITIVE) { event_params_t params = { { "vlan", vlan }, From 010f9203f22183fd8f5d087bb68030373500ed9d Mon Sep 17 00:00:00 2001 From: Liu Shilong Date: Thu, 20 Oct 2022 13:00:40 +0800 Subject: [PATCH 074/174] [submodule] Update sonic-mgmt-framework HEAD pointer. (#12434) --- src/sonic-mgmt-framework | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-mgmt-framework b/src/sonic-mgmt-framework index 3c3384cec8d1..744602bbd854 160000 --- a/src/sonic-mgmt-framework +++ b/src/sonic-mgmt-framework @@ -1 +1 @@ -Subproject commit 3c3384cec8d15e493e6889bff6361a5a280a8811 +Subproject commit 744602bbd8541546dfe977f598f917e3471f85d0 From 357c1eaa1b7d938545f0be62b2248d9fb3721978 Mon Sep 17 00:00:00 2001 From: Liu Shilong Date: Thu, 20 Oct 2022 13:03:39 +0800 Subject: [PATCH 075/174] [action] Update automerge version, change log level and change auto cherry-pick branch name. 
(#12455) --- .github/workflows/automerge.yml | 5 ++--- .github/workflows/pr_cherrypick_prestep.yml | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/automerge.yml b/.github/workflows/automerge.yml index 2bef87072404..3edb8ad53188 100644 --- a/.github/workflows/automerge.yml +++ b/.github/workflows/automerge.yml @@ -13,12 +13,11 @@ jobs: runs-on: ubuntu-latest steps: - name: automerge - uses: 'pascalgn/automerge-action@v0.13.1' - with: - args: "--trace" + uses: 'pascalgn/automerge-action@v0.15.5' env: GITHUB_TOKEN: '${{ secrets.TOKEN }}' MERGE_LABELS: 'automerge' MERGE_METHOD: 'squash' MERGE_FILTER_AUTHOR: 'mssonicbld' MERGE_DELETE_BRANCH: true + LOG: "TRACE" diff --git a/.github/workflows/pr_cherrypick_prestep.yml b/.github/workflows/pr_cherrypick_prestep.yml index cdfe6b682b45..b99e883d904e 100644 --- a/.github/workflows/pr_cherrypick_prestep.yml +++ b/.github/workflows/pr_cherrypick_prestep.yml @@ -104,8 +104,8 @@ jobs: echo 'Add commnet "@${author} PR conflicts with $branch branch"' else # Create PR to release branch - git push mssonicbld HEAD:$branch-${pr_id} -f - result=$(gh pr create -R ${repository} -H mssonicbld:$branch-${pr_id} -B $branch -t "[action] [PR:$pr_id] $title" -b '' 2>&1) + git push mssonicbld HEAD:cherry/$branch/${pr_id} -f + result=$(gh pr create -R ${repository} -H mssonicbld:cherry/$branch/${pr_id} -B $branch -t "[action] [PR:$pr_id] $title" -b '' 2>&1) echo $result | grep "already exists" && { echo $result; return 0; } echo $result | grep github.com || { echo $result; return 1; } new_pr_rul=$(echo $result | grep github.com) From 1dec49005ba43096e64e72cd19aa8c082ec34de3 Mon Sep 17 00:00:00 2001 From: Liu Shilong Date: Thu, 20 Oct 2022 13:06:10 +0800 Subject: [PATCH 076/174] [ci] Fix test job issue on checkout step (#12445) --- .azure-pipelines/run-test-template.yml | 1 + azure-pipelines.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.azure-pipelines/run-test-template.yml 
b/.azure-pipelines/run-test-template.yml index 2f1b3d8702d9..5c848ca1d4db 100644 --- a/.azure-pipelines/run-test-template.yml +++ b/.azure-pipelines/run-test-template.yml @@ -22,6 +22,7 @@ parameters: - sonic-6asic-vs.img.gz steps: +- template: cleanup.yml - checkout: self clean: true displayName: 'checkout sonic-mgmt repo' diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 80c9d36dae71..15554b8bc7b7 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -92,6 +92,7 @@ stages: displayName: "vstest" timeoutInMinutes: 60 steps: + - template: .azure-pipelines/cleanup.yml - checkout: self clean: true submodules: recursive From bdebcffa5c195146cf312c57e33ecd0b8fc3a490 Mon Sep 17 00:00:00 2001 From: Mariusz Stachura Date: Thu, 20 Oct 2022 08:01:30 +0200 Subject: [PATCH 077/174] [Barefoot] Add xon_offset to pg_profile_lookup.ini (#12073) - Why I did it Barefoot uses hysteresis, instead of 'xon-threshold'. 'xon' is only supported in static mode, so there is a need to add this attribute to every mode in PG profile init file - How I did it 'xon_offset' was added to pg_profile_lookup.ini - How to verify it Install and basic sanity tests including traffic. 
Checked with: pfcwd/test_pfc_config.py pfcwd/test_pfcwd_all_port_storm.py pfcwd/test_pfcwd_function.py pfcwd/test_pfcwd_war_reboot.py pfc_asym/test_pfc_asym.py Signed-off-by: Mariusz Stachura Signed-off-by: Mariusz Stachura --- .../newport/pg_profile_lookup.ini | 39 ++++++++++--------- .../montara/pg_profile_lookup.ini | 33 ++++++++-------- .../mavericks/pg_profile_lookup.ini | 33 ++++++++-------- 3 files changed, 54 insertions(+), 51 deletions(-) diff --git a/device/barefoot/x86_64-accton_as9516_32d-r0/newport/pg_profile_lookup.ini b/device/barefoot/x86_64-accton_as9516_32d-r0/newport/pg_profile_lookup.ini index 602400325be0..4103c1fc7d44 100644 --- a/device/barefoot/x86_64-accton_as9516_32d-r0/newport/pg_profile_lookup.ini +++ b/device/barefoot/x86_64-accton_as9516_32d-r0/newport/pg_profile_lookup.ini @@ -1,20 +1,21 @@ # PG lossless profiles. -# speed cable size xon xoff threshold - 10000 5m 34816 18432 16384 7 - 25000 5m 34816 18432 16384 7 - 40000 5m 34816 18432 16384 7 - 50000 5m 34816 18432 16384 7 - 100000 5m 36864 18432 18432 7 - 400000 5m 36864 18432 18432 7 - 10000 40m 36864 18432 18432 7 - 25000 40m 39936 18432 21504 7 - 40000 40m 41984 18432 23552 7 - 50000 40m 41984 18432 23552 7 - 100000 40m 54272 18432 35840 7 - 400000 40m 54272 18432 35840 7 - 10000 300m 49152 18432 30720 7 - 25000 300m 71680 18432 53248 7 - 40000 300m 94208 18432 75776 7 - 50000 300m 94208 18432 75776 7 - 100000 300m 184320 18432 165888 7 - 400000 300m 184320 18432 165888 7 +# speed cable size xon xoff threshold xon_offset + 10000 5m 34816 18432 16384 7 18432 + 25000 5m 34816 18432 16384 7 18432 + 40000 5m 34816 18432 16384 7 18432 + 50000 5m 34816 18432 16384 7 18432 + 100000 5m 36864 18432 18432 7 18432 + 400000 5m 36864 18432 18432 7 18432 + 10000 40m 36864 18432 18432 7 18432 + 25000 40m 39936 18432 21504 7 18432 + 40000 40m 41984 18432 23552 7 18432 + 50000 40m 41984 18432 23552 7 18432 + 100000 40m 54272 18432 35840 7 18432 + 400000 40m 54272 18432 35840 7 18432 + 10000 300m 
49152 18432 30720 7 18432 + 25000 300m 71680 18432 53248 7 18432 + 40000 300m 94208 18432 75776 7 18432 + 50000 300m 94208 18432 75776 7 18432 + 100000 300m 184320 18432 165888 7 18432 + 400000 300m 184320 18432 165888 7 18432 + diff --git a/device/barefoot/x86_64-accton_wedge100bf_32x-r0/montara/pg_profile_lookup.ini b/device/barefoot/x86_64-accton_wedge100bf_32x-r0/montara/pg_profile_lookup.ini index b66b129fe43f..8011959df7da 100644 --- a/device/barefoot/x86_64-accton_wedge100bf_32x-r0/montara/pg_profile_lookup.ini +++ b/device/barefoot/x86_64-accton_wedge100bf_32x-r0/montara/pg_profile_lookup.ini @@ -1,17 +1,18 @@ # PG lossless profiles. -# speed cable size xon xoff threshold - 10000 5m 34816 18432 16384 0 - 25000 5m 34816 18432 16384 0 - 40000 5m 34816 18432 16384 0 - 50000 5m 34816 18432 16384 0 - 100000 5m 36864 18432 18432 0 - 10000 40m 36864 18432 18432 0 - 25000 40m 39936 18432 21504 0 - 40000 40m 41984 18432 23552 0 - 50000 40m 41984 18432 23552 0 - 100000 40m 54272 18432 35840 0 - 10000 300m 49152 18432 30720 0 - 25000 300m 71680 18432 53248 0 - 40000 300m 94208 18432 75776 0 - 50000 300m 94208 18432 75776 0 - 100000 300m 184320 18432 165888 0 +# speed cable size xon xoff threshold xon_offset + 10000 5m 34816 18432 16384 0 18432 + 25000 5m 34816 18432 16384 0 18432 + 40000 5m 34816 18432 16384 0 18432 + 50000 5m 34816 18432 16384 0 18432 + 100000 5m 36864 18432 18432 0 18432 + 10000 40m 36864 18432 18432 0 18432 + 25000 40m 39936 18432 21504 0 18432 + 40000 40m 41984 18432 23552 0 18432 + 50000 40m 41984 18432 23552 0 18432 + 100000 40m 54272 18432 35840 0 18432 + 10000 300m 49152 18432 30720 0 18432 + 25000 300m 71680 18432 53248 0 18432 + 40000 300m 94208 18432 75776 0 18432 + 50000 300m 94208 18432 75776 0 18432 + 100000 300m 184320 18432 165888 0 18432 + diff --git a/device/barefoot/x86_64-accton_wedge100bf_65x-r0/mavericks/pg_profile_lookup.ini b/device/barefoot/x86_64-accton_wedge100bf_65x-r0/mavericks/pg_profile_lookup.ini index 
b66b129fe43f..8011959df7da 100644 --- a/device/barefoot/x86_64-accton_wedge100bf_65x-r0/mavericks/pg_profile_lookup.ini +++ b/device/barefoot/x86_64-accton_wedge100bf_65x-r0/mavericks/pg_profile_lookup.ini @@ -1,17 +1,18 @@ # PG lossless profiles. -# speed cable size xon xoff threshold - 10000 5m 34816 18432 16384 0 - 25000 5m 34816 18432 16384 0 - 40000 5m 34816 18432 16384 0 - 50000 5m 34816 18432 16384 0 - 100000 5m 36864 18432 18432 0 - 10000 40m 36864 18432 18432 0 - 25000 40m 39936 18432 21504 0 - 40000 40m 41984 18432 23552 0 - 50000 40m 41984 18432 23552 0 - 100000 40m 54272 18432 35840 0 - 10000 300m 49152 18432 30720 0 - 25000 300m 71680 18432 53248 0 - 40000 300m 94208 18432 75776 0 - 50000 300m 94208 18432 75776 0 - 100000 300m 184320 18432 165888 0 +# speed cable size xon xoff threshold xon_offset + 10000 5m 34816 18432 16384 0 18432 + 25000 5m 34816 18432 16384 0 18432 + 40000 5m 34816 18432 16384 0 18432 + 50000 5m 34816 18432 16384 0 18432 + 100000 5m 36864 18432 18432 0 18432 + 10000 40m 36864 18432 18432 0 18432 + 25000 40m 39936 18432 21504 0 18432 + 40000 40m 41984 18432 23552 0 18432 + 50000 40m 41984 18432 23552 0 18432 + 100000 40m 54272 18432 35840 0 18432 + 10000 300m 49152 18432 30720 0 18432 + 25000 300m 71680 18432 53248 0 18432 + 40000 300m 94208 18432 75776 0 18432 + 50000 300m 94208 18432 75776 0 18432 + 100000 300m 184320 18432 165888 0 18432 + From 13111d949bfb6016b3fe8a6c844267189489e8cf Mon Sep 17 00:00:00 2001 From: Dror Prital <76714716+dprital@users.noreply.github.com> Date: Thu, 20 Oct 2022 09:48:22 +0300 Subject: [PATCH 078/174] [submodule] Advance sonic-sairedis pointer (#12147) - Why I did it Update sonic-sairedis submodule pointer to include the following: 2585a1f [Support gearbox SAI_PORT_ATTR_PORT_SERDES_ID on vs (#1082) fd9bc84 [SAI NAT aging notification (#987) 3fa8f34 [[doc]: Update README.md (#1122) 157e573 [[lgtm] Fix libyang missing in lgtm validation issue (#1135) af80caa Add Voqs to Virtual Switch (#1061) f9008ad 
[fastboot] fastboot enhancement: Use warm-boot infrastructure for fast-boot (#1100) - How I did it Advance sonic-sairedis pointer Signed-off-by: dprital --- src/sonic-sairedis | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-sairedis b/src/sonic-sairedis index 228e37c5e996..2585a1f29e1d 160000 --- a/src/sonic-sairedis +++ b/src/sonic-sairedis @@ -1 +1 @@ -Subproject commit 228e37c5e9968278280202ba77cb85714ba2d8fa +Subproject commit 2585a1f29e1d04f9b8de4215b7d7169ff3fc8693 From 5a9a25ee7d2640f11425c467349036dab0a2d213 Mon Sep 17 00:00:00 2001 From: Dror Prital <76714716+dprital@users.noreply.github.com> Date: Thu, 20 Oct 2022 09:49:02 +0300 Subject: [PATCH 079/174] [submodule] Advance sonic-platform-common pointer (#11965) - Why I did it Update sonic-platform-common submodule pointer to include the following: Read CMIS data path state duration (#312) Remove shell=True (#313) [credo][ycable] remove mux-toggle inprogress flags for some API's (#311) [Cdb fw upgrade (#308) [Credo][Ycable] enhancement and error exception for some APIs (#303) [ycable] add definitions of some new API's for Y-Cable infrastructure (#301) Install libyang to azure pipeline (#310) Update the return of update_firmware for the image not exist case (#306) [CMIS] 'get_transceiver_info' should return 'None' when CMIS cable EEPROM is not ready (#305) uplift code coverage 80% (#307) [sonic-pcie] Add UT for pcie_common.py (#293) [CMIS] Catch Exception to avoid CMIS code crash (#299) [Credo][Ycable] fix incorrect uart statistics (#296) Add PSU input voltage and input current (#295) - How I did it Advance sonic-platform-common pointer Signed-off-by: dprital --- src/sonic-platform-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-platform-common b/src/sonic-platform-common index 50c24a5c43e5..86bab38c723e 160000 --- a/src/sonic-platform-common +++ b/src/sonic-platform-common @@ -1 +1 @@ -Subproject commit 50c24a5c43e5c33f79f70dfb3d0aecc30e13739d 
+Subproject commit 86bab38c723eb4ebbfa16feed66344d1b3ffd46e From a68ce12dd60164d06f9a21fadbc8663a0c5c144b Mon Sep 17 00:00:00 2001 From: Vivek Date: Wed, 19 Oct 2022 23:50:07 -0700 Subject: [PATCH 080/174] [Mellanox] [SKU] Added Mellanox-SN4700-A96C8V8 SKU (#12347) - Why I did it A new SKU for MSN4700 Platform i.e. Mellanox-SN4700-V16A96 Requirements: Breakout: Port 1-24: 4x25G(4)[10G,1G] Port 25-28: 2x100G[200G,50G,40G,25G,10G,1G] Port 29-32: 2x200G[100G,50G,40G,25G,10G,1G] Downlinks: 96 (1-24) + 4 (25-28) Uplinks: 4 (29-32) Shared Headroom: Enabled Over Subscribe Ratio: 1:4 Default Topology: T0 Default Cable Length for T1: 5m VxLAN source port range set: No Static Policy Based Hashing Supported: No Additional Details: QoS params: The default ones defined in qos_config.j2 will be applied Small Packet Percentage: Used 50% for traditional buffer model Note: For dynamic model, the value defined in LOSSLESS_TRAFFIC_PATTERN|AZURE|small_packet_percentage is used SKU was drafted under the assumption that the downlink ports uses xcvr's that will only support the first 4 lanes of the physical port they are connected to. 
Hence for the ports 1-24, the last four lanes are not used Cable Lengths used for generating buffer_defaults_{t0,t1}.j2 values Signed-off-by: Vivek Reddy Karri --- .../Mellanox-SN4700-A96C8V8/buffers.json.j2 | 16 + .../buffers_defaults_objects.j2 | 1 + .../buffers_defaults_t0.j2 | 46 +++ .../buffers_defaults_t1.j2 | 40 +++ .../buffers_dynamic.json.j2 | 17 + .../Mellanox-SN4700-A96C8V8/hwsku.json | 340 ++++++++++++++++++ .../pg_profile_lookup.ini | 1 + .../Mellanox-SN4700-A96C8V8/port_config.ini | 113 ++++++ .../Mellanox-SN4700-A96C8V8/qos.json.j2 | 1 + .../Mellanox-SN4700-A96C8V8/sai.profile | 3 + .../sai_4700_8x200g_8x100g_96x25g.xml | 277 ++++++++++++++ 11 files changed, 855 insertions(+) create mode 100644 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers.json.j2 create mode 120000 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers_defaults_objects.j2 create mode 100644 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers_defaults_t0.j2 create mode 100644 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers_defaults_t1.j2 create mode 100644 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers_dynamic.json.j2 create mode 100644 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/hwsku.json create mode 120000 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/pg_profile_lookup.ini create mode 100644 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/port_config.ini create mode 120000 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/qos.json.j2 create mode 100644 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/sai.profile create mode 100644 device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/sai_4700_8x200g_8x100g_96x25g.xml diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers.json.j2 b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers.json.j2 new 
file mode 100644 index 000000000000..afbd130a436d --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers.json.j2 @@ -0,0 +1,16 @@ + +{# + Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. + Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +#} +{%- set default_topo = 't0' %} +{%- include 'buffers_config.j2' %} diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers_defaults_objects.j2 b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers_defaults_objects.j2 new file mode 120000 index 000000000000..c01aebb7ae12 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers_defaults_objects.j2 @@ -0,0 +1 @@ +../../x86_64-mlnx_msn2700-r0/Mellanox-SN2700-D48C8/buffers_defaults_objects.j2 \ No newline at end of file diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers_defaults_t0.j2 b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers_defaults_t0.j2 new file mode 100644 index 000000000000..bdca5962fd58 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers_defaults_t0.j2 @@ -0,0 +1,46 @@ + +{# + Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. + Apache-2.0 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +#} +{% set default_cable = '5m' %} +{%-set ports2cable = { + 'torrouter_server' : '5m', + 'leafrouter_torrouter' : '300m', + 'spinerouter_leafrouter' : '1500m' + } +-%} +{% set ingress_lossless_pool_size = '46415872' %} +{% set ingress_lossless_pool_xoff = '1982464' %} +{% set egress_lossless_pool_size = '60817392' %} +{% set egress_lossy_pool_size = '46415872' %} + +{% import 'buffers_defaults_objects.j2' as defs with context %} + +{%- macro generate_buffer_pool_and_profiles_with_inactive_ports(port_names_inactive) %} +{{ defs.generate_buffer_pool_and_profiles_with_inactive_ports(port_names_inactive) }} +{%- endmacro %} + +{%- macro generate_profile_lists_with_inactive_ports(port_names_active, port_names_inactive) %} +{{ defs.generate_profile_lists(port_names_active, port_names_inactive) }} +{%- endmacro %} + +{%- macro generate_queue_buffers_with_inactive_ports(port_names_active, port_names_inactive) %} +{{ defs.generate_queue_buffers(port_names_active, port_names_inactive) }} +{%- endmacro %} + +{%- macro generate_pg_profiles_with_inactive_ports(port_names_active, port_names_inactive) %} +{{ defs.generate_pg_profiles(port_names_active, port_names_inactive) }} +{%- endmacro %} diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers_defaults_t1.j2 b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers_defaults_t1.j2 new file mode 100644 index 000000000000..444c728c5ba1 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers_defaults_t1.j2 @@ -0,0 +1,40 @@ + +{# + Copyright (c) 2022 
NVIDIA CORPORATION & AFFILIATES. + Apache-2.0 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +#} +{% set default_cable = '300m' %} +{% set ingress_lossless_pool_size = '45432832' %} +{% set ingress_lossless_pool_xoff = '2965504' %} +{% set egress_lossless_pool_size = '60817392' %} +{% set egress_lossy_pool_size = '45432832' %} + +{% import 'buffers_defaults_objects.j2' as defs with context %} + +{%- macro generate_buffer_pool_and_profiles_with_inactive_ports(port_names_inactive) %} +{{ defs.generate_buffer_pool_and_profiles_with_inactive_ports(port_names_inactive) }} +{%- endmacro %} + +{%- macro generate_profile_lists_with_inactive_ports(port_names_active, port_names_inactive) %} +{{ defs.generate_profile_lists(port_names_active, port_names_inactive) }} +{%- endmacro %} + +{%- macro generate_queue_buffers_with_inactive_ports(port_names_active, port_names_inactive) %} +{{ defs.generate_queue_buffers(port_names_active, port_names_inactive) }} +{%- endmacro %} + +{%- macro generate_pg_profiles_with_inactive_ports(port_names_active, port_names_inactive) %} +{{ defs.generate_pg_profiles(port_names_active, port_names_inactive) }} +{%- endmacro %} diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers_dynamic.json.j2 b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers_dynamic.json.j2 new file mode 100644 index 000000000000..ac50755abbcb --- /dev/null +++ 
b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/buffers_dynamic.json.j2 @@ -0,0 +1,17 @@ + +{# + Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. + Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +#} +{%- set default_topo = 't0' %} +{%- set dynamic_mode = 'true' %} +{%- include 'buffers_config.j2' %} diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/hwsku.json b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/hwsku.json new file mode 100644 index 000000000000..03ed2d395ce8 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/hwsku.json @@ -0,0 +1,340 @@ +{ + "interfaces": { + "Ethernet0": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet1": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet2": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet3": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet8": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet9": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet10": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet11": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet16": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet17": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet18": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet19": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet24": { + 
"default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet25": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet26": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet27": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet32": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet33": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet34": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet35": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet40": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet41": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet42": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet43": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet48": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet49": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet50": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet51": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet56": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet57": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet58": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet59": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet64": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet65": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet66": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet67": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet72": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet73": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet74": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet75": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet80": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet81": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + 
"Ethernet82": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet83": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet88": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet89": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet90": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet91": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet96": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet97": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet98": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet99": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet104": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet105": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet106": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet107": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet112": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet113": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet114": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet115": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet120": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet121": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet122": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet123": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet128": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet129": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet130": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet131": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet136": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet137": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet138": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet139": { + 
"default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet144": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet145": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet146": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet147": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet152": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet153": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet154": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet155": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet160": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet161": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet162": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet163": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet168": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet169": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet170": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet171": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet176": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet177": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet178": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet179": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet184": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet185": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet186": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet187": { + "default_brkout_mode": "4x25G(4)[10G,1G]" + }, + "Ethernet192": { + "default_brkout_mode": "2x100G[200G,50G,40G,25G,10G,1G]" + }, + "Ethernet196": { + "default_brkout_mode": "2x100G[200G,50G,40G,25G,10G,1G]" + }, + "Ethernet200": { + "default_brkout_mode": "2x100G[200G,50G,40G,25G,10G,1G]" + }, + "Ethernet204": { + "default_brkout_mode": 
"2x100G[200G,50G,40G,25G,10G,1G]" + }, + "Ethernet208": { + "default_brkout_mode": "2x100G[200G,50G,40G,25G,10G,1G]" + }, + "Ethernet212": { + "default_brkout_mode": "2x100G[200G,50G,40G,25G,10G,1G]" + }, + "Ethernet216": { + "default_brkout_mode": "2x100G[200G,50G,40G,25G,10G,1G]" + }, + "Ethernet220": { + "default_brkout_mode": "2x100G[200G,50G,40G,25G,10G,1G]" + }, + "Ethernet224": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet228": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet232": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet236": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet240": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet244": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet248": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + }, + "Ethernet252": { + "default_brkout_mode": "2x200G[100G,50G,40G,25G,10G,1G]" + } + } +} diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/pg_profile_lookup.ini b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/pg_profile_lookup.ini new file mode 120000 index 000000000000..66cab04d2c42 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/pg_profile_lookup.ini @@ -0,0 +1 @@ +../Mellanox-SN4700-C128/pg_profile_lookup.ini \ No newline at end of file diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/port_config.ini b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/port_config.ini new file mode 100644 index 000000000000..bd24d40599fe --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/port_config.ini @@ -0,0 +1,113 @@ +# name lanes alias index speed +Ethernet0 0 etp1a 1 25000 +Ethernet1 1 etp1b 1 25000 +Ethernet2 2 etp1c 1 25000 +Ethernet3 3 etp1d 1 25000 +Ethernet8 8 etp2a 2 25000 +Ethernet9 9 
etp2b 2 25000 +Ethernet10 10 etp2c 2 25000 +Ethernet11 11 etp2d 2 25000 +Ethernet16 16 etp3a 3 25000 +Ethernet17 17 etp3b 3 25000 +Ethernet18 18 etp3c 3 25000 +Ethernet19 19 etp3d 3 25000 +Ethernet24 24 etp4a 4 25000 +Ethernet25 25 etp4b 4 25000 +Ethernet26 26 etp4c 4 25000 +Ethernet27 27 etp4d 4 25000 +Ethernet32 32 etp5a 5 25000 +Ethernet33 33 etp5b 5 25000 +Ethernet34 34 etp5c 5 25000 +Ethernet35 35 etp5d 5 25000 +Ethernet40 40 etp6a 6 25000 +Ethernet41 41 etp6b 6 25000 +Ethernet42 42 etp6c 6 25000 +Ethernet43 43 etp6d 6 25000 +Ethernet48 48 etp7a 7 25000 +Ethernet49 49 etp7b 7 25000 +Ethernet50 50 etp7c 7 25000 +Ethernet51 51 etp7d 7 25000 +Ethernet56 56 etp8a 8 25000 +Ethernet57 57 etp8b 8 25000 +Ethernet58 58 etp8c 8 25000 +Ethernet59 59 etp8d 8 25000 +Ethernet64 64 etp9a 9 25000 +Ethernet65 65 etp9b 9 25000 +Ethernet66 66 etp9c 9 25000 +Ethernet67 67 etp9d 9 25000 +Ethernet72 72 etp10a 10 25000 +Ethernet73 73 etp10b 10 25000 +Ethernet74 74 etp10c 10 25000 +Ethernet75 75 etp10d 10 25000 +Ethernet80 80 etp11a 11 25000 +Ethernet81 81 etp11b 11 25000 +Ethernet82 82 etp11c 11 25000 +Ethernet83 83 etp11d 11 25000 +Ethernet88 88 etp12a 12 25000 +Ethernet89 89 etp12b 12 25000 +Ethernet90 90 etp12c 12 25000 +Ethernet91 91 etp12d 12 25000 +Ethernet96 96 etp13a 13 25000 +Ethernet97 97 etp13b 13 25000 +Ethernet98 98 etp13c 13 25000 +Ethernet99 99 etp13d 13 25000 +Ethernet104 104 etp14a 14 25000 +Ethernet105 105 etp14b 14 25000 +Ethernet106 106 etp14c 14 25000 +Ethernet107 107 etp14d 14 25000 +Ethernet112 112 etp15a 15 25000 +Ethernet113 113 etp15b 15 25000 +Ethernet114 114 etp15c 15 25000 +Ethernet115 115 etp15d 15 25000 +Ethernet120 120 etp16a 16 25000 +Ethernet121 121 etp16b 16 25000 +Ethernet122 122 etp16c 16 25000 +Ethernet123 123 etp16d 16 25000 +Ethernet128 128 etp17a 17 25000 +Ethernet129 129 etp17b 17 25000 +Ethernet130 130 etp17c 17 25000 +Ethernet131 131 etp17d 17 25000 +Ethernet136 136 etp18a 18 25000 +Ethernet137 137 etp18b 18 25000 +Ethernet138 138 etp18c 
18 25000 +Ethernet139 139 etp18d 18 25000 +Ethernet144 144 etp19a 19 25000 +Ethernet145 145 etp19b 19 25000 +Ethernet146 146 etp19c 19 25000 +Ethernet147 147 etp19d 19 25000 +Ethernet152 152 etp20a 20 25000 +Ethernet153 153 etp20b 20 25000 +Ethernet154 154 etp20c 20 25000 +Ethernet155 155 etp20d 20 25000 +Ethernet160 160 etp21a 21 25000 +Ethernet161 161 etp21b 21 25000 +Ethernet162 162 etp21c 21 25000 +Ethernet163 163 etp21d 21 25000 +Ethernet168 168 etp22a 22 25000 +Ethernet169 169 etp22b 22 25000 +Ethernet170 170 etp22c 22 25000 +Ethernet171 171 etp22d 22 25000 +Ethernet176 176 etp23a 23 25000 +Ethernet177 177 etp23b 23 25000 +Ethernet178 178 etp23c 23 25000 +Ethernet179 179 etp23d 23 25000 +Ethernet184 184 etp24a 24 25000 +Ethernet185 185 etp24b 24 25000 +Ethernet186 186 etp24c 24 25000 +Ethernet187 187 etp24d 24 25000 +Ethernet192 192,193,194,195 etp25a 25 100000 +Ethernet196 196,197,198,199 etp25b 25 100000 +Ethernet200 200,201,202,203 etp26a 26 100000 +Ethernet204 204,205,206,207 etp26b 26 100000 +Ethernet208 208,209,210,211 etp27a 27 100000 +Ethernet212 212,213,214,215 etp27b 27 100000 +Ethernet216 216,217,218,219 etp28a 28 100000 +Ethernet220 220,221,222,223 etp28b 28 100000 +Ethernet224 224,225,226,227 etp29a 29 200000 +Ethernet228 228,229,230,231 etp29b 29 200000 +Ethernet232 232,233,234,235 etp30a 30 200000 +Ethernet236 236,237,238,239 etp30b 30 200000 +Ethernet240 240,241,242,243 etp31a 31 200000 +Ethernet244 244,245,246,247 etp31b 31 200000 +Ethernet248 248,249,250,251 etp32a 32 200000 +Ethernet252 252,253,254,255 etp32b 32 200000 diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/qos.json.j2 b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/qos.json.j2 new file mode 120000 index 000000000000..eccf286dc879 --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/qos.json.j2 @@ -0,0 +1 @@ +../../x86_64-mlnx_msn2700-r0/ACS-MSN2700/qos.json.j2 \ No newline at end of file diff --git 
a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/sai.profile b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/sai.profile new file mode 100644 index 000000000000..377656b4ca8f --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/sai.profile @@ -0,0 +1,3 @@ +SAI_INIT_CONFIG_FILE=/usr/share/sonic/hwsku/sai_4700_8x200g_8x100g_96x25g.xml +SAI_DUMP_STORE_PATH=/var/log/mellanox/sdk-dumps +SAI_DUMP_STORE_AMOUNT=10 diff --git a/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/sai_4700_8x200g_8x100g_96x25g.xml b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/sai_4700_8x200g_8x100g_96x25g.xml new file mode 100644 index 000000000000..6b16851934dc --- /dev/null +++ b/device/mellanox/x86_64-mlnx_msn4700-r0/Mellanox-SN4700-A96C8V8/sai_4700_8x200g_8x100g_96x25g.xml @@ -0,0 +1,277 @@ + + + + + 00:02:03:04:05:00 + + + 1 + + + 32 + + + + + 1 + 4 + 17 + 3 + 100 + 4 + + + 5 + 4 + 16 + 3 + 100 + 4 + + + 9 + 4 + 19 + 3 + 100 + 4 + + + 13 + 4 + 18 + 3 + 100 + 4 + + + 17 + 4 + 21 + 3 + 100 + 4 + + + 21 + 4 + 20 + 3 + 100 + 4 + + + 25 + 4 + 23 + 3 + 100 + 4 + + + 29 + 4 + 22 + 3 + 100 + 4 + + + 33 + 8 + 29 + 1 + 4096 + 2 + + + 37 + 8 + 28 + 1 + 4096 + 2 + + + 41 + 8 + 31 + 1 + 4096 + 2 + + + 45 + 8 + 30 + 1 + 4096 + 2 + + + 49 + 8 + 25 + 1 + 1536 + 2 + + + 53 + 8 + 24 + 1 + 1536 + 2 + + + 57 + 8 + 27 + 1 + 1536 + 2 + + + 61 + 8 + 26 + 1 + 1536 + 2 + + + 65 + 4 + 14 + 3 + 100 + 4 + + + 69 + 4 + 15 + 3 + 100 + 4 + + + 73 + 4 + 12 + 3 + 100 + 4 + + + 77 + 4 + 13 + 3 + 100 + 4 + + + 81 + 4 + 10 + 3 + 100 + 4 + + + 85 + 4 + 11 + 3 + 100 + 4 + + + 89 + 4 + 8 + 3 + 100 + 4 + + + 93 + 4 + 9 + 3 + 100 + 4 + + + 97 + 4 + 2 + 3 + 100 + 4 + + + 101 + 4 + 3 + 3 + 100 + 4 + + + 105 + 4 + 0 + + + 3 + + + 100 + 4 + + + 109 + 4 + 1 + 3 + 100 + 4 + + + 113 + 4 + 6 + 3 + 100 + 4 + + + 117 + 4 + 7 + 3 + 100 + 4 + + + 121 + 4 + 4 + 3 + 100 + 4 + + + 125 + 4 + 5 + 3 + 100 + 4 + + + + From 
2f490626a99c61bae2ed03a0b1eb72a38e0eea60 Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Wed, 19 Oct 2022 23:50:53 -0700 Subject: [PATCH 081/174] [FRR]Adding patch to fix enhanced capability turned on for interface (#12453) Fixing issue FRRouting/frr#11108 For interface based peers with peer-groups, "no neighbor capability extended-nexthop" gets added by default. This will result in IPv4 routes not having ipv6 next hops. - How I did it Porting the commit FRRouting/frr@8e89adc to FRR 8.2.2 which fixes the issue - How to verify it Load FRR and verify if the "no neighbor capability extended-nexthop" not gets added for interfaces associated with peer-groups --- ...pability-is-always-turned-on-for-int.patch | 36 +++++++++++++++++++ src/sonic-frr/patch/series | 1 + 2 files changed, 37 insertions(+) create mode 100644 src/sonic-frr/patch/0011-bgpd-enhanced-capability-is-always-turned-on-for-int.patch diff --git a/src/sonic-frr/patch/0011-bgpd-enhanced-capability-is-always-turned-on-for-int.patch b/src/sonic-frr/patch/0011-bgpd-enhanced-capability-is-always-turned-on-for-int.patch new file mode 100644 index 000000000000..cefa7c31449f --- /dev/null +++ b/src/sonic-frr/patch/0011-bgpd-enhanced-capability-is-always-turned-on-for-int.patch @@ -0,0 +1,36 @@ +From 4db4fc1bf0599f79067bfd62aa435be8e161d81e Mon Sep 17 00:00:00 2001 +From: Donald Sharp +Date: Tue, 3 May 2022 12:51:21 -0400 +Subject: [PATCH] bgpd: enhanced capability is always turned on for interface + based peers + +FRR is displaying that the peer enhanced capability command is not +turned on when the interface is part of a peer group. Saving the +config and then reloading actually turns it off. + +Fix the code so that FRR does not display the enhanced capability +for interface based peers. 
+ +Fixes: #11108 +Signed-off-by: Donald Sharp +--- + bgpd/bgp_vty.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c +index 4df2abef8..6fcce239b 100644 +--- a/bgpd/bgp_vty.c ++++ b/bgpd/bgp_vty.c +@@ -16586,7 +16586,8 @@ static void bgp_config_write_peer_global(struct vty *vty, struct bgp *bgp, + + /* capability extended-nexthop */ + if (peergroup_flag_check(peer, PEER_FLAG_CAPABILITY_ENHE)) { +- if (CHECK_FLAG(peer->flags_invert, PEER_FLAG_CAPABILITY_ENHE)) ++ if (CHECK_FLAG(peer->flags_invert, PEER_FLAG_CAPABILITY_ENHE) && ++ !peer->conf_if) + vty_out(vty, + " no neighbor %s capability extended-nexthop\n", + addr); +-- +2.17.1 + diff --git a/src/sonic-frr/patch/series b/src/sonic-frr/patch/series index 34ec5d510a3c..c62bd218c7ee 100644 --- a/src/sonic-frr/patch/series +++ b/src/sonic-frr/patch/series @@ -10,3 +10,4 @@ Disable-ipv6-src-address-test-in-pceplib.patch cross-compile-changes.patch 0009-ignore-route-from-default-table.patch 0010-zebra-Note-when-the-netlink-DUMP-command-is-interrup.patch +0011-bgpd-enhanced-capability-is-always-turned-on-for-int.patch From e57cd472fb992e13bedb51b2bae16fa1bb954e07 Mon Sep 17 00:00:00 2001 From: Zain Budhwani <99770260+zbud-msft@users.noreply.github.com> Date: Thu, 20 Oct 2022 08:13:08 -0700 Subject: [PATCH 082/174] Add YANG models for structured events (#12290) Add YANG models for structured events and unit tests. YANG events include bgp, common, dhcp-relay, swss, syncd, and host. 
--- .../libyang-python-tests/test_sonic_yang.py | 6 +- src/sonic-yang-models/setup.py | 12 + .../tests/sonic-events-bgp.json | 47 +++ .../tests/sonic-events-dhcp-relay.json | 29 ++ .../tests/sonic-events-host.json | 115 +++++++ .../tests/sonic-events-swss.json | 61 ++++ .../tests/sonic-events-syncd.json | 13 + .../tests_config/sonic-events-bgp.json | 118 +++++++ .../tests_config/sonic-events-dhcp-relay.json | 137 ++++++++ .../tests_config/sonic-events-host.json | 262 +++++++++++++++ .../tests_config/sonic-events-swss.json | 304 ++++++++++++++++++ .../tests_config/sonic-events-syncd.json | 26 ++ .../yang-models/sonic-events-bgp.yang | 94 ++++++ .../yang-models/sonic-events-common.yang | 80 +++++ .../yang-models/sonic-events-dhcp-relay.yang | 82 +++++ .../yang-models/sonic-events-host.yang | 183 +++++++++++ .../yang-models/sonic-events-swss.yang | 108 +++++++ .../yang-models/sonic-events-syncd.yang | 47 +++ 18 files changed, 1722 insertions(+), 2 deletions(-) create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-bgp.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-dhcp-relay.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-host.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-swss.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-syncd.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-bgp.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-dhcp-relay.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-host.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-swss.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-syncd.json create mode 100644 
src/sonic-yang-models/yang-models/sonic-events-bgp.yang create mode 100644 src/sonic-yang-models/yang-models/sonic-events-common.yang create mode 100644 src/sonic-yang-models/yang-models/sonic-events-dhcp-relay.yang create mode 100644 src/sonic-yang-models/yang-models/sonic-events-host.yang create mode 100644 src/sonic-yang-models/yang-models/sonic-events-swss.yang create mode 100644 src/sonic-yang-models/yang-models/sonic-events-syncd.yang diff --git a/src/sonic-yang-mgmt/tests/libyang-python-tests/test_sonic_yang.py b/src/sonic-yang-mgmt/tests/libyang-python-tests/test_sonic_yang.py index a13d4c02e9a0..86b27ef174e5 100644 --- a/src/sonic-yang-mgmt/tests/libyang-python-tests/test_sonic_yang.py +++ b/src/sonic-yang-mgmt/tests/libyang-python-tests/test_sonic_yang.py @@ -292,11 +292,13 @@ def test_validate_yang_models(self, sonic_yang_data): ''' test_file = sonic_yang_data['test_file'] syc = sonic_yang_data['syc'] - # Currently only 3 YANG files are not directly related to config + # Currently only 3 YANG files are not directly related to config, along with event YANG models # which are: sonic-extension.yang, sonic-types.yang and sonic-bgp-common.yang. Hard coding # it right now. + # event YANG models do not map directly to config_db and are included to NON_CONFIG_YANG_FILES at run time # If any more such helper yang files are added, we need to update here. 
- NON_CONFIG_YANG_FILES = 3 + EVENT_YANG_FILES = sum(1 for yang_model in syc.yangFiles if 'sonic-events' in yang_model) + NON_CONFIG_YANG_FILES = 3 + EVENT_YANG_FILES # read config jIn = self.readIjsonInput(test_file, 'SAMPLE_CONFIG_DB_JSON') jIn = json.loads(jIn) diff --git a/src/sonic-yang-models/setup.py b/src/sonic-yang-models/setup.py index b620438451ac..11756fe01614 100644 --- a/src/sonic-yang-models/setup.py +++ b/src/sonic-yang-models/setup.py @@ -106,6 +106,12 @@ def run(self): './yang-models/sonic-device_neighbor_metadata.yang', './yang-models/sonic-dhcp-server.yang', './yang-models/sonic-dhcpv6-relay.yang', + './yang-models/sonic-events-bgp.yang', + './yang-models/sonic-events-common.yang', + './yang-models/sonic-events-dhcp-relay.yang', + './yang-models/sonic-events-host.yang', + './yang-models/sonic-events-swss.yang', + './yang-models/sonic-events-syncd.yang', './yang-models/sonic-extension.yang', './yang-models/sonic-flex_counter.yang', './yang-models/sonic-feature.yang', @@ -179,6 +185,12 @@ def run(self): './cvlyang-models/sonic-crm.yang', './cvlyang-models/sonic-device_metadata.yang', './cvlyang-models/sonic-device_neighbor.yang', + './cvlyang-models/sonic-events-bgp.yang', + './cvlyang-models/sonic-events-common.yang', + './cvlyang-models/sonic-events-dhcp-relay.yang', + './cvlyang-models/sonic-events-host.yang', + './cvlyang-models/sonic-events-swss.yang', + './cvlyang-models/sonic-events-syncd.yang', './cvlyang-models/sonic-device_neighbor_metadata.yang', './cvlyang-models/sonic-extension.yang', './cvlyang-models/sonic-flex_counter.yang', diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-bgp.json b/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-bgp.json new file mode 100644 index 000000000000..6749860b2f7f --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-bgp.json @@ -0,0 +1,47 @@ +{ + "SONIC_EVENTS_BGP_BGP_STATE_INCORRECT_IP": { + "desc": "BGP_STATE_EVENT_INCORRECT_IP 
failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_BGP_BGP_STATE_INCORRECT_STATUS": { + "desc": "BGP_STATE_EVENT_INCORRECT_STATUS failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_BGP_BGP_STATE_INCORRECT_TIMESTAMP": { + "desc": "BGP_STATE_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_BGP_BGP_STATE_VALID": { + "desc": "VALID BGP STATE EVENT." + }, + "SONIC_EVENTS_BGP_NOTIFICATION_INCORRECT_MAJOR_CODE": { + "desc": "BGP_NOTIFICATION_INCORRECT_MAJOR_CODE failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_BGP_NOTIFICATION_INCORRECT_MINOR_CODE": { + "desc": "BGP_NOTIFICATION_INCORRECT_MINOR_CODE failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_BGP_NOTIFICATION_INCORRECT_IP": { + "desc": "BGP_NOTIFICATION_INCORRECT_IP failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_BGP_NOTIFICATION_INCORRECT_IS-SENT": { + "desc": "BGP_NOTIFICATION_INCORRECT_IS-SENT failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_BGP_NOTIFICATION_INCORRECT_TIMESTAMP": { + "desc": "BGP_NOTIFICATION_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_BGP_NOTIFICATION_VALID": { + "desc": "VALID BGP NOTIFICATION." + }, + "SONIC_EVENTS_BGP_ZEBRA_NO_BUFF_INCORRECT_TIMESTAMP": { + "desc": "ZEBRA_NO_BUFF_EVENT_INCORRECT_TIMESTAMP.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_BGP_ZEBRA_NO_BUFF_VALID": { + "desc": "VALID ZEBRA_NO_BUFF EVENT." 
+ } +} diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-dhcp-relay.json b/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-dhcp-relay.json new file mode 100644 index 000000000000..575c2c587d5c --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-dhcp-relay.json @@ -0,0 +1,29 @@ +{ + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_DISCARD_INCORRECT_IFNAME": { + "desc": "DHCP_RELAY_DISCARD_EVENT_INCORRECT_IFNAME failure.", + "eStrKey": "InvalidValue", + "eStr": ["Eth"] + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_DISCARD_INCORRECT_TIMESTAMP": { + "desc": "DHCP_RELAY_DISCARD_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_DISCARD_VALID": { + "desc": "VALID DHCP_RELAY_DISCARD EVENT." + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_DISPARITY_INCORRECT_VLAN": { + "desc": "DHCP_RELAY_DISPARITY_EVENT_INCORRECT_VLAN failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_DISPARITY_INCORRECT_DURATION": { + "desc": "DHCP_RELAY_DISPARITY_EVENT_INCORRECT_DURATION failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_DISPARITY_INCORRECT_TIMESTAMP": { + "desc": "DHCP_RELAY_DISPARITY_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_DISPARITY_VALID": { + "desc": "VALID DHCP_RELAY_DISPARITY EVENT." 
+ } +} diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-host.json b/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-host.json new file mode 100644 index 000000000000..6e4a8dcbe84e --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-host.json @@ -0,0 +1,115 @@ +{ + "SONIC_EVENTS_HOST_DISK_USAGE_INCORRECT_USAGE": { + "desc": "DISK_USAGE_EVENT_INCORRECT_USAGE failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_HOST_DISK_USAGE_INCORRECT_LIMIT": { + "desc": "DISK_USAGE_EVENT_INCORRECT_LIMIT failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_HOST_DISK_USAGE_INCORRECT_TIMESTAMP": { + "desc": "DISK_USAGE_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_HOST_DISK_USAGE_VALID": { + "desc": "VALID DISK_USAGE EVENT." + }, + "SONIC_EVENTS_HOST_MEMORY_USAGE_INCORRECT_USAGE": { + "desc": "MEMORY_USAGE_EVENT_INCORRECT_USAGE failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_HOST_MEMORY_USAGE_INCORRECT_LIMIT": { + "desc": "MEMORY_USAGE_EVENT_INCORRECT_LIMIT failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_HOST_MEMORY_USAGE_INCORRECT_TIMESTAMP": { + "desc": "MEMORY_USAGE_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_HOST_MEMORY_USAGE_VALID": { + "desc": "VALID MEMORY_USAGE EVENT." + }, + "SONIC_EVENTS_HOST_CPU_USAGE_INCORRECT_USAGE": { + "desc": "CPU_USAGE_EVENT_INCORRECT_USAGE failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_HOST_CPU_USAGE_INCORRECT_LIMIT": { + "desc": "CPU_USAGE_EVENT_INCORRECT_LIMIT failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_HOST_CPU_USAGE_INCORRECT_TIMESTAMP": { + "desc": "CPU_USAGE_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_HOST_CPU_USAGE_VALID": { + "desc": "VALID CPU_USAGE EVENT." 
+ }, + "SONIC_EVENTS_HOST_EVENT_SSHD_INCORRECT_TIMESTAMP": { + "desc": "EVENT_SSHD_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_HOST_EVENT_SSHD_VALID": { + "desc": "VALID EVENT_SSHD EVENT." + }, + "SONIC_EVENTS_HOST_EVENT_DISK_INCORRECT_FAIL_TYPE": { + "desc": "EVENT_DISK_EVENT_INCORRECT_FAIL_TYPE failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_HOST_EVENT_DISK_INCORRECT_TIMESTAMP": { + "desc": "EVENT_DISK_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_HOST_EVENT_DISK_VALID": { + "desc": "VALID EVENT_DISK EVENT." + }, + "SONIC_EVENTS_HOST_EVENT_KERNEL_INCORRECT_FAIL_TYPE": { + "desc": "EVENT_KERNEL_EVENT_INCORRECT_FAIL_TYPE failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_HOST_EVENT_KERNEL_INCORRECT_TIMESTAMP": { + "desc": "EVENT_KERNEL_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_HOST_EVENT_KERNEL_VALID": { + "desc": "VALID EVENT_KERNEL EVENT." + }, + "SONIC_EVENTS_HOST_EVENT_DOWN_CTR_INCORRECT_TIMESTAMP": { + "desc": "EVENT_DOWN_CTR_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_HOST_EVENT_DOWN_CTR_VALID": { + "desc": "VALID EVENT_DOWN_CTR EVENT." + }, + "SONIC_EVENTS_HOST_EVENT_STOPPED_CTR_INCORRECT_TIMESTAMP": { + "desc": "EVENT_STOPPED_CTR_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_HOST_EVENT_STOPPED_CTR_VALID": { + "desc": "VALID EVENT_STOPPED_CTR EVENT." + }, + "SONIC_EVENTS_HOST_WATCHDOG_TIMEOUT_INCORRECT_LIMIT": { + "desc": "WATCHDOG_TIMEOUT_EVENT_INCORRECT_LIMIT failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_HOST_WATCHDOG_TIMEOUT_INCORRECT_TIMESTAMP": { + "desc": "WATCHDOG_TIMEOUT_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_HOST_WATCHDOG_TIMEOUT_VALID": { + "desc": "VALID WATCHDOG_TIMEOUT EVENT." 
+ }, + "SONIC_EVENTS_HOST_EVENT_SEU_INCORRECT_TIMESTAMP": { + "desc": "EVENT_SEU_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_HOST_EVENT_SEU_VALID": { + "desc": "VALID EVENT_SEU EVENT." + }, + "SONIC_EVENTS_HOST_INVALID_FREELIST_INCORRECT_TIMESTAMP": { + "desc": "INVALID_FREELIST_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_HOST_INVALID_FREELIST_VALID": { + "desc": "VALID INVALID_FREELIST EVENT." + } +} diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-swss.json b/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-swss.json new file mode 100644 index 000000000000..2862dede7961 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-swss.json @@ -0,0 +1,61 @@ +{ + "SONIC_EVENTS_SWSS_IF_STATE_INCORRECT_IFNAME": { + "desc": "IF_STATE_EVENT_INCORRECT_IFNAME failure.", + "eStrKey": "LeafRef", + "eStr": ["Eth"] + }, + "SONIC_EVENTS_SWSS_IF_STATE_INCORRECT_STATUS": { + "desc": "IF_STATE_EVENT_INCORRECT_STATUS failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_SWSS_IF_STATE_INCORRECT_TIMESTAMP": { + "desc": "IF_STATE_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_SWSS_IF_STATE_VALID": { + "desc": "VALID IF_STATE EVENT." 
+ }, + "SONIC_EVENTS_SWSS_PFC_STORM_INCORRECT_IFNAME": { + "desc": "PFC_STORM_EVENT_INCORRECT_IFNAME failure.", + "eStrKey": "LeafRef", + "eStr": ["Eth"] + }, + "SONIC_EVENTS_SWSS_PFC_STORM_INCORRECT_QUEUE_INDEX": { + "desc": "PFC_STORM_EVENT_INCORRECT_QUEUE_INDEX failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_SWSS_PFC_STORM_INCORRECT_QUEUE_ID": { + "desc": "PFC_STORM_EVENT_INCORRECT_QUEUE_ID failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_SWSS_PFC_STORM_INCORRECT_PORT_ID": { + "desc": "PFC_STORM_EVENT_INCORRECT_PORT_ID failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_SWSS_PFC_STORM_INCORRECT_TIMESTAMP": { + "desc": "PFC_STORM_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_SWSS_PFC_STORM_VALID": { + "desc": "VALID PFC_STORM EVENT." + }, + "SONIC_EVENTS_SWSS_CHK_CRM_THRESHOLD_INCORRECT_PERCENT": { + "desc": "CHK_CRM_THRESHOLD_EVENT_INCORRECT_PERCENT failure.", + "eStrKey": "Range" + }, + "SONIC_EVENTS_SWSS_CHK_CRM_THRESHOLD_INCORRECT_USED_CNT": { + "desc": "CHK_CRM_THRESHOLD_EVENT_INCORRECT_USED_CNT failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_SWSS_CHK_CRM_THRESHOLD_INCORRECT_FREE_CNT": { + "desc": "CHK_CRM_THRESHOLD_EVENT_INCORRECT_FREE_CNT failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_SWSS_CHK_CRM_THRESHOLD_INCORRECT_TIMESTAMP": { + "desc": "CHK_CRM_THRESHOLD_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_SWSS_CHK_CRM_THRESHOLD_VALID": { + "desc": "VALID CHK_CRM_THRESHOLD EVENT."
+ } +} diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-syncd.json b/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-syncd.json new file mode 100644 index 000000000000..ed528e1bc297 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-syncd.json @@ -0,0 +1,13 @@ +{ + "SONIC_EVENTS_SYNCD_SYNCD_FAILURE_INCORRECT_FAIL_TYPE": { + "desc": "SYNCD_FAILURE_EVENT_INCORRECT_FAIL_TYPE failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_SYNCD_SYNCD_FAILURE_INCORRECT_TIMESTAMP": { + "desc": "SYNCD_FAILURE_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_SYNCD_SYNCD_FAILURE_VALID": { + "desc": "VALID SYNCD_FAILURE EVENT." + } +} diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-bgp.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-bgp.json new file mode 100644 index 000000000000..2fa562f5efa6 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-bgp.json @@ -0,0 +1,118 @@ +{ + "SONIC_EVENTS_BGP_BGP_STATE_INCORRECT_IP": { + "sonic-events-bgp:sonic-events-bgp": { + "sonic-events-bgp:bgp-state": { + "ip": "INCORRECT_IP", + "status": "up", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_BGP_BGP_STATE_INCORRECT_STATUS": { + "sonic-events-bgp:sonic-events-bgp": { + "sonic-events-bgp:bgp-state": { + "ip": "10.0.0.0", + "status": "INCORRECT_STATUS", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_BGP_BGP_STATE_INCORRECT_TIMESTAMP": { + "sonic-events-bgp:sonic-events-bgp": { + "sonic-events-bgp:bgp-state": { + "ip": "10.0.0.0", + "status": "down", + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_BGP_BGP_STATE_VALID": { + "sonic-events-bgp:sonic-events-bgp": { + "sonic-events-bgp:bgp-state": { + "ip": "10.0.0.0", + "status": "down", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + 
"SONIC_EVENTS_BGP_NOTIFICATION_INCORRECT_MAJOR_CODE": { + "sonic-events-bgp:sonic-events-bgp": { + "sonic-events-bgp:notification": { + "major-code": "INCORRECT_MAJOR_CODE", + "minor-code": 2, + "ip": "10.0.0.0", + "is-sent": true, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_BGP_NOTIFICATION_INCORRECT_MINOR_CODE": { + "sonic-events-bgp:sonic-events-bgp": { + "sonic-events-bgp:notification": { + "major-code": 2, + "minor-code": "INCORRECT_MINOR_CODE", + "ip": "10.0.0.0", + "is-sent": true, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_BGP_NOTIFICATION_INCORRECT_IP": { + "sonic-events-bgp:sonic-events-bgp": { + "sonic-events-bgp:notification": { + "major-code": 2, + "minor-code": 2, + "ip": "INCORRECT_IP", + "is-sent": true, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_BGP_NOTIFICATION_INCORRECT_IS-SENT": { + "sonic-events-bgp:sonic-events-bgp": { + "sonic-events-bgp:notification": { + "major-code": 2, + "minor-code": 2, + "ip": "10.0.0.0", + "is-sent": "INCORRECT_VALUE", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_BGP_NOTIFICATION_INCORRECT_TIMESTAMP": { + "sonic-events-bgp:sonic-events-bgp": { + "sonic-events-bgp:notification": { + "major-code": 2, + "minor-code": 2, + "ip": "10.0.0.0", + "is-sent": true, + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_BGP_NOTIFICATION_VALID": { + "sonic-events-bgp:sonic-events-bgp": { + "sonic-events-bgp:notification": { + "major-code": 2, + "minor-code": 2, + "ip": "10.0.0.0", + "is-sent": true, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_BGP_ZEBRA_NO_BUFF_INCORRECT_TIMESTAMP": { + "sonic-events-bgp:sonic-events-bgp": { + "sonic-events-bgp:zebra-no-buff": { + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_BGP_ZEBRA_NO_BUFF_VALID": { + "sonic-events-bgp:sonic-events-bgp": { + "sonic-events-bgp:zebra-no-buff": { + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + } +} diff 
--git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-dhcp-relay.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-dhcp-relay.json new file mode 100644 index 000000000000..114300f43176 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-dhcp-relay.json @@ -0,0 +1,137 @@ +{ + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_DISCARD_INCORRECT_IFNAME": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "lanes": 65, + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-portchannel:sonic-portchannel": { + "sonic-portchannel:PORTCHANNEL": { + "PORTCHANNEL_LIST": [ + { + "admin_status": "up", + "name": "PortChannel10" + } + ] + } + }, + "sonic-events-dhcp-relay:sonic-events-dhcp-relay": { + "sonic-events-dhcp-relay:dhcp-relay-discard": { + "ifname": "Eth", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_DISCARD_INCORRECT_TIMESTAMP": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "lanes": 65, + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-portchannel:sonic-portchannel": { + "sonic-portchannel:PORTCHANNEL": { + "PORTCHANNEL_LIST": [ + { + "admin_status": "up", + "name": "PortChannel10" + } + ] + } + }, + "sonic-events-dhcp-relay:sonic-events-dhcp-relay": { + "sonic-events-dhcp-relay:dhcp-relay-discard": { + "ifname": "Ethernet0", + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_DISCARD_VALID": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "lanes": 65, + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + 
"sonic-portchannel:sonic-portchannel": { + "sonic-portchannel:PORTCHANNEL": { + "PORTCHANNEL_LIST": [ + { + "admin_status": "up", + "name": "PortChannel10" + } + ] + } + }, + "sonic-events-dhcp-relay:sonic-events-dhcp-relay": { + "sonic-events-dhcp-relay:dhcp-relay-discard": { + "ifname": "Ethernet0", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_DISPARITY_INCORRECT_VLAN": { + "sonic-events-dhcp-relay:sonic-events-dhcp-relay": { + "sonic-events-dhcp-relay:dhcp-relay-disparity": { + "vlan": "INCORRECT_VLAN", + "duration": 0, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_DISPARITY_INCORRECT_DURATION": { + "sonic-events-dhcp-relay:sonic-events-dhcp-relay": { + "sonic-events-dhcp-relay:dhcp-relay-disparity": { + "vlan": "Agg-Vlan100", + "duration": "INCORRECT_DURATION", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_DISPARITY_INCORRECT_TIMESTAMP": { + "sonic-events-dhcp-relay:sonic-events-dhcp-relay": { + "sonic-events-dhcp-relay:dhcp-relay-disparity": { + "vlan": "Vlan100", + "duration": 0, + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_DISPARITY_VALID": { + "sonic-events-dhcp-relay:sonic-events-dhcp-relay": { + "sonic-events-dhcp-relay:dhcp-relay-disparity": { + "vlan": "Agg-Vlan100", + "duration": 0, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + } +} diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-host.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-host.json new file mode 100644 index 000000000000..9a59457a8e54 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-host.json @@ -0,0 +1,262 @@ +{ + "SONIC_EVENTS_HOST_DISK_USAGE_INCORRECT_USAGE": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:disk-usage": { + "fs": "FILESYSTEM", + "usage": -30, + "limit": 99, + 
"timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_DISK_USAGE_INCORRECT_LIMIT": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:disk-usage": { + "fs": "FILESYSTEM", + "usage": 32, + "limit": "INCORRECT_LIMIT", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_DISK_USAGE_INCORRECT_TIMESTAMP": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:disk-usage": { + "fs": "FILESYSTEM", + "usage": 32, + "limit": 99, + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_HOST_DISK_USAGE_VALID": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:disk-usage": { + "fs": "FILESYSTEM", + "usage": 32, + "limit": 99, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_MEMORY_USAGE_INCORRECT_USAGE": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:memory-usage": { + "usage": -30, + "limit": 99, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_MEMORY_USAGE_INCORRECT_LIMIT": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:memory-usage": { + "usage": 32, + "limit": "INCORRECT_LIMIT", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_MEMORY_USAGE_INCORRECT_TIMESTAMP": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:memory-usage": { + "usage": 32, + "limit": 99, + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_HOST_MEMORY_USAGE_VALID": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:memory-usage": { + "usage": 32, + "limit": 99, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_CPU_USAGE_INCORRECT_USAGE": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:cpu-usage": { + "usage": -30, + "limit": 99, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_CPU_USAGE_INCORRECT_LIMIT": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:cpu-usage": { + 
"usage": 32, + "limit": "INCORRECT_LIMIT", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_CPU_USAGE_INCORRECT_TIMESTAMP": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:cpu-usage": { + "usage": 32, + "limit": 99, + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_HOST_CPU_USAGE_VALID": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:cpu-usage": { + "usage": 32, + "limit": 99, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_EVENT_SSHD_INCORRECT_TIMESTAMP": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:event-sshd": { + "username": "username", + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_HOST_EVENT_SSHD_VALID": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:event-sshd": { + "username": "username", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_EVENT_DISK_INCORRECT_FAIL_TYPE": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:event-disk": { + "fail_type": -32, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_EVENT_DISK_INCORRECT_TIMESTAMP": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:event-disk": { + "fail_type": "read_only", + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_HOST_EVENT_DISK_VALID": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:event-disk": { + "fail_type": "read_only", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_EVENT_KERNEL_INCORRECT_FAIL_TYPE": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:event-kernel": { + "fail_type": -32, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_EVENT_KERNEL_INCORRECT_TIMESTAMP": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:event-kernel": { + "fail_type": "write_failed", + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + 
"SONIC_EVENTS_HOST_EVENT_KERNEL_VALID": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:event-kernel": { + "fail_type": "write_protected", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_EVENT_DOWN_CTR_INCORRECT_TIMESTAMP": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:event-down-ctr": { + "ctr_name": "container_name", + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_HOST_EVENT_DOWN_CTR_VALID": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:event-down-ctr": { + "ctr_name": "container_name", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_EVENT_STOPPED_CTR_INCORRECT_TIMESTAMP": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:event-stopped-ctr": { + "ctr_name": "container_name", + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_HOST_EVENT_STOPPED_CTR_VALID": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:event-stopped-ctr": { + "ctr_name": "container_name", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_WATCHDOG_TIMEOUT_INCORRECT_LIMIT": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:watchdog-timeout": { + "limit": "INCORRECT_LIMIT", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_WATCHDOG_TIMEOUT_INCORRECT_TIMESTAMP": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:watchdog-timeout": { + "limit": 5, + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_HOST_WATCHDOG_TIMEOUT_VALID": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:watchdog-timeout": { + "limit": 5, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_EVENT_SEU_INCORRECT_TIMESTAMP": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:event-seu": { + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_HOST_EVENT_SEU_VALID": { + 
"sonic-events-host:sonic-events-host": { + "sonic-events-host:event-seu": { + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_INVALID_FREELIST_INCORRECT_TIMESTAMP": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:invalid-freelist": { + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_HOST_INVALID_FREELIST_VALID": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:invalid-freelist": { + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + } +} diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-swss.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-swss.json new file mode 100644 index 000000000000..885bd45f5378 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-swss.json @@ -0,0 +1,304 @@ +{ + "SONIC_EVENTS_SWSS_IF_STATE_INCORRECT_IFNAME": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "lanes": 65, + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:if-state": { + "ifname": "Eth", + "status": "up", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_SWSS_IF_STATE_INCORRECT_STATUS": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "lanes": 65, + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:if-state": { + "ifname": "Ethernet0", + "status": "INCORRECT_STATUS", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_SWSS_IF_STATE_INCORRECT_TIMESTAMP": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + 
"lanes": 65, + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:if-state": { + "ifname": "Ethernet0", + "status": "down", + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_SWSS_IF_STATE_VALID": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "lanes": 65, + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:if-state": { + "ifname": "Ethernet0", + "status": "down", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_SWSS_PFC_STORM_INCORRECT_IFNAME": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "lanes": 65, + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:pfc-storm": { + "ifname": "Eth", + "queue_index": 0, + "queue_id": 0, + "port_id": 0, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_SWSS_PFC_STORM_INCORRECT_QUEUE_INDEX": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "lanes": 65, + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:pfc-storm": { + "ifname": "Ethernet0", + "queue_index": "INCORRECT_QUEUE_INDEX", + "queue_id": 0, + "port_id": 0, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_SWSS_PFC_STORM_INCORRECT_QUEUE_ID": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "lanes": 65, + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } 
+ }, + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:pfc-storm": { + "ifname": "Ethernet0", + "queue_index": 0, + "queue_id": "INCORRECT_QUEUE_ID", + "port_id": 0, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_SWSS_PFC_STORM_INCORRECT_PORT_ID": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "lanes": 65, + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:pfc-storm": { + "ifname": "Ethernet0", + "queue_index": 0, + "queue_id": 0, + "port_id": "INCORRECT_PORT_ID", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_SWSS_PFC_STORM_INCORRECT_TIMESTAMP": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "lanes": 65, + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:pfc-storm": { + "ifname": "Ethernet0", + "queue_index": 0, + "queue_id": 0, + "port_id": 0, + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_SWSS_PFC_STORM_VALID": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "lanes": 65, + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:pfc-storm": { + "ifname": "Ethernet0", + "queue_index": 0, + "queue_id": 0, + "port_id": 0, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_SWSS_CHK_CRM_THRESHOLD_INCORRECT_PERCENT": { + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:chk_crm_threshold": { + "percent": 123, + "used_cnt": 0, + "free_cnt": 0, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + 
"SONIC_EVENTS_SWSS_CHK_CRM_THRESHOLD_INCORRECT_USED_CNT": { + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:chk_crm_threshold": { + "percent": 0, + "used_cnt": "INCORRECT_USED_CNT", + "free_cnt": 0, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_SWSS_CHK_CRM_THRESHOLD_INCORRECT_FREE_CNT": { + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:chk_crm_threshold": { + "percent": 0, + "used_cnt": 0, + "free_cnt": "INCORRECT_FREE_CNT", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_SWSS_CHK_CRM_THRESHOLD_INCORRECT_TIMESTAMP": { + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:chk_crm_threshold": { + "percent": 0, + "used_cnt": 0, + "free_cnt": 0, + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_SWSS_CHK_CRM_THRESHOLD_VALID": { + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:chk_crm_threshold": { + "percent": 0, + "used_cnt": 0, + "free_cnt": 0, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + } +} diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-syncd.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-syncd.json new file mode 100644 index 000000000000..778aad816b1c --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-syncd.json @@ -0,0 +1,26 @@ +{ + "SONIC_EVENTS_SYNCD_SYNCD_FAILURE_INCORRECT_FAIL_TYPE": { + "sonic-events-syncd:sonic-events-syncd": { + "sonic-events-syncd:syncd-failure": { + "fail_type": "INCORRECT_FAIL_TYPE", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_SYNCD_SYNCD_FAILURE_INCORRECT_TIMESTAMP": { + "sonic-events-syncd:sonic-events-syncd": { + "sonic-events-syncd:syncd-failure": { + "fail_type": "mmu_err", + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_SYNCD_SYNCD_FAILURE_VALID": { + "sonic-events-syncd:sonic-events-syncd": { + "sonic-events-syncd:syncd-failure": { + "fail_type": 
"switch_event", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + } +} diff --git a/src/sonic-yang-models/yang-models/sonic-events-bgp.yang b/src/sonic-yang-models/yang-models/sonic-events-bgp.yang new file mode 100644 index 000000000000..26d2b85e954f --- /dev/null +++ b/src/sonic-yang-models/yang-models/sonic-events-bgp.yang @@ -0,0 +1,94 @@ +module sonic-events-bgp { + namespace "http://github.com/sonic-net/sonic-events-bgp"; + prefix events-bgp; + yang-version 1.1; + + import sonic-events-common { + prefix evtcmn; + revision-date 2022-12-01; + } + + import sonic-types { + prefix stypes; + } + + import ietf-inet-types { + prefix inet; + } + + organization + "SONiC"; + + contact + "SONiC"; + + description + "SONIC BGP events"; + + revision 2022-12-01 { + description "BGP alert events."; + } + + container sonic-events-bgp { + container bgp-state { + evtcmn:ALARM_SEVERITY_MINOR; + + description " + Declares an event for BGP state for a neighbor IP going up/down."; + + leaf ip { + type inet:ip-address; + description "IP of neighbor"; + } + + leaf status { + type stypes:admin_status; + description "Provides the status as up (true) or down (false)"; + } + + uses evtcmn:sonic-events-cmn; + } + + container notification { + evtcmn:ALARM_SEVERITY_MAJOR; + + description " + Reports an notification. + The error codes as per IANA. + The other params are as in the message"; + + leaf major-code { + type uint8; + description "Major IANA error code; [RFC4271][RFC7313]"; + } + + leaf minor-code { + type uint8; + description "Minor IANA error code; [RFC4271][RFC7313]"; + } + + leaf ip { + type inet:ip-address; + description "IP of neighbor associated with this notification"; + } + + leaf is-sent { + type boolean; + description "true - if this notification was for sent messages; false if it was for received."; + } + + uses evtcmn:sonic-events-cmn; + } + + container zebra-no-buff { + evtcmn:ALARM_SEVERITY_MINOR; + + description " + Declares an event for zebra running out of buffer. 
+ This event does not have any other parameter. + Hence source + tag identifies an event"; + + uses evtcmn:sonic-events-cmn; + } + } +} diff --git a/src/sonic-yang-models/yang-models/sonic-events-common.yang b/src/sonic-yang-models/yang-models/sonic-events-common.yang new file mode 100644 index 000000000000..822a6eefaf44 --- /dev/null +++ b/src/sonic-yang-models/yang-models/sonic-events-common.yang @@ -0,0 +1,80 @@ +module sonic-events-common { + namespace "http://github.com/sonic-net/sonic-events-common"; + prefix evtcmn; + yang-version 1.1; + + import ietf-yang-types { + prefix yang; + } + + organization + "SONiC"; + + contact + "SONiC"; + + description + "SONIC Events common definition"; + + revision 2022-12-01 { + description + "Common reusable definitions"; + } + + grouping sonic-events-cmn { + leaf timestamp { + type yang:date-and-time; + description "time of the event"; + } + } + + grouping sonic-events-usage { + leaf usage { + type uint8 { + range "0..100" { + error-message "Incorrect val for %"; + } + } + description "Percentage in use"; + } + + leaf limit { + type uint8 { + range "0..100" { + error-message "Incorrect val for %"; + } + } + description "Percentage limit set"; + } + } + + extension EVENT_SEVERITY_2 { + description + "Indicates that the severity level of this type of event is 2"; + } + + extension EVENT_SEVERITY_3 { + description + "Indicates that the severity level of this type of event is 3"; + } + + extension EVENT_SEVERITY_4 { + description + "Indicates that the severity level of this type of event is 4"; + } + + extension ALARM_SEVERITY_MINOR { + description + "Indicates the existence of a non-service affecting fault condition + and that corrective action should be taken in order to prevent a more serious + (for example, service affecting) fault. 
Such a severity can be reported, + for example, when the detected alarm condition is not currently degrading the capacity of the resource"; + } + + extension ALARM_SEVERITY_MAJOR { + description + "Indicates that a service affecting condition has developed and an urgent corrective + action is required. Such a severity can be reported, for example, when there is a severe + degradation in the capability of the resource and its full capability must be restored."; + } +} diff --git a/src/sonic-yang-models/yang-models/sonic-events-dhcp-relay.yang b/src/sonic-yang-models/yang-models/sonic-events-dhcp-relay.yang new file mode 100644 index 000000000000..5119397968eb --- /dev/null +++ b/src/sonic-yang-models/yang-models/sonic-events-dhcp-relay.yang @@ -0,0 +1,82 @@ +module sonic-events-dhcp-relay { + namespace "http://github.com/sonic-net/sonic-events-dhcp-relay"; + yang-version 1.1; + prefix events-dhcp-relay; + + import sonic-events-common { + prefix evtcmn; + revision-date 2022-12-01; + } + + import sonic-port { + prefix port; + } + + import sonic-portchannel { + prefix lag; + } + + organization + "SONiC"; + + contact + "SONiC"; + + description + "SONIC dhcp-relay events"; + + revision 2022-12-01 { + description "dhcp-relay alert events."; + } + + container sonic-events-dhcp-relay { + container dhcp-relay-discard { + evtcmn:ALARM_SEVERITY_MAJOR; + + description " + Declares an event for dhcp-relay discarding packet on an + interface due to missing IP address assigned. 
+ Params: + name of the interface discarding."; + + leaf ifname { + type union { + type leafref { + path "/port:sonic-port/port:PORT/port:PORT_LIST/port:name"; + } + type leafref { + path "/lag:sonic-portchannel/lag:PORTCHANNEL/lag:PORTCHANNEL_LIST/lag:name"; + } + } + description "Name of the i/f discarding"; + } + + uses evtcmn:sonic-events-cmn; + } + + container dhcp-relay-disparity { + evtcmn:ALARM_SEVERITY_MAJOR; + + description " + Declares an event for disparity detected in + DHCP Relay behavior by dhcpmon. + parameters: + vlan that shows this disparity + The duration of disparity"; + + leaf vlan { + type string { + pattern '(Agg-Vlan|Vlan)([0-9]{1,3}|[1-3][0-9]{3}|[4][0][0-8][0-9]|[4][0][9][0-4])'; + } + description "Name of the vlan affected"; + } + + leaf duration { + type uint32; + description "Duration of disparity"; + } + + uses evtcmn:sonic-events-cmn; + } + } +} diff --git a/src/sonic-yang-models/yang-models/sonic-events-host.yang b/src/sonic-yang-models/yang-models/sonic-events-host.yang new file mode 100644 index 000000000000..3ac8213695ca --- /dev/null +++ b/src/sonic-yang-models/yang-models/sonic-events-host.yang @@ -0,0 +1,183 @@ +module sonic-events-host { + namespace "http://github.com/sonic-net/sonic-events-host"; + yang-version 1.1; + prefix events-host; + + import sonic-events-common { + prefix evtcmn; + revision-date 2022-12-01; + } + + organization + "SONiC"; + + contact + "SONiC"; + + description + "YANG schema defined for host events"; + + revision 2022-12-01 { + description "BGP alert events."; + } + + container sonic-events-host { + container disk-usage { + evtcmn:ALARM_SEVERITY_MINOR; + + description " + Declares an event for disk usage crossing set limit + The parameters describe the usage & limit set."; + + leaf fs { + type string; + description "Name of the file system"; + } + + uses evtcmn:sonic-events-usage; + + uses evtcmn:sonic-events-cmn; + } + + container memory-usage { + evtcmn:ALARM_SEVERITY_MINOR; + + description " + 
Declares an event for memory usage crossing set limit + The parameters describe the usage & limit set."; + + uses evtcmn:sonic-events-usage; + + uses evtcmn:sonic-events-cmn; + } + + container cpu-usage { + evtcmn:ALARM_SEVERITY_MINOR; + description " + Declares an event for cpu usage crossing set limit + The parameters describe the usage & limit set."; + + uses evtcmn:sonic-events-usage; + + uses evtcmn:sonic-events-cmn; + } + + container event-sshd { + evtcmn:ALARM_SEVERITY_MINOR; + + description " + Declares and event reported by sshd. + This implies an internal system state blocks sshd from + creating the new user."; + + leaf username { + type string; + description "Name of the new user"; + } + + uses evtcmn:sonic-events-cmn; + } + + container event-disk { + evtcmn:ALARM_SEVERITY_MINOR; + + description " + Declares an event reported by disk check. + The fail type declares the type of failure. + read-only - denotes that disk is in RO state."; + + leaf fail_type { + type enumeration { + enum "read_only"; + } + description "Type of failure"; + } + + uses evtcmn:sonic-events-cmn; + } + + container event-kernel { + evtcmn:ALARM_SEVERITY_MINOR; + + description " + Declares an event reported by kernel. + The fail type declares the type of failure."; + + leaf fail_type { + type enumeration { + enum "write_failed"; + enum "write_protected"; + enum "remount_read_only"; + enum "zlib_decompress"; + } + description "Type of failure"; + } + + uses evtcmn:sonic-events-cmn; + } + + container event-down-ctr { + evtcmn:EVENT_SEVERITY_2; + + description " + Declares an container that is expected to be up is down. + Reported by monit periodically."; + + leaf ctr_name { + type string; + description "Name of the container not running"; + } + + uses evtcmn:sonic-events-cmn; + } + + container event-stopped-ctr { + evtcmn:EVENT_SEVERITY_2; + + description " + Declare an event at the time point of container stopping. 
+ event-down-ctr fires periodically until it starts up."; + + leaf ctr_name { + type string; + description "Name of the container"; + } + + uses evtcmn:sonic-events-cmn; + } + + container watchdog-timeout { + evtcmn:EVENT_SEVERITY_2; + + description " + Declares an event for watchdog timeout failure. + Params: + limit provides max timeout limit"; + + leaf limit { + type uint8; + description "Timeout limit"; + } + + uses evtcmn:sonic-events-cmn; + } + + container event-seu { + evtcmn:EVENT_SEVERITY_2; + + description " + Declares an event for SEU error."; + + uses evtcmn:sonic-events-cmn; + } + + container invalid-freelist { + evtcmn:EVENT_SEVERITY_2; + + description " + Declares an event for invalid freelist failure."; + + uses evtcmn:sonic-events-cmn; + } + } +} diff --git a/src/sonic-yang-models/yang-models/sonic-events-swss.yang b/src/sonic-yang-models/yang-models/sonic-events-swss.yang new file mode 100644 index 000000000000..39d57cb2d256 --- /dev/null +++ b/src/sonic-yang-models/yang-models/sonic-events-swss.yang @@ -0,0 +1,108 @@ +module sonic-events-swss { + namespace "http://github.com/sonic-net/sonic-events-swss"; + yang-version 1.1; + prefix events-swss; + + import sonic-events-common { + prefix evtcmn; + revision-date 2022-12-01; + } + + import sonic-types { + prefix stypes; + } + + import sonic-port { + prefix port; + } + + organization + "SONiC"; + + contact + "SONiC"; + + description + "SONIC SWSS events"; + + revision 2022-12-01 { + description "SWSS alert events."; + } + + container sonic-events-swss { + container if-state { + evtcmn:ALARM_SEVERITY_MINOR; + + description " + Declares an event for i/f flap. 
+ The name of the flapping i/f and status are the only params."; + + leaf ifname { + type leafref { + path "/port:sonic-port/port:PORT/port:PORT_LIST/port:name"; + } + description "Interface name"; + } + + leaf status { + type stypes:admin_status; + description "Provides the status as up (true) or down (false)"; + } + + uses evtcmn:sonic-events-cmn; + } + + container pfc-storm { + evtcmn:ALARM_SEVERITY_MAJOR; + + description " + Declares an event for PFC storm. + The name of the i/f facing the storm is the only param."; + + leaf ifname { + type leafref { + path "/port:sonic-port/port:PORT/port:PORT_LIST/port:name"; + } + description "Interface name"; + } + + leaf queue_index { + type uint8; + } + + leaf queue_id { + type uint64; + } + + leaf port_id { + type uint64; + } + + uses evtcmn:sonic-events-cmn; + } + + container chk_crm_threshold { + evtcmn:ALARM_SEVERITY_MAJOR; + + description " + Declares an event for CRM threshold."; + + leaf percent { + type uint8 { + range 0..100; + } + description "percentage used"; + } + + leaf used_cnt { + type uint8; + } + + leaf free_cnt { + type uint64; + } + + uses evtcmn:sonic-events-cmn; + } + } +} diff --git a/src/sonic-yang-models/yang-models/sonic-events-syncd.yang b/src/sonic-yang-models/yang-models/sonic-events-syncd.yang new file mode 100644 index 000000000000..945afd79cf65 --- /dev/null +++ b/src/sonic-yang-models/yang-models/sonic-events-syncd.yang @@ -0,0 +1,47 @@ +module sonic-events-syncd { + namespace "http://github.com/sonic-net/sonic-events-syncd"; + yang-version 1.1; + prefix events-syncd; + + import sonic-events-common { + prefix evtcmn; + revision-date 2022-12-01; + } + + organization + "SONiC"; + + contact + "SONiC"; + + description + "SONIC syncd events"; + + revision 2022-12-01 { + description "syncd alert events."; + } + + container sonic-events-syncd { + container syncd-failure { + evtcmn:ALARM_SEVERITY_MAJOR; + + description " + Declares an event for all types of syncd failure. 
+ The type of failure and the asic-index of failing syncd are + provided along with a human readable message to give the + dev debugging additional info."; + + leaf fail_type { + type enumeration { + enum "route_add_failed"; + enum "switch_event"; + enum "assert"; + enum "mmu_err"; + enum "parity_check"; + } + } + + uses evtcmn:sonic-events-cmn; + } + } +} From cf20aea1c4c566339c9bde431b5500abcaa5d54b Mon Sep 17 00:00:00 2001 From: Samuel Angebault Date: Thu, 20 Oct 2022 17:15:48 +0200 Subject: [PATCH 083/174] [Arista] Update platform driver library (#12450) fix linecard provisioning issue (500 error) fix some value types for get_system_eeprom_info API refactor code to leverage pci topology (enabling dynamic Pcie plugin) refactor asic declaration logic to new style misc fixes --- platform/barefoot/sonic-platform-modules-arista | 2 +- platform/broadcom/sonic-platform-modules-arista | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/platform/barefoot/sonic-platform-modules-arista b/platform/barefoot/sonic-platform-modules-arista index 11180c37fa17..2eae0dd1ad35 160000 --- a/platform/barefoot/sonic-platform-modules-arista +++ b/platform/barefoot/sonic-platform-modules-arista @@ -1 +1 @@ -Subproject commit 11180c37fa17421afdeef346b3896552872a2721 +Subproject commit 2eae0dd1ad35b6fd5a78f3eebed466f2744d4fdc diff --git a/platform/broadcom/sonic-platform-modules-arista b/platform/broadcom/sonic-platform-modules-arista index 11180c37fa17..2eae0dd1ad35 160000 --- a/platform/broadcom/sonic-platform-modules-arista +++ b/platform/broadcom/sonic-platform-modules-arista @@ -1 +1 @@ -Subproject commit 11180c37fa17421afdeef346b3896552872a2721 +Subproject commit 2eae0dd1ad35b6fd5a78f3eebed466f2744d4fdc From 37ad8befc14c6ad8023cf66d8a28d0b936373303 Mon Sep 17 00:00:00 2001 From: Lawrence Lee Date: Thu, 20 Oct 2022 09:29:57 -0700 Subject: [PATCH 084/174] [tunnel_pkt_handler]: Skip nonexistent intfs (#12424) - Skip the interface status check if the interface does not 
exist. In the future, when the interface is created/comes up this check will be triggered again. Signed-off-by: Lawrence Lee --- dockers/docker-orchagent/tunnel_packet_handler.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/dockers/docker-orchagent/tunnel_packet_handler.py b/dockers/docker-orchagent/tunnel_packet_handler.py index 1ba29a542148..398d7956abf6 100755 --- a/dockers/docker-orchagent/tunnel_packet_handler.py +++ b/dockers/docker-orchagent/tunnel_packet_handler.py @@ -16,6 +16,7 @@ from sonic_py_common import logger as log from pyroute2 import IPRoute +from pyroute2.netlink.exceptions import NetlinkError from scapy.layers.inet import IP from scapy.layers.inet6 import IPv6 from scapy.sendrecv import AsyncSniffer @@ -115,7 +116,14 @@ def get_up_portchannels(self): portchannel_intf_names = [name for name, _ in self.portchannel_intfs] link_statuses = [] for intf in portchannel_intf_names: - status = self.netlink_api.link("get", ifname=intf) + try: + status = self.netlink_api.link("get", ifname=intf) + except NetlinkError: + # Continue if we find a non-existent interface since we don't + # need to listen on it while it's down/not created. Once it comes up, + # we will get another netlink message which will trigger this check again + logger.log_notice("Skipping non-existent interface {}".format(intf)) + continue link_statuses.append(status[0]) up_portchannels = [] From 35874895f2905d412580c2d4b90e2714feb26429 Mon Sep 17 00:00:00 2001 From: andywongarista <78833093+andywongarista@users.noreply.github.com> Date: Thu, 20 Oct 2022 23:25:24 -0700 Subject: [PATCH 085/174] Fix sensord service install (#12376) Why I did it #4021 describes an issue that is still being observed on master image whereby sensord does not start in pmon due to missing service. 
How I did it Updated the lm-sensors install patch with a case for systemd How to verify it Verified that sensord is up in pmon after boot Co-authored-by: Boyang Yu --- ...dh_installinit-to-include-sensord.in.patch | 24 ++++++------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/src/lm-sensors/patch/0002-Patch-to-peform-dh_installinit-to-include-sensord.in.patch b/src/lm-sensors/patch/0002-Patch-to-peform-dh_installinit-to-include-sensord.in.patch index 7bfdee11e3be..bb88f423f4e4 100644 --- a/src/lm-sensors/patch/0002-Patch-to-peform-dh_installinit-to-include-sensord.in.patch +++ b/src/lm-sensors/patch/0002-Patch-to-peform-dh_installinit-to-include-sensord.in.patch @@ -1,23 +1,13 @@ -From b11fd3d516b62c01513d289bc901820aa150c63e Mon Sep 17 00:00:00 2001 -From: Charlie Chen -Date: Wed, 1 Apr 2020 06:59:06 +0000 -Subject: Patch to peform dh_installinit to include sensord.install in the - packed deb - -Signed-off-by: Charlie Chen ---- - debian/rules | 1 + - 1 file changed, 1 insertion(+) - diff --git a/debian/rules b/debian/rules -index 5ebda06..1d77e28 100755 +index 3cd5314..1dd0983 100755 --- a/debian/rules +++ b/debian/rules -@@ -56,3 +56,4 @@ override_dh_auto_install-arch: - +@@ -66,6 +66,8 @@ override_dh_auto_install-arch: + override_dh_installinit-arch: dh_installinit -plm-sensors --no-start + dh_installinit -psensord --no-start --- -2.17.1 - + + override_dh_installsystemd-arch: + dh_installsystemd -plm-sensors --no-start ++ dh_installsystemd -psensord --no-start From 66012b4a289d1238fe532b781c9d68d1eb8f2821 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Fri, 21 Oct 2022 07:09:28 -0700 Subject: [PATCH 086/174] [bullseye] Update libswsscommon deps (#12463) Signed-off-by: maipbui #### Why I did it When updating the container from Buster to Bullseye in azure pipelines in sonic-utilities repo, the build checker failed due to missing one of the dependencies in libswsscommon ``` + sudo dpkg -i libswsscommon_1.0.0_amd64.deb Selecting previously 
unselected package libswsscommon. (Reading database ... 196324 files and directories currently installed.) Preparing to unpack libswsscommon_1.0.0_amd64.deb ... Unpacking libswsscommon (1.0.0) ... dpkg: dependency problems prevent configuration of libswsscommon: libswsscommon depends on libboost-serialization1.71.0; however: Package libboost-serialization1.71.0 is not installed. dpkg: error processing package libswsscommon (--install): dependency problems - leaving unconfigured Processing triggers for libc-bin (2.31-13+deb11u4) ... Errors were encountered while processing: libswsscommon ``` #### How I did it Update the libboost-serialization dependency to a specific version that >= 1.71 #### How to verify it Verified locally, build sonic-utilities successfully with this version --- sonic-slave-bullseye/Dockerfile.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sonic-slave-bullseye/Dockerfile.j2 b/sonic-slave-bullseye/Dockerfile.j2 index 1ffca8bc86c1..296905787130 100644 --- a/sonic-slave-bullseye/Dockerfile.j2 +++ b/sonic-slave-bullseye/Dockerfile.j2 @@ -404,7 +404,7 @@ RUN apt-get update && apt-get install -y \ libboost-dev \ libboost-program-options-dev \ libboost-system-dev \ - libboost-serialization-dev \ + libboost-serialization1.74-dev \ libboost-thread-dev \ libboost-atomic-dev \ libboost-chrono-dev \ From d7b9c64757fb57c110d37a58ef7ba4b581a152e0 Mon Sep 17 00:00:00 2001 From: Ye Jianquan Date: Sat, 22 Oct 2022 01:04:17 +0800 Subject: [PATCH 087/174] Fix the issue that test plan can't be canceled by KVM dump stage (#12469) Why I did it Fix the issue that test plan can't be canceled by KVM dump stage How I did it Fix the issue that test plan can't be canceled by KVM dump stage --- .azure-pipelines/run-test-scheduler-template.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azure-pipelines/run-test-scheduler-template.yml b/.azure-pipelines/run-test-scheduler-template.yml index 41956381fbaf..4173b062adb2 100644 --- 
a/.azure-pipelines/run-test-scheduler-template.yml +++ b/.azure-pipelines/run-test-scheduler-template.yml @@ -95,7 +95,7 @@ steps: echo "Runtime detailed progress at https://www.testbed-tools.org/scheduler/testplan/$TEST_PLAN_ID" # When "KVMDUMP" finish, it changes into "FAILED", "CANCELLED" or "FINISHED" python ./.azure-pipelines/test_plan.py poll -i "$(TEST_PLAN_ID)" --timeout 43200 --expected-states FINISHED CANCELLED FAILED - condition: always() + condition: succeededOrFailed() env: TESTBED_TOOLS_URL: $(TESTBED_TOOLS_URL) displayName: KVM dump From f4046c141709f96657fa1f7b5331b4d904e0b726 Mon Sep 17 00:00:00 2001 From: kellyyeh <42761586+kellyyeh@users.noreply.github.com> Date: Fri, 21 Oct 2022 10:33:10 -0700 Subject: [PATCH 088/174] Add dhcp6relay dualtor option (#12459) --- dockers/docker-dhcp-relay/dhcpv6-relay.agents.j2 | 3 +++ 1 file changed, 3 insertions(+) diff --git a/dockers/docker-dhcp-relay/dhcpv6-relay.agents.j2 b/dockers/docker-dhcp-relay/dhcpv6-relay.agents.j2 index cca0b1c4b21a..8f83e05efc7c 100644 --- a/dockers/docker-dhcp-relay/dhcpv6-relay.agents.j2 +++ b/dockers/docker-dhcp-relay/dhcpv6-relay.agents.j2 @@ -12,6 +12,9 @@ {% set _dummy = relay_for_ipv6.update({'flag': False}) %} [program:dhcp6relay] command=/usr/sbin/dhcp6relay +{#- Dual ToR Option #} +{% if 'subtype' in DEVICE_METADATA['localhost'] and DEVICE_METADATA['localhost']['subtype'] == 'DualToR' %} -d{% endif %} + priority=3 autostart=false autorestart=false From 9cdd78788f093cc9df9d95fa00142e8f7cce1a8b Mon Sep 17 00:00:00 2001 From: Samuel Angebault Date: Sat, 22 Oct 2022 03:26:43 +0200 Subject: [PATCH 089/174] Add support for UpperlakeElite (#12280) Signed-off-by: Samuel Angebault Signed-off-by: Samuel Angebault --- files/Aboot/boot0.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/Aboot/boot0.j2 b/files/Aboot/boot0.j2 index 0ca7ed4cf8fa..1a5d85e642f5 100644 --- a/files/Aboot/boot0.j2 +++ b/files/Aboot/boot0.j2 @@ -487,7 +487,7 @@ 
write_platform_specific_cmdline() { aboot_machine=arista_7050_qx32s cmdline_add modprobe.blacklist=radeon,sp5100_tco fi - if [ "$sid" = "Upperlake" ] || [ "$sid" = "UpperlakeES" ]; then + if in_array "$sid" "Upperlake" "UpperlakeES" "UpperlakeElite"; then aboot_machine=arista_7060_cx32s flash_size=3700 fi From f39c2adc045e9b20c4fbdd5965d54d01dbec04ea Mon Sep 17 00:00:00 2001 From: Samuel Angebault Date: Sat, 22 Oct 2022 03:27:32 +0200 Subject: [PATCH 090/174] Fix extraction of platform.tar.gz for firsttime (#11935) --- files/Aboot/boot0.j2 | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/files/Aboot/boot0.j2 b/files/Aboot/boot0.j2 index 1a5d85e642f5..cf09536a5e29 100644 --- a/files/Aboot/boot0.j2 +++ b/files/Aboot/boot0.j2 @@ -401,9 +401,14 @@ extract_image() { extract_image_secureboot() { info "Extracting necessary swi content" # NOTE: boot/ is not used by the boot process but only extracted for kdump - unzip -oq "$swipath" 'boot/*' platform/firsttime .imagehash -d "$image_path" + unzip -oq "$swipath" 'boot/*' .imagehash -d "$image_path" - info "Installing image as $installer_image_path" + ## Extract platform.tar.gz + info "Extracting platform.tar.gz" + mkdir -p "$image_path/platform" + unzip -oqp "$swipath" "platform.tar.gz" | tar xzf - -C "$image_path/platform" $TAR_EXTRA_OPTION + + info "Installing swi under $installer_image_path" mv "$swipath" "$installer_image_path" chmod a+r "$installer_image_path" swipath="$installer_image_path" From 8c73e684682aa5844d0c69eaf1f375b12f796cca Mon Sep 17 00:00:00 2001 From: Stephen Sun <5379172+stephenxs@users.noreply.github.com> Date: Sun, 23 Oct 2022 14:59:20 +0800 Subject: [PATCH 091/174] Remove \n from the end of fs_path in ONIEUpdater (#12465) This fixes the following error ``` admin@sonic:~$ sudo fwutil show status mount: /mnt/onie-fs: special device /dev/sda2 does not exist. Error: Command '['mount', '-n', '-r', '-t', 'ext4', '/dev/sda2\n', '/mnt/onie-fs']' returned non-zero exit status 32.. 
Aborting... Aborted! admin@sonic:~$ sudo vi /usr/local/lib/python3.9/dist-packages/sonic_platform/ ``` Seems like #11877 the rstrip('\n') was removed. Probably by mistake. Signed-off-by: Stephen Sun --- platform/mellanox/mlnx-platform-api/sonic_platform/component.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/platform/mellanox/mlnx-platform-api/sonic_platform/component.py b/platform/mellanox/mlnx-platform-api/sonic_platform/component.py index 3b9ce86ac901..4befc7998425 100644 --- a/platform/mellanox/mlnx-platform-api/sonic_platform/component.py +++ b/platform/mellanox/mlnx-platform-api/sonic_platform/component.py @@ -172,7 +172,7 @@ def __mount_onie_fs(self): cmd1 = ['fdisk', '-l'] cmd2 = ['grep', 'ONIE boot'] cmd3 = ['awk', '{print $1}'] - fs_path = check_output_pipe(cmd1, cmd2, cmd3) + fs_path = check_output_pipe(cmd1, cmd2, cmd3).rstrip('\n') os.mkdir(fs_mountpoint) cmd = ["mount", "-n", "-r", "-t", "ext4", fs_path, fs_mountpoint] From 2041e76ee917414338964082bc3d636ba17af690 Mon Sep 17 00:00:00 2001 From: Vivek Date: Sat, 22 Oct 2022 23:59:54 -0700 Subject: [PATCH 092/174] [submodule] update sonic-utilities pointer (#12462) aedc05ecf [QoS] Support dynamic headroom calculation for Barefoot platforms (#2306) 7f4da26f2 [app_ext] [auto-ts] Add available_mem_threshold option (#2423) b25070176 YANG Validation for ConfigDB Updates: Fix Decorator Bug (#2405) f62d1e596 [watermarkstat] Add new warning message for the 'q_shared_multi' counters (#2408) 25fda264e [chassis]Add fabric counter cli commands (#1860) ae97e597e Update sonic command doc to add CLIs relative to SONiC fips (#2377) abd5eba49 [generate_dump]: Enhance show techsupport for cisco-8000 platform (#2403) ee15b74a2 Include configuring laser frequency and tx power (#2437) 70be50cdc Add a subcommand to display a hexdump of transceiver EEPROM page (#2379) c246801ba Filter port invalid MTU configuration (#2378) 362ec9bd7 [show] vnet advertised-route command (#2390) 2372e2983 [show 
priority-group drop counters] Remove backup with cached PG drop counters after 'config reload' (#2386) Signed-off-by: Vivek Reddy Karri Signed-off-by: Vivek Reddy Karri --- src/sonic-utilities | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-utilities b/src/sonic-utilities index 423779410d8f..aedc05ecf23d 160000 --- a/src/sonic-utilities +++ b/src/sonic-utilities @@ -1 +1 @@ -Subproject commit 423779410d8f8784bc6a116aa656cb4f822c0ac6 +Subproject commit aedc05ecf23d72fd28ed05dee0d082c307a888d0 From fff6808b9c01d09cb9e79cfbb1e952574a36a3df Mon Sep 17 00:00:00 2001 From: Hua Liu <58683130+liuh-80@users.noreply.github.com> Date: Mon, 24 Oct 2022 13:03:52 +0800 Subject: [PATCH 093/174] [openssh] Update openssh make file, add missing dependency to libnl. (#12327) Update openssh make file, add missing dependency to libnl. #### Why I did it Openssh indirectly depends on libnl. Another PR #12447 need add new patch to openssh, after adding new patch to openssh, PR build failed with libnl missing error. #### How I did it Update openssh make file, add missing dependency to libnl. #### How to verify it Pass all test case #### Which release branch to backport (provide reason below if selected) - [ ] 201811 - [ ] 201911 - [ ] 202006 - [ ] 202012 - [ ] 202106 - [ ] 202111 - [ ] 202205 #### Description for the changelog Update openssh make file, add missing dependency to libnl. #### Ensure to add label/tag for the feature raised. example - PR#2174 under sonic-utilities repo. where, Generic Config and Update feature has been labelled as GCU. 
#### Link to config_db schema for YANG module changes #### A picture of a cute animal (not mandatory but encouraged) --- rules/openssh.mk | 1 + 1 file changed, 1 insertion(+) diff --git a/rules/openssh.mk b/rules/openssh.mk index 53438b76ab73..a5e4b5c4b7b0 100644 --- a/rules/openssh.mk +++ b/rules/openssh.mk @@ -6,6 +6,7 @@ export OPENSSH_VERSION OPENSSH_SERVER = openssh-server_$(OPENSSH_VERSION)_$(CONFIGURED_ARCH).deb $(OPENSSH_SERVER)_SRC_PATH = $(SRC_PATH)/openssh +$(OPENSSH_SERVER)_DEPENDS += $(LIBNL3_DEV) $(LIBNL_ROUTE3_DEV) SONIC_MAKE_DEBS += $(OPENSSH_SERVER) # The .c, .cpp, .h & .hpp files under src/{$DBG_SRC_ARCHIVE list} From c20707f52a3458acef675397c762a47b128d0626 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Mon, 24 Oct 2022 10:07:52 -0700 Subject: [PATCH 094/174] [master][sonic-linkmgrd] submodule update (#12132) 79edf66 Longxiang Lyu Wed Aug 17 08:12:37 2022 +0800 Fix azure pipeline (#118) 8e0f2c6 Longxiang Lyu Wed Aug 17 08:36:07 2022 +0800 Update linkmgr health after getting default route update (#117) b14ffb8 Jing Zhang Wed Aug 17 15:44:37 2022 -0700 [active-active] post mux metrics events (#123) a30dbb3 Jing Zhang Thu Aug 18 18:16:04 2022 -0700 Update handleMuxConfigNotification logic (#125) e14aaba Jing Zhang Tue Aug 23 10:02:17 2022 -0700 [active-active] Remove unnecessary mux wait timeout logs (#122) cc83717 Longxiang Lyu Fri Sep 2 02:17:53 2022 +0800 Fix mux config (#128) 5429281 Mai Bui Thu Sep 1 17:44:04 2022 -0400 [linkmgrd] Replace memset function in link_prober (#126) b5aaec1 Jing Zhang Fri Sep 9 14:01:03 2022 -0700 [active-active] shutdown link prober when starting as isolated (#130) 75f02cf Jing Zhang Tue Sep 13 10:34:32 2022 -0700 [active-standby] update warmboot reconciliation logic (#129) a5a9f90 Hua Liu Fri Sep 16 09:54:32 2022 +0800 Install libyang to azure pipeline (#132) 6fe4f0f Jing Zhang Tue Sep 20 10:10:16 2022 -0700 [Active-Active] flaky LinkmgrdBootupSequence unit tests (#134) ea68e8c Jing Zhang Wed Sep 21 10:52:18 2022 
-0700 Post switchover reasons to STATE DB (#131) 60c35b5 Jing Zhang Thu Sep 22 13:00:41 2022 -0700 [Active-Active] server side admin forwarding state sync up (#133) 08e1be5 Jing Zhang Mon Sep 26 10:59:27 2022 -0700 [Active-Active] avoid being stuck in unknown after process init (#136) 2579988 Jing Zhang Mon Oct 3 09:40:55 2022 -0700 [Active-Standby] fix syslog flood caused by unkown -> standby switchovers (#137) 7e9f670 Jing Zhang Wed Oct 5 10:03:45 2022 -0700 [Active-Active] Retry config mux mode standby (#139) 23feb3b Jing Zhang Wed Oct 5 15:22:58 2022 -0700 [Active-Active] Post link prober stats to state db (#140) e650098 Jing Zhang Fri Oct 7 15:27:17 2022 -0700 [Active-Active] Update default route shutdown heartbeat logic (#141) d0653e7 Jing Zhang Tue Oct 11 10:22:02 2022 -0700 [Active-Standby] avoid posting mux metrics event when receiving unsolicited mux state notification (#142) dcf6460 Longxiang Lyu Fri Oct 21 12:15:42 2022 +0800 [active-active] Add support to send/handle mux probe request (#147) fdf42ed Longxiang Lyu Fri Oct 21 10:34:47 2022 +0800 Fix link prober state event report twice issue (#149) 5fd19a3 Longxiang Lyu Mon Oct 17 09:20:27 2022 +0800 [active-active] Fix config reload (#145) sign-off: Jing Zhang zhangjing@microsoft.com --- src/linkmgrd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/linkmgrd b/src/linkmgrd index 4bf8b3df8bde..dcf64601179e 160000 --- a/src/linkmgrd +++ b/src/linkmgrd @@ -1 +1 @@ -Subproject commit 4bf8b3df8bdebf1633b2dd54100aa1c6939fa7d8 +Subproject commit dcf64601179e5c4ef23fe0137acf6dd7fba0e604 From a0661e2dcb57c07d3ac93f9c91ed0969ba8c79c6 Mon Sep 17 00:00:00 2001 From: SuvarnaMeenakshi <50386592+SuvarnaMeenakshi@users.noreply.github.com> Date: Mon, 24 Oct 2022 14:42:18 -0700 Subject: [PATCH 095/174] [sonic-host-services]: Advance submodule (#12195) #### Why I did it Update sonic-host-services submodule to include below commits: ``` bc8698d Merge pull request #21 from abdosi/feature 557a110 Fix the 
issue where if dest port is not specified in ACL rule than for multi-asic where we create NAT rule to forward traffic from Namespace to host fail with exception. 6e45acc (master) Merge pull request #14 from abdosi/feature 4d6cad7 Merge remote-tracking branch 'upstream/master' into feature bceb13e Install libyang to azure pipeline (#20) 82299f5 Merge pull request #13 from SuvarnaMeenakshi/cacl_fabricns 15d3bf4 Merge branch 'master' into cacl_fabricns de54082 Merge pull request #16 from ZhaohuiS/feature/caclmgrd_external_client_warning_log b4b368d Add warning log if destination port is not defined d4bb96d Merge branch 'master' into cacl_fabricns 35c76cb Add unit-test and fix typo. 17d44c2 Made Changes to be Python 3.7 compatible 978afb5 Aligning Code 1fbf8fb Merge remote-tracking branch 'upstream/master' into feature 7b8c7d1 Added UT for the changes 91c4c42 Merge pull request #9 from ZhaohuiS/feature/caclmgrd_external_client 7c0b56a Add 4 test cases for external_client_acl, including single port and port range for ipv4 and ipv6 b71e507 Merge remote-tracking branch 'origin/master' into HEAD d992dc0 Merge branch 'master' into feature/caclmgrd_external_client bd7b172 DST_PORT is configuralbe in json config file for EXTERNAL_CLIENT_ACL f9af7ae [CLI] Move hostname, mgmt interface/vrf config to hostcfgd (#2) 70ce6a3 Merge pull request #10 from sujinmkang/cold_reset 29be8d2 Added Support to render Feature Table using Device running metadata. Also added support to render 'has_asic_scope' field of Feature Table. 3437e35 [caclmgrd][chassis]: Add ip tables rules to accept internal docker traffic from fabric asic namespaces. 
8720561 Fix and add hardware reboot cause determination tests 0dcc7fe remove the empty bracket if no hardware reboot cause minor e47d831 fix the wrong expected result comparision ef86b53 Fix startswith Attribute error 8a630bb fix mock patch 8543ddf update the reboot cause logic and update the unit test 53ad7cd fix the mock patch function 7c8003d fix the reboot-cause regix for test 1ba611f fix typo 25379d3 Add unit test case a56133b Add hardware reboot cause as actual reboot cause for soft reboot failed c7d3833 Support Restapi/gnmi control plane acls f6ea036 caclmgrd: Don't block traffic to mgmt by default a712fc4 Update test cases adc058b caclmgrd: Don't block traffic to mgmt by default 06ff918 Merge pull request #7 from bluecmd/patch-1 e3e23bc ci: Rename sonic-buildimage repository e83a858 Merge pull request #4 from kamelnetworks/acl-ip2me-test f5a2e50 [caclmgrd]: Tests for IP2ME rules generation ``` --- src/sonic-host-services | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-host-services b/src/sonic-host-services index 709046bbec9d..bc8698d1d760 160000 --- a/src/sonic-host-services +++ b/src/sonic-host-services @@ -1 +1 @@ -Subproject commit 709046bbec9d05c9bf06e7c54a23ae0f9c970281 +Subproject commit bc8698d1d760fefedaeb4742ad19b25ef2b3c17b From 078608e7f00388fb5f9f79a9e2f0d5b301b119c5 Mon Sep 17 00:00:00 2001 From: xumia <59720581+xumia@users.noreply.github.com> Date: Tue, 25 Oct 2022 15:31:18 +0800 Subject: [PATCH 096/174] Add the original docker tag without username (#12472) Why I did it Add the original docker tag without username to support some of the docker file not changed build broken issue. The username suffix only required when the native build feature enabled, but if not enabled, the docker file not necessary to change, the build should be succeeded. It is to support cisco 202205 build. 
--- slave.mk | 3 +++ 1 file changed, 3 insertions(+) diff --git a/slave.mk b/slave.mk index b91dcd349629..54a01aaa1a29 100644 --- a/slave.mk +++ b/slave.mk @@ -896,6 +896,7 @@ $(addprefix $(TARGET_PATH)/, $(SONIC_SIMPLE_DOCKER_IMAGES)) : $(TARGET_PATH)/%.g --label Tag=$(SONIC_IMAGE_VERSION) \ -f $(TARGET_DOCKERFILE)/Dockerfile.buildinfo \ -t $(DOCKER_IMAGE_REF) $($*.gz_PATH) $(LOG) + if [ x$(SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD) == x"y" ]; then docker tag $(DOCKER_IMAGE_REF) $*; fi scripts/collect_docker_version_files.sh $(DOCKER_IMAGE_REF) $(TARGET_PATH) $(call docker-image-save,$*,$@) # Clean up @@ -1015,6 +1016,7 @@ $(addprefix $(TARGET_PATH)/, $(DOCKER_IMAGES)) : $(TARGET_PATH)/%.gz : .platform --label Tag=$(SONIC_IMAGE_VERSION) \ $($(subst -,_,$(notdir $($*.gz_PATH)))_labels) \ -t $(DOCKER_IMAGE_REF) $($*.gz_PATH) $(LOG) + if [ x$(SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD) == x"y" ]; then docker tag $(DOCKER_IMAGE_REF) $*; fi scripts/collect_docker_version_files.sh $(DOCKER_IMAGE_REF) $(TARGET_PATH) $(call docker-image-save,$*,$@) # Clean up @@ -1067,6 +1069,7 @@ $(addprefix $(TARGET_PATH)/, $(DOCKER_DBG_IMAGES)) : $(TARGET_PATH)/%-$(DBG_IMAG --label Tag=$(SONIC_IMAGE_VERSION) \ --file $($*.gz_PATH)/Dockerfile-dbg \ -t $(DOCKER_DBG_IMAGE_REF) $($*.gz_PATH) $(LOG) + if [ x$(SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD) == x"y" ]; then docker tag $(DOCKER_IMAGE_REF) $*; fi scripts/collect_docker_version_files.sh $(DOCKER_DBG_IMAGE_REF) $(TARGET_PATH) $(call docker-image-save,$*-$(DBG_IMAGE_MARK),$@) # Clean up From 158371de3864d3b8752162e5e1ba348e30fbd579 Mon Sep 17 00:00:00 2001 From: xumia <59720581+xumia@users.noreply.github.com> Date: Tue, 25 Oct 2022 15:45:01 +0800 Subject: [PATCH 097/174] [Ci] clean up the old artifacts in the agent before downloading the new artifacts (#12391) Ci] clean up the old artifacts in the agent before downloading the new artifacts --- .azure-pipelines/run-test-template.yml | 4 ++++ azure-pipelines.yml | 5 +++++ 2 files changed, 
9 insertions(+) diff --git a/.azure-pipelines/run-test-template.yml b/.azure-pipelines/run-test-template.yml index 5c848ca1d4db..88404cabf416 100644 --- a/.azure-pipelines/run-test-template.yml +++ b/.azure-pipelines/run-test-template.yml @@ -27,6 +27,10 @@ steps: clean: true displayName: 'checkout sonic-mgmt repo' +- script: | + sudo rm -rf ../target + displayName: "Cleanup" + - task: DownloadPipelineArtifact@2 inputs: artifact: sonic-buildimage.vs diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 15554b8bc7b7..de84c66e8d3f 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -98,6 +98,11 @@ stages: submodules: recursive displayName: 'Checkout code' + - script: | + sudo rm -rf ../target + sudo rm -rf ../*.deb + displayName: "Cleanup" + - task: DownloadPipelineArtifact@2 inputs: source: specific From 81ee9488e808d6bb5204f4662bcc2a492c125245 Mon Sep 17 00:00:00 2001 From: Liu Shilong Date: Tue, 25 Oct 2022 16:34:07 +0800 Subject: [PATCH 098/174] [action] Use github code scan instead of LGTM. 
(#12402) * [action] Add code scan for python --- .github/codeql/codeql-config.yml | 4 +++ .github/workflows/codeql-analysis.yml | 43 +++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) create mode 100644 .github/codeql/codeql-config.yml create mode 100644 .github/workflows/codeql-analysis.yml diff --git a/.github/codeql/codeql-config.yml b/.github/codeql/codeql-config.yml new file mode 100644 index 000000000000..2c8b0498f319 --- /dev/null +++ b/.github/codeql/codeql-config.yml @@ -0,0 +1,4 @@ +name: "CodeQL config" +queries: + - uses: security-and-quality + - uses: security-extended diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 000000000000..0a8f2dc58682 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,43 @@ +# For more infomation, please visit: https://github.com/github/codeql-action + +name: "CodeQL" + +on: + push: + branches: + - 'master' + - '202[0-9][0-9][0-9]' + pull_request: + branches: + - 'master' + - '202[0-9][0-9][0-9]' + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'python' ] + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + # Initializes the CodeQL tools for scanning. 
+ - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + config-file: ./.github/codeql/codeql-config.yml + languages: ${{ matrix.language }} + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 + with: + category: "/language:${{matrix.language}}" From 85e3a81f47bfc67d0be7d0c5218194682f856420 Mon Sep 17 00:00:00 2001 From: Devesh Pathak <54966909+devpatha@users.noreply.github.com> Date: Tue, 25 Oct 2022 14:51:02 -0700 Subject: [PATCH 099/174] Fix to improve hostname handling (#12064) * Fix to improve hostname handling If config_db.json is missing hostname entry, hostname-config.sh ends up deleting existing entry too and hostname changes to default 'localhost' * default hostname to 'sonic` if missing in config file --- files/image_config/hostname/hostname-config.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/files/image_config/hostname/hostname-config.sh b/files/image_config/hostname/hostname-config.sh index e9f7fc122709..c2a4f1e546ce 100755 --- a/files/image_config/hostname/hostname-config.sh +++ b/files/image_config/hostname/hostname-config.sh @@ -3,6 +3,11 @@ CURRENT_HOSTNAME=`hostname` HOSTNAME=`sonic-cfggen -d -v DEVICE_METADATA[\'localhost\'][\'hostname\']` +if [ -z "$HOSTNAME" ] ; then + echo "Missing hostname in the config file, setting to default 'sonic'" + HOSTNAME='sonic' +fi + echo $HOSTNAME > /etc/hostname hostname -F /etc/hostname From dad3f61b887b4212fc3981d4e006e02ccc8024b3 Mon Sep 17 00:00:00 2001 From: Sumukha Tumkur Vani Date: Tue, 25 Oct 2022 15:44:13 -0700 Subject: [PATCH 100/174] [Restapi] Update submodule (#12006) Update with following commits: Fix missing dependencies and improve dependency management sonic-restapi#123 [Static Route Expiry] Update API contract sonic-restapi#125 [Static Route Expiry] Feature support sonic-restapi#124 Handle IPv6 VNET routes sonic-restapi#127 --- src/sonic-restapi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-restapi 
b/src/sonic-restapi index bcc6f704a544..dac446feb2be 160000 --- a/src/sonic-restapi +++ b/src/sonic-restapi @@ -1 +1 @@ -Subproject commit bcc6f704a54454f326f069501b01759dbb732bb3 +Subproject commit dac446feb2be3aa348f633a4d2e3c64993d7483a From 198b6298841760d9406bd01a9dfe38263fd0f282 Mon Sep 17 00:00:00 2001 From: Junhua Zhai Date: Wed, 26 Oct 2022 15:58:08 +0800 Subject: [PATCH 101/174] [submodule]: Update sonic-sairedis (#12475) 2022-10-21 b7c85ca: [gbsyncd] Add asic db prefix for channel NOTIFICATIONS (sonic-net/sonic-sairedis#1129) (Junhua Zhai) --- src/sonic-sairedis | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-sairedis b/src/sonic-sairedis index 2585a1f29e1d..b7c85caa547c 160000 --- a/src/sonic-sairedis +++ b/src/sonic-sairedis @@ -1 +1 @@ -Subproject commit 2585a1f29e1d04f9b8de4215b7d7169ff3fc8693 +Subproject commit b7c85caa547c8ef81c58bd0ed6ae80ade4ebe425 From 3058fb62e6d19bcd069259136f5d6a7761a68f62 Mon Sep 17 00:00:00 2001 From: Vivek Date: Wed, 26 Oct 2022 02:03:29 -0700 Subject: [PATCH 102/174] Loc moved to prev consolidation change (#12427) Why I did it Issue was caused by this #11341 *.bin image structure in 202205: vkarri@19d5638dde2d:/sonic$ ls -l /tmp/tmp.9ibWSipeRw/installer/x86_64/ total 12 drwxr-xr-x 2 vkarri dip 12288 Oct 14 13:16 platforms However install.sh which runs on ONiE parition expects the platform specific kernel cmd line conf file under platform/$onie_platform_string file https://github.com/sonic-net/sonic-buildimage/blob/master/installer/install.sh#L102 Thus, any platform which defines and depends on these params might be broken on master label. 
How I did it Since we are already filtering the conf files based on TARGET_PLATFORM in build_image.sh, i've just updated the location to installer/platforms instead of installer/$arch/platforms Signed-off-by: Vivek Reddy Karri --- build_image.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build_image.sh b/build_image.sh index 9c70713866a4..ea276367eb51 100755 --- a/build_image.sh +++ b/build_image.sh @@ -71,12 +71,12 @@ generate_onie_installer_image() output_file=$OUTPUT_ONIE_IMAGE [ -n "$1" ] && output_file=$1 # Copy platform-specific ONIE installer config files where onie-mk-demo.sh expects them - rm -rf ./installer/${TARGET_PLATFORM}/platforms/ - mkdir -p ./installer/${TARGET_PLATFORM}/platforms/ + rm -rf ./installer/platforms/ + mkdir -p ./installer/platforms/ for VENDOR in `ls ./device`; do for PLATFORM in `ls ./device/$VENDOR | grep ^${TARGET_PLATFORM}`; do if [ -f ./device/$VENDOR/$PLATFORM/installer.conf ]; then - cp ./device/$VENDOR/$PLATFORM/installer.conf ./installer/${TARGET_PLATFORM}/platforms/$PLATFORM + cp ./device/$VENDOR/$PLATFORM/installer.conf ./installer/platforms/$PLATFORM fi done From 3d9a6e46bc6a5c2d44381d48fb07a96af15bff22 Mon Sep 17 00:00:00 2001 From: isabelmsft <67024108+isabelmsft@users.noreply.github.com> Date: Wed, 26 Oct 2022 13:11:42 -0700 Subject: [PATCH 103/174] Add yang_config_validation to DEVICE_METADATA yang model (#12497) * Add yang_config_validation to DEVICE_METADATA yang model --- src/sonic-yang-models/doc/Configuration.md | 3 ++- .../tests/files/sample_config_db.json | 3 ++- .../tests/yang_model_tests/tests/device_metadata.json | 9 +++++++++ .../tests_config/device_metadata.json | 11 +++++++++++ .../yang-models/sonic-device_metadata.yang | 5 +++++ 5 files changed, 29 insertions(+), 2 deletions(-) diff --git a/src/sonic-yang-models/doc/Configuration.md b/src/sonic-yang-models/doc/Configuration.md index f30a2c8c5f23..d09e2c8bac0b 100644 --- a/src/sonic-yang-models/doc/Configuration.md +++ 
b/src/sonic-yang-models/doc/Configuration.md @@ -831,7 +831,8 @@ instance is supported in SONiC. "deployment_id": "1", "type": "ToRRouter", "bgp_adv_lo_prefix_as_128" : "true", - "buffer_model": "traditional" + "buffer_model": "traditional", + "yang_config_validation": "disable" } } } diff --git a/src/sonic-yang-models/tests/files/sample_config_db.json b/src/sonic-yang-models/tests/files/sample_config_db.json index 9dec93b52037..ecb0be1c4ce8 100644 --- a/src/sonic-yang-models/tests/files/sample_config_db.json +++ b/src/sonic-yang-models/tests/files/sample_config_db.json @@ -332,7 +332,8 @@ "max_cores": "8", "sub_role": "FrontEnd", "dhcp_server": "disabled", - "bgp_adv_lo_prefix_as_128": "true" + "bgp_adv_lo_prefix_as_128": "true", + "yang_config_validation": "disable" } }, "VLAN": { diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/device_metadata.json b/src/sonic-yang-models/tests/yang_model_tests/tests/device_metadata.json index 8a562ef536a5..e55ba4c82578 100644 --- a/src/sonic-yang-models/tests/yang_model_tests/tests/device_metadata.json +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/device_metadata.json @@ -51,6 +51,15 @@ "value": "enable" } }, + "DEVICE_METADATA_DEFAULT_YANG_CONFIG_VALIDATION": { + "desc": "DEVICE_METADATA DEFAULT VALUE FOR YANG CONFIG VALIDATION.", + "eStrKey" : "Verify", + "verify": { + "xpath": "/sonic-device_metadata:sonic-device_metadata/DEVICE_METADATA/localhost/hostname", + "key": "sonic-device_metadata:yang_config_validation", + "value": "disable" + } + }, "DEVICE_METADATA_CORRECT_BUFFER_MODEL_PATTERN": { "desc": "DEVICE_METADATA correct value for BUFFER_MODEL field" }, diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/device_metadata.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/device_metadata.json index ece046f2afb7..638f83d562cb 100644 --- a/src/sonic-yang-models/tests/yang_model_tests/tests_config/device_metadata.json +++ 
b/src/sonic-yang-models/tests/yang_model_tests/tests_config/device_metadata.json @@ -85,6 +85,17 @@ } } }, + "DEVICE_METADATA_DEFAULT_YANG_CONFIG_VALIDATION": { + "sonic-device_metadata:sonic-device_metadata": { + "sonic-device_metadata:DEVICE_METADATA": { + "sonic-device_metadata:localhost": { + "bgp_asn": "65001", + "hostname": "DUT-CSW", + "platform": "Stone-DX010" + } + } + } + }, "DEV_META_DEV_NEIGH_VERSION_TABLE": { "sonic-device_metadata:sonic-device_metadata": { "sonic-device_metadata:DEVICE_METADATA": { diff --git a/src/sonic-yang-models/yang-models/sonic-device_metadata.yang b/src/sonic-yang-models/yang-models/sonic-device_metadata.yang index b04f929cddc9..1d40dc31b798 100644 --- a/src/sonic-yang-models/yang-models/sonic-device_metadata.yang +++ b/src/sonic-yang-models/yang-models/sonic-device_metadata.yang @@ -119,6 +119,11 @@ module sonic-device_metadata { default enable; } + leaf yang_config_validation { + type stypes:mode-status; + default disable; + } + leaf cloudtype { type string; } From 80a7762ff91e18e4baddf3c1ff0cfc68f496dbff Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Wed, 26 Oct 2022 15:26:48 -0700 Subject: [PATCH 104/174] [netberg] Replace os.system (#12104) Signed-off-by: maipbui #### Why I did it `os` - not secure against maliciously constructed input and dangerous if used to evaluate dynamic content #### How I did it Replace `os` by `subprocess` --- .../aurora-610/sonic_platform/component.py | 31 ++++++++++++------- .../aurora-610/sonic_platform/qsfp.py | 3 +- .../aurora-610/sonic_platform/sfp.py | 3 +- 3 files changed, 24 insertions(+), 13 deletions(-) diff --git a/platform/barefoot/sonic-platform-modules-netberg/aurora-610/sonic_platform/component.py b/platform/barefoot/sonic-platform-modules-netberg/aurora-610/sonic_platform/component.py index f2dfeb54f0d2..41a0260bbc05 100644 --- a/platform/barefoot/sonic-platform-modules-netberg/aurora-610/sonic_platform/component.py +++ 
b/platform/barefoot/sonic-platform-modules-netberg/aurora-610/sonic_platform/component.py @@ -4,7 +4,7 @@ import os import re import logging - from subprocess import Popen, PIPE + from subprocess import call, Popen, PIPE from sonic_platform_base.component_base import ComponentBase except ImportError as e: @@ -39,10 +39,10 @@ FW_INSTALL_CMD_LIST = [ - "/usr/share/sonic/platform/plugins/cpld -b 1 -s 0x60 {}", - "/usr/share/sonic/platform/plugins/Yafuflash -cd {} -img-select 3 -non-interactive", - "/usr/share/sonic/platform/plugins/afulnx_64 {} /B /P /N /K", - "/usr/share/sonic/platform/plugins/afulnx_64 {} /B /P /N /K", + ["/usr/share/sonic/platform/plugins/cpld", "-b", "1", "-s", "0x60", ""], + ["/usr/share/sonic/platform/plugins/Yafuflash", "-cd", "", "-img-select", "3", "-non-interactive"], + ["/usr/share/sonic/platform/plugins/afulnx_64", "", "/B", "/P", "/N", "/K"], + ["/usr/share/sonic/platform/plugins/afulnx_64", "", "/B", "/P", "/N", "/K"], ] BIOS_ID_MAPPING_TABLE = { @@ -157,11 +157,20 @@ def __get_bios_version(self): return bios_version + def __get_cmd(self, image_path): + if self.index == 0: + FW_INSTALL_CMD_LIST[self.index][5] = image_path + elif self.index == 1: + FW_INSTALL_CMD_LIST[self.index][2] = image_path + elif self.index == 2 or self.index == 3: + FW_INSTALL_CMD_LIST[self.index][1] = image_path + return FW_INSTALL_CMD_LIST + def __install_cpld_firmware(self, image_path): result = False - cmd = FW_INSTALL_CMD_LIST[self.index].format(image_path) + cmd = self.__get_cmd(image_path) - ret = os.system(cmd) + ret = call(cmd) if ret == OS_SYSTEM_SUCCESS: result = True @@ -169,9 +178,9 @@ def __install_cpld_firmware(self, image_path): def __install_bmc_firmware(self, image_path): result = False - cmd = FW_INSTALL_CMD_LIST[self.index].format(image_path) + cmd = self.__get_cmd(image_path) - ret = os.system(cmd) + ret = call(cmd) if ret == OS_SYSTEM_SUCCESS: result = True return result @@ -200,8 +209,8 @@ def __install_bios_firmware(self, image_path): 
logging.error("Not support BIOS index %d", self.index) if ret: - cmd = FW_INSTALL_CMD_LIST[self.index].format(image_path) - ret = os.system(cmd) + cmd = self.__get_cmd(image_path) + ret = call(cmd) if ret == OS_SYSTEM_SUCCESS: result = True else: diff --git a/platform/barefoot/sonic-platform-modules-netberg/aurora-610/sonic_platform/qsfp.py b/platform/barefoot/sonic-platform-modules-netberg/aurora-610/sonic_platform/qsfp.py index 0da5af9b39a8..ece9ba8348d5 100644 --- a/platform/barefoot/sonic-platform-modules-netberg/aurora-610/sonic_platform/qsfp.py +++ b/platform/barefoot/sonic-platform-modules-netberg/aurora-610/sonic_platform/qsfp.py @@ -8,6 +8,7 @@ try: import os import logging + import subprocess from ctypes import create_string_buffer from sonic_platform_base.sfp_base import SfpBase from sonic_platform_base.sonic_sfp.sff8436 import sff8436Dom @@ -131,7 +132,7 @@ def __set_attr_value(self, attr_path, value): return True def __is_host(self): - return os.system("docker > /dev/null 2>&1") == 0 + return subprocess.call(["docker"]) == 0 def __get_path_to_port_config_file(self): host_platform_root_path = '/usr/share/sonic/device' diff --git a/platform/barefoot/sonic-platform-modules-netberg/aurora-610/sonic_platform/sfp.py b/platform/barefoot/sonic-platform-modules-netberg/aurora-610/sonic_platform/sfp.py index bc26396279ae..035b9955dce9 100644 --- a/platform/barefoot/sonic-platform-modules-netberg/aurora-610/sonic_platform/sfp.py +++ b/platform/barefoot/sonic-platform-modules-netberg/aurora-610/sonic_platform/sfp.py @@ -9,6 +9,7 @@ try: import os import logging + import subprocess from ctypes import create_string_buffer from sonic_platform_base.sfp_base import SfpBase from sonic_platform_base.sonic_sfp.sff8472 import sff8472Dom @@ -115,7 +116,7 @@ def __set_attr_value(self, attr_path, value): return True def __is_host(self): - return os.system("docker > /dev/null 2>&1") == 0 + return subprocess.call(["docker"]) == 0 def __get_path_to_port_config_file(self): 
host_platform_root_path = '/usr/share/sonic/device' From 558c904021dfa1af935062e312bf5dc0994f7da5 Mon Sep 17 00:00:00 2001 From: DavidZagury <32644413+DavidZagury@users.noreply.github.com> Date: Thu, 27 Oct 2022 01:54:44 +0300 Subject: [PATCH 105/174] Fix CVE-2022-37032 on FRR submodule (#12435) * Fix CVE-2022-37032 on FRR submodule Patch was cherry picked from FRRouting/frr repo - d8d77d3733bc299ed5dd7b44c4d464ba2bfed288 * Fix CVE-2022-37032 on FRR submodule Patch was cherry picked from FRRouting/frr repo - d8d77d3733bc299ed5dd7b44c4d464ba2bfed288 * Update patch version number --- ...e-cannot-accidently-write-into-stack.patch | 108 ++++++++++++++++++ src/sonic-frr/patch/series | 1 + 2 files changed, 109 insertions(+) create mode 100644 src/sonic-frr/patch/0012-Ensure-ospf_apiclient_lsa_originate-cannot-accidently-write-into-stack.patch diff --git a/src/sonic-frr/patch/0012-Ensure-ospf_apiclient_lsa_originate-cannot-accidently-write-into-stack.patch b/src/sonic-frr/patch/0012-Ensure-ospf_apiclient_lsa_originate-cannot-accidently-write-into-stack.patch new file mode 100644 index 000000000000..d46f13a3caf1 --- /dev/null +++ b/src/sonic-frr/patch/0012-Ensure-ospf_apiclient_lsa_originate-cannot-accidently-write-into-stack.patch @@ -0,0 +1,108 @@ +From d8d77d3733bc299ed5dd7b44c4d464ba2bfed288 Mon Sep 17 00:00:00 2001 +From: Donald Sharp +Date: Wed, 20 Jul 2022 16:43:17 -0400 +Subject: [PATCH 1/3] ospfclient: Ensure ospf_apiclient_lsa_originate cannot + accidently write into stack + +Even though OSPF_MAX_LSA_SIZE is quite large and holds the upper bound +on what can be written into a lsa, let's add a small check to ensure +it is not possible to do a bad thing. + +This wins one of the long standing bug awards. 2003! 
+ +Fixes: #11602 +Signed-off-by: Donald Sharp +--- + ospfclient/ospf_apiclient.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/ospfclient/ospf_apiclient.c b/ospfclient/ospf_apiclient.c +index 29f1c0807db..51c8a5b8c06 100644 +--- a/ospfclient/ospf_apiclient.c ++++ b/ospfclient/ospf_apiclient.c +@@ -447,6 +447,12 @@ int ospf_apiclient_lsa_originate(struct ospf_apiclient *oclient, + return OSPF_API_ILLEGALLSATYPE; + } + ++ if ((size_t)opaquelen > sizeof(buf) - sizeof(struct lsa_header)) { ++ fprintf(stderr, "opaquelen(%d) is larger than buf size %zu\n", ++ opaquelen, sizeof(buf)); ++ return OSPF_API_NOMEMORY; ++ } ++ + /* Make a new LSA from parameters */ + lsah = (struct lsa_header *)buf; + lsah->ls_age = 0; + +From 519929cdd47ac4d9f7f33e13922e1a063f69bb24 Mon Sep 17 00:00:00 2001 +From: Donald Sharp +Date: Wed, 20 Jul 2022 16:49:09 -0400 +Subject: [PATCH 2/3] isisd: Ensure rcap is freed in error case + +unpack_tlv_router_cap allocates memory that in the error +case is not being freed. 
+ +Signed-off-by: Donald Sharp +--- + isisd/isis_tlvs.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/isisd/isis_tlvs.c b/isisd/isis_tlvs.c +index f1aae7caf10..dad271225b3 100644 +--- a/isisd/isis_tlvs.c ++++ b/isisd/isis_tlvs.c +@@ -2966,9 +2966,9 @@ static int pack_tlv_router_cap(const struct isis_router_cap *router_cap, + } + + static int unpack_tlv_router_cap(enum isis_tlv_context context, +- uint8_t tlv_type, uint8_t tlv_len, +- struct stream *s, struct sbuf *log, +- void *dest, int indent) ++ uint8_t tlv_type, uint8_t tlv_len, ++ struct stream *s, struct sbuf *log, void *dest, ++ int indent) + { + struct isis_tlvs *tlvs = dest; + struct isis_router_cap *rcap; +@@ -3013,7 +3013,7 @@ static int unpack_tlv_router_cap(enum isis_tlv_context context, + log, indent, + "WARNING: Router Capability subTLV length too large compared to expected size\n"); + stream_forward_getp(s, STREAM_READABLE(s)); +- ++ XFREE(MTYPE_ISIS_TLV, rcap); + return 0; + } + + +From 3c4821679f2362bcd38fcc7803f28a5210441ddb Mon Sep 17 00:00:00 2001 +From: Donald Sharp +Date: Thu, 21 Jul 2022 08:11:58 -0400 +Subject: [PATCH 3/3] bgpd: Make sure hdr length is at a minimum of what is + expected + +Ensure that if the capability length specified is enough data. 
+ +Signed-off-by: Donald Sharp +--- + bgpd/bgp_packet.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c +index 7c92a8d9e83..bcd47e32d45 100644 +--- a/bgpd/bgp_packet.c ++++ b/bgpd/bgp_packet.c +@@ -2440,6 +2440,14 @@ static int bgp_capability_msg_parse(struct peer *peer, uint8_t *pnt, + "%s CAPABILITY has action: %d, code: %u, length %u", + peer->host, action, hdr->code, hdr->length); + ++ if (hdr->length < sizeof(struct capability_mp_data)) { ++ zlog_info( ++ "%s Capability structure is not properly filled out, expected at least %zu bytes but header length specified is %d", ++ peer->host, sizeof(struct capability_mp_data), ++ hdr->length); ++ return BGP_Stop; ++ } ++ + /* Capability length check. */ + if ((pnt + hdr->length + 3) > end) { + zlog_info("%s Capability length error", peer->host); diff --git a/src/sonic-frr/patch/series b/src/sonic-frr/patch/series index c62bd218c7ee..d7d7046ee6f5 100644 --- a/src/sonic-frr/patch/series +++ b/src/sonic-frr/patch/series @@ -11,3 +11,4 @@ cross-compile-changes.patch 0009-ignore-route-from-default-table.patch 0010-zebra-Note-when-the-netlink-DUMP-command-is-interrup.patch 0011-bgpd-enhanced-capability-is-always-turned-on-for-int.patch +0012-Ensure-ospf_apiclient_lsa_originate-cannot-accidently-write-into-stack.patch From a4fe681b08986f0f35c932c74f4e82116dc2d82d Mon Sep 17 00:00:00 2001 From: Dmytro Lytvynenko Date: Thu, 27 Oct 2022 11:08:57 +0300 Subject: [PATCH 106/174] fix missing import error (#12511) Why I did it syseepromd in pmon crashes because of missing import in python script and doesn't get in running state How I did it Fix missing import issue to avoid python script failing How to verify it Boot system and wait till syseepromd gets into running state --- .../sonic-platform-modules-bfn-montara/sonic_platform/eeprom.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/eeprom.py 
b/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/eeprom.py index 4b5c1e3051fb..5d3827e6eba2 100644 --- a/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/eeprom.py +++ b/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/eeprom.py @@ -60,7 +60,7 @@ def tlv_eeprom_get(client): try: self._eeprom_bin = bytearray.fromhex( thrift_try(tlv_eeprom_get, 1).raw_content_hex) - except TApplicationException as e: + except thrift.Thrift.TApplicationException as e: raise RuntimeError("api is not supported") except Exception as e: self._eeprom_bin = bytearray.fromhex( From 844f83171b2c43df4fa6dfcdbd9c09e95c5c5bca Mon Sep 17 00:00:00 2001 From: Liu Shilong Date: Thu, 27 Oct 2022 17:36:04 +0800 Subject: [PATCH 107/174] [action] Use pull_request_target trigger instead to avoid codeQL check approval (#12509) --- .github/workflows/codeql-analysis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 0a8f2dc58682..6478fb99f7a5 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -7,7 +7,7 @@ on: branches: - 'master' - '202[0-9][0-9][0-9]' - pull_request: + pull_request_target: branches: - 'master' - '202[0-9][0-9][0-9]' From 3df031c9b169a7ef34565211d42f396079465d23 Mon Sep 17 00:00:00 2001 From: Liu Shilong Date: Thu, 27 Oct 2022 17:36:43 +0800 Subject: [PATCH 108/174] [ci] Add azp trigger for future release branches. (#12508) --- .azure-pipelines/official-build.yml | 2 +- azure-pipelines.yml | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/.azure-pipelines/official-build.yml b/.azure-pipelines/official-build.yml index 3ebb106a621b..14308292f4b8 100644 --- a/.azure-pipelines/official-build.yml +++ b/.azure-pipelines/official-build.yml @@ -9,7 +9,7 @@ schedules: branches: include: - master - - 202012 + - 202??? 
always: true - cron: "0 4 * * *" displayName: nightly build for release diff --git a/azure-pipelines.yml b/azure-pipelines.yml index de84c66e8d3f..60ef591efe3c 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -7,7 +7,7 @@ trigger: branches: include: - master - - 202012 + - 202??? paths: exclude: - .github @@ -16,8 +16,7 @@ pr: branches: include: - master - - 202012 - - bullseye + - 202??? paths: exclude: - .github From a771a26d99f872bab9b844b71baa94cfd74e18e2 Mon Sep 17 00:00:00 2001 From: xumia <59720581+xumia@users.noreply.github.com> Date: Fri, 28 Oct 2022 01:15:14 +0800 Subject: [PATCH 109/174] [Build] Add the missing debian source bullseye-updates/buster-updates (#12522) Why I did it Add the missing debian source bullseye-updates/buster-updates The build failure as below, it is caused by the docker image debian:bullseye used the version 2.31-13+deb11u5, but the version only available in bullseye-update. --- dockers/docker-base-bullseye/sources.list | 1 + dockers/docker-base-buster/sources.list | 1 + dockers/docker-base-buster/sources.list.arm64 | 1 + dockers/docker-base-buster/sources.list.armhf | 1 + 4 files changed, 4 insertions(+) diff --git a/dockers/docker-base-bullseye/sources.list b/dockers/docker-base-bullseye/sources.list index 4a68761df7c8..c45ef0811f10 100644 --- a/dockers/docker-base-bullseye/sources.list +++ b/dockers/docker-base-bullseye/sources.list @@ -5,6 +5,7 @@ deb [arch=amd64] http://debian-archive.trafficmanager.net/debian/ bullseye main deb-src [arch=amd64] http://debian-archive.trafficmanager.net/debian/ bullseye main contrib non-free deb [arch=amd64] http://debian-archive.trafficmanager.net/debian-security/ bullseye-security main contrib non-free deb-src [arch=amd64] http://debian-archive.trafficmanager.net/debian-security/ bullseye-security main contrib non-free +deb [arch=amd64] http://debian-archive.trafficmanager.net/debian bullseye-updates main contrib non-free deb [arch=amd64] 
http://debian-archive.trafficmanager.net/debian/ bullseye-backports main contrib non-free # Debian mirror supports multiple versions for a package diff --git a/dockers/docker-base-buster/sources.list b/dockers/docker-base-buster/sources.list index 0eef72d9fa2d..473c9eb22e76 100644 --- a/dockers/docker-base-buster/sources.list +++ b/dockers/docker-base-buster/sources.list @@ -5,6 +5,7 @@ deb [arch=amd64] http://debian-archive.trafficmanager.net/debian/ buster main co deb-src [arch=amd64] http://debian-archive.trafficmanager.net/debian/ buster main contrib non-free deb [arch=amd64] http://debian-archive.trafficmanager.net/debian-security/ buster/updates main contrib non-free deb-src [arch=amd64] http://debian-archive.trafficmanager.net/debian-security/ buster/updates main contrib non-free +deb [arch=amd64] http://debian-archive.trafficmanager.net/debian buster-updates main contrib non-free deb [arch=amd64] http://debian-archive.trafficmanager.net/debian/ buster-backports main contrib non-free # Debian mirror supports multiple versions for a package diff --git a/dockers/docker-base-buster/sources.list.arm64 b/dockers/docker-base-buster/sources.list.arm64 index 6375734e99e6..249efc17b6fd 100644 --- a/dockers/docker-base-buster/sources.list.arm64 +++ b/dockers/docker-base-buster/sources.list.arm64 @@ -5,6 +5,7 @@ deb [arch=arm64] http://deb.debian.org/debian buster main contrib non-free deb-src [arch=arm64] http://deb.debian.org/debian buster main contrib non-free deb [arch=arm64] http://security.debian.org buster/updates main contrib non-free deb-src [arch=arm64] http://security.debian.org buster/updates main contrib non-free +deb [arch=arm64] http://deb.debian.org/debian buster-updates main contrib non-free deb [arch=arm64] http://deb.debian.org/debian/ buster-backports main contrib non-free deb [arch=arm64] http://packages.trafficmanager.net/debian/debian buster main contrib non-free deb [arch=arm64] http://packages.trafficmanager.net/debian/debian buster-updates 
main contrib non-free diff --git a/dockers/docker-base-buster/sources.list.armhf b/dockers/docker-base-buster/sources.list.armhf index a03af1a33ac0..ff6d5787b212 100644 --- a/dockers/docker-base-buster/sources.list.armhf +++ b/dockers/docker-base-buster/sources.list.armhf @@ -5,6 +5,7 @@ deb [arch=armhf] http://deb.debian.org/debian buster main contrib non-free deb-src [arch=armhf] http://deb.debian.org/debian buster main contrib non-free deb [arch=armhf] http://security.debian.org buster/updates main contrib non-free deb-src [arch=armhf] http://security.debian.org buster/updates main contrib non-free +deb [arch=armhf] http://deb.debian.org/debian buster-updates main contrib non-free deb [arch=armhf] http://deb.debian.org/debian/ buster-backports main contrib non-free deb [arch=armhf] http://packages.trafficmanager.net/debian/debian buster main contrib non-free deb [arch=armhf] http://packages.trafficmanager.net/debian/debian buster-updates main contrib non-free From 89f76829fc1abc5b71a1f13775cac2ae2a284b3e Mon Sep 17 00:00:00 2001 From: tjchadaga <85581939+tjchadaga@users.noreply.github.com> Date: Thu, 27 Oct 2022 13:31:34 -0700 Subject: [PATCH 110/174] Update BRCM SAI version to 7.1.16.4 (#12515) --- platform/broadcom/sai.mk | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/platform/broadcom/sai.mk b/platform/broadcom/sai.mk index ab8d47148b03..f8f972897fd3 100644 --- a/platform/broadcom/sai.mk +++ b/platform/broadcom/sai.mk @@ -1,5 +1,5 @@ -LIBSAIBCM_XGS_VERSION = 7.1.10.4 -LIBSAIBCM_DNX_VERSION = 7.1.10.4 +LIBSAIBCM_XGS_VERSION = 7.1.16.4 +LIBSAIBCM_DNX_VERSION = 7.1.16.4 LIBSAIBCM_BRANCH_NAME = REL_7.0 LIBSAIBCM_XGS_URL_PREFIX = "https://sonicstorage.blob.core.windows.net/public/sai/bcmsai/$(LIBSAIBCM_BRANCH_NAME)/$(LIBSAIBCM_XGS_VERSION)" LIBSAIBCM_DNX_URL_PREFIX = "https://sonicstorage.blob.core.windows.net/public/sai/bcmsai/$(LIBSAIBCM_BRANCH_NAME)/$(LIBSAIBCM_DNX_VERSION)" From 538e4c0a97864b9b812c5388fc08fb3c14103ed3 Mon Sep 17 
00:00:00 2001 From: Prince Sunny Date: Thu, 27 Oct 2022 14:18:37 -0700 Subject: [PATCH 111/174] [Restapi Yang] Fix issue with multiple certs (#12495) *[Restapi Yang] Fix issue with multiple certs (#12495) --- .../tests/yang_model_tests/tests/restapi.json | 3 +++ .../tests/yang_model_tests/tests_config/restapi.json | 12 ++++++++++++ src/sonic-yang-models/yang-models/sonic-restapi.yang | 2 +- 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/restapi.json b/src/sonic-yang-models/tests/yang_model_tests/tests/restapi.json index e18d6e163ce4..42ce64ba330f 100644 --- a/src/sonic-yang-models/tests/yang_model_tests/tests/restapi.json +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/restapi.json @@ -9,5 +9,8 @@ }, "RESTAPI_TABLE_WITH_VALID_CONFIG": { "desc": "RESTAPI TABLE WITH VALID CONFIG." + }, + "RESTAPI_TABLE_WITH_MULTIPLE_CERTS": { + "desc": "RESTAPI TABLE WITH MULTIPLE CERTS." } } diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/restapi.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/restapi.json index 8ae212ed7026..f2a3b15643c6 100644 --- a/src/sonic-yang-models/tests/yang_model_tests/tests_config/restapi.json +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/restapi.json @@ -34,5 +34,17 @@ } } } + }, + "RESTAPI_TABLE_WITH_MULTIPLE_CERTS": { + "sonic-restapi:sonic-restapi": { + "sonic-restapi:RESTAPI": { + "certs": { + "ca_crt": "/etc/sonic/credentials/ame_root.pem", + "server_crt": "/etc/sonic/credentials/restapiserver.crt", + "server_key": "/etc/sonic/credentials/restapiserver.key", + "client_crt_cname": "client.sonic.net,clientds.prod.net" + } + } + } } } diff --git a/src/sonic-yang-models/yang-models/sonic-restapi.yang b/src/sonic-yang-models/yang-models/sonic-restapi.yang index e049e2fa5c23..af509f2335d8 100644 --- a/src/sonic-yang-models/yang-models/sonic-restapi.yang +++ b/src/sonic-yang-models/yang-models/sonic-restapi.yang @@ -45,7 
+45,7 @@ module sonic-restapi { leaf client_crt_cname { type string { - pattern '([a-zA-Z0-9_\-\.]+)'; + pattern '([a-zA-Z0-9_\-\.]+,)*([a-zA-Z0-9_\-\.]+)'; } description "Client cert name."; } From db7459787c3ec1a364c671f0767402fc276d260b Mon Sep 17 00:00:00 2001 From: isabelmsft <67024108+isabelmsft@users.noreply.github.com> Date: Thu, 27 Oct 2022 18:57:06 -0700 Subject: [PATCH 112/174] Add yang_config_validation to minigraph.py (#12504) --- src/sonic-config-engine/minigraph.py | 3 ++- src/sonic-config-engine/tests/test_cfggen_from_yang.py | 3 ++- src/sonic-config-engine/tests/test_yang_data.json | 5 +++-- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/sonic-config-engine/minigraph.py b/src/sonic-config-engine/minigraph.py index e677981b5d63..74b3e514dc23 100644 --- a/src/sonic-config-engine/minigraph.py +++ b/src/sonic-config-engine/minigraph.py @@ -1450,7 +1450,8 @@ def parse_xml(filename, platform=None, port_config_file=None, asic_name=None, hw 'hostname': hostname, 'hwsku': hwsku, 'type': device_type, - 'synchronous_mode': 'enable' + 'synchronous_mode': 'enable', + 'yang_config_validation': 'disable' } } diff --git a/src/sonic-config-engine/tests/test_cfggen_from_yang.py b/src/sonic-config-engine/tests/test_cfggen_from_yang.py index d673e1e206d7..801340ea4722 100644 --- a/src/sonic-config-engine/tests/test_cfggen_from_yang.py +++ b/src/sonic-config-engine/tests/test_cfggen_from_yang.py @@ -81,7 +81,8 @@ def test_device_metadata(self): 'hostname': 'sonic', 'hwsku': 'Force10-S6000', 'platform': 'x86_64-kvm_x86_64-r0', - 'type': 'LeafRouter' + 'type': 'LeafRouter', + 'yang_config_validation': 'disable' }) diff --git a/src/sonic-config-engine/tests/test_yang_data.json b/src/sonic-config-engine/tests/test_yang_data.json index 3a28872317a6..69c8125f7837 100644 --- a/src/sonic-config-engine/tests/test_yang_data.json +++ b/src/sonic-config-engine/tests/test_yang_data.json @@ -161,7 +161,8 @@ "hostname": "sonic", "hwsku": "Force10-S6000", "type": 
"LeafRouter", - "platform": "x86_64-kvm_x86_64-r0" + "platform": "x86_64-kvm_x86_64-r0", + "yang_config_validation": "disable" } } }, @@ -378,4 +379,4 @@ } } } -} \ No newline at end of file +} From 57e333e40a05ad2fddb9a7948906d84a4a44f438 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Fri, 28 Oct 2022 12:37:51 -0700 Subject: [PATCH 113/174] [sonic-bgpcfgd] Replace getstatusoutput function (#12535) Signed-off-by: maipbui #### Why I did it `getstatusoutput()` function from `subprocess` module has shell injection issue because it includes `shell=True` in the implementation #### How I did it Use `getstatusoutput_noshell()` from sonic_py_common library #### How to verify it Tested in DUT --- src/sonic-bgpcfgd/bgpmon/bgpmon.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/sonic-bgpcfgd/bgpmon/bgpmon.py b/src/sonic-bgpcfgd/bgpmon/bgpmon.py index 928f72fc34b6..b4b97a8d6445 100755 --- a/src/sonic-bgpcfgd/bgpmon/bgpmon.py +++ b/src/sonic-bgpcfgd/bgpmon/bgpmon.py @@ -23,12 +23,12 @@ is a need to perform update or the peer is stale to be removed from the state DB """ -import subprocess import json import os import syslog from swsscommon import swsscommon import time +from sonic_py_common.general import getstatusoutput_noshell PIPE_BATCH_MAX_COUNT = 50 @@ -72,8 +72,8 @@ def update_new_peer_states(self, peer_dict): # Get a new snapshot of BGP neighbors and store them in the "new" location def get_all_neigh_states(self): - cmd = "vtysh -c 'show bgp summary json'" - rc, output = subprocess.getstatusoutput(cmd) + cmd = ["vtysh", "-c", 'show bgp summary json'] + rc, output = getstatusoutput_noshell(cmd) if rc: syslog.syslog(syslog.LOG_ERR, "*ERROR* Failed with rc:{} when execute: {}".format(rc, cmd)) return From f34ca2b6a60f753e1983b552250547866fa10ed6 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Fri, 28 Oct 2022 12:50:04 -0700 Subject: [PATCH 114/174] [sonic-eventd] Replace subprocess with shell=True (#12536) Signed-off-by: maipbui #### Why I did it 
`subprocess` is used with `shell=True`, which is very dangerous for shell injection. #### How I did it remove `shell=True`, use `shell=False` --- src/sonic-eventd/tools/events_volume_test.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/sonic-eventd/tools/events_volume_test.py b/src/sonic-eventd/tools/events_volume_test.py index 73143d483cd8..af830e26510b 100644 --- a/src/sonic-eventd/tools/events_volume_test.py +++ b/src/sonic-eventd/tools/events_volume_test.py @@ -32,17 +32,16 @@ def run_test(process, file, count, duplicate): time.sleep(2) # buffer for events_tool to startup logging.info("Generating logger messages\n") + sub_cmd = ["logger", "-p", "local0.notice", "-t", process, "test", "message"] for i in range(count): - line = "" - state = "up" if duplicate: - line = "{} test message testmessage state up".format(process) + command = sub_cmd + ["testmessage", "state", "up"] else: if i % 2 != 1: - state = "down" - line = "{} test message testmessage{} state {}".format(process, i, state) - command = "logger -p local0.notice -t {}".format(line) - subprocess.run(command, shell=True, stdout=subprocess.PIPE) + command = sub_cmd + ["testmessage"+str(i), "state", "down"] + else: + command = sub_cmd + ["testmessage"+str(i), "state", "up"] + subprocess.run(command, stdout=subprocess.PIPE) time.sleep(2) # some buffer for all events to be published to file read_events_from_file(file, count) From a85b34fd362c7fb62da82414db67f8130c70c2dd Mon Sep 17 00:00:00 2001 From: arlakshm <55814491+arlakshm@users.noreply.github.com> Date: Fri, 28 Oct 2022 18:28:57 -0700 Subject: [PATCH 115/174] update notify-keyspace-events in redis.conf (#12540) Signed-off-by: Arvindsrinivasan Lakshmi Narasimhan arlakshm@microsoft.com Why I did it closes #12343 Today in SONiC the notify-keyspace-events is from DbInterface class when application try do any configdb set. 
In Chassis the chassis_db may not get any configdb set operations, so there is chance this configuration will never be set. So the chassis_db updates from one line card will not be propogated to other linecards, which are doing a psubscribe to get these event. How I did it update the redis.conf to set notify-keyspace-events AKE so that the notify-keyspace-events are set when the redis instance is started How to verify it Test on chassis --- dockers/docker-database/Dockerfile.j2 | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dockers/docker-database/Dockerfile.j2 b/dockers/docker-database/Dockerfile.j2 index e1aa3c4bcd7f..899c111da41b 100644 --- a/dockers/docker-database/Dockerfile.j2 +++ b/dockers/docker-database/Dockerfile.j2 @@ -32,7 +32,8 @@ RUN apt-get clean -y && \ s/^# syslog-enabled no$/syslog-enabled no/; \ s/^# unixsocket/unixsocket/; \ s/redis-server.sock/redis.sock/g; \ - s/^client-output-buffer-limit pubsub [0-9]+mb [0-9]+mb [0-9]+/client-output-buffer-limit pubsub 0 0 0/ \ + s/^client-output-buffer-limit pubsub [0-9]+mb [0-9]+mb [0-9]+/client-output-buffer-limit pubsub 0 0 0/; \ + s/^notify-keyspace-events ""$/notify-keyspace-events AKE/ \ ' /etc/redis/redis.conf COPY ["supervisord.conf.j2", "/usr/share/sonic/templates/"] From 917ad1ffe02e3f9dab399099ed6d635d8c6f7e5c Mon Sep 17 00:00:00 2001 From: Dror Prital <76714716+dprital@users.noreply.github.com> Date: Sun, 30 Oct 2022 09:31:09 +0200 Subject: [PATCH 116/174] [Mellanox] Update SDK/FW to version 4.5.3186/2010.3186 (#12542) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Why I did it Update SDK/FW version - 4.5.3186/2010_3186 in order to have the following changes: New functionality: 1. Added support for 6.5W (Class 8) in ports 49-50, 53-54, 57-58, and 61-62 on SN4600 system Fix the following issues: 1. 
On very rare occasion (~1/100K), during I2C transaction with MMS1V50-WM and MMS1V90-WR modules on SN4700 system, the module may send unexpected stop which violate the I2C specification, possibly affecting the link up flow 2. When running 1GbE speeds on SN4600 system, the port remained active while peer side was closed 3. While toggling the cable with ‘sfputil lpmode on/off’, error msg like “ERR pmon#xcvrd: Receive PMPE error event on module 1: status {X} error type {y}” could be received 4. When toggling many ports of the Spectrum devices while raising 10GbE link up and link maintenance is enabled, the switch may get stuck and may need to be rebooted 5. When trying to reconfigure the Flex Parser header and Flex transition parameters after ISSU, the switch will returned an error even if the configuration was identical to that done before performing the ISSU 6. While moving from lossless to lossy mode while shared headroom was used, reduction of the shared headroom can only be done prior to pool type change and when shared headroom is not utilized 7. SLL configuration is missing in SDK dump 8. If TTL_CMD_COPY is used in Encap direction for a packet with no TTL, then the value passed in the ttl data structure will be used if non-zero (default 255 if zero) 9. PCI calibration changes from a static to a dynamic mechanism 10. Layer 4 port information is not initialized for BFD packet event. To address the issue, remote peer UDP port information was added in BFD packet event 11. 
SDK returned error when FEC mode is set on twisted pair, when FEC was set to None - How I did it Update pointer for the SDK/FW - How to verify it Run regression tests Signed-off-by: dprital --- platform/mellanox/fw.mk | 6 +++--- platform/mellanox/sdk-src/sx-kernel/Switch-SDK-drivers | 2 +- platform/mellanox/sdk.mk | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/platform/mellanox/fw.mk b/platform/mellanox/fw.mk index 92aed0663311..ebfec14fb6e2 100644 --- a/platform/mellanox/fw.mk +++ b/platform/mellanox/fw.mk @@ -27,17 +27,17 @@ else FW_FROM_URL = n endif -MLNX_SPC_FW_VERSION = 13.2010.3170 +MLNX_SPC_FW_VERSION = 13.2010.3186 MLNX_SPC_FW_FILE = fw-SPC-rel-$(subst .,_,$(MLNX_SPC_FW_VERSION))-EVB.mfa $(MLNX_SPC_FW_FILE)_PATH = $(MLNX_FW_BASE_PATH) $(MLNX_SPC_FW_FILE)_URL = $(MLNX_FW_BASE_URL)/$(MLNX_SPC_FW_FILE) -MLNX_SPC2_FW_VERSION = 29.2010.3170 +MLNX_SPC2_FW_VERSION = 29.2010.3186 MLNX_SPC2_FW_FILE = fw-SPC2-rel-$(subst .,_,$(MLNX_SPC2_FW_VERSION))-EVB.mfa $(MLNX_SPC2_FW_FILE)_PATH = $(MLNX_FW_BASE_PATH) $(MLNX_SPC2_FW_FILE)_URL = $(MLNX_FW_BASE_URL)/$(MLNX_SPC2_FW_FILE) -MLNX_SPC3_FW_VERSION = 30.2010.3170 +MLNX_SPC3_FW_VERSION = 30.2010.3186 MLNX_SPC3_FW_FILE = fw-SPC3-rel-$(subst .,_,$(MLNX_SPC3_FW_VERSION))-EVB.mfa $(MLNX_SPC3_FW_FILE)_PATH = $(MLNX_FW_BASE_PATH) $(MLNX_SPC3_FW_FILE)_URL = $(MLNX_FW_BASE_URL)/$(MLNX_SPC3_FW_FILE) diff --git a/platform/mellanox/sdk-src/sx-kernel/Switch-SDK-drivers b/platform/mellanox/sdk-src/sx-kernel/Switch-SDK-drivers index 8b1f1c0f1164..17a3b3089c9c 160000 --- a/platform/mellanox/sdk-src/sx-kernel/Switch-SDK-drivers +++ b/platform/mellanox/sdk-src/sx-kernel/Switch-SDK-drivers @@ -1 +1 @@ -Subproject commit 8b1f1c0f11647749f79ebc4e823c157513067412 +Subproject commit 17a3b3089c9cbca4a62fc7d2fe0894186b801625 diff --git a/platform/mellanox/sdk.mk b/platform/mellanox/sdk.mk index 5a6864bc1e4a..5c3a42ab02e5 100644 --- a/platform/mellanox/sdk.mk +++ b/platform/mellanox/sdk.mk @@ -16,7 +16,7 @@ # 
MLNX_SDK_BASE_PATH = $(PLATFORM_PATH)/sdk-src/sx-kernel/Switch-SDK-drivers/bin/ MLNX_SDK_PKG_BASE_PATH = $(MLNX_SDK_BASE_PATH)/$(BLDENV)/$(CONFIGURED_ARCH)/ -MLNX_SDK_VERSION = 4.5.3168 +MLNX_SDK_VERSION = 4.5.3186 MLNX_SDK_ISSU_VERSION = 101 MLNX_SDK_DEB_VERSION = $(subst -,.,$(subst _,.,$(MLNX_SDK_VERSION))) From 4216f34453cfe88e29aa0b63d984ef9a2f6d0050 Mon Sep 17 00:00:00 2001 From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Date: Sun, 30 Oct 2022 15:54:48 +0800 Subject: [PATCH 117/174] [submodule] Advance sonic-swss-common pointer (#12510) d0fdf62 Check whether a pointer created by dynamic_cast is null before using it. (#689) 2cae742 [Fast/Warm restart] Implement helper class for waiting restart done (#691) --- src/sonic-swss-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-swss-common b/src/sonic-swss-common index bcf48b26361f..d0fdf6274206 160000 --- a/src/sonic-swss-common +++ b/src/sonic-swss-common @@ -1 +1 @@ -Subproject commit bcf48b26361f94e10a0eafc2c49c0bf0f440b2d5 +Subproject commit d0fdf62742069c474e820b68b2748234a740c347 From b841e95824ad79eb9c2fef34ea4b8f9e4e57026f Mon Sep 17 00:00:00 2001 From: Dror Prital <76714716+dprital@users.noreply.github.com> Date: Mon, 31 Oct 2022 10:40:43 +0200 Subject: [PATCH 118/174] [submodule] Advance sonic-swss-common pointer (#12553) Update sonic-swss-common submodule pointer to include the following: * abda263 Make the loglevel persistent by moving the LOGGER table from the LOGLEVEL DB to the CONFIG DB ([#687](https://github.com/sonic-net/sonic-swss-common/pull/687)) Signed-off-by: dprital --- src/sonic-swss-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-swss-common b/src/sonic-swss-common index d0fdf6274206..abda263ba0a5 160000 --- a/src/sonic-swss-common +++ b/src/sonic-swss-common @@ -1 +1 @@ -Subproject commit d0fdf62742069c474e820b68b2748234a740c347 +Subproject commit abda263ba0a584799312366b01dacd86dbaeb7cc From 
d1df2843fa316618f40b1fd2b27fef094273a041 Mon Sep 17 00:00:00 2001 From: Dror Prital <76714716+dprital@users.noreply.github.com> Date: Mon, 31 Oct 2022 10:41:25 +0200 Subject: [PATCH 119/174] [submodule] Advance sonic-utilities pointer (#12544) Update sonic-utilities submodule pointer to include the following: 4a3d49d Fix exception in adding mirror_session when gre_type is absent (#2458) 7e7d05c Update the DBmigrator to support persistent loglevel during warm-upgrade (#2370) c2841b8 [doc]: Update Command-Reference.md (#2444) 254cafc Event Counters CLI (#2449) 2dab0d0 [techsupport] Adding FRR EVPN dumps (#2442) 3c0aece [show][muxcable] add support for show mux firmware version all (#2441) Signed-off-by: dprital --- src/sonic-utilities | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-utilities b/src/sonic-utilities index aedc05ecf23d..4a3d49d359f7 160000 --- a/src/sonic-utilities +++ b/src/sonic-utilities @@ -1 +1 @@ -Subproject commit aedc05ecf23d72fd28ed05dee0d082c307a888d0 +Subproject commit 4a3d49d359f787cae896d65fa882fbaaac6e57f2 From 6bed69af6d7c6e649709c493c83edd82d4f6fbba Mon Sep 17 00:00:00 2001 From: EdenGri <63317673+EdenGri@users.noreply.github.com> Date: Mon, 31 Oct 2022 10:42:12 +0200 Subject: [PATCH 120/174] Add a yang model for the new "Logger" table in the CONFIG DB (#12067) - Why I did it Add the ability to the user to save the loglevel and make it persistent to reboot. - How I did it Move the logger tables from the LOGLEVEL DB to the CONFIG DB. Add new yang model to verify the new config schema. - How to verify it 1. change the orchagent loglevel (for example) -> swssloglevel -c orchagent -l DEBUG 2. save the loglevel -> run config save 3. reboot 4. 
verify that the orchagent log level is still DEBUG ->run run redis-cli -n 4 hgetall "LOGGER|orchagent" --- src/sonic-yang-models/doc/Configuration.md | 29 ++++++++ src/sonic-yang-models/setup.py | 2 + .../tests/files/sample_config_db.json | 14 ++++ .../tests/yang_model_tests/tests/logger.json | 18 +++++ .../yang_model_tests/tests_config/logger.json | 55 ++++++++++++++ .../yang-models/sonic-logger.yang | 73 +++++++++++++++++++ 6 files changed, 191 insertions(+) create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests/logger.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests_config/logger.json create mode 100644 src/sonic-yang-models/yang-models/sonic-logger.yang diff --git a/src/sonic-yang-models/doc/Configuration.md b/src/sonic-yang-models/doc/Configuration.md index d09e2c8bac0b..ba51e8e1aa1b 100644 --- a/src/sonic-yang-models/doc/Configuration.md +++ b/src/sonic-yang-models/doc/Configuration.md @@ -58,6 +58,7 @@ Table of Contents * [VOQ Inband Interface](#voq-inband-interface) * [VXLAN](#vxlan) * [Virtual router](#virtual-router) + * [LOGGER](#logger) * [WRED_PROFILE](#wred_profile) * [PASSWORD_HARDENING](#password_hardening) * [SYSTEM_DEFAULTS table](#systemdefaults-table) @@ -1705,6 +1706,34 @@ The packet action could be: } } ``` + +### Logger + +In this table, the loglevel and logoutput of the components are defined. Each component +will have the component name as its key; and LOGLEVEL and LOGOUTPUT as attributes. +The LOGLEVEL attribute will define the verbosity of the component. +The LOGOUTPUT attribute will define the file of printing the logs. 
+ +``` +{ + "LOGGER": { + "orchagent": { + "LOGLEVEL": "NOTICE", + "LOGOUTPUT": "SYSLOG" + }, + "syncd": { + "LOGLEVEL": "DEBUG", + "LOGOUTPUT": "STDOUT" + }, + "SAI_API_LAG": { + "LOGLEVEL": "ERROR", + "LOGOUTPUT": "STDERR" + } + } +} + +``` + ### PASSWORD_HARDENING Password Hardening, a user password is the key credential used in order to verify the user accessing the switch and acts as the first line of defense in regards to securing the switch. PASSWORD_HARDENING - support the enforce strong policies. diff --git a/src/sonic-yang-models/setup.py b/src/sonic-yang-models/setup.py index 11756fe01614..8c318e583fa0 100644 --- a/src/sonic-yang-models/setup.py +++ b/src/sonic-yang-models/setup.py @@ -169,6 +169,7 @@ def run(self): './yang-models/sonic-peer-switch.yang', './yang-models/sonic-pfc-priority-queue-map.yang', './yang-models/sonic-pfc-priority-priority-group-map.yang', + './yang-models/sonic-logger.yang', './yang-models/sonic-port-qos-map.yang', './yang-models/sonic-static-route.yang', './yang-models/sonic-macsec.yang']), @@ -234,6 +235,7 @@ def run(self): './cvlyang-models/sonic-tc-queue-map.yang', './cvlyang-models/sonic-pfc-priority-queue-map.yang', './cvlyang-models/sonic-pfc-priority-priority-group-map.yang', + './cvlyang-models/sonic-logger.yang', './cvlyang-models/sonic-port-qos-map.yang', './cvlyang-models/sonic-static-route.yang', './cvlyang-models/sonic-macsec.yang']), diff --git a/src/sonic-yang-models/tests/files/sample_config_db.json b/src/sonic-yang-models/tests/files/sample_config_db.json index ecb0be1c4ce8..ab4aae47a1d9 100644 --- a/src/sonic-yang-models/tests/files/sample_config_db.json +++ b/src/sonic-yang-models/tests/files/sample_config_db.json @@ -837,6 +837,20 @@ "tpid": "0x8100" } }, + "LOGGER": { + "orchagent": { + "LOGLEVEL": "NOTICE", + "LOGOUTPUT": "SYSLOG" + }, + "syncd": { + "LOGLEVEL": "NOTICE", + "LOGOUTPUT": "SYSLOG" + }, + "SAI_API_LAG": { + "LOGLEVEL": "SAI_LOG_LEVEL_NOTICE", + "LOGOUTPUT": "SYSLOG" + } + }, "ACL_TABLE": { 
"V4-ACL-TABLE": { "type": "L3", diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/logger.json b/src/sonic-yang-models/tests/yang_model_tests/tests/logger.json new file mode 100644 index 000000000000..523755afd40b --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/logger.json @@ -0,0 +1,18 @@ +{ + "LOGGER_CORRECT_LOGLEVEL_AND_LOGOUTPUT": { + "desc": "LOGGER_CORRECT_LOGLEVEL_AND_LOGOUTPUT_1 no failure." + }, + "LOGGER_WRONG_LOGLEVEL": { + "desc": "LOGGER_WRONG_LOGLEVEL pattern failure.", + "eStr": ["wrong", "LOGLEVEL"] + }, + "LOGGER_WRONG_LOGOUTPUT": { + "desc": "LOGGER_WRONG_LOGOUTPUT pattern failure.", + "eStr": ["wrong", "LOGOUTPUT"] + }, + "LOGGER_MANDATORY_LOGLEVEL": { + "desc": "LOGGER_MANDATORY_LOGOUTPUT no logoutput.", + "estr": ["LOGLEVEL"], + "eStrKey": "Mandatory" + } +} diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/logger.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/logger.json new file mode 100644 index 000000000000..8ee308c8485f --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/logger.json @@ -0,0 +1,55 @@ +{ + "LOGGER_CORRECT_LOGLEVEL_AND_LOGOUTPUT": { + "sonic-logger:sonic-logger": { + "sonic-logger:LOGGER": { + "LOGGER_LIST": [{ + "name": "orchagent", + "LOGLEVEL": "NOTICE", + "LOGOUTPUT": "SYSLOG" + }, + { + "name": "syncd", + "LOGLEVEL": "DEBUG", + "LOGOUTPUT": "STDOUT" + }, + { + "name": "SAI_API_LAG", + "LOGLEVEL": "SAI_LOG_LEVEL_ERROR", + "LOGOUTPUT": "STDERR" + }] + } + } + }, + "LOGGER_WRONG_LOGLEVEL": { + "sonic-logger:sonic-logger": { + "sonic-logger:LOGGER": { + "LOGGER_LIST": [{ + "name": "orchagent", + "LOGLEVEL": "wrong", + "LOGOUTPUT": "SYSLOG" + }] + } + } + }, + "LOGGER_WRONG_LOGOUTPUT": { + "sonic-logger:sonic-logger": { + "sonic-logger:LOGGER": { + "LOGGER_LIST": [{ + "name": "orchagent", + "LOGLEVEL": "NOTICE", + "LOGOUTPUT": "wrong" + }] + } + } + }, + "LOGGER_MANDATORY_LOGLEVEL": { + "sonic-logger:sonic-logger": { + 
"sonic-logger:LOGGER": { + "LOGGER_LIST": [{ + "name": "orchagent", + "LOGOUTPUT": "SYSLOG" + }] + } + } + } +} diff --git a/src/sonic-yang-models/yang-models/sonic-logger.yang b/src/sonic-yang-models/yang-models/sonic-logger.yang new file mode 100644 index 000000000000..f9a89bda49e8 --- /dev/null +++ b/src/sonic-yang-models/yang-models/sonic-logger.yang @@ -0,0 +1,73 @@ +module sonic-logger{ + + yang-version 1.1; + + namespace "http://github.com/Azure/sonic-logger"; + prefix logger; + + import sonic-types { + prefix stypes; + } + + description "Logger Table yang Module for SONiC"; + + typedef swss_loglevel { + type enumeration { + enum EMERG; + enum ALERT; + enum CRIT; + enum ERROR; + enum WARN; + enum NOTICE; + enum INFO; + enum DEBUG; + } + } + + typedef sai_loglevel { + type enumeration { + enum SAI_LOG_LEVEL_CRITICAL; + enum SAI_LOG_LEVEL_ERROR; + enum SAI_LOG_LEVEL_WARN; + enum SAI_LOG_LEVEL_NOTICE; + enum SAI_LOG_LEVEL_INFO; + enum SAI_LOG_LEVEL_DEBUG; + } + } + + container sonic-logger { + + container LOGGER { + + description "Logger table in config_db.json"; + + list LOGGER_LIST { + + key "name"; + + leaf name { + description "Component name in LOGGER table (example for component: orchagent, Syncd, SAI components)."; + type string; + } + + leaf LOGLEVEL { + description "The log verbosity for the component"; + mandatory true; + type union { + type swss_loglevel; + type sai_loglevel; + } + } + + leaf LOGOUTPUT { + type enumeration { + enum SYSLOG; + enum STDOUT; + enum STDERR; + } + default SYSLOG; + } + }/* end of list LOGGER_LIST */ + }/* end of LOGGER container */ + }/* end of sonic-logger container */ +}/* end of sonic-logger module */ From a60ebd387c42c01c3064e04069bf4c194982641f Mon Sep 17 00:00:00 2001 From: zitingguo-ms Date: Mon, 31 Oct 2022 20:08:45 +0800 Subject: [PATCH 121/174] Update BRCM SAI version to 7.1.17.4 (#12546) Signed-off-by: zitingguo-ms Signed-off-by: zitingguo-ms --- platform/broadcom/sai.mk | 4 ++-- 1 file changed, 2 insertions(+), 
2 deletions(-) diff --git a/platform/broadcom/sai.mk b/platform/broadcom/sai.mk index f8f972897fd3..444df18c587c 100644 --- a/platform/broadcom/sai.mk +++ b/platform/broadcom/sai.mk @@ -1,5 +1,5 @@ -LIBSAIBCM_XGS_VERSION = 7.1.16.4 -LIBSAIBCM_DNX_VERSION = 7.1.16.4 +LIBSAIBCM_XGS_VERSION = 7.1.17.4 +LIBSAIBCM_DNX_VERSION = 7.1.17.4 LIBSAIBCM_BRANCH_NAME = REL_7.0 LIBSAIBCM_XGS_URL_PREFIX = "https://sonicstorage.blob.core.windows.net/public/sai/bcmsai/$(LIBSAIBCM_BRANCH_NAME)/$(LIBSAIBCM_XGS_VERSION)" LIBSAIBCM_DNX_URL_PREFIX = "https://sonicstorage.blob.core.windows.net/public/sai/bcmsai/$(LIBSAIBCM_BRANCH_NAME)/$(LIBSAIBCM_DNX_VERSION)" From 934871cce14701e59df0a5f4bc60b13db9423e00 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Mon, 31 Oct 2022 07:43:46 -0700 Subject: [PATCH 122/174] [sonic-config-engine] Replace os.system, replace yaml.load, remove subprocess with shell=True (#12533) Signed-off-by: maipbui #### Why I did it `subprocess` is used with `shell=True`, which is very dangerous for shell injection. 
`os` - not secure against maliciously constructed input and dangerous if used to evaluate dynamic content `yaml.load` can create arbitrary Python objects #### How I did it Replace `os` by `subprocess`, remove `shell=True` Use `yaml.safe_load()` #### How to verify it Pass UT --- src/sonic-config-engine/sonic-cfggen | 2 +- src/sonic-config-engine/tests/common_utils.py | 21 +- src/sonic-config-engine/tests/test_cfggen.py | 202 +++++++++--------- .../tests/test_cfggen_from_yang.py | 54 +++-- .../tests/test_cfggen_pfx_filter.py | 13 +- .../tests/test_cfggen_platformJson.py | 26 +-- .../tests/test_cfggen_t2_chassis_fe.py | 21 +- src/sonic-config-engine/tests/test_frr.py | 23 +- src/sonic-config-engine/tests/test_j2files.py | 167 +++++++-------- .../tests/test_j2files_t2_chassis_fe.py | 20 +- .../tests/test_minigraph_case.py | 95 ++++---- .../tests/test_multinpu_cfggen.py | 117 +++++----- 12 files changed, 377 insertions(+), 384 deletions(-) diff --git a/src/sonic-config-engine/sonic-cfggen b/src/sonic-config-engine/sonic-cfggen index d5358f633dbf..287640d8a119 100755 --- a/src/sonic-config-engine/sonic-cfggen +++ b/src/sonic-config-engine/sonic-cfggen @@ -351,7 +351,7 @@ def main(): if yaml.__version__ >= "5.1": additional_data = yaml.full_load(stream) else: - additional_data = yaml.load(stream) + additional_data = yaml.safe_load(stream) deep_update(data, FormatConverter.to_deserialized(additional_data)) if args.additional_data is not None: diff --git a/src/sonic-config-engine/tests/common_utils.py b/src/sonic-config-engine/tests/common_utils.py index 72325ecbc1e0..d2be32c8544d 100644 --- a/src/sonic-config-engine/tests/common_utils.py +++ b/src/sonic-config-engine/tests/common_utils.py @@ -5,7 +5,6 @@ import sys import subprocess import argparse -import shlex PY3x = sys.version_info >= (3, 0) PYvX_DIR = "py3" if PY3x else "py2" @@ -47,7 +46,7 @@ def __init__(self, path=YANG_MODELS_DIR): self.yang_parser = sonic_yang.SonicYang(path) self.yang_parser.loadYangModel() 
self.test_dir = os.path.dirname(os.path.realpath(__file__)) - self.script_file = PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 'sonic-cfggen') + self.script_file = [PYTHON_INTERPRETTER, os.path.join(self.test_dir, '..', 'sonic-cfggen')] def validate(self, argument): """ @@ -62,22 +61,22 @@ def validate(self, argument): parser.add_argument("-p", "--port-config", help="port config file, used with -m or -k", nargs='?', const=None) parser.add_argument("-S", "--hwsku-config", help="hwsku config file, used with -p and -m or -k", nargs='?', const=None) parser.add_argument("-j", "--json", help="additional json file input, used with -p, -S and -m or -k", nargs='?', const=None) - args, unknown = parser.parse_known_args(shlex.split(argument)) + args, unknown = parser.parse_known_args(argument) print('\n Validating yang schema') - cmd = self.script_file + ' -m ' + args.minigraph + cmd = self.script_file + ['-m', args.minigraph] if args.hwsku is not None: - cmd += ' -k ' + args.hwsku + cmd += ['-k', args.hwsku] if args.hwsku_config is not None: - cmd += ' -S ' + args.hwsku_config + cmd += ['-S', args.hwsku_config] if args.port_config is not None: - cmd += ' -p ' + args.port_config + cmd += ['-p', args.port_config] if args.namespace is not None: - cmd += ' -n ' + args.namespace + cmd += ['-n', args.namespace] if args.json is not None: - cmd += ' -j ' + args.json - cmd += ' --print-data' - output = subprocess.check_output(cmd, shell=True).decode() + cmd += ['-j', args.json] + cmd += ['--print-data'] + output = subprocess.check_output(cmd).decode() try: self.yang_parser.loadData(configdbJson=json.loads(output)) self.yang_parser.validate_data_tree() diff --git a/src/sonic-config-engine/tests/test_cfggen.py b/src/sonic-config-engine/tests/test_cfggen.py index bcc0625ec8a8..3b979d4a52ec 100644 --- a/src/sonic-config-engine/tests/test_cfggen.py +++ b/src/sonic-config-engine/tests/test_cfggen.py @@ -1,7 +1,6 @@ import json import subprocess import os - import 
tests.common_utils as utils from unittest import TestCase @@ -16,7 +15,7 @@ class TestCfgGen(TestCase): def setUp(self): self.yang = utils.YangWrapper() self.test_dir = os.path.dirname(os.path.realpath(__file__)) - self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 'sonic-cfggen') + self.script_file = [utils.PYTHON_INTERPRETTER, os.path.join(self.test_dir, '..', 'sonic-cfggen')] self.sample_graph = os.path.join(self.test_dir, 'sample_graph.xml') self.sample_graph_t0 = os.path.join(self.test_dir, 't0-sample-graph.xml') self.sample_graph_simple = os.path.join(self.test_dir, 'simple-sample-graph.xml') @@ -52,13 +51,12 @@ def tearDown(self): pass def run_script(self, argument, check_stderr=False, verbose=False): - print('\n Running sonic-cfggen ' + argument) + print('\n Running sonic-cfggen ' + ' '.join(argument)) self.assertTrue(self.yang.validate(argument)) - if check_stderr: - output = subprocess.check_output(self.script_file + ' ' + argument, stderr=subprocess.STDOUT, shell=True) + output = subprocess.check_output(self.script_file + argument, stderr=subprocess.STDOUT) else: - output = subprocess.check_output(self.script_file + ' ' + argument, shell=True) + output = subprocess.check_output(self.script_file + argument) if utils.PY3x: output = output.decode() @@ -73,52 +71,52 @@ def run_script(self, argument, check_stderr=False, verbose=False): return output def test_dummy_run(self): - argument = '' + argument = [] output = self.run_script(argument) self.assertEqual(output, '') def test_device_desc(self): - argument = '-v "DEVICE_METADATA[\'localhost\'][\'hwsku\']" -M "' + self.sample_device_desc + '"' + argument = ['-v', "DEVICE_METADATA[\'localhost\'][\'hwsku\']", "-M", self.sample_device_desc] output = self.run_script(argument) self.assertEqual(output.strip(), 'ACS-MSN2700') def test_device_desc_mgmt_ip(self): - argument = '-v "(MGMT_INTERFACE.keys()|list)[0]" -M "' + self.sample_device_desc + '"' + argument = ['-v', 
"(MGMT_INTERFACE.keys()|list)[0]", '-M', self.sample_device_desc] output = self.run_script(argument) self.assertEqual(output.strip(), "('eth0', '10.0.1.5/28')") def test_minigraph_hostname(self): - argument = '-v "DEVICE_METADATA[\'localhost\'][\'hostname\']" -m "' + self.sample_graph + '" -p "' + self.port_config + '"' + argument = ['-v', "DEVICE_METADATA[\'localhost\'][\'hostname\']", '-m', self.sample_graph, "-p", self.port_config] output = self.run_script(argument) self.assertEqual(output.strip(), 'OCPSCH01040DDLF') def test_minigraph_sku(self): - argument = '-v "DEVICE_METADATA[\'localhost\'][\'hwsku\']" -m "' + self.sample_graph + '" -p "' + self.port_config + '"' + argument = ['-v', "DEVICE_METADATA[\'localhost\'][\'hwsku\']", '-m', self.sample_graph, '-p', self.port_config] output = self.run_script(argument) self.assertEqual(output.strip(), 'Force10-Z9100') def test_minigraph_region(self): - argument = '-v "DEVICE_METADATA[\'localhost\'][\'region\']" -m "' + self.sample_graph_metadata + '" -p "' + self.port_config + '"' + argument = ['-v', "DEVICE_METADATA[\'localhost\'][\'region\']", '-m', self.sample_graph_metadata, '-p', self.port_config] output = self.run_script(argument) self.assertEqual(output.strip(), 'usfoo') def test_minigraph_cloudtype(self): - argument = '-v "DEVICE_METADATA[\'localhost\'][\'cloudtype\']" -m "' + self.sample_graph_metadata + '" -p "' + self.port_config + '"' + argument = ['-v', "DEVICE_METADATA[\'localhost\'][\'cloudtype\']", '-m', self.sample_graph_metadata, '-p', self.port_config] output = self.run_script(argument) self.assertEqual(output.strip(), 'Public') def test_minigraph_resourcetype(self): - argument = '-v "DEVICE_METADATA[\'localhost\'][\'resource_type\']" -m "' + self.sample_graph_metadata + '" -p "' + self.port_config + '"' + argument = ['-v', "DEVICE_METADATA[\'localhost\'][\'resource_type\']", '-m', self.sample_graph_metadata, '-p', self.port_config] output = self.run_script(argument) self.assertEqual(output.strip(), 
'resource_type_x') def test_minigraph_downstream_subrole(self): - argument = '-v "DEVICE_METADATA[\'localhost\'][\'downstream_subrole\']" -m "' + self.sample_graph_metadata + '" -p "' + self.port_config + '"' + argument = ['-v', "DEVICE_METADATA[\'localhost\'][\'downstream_subrole\']", '-m', self.sample_graph_metadata, '-p', self.port_config] output = self.run_script(argument) self.assertEqual(output.strip(), 'downstream_subrole_y') def test_print_data(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" --print-data' + argument = ['-m', self.sample_graph, '-p', self.port_config, '--print-data'] output = self.run_script(argument) self.assertTrue(len(output.strip()) > 0) @@ -127,29 +125,29 @@ def test_jinja_expression(self, graph=None, port_config=None, expected_router_ty graph = self.sample_graph if port_config is None: port_config = self.port_config - argument = '-m "' + graph + '" -p "' + port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'type\']"' + argument = ['-m', graph, '-p', port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'type\']"] output = self.run_script(argument) self.assertEqual(output.strip(), expected_router_type) def test_additional_json_data(self): - argument = '-a \'{"key1":"value1"}\' -v key1' + argument = ['-a', '{"key1":"value1"}', '-v', 'key1'] output = self.run_script(argument) self.assertEqual(output.strip(), 'value1') def test_additional_json_data_level1_key(self): - argument = '-a \'{"k1":{"k11":"v11","k12":"v12"}, "k2":{"k22":"v22"}}\' --var-json k1' + argument = ['-a', '{"k1":{"k11":"v11","k12":"v12"}, "k2":{"k22":"v22"}}', '--var-json', 'k1'] output = self.run_script(argument) self.assertEqual(utils.to_dict(output.strip()), utils.to_dict('{\n "k11": "v11", \n "k12": "v12"\n}')) def test_additional_json_data_level2_key(self): - argument = '-a \'{"k1":{"k11":"v11","k12":"v12"},"k2":{"k22":"v22"}}\' --var-json k1 -K k11' + argument = ['-a', '{"k1":{"k11":"v11","k12":"v12"},"k2":{"k22":"v22"}}', 
'--var-json', 'k1', '-K', 'k11'] output = self.run_script(argument) self.assertEqual(utils.to_dict(output.strip()), utils.to_dict('{\n "k11": "v11"\n}')) def test_var_json_data(self, **kwargs): graph_file = kwargs.get('graph_file', self.sample_graph_simple) tag_mode = kwargs.get('tag_mode', 'untagged') - argument = '-m "' + graph_file + '" -p "' + self.port_config + '" --var-json VLAN_MEMBER' + argument = ['-m', graph_file, '-p', self.port_config, '--var-json', 'VLAN_MEMBER'] output = self.run_script(argument) if tag_mode == "tagged": self.assertEqual( @@ -175,20 +173,20 @@ def test_var_json_data(self, **kwargs): ) def test_read_yaml(self): - argument = '-v yml_item -y ' + os.path.join(self.test_dir, 'test.yml') + argument = ['-v', 'yml_item', '-y', os.path.join(self.test_dir, 'test.yml')] output = self.run_script(argument) self.assertEqual(output.strip(), '[\'value1\', \'value2\']') def test_render_template(self): - argument = '-y ' + os.path.join(self.test_dir, 'test.yml') + ' -t ' + os.path.join(self.test_dir, 'test.j2') + argument = ['-y', os.path.join(self.test_dir, 'test.yml'), '-t', os.path.join(self.test_dir, 'test.j2')] output = self.run_script(argument) self.assertEqual(output.strip(), 'value1\nvalue2') def test_template_batch_mode(self): - argument = '-y ' + os.path.join(self.test_dir, 'test.yml') - argument += ' -a \'{"key1":"value"}\'' - argument += ' -t ' + os.path.join(self.test_dir, 'test.j2') + ',' + self.output_file - argument += ' -t ' + os.path.join(self.test_dir, 'test2.j2') + ',' + self.output2_file + argument = ['-y', os.path.join(self.test_dir, 'test.yml')] + argument += ['-a', '{"key1":"value"}'] + argument += ['-t', os.path.join(self.test_dir, 'test.j2') + ',' + self.output_file] + argument += ['-t', os.path.join(self.test_dir, 'test2.j2') + ',' + self.output2_file] output = self.run_script(argument) assert(os.path.exists(self.output_file)) assert(os.path.exists(self.output2_file)) @@ -199,10 +197,10 @@ def test_template_batch_mode(self): 
def test_template_json_batch_mode(self): data = {"key1_1":"value1_1", "key1_2":"value1_2", "key2_1":"value2_1", "key2_2":"value2_2"} - argument = " -a '{0}'".format(repr(data).replace('\'', '"')) - argument += ' -t ' + os.path.join(self.test_dir, 'sample-template-1.json.j2') + ",config-db" - argument += ' -t ' + os.path.join(self.test_dir, 'sample-template-2.json.j2') + ",config-db" - argument += ' --print-data' + argument = ["-a", '{0}'.format(repr(data).replace('\'', '"'))] + argument += ['-t', os.path.join(self.test_dir, 'sample-template-1.json.j2') + ",config-db"] + argument += ['-t', os.path.join(self.test_dir, 'sample-template-2.json.j2') + ",config-db"] + argument += ['--print-data'] output = self.run_script(argument) output_data = json.loads(output) for key, value in data.items(): @@ -212,7 +210,7 @@ def test_template_json_batch_mode(self): # it is not at all intuitive what that ordering should be. Could make it # more robust by adding better parsing logic. def test_minigraph_acl(self): - argument = '-m "' + self.sample_graph_t0 + '" -p "' + self.port_config + '" -v ACL_TABLE' + argument = ['-m', self.sample_graph_t0, '-p', self.port_config, '-v', 'ACL_TABLE'] output = self.run_script(argument, True, True) self.assertEqual( utils.to_dict(output.strip().replace("Warning: Ignoring Control Plane ACL NTP_ACL without type\n", '')), @@ -236,7 +234,7 @@ def test_minigraph_acl(self): # self.assertEqual(output.strip(), "{'everflow0': {'src_ip': '10.1.0.32', 'dst_ip': '2.2.2.2'}}") def test_minigraph_mgmt_ports(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v MGMT_PORT' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v','MGMT_PORT'] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -244,13 +242,13 @@ def test_minigraph_mgmt_ports(self): ) def test_minigraph_interfaces(self): - argument = '-m "' + self.sample_graph_simple + '" -p "' + self.port_config + '" -v 
"INTERFACE.keys()|list"' + argument = ['-m', self.sample_graph_simple, '-p', self.port_config, '-v', "INTERFACE.keys()|list"] output = self.run_script(argument) self.assertEqual(output.strip(), "[('Ethernet0', '10.0.0.58/31'), 'Ethernet0', ('Ethernet0', 'FC00::75/126')]") def test_minigraph_vlans(self, **kwargs): graph_file = kwargs.get('graph_file', self.sample_graph_simple) - argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v VLAN' + argument = ['-m', graph_file, '-p', self.port_config, '-v', 'VLAN'] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -266,7 +264,7 @@ def test_minigraph_vlans(self, **kwargs): def test_minigraph_vlan_members(self, **kwargs): graph_file = kwargs.get('graph_file', self.sample_graph_simple) tag_mode = kwargs.get('tag_mode', 'untagged') - argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v VLAN_MEMBER' + argument = ['-m', graph_file, '-p', self.port_config, '-v', 'VLAN_MEMBER'] output = self.run_script(argument) if tag_mode == "tagged": self.assertEqual( @@ -293,12 +291,12 @@ def test_minigraph_vlan_members(self, **kwargs): def test_minigraph_vlan_interfaces(self, **kwargs): graph_file = kwargs.get('graph_file', self.sample_graph_simple) - argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "VLAN_INTERFACE.keys()|list"' + argument = ['-m', graph_file, '-p', self.port_config, '-v', "VLAN_INTERFACE.keys()|list"] output = self.run_script(argument) self.assertEqual(output.strip(), "[('Vlan1000', '192.168.0.1/27'), 'Vlan1000']") def test_minigraph_ecmp_fg_nhg(self): - argument = '-m "' + self.ecmp_graph + '" -p "' + self.mlnx_port_config + '" -v FG_NHG' + argument = ['-m', self.ecmp_graph, '-p', self.mlnx_port_config, '-v', 'FG_NHG'] output = self.run_script(argument) print(output.strip()) self.assertEqual(utils.to_dict(output.strip()), @@ -308,7 +306,7 @@ def test_minigraph_ecmp_fg_nhg(self): )) def test_minigraph_ecmp_members(self): - argument = '-m "' 
+ self.ecmp_graph + '" -p "' + self.mlnx_port_config + '" -v "FG_NHG_MEMBER.keys()|list|sort"' + argument = ['-m', self.ecmp_graph, '-p', self.mlnx_port_config, '-v', "FG_NHG_MEMBER.keys()|list|sort"] output = self.run_script(argument) self.assertEqual(output.strip(), "['200.200.200.1', '200.200.200.10', '200.200.200.2', '200.200.200.3', '200.200.200.4', '200.200.200.5'," " '200.200.200.6', '200.200.200.7', '200.200.200.8', '200.200.200.9', '200:200:200:200::1', '200:200:200:200::10'," @@ -316,7 +314,7 @@ def test_minigraph_ecmp_members(self): " '200:200:200:200::7', '200:200:200:200::8', '200:200:200:200::9']") def test_minigraph_ecmp_neighbors(self): - argument = '-m "' + self.ecmp_graph + '" -p "' + self.mlnx_port_config + '" -v "NEIGH.keys()|list|sort"' + argument = ['-m', self.ecmp_graph, '-p', self.mlnx_port_config, '-v', "NEIGH.keys()|list|sort"] output = self.run_script(argument) self.assertEqual(output.strip(), "['Vlan31|200.200.200.1', 'Vlan31|200.200.200.10', 'Vlan31|200.200.200.2', 'Vlan31|200.200.200.3'," " 'Vlan31|200.200.200.4', 'Vlan31|200.200.200.5', 'Vlan31|200.200.200.6', 'Vlan31|200.200.200.7'," @@ -326,7 +324,7 @@ def test_minigraph_ecmp_neighbors(self): def test_minigraph_portchannels(self, **kwargs): graph_file = kwargs.get('graph_file', self.sample_graph_simple) - argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v PORTCHANNEL' + argument = ['-m', graph_file, '-p', self.port_config, '-v', 'PORTCHANNEL'] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -334,7 +332,7 @@ def test_minigraph_portchannels(self, **kwargs): ) def test_minigraph_portchannel_with_more_member(self): - argument = '-m "' + self.sample_graph_pc_test + '" -p "' + self.port_config + '" -v PORTCHANNEL' + argument = ['-m', self.sample_graph_pc_test, '-p', self.port_config, '-v', 'PORTCHANNEL'] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -342,7 +340,7 @@ def 
test_minigraph_portchannel_with_more_member(self): ) def test_minigraph_portchannel_members(self): - argument = '-m "' + self.sample_graph_pc_test + '" -p "' + self.port_config + '" -v "PORTCHANNEL_MEMBER.keys()|list"' + argument = ['-m', self.sample_graph_pc_test, '-p', self.port_config, '-v', "PORTCHANNEL_MEMBER.keys()|list"] output = self.run_script(argument) self.assertEqual( utils.liststr_to_dict(output.strip()), @@ -350,7 +348,7 @@ def test_minigraph_portchannel_members(self): ) def test_minigraph_portchannel_interfaces(self): - argument = '-m "' + self.sample_graph_simple + '" -p "' + self.port_config + '" -v "PORTCHANNEL_INTERFACE.keys()|list"' + argument = ['-m', self.sample_graph_simple, '-p', self.port_config, '-v', "PORTCHANNEL_INTERFACE.keys()|list"] output = self.run_script(argument) self.assertEqual( utils.liststr_to_dict(output.strip()), @@ -358,7 +356,7 @@ def test_minigraph_portchannel_interfaces(self): ) def test_minigraph_neighbors(self): - argument = '-m "' + self.sample_graph_t0 + '" -p "' + self.port_config + '" -v "DEVICE_NEIGHBOR[\'Ethernet124\']"' + argument = ['-m', self.sample_graph_t0, '-p', self.port_config, '-v', "DEVICE_NEIGHBOR[\'Ethernet124\']"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -369,7 +367,7 @@ def test_minigraph_neighbors(self): # it is not at all intuitive what that ordering should be. Could make it # more robust by adding better parsing logic. 
def test_minigraph_extra_neighbors(self): - argument = '-m "' + self.sample_graph_t0 + '" -p "' + self.port_config + '" -v DEVICE_NEIGHBOR' + argument = ['-m', self.sample_graph_t0, '-p', self.port_config, '-v', 'DEVICE_NEIGHBOR'] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -383,7 +381,7 @@ def test_minigraph_extra_neighbors(self): ) def test_minigraph_port_description(self): - argument = '-m "' + self.sample_graph_t0 + '" -p "' + self.port_config + '" -v "PORT[\'Ethernet124\']"' + argument = ['-m', self.sample_graph_t0, '-p', self.port_config, '-v', "PORT[\'Ethernet124\']"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -392,7 +390,7 @@ def test_minigraph_port_description(self): def test_minigraph_port_fec_disabled(self): # Test for FECDisabled - argument = '-m "' + self.sample_graph_t0 + '" -p "' + self.port_config + '" -v "PORT[\'Ethernet4\']"' + argument = ['-m', self.sample_graph_t0, '-p', self.port_config, '-v', "PORT[\'Ethernet4\']"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -401,7 +399,7 @@ def test_minigraph_port_fec_disabled(self): def test_minigraph_port_autonegotiation(self): # Test with a port_config.ini file which doesn't have an 'autoneg' column - argument = '-m "' + self.sample_graph_t0 + '" -p "' + self.port_config + '" -v "PORT"' + argument = ['-m', self.sample_graph_t0, '-p', self.port_config, '-v', "PORT"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -442,7 +440,7 @@ def test_minigraph_port_autonegotiation(self): ) # Test with a port_config.ini file which has an 'autoneg' column - argument = '-m "' + self.sample_graph_t0 + '" -p "' + self.port_config_autoneg + '" -v "PORT"' + argument = ['-m', self.sample_graph_t0, '-p', self.port_config_autoneg, '-v', "PORT"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -483,7 +481,7 @@ def 
test_minigraph_port_autonegotiation(self): ) def test_minigraph_port_rs(self): - argument = '-m "' + self.sample_graph_t0 + '" -p "' + self.port_config + '" -v "PORT[\'Ethernet124\']"' + argument = ['-m', self.sample_graph_t0, '-p', self.port_config, '-v', "PORT[\'Ethernet124\']"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -491,7 +489,7 @@ def test_minigraph_port_rs(self): ) def test_minigraph_bgp(self): - argument = '-m "' + self.sample_graph_bgp_speaker + '" -p "' + self.port_config + '" -v "BGP_NEIGHBOR[\'10.0.0.59\']"' + argument = ['-m', self.sample_graph_bgp_speaker, '-p', self.port_config, '-v', "BGP_NEIGHBOR[\'10.0.0.59\']"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -499,7 +497,7 @@ def test_minigraph_bgp(self): ) def test_minigraph_peers_with_range(self): - argument = "-m " + self.sample_graph_bgp_speaker + " -p " + self.port_config + " -v \"BGP_PEER_RANGE.values()|list\"" + argument = ["-m", self.sample_graph_bgp_speaker, "-p", self.port_config, "-v", "BGP_PEER_RANGE.values()|list"] output = self.run_script(argument) self.assertEqual( utils.liststr_to_dict(output.strip()), @@ -507,24 +505,24 @@ def test_minigraph_peers_with_range(self): ) def test_minigraph_deployment_id(self): - argument = '-m "' + self.sample_graph_bgp_speaker + '" -p "' + self.port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'deployment_id\']"' + argument = ['-m', self.sample_graph_bgp_speaker, '-p', self.port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'deployment_id\']"] output = self.run_script(argument) self.assertEqual(output.strip(), "1") def test_minigraph_deployment_id_null(self): - argument = '-m "' + self.sample_graph_deployment_id + '" -p "' + self.port_config + '" -v "DEVICE_METADATA[\'localhost\']"' + argument = ['-m', self.sample_graph_deployment_id, '-p', self.port_config, '-v', "DEVICE_METADATA[\'localhost\']"] output = self.run_script(argument) 
self.assertNotIn('deployment_id', output.strip()) def test_minigraph_ethernet_interfaces(self, **kwargs): graph_file = kwargs.get('graph_file', self.sample_graph_simple) - argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "PORT[\'Ethernet8\']"' + argument = ['-m', graph_file, '-p', self.port_config, '-v', "PORT[\'Ethernet8\']"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), utils.to_dict("{'lanes': '37,38,39,40', 'description': 'Interface description', 'pfc_asym': 'off', 'mtu': '9100', 'alias': 'fortyGigE0/8', 'admin_status': 'up', 'speed': '1000', 'tpid': '0x8100'}") ) - argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "PORT[\'Ethernet12\']"' + argument = ['-m', graph_file, '-p', self.port_config, '-v', "PORT[\'Ethernet12\']"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -532,7 +530,7 @@ def test_minigraph_ethernet_interfaces(self, **kwargs): ) def test_minigraph_neighbor_interfaces(self): - argument = '-m "' + self.sample_graph_simple_case + '" -p "' + self.port_config + '" -v "PORT"' + argument = ['-m', self.sample_graph_simple_case, '-p', self.port_config, '-v', "PORT"] output = self.run_script(argument) self.assertEqual( @@ -575,7 +573,7 @@ def test_minigraph_neighbor_interfaces(self): def test_minigraph_neighbor_interfaces_config_db(self): # test to check if PORT table is retrieved from config_db - argument = '-m "' + self.sample_graph_simple_case + '" -p "' + self.port_config + '" -v "PORT"' + argument = ['-m', self.sample_graph_simple_case, '-p', self.port_config, '-v', "PORT"] output = self.run_script(argument) self.assertEqual( @@ -618,7 +616,7 @@ def test_minigraph_neighbor_interfaces_config_db(self): def test_minigraph_extra_ethernet_interfaces(self, **kwargs): graph_file = kwargs.get('graph_file', self.sample_graph_simple) - argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "PORT"' + argument = ['-m', graph_file, 
'-p', self.port_config, '-v', "PORT"] output = self.run_script(argument) self.assertEqual( @@ -666,7 +664,7 @@ def test_minigraph_extra_ethernet_interfaces(self, **kwargs): # self.assertEqual(output.strip(), "{'everflow0': {'src_ip': '10.1.0.32', 'dst_ip': '10.0.100.1'}}") def test_metadata_tacacs(self): - argument = '-m "' + self.sample_graph_metadata + '" -p "' + self.port_config + '" -v "TACPLUS_SERVER"' + argument = ['-m', self.sample_graph_metadata, '-p', self.port_config, '-v', "TACPLUS_SERVER"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -674,24 +672,24 @@ def test_metadata_tacacs(self): ) def test_metadata_ntp(self): - argument = '-m "' + self.sample_graph_metadata + '" -p "' + self.port_config + '" -v "NTP_SERVER"' + argument = ['-m', self.sample_graph_metadata, '-p', self.port_config, '-v', "NTP_SERVER"] output = self.run_script(argument) self.assertEqual(utils.to_dict(output.strip()), utils.to_dict("{'10.0.10.1': {}, '10.0.10.2': {}}")) def test_minigraph_vnet(self, **kwargs): graph_file = kwargs.get('graph_file', self.sample_graph_simple) - argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "VNET"' + argument = ['-m', graph_file, '-p', self.port_config, '-v', "VNET"] output = self.run_script(argument) self.assertEqual(output.strip(), "") def test_minigraph_vxlan(self, **kwargs): graph_file = kwargs.get('graph_file', self.sample_graph_simple) - argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "VXLAN_TUNNEL"' + argument = ['-m', graph_file, '-p', self.port_config, '-v', "VXLAN_TUNNEL"] output = self.run_script(argument) self.assertEqual(output.strip(), "") def test_minigraph_bgp_mon(self): - argument = '-m "' + self.sample_graph_simple + '" -p "' + self.port_config + '" -v "BGP_MONITORS"' + argument = ['-m', self.sample_graph_simple, '-p', self.port_config, '-v', "BGP_MONITORS"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -699,7 
+697,7 @@ def test_minigraph_bgp_mon(self): ) def test_minigraph_bgp_voq_chassis_peer(self): - argument = '-m "' + self.sample_graph_simple + '" -p "' + self.port_config + '" -v "BGP_VOQ_CHASSIS_NEIGHBOR[\'10.2.0.21\']"' + argument = ['-m', self.sample_graph_simple, '-p', self.port_config, '-v', "BGP_VOQ_CHASSIS_NEIGHBOR[\'10.2.0.21\']"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -707,7 +705,7 @@ def test_minigraph_bgp_voq_chassis_peer(self): ) # make sure VoQChassisInternal value of false is honored - argument = '-m "' + self.sample_graph_simple + '" -p "' + self.port_config + '" -v "BGP_VOQ_CHASSIS_NEIGHBOR[\'10.0.0.57\']"' + argument = ['-m', self.sample_graph_simple, '-p', self.port_config, '-v', "BGP_VOQ_CHASSIS_NEIGHBOR[\'10.0.0.57\']"] output = self.run_script(argument) self.assertEqual(output.strip(), "") @@ -727,14 +725,14 @@ def test_minigraph_backend_acl_leaf(self, check_stderr=True): try: print('\n Change device type to %s' % (BACKEND_LEAF_ROUTER)) if check_stderr: - output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (TOR_ROUTER, BACKEND_LEAF_ROUTER, self.sample_backend_graph), stderr=subprocess.STDOUT, shell=True) + output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (TOR_ROUTER, BACKEND_LEAF_ROUTER), self.sample_backend_graph], stderr=subprocess.STDOUT) else: - output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (TOR_ROUTER, BACKEND_LEAF_ROUTER, self.sample_backend_graph), shell=True) + output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (TOR_ROUTER, BACKEND_LEAF_ROUTER), self.sample_backend_graph]) self.test_jinja_expression(self.sample_backend_graph, self.port_config, BACKEND_LEAF_ROUTER) # ACL_TABLE should contain EVERFLOW related entries - argument = '-m "' + self.sample_backend_graph + '" -p "' + self.port_config + '" -v "ACL_TABLE"' + argument = ['-m', self.sample_backend_graph, '-p', self.port_config, '-v', "ACL_TABLE"] output = self.run_script(argument) 
sample_output = utils.to_dict(output.strip()).keys() assert 'DATAACL' not in sample_output, sample_output @@ -743,9 +741,9 @@ def test_minigraph_backend_acl_leaf(self, check_stderr=True): finally: print('\n Change device type back to %s' % (TOR_ROUTER)) if check_stderr: - output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (BACKEND_LEAF_ROUTER, TOR_ROUTER, self.sample_backend_graph), stderr=subprocess.STDOUT, shell=True) + output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (BACKEND_LEAF_ROUTER, TOR_ROUTER), self.sample_backend_graph], stderr=subprocess.STDOUT) else: - output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (BACKEND_LEAF_ROUTER, TOR_ROUTER, self.sample_backend_graph), shell=True) + output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (BACKEND_LEAF_ROUTER, TOR_ROUTER), self.sample_backend_graph]) self.test_jinja_expression(self.sample_backend_graph, self.port_config, TOR_ROUTER) @@ -753,23 +751,23 @@ def test_minigraph_sub_port_no_vlan_member(self, check_stderr=True): try: print('\n Change device type to %s' % (BACKEND_LEAF_ROUTER)) if check_stderr: - output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (LEAF_ROUTER, BACKEND_LEAF_ROUTER, self.sample_graph), stderr=subprocess.STDOUT, shell=True) + output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (LEAF_ROUTER, BACKEND_LEAF_ROUTER), self.sample_graph], stderr=subprocess.STDOUT) else: - output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (LEAF_ROUTER, BACKEND_LEAF_ROUTER, self.sample_graph), shell=True) + output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (LEAF_ROUTER, BACKEND_LEAF_ROUTER), self.sample_graph]) self.test_jinja_expression(self.sample_graph, self.port_config, BACKEND_LEAF_ROUTER) self.verify_no_vlan_member() finally: print('\n Change device type back to %s' % (LEAF_ROUTER)) if check_stderr: - output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (BACKEND_LEAF_ROUTER, LEAF_ROUTER, self.sample_graph), 
stderr=subprocess.STDOUT, shell=True) + output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (BACKEND_LEAF_ROUTER, LEAF_ROUTER), self.sample_graph], stderr=subprocess.STDOUT) else: - output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (BACKEND_LEAF_ROUTER, LEAF_ROUTER, self.sample_graph), shell=True) + output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (BACKEND_LEAF_ROUTER, LEAF_ROUTER), self.sample_graph]) self.test_jinja_expression(self.sample_graph, self.port_config, LEAF_ROUTER) def verify_no_vlan_member(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "VLAN_MEMBER"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "VLAN_MEMBER"] output = self.run_script(argument) self.assertEqual(output.strip(), "{}") @@ -787,33 +785,33 @@ def verify_sub_intf(self, **kwargs): try: print('\n Change device type to %s' % (BACKEND_TOR_ROUTER)) if check_stderr: - output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (TOR_ROUTER, BACKEND_TOR_ROUTER, graph_file), stderr=subprocess.STDOUT, shell=True) + output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (TOR_ROUTER, BACKEND_TOR_ROUTER), graph_file], stderr=subprocess.STDOUT) else: - output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (TOR_ROUTER, BACKEND_TOR_ROUTER, graph_file), shell=True) + output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (TOR_ROUTER, BACKEND_TOR_ROUTER), graph_file]) self.test_jinja_expression(graph_file, self.port_config, BACKEND_TOR_ROUTER) # INTERFACE table does not exist - argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "INTERFACE"' + argument = ['-m', graph_file, '-p', self.port_config, '-v', "INTERFACE"] output = self.run_script(argument) self.assertEqual(output.strip(), "") # PORTCHANNEL_INTERFACE table does not exist - argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "PORTCHANNEL_INTERFACE"' + argument = ['-m', graph_file, '-p', self.port_config, 
'-v', "PORTCHANNEL_INTERFACE"] output = self.run_script(argument) self.assertEqual(output.strip(), "") # SLB and BGP Monitor table does not exist - argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "BGP_PEER_RANGE"' + argument = ['-m', graph_file, '-p', self.port_config, '-v', "BGP_PEER_RANGE"] output = self.run_script(argument) self.assertEqual(output.strip(), "{}") - argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "BGP_MONITORS"' + argument = ['-m', graph_file, '-p', self.port_config, '-v', "BGP_MONITORS"] output = self.run_script(argument) self.assertEqual(output.strip(), "{}") # ACL_TABLE should not contain EVERFLOW related entries - argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "ACL_TABLE"' + argument = ['-m', graph_file, '-p', self.port_config, '-v', "ACL_TABLE"] output = self.run_script(argument) sample_output = utils.to_dict(output.strip()).keys() assert 'DATAACL' in sample_output, sample_output @@ -829,7 +827,7 @@ def verify_sub_intf(self, **kwargs): self.test_minigraph_vxlan(graph_file=graph_file) # VLAN_SUB_INTERFACE - argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v VLAN_SUB_INTERFACE' + argument = ['-m', graph_file, '-p', self.port_config, '-v', 'VLAN_SUB_INTERFACE'] output = self.run_script(argument) print(output.strip()) # not a usecase to parse SubInterfaces under PortChannel @@ -862,24 +860,24 @@ def verify_sub_intf(self, **kwargs): finally: print('\n Change device type back to %s' % (TOR_ROUTER)) if check_stderr: - output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (BACKEND_TOR_ROUTER, TOR_ROUTER, graph_file), stderr=subprocess.STDOUT, shell=True) + output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (BACKEND_TOR_ROUTER, TOR_ROUTER), graph_file], stderr=subprocess.STDOUT) else: - output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (BACKEND_TOR_ROUTER, TOR_ROUTER, graph_file), shell=True) + output = subprocess.check_output(["sed", "-i", 
's/%s/%s/g' % (BACKEND_TOR_ROUTER, TOR_ROUTER), graph_file]) self.test_jinja_expression(graph_file, self.port_config, TOR_ROUTER) def test_show_run_acl(self): - argument = '-a \'{"key1":"value"}\' --var-json ACL_RULE' + argument = ['-a', '{"key1":"value"}', '--var-json', 'ACL_RULE'] output = self.run_script(argument) self.assertEqual(output, '') def test_show_run_interfaces(self): - argument = '-a \'{"key1":"value"}\' --var-json INTERFACE' + argument = ['-a', '{"key1":"value"}', '--var-json', 'INTERFACE'] output = self.run_script(argument) self.assertEqual(output, '') def test_minigraph_voq_metadata(self): - argument = "-j {} -m {} -p {} --var-json DEVICE_METADATA".format(self.macsec_profile, self.sample_graph_voq, self.voq_port_config) + argument = ["-j", self.macsec_profile, "-m", self.sample_graph_voq, "-p", self.voq_port_config, "--var-json", "DEVICE_METADATA"] output = json.loads(self.run_script(argument)) self.assertEqual(output['localhost']['asic_name'], 'Asic0') self.assertEqual(output['localhost']['switch_id'], '0') @@ -887,7 +885,7 @@ def test_minigraph_voq_metadata(self): self.assertEqual(output['localhost']['max_cores'], '16') def test_minigraph_voq_system_ports(self): - argument = "-j {} -m {} -p {} --var-json SYSTEM_PORT".format(self.macsec_profile, self.sample_graph_voq, self.voq_port_config) + argument = ["-j", self.macsec_profile, "-m", self.sample_graph_voq, "-p", self.voq_port_config, "--var-json", "SYSTEM_PORT"] self.assertDictEqual( json.loads(self.run_script(argument)), { @@ -906,7 +904,7 @@ def test_minigraph_voq_system_ports(self): ) def test_minigraph_voq_port_macsec_enabled(self): - argument = '-j "' + self.macsec_profile + '" -m "' + self.sample_graph_voq + '" -p "' + self.voq_port_config + '" -v "PORT[\'Ethernet0\']"' + argument = ['-j', self.macsec_profile, '-m', self.sample_graph_voq, '-p', self.voq_port_config, '-v', "PORT[\'Ethernet0\']"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -914,7 
+912,7 @@ def test_minigraph_voq_port_macsec_enabled(self): ) def test_minigraph_voq_inband_interface_port(self): - argument = "-j {} -m {} -p {} --var-json VOQ_INBAND_INTERFACE".format(self.macsec_profile, self.sample_graph_voq, self.voq_port_config) + argument = ["-j", self.macsec_profile, "-m", self.sample_graph_voq, "-p", self.voq_port_config, "--var-json", "VOQ_INBAND_INTERFACE"] output = self.run_script(argument) output_dict = utils.to_dict(output.strip()) self.assertDictEqual( @@ -927,7 +925,7 @@ def test_minigraph_voq_inband_interface_port(self): ) def test_minigraph_voq_inband_port(self): - argument = "-j {} -m {} -p {} --var-json PORT".format(self.macsec_profile, self.sample_graph_voq, self.voq_port_config) + argument = ["-j", self.macsec_profile, "-m", self.sample_graph_voq, "-p", self.voq_port_config, "--var-json", "PORT"] output = self.run_script(argument) output_dict = utils.to_dict(output.strip()) self.assertDictEqual( @@ -945,7 +943,7 @@ def test_minigraph_voq_inband_port(self): }) def test_minigraph_voq_recirc_ports(self): - argument = "-j {} -m {} -p {} --var-json PORT".format(self.macsec_profile, self.sample_graph_voq, self.voq_port_config) + argument = ["-j", self.macsec_profile, "-m", self.sample_graph_voq, "-p", self.voq_port_config, "--var-json", "PORT"] output = self.run_script(argument) output_dict = utils.to_dict(output.strip()) self.assertDictEqual( @@ -963,7 +961,7 @@ def test_minigraph_voq_recirc_ports(self): }) def test_minigraph_dhcp(self): - argument = '-m "' + self.sample_graph_simple_case + '" -p "' + self.port_config + '" -v DHCP_RELAY' + argument = ['-m', self.sample_graph_simple_case, '-p', self.port_config, '-v', 'DHCP_RELAY'] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -974,7 +972,7 @@ def test_minigraph_dhcp(self): ) def test_minigraph_bgp_packet_chassis_peer(self): - argument = '-m "' + self.packet_chassis_graph + '" -p "' + self.packet_chassis_port_ini + '" -n "' + "asic1" + '" -v 
"BGP_INTERNAL_NEIGHBOR[\'8.0.0.1\']"' + argument = ['-m', self.packet_chassis_graph, '-p', self.packet_chassis_port_ini, '-n', "asic1", '-v', "BGP_INTERNAL_NEIGHBOR[\'8.0.0.1\']"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -982,14 +980,14 @@ def test_minigraph_bgp_packet_chassis_peer(self): ) def test_minigraph_bgp_packet_chassis_static_route(self): - argument = '-m "' + self.packet_chassis_graph + '" -p "' + self.packet_chassis_port_ini + '" -v "STATIC_ROUTE"' + argument = ['-m', self.packet_chassis_graph, '-p', self.packet_chassis_port_ini, '-v', "STATIC_ROUTE"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), utils.to_dict("{'8.0.0.1/32': {'nexthop': '192.168.1.2,192.168.2.2', 'ifname': 'PortChannel40,PortChannel50', 'advertise':'false'}}") ) - argument = '-m "' + self.packet_chassis_graph + '" -p "' + self.packet_chassis_port_ini + '" -n "' + "asic1" + '" -v "STATIC_ROUTE"' + argument = ['-m', self.packet_chassis_graph, '-p', self.packet_chassis_port_ini, '-n', "asic1", '-v', "STATIC_ROUTE"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -997,7 +995,7 @@ def test_minigraph_bgp_packet_chassis_static_route(self): ) def test_minigraph_bgp_packet_chassis_vlan_subintf(self): - argument = '-m "' + self.packet_chassis_graph + '" -p "' + self.packet_chassis_port_ini + '" -n "' + "asic1" + '" -v "VLAN_SUB_INTERFACE"' + argument = ['-m', self.packet_chassis_graph, '-p', self.packet_chassis_port_ini, '-n', "asic1", '-v', "VLAN_SUB_INTERFACE"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -1005,14 +1003,14 @@ def test_minigraph_bgp_packet_chassis_vlan_subintf(self): ) def test_minigraph_voq_400g_zr_port_config(self): - argument = "-j {} -m {} -p {} -v \"PORT[\'Ethernet4\']\"".format(self.macsec_profile, self.sample_graph_voq, self.voq_port_config) + argument = ["-j", self.macsec_profile, "-m", self.sample_graph_voq, 
"-p", self.voq_port_config, "-v" "PORT[\'Ethernet4\']"] output = self.run_script(argument) output_dict = utils.to_dict(output.strip()) self.assertEqual(output_dict['tx_power'], '-10') self.assertEqual(output_dict['laser_freq'], 195875) def test_minigraph_packet_chassis_400g_zr_port_config(self): - argument = "-m {} -p {} -n asic1 -v \"PORT[\'Ethernet13\']\"".format(self.packet_chassis_graph, self.packet_chassis_port_ini) + argument = ["-m", self.packet_chassis_graph, "-p", self.packet_chassis_port_ini, "-n", "asic1", "-v", "PORT[\'Ethernet13\']"] output = self.run_script(argument) output_dict = utils.to_dict(output.strip()) self.assertEqual(output_dict['tx_power'], '7.5') diff --git a/src/sonic-config-engine/tests/test_cfggen_from_yang.py b/src/sonic-config-engine/tests/test_cfggen_from_yang.py index 801340ea4722..ddc77c8b0311 100644 --- a/src/sonic-config-engine/tests/test_cfggen_from_yang.py +++ b/src/sonic-config-engine/tests/test_cfggen_from_yang.py @@ -4,6 +4,7 @@ import os import tests.common_utils as utils +from sonic_py_common.general import getstatusoutput_noshell #TODO: Remove this fixuture once SONiC moves to python3.x @@ -21,20 +22,18 @@ class TestCfgGen(object): @pytest.fixture(autouse=True) def setup_teardown(self): self.test_dir = os.path.dirname(os.path.realpath(__file__)) - self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join( - self.test_dir, '..', 'sonic-cfggen') + self.script_file = [utils.PYTHON_INTERPRETTER, os.path.join( + self.test_dir, '..', 'sonic-cfggen')] self.sample_yang_file = os.path.join(self.test_dir, 'test_yang_data.json') def run_script(self, arg, check_stderr=False): - print('\n Running sonic-cfggen ' + arg) + print('\n Running sonic-cfggen ', arg) if check_stderr: - output = subprocess.check_output(self.script_file + ' ' + arg, - stderr=subprocess.STDOUT, - shell=True) + output = subprocess.check_output(self.script_file + arg, + stderr=subprocess.STDOUT) else: - output = subprocess.check_output(self.script_file + ' 
' + arg, - shell=True) + output = subprocess.check_output(self.script_file + arg) if utils.PY3x: output = output.decode() @@ -48,32 +47,31 @@ def run_script(self, arg, check_stderr=False): return output def run_diff(self, file1, file2): - return subprocess.check_output('diff -u {} {} || true'.format( - file1, file2), - shell=True) + _, output = getstatusoutput_noshell(['diff', '-u', file1, file2]) + return output def run_script_with_yang_arg(self, arg, check_stderr=False): - args = "-Y {} {}".format(self.sample_yang_file, arg) + args = ["-Y", self.sample_yang_file] + arg return self.run_script(arg=args, check_stderr=check_stderr) def test_print_data(self): - arg = "--print-data" + arg = ["--print-data"] output = self.run_script_with_yang_arg(arg) assert len(output.strip()) > 0 def test_jinja_expression(self, expected_router_type='LeafRouter'): - arg = " -v \"DEVICE_METADATA[\'localhost\'][\'type\']\" " + arg = ["-v", "DEVICE_METADATA[\'localhost\'][\'type\']"] output = self.run_script_with_yang_arg(arg) assert output.strip() == expected_router_type def test_hwsku(self): - arg = "-v \"DEVICE_METADATA[\'localhost\'][\'hwsku\']\" " + arg = ["-v", "DEVICE_METADATA[\'localhost\'][\'hwsku\']"] output = self.run_script_with_yang_arg(arg) assert output.strip() == "Force10-S6000" def test_device_metadata(self): - arg = "--var-json \"DEVICE_METADATA\" " + arg = ["--var-json", "DEVICE_METADATA"] output = json.loads(self.run_script_with_yang_arg(arg)) assert (output['localhost'] == {\ 'bgp_asn': '65100', @@ -87,7 +85,7 @@ def test_device_metadata(self): def test_port_table(self): - arg = "--var-json \"PORT\"" + arg = ["--var-json", "PORT"] output = json.loads(self.run_script_with_yang_arg(arg)) assert(output == \ {'Ethernet0': {'admin_status': 'up', 'alias': 'eth0', 'description': 'Ethernet0', 'fec': 'rs', 'lanes': '65, 66', 'mtu': '9100', 'pfc_asym': 'on', 'speed': '40000'}, @@ -101,7 +99,7 @@ def test_port_table(self): }) def test_portchannel_table(self): - arg = "--var-json 
\"PORTCHANNEL\"" + arg = ["--var-json", "PORTCHANNEL"] output = json.loads(self.run_script_with_yang_arg(arg)) assert(output == \ {'PortChannel1001': {'admin_status': 'up', @@ -116,7 +114,7 @@ def test_portchannel_table(self): 'mtu': '9100'}}) def test_portchannel_member_table(self): - arg = "--var-json \"PORTCHANNEL_MEMBER\"" + arg = ["--var-json", "PORTCHANNEL_MEMBER"] output = json.loads(self.run_script_with_yang_arg(arg)) assert(output ==\ { "PortChannel1001|Ethernet0": {}, @@ -126,7 +124,7 @@ def test_portchannel_member_table(self): }) def test_interface_table(self): - arg = "--var-json \"INTERFACE\"" + arg = ["--var-json", "INTERFACE"] output = json.loads(self.run_script_with_yang_arg(arg)) assert(output =={\ "Ethernet8": {}, @@ -150,7 +148,7 @@ def test_interface_table(self): }) def test_portchannel_interface_table(self): - arg = "--var-json \"PORTCHANNEL_INTERFACE\"" + arg = ["--var-json", "PORTCHANNEL_INTERFACE"] output = json.loads(self.run_script_with_yang_arg(arg)) assert(output =={\ "PortChannel1001|10.0.0.1/31": {}, @@ -158,7 +156,7 @@ def test_portchannel_interface_table(self): }) def test_loopback_table(self): - arg = "--var-json \"LOOPBACK_INTERFACE\"" + arg = ["--var-json", "LOOPBACK_INTERFACE"] output = json.loads(self.run_script_with_yang_arg(arg)) assert(output == {\ "Loopback0": {}, @@ -173,7 +171,7 @@ def test_loopback_table(self): }) def test_acl_table(self): - arg = "--var-json \"ACL_TABLE\"" + arg = ["--var-json", "ACL_TABLE"] output = json.loads(self.run_script_with_yang_arg(arg)) assert(output == {\ 'DATAACL': {'policy_desc': 'DATAACL', 'ports': ['PortChannel1001','PortChannel1002'], 'stage': 'ingress', 'type': 'L3'}, @@ -183,7 +181,7 @@ def test_acl_table(self): 'SSH_ONLY': {'policy_desc': 'SSH_ONLY', 'services': ['SSH'], 'stage': 'ingress', 'type': 'CTRLPLANE'}}) def test_acl_rule(self): - arg = "--var-json \"ACL_RULE\"" + arg = ["--var-json", "ACL_RULE"] output = json.loads(self.run_script_with_yang_arg(arg)) assert(output == {\ 
"DATAACL|Rule1": { @@ -201,7 +199,7 @@ def test_acl_rule(self): }) def test_vlan_table(self): - arg = "--var-json \"VLAN\"" + arg = ["--var-json", "VLAN"] output = json.loads(self.run_script_with_yang_arg(arg)) assert(output == {\ "Vlan100": { @@ -218,7 +216,7 @@ def test_vlan_table(self): }) def test_vlan_interface(self): - arg = "--var-json \"VLAN_INTERFACE\"" + arg = ["--var-json", "VLAN_INTERFACE"] output = json.loads(self.run_script_with_yang_arg(arg)) assert(output == {\ "Vlan100": {}, @@ -233,7 +231,7 @@ def test_vlan_interface(self): }) def test_vlan_member(self): - arg = "--var-json \"VLAN_MEMBER\"" + arg = ["--var-json", "VLAN_MEMBER"] output = json.loads(self.run_script_with_yang_arg(arg)) assert(output == {\ "Vlan100|Ethernet24": { @@ -245,7 +243,7 @@ def test_vlan_member(self): }) def test_vlan_crm(self): - arg = "--var-json \"CRM\"" + arg = ["--var-json", "CRM"] output = json.loads(self.run_script_with_yang_arg(arg)) assert(output == {\ "Config": { diff --git a/src/sonic-config-engine/tests/test_cfggen_pfx_filter.py b/src/sonic-config-engine/tests/test_cfggen_pfx_filter.py index 1ac2b7f7f5f3..b3cad3aa2152 100644 --- a/src/sonic-config-engine/tests/test_cfggen_pfx_filter.py +++ b/src/sonic-config-engine/tests/test_cfggen_pfx_filter.py @@ -9,13 +9,14 @@ class TestPfxFilter(TestCase): def test_comprehensive(self): # Generate output data_dir = "tests/data/pfx_filter" - cmd = "{} ./sonic-cfggen -j {}/param_1.json -t {}/tmpl_1.txt.j2 > /tmp/result_1.txt".format( - utils.PYTHON_INTERPRETTER, data_dir, data_dir - ) - subprocess.check_output(cmd, shell=True) + output_file = "/tmp/result_1.txt" + cmd = [utils.PYTHON_INTERPRETTER, "./sonic-cfggen", "-j", "{}/param_1.json".format(data_dir), "-t", "{}/tmpl_1.txt.j2".format(data_dir)] + output = subprocess.check_output(cmd, universal_newlines=True) + with open(output_file, 'w') as f: + f.write(output) # Compare outputs - cmd = "diff -u tests/data/pfx_filter/result_1.txt /tmp/result_1.txt" + cmd = ["diff", "-u", 
"tests/data/pfx_filter/result_1.txt", "/tmp/result_1.txt"] try: - res = subprocess.check_output(cmd, shell=True) + res = subprocess.check_output(cmd) except subprocess.CalledProcessError as e: assert False, "Wrong output. return code: %d, Diff: %s" % (e.returncode, e.output) diff --git a/src/sonic-config-engine/tests/test_cfggen_platformJson.py b/src/sonic-config-engine/tests/test_cfggen_platformJson.py index 0af361718b99..5d39fd2f3660 100644 --- a/src/sonic-config-engine/tests/test_cfggen_platformJson.py +++ b/src/sonic-config-engine/tests/test_cfggen_platformJson.py @@ -3,7 +3,7 @@ import os import subprocess import sys - +import ast import tests.common_utils as utils from unittest import TestCase @@ -21,17 +21,17 @@ class TestCfgGenPlatformJson(TestCase): def setUp(self): self.test_dir = os.path.dirname(os.path.realpath(__file__)) - self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 'sonic-cfggen') + self.script_file = [utils.PYTHON_INTERPRETTER, os.path.join(self.test_dir, '..', 'sonic-cfggen')] self.platform_sample_graph = os.path.join(self.test_dir, 'platform-sample-graph.xml') self.platform_json = os.path.join(self.test_dir, 'sample_platform.json') self.hwsku_json = os.path.join(self.test_dir, 'sample_hwsku.json') def run_script(self, argument, check_stderr=False): - print('\n Running sonic-cfggen ' + argument) + print('\n Running sonic-cfggen ', argument) if check_stderr: - output = subprocess.check_output(self.script_file + ' ' + argument, stderr=subprocess.STDOUT, shell=True) + output = subprocess.check_output(self.script_file + argument, stderr=subprocess.STDOUT) else: - output = subprocess.check_output(self.script_file + ' ' + argument, shell=True) + output = subprocess.check_output(self.script_file + argument) if utils.PY3x: output = output.decode() @@ -44,18 +44,18 @@ def run_script(self, argument, check_stderr=False): return output def test_dummy_run(self): - argument = '' + argument = [] output = 
self.run_script(argument) self.assertEqual(output, '') def test_print_data(self): - argument = '-m "' + self.platform_sample_graph + '" -p "' + self.platform_json + '" --print-data' + argument = ['-m', self.platform_sample_graph, '-p', self.platform_json, '--print-data'] output = self.run_script(argument) self.assertTrue(len(output.strip()) > 0) # Check whether all interfaces present or not as per platform.json def test_platform_json_interfaces_keys(self): - argument = '-m "' + self.platform_sample_graph + '" -p "' + self.platform_json + '" -S "' + self.hwsku_json + '" -v "PORT.keys()|list"' + argument = ['-m', self.platform_sample_graph, '-p', self.platform_json, '-S', self.hwsku_json, '-v', "PORT.keys()|list"] output = self.run_script(argument) self.maxDiff = None @@ -71,24 +71,24 @@ def test_platform_json_interfaces_keys(self): 'Ethernet139', 'Ethernet140', 'Ethernet141', 'Ethernet142', 'Ethernet144' ] - self.assertEqual(sorted(eval(output.strip())), sorted(expected)) + self.assertEqual(sorted(ast.literal_eval(output.strip())), sorted(expected)) # Check specific Interface with it's proper configuration as per platform.json def test_platform_json_specific_ethernet_interfaces(self): - argument = '-m "' + self.platform_sample_graph + '" -p "' + self.platform_json + '" -S "' + self.hwsku_json + '" -v "PORT[\'Ethernet8\']"' + argument = ['-m', self.platform_sample_graph, '-p', self.platform_json, '-S', self.hwsku_json, '-v', "PORT[\'Ethernet8\']"] output = self.run_script(argument) self.maxDiff = None expected = "{'index': '3', 'lanes': '8', 'description': 'Eth3/1', 'mtu': '9100', 'alias': 'Eth3/1', 'pfc_asym': 'off', 'speed': '25000', 'tpid': '0x8100'}" self.assertEqual(utils.to_dict(output.strip()), utils.to_dict(expected)) - argument = '-m "' + self.platform_sample_graph + '" -p "' + self.platform_json + '" -S "' + self.hwsku_json + '" -v "PORT[\'Ethernet112\']"' + argument = ['-m', self.platform_sample_graph, '-p', self.platform_json, '-S', self.hwsku_json, '-v', 
"PORT[\'Ethernet112\']"] output = self.run_script(argument) self.maxDiff = None expected = "{'index': '29', 'lanes': '112', 'description': 'Eth29/1', 'mtu': '9100', 'alias': 'Eth29/1', 'pfc_asym': 'off', 'speed': '25000', 'tpid': '0x8100'}" self.assertEqual(utils.to_dict(output.strip()), utils.to_dict(expected)) - argument = '-m "' + self.platform_sample_graph + '" -p "' + self.platform_json + '" -S "' + self.hwsku_json + '" -v "PORT[\'Ethernet4\']"' + argument = ['-m', self.platform_sample_graph, '-p', self.platform_json, '-S', self.hwsku_json, '-v', "PORT[\'Ethernet4\']"] output = self.run_script(argument) self.maxDiff = None expected = "{'index': '2', 'lanes': '4,5', 'description': 'Eth2/1', 'admin_status': 'up', 'mtu': '9100', 'alias': 'Eth2/1', 'pfc_asym': 'off', 'speed': '50000', 'tpid': '0x8100'}" @@ -97,7 +97,7 @@ def test_platform_json_specific_ethernet_interfaces(self): # Check all Interface with it's proper configuration as per platform.json def test_platform_json_all_ethernet_interfaces(self): - argument = '-m "' + self.platform_sample_graph + '" -p "' + self.platform_json + '" -S "' + self.hwsku_json + '" -v "PORT"' + argument = ['-m', self.platform_sample_graph, '-p', self.platform_json, '-S', self.hwsku_json, '-v', "PORT"] output = self.run_script(argument) self.maxDiff = None diff --git a/src/sonic-config-engine/tests/test_cfggen_t2_chassis_fe.py b/src/sonic-config-engine/tests/test_cfggen_t2_chassis_fe.py index a3d6d02a7ff2..9bf3fc8a3ed5 100644 --- a/src/sonic-config-engine/tests/test_cfggen_t2_chassis_fe.py +++ b/src/sonic-config-engine/tests/test_cfggen_t2_chassis_fe.py @@ -1,6 +1,5 @@ import os import subprocess - import tests.common_utils as utils from unittest import TestCase @@ -10,18 +9,18 @@ class TestCfgGenT2ChassisFe(TestCase): def setUp(self): self.test_dir = os.path.dirname(os.path.realpath(__file__)) - self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 'sonic-cfggen') + self.script_file = 
[utils.PYTHON_INTERPRETTER, os.path.join(self.test_dir, '..', 'sonic-cfggen')] self.sample_graph_t2_chassis_fe = os.path.join(self.test_dir, 't2-chassis-fe-graph.xml') self.sample_graph_t2_chassis_fe_vni = os.path.join(self.test_dir, 't2-chassis-fe-graph-vni.xml') self.sample_graph_t2_chassis_fe_pc = os.path.join(self.test_dir, 't2-chassis-fe-graph-pc.xml') self.t2_chassis_fe_port_config = os.path.join(self.test_dir, 't2-chassis-fe-port-config.ini') def run_script(self, argument, check_stderr=False): - print('\n Running sonic-cfggen ' + argument) + print('\n Running sonic-cfggen ' + ' '.join(argument)) if check_stderr: - output = subprocess.check_output(self.script_file + ' ' + argument, stderr=subprocess.STDOUT, shell=True) + output = subprocess.check_output(self.script_file + argument, stderr=subprocess.STDOUT) else: - output = subprocess.check_output(self.script_file + ' ' + argument, shell=True) + output = subprocess.check_output(self.script_file + argument) if utils.PY3x: output = output.decode() @@ -34,12 +33,12 @@ def run_script(self, argument, check_stderr=False): return output def test_minigraph_t2_chassis_fe_type(self): - argument = '-m "' + self.sample_graph_t2_chassis_fe + '" -p "' + self.t2_chassis_fe_port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'type\']"' + argument = ['-m', self.sample_graph_t2_chassis_fe, '-p', self.t2_chassis_fe_port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'type\']"] output = self.run_script(argument) self.assertEqual(output.strip(), 'SpineChassisFrontendRouter') def test_minigraph_t2_chassis_fe_interfaces(self): - argument = '-m "' + self.sample_graph_t2_chassis_fe + '" -p "' + self.t2_chassis_fe_port_config + '" -v "INTERFACE"' + argument = ['-m', self.sample_graph_t2_chassis_fe, '-p', self.t2_chassis_fe_port_config, '-v', "INTERFACE"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -53,7 +52,7 @@ def test_minigraph_t2_chassis_fe_interfaces(self): ) ) def 
test_minigraph_t2_chassis_fe_pc_interfaces(self): - argument = '-m "' + self.sample_graph_t2_chassis_fe_pc + '" -p "' + self.t2_chassis_fe_port_config + '" -v "PORTCHANNEL_INTERFACE"' + argument = ['-m', self.sample_graph_t2_chassis_fe_pc, '-p', self.t2_chassis_fe_port_config, '-v', "PORTCHANNEL_INTERFACE"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -70,17 +69,17 @@ def test_minigraph_t2_chassis_fe_pc_interfaces(self): # Test a minigraph file where VNI is not specified # Default VNI is 8000 def test_minigraph_t2_chassis_fe_vnet_default(self): - argument = '-m "' + self.sample_graph_t2_chassis_fe + '" -p "' + self.t2_chassis_fe_port_config + '" -v "VNET"' + argument = ['-m', self.sample_graph_t2_chassis_fe, '-p', self.t2_chassis_fe_port_config, '-v', "VNET"] output = self.run_script(argument) self.assertEqual(output.strip(), "{'VnetFE': {'vxlan_tunnel': 'TunnelInt', 'vni': 8000}}") # Test a minigraph file where VNI is specified def test_minigraph_t2_chassis_fe_vnet(self): - argument = '-m "' + self.sample_graph_t2_chassis_fe_vni + '" -p "' + self.t2_chassis_fe_port_config + '" -v "VNET"' + argument = ['-m', self.sample_graph_t2_chassis_fe_vni, '-p', self.t2_chassis_fe_port_config, '-v', "VNET"] output = self.run_script(argument) self.assertEqual(output.strip(), "{'VnetFE': {'vxlan_tunnel': 'TunnelInt', 'vni': 9000}}") def test_minigraph_t2_chassis_fe_vxlan(self): - argument = '-m "' + self.sample_graph_t2_chassis_fe + '" -p "' + self.t2_chassis_fe_port_config + '" -v "VXLAN_TUNNEL"' + argument = ['-m', self.sample_graph_t2_chassis_fe, '-p', self.t2_chassis_fe_port_config, '-v', "VXLAN_TUNNEL"] output = self.run_script(argument) self.assertEqual(output.strip(), "{'TunnelInt': {'src_ip': '4.0.0.0'}}") diff --git a/src/sonic-config-engine/tests/test_frr.py b/src/sonic-config-engine/tests/test_frr.py index 3934f8c7d70c..c30bc8b4969f 100644 --- a/src/sonic-config-engine/tests/test_frr.py +++ 
b/src/sonic-config-engine/tests/test_frr.py @@ -3,13 +3,13 @@ import subprocess import tests.common_utils as utils - +from sonic_py_common.general import getstatusoutput_noshell from unittest import TestCase class TestCfgGen(TestCase): def setUp(self): self.test_dir = os.path.dirname(os.path.realpath(__file__)) - self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 'sonic-cfggen') + self.script_file = [utils.PYTHON_INTERPRETTER, os.path.join(self.test_dir, '..', 'sonic-cfggen')] self.t0_minigraph = os.path.join(self.test_dir, 't0-sample-graph.xml') self.t0_port_config = os.path.join(self.test_dir, 't0-sample-port-config.ini') self.output_file = os.path.join(self.test_dir, 'output') @@ -21,15 +21,19 @@ def tearDown(self): pass - def run_script(self, argument, check_stderr=False): + def run_script(self, argument, check_stderr=False, output_file=None): # print '\n Running sonic-cfggen ' + argument + if check_stderr: - output = subprocess.check_output(self.script_file + ' ' + argument, stderr=subprocess.STDOUT, shell=True) + output = subprocess.check_output(self.script_file + argument, stderr=subprocess.STDOUT) else: - output = subprocess.check_output(self.script_file + ' ' + argument, shell=True) + output = subprocess.check_output(self.script_file + argument) if utils.PY3x: output = output.decode() + if output_file: + with open(output_file, 'w') as f: + f.write(output) linecount = output.strip().count('\n') if linecount <= 0: @@ -39,8 +43,7 @@ def run_script(self, argument, check_stderr=False): return output def run_diff(self, file1, file2): - output = subprocess.check_output('diff -u {} {} || true'.format(file1, file2), shell=True) - + _, output = getstatusoutput_noshell(['diff', '-u', file1, file2]) if utils.PY3x: output = output.decode() @@ -50,9 +53,8 @@ def run_case(self, template, target): template_dir = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-fpm-frr', "frr") conf_template = os.path.join(template_dir, 
template) constants = os.path.join(self.test_dir, '..', '..', '..', 'files', 'image_config', 'constants', 'constants.yml') - cmd_args = self.t0_minigraph, self.t0_port_config, constants, conf_template, template_dir, self.output_file - cmd = "-m %s -p %s -y %s -t %s -T %s > %s" % cmd_args - self.run_script(cmd) + cmd = ['-m', self.t0_minigraph, '-p', self.t0_port_config, '-y', constants, '-t', conf_template, '-T', template_dir] + self.run_script(cmd, output_file=self.output_file) original_filename = os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, target) r = filecmp.cmp(original_filename, self.output_file) @@ -69,4 +71,3 @@ def test_bgpd_frr(self): def test_zebra_frr(self): self.assertTrue(*self.run_case('zebra/zebra.conf.j2', 'zebra_frr.conf')) - diff --git a/src/sonic-config-engine/tests/test_j2files.py b/src/sonic-config-engine/tests/test_j2files.py index 3ac219468e43..ad86f1cb4b2a 100644 --- a/src/sonic-config-engine/tests/test_j2files.py +++ b/src/sonic-config-engine/tests/test_j2files.py @@ -5,12 +5,13 @@ from unittest import TestCase import tests.common_utils as utils +from sonic_py_common.general import getstatusoutput_noshell, getstatusoutput_noshell_pipe class TestJ2Files(TestCase): def setUp(self): self.test_dir = os.path.dirname(os.path.realpath(__file__)) - self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 'sonic-cfggen') + self.script_file = [utils.PYTHON_INTERPRETTER, os.path.join(self.test_dir, '..', 'sonic-cfggen')] self.simple_minigraph = os.path.join(self.test_dir, 'simple-sample-graph.xml') self.port_data = os.path.join(self.test_dir, 'sample-port-data.json') self.ztp = os.path.join(self.test_dir, "sample-ztp.json") @@ -40,17 +41,21 @@ def setUp(self): self.output_file = os.path.join(self.test_dir, 'output') os.environ["CFGGEN_UNIT_TESTING"] = "2" - def run_script(self, argument): - print('CMD: sonic-cfggen ' + argument) - output = subprocess.check_output(self.script_file + ' ' + argument, shell=True) + def
run_script(self, argument, output_file=None): + print('CMD: sonic-cfggen ', argument) + output = subprocess.check_output(self.script_file + argument) if utils.PY3x: output = output.decode() + if output_file: + with open(output_file, 'w') as f: + f.write(output) return output def run_diff(self, file1, file2): - return subprocess.check_output('diff -u {} {} || true'.format(file1, file2), shell=True) + _, output = getstatusoutput_noshell(['diff', '-u', file1, file2]) + return output def create_machine_conf(self, platform, vendor): file_exist = True @@ -59,23 +64,24 @@ def create_machine_conf(self, platform, vendor): 'dell': 'onie', 'mellanox': 'onie' } - echo_cmd = "echo '{}_platform={}' | sudo tee -a /host/machine.conf > /dev/null".format(mode[vendor], platform) + echo_cmd1 = ["echo", '{}_platform={}'.format(mode[vendor], platform)] + echo_cmd2 = ["sudo", "tee", "-a", "/host/machine.conf"] if not os.path.exists('/host/machine.conf'): file_exist = False if not os.path.isdir('/host'): dir_exist = False - os.system('sudo mkdir /host') - os.system('sudo touch /host/machine.conf') - os.system(echo_cmd) + subprocess.call(['sudo', 'mkdir', '/host']) + subprocess.call(['sudo', 'touch', '/host/machine.conf']) + getstatusoutput_noshell_pipe(echo_cmd1, echo_cmd2) return file_exist, dir_exist def remove_machine_conf(self, file_exist, dir_exist): if not file_exist: - os.system('sudo rm -f /host/machine.conf') + subprocess.call(['sudo', 'rm', '-f', '/host/machine.conf']) if not dir_exist: - os.system('sudo rmdir /host') + subprocess.call(['sudo', 'rmdir', '/host']) def modify_cable_len(self, base_file, file_dir): input_file = os.path.join(file_dir, base_file) @@ -95,76 +101,76 @@ def test_interfaces(self): interfaces_template = os.path.join(self.test_dir, '..', '..', '..', 'files', 'image_config', 'interfaces', 'interfaces.j2') # ZTP enabled - argument = '-m ' + self.t0_minigraph_nomgmt + ' -p ' + self.t0_port_config_tiny + ' -j ' + self.ztp + ' -j ' + self.port_data + ' -a 
\'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}\' -t ' + interfaces_template + '> ' + self.output_file - self.run_script(argument) + argument = ['-m', self.t0_minigraph_nomgmt, '-p', self.t0_port_config_tiny, '-j', self.ztp, '-j', self.port_data, '-a', '{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}', '-t', interfaces_template] + self.run_script(argument, output_file=self.output_file) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'interfaces_nomgmt_ztp'), self.output_file)) - argument = '-m ' + self.t0_minigraph_nomgmt + ' -p ' + self.t0_port_config_tiny + ' -j ' + self.ztp_inband + ' -j ' + self.port_data + ' -a \'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}\' -t ' + interfaces_template + '> ' + self.output_file - self.run_script(argument) + argument = ['-m', self.t0_minigraph_nomgmt, '-p', self.t0_port_config_tiny, '-j', self.ztp_inband, '-j', self.port_data, '-a', '{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}', '-t', interfaces_template] + self.run_script(argument, output_file=self.output_file) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'interfaces_nomgmt_ztp_inband'), self.output_file)) - argument = '-m ' + self.t0_minigraph_nomgmt + ' -p ' + self.t0_port_config_tiny + ' -j ' + self.ztp_ip + ' -j ' + self.port_data + ' -a \'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}\' -t ' + interfaces_template + '> ' + self.output_file - self.run_script(argument) + argument = ['-m', self.t0_minigraph_nomgmt, '-p', self.t0_port_config_tiny, '-j', self.ztp_ip, '-j', self.port_data, '-a', '{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}', '-t', interfaces_template] + self.run_script(argument, output_file=self.output_file) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'interfaces_nomgmt_ztp_ip'), self.output_file)) - argument = '-m ' + self.t0_minigraph_nomgmt + ' -p ' + self.t0_port_config_tiny + ' -j ' + self.ztp_inband_ip + ' -j ' + self.port_data + ' -a \'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}\' -t ' + 
interfaces_template + '> ' + self.output_file - self.run_script(argument) + argument = ['-m', self.t0_minigraph_nomgmt, '-p', self.t0_port_config_tiny, '-j', self.ztp_inband_ip, '-j', self.port_data, '-a', '{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}', '-t', interfaces_template] + self.run_script(argument, output_file=self.output_file) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'interfaces_nomgmt_ztp_inband_ip'), self.output_file)) # ZTP disabled, MGMT_INTERFACE defined - argument = '-m ' + self.t0_minigraph + ' -p ' + self.t0_port_config + ' -a \'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}\' -t ' + interfaces_template + ' > ' + self.output_file - self.run_script(argument) + argument = ['-m', self.t0_minigraph, '-p', self.t0_port_config, '-a', '{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}', '-t', interfaces_template] + self.run_script(argument, output_file=self.output_file) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'interfaces'), self.output_file)) - argument = '-m ' + self.t0_mvrf_minigraph + ' -p ' + self.t0_port_config + ' -a \'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}\' -t ' + interfaces_template + ' > ' + self.output_file - self.run_script(argument) + argument = ['-m', self.t0_mvrf_minigraph, '-p', self.t0_port_config, '-a', '{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}', '-t', interfaces_template] + self.run_script(argument, output_file=self.output_file) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'mvrf_interfaces'), self.output_file)) - argument = '-m ' + self.t0_minigraph_two_mgmt + ' -p ' + self.t0_port_config + ' -a \'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}\' -t ' + interfaces_template + ' > ' + self.output_file - self.run_script(argument) + argument = ['-m', self.t0_minigraph_two_mgmt, '-p', self.t0_port_config, '-a', '{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}', '-t', interfaces_template] + self.run_script(argument, output_file=self.output_file) 
self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'two_mgmt_interfaces'), self.output_file), self.output_file) # ZTP disabled, no MGMT_INTERFACE defined - argument = '-m ' + self.t0_minigraph_nomgmt + ' -p ' + self.t0_port_config + ' -a \'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}\' -t ' + interfaces_template + ' > ' + self.output_file - self.run_script(argument) + argument = ['-m', self.t0_minigraph_nomgmt, '-p', self.t0_port_config, '-a', '{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}', '-t', interfaces_template] + self.run_script(argument, output_file=self.output_file) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'interfaces_nomgmt'), self.output_file)) - argument = '-m ' + self.t0_mvrf_minigraph_nomgmt + ' -p ' + self.t0_port_config + ' -a \'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}\' -t ' + interfaces_template + ' > ' + self.output_file - self.run_script(argument) + argument = ['-m', self.t0_mvrf_minigraph_nomgmt, '-p', self.t0_port_config, '-a', '{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}', '-t', interfaces_template] + self.run_script(argument, output_file=self.output_file) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'mvrf_interfaces_nomgmt'), self.output_file)) def test_ports_json(self): ports_template = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-orchagent', 'ports.json.j2') - argument = '-m ' + self.simple_minigraph + ' -p ' + self.t0_port_config + ' -t ' + ports_template + ' > ' + self.output_file - self.run_script(argument) + argument = ['-m', self.simple_minigraph, '-p', self.t0_port_config, '-t', ports_template] + self.run_script(argument, output_file=self.output_file) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'ports.json'), self.output_file)) def test_dhcp_relay(self): # Test generation of wait_for_intf.sh dhc_sample_data = os.path.join(self.test_dir, "dhcp-relay-sample.json") template_path = 
os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-dhcp-relay', 'wait_for_intf.sh.j2') - argument = '-m ' + self.t0_minigraph + ' -j ' + dhc_sample_data + ' -p ' + self.t0_port_config + ' -t ' + template_path + ' > ' + self.output_file - self.run_script(argument) + argument = ['-m', self.t0_minigraph, '-j', dhc_sample_data, '-p', self.t0_port_config, '-t', template_path] + self.run_script(argument, output_file=self.output_file) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'wait_for_intf.sh'), self.output_file)) # Test generation of docker-dhcp-relay.supervisord.conf template_path = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-dhcp-relay', 'docker-dhcp-relay.supervisord.conf.j2') - argument = '-m ' + self.t0_minigraph + ' -p ' + self.t0_port_config + ' -t ' + template_path + ' > ' + self.output_file - self.run_script(argument) + argument = ['-m', self.t0_minigraph, '-p', self.t0_port_config, '-t', template_path] + self.run_script(argument, output_file=self.output_file) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'docker-dhcp-relay.supervisord.conf'), self.output_file)) # Test generation of docker-dhcp-relay.supervisord.conf when a vlan is missing ip/ipv6 helpers template_path = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-dhcp-relay', 'docker-dhcp-relay.supervisord.conf.j2') - argument = '-m ' + self.no_ip_helper_minigraph + ' -p ' + self.t0_port_config + ' -t ' + template_path + ' > ' + self.output_file - self.run_script(argument) + argument = ['-m', self.no_ip_helper_minigraph, '-p', self.t0_port_config, '-t', template_path] + self.run_script(argument, output_file=self.output_file) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'docker-dhcp-relay-no-ip-helper.supervisord.conf'), self.output_file)) def test_radv(self): # Test generation of radvd.conf with multiple ipv6 prefixes 
template_path = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-router-advertiser', 'radvd.conf.j2') - argument = '-m ' + self.radv_test_minigraph + ' -p ' + self.t0_port_config + ' -t ' + template_path + ' > ' + self.output_file - self.run_script(argument) + argument = ['-m', self.radv_test_minigraph, '-p', self.t0_port_config, '-t', template_path] + self.run_script(argument, output_file=self.output_file) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'radvd.conf'), self.output_file)) def test_lldp(self): @@ -176,32 +182,32 @@ def test_lldp(self): # Test generation of lldpd.conf if IPv4 and IPv6 management interfaces exist mgmt_iface_ipv4_and_ipv6_json = os.path.join(self.test_dir, "data", "lldp", "mgmt_iface_ipv4_and_ipv6.json") - argument = '-j {} -t {} > {}'.format(mgmt_iface_ipv4_and_ipv6_json, lldpd_conf_template, self.output_file) - self.run_script(argument) + argument = ['-j', mgmt_iface_ipv4_and_ipv6_json, '-t', lldpd_conf_template] + self.run_script(argument, output_file=self.output_file) self.assertTrue(utils.cmp(expected_mgmt_ipv4_and_ipv6, self.output_file)) # Test generation of lldpd.conf if management interface IPv4 only exist mgmt_iface_ipv4_json = os.path.join(self.test_dir, "data", "lldp", "mgmt_iface_ipv4.json") - argument = '-j {} -t {} > {}'.format(mgmt_iface_ipv4_json, lldpd_conf_template, self.output_file) - self.run_script(argument) + argument = ['-j', mgmt_iface_ipv4_json, '-t', lldpd_conf_template] + self.run_script(argument, output_file=self.output_file) self.assertTrue(utils.cmp(expected_mgmt_ipv4, self.output_file)) # Test generation of lldpd.conf if Management interface IPv6 only exist mgmt_iface_ipv6_json = os.path.join(self.test_dir, "data", "lldp", "mgmt_iface_ipv6.json") - argument = '-j {} -t {} > {}'.format(mgmt_iface_ipv6_json, lldpd_conf_template, self.output_file) - self.run_script(argument) + argument = ['-j', mgmt_iface_ipv6_json, '-t', lldpd_conf_template] + 
self.run_script(argument, output_file=self.output_file) self.assertTrue(utils.cmp(expected_mgmt_ipv6, self.output_file)) def test_ipinip(self): ipinip_file = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-orchagent', 'ipinip.json.j2') - argument = '-m ' + self.t0_minigraph + ' -p ' + self.t0_port_config + ' -t ' + ipinip_file + ' > ' + self.output_file - self.run_script(argument) + argument = ['-m', self.t0_minigraph, '-p', self.t0_port_config, '-t', ipinip_file] + self.run_script(argument, output_file=self.output_file) sample_output_file = os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'ipinip.json') assert utils.cmp(sample_output_file, self.output_file), self.run_diff(sample_output_file, self.output_file) def test_l2switch_template(self): - argument = '-k Mellanox-SN2700 --preset l2 -p ' + self.t0_port_config + argument = ['-k', 'Mellanox-SN2700', '--preset', 'l2', '-p', self.t0_port_config] output = self.run_script(argument) output_json = json.loads(output) @@ -212,14 +218,14 @@ def test_l2switch_template(self): self.assertTrue(json.dumps(sample_output_json, sort_keys=True) == json.dumps(output_json, sort_keys=True)) template_dir = os.path.join(self.test_dir, '..', 'data', 'l2switch.j2') - argument = '-t ' + template_dir + ' -k Mellanox-SN2700 -p ' + self.t0_port_config + argument = ['-t', template_dir, '-k', 'Mellanox-SN2700', '-p', self.t0_port_config] output = self.run_script(argument) output_json = json.loads(output) self.assertTrue(json.dumps(sample_output_json, sort_keys=True) == json.dumps(output_json, sort_keys=True)) def test_l1_ports_template(self): - argument = '-k 32x1000Gb --preset l1 -p ' + self.l1_l3_port_config + argument = ['-k', '32x1000Gb', '--preset', 'l1', '-p', self.l1_l3_port_config] output = self.run_script(argument) output_json = json.loads(output) @@ -230,14 +236,14 @@ def test_l1_ports_template(self): self.assertTrue(json.dumps(sample_output_json, sort_keys=True) == json.dumps(output_json, 
sort_keys=True)) template_dir = os.path.join(self.test_dir, '..', 'data', 'l1intf.j2') - argument = '-t ' + template_dir + ' -k 32x1000Gb -p ' + self.l1_l3_port_config + argument = ['-t', template_dir, '-k', '32x1000Gb', '-p', self.l1_l3_port_config] output = self.run_script(argument) output_json = json.loads(output) self.assertTrue(json.dumps(sample_output_json, sort_keys=True) == json.dumps(output_json, sort_keys=True)) def test_l3_ports_template(self): - argument = '-k 32x1000Gb --preset l3 -p ' + self.l1_l3_port_config + argument = ['-k', '32x1000Gb', '--preset', 'l3', '-p', self.l1_l3_port_config] output = self.run_script(argument) output_json = json.loads(output) @@ -248,7 +254,7 @@ def test_l3_ports_template(self): self.assertTrue(json.dumps(sample_output_json, sort_keys=True) == json.dumps(output_json, sort_keys=True)) template_dir = os.path.join(self.test_dir, '..', 'data', 'l3intf.j2') - argument = '-t ' + template_dir + ' -k 32x1000Gb -p ' + self.l1_l3_port_config + argument = ['-t', template_dir, '-k', '32x1000Gb', '-p', self.l1_l3_port_config] output = self.run_script(argument) output_json = json.loads(output) @@ -270,9 +276,7 @@ def test_l2switch_template_dualtor(self): "Ethernet112", "Ethernet116", "Ethernet120", "Ethernet124" ] } - argument = '-a \'{}\' -k Arista-7050CX3-32S-D48C8 --preset l2 -p {}'.format( - json.dumps(extra_args), self.t0_7050cx3_port_config - ) + argument = ['-a', json.dumps(extra_args), '-k', 'Arista-7050CX3-32S-D48C8', '--preset', 'l2', '-p', self.t0_7050cx3_port_config] output = self.run_script(argument) output_json = json.loads(output) @@ -299,8 +303,8 @@ def do_test_qos_and_buffer_arista7800r3_48cq2_lc_render_template(self, platform, for template_file, cfg_file, sample_output_file in [(qos_file, 'qos_config.j2', 'qos-arista7800r3-48cq2-lc.json'), (buffer_file, 'buffers_config.j2', 'buffer-arista7800r3-48cq2-lc.json') ]: - argument = '-m ' + self.arista7800r3_48cq2_lc_t2_minigraph + ' -p ' + port_config_ini_file + ' -t ' + 
template_file + ' > ' + self.output_file - self.run_script(argument) + argument = ['-m', self.arista7800r3_48cq2_lc_t2_minigraph, '-p', port_config_ini_file, '-t', template_file] + self.run_script(argument, output_file=self.output_file) # cleanup cfg_file_new = os.path.join(arista_dir_path, cfg_file) @@ -335,8 +339,8 @@ def _test_qos_render_template(self, vendor, platform, sku, minigraph, expected): shutil.copy2(qos_config_file, dir_path) minigraph = os.path.join(self.test_dir, minigraph) - argument = '-m ' + minigraph + ' -p ' + port_config_ini_file + ' -t ' + qos_file + ' > ' + self.output_file - self.run_script(argument) + argument = ['-m', minigraph, '-p', port_config_ini_file, '-t', qos_file] + self.run_script(argument, output_file=self.output_file) # cleanup qos_config_file_new = os.path.join(dir_path, 'qos_config.j2') @@ -398,8 +402,8 @@ def test_qos_dscp_remapping_render_template(self): qos_config_file = os.path.join(self.test_dir, '..', '..', '..', 'files', 'build_templates', 'qos_config.j2') shutil.copy2(qos_config_file, device_template_path) - argument = '-m ' + sample_minigraph_file + ' -p ' + port_config_ini_file + ' -t ' + qos_file + ' > ' + test_output - self.run_script(argument) + argument = ['-m', sample_minigraph_file, '-p', port_config_ini_file, '-t', qos_file] + self.run_script(argument, output_file=test_output) # cleanup qos_config_file_new = os.path.join(device_template_path, 'qos_config.j2') @@ -432,8 +436,8 @@ def test_config_brcm_render_template(self): config_bcm_file = os.path.join(device_template_path, 'config.bcm.j2') config_test_output = os.path.join(self.test_dir, 'config_output.bcm') - argument = '-m ' + sample_minigraph_file + ' -p ' + port_config_ini_file + ' -t ' + config_bcm_file + ' > ' + config_test_output - self.run_script(argument) + argument = ['-m', sample_minigraph_file, '-p', port_config_ini_file, '-t', config_bcm_file] + self.run_script(argument, output_file=config_test_output) #check output config.bcm 
config_sample_output_file = os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, config_sample_output) @@ -451,8 +455,8 @@ def _test_buffers_render_template(self, vendor, platform, sku, minigraph, buffer shutil.copy2(buffers_config_file, dir_path) minigraph = os.path.join(self.test_dir, minigraph) - argument = '-m ' + minigraph + ' -p ' + port_config_ini_file + ' -t ' + buffers_file + ' > ' + self.output_file - self.run_script(argument) + argument = ['-m', minigraph, '-p', port_config_ini_file, '-t', buffers_file] + self.run_script(argument, output_file=self.output_file) # cleanup buffers_config_file_new = os.path.join(dir_path, 'buffers_config.j2') @@ -516,9 +520,9 @@ def test_extra_lossless_buffer_for_tunnel_remapping(self): def test_ipinip_multi_asic(self): ipinip_file = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-orchagent', 'ipinip.json.j2') - argument = '-m ' + self.multi_asic_minigraph + ' -p ' + self.multi_asic_port_config + ' -t ' + ipinip_file + ' -n asic0 ' + ' > ' + self.output_file + argument = ['-m', self.multi_asic_minigraph, '-p', self.multi_asic_port_config, '-t', ipinip_file, '-n', 'asic0'] print(argument) - self.run_script(argument) + self.run_script(argument, output_file=self.output_file) sample_output_file = os.path.join(self.test_dir, 'multi_npu_data', utils.PYvX_DIR, 'ipinip.json') assert utils.cmp(sample_output_file, self.output_file), self.run_diff(sample_output_file, self.output_file) @@ -544,13 +548,11 @@ def test_swss_switch_render_template(self): }, } for _, v in test_list.items(): - argument = " -m {} -p {} -y {} -t {} > {}".format( - v["graph"], v["port_config"], constants_yml, switch_template, self.output_file - ) + argument = ["-m", v["graph"], "-p", v["port_config"], "-y", constants_yml, "-t", switch_template] sample_output_file = os.path.join( self.test_dir, 'sample_output', v["output"] ) - self.run_script(argument) + self.run_script(argument, output_file=self.output_file) assert 
utils.cmp(sample_output_file, self.output_file), self.run_diff(sample_output_file, self.output_file) def test_swss_switch_render_template_multi_asic(self): @@ -575,14 +577,11 @@ def test_swss_switch_render_template_multi_asic(self): } for _, v in test_list.items(): os.environ["NAMESPACE_ID"] = v["namespace_id"] - argument = " -m {} -y {} -t {} > {}".format( - self.t1_mlnx_minigraph, constants_yml, switch_template, - self.output_file - ) + argument = ["-m", self.t1_mlnx_minigraph, "-y", constants_yml, "-t", switch_template] sample_output_file = os.path.join( self.test_dir, 'sample_output', v["output"] ) - self.run_script(argument) + self.run_script(argument, output_file=self.output_file) assert utils.cmp(sample_output_file, self.output_file), self.run_diff(sample_output_file, self.output_file) os.environ["NAMESPACE_ID"] = "" @@ -591,8 +590,8 @@ def test_ndppd_conf(self): vlan_interfaces_json = os.path.join(self.test_dir, "data", "ndppd", "vlan_interfaces.json") expected = os.path.join(self.test_dir, "sample_output", utils.PYvX_DIR, "ndppd.conf") - argument = '-j {} -t {} > {}'.format(vlan_interfaces_json, conf_template, self.output_file) - self.run_script(argument) + argument = ['-j', vlan_interfaces_json, '-t', conf_template] + self.run_script(argument, output_file=self.output_file) assert utils.cmp(expected, self.output_file), self.run_diff(expected, self.output_file) def test_ntp_conf(self): @@ -600,8 +599,8 @@ def test_ntp_conf(self): ntp_interfaces_json = os.path.join(self.test_dir, "data", "ntp", "ntp_interfaces.json") expected = os.path.join(self.test_dir, "sample_output", utils.PYvX_DIR, "ntp.conf") - argument = '-j {} -t {} > {}'.format(ntp_interfaces_json, conf_template, self.output_file) - self.run_script(argument) + argument = ['-j', ntp_interfaces_json, '-t', conf_template] + self.run_script(argument, output_file=self.output_file) assert utils.cmp(expected, self.output_file), self.run_diff(expected, self.output_file) def 
test_backend_acl_template_render(self): @@ -623,13 +622,11 @@ def test_backend_acl_template_render(self): input_file = os.path.join( self.test_dir, 'data', 'backend_acl', v['input'] ) - argument = " -j {} -t {} > {}".format( - input_file, acl_template, self.output_file - ) + argument = ["-j", input_file, "-t", acl_template] sample_output_file = os.path.join( self.test_dir, 'data', 'backend_acl', v['output'] ) - self.run_script(argument) + self.run_script(argument, output_file=self.output_file) assert utils.cmp(sample_output_file, self.output_file), self.run_diff(sample_output_file, self.output_file) def tearDown(self): diff --git a/src/sonic-config-engine/tests/test_j2files_t2_chassis_fe.py b/src/sonic-config-engine/tests/test_j2files_t2_chassis_fe.py index e6bc82941bf6..7041bc1b9b5c 100644 --- a/src/sonic-config-engine/tests/test_j2files_t2_chassis_fe.py +++ b/src/sonic-config-engine/tests/test_j2files_t2_chassis_fe.py @@ -3,7 +3,6 @@ import os import shutil import subprocess - from unittest import TestCase import tests.common_utils as utils @@ -11,7 +10,7 @@ class TestJ2FilesT2ChassisFe(TestCase): def setUp(self): self.test_dir = os.path.dirname(os.path.realpath(__file__)) - self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 'sonic-cfggen') + self.script_file = [utils.PYTHON_INTERPRETTER, os.path.join(self.test_dir, '..', 'sonic-cfggen')] self.t2_chassis_fe_minigraph = os.path.join(self.test_dir, 't2-chassis-fe-graph.xml') self.t2_chassis_fe_vni_minigraph = os.path.join(self.test_dir, 't2-chassis-fe-graph-vni.xml') self.t2_chassis_fe_pc_minigraph = os.path.join(self.test_dir, 't2-chassis-fe-graph-pc.xml') @@ -24,25 +23,28 @@ def tearDown(self): except OSError: pass - def run_script(self, argument): - print('CMD: sonic-cfggen ' + argument) - output = subprocess.check_output(self.script_file + ' ' + argument, shell=True) + def run_script(self, argument, output_file=None): + print('CMD: sonic-cfggen ' + ' '.join(argument)) + 
output = subprocess.check_output(self.script_file + argument) if utils.PY3x: output = output.decode() + if output_file: + with open(output_file, 'w') as f: + f.write(output) return output def run_diff(self, file1, file2): - return subprocess.check_output('diff -u {} {} || true'.format(file1, file2), shell=True) + output = subprocess.run(['diff', '-u', file1, file2], stdout=subprocess.PIPE).stdout + return output def run_case(self, minigraph, template, target): template_dir = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-fpm-frr', "frr") conf_template = os.path.join(template_dir, template) constants = os.path.join(self.test_dir, '..', '..', '..', 'files', 'image_config', 'constants', 'constants.yml') - cmd_args = minigraph, self.t2_chassis_fe_port_config, constants, conf_template, template_dir, self.output_file - cmd = "-m %s -p %s -y %s -t %s -T %s > %s" % cmd_args - self.run_script(cmd) + cmd = ["-m", minigraph, "-p", self.t2_chassis_fe_port_config, "-y", constants, "-t", conf_template, "-T", template_dir] + self.run_script(cmd, output_file=self.output_file) original_filename = os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, target) r = filecmp.cmp(original_filename, self.output_file) diff --git a/src/sonic-config-engine/tests/test_minigraph_case.py b/src/sonic-config-engine/tests/test_minigraph_case.py index 9ee8a49db6ae..8c399920b88b 100644 --- a/src/sonic-config-engine/tests/test_minigraph_case.py +++ b/src/sonic-config-engine/tests/test_minigraph_case.py @@ -2,7 +2,6 @@ import os import subprocess import ipaddress - import tests.common_utils as utils import minigraph @@ -17,7 +16,7 @@ class TestCfgGenCaseInsensitive(TestCase): def setUp(self): self.yang = utils.YangWrapper() self.test_dir = os.path.dirname(os.path.realpath(__file__)) - self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 'sonic-cfggen') + self.script_file = [utils.PYTHON_INTERPRETTER, os.path.join(self.test_dir, '..', 'sonic-cfggen')]
self.sample_graph = os.path.join(self.test_dir, 'simple-sample-graph-case.xml') self.sample_simple_graph = os.path.join(self.test_dir, 'simple-sample-graph.xml') self.sample_resource_graph = os.path.join(self.test_dir, 'sample-graph-resource-type.xml') @@ -27,13 +26,13 @@ def setUp(self): self.port_config = os.path.join(self.test_dir, 't0-sample-port-config.ini') def run_script(self, argument, check_stderr=False): - print('\n Running sonic-cfggen ' + argument) + print('\n Running sonic-cfggen ' + ' '.join(argument)) self.assertTrue(self.yang.validate(argument)) if check_stderr: - output = subprocess.check_output(self.script_file + ' ' + argument, stderr=subprocess.STDOUT, shell=True) + output = subprocess.check_output(self.script_file + argument, stderr=subprocess.STDOUT) else: - output = subprocess.check_output(self.script_file + ' ' + argument, shell=True) + output = subprocess.check_output(self.script_file + argument) if utils.PY3x: output = output.decode() @@ -46,47 +45,47 @@ def run_script(self, argument, check_stderr=False): return output def test_dummy_run(self): - argument = '' + argument = [] output = self.run_script(argument) self.assertEqual(output, '') def test_minigraph_sku(self): - argument = '-v "DEVICE_METADATA[\'localhost\'][\'hwsku\']" -m "' + self.sample_graph + '" -p "' + self.port_config + '"' + argument = ['-v', "DEVICE_METADATA[\'localhost\'][\'hwsku\']", '-m', self.sample_graph, '-p', self.port_config] output = self.run_script(argument) self.assertEqual(output.strip(), 'Force10-S6000') def test_print_data(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" --print-data' + argument = ['-m', self.sample_graph, '-p', self.port_config, '--print-data'] output = self.run_script(argument) self.assertTrue(len(output.strip()) > 0) def test_jinja_expression(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'type\']"' + argument = ['-m', self.sample_graph, 
'-p', self.port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'type\']"] output = self.run_script(argument) self.assertEqual(output.strip(), 'ToRRouter') def test_minigraph_subtype(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'subtype\']"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'subtype\']"] output = self.run_script(argument) self.assertEqual(output.strip(), 'DualToR') def test_minigraph_peer_switch_hostname(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'peer_switch\']"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'peer_switch\']"] output = self.run_script(argument) self.assertEqual(output.strip(), 'switch2-t0') def test_additional_json_data(self): - argument = '-a \'{"key1":"value1"}\' -v key1' + argument = ['-a', '{"key1":"value1"}', '-v', 'key1'] output = self.run_script(argument) self.assertEqual(output.strip(), 'value1') def test_read_yaml(self): - argument = '-v yml_item -y ' + os.path.join(self.test_dir, 'test.yml') + argument = ['-v', 'yml_item', '-y', os.path.join(self.test_dir, 'test.yml')] output = self.run_script(argument) self.assertEqual(output.strip(), '[\'value1\', \'value2\']') def test_render_template(self): - argument = '-y ' + os.path.join(self.test_dir, 'test.yml') + ' -t ' + os.path.join(self.test_dir, 'test.j2') + argument = ['-y', os.path.join(self.test_dir, 'test.yml'), '-t', os.path.join(self.test_dir, 'test.j2')] output = self.run_script(argument) self.assertEqual(output.strip(), 'value1\nvalue2') @@ -97,12 +96,12 @@ def test_render_template(self): # self.assertEqual(output.strip(), "{'everflow0': {'src_ip': '10.1.0.32', 'dst_ip': '10.0.100.1'}}") def test_minigraph_interfaces(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v 
\'INTERFACE.keys()|list\'' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', 'INTERFACE.keys()|list'] output = self.run_script(argument) self.assertEqual(output.strip(), "[('Ethernet0', '10.0.0.58/31'), 'Ethernet0', ('Ethernet0', 'FC00::75/126')]") def test_minigraph_vlans(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v VLAN' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', 'VLAN'] output = self.run_script(argument) expected = { @@ -126,7 +125,7 @@ def test_minigraph_vlans(self): ) def test_minigraph_vlan_members(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v VLAN_MEMBER' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', 'VLAN_MEMBER'] output = self.run_script(argument) expected = { 'Vlan1000|Ethernet8': {'tagging_mode': 'untagged'}, @@ -138,12 +137,12 @@ def test_minigraph_vlan_members(self): ) def test_minigraph_vlan_interfaces_keys(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "VLAN_INTERFACE.keys()|list"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "VLAN_INTERFACE.keys()|list"] output = self.run_script(argument) self.assertEqual(output.strip(), "[('Vlan1000', '192.168.0.1/27'), 'Vlan1000']") def test_minigraph_vlan_interfaces(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "VLAN_INTERFACE"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "VLAN_INTERFACE"] output = self.run_script(argument) expected_table = { 'Vlan1000|192.168.0.1/27': {}, @@ -155,7 +154,7 @@ def test_minigraph_vlan_interfaces(self): self.assertEqual(utils.to_dict(output.strip()), expected_table) def test_minigraph_portchannels(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v PORTCHANNEL' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', 'PORTCHANNEL'] output = self.run_script(argument) 
self.assertEqual( utils.to_dict(output.strip()), @@ -163,44 +162,44 @@ def test_minigraph_portchannels(self): ) def test_minigraph_console_mgmt_feature(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v CONSOLE_SWITCH' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', 'CONSOLE_SWITCH'] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), utils.to_dict("{'console_mgmt': {'enabled': 'no'}}")) def test_minigraph_console_port(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v CONSOLE_PORT' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', 'CONSOLE_PORT'] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), utils.to_dict("{'1': {'baud_rate': '9600', 'remote_device': 'managed_device', 'flow_control': 1}}")) def test_minigraph_dhcp_server_feature(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'dhcp_server\']"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'dhcp_server\']"] output = self.run_script(argument) self.assertEqual(output.strip(), '') try: # For DHCP server enabled device type - output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (TOR_ROUTER, BMC_MGMT_TOR_ROUTER, self.sample_graph), shell=True) + output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (TOR_ROUTER, BMC_MGMT_TOR_ROUTER), self.sample_graph]) output = self.run_script(argument) self.assertEqual(output.strip(), 'enabled') finally: - output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (BMC_MGMT_TOR_ROUTER, TOR_ROUTER, self.sample_graph), shell=True) + output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (BMC_MGMT_TOR_ROUTER, TOR_ROUTER), self.sample_graph]) def test_minigraph_deployment_id(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v 
"DEVICE_METADATA[\'localhost\'][\'deployment_id\']"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'deployment_id\']"] output = self.run_script(argument) self.assertEqual(output.strip(), "1") def test_minigraph_cluster(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'cluster\']"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'cluster\']"] output = self.run_script(argument) self.assertEqual(output.strip(), "AAA00PrdStr00") def test_minigraph_neighbor_metadata(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "DEVICE_NEIGHBOR_METADATA"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "DEVICE_NEIGHBOR_METADATA"] expected_table = { 'switch2-t0': { @@ -253,43 +252,43 @@ def test_minigraph_neighbor_metadata(self): # self.assertEqual(output.strip(), "{'everflow0': {'src_ip': '10.1.0.32', 'dst_ip': '10.0.100.1'}}") def test_metadata_tacacs(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "TACPLUS_SERVER"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "TACPLUS_SERVER"] output = self.run_script(argument) self.assertEqual(output.strip(), "{'10.0.10.7': {'priority': '1', 'tcp_port': '49'}, '10.0.10.8': {'priority': '1', 'tcp_port': '49'}}") def test_metadata_kube(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "KUBERNETES_MASTER[\'SERVER\']"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "KUBERNETES_MASTER[\'SERVER\']"] output = self.run_script(argument) self.assertEqual(json.loads(output.strip().replace("'", "\"")), json.loads('{"ip": "10.10.10.10", "disable": "True"}')) def test_minigraph_mgmt_port(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "MGMT_PORT"' + argument = ['-m', 
self.sample_graph, '-p', self.port_config, '-v', "MGMT_PORT"] output = self.run_script(argument) self.assertEqual(output.strip(), "{'eth0': {'alias': 'eth0', 'admin_status': 'up', 'speed': '1000'}}") def test_metadata_ntp(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "NTP_SERVER"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "NTP_SERVER"] output = self.run_script(argument) self.assertEqual(output.strip(), "{'10.0.10.1': {}, '10.0.10.2': {}}") def test_minigraph_vnet(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "VNET"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "VNET"] output = self.run_script(argument) self.assertEqual(output.strip(), "") def test_minigraph_vxlan(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "VXLAN_TUNNEL"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "VXLAN_TUNNEL"] output = self.run_script(argument) self.assertEqual(output.strip(), "") def test_minigraph_bgp_mon(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "BGP_MONITORS"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "BGP_MONITORS"] output = self.run_script(argument) self.assertEqual(output.strip(), "{}") def test_minigraph_peer_switch(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "PEER_SWITCH"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "PEER_SWITCH"] expected_table = { 'switch2-t0': { 'address_ipv4': "25.1.1.10" @@ -314,7 +313,7 @@ def test_mux_cable_parsing(self): self.assertTrue("mux_cable" not in port) def test_minigraph_storage_device(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'storage_device\']"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', 
"DEVICE_METADATA[\'localhost\'][\'storage_device\']"] output = self.run_script(argument) self.assertEqual(output.strip(), "true") @@ -331,23 +330,23 @@ def verify_storage_device_set(self, graph_file, check_stderr=False): try: print('\n Change device type to %s' % (BACKEND_TOR_ROUTER)) if check_stderr: - output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (TOR_ROUTER, BACKEND_TOR_ROUTER, graph_file), stderr=subprocess.STDOUT, shell=True) + output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (TOR_ROUTER, BACKEND_TOR_ROUTER), graph_file], stderr=subprocess.STDOUT) else: - output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (TOR_ROUTER, BACKEND_TOR_ROUTER, graph_file), shell=True) + output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (TOR_ROUTER, BACKEND_TOR_ROUTER), graph_file]) - argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'storage_device\']"' + argument = ['-m', graph_file, '-p', self.port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'storage_device\']"] output = self.run_script(argument) self.assertEqual(output.strip(), "true") finally: print('\n Change device type back to %s' % (TOR_ROUTER)) if check_stderr: - output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (BACKEND_TOR_ROUTER, TOR_ROUTER, graph_file), stderr=subprocess.STDOUT, shell=True) + output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (BACKEND_TOR_ROUTER, TOR_ROUTER), graph_file], stderr=subprocess.STDOUT) else: - output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (BACKEND_TOR_ROUTER, TOR_ROUTER, graph_file), shell=True) + output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (BACKEND_TOR_ROUTER, TOR_ROUTER), graph_file]) def test_minigraph_tunnel_table(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "TUNNEL"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "TUNNEL"] expected_tunnel = { "MuxTunnel0": { "tunnel_type": 
"IPINIP", @@ -367,7 +366,7 @@ def test_minigraph_tunnel_table(self): # Validate tunnel config is as before when tunnel_qos_remap = disabled sample_graph_disabled_remap = os.path.join(self.test_dir, 'simple-sample-graph-case-remap-disabled.xml') - argument = '-m "' + sample_graph_disabled_remap + '" -p "' + self.port_config + '" -v "TUNNEL"' + argument = ['-m', sample_graph_disabled_remap, '-p', self.port_config, '-v', "TUNNEL"] output = self.run_script(argument) self.assertEqual( @@ -377,7 +376,7 @@ def test_minigraph_tunnel_table(self): # Validate extra config is generated when tunnel_qos_remap = enabled sample_graph_enabled_remap = os.path.join(self.test_dir, 'simple-sample-graph-case-remap-enabled.xml') - argument = '-m "' + sample_graph_enabled_remap + '" -p "' + self.port_config + '" -v "TUNNEL"' + argument = ['-m', sample_graph_enabled_remap, '-p', self.port_config, '-v', "TUNNEL"] expected_tunnel = { "MuxTunnel0": { "tunnel_type": "IPINIP", @@ -402,7 +401,7 @@ def test_minigraph_tunnel_table(self): # Validate extra config for mux tunnel is generated automatically when tunnel_qos_remap = enabled sample_graph_enabled_remap = os.path.join(self.test_dir, 'simple-sample-graph-case-remap-enabled-no-tunnel-attributes.xml') - argument = '-m "' + sample_graph_enabled_remap + '" -p "' + self.port_config + '" -v "TUNNEL"' + argument = ['-m', sample_graph_enabled_remap, '-p', self.port_config, '-v', "TUNNEL"] output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -410,7 +409,7 @@ def test_minigraph_tunnel_table(self): ) def test_minigraph_mux_cable_table(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "MUX_CABLE"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "MUX_CABLE"] expected_table = { 'Ethernet4': { 'state': 'auto', @@ -434,7 +433,7 @@ def test_minigraph_mux_cable_table(self): ) def test_dhcp_table(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.port_config 
+ '" -v "DHCP_RELAY"' + argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "DHCP_RELAY"] expected = { 'Vlan1000': { 'dhcpv6_servers': [ diff --git a/src/sonic-config-engine/tests/test_multinpu_cfggen.py b/src/sonic-config-engine/tests/test_multinpu_cfggen.py index 2bfb879a1ad2..070137af3960 100644 --- a/src/sonic-config-engine/tests/test_multinpu_cfggen.py +++ b/src/sonic-config-engine/tests/test_multinpu_cfggen.py @@ -5,7 +5,6 @@ import subprocess import unittest import yaml - import tests.common_utils as utils from unittest import TestCase @@ -23,7 +22,7 @@ def setUp(self): self.yang = utils.YangWrapper() self.test_dir = os.path.dirname(os.path.realpath(__file__)) self.test_data_dir = os.path.join(self.test_dir, 'multi_npu_data') - self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 'sonic-cfggen') + self.script_file = [utils.PYTHON_INTERPRETTER, os.path.join(self.test_dir, '..', 'sonic-cfggen')] self.sample_graph = os.path.join(self.test_data_dir, 'sample-minigraph.xml') self.sample_graph1 = os.path.join(self.test_data_dir, 'sample-minigraph-noportchannel.xml') self.sample_port_config = os.path.join(self.test_data_dir, 'sample_port_config.ini') @@ -33,17 +32,19 @@ def setUp(self): self.output_file = os.path.join(self.test_dir, 'output') os.environ["CFGGEN_UNIT_TESTING"] = "2" - def run_script(self, argument, check_stderr=False): - print('\n Running sonic-cfggen ' + argument) + def run_script(self, argument, check_stderr=False, output_file=None): + print('\n Running sonic-cfggen ' + ' '.join(argument)) self.assertTrue(self.yang.validate(argument)) - if check_stderr: - output = subprocess.check_output(self.script_file + ' ' + argument, stderr=subprocess.STDOUT, shell=True) + output = subprocess.check_output(self.script_file + argument, stderr=subprocess.STDOUT) else: - output = subprocess.check_output(self.script_file + ' ' + argument, shell=True) + output = subprocess.check_output(self.script_file + argument) if 
utils.PY3x: output = output.decode() + if output_file: + with open(output_file, 'w') as f: + f.write(output) linecount = output.strip().count('\n') if linecount <= 0: @@ -53,15 +54,15 @@ def run_script(self, argument, check_stderr=False): return output def run_diff(self, file1, file2): - return subprocess.check_output('diff -u {} {} || true'.format(file1, file2), shell=True) + _, output = getstatusoutput_noshell(['diff', '-u', file1, file2]) + return output def run_frr_asic_case(self, template, target, asic, port_config): template_dir = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-fpm-frr', "frr") conf_template = os.path.join(template_dir, template) constants = os.path.join(self.test_dir, '..', '..', '..', 'files', 'image_config', 'constants', 'constants.yml') - cmd_args = asic, self.sample_graph, port_config, constants, conf_template, template_dir, self.output_file - cmd = "-n %s -m %s -p %s -y %s -t %s -T %s > %s" % cmd_args - self.run_script(cmd) + cmd = ['-n', asic, '-m', self.sample_graph, '-p', port_config, '-y', constants, '-t', conf_template, '-T', template_dir] + self.run_script(cmd, output_file=self.output_file) original_filename = os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, target) r = filecmp.cmp(original_filename, self.output_file) @@ -72,37 +73,37 @@ def run_frr_asic_case(self, template, target, asic, port_config): def run_script_for_asic(self,argument,asic, port_config=None): - argument = "{} -n asic{} ".format(argument, asic) + cmd = argument + ["-n", "asic{}".format(asic)] if port_config: - argument += "-p {}".format(port_config) - output = self.run_script(argument) + cmd = argument + ["-n", "asic{}".format(asic), "-p", port_config] + output = self.run_script(cmd) return output def test_dummy_run(self): - argument = '' + argument = [] output = self.run_script(argument) self.assertEqual(output, '') def test_hwsku(self): - argument = "-v \"DEVICE_METADATA[\'localhost\'][\'hwsku\']\" -m \"{}\" -p 
\"{}\"".format(self.sample_graph, self.sample_port_config) + argument = ["-v", "DEVICE_METADATA[\'localhost\'][\'hwsku\']", "-m", self.sample_graph, "-p", self.sample_port_config] output = self.run_script(argument) self.assertEqual(output.strip(), SKU) - argument = "-v \"DEVICE_METADATA[\'localhost\'][\'hwsku\']\" -m \"{}\"".format(self.sample_graph) + argument = ["-v", "DEVICE_METADATA[\'localhost\'][\'hwsku\']", "-m", self.sample_graph] for asic in range(NUM_ASIC): output = self.run_script_for_asic(argument, asic, self.port_config[asic]) self.assertEqual(output.strip(), SKU) def test_print_data(self): - argument = "-m \"{}\" -p \"{}\" --print-data".format(self.sample_graph, self.sample_port_config) + argument = ["-m", self.sample_graph, "-p", self.sample_port_config, "--print-data"] output = self.run_script(argument) self.assertGreater(len(output.strip()) , 0) - argument = "-m \"{}\" --print-data".format(self.sample_graph) + argument = ["-m", self.sample_graph, "--print-data"] for asic in range(NUM_ASIC): output = self.run_script_for_asic(argument, asic, self.port_config[asic]) self.assertGreater(len(output.strip()) , 0) def test_additional_json_data(self): - argument = '-a \'{"key1":"value1"}\' -v key1' + argument = ['-a', '{"key1":"value1"}', '-v', 'key1'] output = self.run_script(argument) self.assertEqual(output.strip(), 'value1') for asic in range(NUM_ASIC): @@ -110,15 +111,15 @@ def test_additional_json_data(self): self.assertEqual(output.strip(), 'value1') def test_read_yaml(self): - argument = '-v yml_item -y ' + os.path.join(self.test_dir, 'test.yml') - output = yaml.load(self.run_script(argument)) + argument = ['-v', 'yml_item', '-y', os.path.join(self.test_dir, 'test.yml')] + output = yaml.safe_load(self.run_script(argument)) self.assertListEqual(output, ['value1', 'value2']) for asic in range(NUM_ASIC): - output = yaml.load(self.run_script_for_asic(argument, asic, self.port_config[asic])) + output = yaml.safe_load(self.run_script_for_asic(argument, 
asic, self.port_config[asic])) self.assertListEqual(output, ['value1', 'value2']) def test_render_template(self): - argument = '-y ' + os.path.join(self.test_dir, 'test.yml') + ' -t ' + os.path.join(self.test_dir, 'test.j2') + argument = ['-y', os.path.join(self.test_dir, 'test.yml'), '-t', os.path.join(self.test_dir, 'test.j2')] output = self.run_script(argument) self.assertEqual(output.strip(), 'value1\nvalue2') for asic in range(NUM_ASIC): @@ -126,37 +127,37 @@ def test_render_template(self): self.assertEqual(output.strip(), 'value1\nvalue2') def test_metadata_tacacs(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.sample_port_config + '" --var-json "TACPLUS_SERVER"' + argument = ['-m', self.sample_graph, '-p', self.sample_port_config, '--var-json', "TACPLUS_SERVER"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {'123.46.98.21': {'priority': '1', 'tcp_port': '49'}}) #TACPLUS_SERVER not present in the asic configuration. - argument = '-m "' + self.sample_graph + '" --var-json "TACPLUS_SERVER"' + argument = ['-m', self.sample_graph, '--var-json', "TACPLUS_SERVER"] for asic in range(NUM_ASIC): output = json.loads(self.run_script_for_asic(argument, asic, self.port_config[asic])) self.assertDictEqual(output, {}) def test_metadata_ntp(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.sample_port_config + '" --var-json "NTP_SERVER"' + argument = ['-m', self.sample_graph, '-p', self.sample_port_config, '--var-json', "NTP_SERVER"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {'17.39.1.130': {}, '17.39.1.129': {}}) #NTP data is present only in the host config - argument = '-m "' + self.sample_graph + '" --var-json "NTP_SERVER"' + argument = ['-m', self.sample_graph, '--var-json', "NTP_SERVER"] for asic in range(NUM_ASIC): output = json.loads(self.run_script_for_asic(argument, asic, self.port_config[asic])) print("Log:asic{} sku {}".format(asic,output)) self.assertDictEqual(output, 
{}) def test_mgmt_port(self): - argument = '-m "' + self.sample_graph + '" -p "' + self.sample_port_config + '" --var-json "MGMT_PORT"' + argument = ['-m', self.sample_graph, '-p', self.sample_port_config, '--var-json', "MGMT_PORT"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {'eth0': {'alias': 'eth0', 'admin_status': 'up'}}) - argument = '-m "' + self.sample_graph + '" --var-json "MGMT_PORT"' + argument = ['-m', self.sample_graph, '--var-json', "MGMT_PORT"] for asic in range(NUM_ASIC): output = json.loads(self.run_script_for_asic(argument, asic, self.port_config[asic])) self.assertDictEqual(output, {'eth0': {'alias': 'eth0', 'admin_status': 'up'}}) def test_frontend_asic_portchannels(self): - argument = "-m {} -p {} -n asic0 --var-json \"PORTCHANNEL\"".format(self.sample_graph, self.port_config[0]) + argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "--var-json", "PORTCHANNEL"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, \ {'PortChannel0002': {'admin_status': 'up', 'min_links': '2', 'members': ['Ethernet0', 'Ethernet4'], 'mtu': '9100', 'tpid': '0x8100'}, @@ -164,14 +165,14 @@ def test_frontend_asic_portchannels(self): 'PortChannel4002': {'admin_status': 'up', 'min_links': '2', 'members': ['Ethernet-BP8', 'Ethernet-BP12'], 'mtu': '9100', 'tpid': '0x8100'}}) def test_backend_asic_portchannels(self): - argument = "-m {} -p {} -n asic3 --var-json \"PORTCHANNEL\"".format(self.sample_graph, self.port_config[3]) + argument = ["-m", self.sample_graph, "-p", self.port_config[3], "-n", "asic3", "--var-json", "PORTCHANNEL"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, \ {'PortChannel4013': {'admin_status': 'up', 'min_links': '2', 'members': ['Ethernet-BP384', 'Ethernet-BP388'], 'mtu': '9100', 'tpid': '0x8100'}, 'PortChannel4014': {'admin_status': 'up', 'min_links': '2', 'members': ['Ethernet-BP392', 'Ethernet-BP396'], 'mtu': '9100', 'tpid': '0x8100'}}) 
def test_frontend_asic_portchannel_mem(self): - argument = "-m {} -p {} -n asic0 -v \"PORTCHANNEL_MEMBER.keys()|list\"".format(self.sample_graph, self.port_config[0]) + argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "-v", "PORTCHANNEL_MEMBER.keys()|list"] output = self.run_script(argument) self.assertEqual( utils.liststr_to_dict(output.strip()), @@ -179,7 +180,7 @@ def test_frontend_asic_portchannel_mem(self): ) def test_backend_asic_portchannels_mem(self): - argument = "-m {} -p {} -n asic3 -v \"PORTCHANNEL_MEMBER.keys()|list\"".format(self.sample_graph, self.port_config[3]) + argument = ["-m", self.sample_graph, "-p", self.port_config[3], "-n", "asic3", "-v", "PORTCHANNEL_MEMBER.keys()|list"] output = self.run_script(argument) self.assertEqual( utils.liststr_to_dict(output.strip()), @@ -187,7 +188,7 @@ def test_backend_asic_portchannels_mem(self): ) def test_frontend_asic_portchannel_intf(self): - argument = "-m {} -p {} -n asic0 -v \"PORTCHANNEL_INTERFACE.keys()|list\"".format(self.sample_graph, self.port_config[0]) + argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "-v", "PORTCHANNEL_INTERFACE.keys()|list"] output = self.run_script(argument) self.assertEqual( utils.liststr_to_dict(output.strip()), @@ -195,7 +196,7 @@ def test_frontend_asic_portchannel_intf(self): ) def test_frontend_asic_routerport_intf(self): - argument = "-m {} -p {} -n asic0 -v \"INTERFACE.keys()|list\"".format(self.sample_graph1, self.port_config[0]) + argument = ["-m", self.sample_graph1, "-p", self.port_config[0], "-n", "asic0", "-v", "INTERFACE.keys()|list"] output = self.run_script(argument) self.assertEqual( utils.liststr_to_dict(output.strip()), @@ -203,7 +204,7 @@ def test_frontend_asic_routerport_intf(self): ) def test_backend_asic_portchannel_intf(self): - argument = "-m {} -p {} -n asic3 -v \"PORTCHANNEL_INTERFACE.keys()|list\"".format(self.sample_graph, self.port_config[3]) + argument = ["-m", self.sample_graph, "-p", 
self.port_config[3], "-n", "asic3", "-v", "PORTCHANNEL_INTERFACE.keys()|list"] output = self.run_script(argument) self.assertEqual( utils.liststr_to_dict(output.strip()), @@ -211,7 +212,7 @@ def test_backend_asic_portchannel_intf(self): ) def test_frontend_asic_ports(self): - argument = "-m {} -p {} -n asic0 --var-json \"PORT\"".format(self.sample_graph, self.port_config[0]) + argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "--var-json", "PORT"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {"Ethernet0": { "admin_status": "up", "alias": "Ethernet1/1", "asic_port_name": "Eth0-ASIC0", "description": "01T2:Ethernet1", "index": "0", "lanes": "33,34,35,36", "mtu": "9100", "tpid": "0x8100", "pfc_asym": "off", "role": "Ext", "speed": "40000", "autoneg": "on" }, @@ -224,7 +225,7 @@ def test_frontend_asic_ports(self): "Ethernet-BP12": { "admin_status": "up", "alias": "Eth7-ASIC0", "asic_port_name": "Eth7-ASIC0", "description": "ASIC3:Eth1-ASIC3", "index": "3", "lanes": "25,26,27,28", "mtu": "9100", "tpid": "0x8100", "pfc_asym": "off", "role": "Int", "speed": "40000" }}) def test_frontend_asic_ports_config_db(self): - argument = "-m {} -p {} -n asic0 --var-json \"PORT\"".format(self.sample_graph, self.port_config[0]) + argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "--var-json", "PORT"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {"Ethernet0": { "admin_status": "up", "alias": "Ethernet1/1", "asic_port_name": "Eth0-ASIC0", "description": "01T2:Ethernet1", "index": "0", "lanes": "33,34,35,36", "mtu": "9100", "tpid": "0x8100", "pfc_asym": "off", "role": "Ext", "speed": "40000", "autoneg": "on" }, @@ -237,7 +238,7 @@ def test_frontend_asic_ports_config_db(self): "Ethernet-BP12": { "admin_status": "up", "alias": "Eth7-ASIC0", "asic_port_name": "Eth7-ASIC0", "description": "ASIC3:Eth1-ASIC3", "index": "3", "lanes": "25,26,27,28", "mtu": "9100", "tpid": 
"0x8100", "pfc_asym": "off", "role": "Int", "speed": "40000" }}) def test_frontend_asic_device_neigh(self): - argument = "-m {} -p {} -n asic0 --var-json \"DEVICE_NEIGHBOR\"".format(self.sample_graph, self.port_config[0]) + argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "--var-json", "DEVICE_NEIGHBOR"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, \ {'Ethernet0': {'name': '01T2', 'port': 'Ethernet1'}, @@ -248,7 +249,7 @@ def test_frontend_asic_device_neigh(self): 'Ethernet-BP8': {'name': 'ASIC3', 'port': 'Eth0-ASIC3'}}) def test_frontend_asic_device_neigh_metadata(self): - argument = "-m {} -p {} -n asic0 --var-json \"DEVICE_NEIGHBOR_METADATA\"".format(self.sample_graph, self.port_config[0]) + argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "--var-json", "DEVICE_NEIGHBOR_METADATA"] output = json.loads(self.run_script(argument)) print(output) self.assertDictEqual(output, \ @@ -257,7 +258,7 @@ def test_frontend_asic_device_neigh_metadata(self): 'ASIC2': {'lo_addr_v6': '::/0', 'mgmt_addr': '0.0.0.0/0', 'hwsku': 'multi-npu-asic', 'lo_addr': '0.0.0.0/0', 'type': 'Asic', 'mgmt_addr_v6': '::/0'}}) def test_backend_asic_device_neigh(self): - argument = "-m {} -p {} -n asic3 --var-json \"DEVICE_NEIGHBOR\"".format(self.sample_graph, self.port_config[3]) + argument = ["-m", self.sample_graph, "-p", self.port_config[3], "-n", "asic3", "--var-json", "DEVICE_NEIGHBOR"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, \ {'Ethernet-BP396': {'name': 'ASIC1', 'port': 'Eth7-ASIC1'}, @@ -266,7 +267,7 @@ def test_backend_asic_device_neigh(self): 'Ethernet-BP388': {'name': 'ASIC0', 'port': 'Eth7-ASIC0'}}) def test_backend_device_neigh_metadata(self): - argument = "-m {} -p {} -n asic3 --var-json \"DEVICE_NEIGHBOR_METADATA\"".format(self.sample_graph, self.port_config[3]) + argument = ["-m", self.sample_graph, "-p", self.port_config[3], "-n", "asic3", "--var-json", 
"DEVICE_NEIGHBOR_METADATA"] output = json.loads(self.run_script(argument)) print(output) self.assertDictEqual(output, \ @@ -274,28 +275,28 @@ def test_backend_device_neigh_metadata(self): 'ASIC0': {'lo_addr_v6': '::/0', 'mgmt_addr': '0.0.0.0/0', 'hwsku': 'multi-npu-asic', 'lo_addr': '0.0.0.0/0', 'type': 'Asic', 'mgmt_addr_v6': '::/0'}}) def test_frontend_bgp_neighbor(self): - argument = "-m {} -p {} -n asic0 --var-json \"BGP_NEIGHBOR\"".format(self.sample_graph, self.port_config[0]) + argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "--var-json", "BGP_NEIGHBOR"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, \ {'10.0.0.1': {'rrclient': 0, 'name': '01T2', 'local_addr': '10.0.0.0', 'nhopself': 0, 'holdtime': '10', 'asn': '65200', 'keepalive': '3'}, 'fc00::2': {'rrclient': 0, 'name': '01T2', 'local_addr': 'fc00::1', 'nhopself': 0, 'holdtime': '10', 'asn': '65200', 'keepalive': '3'}}) def test_frontend_asic_bgp_neighbor(self): - argument = "-m {} -p {} -n asic0 --var-json \"BGP_INTERNAL_NEIGHBOR\"".format(self.sample_graph, self.port_config[0]) + argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "--var-json", "BGP_INTERNAL_NEIGHBOR"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, \ {'10.1.0.0': {'rrclient': 0, 'name': 'ASIC2', 'local_addr': '10.1.0.1', 'nhopself': 0, 'admin_status': 'up', 'holdtime': '0', 'asn': '65100', 'keepalive': '0'}, '10.1.0.2': {'rrclient': 0, 'name': 'ASIC3', 'local_addr': '10.1.0.3', 'nhopself': 0, 'admin_status': 'up', 'holdtime': '0', 'asn': '65100', 'keepalive': '0'}}) def test_backend_asic_bgp_neighbor(self): - argument = "-m {} -p {} -n asic3 --var-json \"BGP_INTERNAL_NEIGHBOR\"".format(self.sample_graph, self.port_config[3]) + argument = ["-m", self.sample_graph, "-p", self.port_config[3], "-n", "asic3", "--var-json", "BGP_INTERNAL_NEIGHBOR"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, \ 
{'10.1.0.7': {'rrclient': 0, 'name': 'ASIC1', 'local_addr': '10.1.0.6', 'nhopself': 0, 'admin_status': 'up', 'holdtime': '0', 'asn': '65100', 'keepalive': '0'}, '10.1.0.3': {'rrclient': 0, 'name': 'ASIC0', 'local_addr': '10.1.0.2', 'nhopself': 0, 'admin_status': 'up', 'holdtime': '0', 'asn': '65100', 'keepalive': '0'}}) def test_device_asic_metadata(self): - argument = "-m {} --var-json DEVICE_METADATA".format(self.sample_graph) + argument = ["-m", self.sample_graph, "--var-json", "DEVICE_METADATA"] for asic in range(NUM_ASIC): output = json.loads(self.run_script_for_asic(argument, asic,self.port_config[asic])) asic_name = "asic{}".format(asic) @@ -309,7 +310,7 @@ def test_device_asic_metadata(self): self.assertEqual(output['localhost']['deployment_id'], "1") def test_global_asic_acl(self): - argument = "-m {} -p {} --var-json \"ACL_TABLE\"".format(self.sample_graph, self.sample_port_config) + argument = ["-m", self.sample_graph, "-p", self.sample_port_config, "--var-json", "ACL_TABLE"] output = json.loads(self.run_script(argument)) exp = {\ 'SNMP_ACL': {'policy_desc': 'SNMP_ACL', 'type': 'CTRLPLANE', 'stage': 'ingress', 'services': ['SNMP']}, @@ -326,7 +327,7 @@ def test_global_asic_acl(self): self.assertDictEqual(output, exp) def test_global_asic_acl1(self): - argument = "-m {} -p {} --var-json \"ACL_TABLE\"".format(self.sample_graph1, self.sample_port_config) + argument = ["-m", self.sample_graph1, "-p", self.sample_port_config, "--var-json", "ACL_TABLE"] self.maxDiff = None output = json.loads(self.run_script(argument)) exp = {\ @@ -343,7 +344,7 @@ def test_global_asic_acl1(self): self.assertDictEqual(output, exp) def test_front_end_asic_acl(self): - argument = "-m {} -p {} -n asic0 --var-json \"ACL_TABLE\"".format(self.sample_graph, self.port_config[0]) + argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "--var-json", "ACL_TABLE"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {\ 'DATAACL': 
{'policy_desc': 'DATAACL', 'ports': ['PortChannel0002'], 'stage': 'ingress', 'type': 'L3'}, @@ -353,7 +354,7 @@ def test_front_end_asic_acl(self): 'SSH_ONLY': {'policy_desc': 'SSH_ONLY', 'services': ['SSH'], 'stage': 'ingress', 'type': 'CTRLPLANE'}}) def test_front_end_asic_acl1(self): - argument = "-m {} -p {} -n asic0 --var-json \"ACL_TABLE\"".format(self.sample_graph1, self.port_config[0]) + argument = ["-m", self.sample_graph1, "-p", self.port_config[0], "-n", "asic0", "--var-json", "ACL_TABLE"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {\ 'EVERFLOW': {'policy_desc': 'EVERFLOW', 'ports': ['Ethernet0','Ethernet4'], 'stage': 'ingress', 'type': 'MIRROR'}, @@ -363,18 +364,18 @@ def test_front_end_asic_acl1(self): def test_back_end_asic_acl(self): - argument = "-m {} -p {} -n asic3 --var-json \"ACL_TABLE\"".format(self.sample_graph, self.port_config[3]) + argument = ["-m", self.sample_graph, "-p", self.port_config[3], "-n", "asic3", "--var-json", "ACL_TABLE"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {}) def test_back_end_asic_acl1(self): - argument = "-m {} -p {} -n asic3 --var-json \"ACL_TABLE\"".format(self.sample_graph1, self.port_config[3]) + argument = ["-m", self.sample_graph1, "-p", self.port_config[3], "-n", "asic3", "--var-json", "ACL_TABLE"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {}) def test_loopback_intfs(self): - argument = "-m {} -p {} --var-json \"LOOPBACK_INTERFACE\"".format(self.sample_graph, self.sample_port_config) + argument = ["-m", self.sample_graph, "-p", self.sample_port_config, "--var-json", "LOOPBACK_INTERFACE"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {\ "Loopback0": {}, @@ -382,7 +383,7 @@ def test_loopback_intfs(self): "Loopback0|FC00:1::32/128": {}}) # The asic configuration should have 2 loopback interfaces - argument = "-m {} -p {} -n asic0 --var-json 
\"LOOPBACK_INTERFACE\"".format(self.sample_graph, self.port_config[0]) + argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "--var-json", "LOOPBACK_INTERFACE"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, { \ "Loopback0": {}, @@ -392,7 +393,7 @@ def test_loopback_intfs(self): "Loopback4096|8.0.0.0/32": {}, "Loopback4096|FD00:1::32/128": {}}) - argument = "-m {} -p {} -n asic3 --var-json \"LOOPBACK_INTERFACE\"".format(self.sample_graph, self.port_config[3]) + argument = ["-m", self.sample_graph, "-p", self.port_config[3], "-n", "asic3", "--var-json", "LOOPBACK_INTERFACE"] output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {\ "Loopback0": {}, @@ -425,9 +426,7 @@ def test_buffers_multi_asic_template(self): ) # asic0 - mix of front end and back end ports shutil.copy2(buffer_template, device_config_dir) - argument = "-m {} -p {} -n asic0 -t {}".format( - self.sample_graph, port_config_ini_asic0, device_buffer_template - ) + argument = ["-m", self.sample_graph, "-p", port_config_ini_asic0, "-n", "asic0", "-t", device_buffer_template] output = json.loads(self.run_script(argument)) os.remove(os.path.join(device_config_dir, "buffers_config.j2")) self.assertDictEqual( From a0055abb5d245246187746ad674728f9e30b6445 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Mon, 31 Oct 2022 07:44:54 -0700 Subject: [PATCH 123/174] [sonic-yang-mgmt] Replace subprocess using with shell=True (#12537) Signed-off-by: maipbui #### Why I did it `subprocess` is used with `shell=True`, which is very dangerous for shell injection. 
#### How I did it remove `shell=True`, use `shell=False` #### How to verify it Pass UT --- src/sonic-yang-mgmt/tests/test_cfghelp.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/sonic-yang-mgmt/tests/test_cfghelp.py b/src/sonic-yang-mgmt/tests/test_cfghelp.py index 2c09625a0e6a..5867c78e5282 100644 --- a/src/sonic-yang-mgmt/tests/test_cfghelp.py +++ b/src/sonic-yang-mgmt/tests/test_cfghelp.py @@ -1,7 +1,6 @@ import json import subprocess import os - from unittest import TestCase output1="""\ @@ -126,8 +125,8 @@ def setUp(self): self.script_file = os.path.join(self.test_dir, '..', 'sonic-cfg-help') def run_script(self, argument): - print('\n Running sonic-cfg-help ' + argument) - output = subprocess.check_output(self.script_file + ' ' + argument, shell=True) + print('\n Running sonic-cfg-help ' + ' '.join(argument)) + output = subprocess.check_output([self.script_file] + argument) output = output.decode() @@ -139,32 +138,32 @@ def run_script(self, argument): return output def test_dummy_run(self): - argument = '' + argument = [] output = self.run_script(argument) self.assertEqual(output, output1) def test_single_table(self): - argument = '-t AUTO_TECHSUPPORT' + argument = ['-t', 'AUTO_TECHSUPPORT'] output = self.run_script(argument) self.assertEqual(output, techsupport_table_output) def test_single_field(self): - argument = '-t AUTO_TECHSUPPORT -f state' + argument = ['-t', 'AUTO_TECHSUPPORT', '-f', 'state'] output = self.run_script(argument) self.assertEqual(output, techsupport_table_field_output) def test_leaf_list(self): - argument = '-t PORTCHANNEL -f members' + argument = ['-t', 'PORTCHANNEL', '-f', 'members'] output = self.run_script(argument) self.assertEqual(output, portchannel_table_field_output) def test_leaf_list_map(self): - argument = '-t DSCP_TO_TC_MAP' + argument = ['-t', 'DSCP_TO_TC_MAP'] output = self.run_script(argument) self.maxDiff = None self.assertEqual(output, dscp_to_tc_table_field_output) def 
test_when_condition(self): - argument = '-t ACL_RULE -f ICMP_TYPE' + argument = ['-t', 'ACL_RULE', '-f', 'ICMP_TYPE'] output = self.run_script(argument) self.assertEqual(output, acl_rule_table_field_output) From 0fcd219c3bbcfdf9d96e291793579576faf77814 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Mon, 31 Oct 2022 08:12:03 -0700 Subject: [PATCH 124/174] [sonic-ctrmgrd] Replace os.system and remove subprocess with shell=True (#12534) Signed-off-by: maipbui #### Why I did it `subprocess.Popen()` and `subprocess.run()` is used with `shell=True`, which is very dangerous for shell injection. `os` - not secure against maliciously constructed input and dangerous if used to evaluate dynamic content #### How I did it Replace `os` by `subprocess`, remove `shell=True` #### How to verify it Passed UT Tested in DUT --- src/sonic-ctrmgrd/ctrmgr/container_startup.py | 11 +++-- src/sonic-ctrmgrd/ctrmgr/ctrmgr_iptables.py | 13 +++--- src/sonic-ctrmgrd/ctrmgr/ctrmgrd.py | 6 +-- .../tests/ctrmgr_iptables_test.py | 41 ++++++++++--------- 4 files changed, 37 insertions(+), 34 deletions(-) diff --git a/src/sonic-ctrmgrd/ctrmgr/container_startup.py b/src/sonic-ctrmgrd/ctrmgr/container_startup.py index c56160aa488d..7fcfbfc8a29a 100755 --- a/src/sonic-ctrmgrd/ctrmgr/container_startup.py +++ b/src/sonic-ctrmgrd/ctrmgr/container_startup.py @@ -4,11 +4,11 @@ import datetime import inspect import json -import subprocess import syslog import time from swsscommon import swsscommon +from sonic_py_common.general import getstatusoutput_noshell_pipe # DB field names SET_OWNER = "set_owner" @@ -114,9 +114,12 @@ def get_docker_id(): # Read the container-id # Note: This script runs inside the context of container # - cmd = 'cat /proc/self/cgroup | grep -e ":memory:" | rev | cut -f1 -d\'/\' | rev' - proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) - output = proc.communicate()[0].decode("utf-8") + cmd0 = ['cat', '/proc/self/cgroup'] + cmd1 = ['grep', '-e', ":memory:"] + cmd2 = ['rev'] + 
cmd3 = ['cut', '-f1', '-d', '/'] + cmd4 = ['rev'] + _, output = getstatusoutput_noshell_pipe(cmd0, cmd1, cmd2, cmd3, cmd4) return output.strip()[:12] diff --git a/src/sonic-ctrmgrd/ctrmgr/ctrmgr_iptables.py b/src/sonic-ctrmgrd/ctrmgr/ctrmgr_iptables.py index 74b9bfe44fc9..aae2131a4985 100644 --- a/src/sonic-ctrmgrd/ctrmgr/ctrmgr_iptables.py +++ b/src/sonic-ctrmgrd/ctrmgr/ctrmgr_iptables.py @@ -105,8 +105,8 @@ def iptable_proxy_rule_upd(ip_str, port = SQUID_PORT): while True: num += 1 - cmd = "sudo iptables -t nat -n -L OUTPUT {}".format(num) - proc = subprocess.run(cmd, shell=True, capture_output=True) + cmd = ["sudo", "iptables", "-t", "nat", "-n", "-L", "OUTPUT", str(num)] + proc = subprocess.run(cmd, shell=False, capture_output=True) check_proc(proc) if not proc.stdout: @@ -119,16 +119,15 @@ def iptable_proxy_rule_upd(ip_str, port = SQUID_PORT): found = True else: # Duplicate or different IP - delete it - cmd = "sudo iptables -t nat -D OUTPUT {}".format(num) - proc = subprocess.run(cmd, shell=True, capture_output=True) + cmd = ["sudo", "iptables", "-t", "nat", "-D", "OUTPUT", str(num)] + proc = subprocess.run(cmd, shell=False, capture_output=True) check_proc(proc) # Decrement number to accommodate deleted rule num -= 1 if destination and not found: - cmd = "sudo iptables -t nat -A OUTPUT -p tcp -d {} --dport {} -j DNAT --to-destination {}".format( - DST_IP, DST_PORT, destination) - proc = subprocess.run(cmd, shell=True, capture_output=True) + cmd = ["sudo", "iptables", "-t", "nat", "-A", "OUTPUT", "-p", "tcp", "-d", DST_IP, "--dport", DST_PORT, "-j", "DNAT", "--to-destination", destination] + proc = subprocess.run(cmd, shell=False, capture_output=True) check_proc(proc) diff --git a/src/sonic-ctrmgrd/ctrmgr/ctrmgrd.py b/src/sonic-ctrmgrd/ctrmgr/ctrmgrd.py index 2defb6e45387..154591a695fa 100755 --- a/src/sonic-ctrmgrd/ctrmgr/ctrmgrd.py +++ b/src/sonic-ctrmgrd/ctrmgr/ctrmgrd.py @@ -6,7 +6,7 @@ import os import sys import syslog - +import subprocess from 
collections import defaultdict from ctrmgr.ctrmgr_iptables import iptable_proxy_rule_upd @@ -119,7 +119,7 @@ def ts_now(): def is_systemd_active(feat): if not UNIT_TESTING: - status = os.system('systemctl is-active --quiet {}'.format(feat)) + status = subprocess.call(['systemctl', 'is-active', '--quiet', str(feat)]) else: status = UNIT_TESTING_ACTIVE log_debug("system status for {}: {}".format(feat, str(status))) @@ -129,7 +129,7 @@ def is_systemd_active(feat): def restart_systemd_service(server, feat, owner): log_debug("Restart service {} to owner:{}".format(feat, owner)) if not UNIT_TESTING: - status = os.system("systemctl restart {}".format(feat)) + status = subprocess.call(["systemctl", "restart", str(feat)]) else: server.mod_db_entry(STATE_DB_NAME, FEATURE_TABLE, feat, {"restart": "true"}) diff --git a/src/sonic-ctrmgrd/tests/ctrmgr_iptables_test.py b/src/sonic-ctrmgrd/tests/ctrmgr_iptables_test.py index fc28cbecc8da..cb7178fb43bd 100755 --- a/src/sonic-ctrmgrd/tests/ctrmgr_iptables_test.py +++ b/src/sonic-ctrmgrd/tests/ctrmgr_iptables_test.py @@ -103,30 +103,31 @@ def __init__(self, ret, stdout, stderr): def mock_subproc_run(cmd, shell, capture_output): - cmd_prefix = "sudo iptables -t nat " - list_cmd = "{}-n -L OUTPUT ".format(cmd_prefix) - del_cmd = "{}-D OUTPUT ".format(cmd_prefix) - ins_cmd = "{}-A OUTPUT -p tcp -d ".format(cmd_prefix) + list_cmd = ["sudo", "iptables", "-t", "nat", "-n", "-L", "OUTPUT"] + del_cmd = ["sudo", "iptables", "-t", "nat", "-D", "OUTPUT"] + ins_cmd = ["sudo", "iptables", "-t", "nat", "-A", "OUTPUT", "-p", "tcp", "-d"] - assert shell + assert shell == False print("cmd={}".format(cmd)) - if cmd.startswith(list_cmd): - num = int(cmd[len(list_cmd):]) - out = current_rules[num] if len(current_rules) > num else "" - return proc(0, out, "") - - if cmd.startswith(del_cmd): - num = int(cmd[len(del_cmd):]) - if num >= len(current_rules): - print("delete num={} is greater than len={}".format(num, len(current_rules))) - print("current_rules 
= {}".format(current_rules)) - assert False - del current_rules[num] - return proc(0, "", "") + if list_cmd == cmd[:len(list_cmd)]: + if len(cmd) == len(list_cmd) + 1: + num = int(cmd[len(list_cmd)]) + out = current_rules[num] if len(current_rules) > num else "" + return proc(0, out, "") + + if del_cmd == cmd[:len(del_cmd)]: + if len(cmd) == len(del_cmd) + 1: + num = int(cmd[len(del_cmd)]) + if num >= len(current_rules): + print("delete num={} is greater than len={}".format(num, len(current_rules))) + print("current_rules = {}".format(current_rules)) + assert False + del current_rules[num] + return proc(0, "", "") - if cmd.startswith(ins_cmd): - l = cmd.split() + if ins_cmd == cmd[:len(ins_cmd)]: + l = cmd assert len(l) == 16 rule = "DNAT tcp -- 0.0.0.0/0 {} tcp dpt:{} to:{}".format(l[9], l[11], l[-1]) current_rules.append(rule) From 5d83d424b1ae2dc22c6344afcc18c2375f7752a5 Mon Sep 17 00:00:00 2001 From: Vivek Date: Mon, 31 Oct 2022 12:16:05 -0700 Subject: [PATCH 125/174] Added BUILD flags to provision for building the kernel with non-upstream patches (#12428) * Added ENV vars for non-upstream patches Signed-off-by: Vivek Reddy * Made MLNX_PATCH_LOC an absolute path Signed-off-by: Vivek Reddy * Added non-upstream-patches dir Signed-off-by: Vivek Reddy * Update README.md * Addressed comments * Env vars updated Signed-off-by: Vivek Reddy * Readme updated Signed-off-by: Vivek Reddy Signed-off-by: Vivek Reddy --- .../mellanox/non-upstream-patches/README.md | 31 +++++++++++++++++++ platform/mellanox/rules.mk | 3 ++ rules/linux-kernel.mk | 10 ++++++ slave.mk | 4 ++- 4 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 platform/mellanox/non-upstream-patches/README.md diff --git a/platform/mellanox/non-upstream-patches/README.md b/platform/mellanox/non-upstream-patches/README.md new file mode 100644 index 000000000000..9138d750cbf4 --- /dev/null +++ b/platform/mellanox/non-upstream-patches/README.md @@ -0,0 +1,31 @@ +## Mellanox non-upstream linux kernel 
patches ## + +To include non-upstream patches into the sonic-linux image during build time, this folder must contain a patch archive. + +### Structure of the patch archive + + 1. It should contain a file named series. series should provide an order in which the patches have to be applied + ``` + admin@build-server:/sonic-buildimage/src/sonic-linux-kernel$ cat linux-5.10.103/non_upstream_patches/series + mlx5-Refactor-module-EEPROM-query.patch + mlx5-Implement-get_module_eeprom_by_page.patch + mlx5-Add-support-for-DSFP-module-EEPROM-dumps.patch + ``` + 2. All the patches should be present in the same folder where series resides. + 3. Developers should make sure patches apply cleanly over the existing patches present in the src/sonic-linux-kernel . + 4. Name of the tarball should match with the one specified under EXTERNAL_KERNEL_PATCH_TAR + +#### Example +``` +admin@build-server:/sonic-buildimage/platform/mellanox/non-upstream-patches$ tar -tf patches.tar.gz +./ +./mlx5-Implement-get_module_eeprom_by_page.patch +./mlx5-Add-support-for-DSFP-module-EEPROM-dumps.patch +./series +./mlx5-Refactor-module-EEPROM-query.patch +``` + +### Include the archive while building sonic linux kernel + +Set `INCLUDE_EXTERNAL_PATCH_TAR=y` using `SONIC_OVERRIDE_BUILD_VARS` to include these changes before building the kernel. 
+- Eg: `NOJESSIE=1 NOSTRETCH=1 NOBUSTER=1 make SONIC_OVERRIDE_BUILD_VARS=' INCLUDE_EXTERNAL_PATCH_TAR=y ' target/debs/bullseye/linux-headers-5.10.0-12-2-common_5.10.103-1_all.deb` diff --git a/platform/mellanox/rules.mk b/platform/mellanox/rules.mk index 5b5e55cdf494..43aa1829953e 100644 --- a/platform/mellanox/rules.mk +++ b/platform/mellanox/rules.mk @@ -51,4 +51,7 @@ $(DOCKER_PLATFORM_MONITOR)_DEPENDS += $(APPLIBS) $(SX_COMPLIB) $(SXD_LIBS) $(SX_ # Force the target bootloader for mellanox platforms to grub regardless of arch TARGET_BOOTLOADER = grub +# location for the platform specific external kernel patches tarball +override EXTERNAL_KERNEL_PATCH_TAR := $(BUILD_WORKDIR)/$(PLATFORM_PATH)/non-upstream-patches/patches.tar.gz + export SONIC_BUFFER_MODEL=dynamic diff --git a/rules/linux-kernel.mk b/rules/linux-kernel.mk index b51c4d12d05f..23d85054968e 100644 --- a/rules/linux-kernel.mk +++ b/rules/linux-kernel.mk @@ -9,7 +9,17 @@ ifeq ($(CONFIGURED_ARCH), armhf) KVERSION = $(KVERSION_SHORT)-armmp endif +# Place an URL here to .tar.gz file if you want to include those patches +EXTERNAL_KERNEL_PATCH_URL = +# Set y to include non upstream patches tarball provided by the corresponding platform +INCLUDE_EXTERNAL_PATCH_TAR = n +# platforms should override this and provide an absolute path to the tarball +EXTERNAL_KERNEL_PATCH_TAR = + export KVERSION_SHORT KVERSION KERNEL_VERSION KERNEL_SUBVERSION +export EXTERNAL_KERNEL_PATCH_URL +export INCLUDE_EXTERNAL_PATCH_TAR +export EXTERNAL_KERNEL_PATCH_TAR LINUX_HEADERS_COMMON = linux-headers-$(KVERSION_SHORT)-common_$(KERNEL_VERSION)-$(KERNEL_SUBVERSION)_all.deb $(LINUX_HEADERS_COMMON)_SRC_PATH = $(SRC_PATH)/sonic-linux-kernel diff --git a/slave.mk b/slave.mk index 54a01aaa1a29..561f6b47db3f 100644 --- a/slave.mk +++ b/slave.mk @@ -43,7 +43,8 @@ BULLSEYE_DEBS_PATH = $(TARGET_PATH)/debs/bullseye BULLSEYE_FILES_PATH = $(TARGET_PATH)/files/bullseye DBG_IMAGE_MARK = dbg DBG_SRC_ARCHIVE_FILE = $(TARGET_PATH)/sonic_src.tar.gz 
-DPKG_ADMINDIR_PATH = /sonic/dpkg +BUILD_WORKDIR = /sonic +DPKG_ADMINDIR_PATH = $(BUILD_WORKDIR)/dpkg CONFIGURED_PLATFORM := $(shell [ -f .platform ] && cat .platform || echo generic) PLATFORM_PATH = platform/$(CONFIGURED_PLATFORM) @@ -84,6 +85,7 @@ export MULTIARCH_QEMU_ENVIRON export DOCKER_BASE_ARCH export CROSS_BUILD_ENVIRON export BLDENV +export BUILD_WORKDIR ############################################################################### ## Utility rules From 0dd4d5dda9be48a523b3015ca1c2c324e25c64f2 Mon Sep 17 00:00:00 2001 From: Saikrishna Arcot Date: Mon, 31 Oct 2022 16:00:05 -0700 Subject: [PATCH 126/174] [openssh]: Restore behavior of ClientAliveCountMax=0 (#12549) * [openssh]: Restore behavior of ClientAliveCountMax=0 OpenSSH 8.2 changed the behavior of ClientAliveCountMax=0 such that setting it to 0 disables connection-killing entirely when the connection is idle. Revert that change. Signed-off-by: Saikrishna Arcot * Remove build-dep command that should not be there Signed-off-by: Saikrishna Arcot Signed-off-by: Saikrishna Arcot --- build_debian.sh | 2 +- rules/sonic-fips.mk | 2 +- src/openssh/Makefile | 1 - ...334996-make-sshd_config-ClientAliveC.patch | 48 +++++++++++++++++++ src/openssh/patch/series | 1 + 5 files changed, 51 insertions(+), 3 deletions(-) create mode 100644 src/openssh/patch/0002-Revert-commit-69334996-make-sshd_config-ClientAliveC.patch diff --git a/build_debian.sh b/build_debian.sh index 01a10d78d678..dd98b052e16e 100755 --- a/build_debian.sh +++ b/build_debian.sh @@ -468,7 +468,7 @@ rm /files/etc/ssh/sshd_config/ClientAliveCountMax touch /files/etc/ssh/sshd_config/EmptyLineHack rename /files/etc/ssh/sshd_config/EmptyLineHack "" set /files/etc/ssh/sshd_config/ClientAliveInterval 300 -set /files/etc/ssh/sshd_config/ClientAliveCountMax 1 +set /files/etc/ssh/sshd_config/ClientAliveCountMax 0 ins #comment before /files/etc/ssh/sshd_config/ClientAliveInterval set 
/files/etc/ssh/sshd_config/#comment[following-sibling::*[1][self::ClientAliveInterval]] "Close inactive client sessions after 5 minutes" rm /files/etc/ssh/sshd_config/MaxAuthTries diff --git a/rules/sonic-fips.mk b/rules/sonic-fips.mk index e5b6e4ad3547..8303918e2e1b 100644 --- a/rules/sonic-fips.mk +++ b/rules/sonic-fips.mk @@ -1,6 +1,6 @@ # fips packages -FIPS_VERSION = 0.3 +FIPS_VERSION = 0.4 FIPS_OPENSSL_VERSION = 1.1.1n-0+deb11u3+fips FIPS_OPENSSH_VERSION = 8.4p1-5+deb11u1+fips FIPS_PYTHON_MAIN_VERSION = 3.9 diff --git a/src/openssh/Makefile b/src/openssh/Makefile index ec7942fe7e5e..29df8e76292c 100644 --- a/src/openssh/Makefile +++ b/src/openssh/Makefile @@ -23,7 +23,6 @@ ifeq ($(CROSS_BUILD_ENVIRON), y) patch -p1 < ../patch/cross-compile-changes.patch dpkg-buildpackage -rfakeroot -b -us -uc -a$(CONFIGURED_ARCH) -Pcross,nocheck -j$(SONIC_CONFIG_MAKE_JOBS) --admindir $(SONIC_DPKG_ADMINDIR) else - sudo http_proxy=$(http_proxy) apt-get -y build-dep openssh dpkg-buildpackage -rfakeroot -b -us -uc -j$(SONIC_CONFIG_MAKE_JOBS) --admindir $(SONIC_DPKG_ADMINDIR) endif popd diff --git a/src/openssh/patch/0002-Revert-commit-69334996-make-sshd_config-ClientAliveC.patch b/src/openssh/patch/0002-Revert-commit-69334996-make-sshd_config-ClientAliveC.patch new file mode 100644 index 000000000000..3eb04bc4e064 --- /dev/null +++ b/src/openssh/patch/0002-Revert-commit-69334996-make-sshd_config-ClientAliveC.patch @@ -0,0 +1,48 @@ +From 2bc575c74aa811a60682e989d07675b8e7ac8a12 Mon Sep 17 00:00:00 2001 +From: Saikrishna Arcot +Date: Thu, 13 Oct 2022 13:45:17 -0700 +Subject: [PATCH] Revert commit 69334996: make + sshd_config:ClientAliveCountMax=0 disable the connection-killing behavior + +SONiC (and others) use this feature to kill connections when the session +is idle after some duration of time. OpenSSH 8.2 defined setting +ClientAliveCountMax=0, but by doing so, broke the current use case of +it. 
+ +Signed-off-by: Saikrishna Arcot +--- + serverloop.c | 3 +-- + sshd_config.5 | 3 --- + 2 files changed, 1 insertion(+), 5 deletions(-) + +diff --git a/serverloop.c b/serverloop.c +index 48d936d..1b30498 100644 +--- a/serverloop.c ++++ b/serverloop.c +@@ -184,8 +184,7 @@ client_alive_check(struct ssh *ssh) + int r, channel_id; + + /* timeout, check to see how many we have had */ +- if (options.client_alive_count_max > 0 && +- ssh_packet_inc_alive_timeouts(ssh) > ++ if (ssh_packet_inc_alive_timeouts(ssh) > + options.client_alive_count_max) { + sshpkt_fmt_connection_id(ssh, remote_id, sizeof(remote_id)); + logit("Timeout, client not responding from %s", remote_id); +diff --git a/sshd_config.5 b/sshd_config.5 +index a555e7e..a5815d3 100644 +--- a/sshd_config.5 ++++ b/sshd_config.5 +@@ -545,9 +545,6 @@ is set to 15, and + .Cm ClientAliveCountMax + is left at the default, unresponsive SSH clients + will be disconnected after approximately 45 seconds. +-Setting a zero +-.Cm ClientAliveCountMax +-disables connection termination. + .It Cm ClientAliveInterval + Sets a timeout interval in seconds after which if no data has been received + from the client, +-- +2.25.1 + diff --git a/src/openssh/patch/series b/src/openssh/patch/series index a645ad25833f..e320bcab2113 100644 --- a/src/openssh/patch/series +++ b/src/openssh/patch/series @@ -1 +1,2 @@ 0001-Put-style-as-line-number-to-ssh-session-environment-.patch +0002-Revert-commit-69334996-make-sshd_config-ClientAliveC.patch From 45d174663a1dff965b06e7778311ae1d29770bcb Mon Sep 17 00:00:00 2001 From: ntoorchi <52223501+ntoorchi@users.noreply.github.com> Date: Mon, 31 Oct 2022 16:18:42 -0700 Subject: [PATCH 127/174] Enable P4RT at build time and disable at startup (#10499) #### Why I did it Currently at the Azure build system, the P4RT container is disabled by default at the build time. Here the goal is to include the P4RT container at the build time while disabling it at the runtime. 
The user can enable/disable the p4rt app through the config based on the preference. #### How I did it Changed the config in rules/config and init-cfg.json.j2 --- files/build_templates/init_cfg.json.j2 | 2 +- rules/config | 2 +- rules/docker-p4rt.mk | 5 +++-- rules/p4rt.mk | 3 ++- slave.mk | 16 ++++++++-------- 5 files changed, 15 insertions(+), 13 deletions(-) diff --git a/files/build_templates/init_cfg.json.j2 b/files/build_templates/init_cfg.json.j2 index 38bd7c2e43a6..3f0465be15ef 100644 --- a/files/build_templates/init_cfg.json.j2 +++ b/files/build_templates/init_cfg.json.j2 @@ -49,7 +49,7 @@ {%- if include_mgmt_framework == "y" %}{% do features.append(("mgmt-framework", "enabled", true, "enabled")) %}{% endif %} {%- if include_mux == "y" %}{% do features.append(("mux", "{% if 'subtype' in DEVICE_METADATA['localhost'] and DEVICE_METADATA['localhost']['subtype'] == 'DualToR' %}enabled{% else %}always_disabled{% endif %}", false, "enabled")) %}{% endif %} {%- if include_nat == "y" %}{% do features.append(("nat", "disabled", false, "enabled")) %}{% endif %} -{%- if include_p4rt == "y" %}{% do features.append(("p4rt", "enabled", false, "enabled")) %}{% endif %} +{%- if include_p4rt == "y" %}{% do features.append(("p4rt", "disabled", false, "enabled")) %}{% endif %} {%- if include_restapi == "y" %}{% do features.append(("restapi", "enabled", false, "enabled")) %}{% endif %} {%- if include_sflow == "y" %}{% do features.append(("sflow", "disabled", false, "enabled")) %}{% endif %} {%- if include_macsec == "y" %}{% do features.append(("macsec", "disabled", false, "enabled")) %}{% endif %} diff --git a/rules/config b/rules/config index 888c470cdee2..0391abff36bd 100644 --- a/rules/config +++ b/rules/config @@ -151,7 +151,7 @@ INCLUDE_NAT = y INCLUDE_DHCP_RELAY = y # INCLUDE_P4RT - build docker-p4rt for P4RT support -INCLUDE_P4RT = n +INCLUDE_P4RT = y # ENABLE_AUTO_TECH_SUPPORT - Enable the configuration for event-driven techsupport & coredump mgmt feature 
ENABLE_AUTO_TECH_SUPPORT = y diff --git a/rules/docker-p4rt.mk b/rules/docker-p4rt.mk index 60e0d7d75244..172346742ce1 100644 --- a/rules/docker-p4rt.mk +++ b/rules/docker-p4rt.mk @@ -19,12 +19,13 @@ $(DOCKER_P4RT)_PACKAGE_NAME = p4rt $(DOCKER_P4RT)_WARM_SHUTDOWN_BEFORE = swss $(DOCKER_P4RT)_FAST_SHUTDOWN_BEFORE = swss +# TODO: Enable P4RT DBG SONIC_DOCKER_IMAGES += $(DOCKER_P4RT) -SONIC_DOCKER_DBG_IMAGES += $(DOCKER_P4RT_DBG) +# SONIC_DOCKER_DBG_IMAGES += $(DOCKER_P4RT_DBG) ifeq ($(INCLUDE_P4RT), y) SONIC_INSTALL_DOCKER_IMAGES += $(DOCKER_P4RT) -SONIC_INSTALL_DOCKER_DBG_IMAGES += $(DOCKER_P4RT_DBG) +# SONIC_INSTALL_DOCKER_DBG_IMAGES += $(DOCKER_P4RT_DBG) endif $(DOCKER_P4RT)_CONTAINER_NAME = p4rt diff --git a/rules/p4rt.mk b/rules/p4rt.mk index a402bac002f7..9f191687950d 100644 --- a/rules/p4rt.mk +++ b/rules/p4rt.mk @@ -17,4 +17,5 @@ export SONIC_P4RT SONIC_P4RT_DBG SONIC_P4RT_VERSION # The .c, .cpp, .h & .hpp files under src/{$DBG_SRC_ARCHIVE list} # are archived into debug one image to facilitate debugging. 
-DBG_SRC_ARCHIVE += sonic-p4rt +# TODO: Enable P4RT DBG +# DBG_SRC_ARCHIVE += sonic-p4rt diff --git a/slave.mk b/slave.mk index 561f6b47db3f..ec484af94489 100644 --- a/slave.mk +++ b/slave.mk @@ -178,10 +178,10 @@ endif # Pre-built Bazel is not available for armhf, so exclude P4RT # TODO(PINS): Remove when Bazel binaries are available for armhf ifeq ($(CONFIGURED_ARCH),armhf) - ifeq ($(INCLUDE_P4RT),y) - $(Q)echo "Disabling P4RT due to incompatible CPU architecture: $(CONFIGURED_ARCH)" - endif - override INCLUDE_P4RT = n +ifeq ($(INCLUDE_P4RT),y) +$(Q)echo "Disabling P4RT due to incompatible CPU architecture: $(CONFIGURED_ARCH)" +endif +override INCLUDE_P4RT = n endif ifeq ($(SONIC_INCLUDE_MACSEC),y) @@ -206,10 +206,10 @@ endif ifeq ($(ENABLE_ASAN),y) - ifneq ($(CONFIGURED_ARCH),amd64) - $(Q)echo "Disabling SWSS address sanitizer due to incompatible CPU architecture: $(CONFIGURED_ARCH)" - override ENABLE_ASAN = n - endif +ifneq ($(CONFIGURED_ARCH),amd64) +$(Q)echo "Disabling SWSS address sanitizer due to incompatible CPU architecture: $(CONFIGURED_ARCH)" +override ENABLE_ASAN = n +endif endif include $(RULES_PATH)/functions From 2b7a3ac6c05ca6e3b1fa27e22da740bf90b284b3 Mon Sep 17 00:00:00 2001 From: Praveen Chaudhary Date: Mon, 31 Oct 2022 16:37:25 -0700 Subject: [PATCH 128/174] [yang-models]: Change name-space from Azure to sonic-net. (#12416) Changes: -- Change name-space from Azure to sonic-net. -- Sort yang list in setup.py for yang-models list. #### Why I did it Sonic repo has moved to Linux-foundation. #### How I did it [yang-models]: Change name-space from Azure to sonic-net. #### How to verify it PR Tests are good enough to verify. 
--- src/sonic-yang-models/yang-models/sonic-auto_techsupport.yang | 2 +- .../yang-models/sonic-bgp-allowed-prefix.yang | 2 +- src/sonic-yang-models/yang-models/sonic-bgp-common.yang | 2 +- src/sonic-yang-models/yang-models/sonic-bgp-device-global.yang | 2 +- src/sonic-yang-models/yang-models/sonic-bgp-global.yang | 2 +- .../yang-models/sonic-bgp-internal-neighbor.yang | 2 +- src/sonic-yang-models/yang-models/sonic-bgp-monitor.yang | 2 +- src/sonic-yang-models/yang-models/sonic-bgp-neighbor.yang | 2 +- src/sonic-yang-models/yang-models/sonic-bgp-peergroup.yang | 2 +- src/sonic-yang-models/yang-models/sonic-bgp-peerrange.yang | 2 +- .../yang-models/sonic-bgp-voq-chassis-neighbor.yang | 2 +- src/sonic-yang-models/yang-models/sonic-breakout_cfg.yang | 2 +- src/sonic-yang-models/yang-models/sonic-buffer-pg.yang | 2 +- src/sonic-yang-models/yang-models/sonic-buffer-pool.yang | 2 +- .../yang-models/sonic-buffer-port-egress-profile-list.yang | 2 +- .../yang-models/sonic-buffer-port-ingress-profile-list.yang | 2 +- src/sonic-yang-models/yang-models/sonic-buffer-profile.yang | 2 +- src/sonic-yang-models/yang-models/sonic-buffer-queue.yang | 2 +- src/sonic-yang-models/yang-models/sonic-cable-length.yang | 2 +- src/sonic-yang-models/yang-models/sonic-console.yang | 2 +- src/sonic-yang-models/yang-models/sonic-copp.yang | 2 +- src/sonic-yang-models/yang-models/sonic-crm.yang | 2 +- .../yang-models/sonic-default-lossless-buffer-parameter.yang | 2 +- src/sonic-yang-models/yang-models/sonic-device_metadata.yang | 2 +- src/sonic-yang-models/yang-models/sonic-device_neighbor.yang | 2 +- .../yang-models/sonic-device_neighbor_metadata.yang | 2 +- src/sonic-yang-models/yang-models/sonic-dhcp-server.yang | 2 +- src/sonic-yang-models/yang-models/sonic-dhcpv6-relay.yang | 2 +- src/sonic-yang-models/yang-models/sonic-dot1p-tc-map.yang | 2 +- src/sonic-yang-models/yang-models/sonic-dscp-fc-map.yang | 2 +- src/sonic-yang-models/yang-models/sonic-dscp-tc-map.yang | 2 +- 
src/sonic-yang-models/yang-models/sonic-exp-fc-map.yang | 2 +- src/sonic-yang-models/yang-models/sonic-feature.yang | 2 +- src/sonic-yang-models/yang-models/sonic-flex_counter.yang | 2 +- src/sonic-yang-models/yang-models/sonic-interface.yang | 2 +- src/sonic-yang-models/yang-models/sonic-kdump.yang | 2 +- src/sonic-yang-models/yang-models/sonic-kubernetes_master.yang | 3 ++- src/sonic-yang-models/yang-models/sonic-lldp.yang | 2 +- .../yang-models/sonic-loopback-interface.yang | 2 +- .../yang-models/sonic-lossless-traffic-pattern.yang | 2 +- src/sonic-yang-models/yang-models/sonic-macsec.yang | 2 +- src/sonic-yang-models/yang-models/sonic-mclag.yang | 2 +- src/sonic-yang-models/yang-models/sonic-mgmt_interface.yang | 2 +- src/sonic-yang-models/yang-models/sonic-mgmt_port.yang | 2 +- src/sonic-yang-models/yang-models/sonic-mgmt_vrf.yang | 2 +- src/sonic-yang-models/yang-models/sonic-mirror-session.yang | 2 +- src/sonic-yang-models/yang-models/sonic-mpls-tc-map.yang | 2 +- src/sonic-yang-models/yang-models/sonic-mux-cable.yang | 2 +- src/sonic-yang-models/yang-models/sonic-nat.yang | 2 +- src/sonic-yang-models/yang-models/sonic-ntp.yang | 2 +- src/sonic-yang-models/yang-models/sonic-nvgre-tunnel.yang | 2 +- src/sonic-yang-models/yang-models/sonic-passwh.yang | 2 +- src/sonic-yang-models/yang-models/sonic-pbh.yang | 2 +- src/sonic-yang-models/yang-models/sonic-peer-switch.yang | 2 +- .../yang-models/sonic-pfc-priority-priority-group-map.yang | 2 +- .../yang-models/sonic-pfc-priority-queue-map.yang | 2 +- src/sonic-yang-models/yang-models/sonic-pfcwd.yang | 2 +- src/sonic-yang-models/yang-models/sonic-port-qos-map.yang | 2 +- src/sonic-yang-models/yang-models/sonic-port.yang | 2 +- src/sonic-yang-models/yang-models/sonic-portchannel.yang | 2 +- src/sonic-yang-models/yang-models/sonic-queue.yang | 2 +- src/sonic-yang-models/yang-models/sonic-restapi.yang | 2 +- src/sonic-yang-models/yang-models/sonic-route-common.yang | 2 +- 
src/sonic-yang-models/yang-models/sonic-route-map.yang | 2 +- .../yang-models/sonic-routing-policy-sets.yang | 2 +- src/sonic-yang-models/yang-models/sonic-scheduler.yang | 2 +- src/sonic-yang-models/yang-models/sonic-sflow.yang | 2 +- src/sonic-yang-models/yang-models/sonic-snmp.yang | 2 +- src/sonic-yang-models/yang-models/sonic-static-route.yang | 2 +- src/sonic-yang-models/yang-models/sonic-storm-control.yang | 2 +- src/sonic-yang-models/yang-models/sonic-syslog.yang | 2 +- src/sonic-yang-models/yang-models/sonic-system-aaa.yang | 2 +- src/sonic-yang-models/yang-models/sonic-system-tacacs.yang | 2 +- .../yang-models/sonic-tc-priority-group-map.yang | 2 +- src/sonic-yang-models/yang-models/sonic-tc-queue-map.yang | 2 +- src/sonic-yang-models/yang-models/sonic-telemetry.yang | 2 +- src/sonic-yang-models/yang-models/sonic-versions.yang | 2 +- .../yang-models/sonic-vlan-sub-interface.yang | 2 +- src/sonic-yang-models/yang-models/sonic-vlan.yang | 2 +- src/sonic-yang-models/yang-models/sonic-vrf.yang | 2 +- src/sonic-yang-models/yang-models/sonic-vxlan.yang | 2 +- src/sonic-yang-models/yang-models/sonic-warm-restart.yang | 2 +- src/sonic-yang-models/yang-models/sonic-wred-profile.yang | 2 +- src/sonic-yang-models/yang-templates/sonic-acl.yang.j2 | 2 +- src/sonic-yang-models/yang-templates/sonic-extension.yang.j2 | 2 +- src/sonic-yang-models/yang-templates/sonic-policer.yang.j2 | 2 +- src/sonic-yang-models/yang-templates/sonic-types.yang.j2 | 2 +- 87 files changed, 88 insertions(+), 87 deletions(-) diff --git a/src/sonic-yang-models/yang-models/sonic-auto_techsupport.yang b/src/sonic-yang-models/yang-models/sonic-auto_techsupport.yang index 02e29463d594..078609c0e1f7 100644 --- a/src/sonic-yang-models/yang-models/sonic-auto_techsupport.yang +++ b/src/sonic-yang-models/yang-models/sonic-auto_techsupport.yang @@ -2,7 +2,7 @@ module sonic-auto_techsupport { yang-version 1.1; - namespace "http://github.com/Azure/sonic-auto_techsupport"; + namespace 
"http://github.com/sonic-net/sonic-auto_techsupport"; prefix auto_techsupport; import sonic-types { diff --git a/src/sonic-yang-models/yang-models/sonic-bgp-allowed-prefix.yang b/src/sonic-yang-models/yang-models/sonic-bgp-allowed-prefix.yang index 942387983775..65f5dab74733 100644 --- a/src/sonic-yang-models/yang-models/sonic-bgp-allowed-prefix.yang +++ b/src/sonic-yang-models/yang-models/sonic-bgp-allowed-prefix.yang @@ -1,5 +1,5 @@ module sonic-bgp-allowed-prefix { - namespace "http://github.com/Azure/sonic-bgp-allowed-prefix"; + namespace "http://github.com/sonic-net/sonic-bgp-allowed-prefix"; prefix bgppre; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-bgp-common.yang b/src/sonic-yang-models/yang-models/sonic-bgp-common.yang index 1c360faa9cd4..5d9f71026327 100644 --- a/src/sonic-yang-models/yang-models/sonic-bgp-common.yang +++ b/src/sonic-yang-models/yang-models/sonic-bgp-common.yang @@ -1,5 +1,5 @@ module sonic-bgp-common { - namespace "http://github.com/Azure/sonic-bgp-common"; + namespace "http://github.com/sonic-net/sonic-bgp-common"; prefix bgpcmn; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-bgp-device-global.yang b/src/sonic-yang-models/yang-models/sonic-bgp-device-global.yang index 728714c7d51f..277be6870922 100644 --- a/src/sonic-yang-models/yang-models/sonic-bgp-device-global.yang +++ b/src/sonic-yang-models/yang-models/sonic-bgp-device-global.yang @@ -1,5 +1,5 @@ module sonic-bgp-device-global { - namespace "http://github.com/Azure/sonic-bgp-device-global"; + namespace "http://github.com/sonic-net/sonic-bgp-device-global"; prefix bgp_device_global; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-bgp-global.yang b/src/sonic-yang-models/yang-models/sonic-bgp-global.yang index a428635e6037..db6c02356aa6 100644 --- a/src/sonic-yang-models/yang-models/sonic-bgp-global.yang +++ b/src/sonic-yang-models/yang-models/sonic-bgp-global.yang @@ -1,5 +1,5 @@ module sonic-bgp-global { - 
namespace "http://github.com/Azure/sonic-bgp-global"; + namespace "http://github.com/sonic-net/sonic-bgp-global"; prefix bgpg; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-bgp-internal-neighbor.yang b/src/sonic-yang-models/yang-models/sonic-bgp-internal-neighbor.yang index 69a089f7cab8..a58f404cb693 100644 --- a/src/sonic-yang-models/yang-models/sonic-bgp-internal-neighbor.yang +++ b/src/sonic-yang-models/yang-models/sonic-bgp-internal-neighbor.yang @@ -1,5 +1,5 @@ module sonic-bgp-internal-neighbor { - namespace "http://github.com/Azure/sonic-bgp-internal-neighbor"; + namespace "http://github.com/sonic-net/sonic-bgp-internal-neighbor"; prefix bgpintnbr; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-bgp-monitor.yang b/src/sonic-yang-models/yang-models/sonic-bgp-monitor.yang index d152ea94166b..537a919b6f94 100644 --- a/src/sonic-yang-models/yang-models/sonic-bgp-monitor.yang +++ b/src/sonic-yang-models/yang-models/sonic-bgp-monitor.yang @@ -1,5 +1,5 @@ module sonic-bgp-monitor { - namespace "http://github.com/Azure/sonic-bgp-monitor"; + namespace "http://github.com/sonic-net/sonic-bgp-monitor"; prefix bgpmon; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-bgp-neighbor.yang b/src/sonic-yang-models/yang-models/sonic-bgp-neighbor.yang index 12432ffc35bb..3ae4d89d52d3 100644 --- a/src/sonic-yang-models/yang-models/sonic-bgp-neighbor.yang +++ b/src/sonic-yang-models/yang-models/sonic-bgp-neighbor.yang @@ -1,5 +1,5 @@ module sonic-bgp-neighbor { - namespace "http://github.com/Azure/sonic-bgp-neighbor"; + namespace "http://github.com/sonic-net/sonic-bgp-neighbor"; prefix bgpnbr; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-bgp-peergroup.yang b/src/sonic-yang-models/yang-models/sonic-bgp-peergroup.yang index 0cc9d5cb3f6a..a1f9bd2ce9e2 100644 --- a/src/sonic-yang-models/yang-models/sonic-bgp-peergroup.yang +++ b/src/sonic-yang-models/yang-models/sonic-bgp-peergroup.yang @@ 
-1,5 +1,5 @@ module sonic-bgp-peergroup { - namespace "http://github.com/Azure/sonic-bgp-peergroup"; + namespace "http://github.com/sonic-net/sonic-bgp-peergroup"; prefix pg; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-bgp-peerrange.yang b/src/sonic-yang-models/yang-models/sonic-bgp-peerrange.yang index 88d39d82d68c..01348096ccab 100644 --- a/src/sonic-yang-models/yang-models/sonic-bgp-peerrange.yang +++ b/src/sonic-yang-models/yang-models/sonic-bgp-peerrange.yang @@ -1,5 +1,5 @@ module sonic-bgp-peerrange { - namespace "http://github.com/Azure/sonic-bgp-peerrange"; + namespace "http://github.com/sonic-net/sonic-bgp-peerrange"; prefix pr; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-bgp-voq-chassis-neighbor.yang b/src/sonic-yang-models/yang-models/sonic-bgp-voq-chassis-neighbor.yang index 662f77c3c899..ad4d227ee0a8 100644 --- a/src/sonic-yang-models/yang-models/sonic-bgp-voq-chassis-neighbor.yang +++ b/src/sonic-yang-models/yang-models/sonic-bgp-voq-chassis-neighbor.yang @@ -1,5 +1,5 @@ module sonic-bgp-voq-chassis-neighbor { - namespace "http://github.com/Azure/sonic-bgp-voq-chassis-neighbor"; + namespace "http://github.com/sonic-net/sonic-bgp-voq-chassis-neighbor"; prefix bgpintnbr; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-breakout_cfg.yang b/src/sonic-yang-models/yang-models/sonic-breakout_cfg.yang index 85c0cf997d40..7d77669c19df 100644 --- a/src/sonic-yang-models/yang-models/sonic-breakout_cfg.yang +++ b/src/sonic-yang-models/yang-models/sonic-breakout_cfg.yang @@ -2,7 +2,7 @@ module sonic-breakout_cfg { yang-version 1.1; - namespace "http://github.com/Azure/sonic-breakout_cfg"; + namespace "http://github.com/sonic-net/sonic-breakout_cfg"; prefix breakout_cfg; import sonic-extension { diff --git a/src/sonic-yang-models/yang-models/sonic-buffer-pg.yang b/src/sonic-yang-models/yang-models/sonic-buffer-pg.yang index 60d06bf6ef6c..2886174faeda 100644 --- 
a/src/sonic-yang-models/yang-models/sonic-buffer-pg.yang +++ b/src/sonic-yang-models/yang-models/sonic-buffer-pg.yang @@ -1,5 +1,5 @@ module sonic-buffer-pg { - namespace "http://github.com/Azure/sonic-buffer-pg"; + namespace "http://github.com/sonic-net/sonic-buffer-pg"; prefix bpg; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-buffer-pool.yang b/src/sonic-yang-models/yang-models/sonic-buffer-pool.yang index c59d4ec66ab5..fd83218573b5 100644 --- a/src/sonic-yang-models/yang-models/sonic-buffer-pool.yang +++ b/src/sonic-yang-models/yang-models/sonic-buffer-pool.yang @@ -1,5 +1,5 @@ module sonic-buffer-pool { - namespace "http://github.com/Azure/sonic-buffer-pool"; + namespace "http://github.com/sonic-net/sonic-buffer-pool"; prefix bpl; organization diff --git a/src/sonic-yang-models/yang-models/sonic-buffer-port-egress-profile-list.yang b/src/sonic-yang-models/yang-models/sonic-buffer-port-egress-profile-list.yang index 10af27b77458..6c1a75fbfd30 100644 --- a/src/sonic-yang-models/yang-models/sonic-buffer-port-egress-profile-list.yang +++ b/src/sonic-yang-models/yang-models/sonic-buffer-port-egress-profile-list.yang @@ -1,5 +1,5 @@ module sonic-buffer-port-egress-profile-list { - namespace "http://github.com/Azure/sonic-buffer-port-egress-profile-list"; + namespace "http://github.com/sonic-net/sonic-buffer-port-egress-profile-list"; prefix bpg; import sonic-extension { diff --git a/src/sonic-yang-models/yang-models/sonic-buffer-port-ingress-profile-list.yang b/src/sonic-yang-models/yang-models/sonic-buffer-port-ingress-profile-list.yang index 603f50bf2ec2..01a7f6bb6e21 100644 --- a/src/sonic-yang-models/yang-models/sonic-buffer-port-ingress-profile-list.yang +++ b/src/sonic-yang-models/yang-models/sonic-buffer-port-ingress-profile-list.yang @@ -1,5 +1,5 @@ module sonic-buffer-port-ingress-profile-list { - namespace "http://github.com/Azure/sonic-buffer-port-ingress-profile-list"; + namespace 
"http://github.com/sonic-net/sonic-buffer-port-ingress-profile-list"; prefix bpg; import sonic-extension { diff --git a/src/sonic-yang-models/yang-models/sonic-buffer-profile.yang b/src/sonic-yang-models/yang-models/sonic-buffer-profile.yang index 204e8aa796be..418302e9fc68 100644 --- a/src/sonic-yang-models/yang-models/sonic-buffer-profile.yang +++ b/src/sonic-yang-models/yang-models/sonic-buffer-profile.yang @@ -1,5 +1,5 @@ module sonic-buffer-profile { - namespace "http://github.com/Azure/sonic-buffer-profile"; + namespace "http://github.com/sonic-net/sonic-buffer-profile"; prefix bpf; import sonic-buffer-pool { diff --git a/src/sonic-yang-models/yang-models/sonic-buffer-queue.yang b/src/sonic-yang-models/yang-models/sonic-buffer-queue.yang index f8c951aa4dd1..bd1130fcc4e2 100644 --- a/src/sonic-yang-models/yang-models/sonic-buffer-queue.yang +++ b/src/sonic-yang-models/yang-models/sonic-buffer-queue.yang @@ -1,5 +1,5 @@ module sonic-buffer-queue { - namespace "http://github.com/Azure/sonic-buffer-queue"; + namespace "http://github.com/sonic-net/sonic-buffer-queue"; prefix bqueue; import sonic-extension { diff --git a/src/sonic-yang-models/yang-models/sonic-cable-length.yang b/src/sonic-yang-models/yang-models/sonic-cable-length.yang index 2af765f17d10..84ca01f34a00 100644 --- a/src/sonic-yang-models/yang-models/sonic-cable-length.yang +++ b/src/sonic-yang-models/yang-models/sonic-cable-length.yang @@ -2,7 +2,7 @@ module sonic-cable-length { yang-version 1.1; - namespace "http://github.com/Azure/sonic-cable-length"; + namespace "http://github.com/sonic-net/sonic-cable-length"; prefix cable-length; diff --git a/src/sonic-yang-models/yang-models/sonic-console.yang b/src/sonic-yang-models/yang-models/sonic-console.yang index ed0af5390f49..65a8e0dc5b43 100644 --- a/src/sonic-yang-models/yang-models/sonic-console.yang +++ b/src/sonic-yang-models/yang-models/sonic-console.yang @@ -1,6 +1,6 @@ module sonic-console { yang-version 1.1; - namespace 
"http://github.com/Azure/sonic-console"; + namespace "http://github.com/sonic-net/sonic-console"; prefix console; import sonic-types { diff --git a/src/sonic-yang-models/yang-models/sonic-copp.yang b/src/sonic-yang-models/yang-models/sonic-copp.yang index d735150a5c40..0c282472548b 100644 --- a/src/sonic-yang-models/yang-models/sonic-copp.yang +++ b/src/sonic-yang-models/yang-models/sonic-copp.yang @@ -2,7 +2,7 @@ module sonic-copp { yang-version 1.1; - namespace "http://github.com/Azure/sonic-copp"; + namespace "http://github.com/sonic-net/sonic-copp"; prefix copp; import sonic-types { diff --git a/src/sonic-yang-models/yang-models/sonic-crm.yang b/src/sonic-yang-models/yang-models/sonic-crm.yang index f554358645f6..87b36276d663 100644 --- a/src/sonic-yang-models/yang-models/sonic-crm.yang +++ b/src/sonic-yang-models/yang-models/sonic-crm.yang @@ -2,7 +2,7 @@ module sonic-crm { yang-version 1.1; - namespace "http://github.com/Azure/sonic-crm"; + namespace "http://github.com/sonic-net/sonic-crm"; prefix crm; import sonic-types { diff --git a/src/sonic-yang-models/yang-models/sonic-default-lossless-buffer-parameter.yang b/src/sonic-yang-models/yang-models/sonic-default-lossless-buffer-parameter.yang index 6f98d9a922b7..4fccf943fc3a 100644 --- a/src/sonic-yang-models/yang-models/sonic-default-lossless-buffer-parameter.yang +++ b/src/sonic-yang-models/yang-models/sonic-default-lossless-buffer-parameter.yang @@ -2,7 +2,7 @@ module sonic-default-lossless-buffer-parameter { yang-version 1.1; - namespace "http://github.com/Azure/sonic-default-lossless-buffer-parameter"; + namespace "http://github.com/sonic-net/sonic-default-lossless-buffer-parameter"; prefix default-lossless-buffer-parameter; diff --git a/src/sonic-yang-models/yang-models/sonic-device_metadata.yang b/src/sonic-yang-models/yang-models/sonic-device_metadata.yang index 1d40dc31b798..d7b92bea82fd 100644 --- a/src/sonic-yang-models/yang-models/sonic-device_metadata.yang +++ 
b/src/sonic-yang-models/yang-models/sonic-device_metadata.yang @@ -2,7 +2,7 @@ module sonic-device_metadata { yang-version 1.1; - namespace "http://github.com/Azure/sonic-device_metadata"; + namespace "http://github.com/sonic-net/sonic-device_metadata"; prefix device_metadata; import ietf-yang-types { diff --git a/src/sonic-yang-models/yang-models/sonic-device_neighbor.yang b/src/sonic-yang-models/yang-models/sonic-device_neighbor.yang index a7284ccec50c..efca22dafe6c 100644 --- a/src/sonic-yang-models/yang-models/sonic-device_neighbor.yang +++ b/src/sonic-yang-models/yang-models/sonic-device_neighbor.yang @@ -2,7 +2,7 @@ module sonic-device_neighbor { yang-version 1.1; - namespace "http://github.com/Azure/sonic-device_neighbor"; + namespace "http://github.com/sonic-net/sonic-device_neighbor"; prefix device_neighbor; import ietf-inet-types { diff --git a/src/sonic-yang-models/yang-models/sonic-device_neighbor_metadata.yang b/src/sonic-yang-models/yang-models/sonic-device_neighbor_metadata.yang index 76526f801c92..6f1aa6b84d71 100644 --- a/src/sonic-yang-models/yang-models/sonic-device_neighbor_metadata.yang +++ b/src/sonic-yang-models/yang-models/sonic-device_neighbor_metadata.yang @@ -2,7 +2,7 @@ module sonic-device_neighbor_metadata { yang-version 1.1; - namespace "http://github.com/Azure/sonic-device_neighbor_metadata"; + namespace "http://github.com/sonic-net/sonic-device_neighbor_metadata"; prefix device_neighbor_metadata; import ietf-yang-types { diff --git a/src/sonic-yang-models/yang-models/sonic-dhcp-server.yang b/src/sonic-yang-models/yang-models/sonic-dhcp-server.yang index 8dc88c541423..0ca5e8636c91 100644 --- a/src/sonic-yang-models/yang-models/sonic-dhcp-server.yang +++ b/src/sonic-yang-models/yang-models/sonic-dhcp-server.yang @@ -2,7 +2,7 @@ module sonic-dhcp-server { yang-version 1.1; - namespace "http://github.com/Azure/sonic-dhcp-server"; + namespace "http://github.com/sonic-net/sonic-dhcp-server"; prefix dhcp-server; diff --git 
a/src/sonic-yang-models/yang-models/sonic-dhcpv6-relay.yang b/src/sonic-yang-models/yang-models/sonic-dhcpv6-relay.yang index f1e73397dff4..80794114e647 100644 --- a/src/sonic-yang-models/yang-models/sonic-dhcpv6-relay.yang +++ b/src/sonic-yang-models/yang-models/sonic-dhcpv6-relay.yang @@ -1,6 +1,6 @@ module sonic-dhcpv6-relay { - namespace "http://github.com/Azure/sonic-dhcpv6-relay"; + namespace "http://github.com/sonic-net/sonic-dhcpv6-relay"; prefix sdhcpv6relay; diff --git a/src/sonic-yang-models/yang-models/sonic-dot1p-tc-map.yang b/src/sonic-yang-models/yang-models/sonic-dot1p-tc-map.yang index 1636f43f6c64..12564956b11b 100644 --- a/src/sonic-yang-models/yang-models/sonic-dot1p-tc-map.yang +++ b/src/sonic-yang-models/yang-models/sonic-dot1p-tc-map.yang @@ -2,7 +2,7 @@ module sonic-dot1p-tc-map { yang-version 1.1; - namespace "http://github.com/Azure/sonic-dot1p-tc-map"; + namespace "http://github.com/sonic-net/sonic-dot1p-tc-map"; prefix dot1ptm; diff --git a/src/sonic-yang-models/yang-models/sonic-dscp-fc-map.yang b/src/sonic-yang-models/yang-models/sonic-dscp-fc-map.yang index 0b489a57bd0a..369064ea5885 100644 --- a/src/sonic-yang-models/yang-models/sonic-dscp-fc-map.yang +++ b/src/sonic-yang-models/yang-models/sonic-dscp-fc-map.yang @@ -2,7 +2,7 @@ module sonic-dscp-fc-map { yang-version 1.1; - namespace "http://github.com/Azure/sonic-dscp-fc-map"; + namespace "http://github.com/sonic-net/sonic-dscp-fc-map"; prefix dtm; diff --git a/src/sonic-yang-models/yang-models/sonic-dscp-tc-map.yang b/src/sonic-yang-models/yang-models/sonic-dscp-tc-map.yang index fb590b85c6ff..8bfc02fdf5fa 100644 --- a/src/sonic-yang-models/yang-models/sonic-dscp-tc-map.yang +++ b/src/sonic-yang-models/yang-models/sonic-dscp-tc-map.yang @@ -2,7 +2,7 @@ module sonic-dscp-tc-map { yang-version 1.1; - namespace "http://github.com/Azure/sonic-dscp-tc-map"; + namespace "http://github.com/sonic-net/sonic-dscp-tc-map"; prefix dtm; diff --git 
a/src/sonic-yang-models/yang-models/sonic-exp-fc-map.yang b/src/sonic-yang-models/yang-models/sonic-exp-fc-map.yang index 79f492bff4f8..43f182c03bdc 100644 --- a/src/sonic-yang-models/yang-models/sonic-exp-fc-map.yang +++ b/src/sonic-yang-models/yang-models/sonic-exp-fc-map.yang @@ -2,7 +2,7 @@ module sonic-exp-fc-map { yang-version 1.1; - namespace "http://github.com/Azure/sonic-exp-fc-map"; + namespace "http://github.com/sonic-net/sonic-exp-fc-map"; prefix dtm; diff --git a/src/sonic-yang-models/yang-models/sonic-feature.yang b/src/sonic-yang-models/yang-models/sonic-feature.yang index f27411788e8d..797affc7ca3a 100644 --- a/src/sonic-yang-models/yang-models/sonic-feature.yang +++ b/src/sonic-yang-models/yang-models/sonic-feature.yang @@ -2,7 +2,7 @@ module sonic-feature{ yang-version 1.1; - namespace "http://github.com/Azure/sonic-feature"; + namespace "http://github.com/sonic-net/sonic-feature"; prefix feature; import sonic-types { diff --git a/src/sonic-yang-models/yang-models/sonic-flex_counter.yang b/src/sonic-yang-models/yang-models/sonic-flex_counter.yang index 9ced223e94bc..e94aa76ccf42 100644 --- a/src/sonic-yang-models/yang-models/sonic-flex_counter.yang +++ b/src/sonic-yang-models/yang-models/sonic-flex_counter.yang @@ -2,7 +2,7 @@ module sonic-flex_counter { yang-version 1.1; - namespace "http://github.com/Azure/sonic-flex_counter"; + namespace "http://github.com/sonic-net/sonic-flex_counter"; prefix flex_counter; import ietf-inet-types { diff --git a/src/sonic-yang-models/yang-models/sonic-interface.yang b/src/sonic-yang-models/yang-models/sonic-interface.yang index 74f241fed0d4..afd46a655501 100644 --- a/src/sonic-yang-models/yang-models/sonic-interface.yang +++ b/src/sonic-yang-models/yang-models/sonic-interface.yang @@ -2,7 +2,7 @@ module sonic-interface { yang-version 1.1; - namespace "http://github.com/Azure/sonic-interface"; + namespace "http://github.com/sonic-net/sonic-interface"; prefix intf; import sonic-types { diff --git 
a/src/sonic-yang-models/yang-models/sonic-kdump.yang b/src/sonic-yang-models/yang-models/sonic-kdump.yang index c7640ee0c754..364ccdc1a2ca 100644 --- a/src/sonic-yang-models/yang-models/sonic-kdump.yang +++ b/src/sonic-yang-models/yang-models/sonic-kdump.yang @@ -1,5 +1,5 @@ module sonic-kdump { - namespace "http://github.com/Azure/sonic-kdump"; + namespace "http://github.com/sonic-net/sonic-kdump"; prefix kdump; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-kubernetes_master.yang b/src/sonic-yang-models/yang-models/sonic-kubernetes_master.yang index 6ac153c606d0..331270122ef9 100644 --- a/src/sonic-yang-models/yang-models/sonic-kubernetes_master.yang +++ b/src/sonic-yang-models/yang-models/sonic-kubernetes_master.yang @@ -2,7 +2,8 @@ module sonic-kubernetes_master { yang-version 1.1; - namespace "http://github.com/Azure/sonic-kubernetes_master"; + namespace "http://github.com/sonic-net/sonic-kubernetes_master"; + prefix kubernetes_master; import ietf-inet-types { diff --git a/src/sonic-yang-models/yang-models/sonic-lldp.yang b/src/sonic-yang-models/yang-models/sonic-lldp.yang index a3620f30fe67..e83d73bbc912 100644 --- a/src/sonic-yang-models/yang-models/sonic-lldp.yang +++ b/src/sonic-yang-models/yang-models/sonic-lldp.yang @@ -1,5 +1,5 @@ module sonic-lldp { - namespace "http://github.com/Azure/sonic-lldp"; + namespace "http://github.com/sonic-net/sonic-lldp"; prefix slldp; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-loopback-interface.yang b/src/sonic-yang-models/yang-models/sonic-loopback-interface.yang index c3ada71aaeae..5d64b61c59c5 100644 --- a/src/sonic-yang-models/yang-models/sonic-loopback-interface.yang +++ b/src/sonic-yang-models/yang-models/sonic-loopback-interface.yang @@ -2,7 +2,7 @@ module sonic-loopback-interface { yang-version 1.1; - namespace "http://github.com/Azure/sonic-loopback-interface"; + namespace "http://github.com/sonic-net/sonic-loopback-interface"; prefix lointf; import sonic-types 
{ diff --git a/src/sonic-yang-models/yang-models/sonic-lossless-traffic-pattern.yang b/src/sonic-yang-models/yang-models/sonic-lossless-traffic-pattern.yang index 6901dc01e007..5ac4d9243b04 100644 --- a/src/sonic-yang-models/yang-models/sonic-lossless-traffic-pattern.yang +++ b/src/sonic-yang-models/yang-models/sonic-lossless-traffic-pattern.yang @@ -2,7 +2,7 @@ module sonic-lossless-traffic-pattern { yang-version 1.1; - namespace "http://github.com/Azure/sonic-lossless-traffic-pattern"; + namespace "http://github.com/sonic-net/sonic-lossless-traffic-pattern"; prefix lossless-traffic-pattern; diff --git a/src/sonic-yang-models/yang-models/sonic-macsec.yang b/src/sonic-yang-models/yang-models/sonic-macsec.yang index 4e3412f86a3d..f9085891900e 100644 --- a/src/sonic-yang-models/yang-models/sonic-macsec.yang +++ b/src/sonic-yang-models/yang-models/sonic-macsec.yang @@ -2,7 +2,7 @@ module sonic-macsec { yang-version 1.1; - namespace "http://github.com/Azure/sonic-macsec"; + namespace "http://github.com/sonic-net/sonic-macsec"; prefix macsec; diff --git a/src/sonic-yang-models/yang-models/sonic-mclag.yang b/src/sonic-yang-models/yang-models/sonic-mclag.yang index 39e4c0b2b46f..59799baf4e4d 100644 --- a/src/sonic-yang-models/yang-models/sonic-mclag.yang +++ b/src/sonic-yang-models/yang-models/sonic-mclag.yang @@ -1,5 +1,5 @@ module sonic-mclag { - namespace "http://github.com/Azure/sonic-mclag"; + namespace "http://github.com/sonic-net/sonic-mclag"; prefix smclag; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-mgmt_interface.yang b/src/sonic-yang-models/yang-models/sonic-mgmt_interface.yang index c834a28d44e2..119e02f478a5 100644 --- a/src/sonic-yang-models/yang-models/sonic-mgmt_interface.yang +++ b/src/sonic-yang-models/yang-models/sonic-mgmt_interface.yang @@ -2,7 +2,7 @@ module sonic-mgmt_interface { yang-version 1.1; - namespace "http://github.com/Azure/sonic-mgmt_interface"; + namespace "http://github.com/sonic-net/sonic-mgmt_interface"; 
prefix mgmtintf; import sonic-mgmt_port { diff --git a/src/sonic-yang-models/yang-models/sonic-mgmt_port.yang b/src/sonic-yang-models/yang-models/sonic-mgmt_port.yang index fc722abf6488..96a2dfaf3fd1 100644 --- a/src/sonic-yang-models/yang-models/sonic-mgmt_port.yang +++ b/src/sonic-yang-models/yang-models/sonic-mgmt_port.yang @@ -2,7 +2,7 @@ module sonic-mgmt_port { yang-version 1.1; - namespace "http://github.com/Azure/sonic-mgmt_port"; + namespace "http://github.com/sonic-net/sonic-mgmt_port"; prefix mgmtprt; import sonic-types { diff --git a/src/sonic-yang-models/yang-models/sonic-mgmt_vrf.yang b/src/sonic-yang-models/yang-models/sonic-mgmt_vrf.yang index 35ac15e1fb85..c851d057b87a 100644 --- a/src/sonic-yang-models/yang-models/sonic-mgmt_vrf.yang +++ b/src/sonic-yang-models/yang-models/sonic-mgmt_vrf.yang @@ -2,7 +2,7 @@ module sonic-mgmt_vrf { yang-version 1.1; - namespace "http://github.com/Azure/sonic-mgmt_vrf"; + namespace "http://github.com/sonic-net/sonic-mgmt_vrf"; prefix mvrf; description diff --git a/src/sonic-yang-models/yang-models/sonic-mirror-session.yang b/src/sonic-yang-models/yang-models/sonic-mirror-session.yang index 9ea1954d7a37..98e0e9dfac8d 100644 --- a/src/sonic-yang-models/yang-models/sonic-mirror-session.yang +++ b/src/sonic-yang-models/yang-models/sonic-mirror-session.yang @@ -2,7 +2,7 @@ module sonic-mirror-session { yang-version 1.1; - namespace "http://github.com/Azure/sonic-mirror-session"; + namespace "http://github.com/sonic-net/sonic-mirror-session"; prefix sms; import ietf-inet-types { diff --git a/src/sonic-yang-models/yang-models/sonic-mpls-tc-map.yang b/src/sonic-yang-models/yang-models/sonic-mpls-tc-map.yang index 064e448d305c..96392222d1d4 100644 --- a/src/sonic-yang-models/yang-models/sonic-mpls-tc-map.yang +++ b/src/sonic-yang-models/yang-models/sonic-mpls-tc-map.yang @@ -2,7 +2,7 @@ module sonic-mpls-tc-map { yang-version 1.1; - namespace "http://github.com/Azure/sonic-mpls-tc-map"; + namespace 
"http://github.com/sonic-net/sonic-mpls-tc-map"; prefix mpls_tc_map; diff --git a/src/sonic-yang-models/yang-models/sonic-mux-cable.yang b/src/sonic-yang-models/yang-models/sonic-mux-cable.yang index a66a588c91da..c70002afdd1a 100644 --- a/src/sonic-yang-models/yang-models/sonic-mux-cable.yang +++ b/src/sonic-yang-models/yang-models/sonic-mux-cable.yang @@ -1,5 +1,5 @@ module sonic-mux-cable { - namespace "http://github.com/Azure/sonic-mux-cable"; + namespace "http://github.com/sonic-net/sonic-mux-cable"; prefix mux_cable; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-nat.yang b/src/sonic-yang-models/yang-models/sonic-nat.yang index d01a1a9617f4..c1e554012524 100644 --- a/src/sonic-yang-models/yang-models/sonic-nat.yang +++ b/src/sonic-yang-models/yang-models/sonic-nat.yang @@ -1,5 +1,5 @@ module sonic-nat { - namespace "http://github.com/Azure/sonic-nat"; + namespace "http://github.com/sonic-net/sonic-nat"; prefix snat; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-ntp.yang b/src/sonic-yang-models/yang-models/sonic-ntp.yang index bea02cb0d34d..65ee51052d07 100644 --- a/src/sonic-yang-models/yang-models/sonic-ntp.yang +++ b/src/sonic-yang-models/yang-models/sonic-ntp.yang @@ -2,7 +2,7 @@ module sonic-ntp { yang-version 1.1; - namespace "http://github.com/Azure/sonic-system-ntp"; + namespace "http://github.com/sonic-net/sonic-system-ntp"; prefix ntp; import ietf-inet-types { diff --git a/src/sonic-yang-models/yang-models/sonic-nvgre-tunnel.yang b/src/sonic-yang-models/yang-models/sonic-nvgre-tunnel.yang index b10c35d94043..d010812b25ba 100644 --- a/src/sonic-yang-models/yang-models/sonic-nvgre-tunnel.yang +++ b/src/sonic-yang-models/yang-models/sonic-nvgre-tunnel.yang @@ -2,7 +2,7 @@ module sonic-nvgre-tunnel { yang-version 1.1; - namespace "http://github.com/Azure/sonic-nvgre-tunnel"; + namespace "http://github.com/sonic-net/sonic-nvgre-tunnel"; prefix nvgre; import ietf-inet-types { diff --git 
a/src/sonic-yang-models/yang-models/sonic-passwh.yang b/src/sonic-yang-models/yang-models/sonic-passwh.yang index 8814bc2e5a29..347a740180db 100755 --- a/src/sonic-yang-models/yang-models/sonic-passwh.yang +++ b/src/sonic-yang-models/yang-models/sonic-passwh.yang @@ -1,6 +1,6 @@ module sonic-passwh { yang-version 1.1; - namespace "http://github.com/Azure/sonic-passwh"; + namespace "http://github.com/sonic-net/sonic-passwh"; prefix password; description "PASSWORD HARDENING YANG Module for SONiC OS"; diff --git a/src/sonic-yang-models/yang-models/sonic-pbh.yang b/src/sonic-yang-models/yang-models/sonic-pbh.yang index 21a4b5fecc69..54c728a939a1 100644 --- a/src/sonic-yang-models/yang-models/sonic-pbh.yang +++ b/src/sonic-yang-models/yang-models/sonic-pbh.yang @@ -2,7 +2,7 @@ module sonic-pbh { yang-version 1.1; - namespace "http://github.com/Azure/sonic-pbh"; + namespace "http://github.com/sonic-net/sonic-pbh"; prefix pbh; import ietf-inet-types { diff --git a/src/sonic-yang-models/yang-models/sonic-peer-switch.yang b/src/sonic-yang-models/yang-models/sonic-peer-switch.yang index 79a94c95c2c0..7f953c71bd1a 100644 --- a/src/sonic-yang-models/yang-models/sonic-peer-switch.yang +++ b/src/sonic-yang-models/yang-models/sonic-peer-switch.yang @@ -1,6 +1,6 @@ module sonic-peer-switch { yang-version 1.1; - namespace "http://github.com/Azure/sonic-peer-switch"; + namespace "http://github.com/sonic-net/sonic-peer-switch"; prefix peer_switch; import ietf-inet-types { diff --git a/src/sonic-yang-models/yang-models/sonic-pfc-priority-priority-group-map.yang b/src/sonic-yang-models/yang-models/sonic-pfc-priority-priority-group-map.yang index ccc86c064efb..8c537aa7a6b7 100644 --- a/src/sonic-yang-models/yang-models/sonic-pfc-priority-priority-group-map.yang +++ b/src/sonic-yang-models/yang-models/sonic-pfc-priority-priority-group-map.yang @@ -2,7 +2,7 @@ module sonic-pfc-priority-priority-group-map { yang-version 1.1; - namespace 
"http://github.com/Azure/sonic-pfc-priority-priority-group-map"; + namespace "http://github.com/sonic-net/sonic-pfc-priority-priority-group-map"; prefix pppgm; diff --git a/src/sonic-yang-models/yang-models/sonic-pfc-priority-queue-map.yang b/src/sonic-yang-models/yang-models/sonic-pfc-priority-queue-map.yang index fb659d89bb4b..805cf841786d 100644 --- a/src/sonic-yang-models/yang-models/sonic-pfc-priority-queue-map.yang +++ b/src/sonic-yang-models/yang-models/sonic-pfc-priority-queue-map.yang @@ -2,7 +2,7 @@ module sonic-pfc-priority-queue-map { yang-version 1.1; - namespace "http://github.com/Azure/sonic-pfc-priority-queue-map"; + namespace "http://github.com/sonic-net/sonic-pfc-priority-queue-map"; prefix ppqm; diff --git a/src/sonic-yang-models/yang-models/sonic-pfcwd.yang b/src/sonic-yang-models/yang-models/sonic-pfcwd.yang index 4ea0ffb8537f..27c2a49f2ed9 100644 --- a/src/sonic-yang-models/yang-models/sonic-pfcwd.yang +++ b/src/sonic-yang-models/yang-models/sonic-pfcwd.yang @@ -1,5 +1,5 @@ module sonic-pfcwd { - namespace "http://github.com/Azure/sonic-pfcwd"; + namespace "http://github.com/sonic-net/sonic-pfcwd"; prefix sonic-pfcwd; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-port-qos-map.yang b/src/sonic-yang-models/yang-models/sonic-port-qos-map.yang index 09dcb7ce9c39..6a2c69ab9935 100644 --- a/src/sonic-yang-models/yang-models/sonic-port-qos-map.yang +++ b/src/sonic-yang-models/yang-models/sonic-port-qos-map.yang @@ -2,7 +2,7 @@ module sonic-port-qos-map { yang-version 1.1; - namespace "http://github.com/Azure/sonic-port-qos-map"; + namespace "http://github.com/sonic-net/sonic-port-qos-map"; prefix pqm; diff --git a/src/sonic-yang-models/yang-models/sonic-port.yang b/src/sonic-yang-models/yang-models/sonic-port.yang index a60e6b1624a3..e2408d593ddb 100644 --- a/src/sonic-yang-models/yang-models/sonic-port.yang +++ b/src/sonic-yang-models/yang-models/sonic-port.yang @@ -2,7 +2,7 @@ module sonic-port{ yang-version 1.1; - 
namespace "http://github.com/Azure/sonic-port"; + namespace "http://github.com/sonic-net/sonic-port"; prefix port; import sonic-types { diff --git a/src/sonic-yang-models/yang-models/sonic-portchannel.yang b/src/sonic-yang-models/yang-models/sonic-portchannel.yang index 27df0625dfee..31235a0d2277 100644 --- a/src/sonic-yang-models/yang-models/sonic-portchannel.yang +++ b/src/sonic-yang-models/yang-models/sonic-portchannel.yang @@ -2,7 +2,7 @@ module sonic-portchannel { yang-version 1.1; - namespace "http://github.com/Azure/sonic-portchannel"; + namespace "http://github.com/sonic-net/sonic-portchannel"; prefix lag; import sonic-types { diff --git a/src/sonic-yang-models/yang-models/sonic-queue.yang b/src/sonic-yang-models/yang-models/sonic-queue.yang index 8e24faa72cb0..e764f8149a36 100644 --- a/src/sonic-yang-models/yang-models/sonic-queue.yang +++ b/src/sonic-yang-models/yang-models/sonic-queue.yang @@ -2,7 +2,7 @@ module sonic-queue { yang-version 1.1; - namespace "http://github.com/Azure/sonic-queue"; + namespace "http://github.com/sonic-net/sonic-queue"; prefix squeue; diff --git a/src/sonic-yang-models/yang-models/sonic-restapi.yang b/src/sonic-yang-models/yang-models/sonic-restapi.yang index af509f2335d8..094ed18eb7bd 100644 --- a/src/sonic-yang-models/yang-models/sonic-restapi.yang +++ b/src/sonic-yang-models/yang-models/sonic-restapi.yang @@ -2,7 +2,7 @@ module sonic-restapi { yang-version 1.1; - namespace "http://github.com/Azure/sonic-restapi"; + namespace "http://github.com/sonic-net/sonic-restapi"; prefix restapi; import ietf-inet-types { diff --git a/src/sonic-yang-models/yang-models/sonic-route-common.yang b/src/sonic-yang-models/yang-models/sonic-route-common.yang index 9579739bcdd7..df0eae8f4ea9 100644 --- a/src/sonic-yang-models/yang-models/sonic-route-common.yang +++ b/src/sonic-yang-models/yang-models/sonic-route-common.yang @@ -1,5 +1,5 @@ module sonic-route-common { - namespace "http://github.com/Azure/sonic-route-common"; + namespace 
"http://github.com/sonic-net/sonic-route-common"; prefix rtcmn; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-route-map.yang b/src/sonic-yang-models/yang-models/sonic-route-map.yang index e4717cbc6288..cdfe5c6eda56 100644 --- a/src/sonic-yang-models/yang-models/sonic-route-map.yang +++ b/src/sonic-yang-models/yang-models/sonic-route-map.yang @@ -1,5 +1,5 @@ module sonic-route-map { - namespace "http://github.com/Azure/sonic-route-map"; + namespace "http://github.com/sonic-net/sonic-route-map"; prefix rmap; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-routing-policy-sets.yang b/src/sonic-yang-models/yang-models/sonic-routing-policy-sets.yang index 5b178831fb8d..9b3e18960c43 100644 --- a/src/sonic-yang-models/yang-models/sonic-routing-policy-sets.yang +++ b/src/sonic-yang-models/yang-models/sonic-routing-policy-sets.yang @@ -1,5 +1,5 @@ module sonic-routing-policy-sets { - namespace "http://github.com/Azure/sonic-routing-policy-lists"; + namespace "http://github.com/sonic-net/sonic-routing-policy-lists"; prefix rpolsets; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-scheduler.yang b/src/sonic-yang-models/yang-models/sonic-scheduler.yang index 8dcd4bef79b2..7e580d848d44 100644 --- a/src/sonic-yang-models/yang-models/sonic-scheduler.yang +++ b/src/sonic-yang-models/yang-models/sonic-scheduler.yang @@ -2,7 +2,7 @@ module sonic-scheduler { yang-version 1.1; - namespace "http://github.com/Azure/sonic-scheduler"; + namespace "http://github.com/sonic-net/sonic-scheduler"; prefix sch; diff --git a/src/sonic-yang-models/yang-models/sonic-sflow.yang b/src/sonic-yang-models/yang-models/sonic-sflow.yang index 62984f064c51..adbeda80c4da 100644 --- a/src/sonic-yang-models/yang-models/sonic-sflow.yang +++ b/src/sonic-yang-models/yang-models/sonic-sflow.yang @@ -1,6 +1,6 @@ module sonic-sflow{ - namespace "http://github.com/Azure/sonic-sflow"; + namespace "http://github.com/sonic-net/sonic-sflow"; prefix 
sflow; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-snmp.yang b/src/sonic-yang-models/yang-models/sonic-snmp.yang index 7e3db7b5dd09..68eebd2fa487 100644 --- a/src/sonic-yang-models/yang-models/sonic-snmp.yang +++ b/src/sonic-yang-models/yang-models/sonic-snmp.yang @@ -1,5 +1,5 @@ module sonic-snmp { - namespace "http://github.com/Azure/sonic-snmp"; + namespace "http://github.com/sonic-net/sonic-snmp"; prefix ssnmp; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-static-route.yang b/src/sonic-yang-models/yang-models/sonic-static-route.yang index 48a562f242d9..019825b78881 100644 --- a/src/sonic-yang-models/yang-models/sonic-static-route.yang +++ b/src/sonic-yang-models/yang-models/sonic-static-route.yang @@ -1,6 +1,6 @@ module sonic-static-route { yang-version 1.1; - namespace "http://github.com/Azure/sonic-static-route"; + namespace "http://github.com/sonic-net/sonic-static-route"; prefix sroute; import sonic-vrf { diff --git a/src/sonic-yang-models/yang-models/sonic-storm-control.yang b/src/sonic-yang-models/yang-models/sonic-storm-control.yang index 6d4e2b66a0f5..3f0e48d413a1 100644 --- a/src/sonic-yang-models/yang-models/sonic-storm-control.yang +++ b/src/sonic-yang-models/yang-models/sonic-storm-control.yang @@ -1,5 +1,5 @@ module sonic-storm-control { - namespace "http://github.com/Azure/sonic-storm-control"; + namespace "http://github.com/sonic-net/sonic-storm-control"; yang-version "1"; prefix ssc; diff --git a/src/sonic-yang-models/yang-models/sonic-syslog.yang b/src/sonic-yang-models/yang-models/sonic-syslog.yang index c2aa73827f0f..dc1925f463fa 100644 --- a/src/sonic-yang-models/yang-models/sonic-syslog.yang +++ b/src/sonic-yang-models/yang-models/sonic-syslog.yang @@ -2,7 +2,7 @@ module sonic-syslog { yang-version 1.1; - namespace "http://github.com/Azure/sonic-system-syslog"; + namespace "http://github.com/sonic-net/sonic-system-syslog"; prefix syslog; import ietf-inet-types { diff --git 
a/src/sonic-yang-models/yang-models/sonic-system-aaa.yang b/src/sonic-yang-models/yang-models/sonic-system-aaa.yang index c3a442126959..f842d63cf6e8 100644 --- a/src/sonic-yang-models/yang-models/sonic-system-aaa.yang +++ b/src/sonic-yang-models/yang-models/sonic-system-aaa.yang @@ -1,5 +1,5 @@ module sonic-system-aaa { - namespace "http://github.com/Azure/sonic-system-aaa"; + namespace "http://github.com/sonic-net/sonic-system-aaa"; prefix ssys; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-system-tacacs.yang b/src/sonic-yang-models/yang-models/sonic-system-tacacs.yang index f82340b0e682..6abbcf3523b0 100644 --- a/src/sonic-yang-models/yang-models/sonic-system-tacacs.yang +++ b/src/sonic-yang-models/yang-models/sonic-system-tacacs.yang @@ -1,5 +1,5 @@ module sonic-system-tacacs { - namespace "http://github.com/Azure/sonic-system-tacacs"; + namespace "http://github.com/sonic-net/sonic-system-tacacs"; prefix ssys; yang-version 1.1; diff --git a/src/sonic-yang-models/yang-models/sonic-tc-priority-group-map.yang b/src/sonic-yang-models/yang-models/sonic-tc-priority-group-map.yang index 1b21673f58a0..e01fb7e6ac17 100644 --- a/src/sonic-yang-models/yang-models/sonic-tc-priority-group-map.yang +++ b/src/sonic-yang-models/yang-models/sonic-tc-priority-group-map.yang @@ -2,7 +2,7 @@ module sonic-tc-priority-group-map { yang-version 1.1; - namespace "http://github.com/Azure/sonic-tc-priority-group-map"; + namespace "http://github.com/sonic-net/sonic-tc-priority-group-map"; prefix tpgm; diff --git a/src/sonic-yang-models/yang-models/sonic-tc-queue-map.yang b/src/sonic-yang-models/yang-models/sonic-tc-queue-map.yang index 3bcfab1f4598..c70a3ffe7eab 100644 --- a/src/sonic-yang-models/yang-models/sonic-tc-queue-map.yang +++ b/src/sonic-yang-models/yang-models/sonic-tc-queue-map.yang @@ -2,7 +2,7 @@ module sonic-tc-queue-map { yang-version 1.1; - namespace "http://github.com/Azure/sonic-tc-queue-map"; + namespace 
"http://github.com/sonic-net/sonic-tc-queue-map"; prefix tqm; diff --git a/src/sonic-yang-models/yang-models/sonic-telemetry.yang b/src/sonic-yang-models/yang-models/sonic-telemetry.yang index 0164da5157eb..d3d7600a8e98 100644 --- a/src/sonic-yang-models/yang-models/sonic-telemetry.yang +++ b/src/sonic-yang-models/yang-models/sonic-telemetry.yang @@ -2,7 +2,7 @@ module sonic-telemetry { yang-version 1.1; - namespace "http://github.com/Azure/sonic-telemetry"; + namespace "http://github.com/sonic-net/sonic-telemetry"; prefix telemetry; import ietf-inet-types { diff --git a/src/sonic-yang-models/yang-models/sonic-versions.yang b/src/sonic-yang-models/yang-models/sonic-versions.yang index 156102122706..ad9b95f94180 100644 --- a/src/sonic-yang-models/yang-models/sonic-versions.yang +++ b/src/sonic-yang-models/yang-models/sonic-versions.yang @@ -2,7 +2,7 @@ module sonic-versions { yang-version 1.1; - namespace "http://github.com/Azure/sonic-versions"; + namespace "http://github.com/sonic-net/sonic-versions"; prefix versions; diff --git a/src/sonic-yang-models/yang-models/sonic-vlan-sub-interface.yang b/src/sonic-yang-models/yang-models/sonic-vlan-sub-interface.yang index bc13113ef53e..5a7f02c75143 100644 --- a/src/sonic-yang-models/yang-models/sonic-vlan-sub-interface.yang +++ b/src/sonic-yang-models/yang-models/sonic-vlan-sub-interface.yang @@ -2,7 +2,7 @@ module sonic-vlan-sub-interface { yang-version 1.1; - namespace "http://github.com/Azure/sonic-vlan-sub-interface"; + namespace "http://github.com/sonic-net/sonic-vlan-sub-interface"; prefix vlan-sub-interface; diff --git a/src/sonic-yang-models/yang-models/sonic-vlan.yang b/src/sonic-yang-models/yang-models/sonic-vlan.yang index 48ec466b825b..b8f3e28c43f7 100644 --- a/src/sonic-yang-models/yang-models/sonic-vlan.yang +++ b/src/sonic-yang-models/yang-models/sonic-vlan.yang @@ -2,7 +2,7 @@ module sonic-vlan { yang-version 1.1; - namespace "http://github.com/Azure/sonic-vlan"; + namespace 
"http://github.com/sonic-net/sonic-vlan"; prefix vlan; import ietf-inet-types { diff --git a/src/sonic-yang-models/yang-models/sonic-vrf.yang b/src/sonic-yang-models/yang-models/sonic-vrf.yang index 0d7d0561dd1b..ff9fc5b5a4cc 100644 --- a/src/sonic-yang-models/yang-models/sonic-vrf.yang +++ b/src/sonic-yang-models/yang-models/sonic-vrf.yang @@ -1,5 +1,5 @@ module sonic-vrf { - namespace "http://github.com/Azure/sonic-vrf"; + namespace "http://github.com/sonic-net/sonic-vrf"; prefix vrf; import sonic-extension { diff --git a/src/sonic-yang-models/yang-models/sonic-vxlan.yang b/src/sonic-yang-models/yang-models/sonic-vxlan.yang index 6a9b4ffb5e3b..fc23ea9141a0 100644 --- a/src/sonic-yang-models/yang-models/sonic-vxlan.yang +++ b/src/sonic-yang-models/yang-models/sonic-vxlan.yang @@ -1,6 +1,6 @@ module sonic-vxlan { yang-version 1.1; - namespace "http://github.com/Azure/sonic-vxlan"; + namespace "http://github.com/sonic-net/sonic-vxlan"; prefix svxlan; import ietf-yang-types { diff --git a/src/sonic-yang-models/yang-models/sonic-warm-restart.yang b/src/sonic-yang-models/yang-models/sonic-warm-restart.yang index 8366016746ad..3025260d275d 100644 --- a/src/sonic-yang-models/yang-models/sonic-warm-restart.yang +++ b/src/sonic-yang-models/yang-models/sonic-warm-restart.yang @@ -2,7 +2,7 @@ module sonic-warm-restart { yang-version 1.1; - namespace "http://github.com/Azure/sonic-warm-restart"; + namespace "http://github.com/sonic-net/sonic-warm-restart"; prefix wrm; description "SONIC WARMRESTART"; diff --git a/src/sonic-yang-models/yang-models/sonic-wred-profile.yang b/src/sonic-yang-models/yang-models/sonic-wred-profile.yang index 2ca7be26ca0f..2f3559a23f9b 100644 --- a/src/sonic-yang-models/yang-models/sonic-wred-profile.yang +++ b/src/sonic-yang-models/yang-models/sonic-wred-profile.yang @@ -2,7 +2,7 @@ module sonic-wred-profile { yang-version 1.1; - namespace "http://github.com/Azure/sonic-wred-profile"; + namespace "http://github.com/sonic-net/sonic-wred-profile"; 
prefix wrd; diff --git a/src/sonic-yang-models/yang-templates/sonic-acl.yang.j2 b/src/sonic-yang-models/yang-templates/sonic-acl.yang.j2 index e9461be2ceed..2658016575e9 100644 --- a/src/sonic-yang-models/yang-templates/sonic-acl.yang.j2 +++ b/src/sonic-yang-models/yang-templates/sonic-acl.yang.j2 @@ -7,7 +7,7 @@ module sonic-acl { yang-version 1.1; - namespace "http://github.com/Azure/sonic-acl"; + namespace "http://github.com/sonic-net/sonic-acl"; prefix acl; import ietf-inet-types { diff --git a/src/sonic-yang-models/yang-templates/sonic-extension.yang.j2 b/src/sonic-yang-models/yang-templates/sonic-extension.yang.j2 index 142ef517b7c2..2ee583f1811f 100644 --- a/src/sonic-yang-models/yang-templates/sonic-extension.yang.j2 +++ b/src/sonic-yang-models/yang-templates/sonic-extension.yang.j2 @@ -2,7 +2,7 @@ module sonic-extension { yang-version 1.1; - namespace "http://github.com/Azure/sonic-extension"; + namespace "http://github.com/sonic-net/sonic-extension"; prefix sonic-extension; description "Extension yang Module for SONiC OS"; diff --git a/src/sonic-yang-models/yang-templates/sonic-policer.yang.j2 b/src/sonic-yang-models/yang-templates/sonic-policer.yang.j2 index a8c6f1445a46..90bba27425a2 100644 --- a/src/sonic-yang-models/yang-templates/sonic-policer.yang.j2 +++ b/src/sonic-yang-models/yang-templates/sonic-policer.yang.j2 @@ -7,7 +7,7 @@ module sonic-policer { yang-version 1.1; - namespace "http://github.com/Azure/sonic-policer"; + namespace "http://github.com/sonic-net/sonic-policer"; prefix policer; import sonic-types { diff --git a/src/sonic-yang-models/yang-templates/sonic-types.yang.j2 b/src/sonic-yang-models/yang-templates/sonic-types.yang.j2 index 9de37d2cfc08..f0cd6750f1f8 100644 --- a/src/sonic-yang-models/yang-templates/sonic-types.yang.j2 +++ b/src/sonic-yang-models/yang-templates/sonic-types.yang.j2 @@ -2,7 +2,7 @@ module sonic-types { yang-version 1.1; - namespace "http://github.com/Azure/sonic-head"; + namespace 
"http://github.com/sonic-net/sonic-head"; prefix sonic-types; description "SONiC type for yang Models of SONiC OS"; From 26265732238eee27857fec87a0013e7d0d9f4c7e Mon Sep 17 00:00:00 2001 From: Hua Liu <58683130+liuh-80@users.noreply.github.com> Date: Tue, 1 Nov 2022 08:42:55 +0800 Subject: [PATCH 129/174] [TACACS] Send remote address in TACACS+ authorization message. (#12190) Send remote address in TACACS+ authorization message. #### Why I did it TACACS+ authorization message not send remote address to server side. #### How I did it Send remote address in TACACS+ authorization message. #### How to verify it Pass all E2E test. Create new test case to validate remote address been send to server side. #### Which release branch to backport (provide reason below if selected) - [ ] 201811 - [ ] 201911 - [ ] 202006 - [ ] 202012 - [ ] 202106 - [ ] 202111 - [ ] 202205 #### Description for the changelog Send remote address in TACACS+ authorization message. #### Ensure to add label/tag for the feature raised. example - [PR#2174](https://github.com/sonic-net/sonic-utilities/pull/2174) where, Generic Config and Update feature has been labelled as GCU. 
#### Link to config_db schema for YANG module changes #### A picture of a cute animal (not mandatory but encouraged) --- ...ress-in-TACACS-authorization-message.patch | 127 ++++++++++++++++++ src/tacacs/nss/patch/series | 1 + 2 files changed, 128 insertions(+) create mode 100755 src/tacacs/nss/patch/0010-Send-remote-address-in-TACACS-authorization-message.patch diff --git a/src/tacacs/nss/patch/0010-Send-remote-address-in-TACACS-authorization-message.patch b/src/tacacs/nss/patch/0010-Send-remote-address-in-TACACS-authorization-message.patch new file mode 100755 index 000000000000..b0251a9a38a0 --- /dev/null +++ b/src/tacacs/nss/patch/0010-Send-remote-address-in-TACACS-authorization-message.patch @@ -0,0 +1,127 @@ +From ee47eb11cbfc37600a59f06ae153da5c2c486fea Mon Sep 17 00:00:00 2001 +From: liuh-80 +Date: Tue, 25 Oct 2022 10:34:08 +0800 +Subject: [PATCH] Send remote address in TACACS+ authorization message. + +--- + nss_tacplus.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++- + 1 file changed, 76 insertions(+), 1 deletion(-) + +diff --git a/nss_tacplus.c b/nss_tacplus.c +index 2de00a6..048745a 100644 +--- a/nss_tacplus.c ++++ b/nss_tacplus.c +@@ -33,12 +33,20 @@ + #include + #include + #include ++#include + + #include + + #define MIN_TACACS_USER_PRIV (1) + #define MAX_TACACS_USER_PRIV (15) + ++#define GET_ENV_VARIABLE_OK 0 ++#define GET_ENV_VARIABLE_NOT_FOUND 1 ++#define GET_ENV_VARIABLE_INCORRECT_FORMAT 2 ++#define GET_ENV_VARIABLE_NOT_ENOUGH_BUFFER 3 ++#define GET_REMOTE_ADDRESS_OK 0 ++#define GET_REMOTE_ADDRESS_FAILED 1 ++ + static const char *nssname = "nss_tacplus"; /* for syslogs */ + static const char *config_file = "/etc/tacplus_nss.conf"; + static const char *user_conf = "/etc/tacplus_user"; +@@ -717,6 +725,66 @@ connect_tacacs(struct tac_attrib **attr, int srvr) + return fd; + } + ++/* ++ * Get environment variable first part by name and delimiters ++ */ ++int get_environment_variable_first_part(char* dst, socklen_t size, const char* name, const 
char* delimiters) ++{ ++ memset(dst, 0, size); ++ ++ const char* variable = getenv(name); ++ if (variable == NULL) { ++ if (debug) { ++ syslog(LOG_DEBUG, "%s: can't get environment variable %s, errno=%d", nssname, name, errno); ++ } ++ ++ return GET_ENV_VARIABLE_NOT_FOUND; ++ } ++ ++ char* context = NULL; ++ char* first_part = strtok_r((char *)variable, delimiters, &context); ++ if (first_part == NULL) { ++ if (debug) { ++ syslog(LOG_DEBUG, "%s: can't split %s by delimiters %s", nssname, variable, delimiters); ++ } ++ ++ return GET_ENV_VARIABLE_INCORRECT_FORMAT; ++ } ++ ++ int first_part_len = strlen(first_part); ++ if (first_part_len >= size) { ++ if (debug) { ++ syslog(LOG_DEBUG, "%s: dest buffer size %d not enough for %s", nssname, size, first_part); ++ } ++ ++ return GET_ENV_VARIABLE_NOT_ENOUGH_BUFFER; ++ } ++ ++ strncpy(dst, first_part, size); ++ if (debug) { ++ syslog(LOG_DEBUG, "%s: remote address=%s", nssname, dst); ++ } ++ ++ return GET_ENV_VARIABLE_OK; ++} ++ ++/* ++ * Get current SSH session remote address from environment variable ++ */ ++int get_remote_address(char* dst, socklen_t size) ++{ ++ // SSHD will create environment variable SSH_CONNECTION after user session created. ++ if (get_environment_variable_first_part(dst, size, "SSH_CONNECTION", " ") == GET_ENV_VARIABLE_OK) { ++ return GET_REMOTE_ADDRESS_OK; ++ } ++ ++ // Before user session created, SSHD will create environment variable SSH_CLIENT_IPADDR_PORT. ++ if (get_environment_variable_first_part(dst, size, "SSH_CLIENT_IPADDR_PORT", " ") == GET_ENV_VARIABLE_OK) { ++ return GET_REMOTE_ADDRESS_OK; ++ } ++ ++ return GET_REMOTE_ADDRESS_FAILED; ++} + + /* + * lookup the user on a TACACS server. 
Returns 0 on successful lookup, else 1 +@@ -735,6 +803,13 @@ lookup_tacacs_user(struct pwbuf *pb) + int ret = 1, done = 0; + struct tac_attrib *attr; + int tac_fd, srvr; ++ char remote_addr[INET6_ADDRSTRLEN]; ++ const char* current_tty = getenv("SSH_TTY"); ++ ++ int result = get_remote_address(remote_addr, sizeof(remote_addr)); ++ if ((result != GET_REMOTE_ADDRESS_OK) && debug) { ++ syslog(LOG_DEBUG, "%s: can't get remote address from environment variable, result=%d", nssname, result); ++ } + + for(srvr=0; srvr < tac_srv_no && !done; srvr++) { + arep.msg = NULL; +@@ -748,7 +823,7 @@ lookup_tacacs_user(struct pwbuf *pb) + tac_ntop(tac_srv[srvr].addr->ai_addr) : "unknown", tac_fd); + continue; + } +- ret = tac_author_send(tac_fd, pb->name, "", "", attr); ++ ret = tac_author_send(tac_fd, pb->name, current_tty != NULL ? (char *)current_tty : "", remote_addr, attr); + if(ret < 0) { + if(debug) + syslog(LOG_WARNING, "%s: TACACS+ server %s send failed (%d) for" +-- +2.37.1.windows.1 + diff --git a/src/tacacs/nss/patch/series b/src/tacacs/nss/patch/series index 83e2b31e484f..7b9b5c5779a9 100644 --- a/src/tacacs/nss/patch/series +++ b/src/tacacs/nss/patch/series @@ -7,3 +7,4 @@ 0007-Add-support-for-TACACS-source-address.patch 0008-do-not-create-or-modify-local-user-if-there-is-no-pr.patch 0009-fix-compile-error-strncpy.patch +0010-Send-remote-address-in-TACACS-authorization-message.patch From a31a4e7f82857c1e42c5e22e5d7cdeaaaaeb7b93 Mon Sep 17 00:00:00 2001 From: roman_savchuk Date: Wed, 2 Nov 2022 03:10:07 +0200 Subject: [PATCH 130/174] Revert "[Barefoot] Add xon_offset to pg_profile_lookup.ini (#12073)" (#12568) Why I did it This changes should go with updated SDE for BFN. Without update we do see orchagent core dump. How I did it Revert changes How to verify it Deploy topology. 
No core dump appears --- .../newport/pg_profile_lookup.ini | 39 +++++++++---------- .../montara/pg_profile_lookup.ini | 33 ++++++++-------- .../mavericks/pg_profile_lookup.ini | 33 ++++++++-------- 3 files changed, 51 insertions(+), 54 deletions(-) diff --git a/device/barefoot/x86_64-accton_as9516_32d-r0/newport/pg_profile_lookup.ini b/device/barefoot/x86_64-accton_as9516_32d-r0/newport/pg_profile_lookup.ini index 4103c1fc7d44..602400325be0 100644 --- a/device/barefoot/x86_64-accton_as9516_32d-r0/newport/pg_profile_lookup.ini +++ b/device/barefoot/x86_64-accton_as9516_32d-r0/newport/pg_profile_lookup.ini @@ -1,21 +1,20 @@ # PG lossless profiles. -# speed cable size xon xoff threshold xon_offset - 10000 5m 34816 18432 16384 7 18432 - 25000 5m 34816 18432 16384 7 18432 - 40000 5m 34816 18432 16384 7 18432 - 50000 5m 34816 18432 16384 7 18432 - 100000 5m 36864 18432 18432 7 18432 - 400000 5m 36864 18432 18432 7 18432 - 10000 40m 36864 18432 18432 7 18432 - 25000 40m 39936 18432 21504 7 18432 - 40000 40m 41984 18432 23552 7 18432 - 50000 40m 41984 18432 23552 7 18432 - 100000 40m 54272 18432 35840 7 18432 - 400000 40m 54272 18432 35840 7 18432 - 10000 300m 49152 18432 30720 7 18432 - 25000 300m 71680 18432 53248 7 18432 - 40000 300m 94208 18432 75776 7 18432 - 50000 300m 94208 18432 75776 7 18432 - 100000 300m 184320 18432 165888 7 18432 - 400000 300m 184320 18432 165888 7 18432 - +# speed cable size xon xoff threshold + 10000 5m 34816 18432 16384 7 + 25000 5m 34816 18432 16384 7 + 40000 5m 34816 18432 16384 7 + 50000 5m 34816 18432 16384 7 + 100000 5m 36864 18432 18432 7 + 400000 5m 36864 18432 18432 7 + 10000 40m 36864 18432 18432 7 + 25000 40m 39936 18432 21504 7 + 40000 40m 41984 18432 23552 7 + 50000 40m 41984 18432 23552 7 + 100000 40m 54272 18432 35840 7 + 400000 40m 54272 18432 35840 7 + 10000 300m 49152 18432 30720 7 + 25000 300m 71680 18432 53248 7 + 40000 300m 94208 18432 75776 7 + 50000 300m 94208 18432 75776 7 + 100000 300m 184320 18432 165888 7 + 400000 
300m 184320 18432 165888 7 diff --git a/device/barefoot/x86_64-accton_wedge100bf_32x-r0/montara/pg_profile_lookup.ini b/device/barefoot/x86_64-accton_wedge100bf_32x-r0/montara/pg_profile_lookup.ini index 8011959df7da..b66b129fe43f 100644 --- a/device/barefoot/x86_64-accton_wedge100bf_32x-r0/montara/pg_profile_lookup.ini +++ b/device/barefoot/x86_64-accton_wedge100bf_32x-r0/montara/pg_profile_lookup.ini @@ -1,18 +1,17 @@ # PG lossless profiles. -# speed cable size xon xoff threshold xon_offset - 10000 5m 34816 18432 16384 0 18432 - 25000 5m 34816 18432 16384 0 18432 - 40000 5m 34816 18432 16384 0 18432 - 50000 5m 34816 18432 16384 0 18432 - 100000 5m 36864 18432 18432 0 18432 - 10000 40m 36864 18432 18432 0 18432 - 25000 40m 39936 18432 21504 0 18432 - 40000 40m 41984 18432 23552 0 18432 - 50000 40m 41984 18432 23552 0 18432 - 100000 40m 54272 18432 35840 0 18432 - 10000 300m 49152 18432 30720 0 18432 - 25000 300m 71680 18432 53248 0 18432 - 40000 300m 94208 18432 75776 0 18432 - 50000 300m 94208 18432 75776 0 18432 - 100000 300m 184320 18432 165888 0 18432 - +# speed cable size xon xoff threshold + 10000 5m 34816 18432 16384 0 + 25000 5m 34816 18432 16384 0 + 40000 5m 34816 18432 16384 0 + 50000 5m 34816 18432 16384 0 + 100000 5m 36864 18432 18432 0 + 10000 40m 36864 18432 18432 0 + 25000 40m 39936 18432 21504 0 + 40000 40m 41984 18432 23552 0 + 50000 40m 41984 18432 23552 0 + 100000 40m 54272 18432 35840 0 + 10000 300m 49152 18432 30720 0 + 25000 300m 71680 18432 53248 0 + 40000 300m 94208 18432 75776 0 + 50000 300m 94208 18432 75776 0 + 100000 300m 184320 18432 165888 0 diff --git a/device/barefoot/x86_64-accton_wedge100bf_65x-r0/mavericks/pg_profile_lookup.ini b/device/barefoot/x86_64-accton_wedge100bf_65x-r0/mavericks/pg_profile_lookup.ini index 8011959df7da..b66b129fe43f 100644 --- a/device/barefoot/x86_64-accton_wedge100bf_65x-r0/mavericks/pg_profile_lookup.ini +++ b/device/barefoot/x86_64-accton_wedge100bf_65x-r0/mavericks/pg_profile_lookup.ini @@ -1,18 
+1,17 @@ # PG lossless profiles. -# speed cable size xon xoff threshold xon_offset - 10000 5m 34816 18432 16384 0 18432 - 25000 5m 34816 18432 16384 0 18432 - 40000 5m 34816 18432 16384 0 18432 - 50000 5m 34816 18432 16384 0 18432 - 100000 5m 36864 18432 18432 0 18432 - 10000 40m 36864 18432 18432 0 18432 - 25000 40m 39936 18432 21504 0 18432 - 40000 40m 41984 18432 23552 0 18432 - 50000 40m 41984 18432 23552 0 18432 - 100000 40m 54272 18432 35840 0 18432 - 10000 300m 49152 18432 30720 0 18432 - 25000 300m 71680 18432 53248 0 18432 - 40000 300m 94208 18432 75776 0 18432 - 50000 300m 94208 18432 75776 0 18432 - 100000 300m 184320 18432 165888 0 18432 - +# speed cable size xon xoff threshold + 10000 5m 34816 18432 16384 0 + 25000 5m 34816 18432 16384 0 + 40000 5m 34816 18432 16384 0 + 50000 5m 34816 18432 16384 0 + 100000 5m 36864 18432 18432 0 + 10000 40m 36864 18432 18432 0 + 25000 40m 39936 18432 21504 0 + 40000 40m 41984 18432 23552 0 + 50000 40m 41984 18432 23552 0 + 100000 40m 54272 18432 35840 0 + 10000 300m 49152 18432 30720 0 + 25000 300m 71680 18432 53248 0 + 40000 300m 94208 18432 75776 0 + 50000 300m 94208 18432 75776 0 + 100000 300m 184320 18432 165888 0 From e1440f0044d8ee81ee321b00747f7d1608885277 Mon Sep 17 00:00:00 2001 From: lixiaoyuner <35456895+lixiaoyuner@users.noreply.github.com> Date: Wed, 2 Nov 2022 17:24:32 +0800 Subject: [PATCH 131/174] Improve feature mode switch process (#12188) * Fix kube mode to local mode long duration issue * Remove IPV6 parameters which is not necessary * Fix read node labels bug * Tag the running image to latest if it's stable * Disable image_version_higher check * Change image_version_higher checker test case Signed-off-by: Yun Li --- src/sonic-ctrmgrd/.gitignore | 1 + src/sonic-ctrmgrd/ctrmgr/container | 25 +++++-- src/sonic-ctrmgrd/ctrmgr/container_startup.py | 16 ++-- src/sonic-ctrmgrd/ctrmgr/ctrmgrd.py | 75 +++++++++++++++++-- src/sonic-ctrmgrd/ctrmgr/kube_commands.py | 8 +- .../ctrmgr/remote_ctr.config.json | 1 
+ .../tests/container_startup_test.py | 2 +- src/sonic-ctrmgrd/tests/container_test.py | 5 ++ src/sonic-ctrmgrd/tests/ctrmgrd_test.py | 49 +++++++++++- src/sonic-ctrmgrd/tests/kube_commands_test.py | 34 ++------- 10 files changed, 165 insertions(+), 51 deletions(-) diff --git a/src/sonic-ctrmgrd/.gitignore b/src/sonic-ctrmgrd/.gitignore index bdebd5e838cd..2f42f4d2c5ea 100644 --- a/src/sonic-ctrmgrd/.gitignore +++ b/src/sonic-ctrmgrd/.gitignore @@ -10,3 +10,4 @@ tests/__pycache__/ ctrmgr/__pycache__/ venv tests/.coverage* +.pytest_cache/ \ No newline at end of file diff --git a/src/sonic-ctrmgrd/ctrmgr/container b/src/sonic-ctrmgrd/ctrmgr/container index db6ded635ee9..ca2394b057bb 100755 --- a/src/sonic-ctrmgrd/ctrmgr/container +++ b/src/sonic-ctrmgrd/ctrmgr/container @@ -30,6 +30,10 @@ STATE = "state" KUBE_LABEL_TABLE = "KUBE_LABELS" KUBE_LABEL_SET_KEY = "SET" +SERVER_TABLE = "KUBERNETES_MASTER" +SERVER_KEY = "SERVER" +ST_SER_CONNECTED = "connected" +ST_SER_UPDATE_TS = "update_time" # Get seconds to wait for remote docker to start. 
# If not, revert to local @@ -75,8 +79,10 @@ def read_data(is_config, feature, fields): ret = [] db = cfg_db if is_config else state_db - - tbl = swsscommon.Table(db, FEATURE_TABLE) + if feature == SERVER_KEY: + tbl = swsscommon.Table(db, SERVER_TABLE) + else: + tbl = swsscommon.Table(db, FEATURE_TABLE) data = dict(tbl.get(feature)[1]) for (field, default) in fields: @@ -104,6 +110,13 @@ def read_state(feature): [(CURRENT_OWNER, "none"), (REMOTE_STATE, "none"), (CONTAINER_ID, "")]) +def read_server_state(): + """ Read requried feature state """ + + return read_data(False, SERVER_KEY, + [(ST_SER_CONNECTED, "false"), (ST_SER_UPDATE_TS, "")]) + + def docker_action(action, feature, **kwargs): """ Execute docker action """ try: @@ -192,9 +205,10 @@ def container_start(feature, **kwargs): set_owner, fallback, _ = read_config(feature) _, remote_state, _ = read_state(feature) + server_connected, _ = read_server_state() - debug_msg("{}: set_owner:{} fallback:{} remote_state:{}".format( - feature, set_owner, fallback, remote_state)) + debug_msg("{}: set_owner:{} fallback:{} remote_state:{} server_connected:{}".format( + feature, set_owner, fallback, remote_state, server_connected)) data = { SYSTEM_STATE: "up", @@ -207,8 +221,9 @@ def container_start(feature, **kwargs): start_val = START_LOCAL else: start_val = START_KUBE - if fallback and (remote_state == "none"): + if fallback and (remote_state == "none" or server_connected == "false"): start_val |= START_LOCAL + data[REMOTE_STATE] = "none" if start_val == START_LOCAL: # Implies *only* local. diff --git a/src/sonic-ctrmgrd/ctrmgr/container_startup.py b/src/sonic-ctrmgrd/ctrmgr/container_startup.py index 7fcfbfc8a29a..13ce01ff1ead 100755 --- a/src/sonic-ctrmgrd/ctrmgr/container_startup.py +++ b/src/sonic-ctrmgrd/ctrmgr/container_startup.py @@ -232,14 +232,14 @@ def container_up(feature, owner, version): do_freeze(feature, "This version is marked disabled. 
Exiting ...") return - if not instance_higher(feature, state_data[VERSION], version): - # TODO: May Remove label __enabled - # Else kubelet will continue to re-deploy every 5 mins, until - # master removes the lable to un-deploy. - # - do_freeze(feature, "bail out as current deploy version {} is not higher". - format(version)) - return + # if not instance_higher(feature, state_data[VERSION], version): + # # TODO: May Remove label __enabled + # # Else kubelet will continue to re-deploy every 5 mins, until + # # master removes the lable to un-deploy. + # # + # do_freeze(feature, "bail out as current deploy version {} is not higher". + # format(version)) + # return update_data(state_db, feature, { VERSION: version }) diff --git a/src/sonic-ctrmgrd/ctrmgr/ctrmgrd.py b/src/sonic-ctrmgrd/ctrmgr/ctrmgrd.py index 154591a695fa..6be18146b2aa 100755 --- a/src/sonic-ctrmgrd/ctrmgr/ctrmgrd.py +++ b/src/sonic-ctrmgrd/ctrmgr/ctrmgrd.py @@ -60,7 +60,7 @@ CFG_SER_IP: "", CFG_SER_PORT: "6443", CFG_SER_DISABLE: "false", - CFG_SER_INSECURE: "false" + CFG_SER_INSECURE: "true" } dflt_st_ser = { @@ -88,18 +88,20 @@ JOIN_LATENCY = "join_latency_on_boot_seconds" JOIN_RETRY = "retry_join_interval_seconds" LABEL_RETRY = "retry_labels_update_seconds" +TAG_IMAGE_LATEST = "tag_latest_image_on_wait_seconds" USE_K8S_PROXY = "use_k8s_as_http_proxy" remote_ctr_config = { JOIN_LATENCY: 10, JOIN_RETRY: 10, LABEL_RETRY: 2, + TAG_IMAGE_LATEST: 30, USE_K8S_PROXY: "" } def log_debug(m): msg = "{}: {}".format(inspect.stack()[1][3], m) - print(msg) + #print(msg) syslog.syslog(syslog.LOG_DEBUG, msg) @@ -148,6 +150,8 @@ def init(): with open(SONIC_CTR_CONFIG, "r") as s: d = json.load(s) remote_ctr_config.update(d) + if UNIT_TESTING: + remote_ctr_config[TAG_IMAGE_LATEST] = 0 class MainServer: @@ -172,11 +176,11 @@ def register_db(self, db_name): self.db_connectors[db_name] = swsscommon.DBConnector(db_name, 0) - def register_timer(self, ts, handler): + def register_timer(self, ts, handler, args=()): """ 
Register timer based handler. The handler will be called on/after give timestamp, ts """ - self.timer_handlers[ts].append(handler) + self.timer_handlers[ts].append((handler, args)) def register_handler(self, db_name, table_name, handler): @@ -235,7 +239,7 @@ def run(self): lst = self.timer_handlers[k] del self.timer_handlers[k] for fn in lst: - fn() + fn[0](*fn[1]) else: timeout = (k - ct_ts).seconds break @@ -426,6 +430,54 @@ def do_join(self, ip, port, insecure): format(remote_ctr_config[JOIN_RETRY], self.start_time)) +def tag_latest_image(server, feat, docker_id, image_ver): + res = 1 + if not UNIT_TESTING: + status = os.system("docker ps |grep {} >/dev/null".format(docker_id)) + if status: + syslog.syslog(syslog.LOG_ERR, + "Feature {}:{} is not stable".format(feat, image_ver)) + else: + image_item = os.popen("docker inspect {} |jq -r .[].Image".format(docker_id)).read().strip() + if image_item: + image_id = image_item.split(":")[1][:12] + image_info = os.popen("docker images |grep {}".format(image_id)).read().split() + if image_info: + image_rep = image_info[0] + res = os.system("docker tag {} {}:latest".format(image_id, image_rep)) + if res != 0: + syslog.syslog(syslog.LOG_ERR, + "Failed to tag {}:{} to latest".format(image_rep, image_ver)) + else: + syslog.syslog(syslog.LOG_INFO, + "Successfully tag {}:{} to latest".format(image_rep, image_ver)) + feat_status = os.popen("docker inspect {} |jq -r .[].State.Running".format(feat)).read().strip() + if feat_status: + if feat_status == 'true': + os.system("docker stop {}".format(feat)) + syslog.syslog(syslog.LOG_ERR, + "{} should not run, stop it".format(feat)) + os.system("docker rm {}".format(feat)) + syslog.syslog(syslog.LOG_INFO, + "Delete previous {} container".format(feat)) + else: + syslog.syslog(syslog.LOG_ERR, + "Failed to docker images |grep {} to get image repo".format(image_id)) + else: + syslog.syslog(syslog.LOG_ERR, + "Failed to inspect container:{} to get image id".format(docker_id)) + else: + 
server.mod_db_entry(STATE_DB_NAME, + FEATURE_TABLE, feat, {"tag_latest": "true"}) + res = 0 + if res: + log_debug("failed to tag {}:{} to latest".format(feat, image_ver)) + else: + log_debug("successfully tag {}:{} to latest".format(feat, image_ver)) + + return res + + # # Feature changes # @@ -523,6 +575,19 @@ def on_state_update(self, key, op, data): self.st_data[key] = _update_entry(dflt_st_feat, data) remote_state = self.st_data[key][ST_FEAT_REMOTE_STATE] + if (old_remote_state != remote_state) and (remote_state == "running"): + # Tag latest + start_time = datetime.datetime.now() + datetime.timedelta( + seconds=remote_ctr_config[TAG_IMAGE_LATEST]) + self.server.register_timer(start_time, tag_latest_image, ( + self.server, + key, + self.st_data[key][ST_FEAT_CTR_ID], + self.st_data[key][ST_FEAT_CTR_VER])) + + log_debug("try to tag latest label after {} seconds @{}".format( + remote_ctr_config[TAG_IMAGE_LATEST], start_time)) + if (not init) and ( (old_remote_state == remote_state) or (remote_state != "pending")): # no change or nothing to do. diff --git a/src/sonic-ctrmgrd/ctrmgr/kube_commands.py b/src/sonic-ctrmgrd/ctrmgr/kube_commands.py index 3adea36ef12c..91415390ccd5 100755 --- a/src/sonic-ctrmgrd/ctrmgr/kube_commands.py +++ b/src/sonic-ctrmgrd/ctrmgr/kube_commands.py @@ -84,7 +84,7 @@ def _run_command(cmd, timeout=5): def kube_read_labels(): """ Read current labels on node and return as dict. 
""" - KUBECTL_GET_CMD = "kubectl --kubeconfig {} get nodes {} --show-labels |tr -s ' ' | cut -f6 -d' '" + KUBECTL_GET_CMD = "kubectl --kubeconfig {} get nodes {} --show-labels --no-headers |tr -s ' ' | cut -f6 -d' '" labels = {} ret, out, _ = _run_command(KUBECTL_GET_CMD.format( @@ -332,12 +332,12 @@ def _do_reset(pending_join = False): def _do_join(server, port, insecure): - KUBEADM_JOIN_CMD = "kubeadm join --discovery-file {} --node-name {} --apiserver-advertise-address {}" + KUBEADM_JOIN_CMD = "kubeadm join --discovery-file {} --node-name {}" err = "" out = "" ret = 0 try: - local_ipv6 = _get_local_ipv6() + #local_ipv6 = _get_local_ipv6() #_download_file(server, port, insecure) _gen_cli_kubeconf(server, port, insecure) _do_reset(True) @@ -349,7 +349,7 @@ def _do_join(server, port, insecure): if ret == 0: (ret, out, err) = _run_command(KUBEADM_JOIN_CMD.format( - KUBE_ADMIN_CONF, get_device_name(), local_ipv6), timeout=60) + KUBE_ADMIN_CONF, get_device_name()), timeout=60) log_debug("ret = {}".format(ret)) except IOError as e: diff --git a/src/sonic-ctrmgrd/ctrmgr/remote_ctr.config.json b/src/sonic-ctrmgrd/ctrmgr/remote_ctr.config.json index 3fb0c20fddcf..0b91fde36473 100644 --- a/src/sonic-ctrmgrd/ctrmgr/remote_ctr.config.json +++ b/src/sonic-ctrmgrd/ctrmgr/remote_ctr.config.json @@ -3,6 +3,7 @@ "retry_join_interval_seconds": 30, "retry_labels_update_seconds": 5, "revert_to_local_on_wait_seconds": 60, + "tag_latest_image_on_wait_seconds": 600, "use_k8s_as_http_proxy": "n" } diff --git a/src/sonic-ctrmgrd/tests/container_startup_test.py b/src/sonic-ctrmgrd/tests/container_startup_test.py index bffffaadca94..b21fe855662c 100755 --- a/src/sonic-ctrmgrd/tests/container_startup_test.py +++ b/src/sonic-ctrmgrd/tests/container_startup_test.py @@ -169,7 +169,7 @@ common_test.FEATURE_TABLE: { "snmp": { "container_id": "no_change", - "container_version": "20201230.77", + "container_version": "20201230.11", "current_owner": "no_change", "remote_state": "no_change", 
"system_state": "up" diff --git a/src/sonic-ctrmgrd/tests/container_test.py b/src/sonic-ctrmgrd/tests/container_test.py index 4738597c72c4..4581111015e5 100755 --- a/src/sonic-ctrmgrd/tests/container_test.py +++ b/src/sonic-ctrmgrd/tests/container_test.py @@ -125,6 +125,11 @@ "current_owner": "none", "container_id": "" } + }, + common_test.SERVER_TABLE: { + "SERVER": { + "connected": "true" + } } } }, diff --git a/src/sonic-ctrmgrd/tests/ctrmgrd_test.py b/src/sonic-ctrmgrd/tests/ctrmgrd_test.py index 171534b5a8d1..842b935396d1 100755 --- a/src/sonic-ctrmgrd/tests/ctrmgrd_test.py +++ b/src/sonic-ctrmgrd/tests/ctrmgrd_test.py @@ -106,7 +106,7 @@ common_test.KUBE_JOIN: { "ip": "10.10.10.10", "port": "6443", - "insecure": "false" + "insecure": "true" } } }, @@ -151,7 +151,7 @@ common_test.KUBE_JOIN: { "ip": "10.10.10.10", "port": "6443", - "insecure": "false" + "insecure": "true" }, common_test.KUBE_RESET: { "flag": "true" @@ -276,6 +276,51 @@ } } } + }, + 3: { + common_test.DESCR: "Tag image latest when remote_state changes to running", + common_test.ARGS: "ctrmgrd", + common_test.PRE: { + common_test.CONFIG_DB_NO: { + common_test.FEATURE_TABLE: { + "snmp": { + "set_owner": "kube" + } + } + }, + common_test.STATE_DB_NO: { + common_test.FEATURE_TABLE: { + "snmp": { + "remote_state": "pending" + } + } + } + }, + common_test.UPD: { + common_test.CONFIG_DB_NO: { + common_test.FEATURE_TABLE: { + "snmp": { + "set_owner": "kube" + } + } + }, + common_test.STATE_DB_NO: { + common_test.FEATURE_TABLE: { + "snmp": { + "remote_state": "running" + } + } + } + }, + common_test.POST: { + common_test.STATE_DB_NO: { + common_test.FEATURE_TABLE: { + "snmp": { + "tag_latest": "true" + } + } + } + } } } diff --git a/src/sonic-ctrmgrd/tests/kube_commands_test.py b/src/sonic-ctrmgrd/tests/kube_commands_test.py index d8e0939efb85..60da7fd2c073 100755 --- a/src/sonic-ctrmgrd/tests/kube_commands_test.py +++ b/src/sonic-ctrmgrd/tests/kube_commands_test.py @@ -27,7 +27,7 @@ common_test.DESCR: 
"read labels", common_test.RETVAL: 0, common_test.PROC_CMD: ["\ -kubectl --kubeconfig {} get nodes none --show-labels |tr -s ' ' | cut -f6 -d' '".format(KUBE_ADMIN_CONF)], +kubectl --kubeconfig {} get nodes none --show-labels --no-headers |tr -s ' ' | cut -f6 -d' '".format(KUBE_ADMIN_CONF)], common_test.PROC_OUT: ["foo=bar,hello=world"], common_test.POST: { "foo": "bar", @@ -40,7 +40,7 @@ common_test.TRIGGER_THROW: True, common_test.RETVAL: -1, common_test.PROC_CMD: ["\ -kubectl --kubeconfig {} get nodes none --show-labels |tr -s ' ' | cut -f6 -d' '".format(KUBE_ADMIN_CONF)], +kubectl --kubeconfig {} get nodes none --show-labels --no-headers |tr -s ' ' | cut -f6 -d' '".format(KUBE_ADMIN_CONF)], common_test.POST: { }, common_test.PROC_KILLED: 1 @@ -49,7 +49,7 @@ common_test.DESCR: "read labels fail", common_test.RETVAL: -1, common_test.PROC_CMD: ["\ -kubectl --kubeconfig {} get nodes none --show-labels |tr -s ' ' | cut -f6 -d' '".format(KUBE_ADMIN_CONF)], +kubectl --kubeconfig {} get nodes none --show-labels --no-headers |tr -s ' ' | cut -f6 -d' '".format(KUBE_ADMIN_CONF)], common_test.PROC_OUT: [""], common_test.PROC_ERR: ["command failed"], common_test.POST: { @@ -64,7 +64,7 @@ common_test.RETVAL: 0, common_test.ARGS: { "foo": "bar", "hello": "World!", "test": "ok" }, common_test.PROC_CMD: [ -"kubectl --kubeconfig {} get nodes none --show-labels |tr -s ' ' | cut -f6 -d' '".format(KUBE_ADMIN_CONF), +"kubectl --kubeconfig {} get nodes none --show-labels --no-headers |tr -s ' ' | cut -f6 -d' '".format(KUBE_ADMIN_CONF), "kubectl --kubeconfig {} label --overwrite nodes none hello-".format( KUBE_ADMIN_CONF), "kubectl --kubeconfig {} label --overwrite nodes none hello=World! 
test=ok".format( @@ -77,7 +77,7 @@ common_test.RETVAL: 0, common_test.ARGS: { "foo": "bar", "hello": "world" }, common_test.PROC_CMD: [ -"kubectl --kubeconfig {} get nodes none --show-labels |tr -s ' ' | cut -f6 -d' '".format(KUBE_ADMIN_CONF) +"kubectl --kubeconfig {} get nodes none --show-labels --no-headers |tr -s ' ' | cut -f6 -d' '".format(KUBE_ADMIN_CONF) ], common_test.PROC_OUT: ["foo=bar,hello=world"] }, @@ -87,7 +87,7 @@ common_test.ARGS: { "any": "thing" }, common_test.RETVAL: -1, common_test.PROC_CMD: [ -"kubectl --kubeconfig {} get nodes none --show-labels |tr -s ' ' | cut -f6 -d' '".format(KUBE_ADMIN_CONF) +"kubectl --kubeconfig {} get nodes none --show-labels --no-headers |tr -s ' ' | cut -f6 -d' '".format(KUBE_ADMIN_CONF) ], common_test.PROC_ERR: ["read failed"] } @@ -110,19 +110,10 @@ "mkdir -p {}".format(CNI_DIR), "cp {} {}".format(FLANNEL_CONF_FILE, CNI_DIR), "systemctl start kubelet", - "kubeadm join --discovery-file {} --node-name none --apiserver-advertise-address FC00:2::32".format( + "kubeadm join --discovery-file {} --node-name none".format( KUBE_ADMIN_CONF) ], common_test.PROC_RUN: [True, True], - common_test.PRE: { - common_test.CONFIG_DB_NO: { - common_test.MGMT_INTERFACE_TABLE: { - "eth0|FC00:2::32/64": { - "gwaddr": "fc00:2::1" - } - } - } - }, common_test.REQ: { "data": {"ca.crt": "test"} } @@ -143,19 +134,10 @@ "mkdir -p {}".format(CNI_DIR), "cp {} {}".format(FLANNEL_CONF_FILE, CNI_DIR), "systemctl start kubelet", - "kubeadm join --discovery-file {} --node-name none --apiserver-advertise-address FC00:2::32".format( + "kubeadm join --discovery-file {} --node-name none".format( KUBE_ADMIN_CONF) ], common_test.PROC_RUN: [True, True], - common_test.PRE: { - common_test.CONFIG_DB_NO: { - common_test.MGMT_INTERFACE_TABLE: { - "eth0|FC00:2::32/64": { - "gwaddr": "fc00:2::1" - } - } - } - }, common_test.REQ: { "data": {"ca.crt": "test"} } From b3a81679684d64ff380ab6f7e761587d9281679b Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Wed, 2 Nov 2022 
10:16:48 -0400 Subject: [PATCH 132/174] [system-health] Remove subprocess with shell=True (#12572) Signed-off-by: maipbui #### Why I did it `subprocess` is used with `shell=True`, which is very dangerous for shell injection. #### How I did it remove `shell=True`, use `shell=False` #### How to verify it Pass UT Manual test --- src/system-health/health_checker/service_checker.py | 11 ++++++----- src/system-health/health_checker/sysmonitor.py | 2 +- src/system-health/health_checker/utils.py | 2 +- src/system-health/tests/test_system_health.py | 4 ++-- 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/src/system-health/health_checker/service_checker.py b/src/system-health/health_checker/service_checker.py index 674ad9944280..c81948a7ae25 100644 --- a/src/system-health/health_checker/service_checker.py +++ b/src/system-health/health_checker/service_checker.py @@ -24,13 +24,13 @@ class ServiceChecker(HealthChecker): CRITICAL_PROCESSES_PATH = 'etc/supervisor/critical_processes' # Command to get merged directory of a container - GET_CONTAINER_FOLDER_CMD = 'docker inspect {} --format "{{{{.GraphDriver.Data.MergedDir}}}}"' + GET_CONTAINER_FOLDER_CMD = ['docker', 'inspect', '', '--format', "{{.GraphDriver.Data.MergedDir}}"] # Command to query the status of monit service. - CHECK_MONIT_SERVICE_CMD = 'systemctl is-active monit.service' + CHECK_MONIT_SERVICE_CMD = ['systemctl', 'is-active', 'monit.service'] # Command to get summary of critical system service. - CHECK_CMD = 'monit summary -B' + CHECK_CMD = ['monit', 'summary', '-B'] MIN_CHECK_CMD_LINES = 3 # Expect status for different system service category. 
@@ -168,7 +168,8 @@ def _update_container_critical_processes(self, container, critical_process_list) self.need_save_cache = True def _get_container_folder(self, container): - container_folder = utils.run_command(ServiceChecker.GET_CONTAINER_FOLDER_CMD.format(container)) + ServiceChecker.GET_CONTAINER_FOLDER_CMD[2] = str(container) + container_folder = utils.run_command(ServiceChecker.GET_CONTAINER_FOLDER_CMD) if container_folder is None: return container_folder @@ -327,7 +328,7 @@ def check_process_existence(self, container_name, critical_process_list, config, # We are using supervisorctl status to check the critical process status. We cannot leverage psutil here because # it not always possible to get process cmdline in supervisor.conf. E.g, cmdline of orchagent is "/usr/bin/orchagent", # however, in supervisor.conf it is "/usr/bin/orchagent.sh" - cmd = 'docker exec {} bash -c "supervisorctl status"'.format(container_name) + cmd = ['docker', 'exec', str(container_name), 'bash', '-c', "supervisorctl status"] process_status = utils.run_command(cmd) if process_status is None: for process_name in critical_process_list: diff --git a/src/system-health/health_checker/sysmonitor.py b/src/system-health/health_checker/sysmonitor.py index e69d289fc537..e4dbc68ebf1d 100755 --- a/src/system-health/health_checker/sysmonitor.py +++ b/src/system-health/health_checker/sysmonitor.py @@ -235,7 +235,7 @@ def get_app_ready_status(self, service): #Gets the service properties def run_systemctl_show(self, service): - command = ('systemctl show {} --property=Id,LoadState,UnitFileState,Type,ActiveState,SubState,Result'.format(service)) + command = ['systemctl', 'show', str(service), '--property=Id,LoadState,UnitFileState,Type,ActiveState,SubState,Result'] output = utils.run_command(command) srv_properties = output.split('\n') prop_dict = {} diff --git a/src/system-health/health_checker/utils.py b/src/system-health/health_checker/utils.py index 00e7754e1ec2..338ef1d3afe5 100644 --- 
a/src/system-health/health_checker/utils.py +++ b/src/system-health/health_checker/utils.py @@ -8,7 +8,7 @@ def run_command(command): :return: Output of the shell command. """ try: - process = subprocess.Popen(command, shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + process = subprocess.Popen(command, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return process.communicate()[0] except Exception: return None diff --git a/src/system-health/tests/test_system_health.py b/src/system-health/tests/test_system_health.py index d58c69bececa..ab0a3bcb1145 100644 --- a/src/system-health/tests/test_system_health.py +++ b/src/system-health/tests/test_system_health.py @@ -504,10 +504,10 @@ def test_manager(mock_hw_info, mock_service_info, mock_udc_info): manager._set_system_led(chassis, manager.config, 'normal') def test_utils(): - output = utils.run_command('some invalid command') + output = utils.run_command(['some', 'invalid', 'command']) assert not output - output = utils.run_command('ls') + output = utils.run_command(['ls']) assert output From 1f88a3ee0a6b7a5f3d7fb7d7177095e40f09a57e Mon Sep 17 00:00:00 2001 From: Hua Liu <58683130+liuh-80@users.noreply.github.com> Date: Thu, 3 Nov 2022 17:31:13 +0800 Subject: [PATCH 133/174] [openssh] Export remote address to environment variable for TACACS authorization. (#12447) Export remote address to environment variable for TACACS authorization. #### Why I did it When remote user login, nss-tacplus need user remove address for TACACSS authorization. #### How I did it Export remote address to environment variable "SSH_REMOTE_IP" #### How to verify it Pass all E2E test. #### Which release branch to backport (provide reason below if selected) - [ ] 201811 - [ ] 201911 - [ ] 202006 - [ ] 202012 - [ ] 202106 - [ ] 202111 - [ ] 202205 #### Description for the changelog Export remote address to environment variable for TACACS authorization. 
#### Ensure to add label/tag for the feature raised. example - PR#2174 under sonic-utilities repo. where, Generic Config and Update feature has been labelled as GCU. #### Link to config_db schema for YANG module changes #### A picture of a cute animal (not mandatory but encouraged) --- ...Export-remote-info-for-authorization.patch | 86 +++++++++++++++++++ src/openssh/patch/series | 1 + 2 files changed, 87 insertions(+) create mode 100755 src/openssh/patch/0003-Export-remote-info-for-authorization.patch diff --git a/src/openssh/patch/0003-Export-remote-info-for-authorization.patch b/src/openssh/patch/0003-Export-remote-info-for-authorization.patch new file mode 100755 index 000000000000..3ef7623d7b86 --- /dev/null +++ b/src/openssh/patch/0003-Export-remote-info-for-authorization.patch @@ -0,0 +1,86 @@ +From 51b3d58afef6796fe0568deb4c3765e24cc828c9 Mon Sep 17 00:00:00 2001 +From: liuh-80 +Date: Fri, 30 Sep 2022 16:57:03 +0800 +Subject: [PATCH] Export remote info for authorization. authorization. + +--- + auth.c | 11 +++++++++++ + auth.h | 3 +++ + session.c | 3 +++ + sshd.c | 5 +++++ + 4 files changed, 22 insertions(+) + +diff --git a/auth.c b/auth.c +index c3693ba3f..96d551922 100644 +--- a/auth.c ++++ b/auth.c +@@ -914,3 +914,14 @@ auth_authorise_keyopts(struct ssh *ssh, struct passwd *pw, + + return 0; + } ++ ++/* Export remote IP address and port for authorization. 
*/ ++void ++export_remote_info(struct ssh *ssh) ++{ ++ const char *remote_ip = ssh_remote_ipaddr(ssh); ++ const int remote_port = ssh_remote_port(ssh); ++ const char remote_addr_port[32 + INET6_ADDRSTRLEN]; ++ snprintf(remote_addr_port, sizeof(remote_addr_port), "%s %d", remote_ip, remote_port); ++ setenv("SSH_CLIENT_IPADDR_PORT", remote_addr_port, 1); ++} +\ No newline at end of file +diff --git a/auth.h b/auth.h +index 3cfce0eaf..3a34742b1 100644 +--- a/auth.h ++++ b/auth.h +@@ -229,6 +229,9 @@ struct passwd *fakepw(void); + + int sys_auth_passwd(struct ssh *, const char *); + ++/* Export remote IP address and port for authorization. */ ++void export_remote_info(struct ssh *); ++ + #if defined(KRB5) && !defined(HEIMDAL) + krb5_error_code ssh_krb5_cc_gen(krb5_context, krb5_ccache *); + #endif +diff --git a/session.c b/session.c +index a638ceef1..c615cb3d0 100644 +--- a/session.c ++++ b/session.c +@@ -619,6 +619,9 @@ do_exec_pty(struct ssh *ssh, Session *s, const char *command) + /* Close the extra descriptor for the pseudo tty. */ + close(ttyfd); + ++ /* Export remote IP address and port for authorization. */ ++ export_remote_info(ssh); ++ + /* record login, etc. similar to login(1) */ + #ifndef HAVE_OSF_SIA + do_login(ssh, s, command); +diff --git a/sshd.c b/sshd.c +index 3ef0c1452..2f67a0304 100644 +--- a/sshd.c ++++ b/sshd.c +@@ -1737,6 +1737,8 @@ main(int ac, char **av) + test_flag = 2; + break; + case 'C': ++ /* Export remote IP address and port for authorization. */ ++ export_remote_info(ssh); + connection_info = get_connection_info(ssh, 0, 0); + if (parse_server_match_testspec(connection_info, + optarg) == -1) +@@ -2252,6 +2254,9 @@ main(int ac, char **av) + */ + remote_ip = ssh_remote_ipaddr(ssh); + ++ /* Export remote IP address and port for authorization. 
*/ ++ export_remote_info(ssh); ++ + #ifdef SSH_AUDIT_EVENTS + audit_connection_from(remote_ip, remote_port); + #endif +-- +2.35.1.windows.2 + diff --git a/src/openssh/patch/series b/src/openssh/patch/series index e320bcab2113..d27f92e97b27 100644 --- a/src/openssh/patch/series +++ b/src/openssh/patch/series @@ -1,2 +1,3 @@ 0001-Put-style-as-line-number-to-ssh-session-environment-.patch 0002-Revert-commit-69334996-make-sshd_config-ClientAliveC.patch +0003-Export-remote-info-for-authorization.patch From 763d3dc29dfa071823a35e66c37b48e6824759b2 Mon Sep 17 00:00:00 2001 From: tjchadaga <85581939+tjchadaga@users.noreply.github.com> Date: Thu, 3 Nov 2022 08:54:33 -0700 Subject: [PATCH 134/174] Allow TSA on ibgp sessions between linecards on packet chassis (#12589) --- dockers/docker-fpm-frr/TS | 3 ++- .../bgpcfgd/managers_device_global.py | 14 +++++++++++++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/dockers/docker-fpm-frr/TS b/dockers/docker-fpm-frr/TS index 5057802c7661..1ff08431edb3 100755 --- a/dockers/docker-fpm-frr/TS +++ b/dockers/docker-fpm-frr/TS @@ -1,9 +1,10 @@ #!/bin/bash +switch_type=`sonic-cfggen -d -v "DEVICE_METADATA['localhost']['switch_type']"` # Check whether the routemap is for internal BGP sessions. 
function is_internal_route_map() { - [[ "$1" =~ .*"_INTERNAL_".* ]] + [[ "$1" =~ .*"_INTERNAL_".* && $switch_type != "chassis-packet" ]] } function check_not_installed() diff --git a/src/sonic-bgpcfgd/bgpcfgd/managers_device_global.py b/src/sonic-bgpcfgd/bgpcfgd/managers_device_global.py index 1d30a5b94a64..192527f1cd85 100644 --- a/src/sonic-bgpcfgd/bgpcfgd/managers_device_global.py +++ b/src/sonic-bgpcfgd/bgpcfgd/managers_device_global.py @@ -13,11 +13,13 @@ def __init__(self, common_objs, db, table): :param db: name of the db :param table: name of the table in the db """ + self.switch_type = "" self.directory = common_objs['directory'] self.cfg_mgr = common_objs['cfg_mgr'] self.constants = common_objs['constants'] self.tsa_template = common_objs['tf'].from_file("bgpd/tsa/bgpd.tsa.isolate.conf.j2") self.tsb_template = common_objs['tf'].from_file("bgpd/tsa/bgpd.tsa.unisolate.conf.j2") + self.directory.subscribe([("CONFIG_DB", swsscommon.CFG_DEVICE_METADATA_TABLE_NAME, "localhost/switch_type"),], self.on_switch_type_change) super(DeviceGlobalCfgMgr, self).__init__( common_objs, [], @@ -25,8 +27,16 @@ def __init__(self, common_objs, db, table): table, ) + def on_switch_type_change(self): + log_debug("DeviceGlobalCfgMgr:: Switch type update handler") + if self.directory.path_exist("CONFIG_DB", swsscommon.CFG_DEVICE_METADATA_TABLE_NAME, "localhost/switch_type"): + self.switch_type = self.directory.get_slot("CONFIG_DB", swsscommon.CFG_DEVICE_METADATA_TABLE_NAME)["localhost"]["switch_type"] + log_debug("DeviceGlobalCfgMgr:: Switch type: %s" % self.switch_type) + def set_handler(self, key, data): log_debug("DeviceGlobalCfgMgr:: set handler") + if self.switch_type: + log_debug("DeviceGlobalCfgMgr:: Switch type: %s" % self.switch_type) """ Handle device tsa_enabled state change """ if not data: log_err("DeviceGlobalCfgMgr:: data is None") @@ -78,7 +88,9 @@ def get_ts_routemaps(self, cmds, ts_template): def __generate_routemaps_from_template(self, route_map_names, 
template): cmd = "\n" for rm in sorted(route_map_names): - if "_INTERNAL_" in rm: + # For packet-based chassis, the bgp session between the linecards are also considered internal sessions + # While isolating a single linecard, these sessions should not be skipped + if "_INTERNAL_" in rm and self.switch_type != "chassis-packet": continue if "V4" in rm: ipv="V4" ; ipp="ip" From 830b7d8cb4a649c46a0f4b3fb9c0c88754e98cdb Mon Sep 17 00:00:00 2001 From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Date: Fri, 4 Nov 2022 02:17:44 +0800 Subject: [PATCH 135/174] [Mellanox] Use sdk sysfs instead of ethtool (#12480) --- .../mlnx-platform-api/sonic_platform/sfp.py | 342 ++++++++++-------- .../tests/input_platform/cmis_page0 | Bin 0 -> 256 bytes .../tests/input_platform/sff8472_page0 | Bin 0 -> 256 bytes .../tests/input_platform/sff8636_page0 | Bin 0 -> 256 bytes .../mlnx-platform-api/tests/test_sfp.py | 151 ++++++-- 5 files changed, 319 insertions(+), 174 deletions(-) create mode 100644 platform/mellanox/mlnx-platform-api/tests/input_platform/cmis_page0 create mode 100644 platform/mellanox/mlnx-platform-api/tests/input_platform/sff8472_page0 create mode 100644 platform/mellanox/mlnx-platform-api/tests/input_platform/sff8636_page0 diff --git a/platform/mellanox/mlnx-platform-api/sonic_platform/sfp.py b/platform/mellanox/mlnx-platform-api/sonic_platform/sfp.py index bc87fb8cd4df..2ca37fcefdfc 100644 --- a/platform/mellanox/mlnx-platform-api/sonic_platform/sfp.py +++ b/platform/mellanox/mlnx-platform-api/sonic_platform/sfp.py @@ -23,6 +23,7 @@ ############################################################################# try: + import ctypes import subprocess import os from sonic_py_common.logger import Logger @@ -126,11 +127,42 @@ SFP_STATUS_INSERTED = '1' # SFP constants -SFP_PAGE_SIZE = 256 -SFP_UPPER_PAGE_OFFSET = 128 -SFP_VENDOR_PAGE_START = 640 - -BYTES_IN_DWORD = 4 +SFP_PAGE_SIZE = 256 # page size of page0h +SFP_UPPER_PAGE_OFFSET = 128 # page size of 
other pages + +# SFP sysfs path constants +SFP_PAGE0_PATH = '0/i2c-0x50/data' +SFP_A2H_PAGE0_PATH = '0/i2c-0x51/data' +SFP_EEPROM_ROOT_TEMPLATE = '/sys/module/sx_core/asic0/module{}/eeprom/pages' + +# SFP type constants +SFP_TYPE_CMIS = 'cmis' +SFP_TYPE_SFF8472 = 'sff8472' +SFP_TYPE_SFF8636 = 'sff8636' + +# SFP stderr +SFP_EEPROM_NOT_AVAILABLE = 'Input/output error' + +# SFP EEPROM limited bytes +limited_eeprom = { + SFP_TYPE_CMIS: { + 'write': { + 0: [26, (31, 36), (126, 127)], + 16: [(0, 128)] + } + }, + SFP_TYPE_SFF8472: { + 'write': { + 0: [110, (114, 115), 118, 127] + } + }, + SFP_TYPE_SFF8636: { + 'write': { + 0: [(86, 88), 93, (98, 99), (100, 106), 127], + 3: [(230, 241), (242, 251)] + } + } +} # Global logger class instance logger = Logger() @@ -157,77 +189,6 @@ def deinitialize_sdk_handle(sdk_handle): logger.log_warning("Sdk handle is none") return False -class MlxregManager: - def __init__(self, mst_pci_device, slot_id, sdk_index): - self.mst_pci_device = mst_pci_device - self.slot_id = slot_id - self.sdk_index = sdk_index - - def construct_dword(self, write_buffer): - if len(write_buffer) == 0: - return None - - used_bytes_in_dword = len(write_buffer) % BYTES_IN_DWORD - - res = "dword[0]=0x" - for idx, x in enumerate(write_buffer): - word = hex(x)[2:] - - if (idx > 0) and (idx % BYTES_IN_DWORD) == 0: - res += ",dword[{}]=0x".format(str((idx + 1)//BYTES_IN_DWORD)) - res += word.zfill(2) - - if used_bytes_in_dword > 0: - res += (BYTES_IN_DWORD - used_bytes_in_dword) * "00" - return res - - def write_mlxreg_eeprom(self, num_bytes, dword, device_address, page): - if not dword: - return False - - try: - cmd = ["mlxreg", "-d", "", "--reg_name", "MCIA", "--indexes", "", "--set", "", "-y"] - cmd[2] = "/dev/mst/" + self.mst_pci_device - cmd[6] = "slot_index={},module={},device_address={},page_number={},i2c_device_address=0x50,size={},bank_number=0".format(self.slot_id, self.sdk_index, device_address, page, num_bytes) - cmd[8] = dword - subprocess.check_call(cmd, 
universal_newlines=True, stdout=subprocess.DEVNULL) - except subprocess.CalledProcessError as e: - logger.log_error("Error! Unable to write data dword={} for {} port, page {} offset {}, rc = {}, err msg: {}".format(dword, self.sdk_index, page, device_address, e.returncode, e.output)) - return False - return True - - def read_mlxred_eeprom(self, offset, page, num_bytes): - try: - - cmd = ["mlxreg", "-d", "", "--reg_name", "MCIA", "--indexes", "", "--get"] - cmd[2] = "/dev/mst/" + self.mst_pci_device - cmd[6] = "slot_index={},module={},device_address={},page_number={},i2c_device_address=0x50,size={},bank_number=0".format(self.slot_id, self.sdk_index, offset, page, num_bytes) - result = subprocess.check_output(cmd, universal_newlines=True) - except subprocess.CalledProcessError as e: - logger.log_error("Error! Unable to read data for {} port, page {} offset {}, rc = {}, err msg: {}".format(self.sdk_index, page, offset, e.returncode, e.output)) - return None - return result - - def parse_mlxreg_read_output(self, read_output, num_bytes): - if not read_output: - return None - - res = "" - dword_num = num_bytes // BYTES_IN_DWORD - used_bytes_in_dword = num_bytes % BYTES_IN_DWORD - arr = [value for value in read_output.split('\n') if value[0:5] == "dword"] - for i in range(dword_num): - dword = arr[i].split()[2] - res += dword[2:] - - if used_bytes_in_dword > 0: - # Cut needed info and insert into final hex string - # Example: 3 bytes : 0x12345600 - # ^ ^ - dword = arr[dword_num].split()[2] - res += dword[2 : 2 + used_bytes_in_dword * 2] - - return bytearray.fromhex(res) if res else None class SdkHandleContext(object): def __init__(self): @@ -312,6 +273,7 @@ def __init__(self, sfp_index, sfp_type=None, slot_id=0, linecard_port_count=0, l self.slot_id = slot_id self.mst_pci_device = self.get_mst_pci_device() + self._sfp_type_str = None # get MST PCI device name def get_mst_pci_device(self): @@ -337,6 +299,7 @@ def reinit(self): Re-initialize this SFP object when a new SFP 
inserted :return: """ + self._sfp_type_str = None self.refresh_xcvr_api() def get_presence(self): @@ -346,36 +309,9 @@ def get_presence(self): Returns: bool: True if device is present, False if not """ - eeprom_raw = self.read_eeprom(0, 1) - + eeprom_raw = self._read_eeprom(0, 1, log_on_error=False) return eeprom_raw is not None - # Read out any bytes from any offset - def _read_eeprom_specific_bytes(self, offset, num_bytes): - if offset + num_bytes > SFP_VENDOR_PAGE_START: - logger.log_error("Error mismatch between page size and bytes to read (offset: {} num_bytes: {}) ".format(offset, num_bytes)) - return None - - eeprom_raw = [] - ethtool_cmd = ["ethtool", "-m", "", "hex", "on", "offset", "", "length", ""] - ethtool_cmd[2] = "sfp" + str(self.index) - ethtool_cmd[6] = str(offset) - ethtool_cmd[8] = str(num_bytes) - try: - output = subprocess.check_output(ethtool_cmd, - universal_newlines=True) - output_lines = output.splitlines() - first_line_raw = output_lines[0] - if "Offset" in first_line_raw: - for line in output_lines[2:]: - line_split = line.split() - eeprom_raw = eeprom_raw + line_split[1:] - except subprocess.CalledProcessError as e: - return None - - eeprom_raw = list(map(lambda h: int(h, base=16), eeprom_raw)) - return bytearray(eeprom_raw) - # read eeprom specfic bytes beginning from offset with size as num_bytes def read_eeprom(self, offset, num_bytes): """ @@ -383,43 +319,37 @@ def read_eeprom(self, offset, num_bytes): Returns: bytearray, if raw sequence of bytes are read correctly from the offset of size num_bytes None, if the read_eeprom fails - Example: - mlxreg -d /dev/mst/mt52100_pciconf0 --reg_name MCIA --indexes slot_index=0,module=1,device_address=148,page_number=0,i2c_device_address=0x50,size=16,bank_number=0 -g - Sending access register... 
- Field Name | Data - =================================== - status | 0x00000000 - slot_index | 0x00000000 - module | 0x00000001 - l | 0x00000000 - device_address | 0x00000094 - page_number | 0x00000000 - i2c_device_address | 0x00000050 - size | 0x00000010 - bank_number | 0x00000000 - dword[0] | 0x43726564 - dword[1] | 0x6f202020 - dword[2] | 0x20202020 - dword[3] | 0x20202020 - dword[4] | 0x00000000 - dword[5] | 0x00000000 - .... - 16 bytes to read from dword -> 0x437265646f2020202020202020202020 -> Credo """ - # recalculate offset and page. Use 'ethtool' if there is no need to read vendor pages - if offset < SFP_VENDOR_PAGE_START: - return self._read_eeprom_specific_bytes(offset, num_bytes) - else: - page = (offset - SFP_PAGE_SIZE) // SFP_UPPER_PAGE_OFFSET + 1 - # calculate offset per page - device_address = (offset - SFP_PAGE_SIZE) % SFP_UPPER_PAGE_OFFSET + SFP_UPPER_PAGE_OFFSET + return self._read_eeprom(offset, num_bytes) + + def _read_eeprom(self, offset, num_bytes, log_on_error=True): + """Read eeprom specfic bytes beginning from a random offset with size as num_bytes + + Args: + offset (int): read offset + num_bytes (int): read size + log_on_error (bool, optional): whether log error when exception occurs. Defaults to True. 
+ + Returns: + bytearray: the content of EEPROM + """ + _, page, page_offset = self._get_page_and_page_offset(offset) + if not page: + return None - if not self.mst_pci_device: + try: + with open(page, mode='rb', buffering=0) as f: + f.seek(page_offset) + content = f.read(num_bytes) + if ctypes.get_errno() != 0: + raise IOError(f'errno = {os.strerror(ctypes.get_errno())}') + except (OSError, IOError) as e: + if log_on_error: + logger.log_error(f'Failed to read sfp={self.sdk_index} EEPROM page={page}, page_offset={page_offset}, \ + size={num_bytes}, offset={offset}, error = {e}') return None - mlxreg_mngr = MlxregManager(self.mst_pci_device, self.slot_id, self.sdk_index) - read_output = mlxreg_mngr.read_mlxred_eeprom(device_address, page, num_bytes) - return mlxreg_mngr.parse_mlxreg_read_output(read_output, num_bytes) + return bytearray(content) # write eeprom specfic bytes beginning from offset with size as num_bytes def write_eeprom(self, offset, num_bytes, write_buffer): @@ -435,21 +365,28 @@ def write_eeprom(self, offset, num_bytes, write_buffer): logger.log_error("Error mismatch between buffer length and number of bytes to be written") return False - # recalculate offset and page - if offset < SFP_PAGE_SIZE: - page = 0 - device_address = offset - else: - page = (offset - SFP_PAGE_SIZE) // SFP_UPPER_PAGE_OFFSET + 1 - # calculate offset per page - device_address = (offset - SFP_PAGE_SIZE) % SFP_UPPER_PAGE_OFFSET + SFP_UPPER_PAGE_OFFSET - - if not self.mst_pci_device: + page_num, page, page_offset = self._get_page_and_page_offset(offset) + if not page: return False - mlxreg_mngr = MlxregManager(self.mst_pci_device, self.slot_id, self.sdk_index) - dword = mlxreg_mngr.construct_dword(write_buffer) - return mlxreg_mngr.write_mlxreg_eeprom(num_bytes, dword, device_address, page) + try: + if self._is_write_protected(page_num, page_offset, num_bytes): + # write limited eeprom is not supported + raise IOError('write limited bytes') + + with open(page, mode='r+b', 
buffering=0) as f: + f.seek(page_offset) + ret = f.write(write_buffer[0:num_bytes]) + if ret != num_bytes: + raise IOError(f'write return code = {ret}') + if ctypes.get_errno() != 0: + raise IOError(f'errno = {os.strerror(ctypes.get_errno())}') + except (OSError, IOError) as e: + data = ''.join('{:02x}'.format(x) for x in write_buffer) + logger.log_error(f'Failed to write EEPROM data sfp={self.sdk_index} EEPROM page={page}, page_offset={page_offset}, size={num_bytes}, \ + offset={offset}, data = {data}, error = {e}') + return False + return True @classmethod def mgmt_phy_mod_pwr_attr_get(cls, power_attr_type, sdk_handle, sdk_index, slot_id): @@ -759,6 +696,109 @@ def get_error_description(self): error_description = "Unknow SFP module status ({})".format(oper_status) return error_description + def _get_eeprom_path(self): + return SFP_EEPROM_ROOT_TEMPLATE.format(self.sdk_index) + + def _get_page_and_page_offset(self, overall_offset): + """Get EEPROM page and page offset according to overall offset + + Args: + overall_offset (int): Overall read offset + + Returns: + tuple: (, , ) + """ + eeprom_path = self._get_eeprom_path() + if not os.path.exists(eeprom_path): + logger.log_error(f'EEPROM file path for sfp {self.sdk_index} does not exist') + return None, None, None + + if overall_offset < SFP_PAGE_SIZE: + return 0, os.path.join(eeprom_path, SFP_PAGE0_PATH), overall_offset + + if self._get_sfp_type_str(eeprom_path) == SFP_TYPE_SFF8472: + page1h_start = SFP_PAGE_SIZE * 2 + if overall_offset < page1h_start: + return -1, os.path.join(eeprom_path, SFP_A2H_PAGE0_PATH), overall_offset - SFP_PAGE_SIZE + else: + page1h_start = SFP_PAGE_SIZE + + page_num = (overall_offset - page1h_start) // SFP_UPPER_PAGE_OFFSET + 1 + page = f'{page_num}/data' + offset = (overall_offset - page1h_start) % SFP_UPPER_PAGE_OFFSET + return page_num, os.path.join(eeprom_path, page), offset + + def _get_sfp_type_str(self, eeprom_path): + """Get SFP type by reading first byte of EEPROM + + Args: + 
eeprom_path (str): EEPROM path + + Returns: + str: SFP type in string + """ + if self._sfp_type_str is None: + page = os.path.join(eeprom_path, SFP_PAGE0_PATH) + try: + with open(page, mode='rb', buffering=0) as f: + id_byte_raw = bytearray(f.read(1)) + id = id_byte_raw[0] + if id == 0x18 or id == 0x19 or id == 0x1e: + self._sfp_type_str = SFP_TYPE_CMIS + elif id == 0x11 or id == 0x0D: + # in sonic-platform-common, 0x0D is treated as sff8436, + # but it shared the same implementation on Nvidia platforms, + # so, we treat it as sff8636 here. + self._sfp_type_str = SFP_TYPE_SFF8636 + elif id == 0x03: + self._sfp_type_str = SFP_TYPE_SFF8472 + else: + logger.log_error(f'Unsupported sfp type {id}') + except (OSError, IOError) as e: + # SFP_EEPROM_NOT_AVAILABLE usually indicates SFP is not present, no need + # print such error information to log + if SFP_EEPROM_NOT_AVAILABLE not in str(e): + logger.log_error(f'Failed to get SFP type, index={self.sdk_index}, error={e}') + return None + return self._sfp_type_str + + def _is_write_protected(self, page, page_offset, num_bytes): + """Check if the EEPROM read/write operation hit limitation bytes + + Args: + page (str): EEPROM page path + page_offset (int): EEPROM page offset + num_bytes (int): read/write size + + Returns: + bool: True if the limited bytes is hit + """ + eeprom_path = self._get_eeprom_path() + limited_data = limited_eeprom.get(self._get_sfp_type_str(eeprom_path)) + if not limited_data: + return False + + access_type = 'write' + limited_data = limited_data.get(access_type) + if not limited_data: + return False + + limited_ranges = limited_data.get(page) + if not limited_ranges: + return False + + access_begin = page_offset + access_end = page_offset + num_bytes - 1 + for limited_range in limited_ranges: + if isinstance(limited_range, int): + if access_begin <= limited_range <= access_end: + return True + else: # tuple + if not (access_end < limited_range[0] or access_begin > limited_range[1]): + return True + + 
return False + def get_rx_los(self): """Accessing rx los is not supproted, return all False @@ -786,7 +826,7 @@ def get_xcvr_api(self): """ if self._xcvr_api is None: self.refresh_xcvr_api() - if self._xcvr_api is not None: + if self._xcvr_api is not None: self._xcvr_api.get_rx_los = self.get_rx_los self._xcvr_api.get_tx_fault = self.get_tx_fault return self._xcvr_api diff --git a/platform/mellanox/mlnx-platform-api/tests/input_platform/cmis_page0 b/platform/mellanox/mlnx-platform-api/tests/input_platform/cmis_page0 new file mode 100644 index 0000000000000000000000000000000000000000..623dcfe5f3f1b1a07b9628c7b3a9a6228ea72c90 GIT binary patch literal 256 zcmb0zXkccbHei-z>|m5(bP<$hR0@>>(h`h<|HXl{7?5TJVuXbfzNtAmiFx@I3Q)kn zbkf&3z|hRhKsVgLz|qy%K*7<-H^j)m&@wF8z{JAP2&mA=z`)eNLO}sx4g({rG7Bra dC{Ton3oe|;U~FJ)Y~XETY+~SH0T)JM001&>7$pDz literal 0 HcmV?d00001 diff --git a/platform/mellanox/mlnx-platform-api/tests/input_platform/sff8472_page0 b/platform/mellanox/mlnx-platform-api/tests/input_platform/sff8472_page0 new file mode 100644 index 0000000000000000000000000000000000000000..edda806c8abaee1b4c64921eaa720b0bc3690f28 GIT binary patch literal 256 zcmZQ(QDy)E4h9Cc|6mp~gKuh1PGVkug#r{vFfg6;b#pX#G&Rt5HZU*-2|EHwpmJ9U zt8@n65JL-N6SoipV+%`5pq!zFfrXK&fm1_tKlmOwe6LNfys1qB9%^aMjA3kx$NL(?#4r!bg3 JaN2-@0RY7;6hZ(1 literal 0 HcmV?d00001 diff --git a/platform/mellanox/mlnx-platform-api/tests/test_sfp.py b/platform/mellanox/mlnx-platform-api/tests/test_sfp.py index b72a5f3ed4aa..2a79b39308b5 100644 --- a/platform/mellanox/mlnx-platform-api/tests/test_sfp.py +++ b/platform/mellanox/mlnx-platform-api/tests/test_sfp.py @@ -14,7 +14,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import ctypes import os +import pytest +import shutil import sys if sys.version_info.major == 3: from unittest import mock @@ -27,8 +30,6 @@ from sonic_platform.sfp import SFP, SX_PORT_MODULE_STATUS_INITIALIZING, SX_PORT_MODULE_STATUS_PLUGGED, SX_PORT_MODULE_STATUS_UNPLUGGED, SX_PORT_MODULE_STATUS_PLUGGED_WITH_ERROR, SX_PORT_MODULE_STATUS_PLUGGED_DISABLED from sonic_platform.chassis import Chassis -from sonic_platform.sfp import MlxregManager -from tests.input_platform import output_sfp class TestSfp: @@ -86,31 +87,61 @@ def test_sfp_get_error_status(self, mock_get_error_code): assert description == expected_description - @mock.patch('sonic_platform.sfp.SFP.get_mst_pci_device', mock.MagicMock(return_value="pciconf")) - @mock.patch('sonic_platform.sfp.MlxregManager.write_mlxreg_eeprom', mock.MagicMock(return_value=True)) - def test_sfp_write_eeprom(self): - mlxreg_mngr = MlxregManager("", 0, 0) - write_buffer = bytearray([1,2,3,4]) - offset = 793 - + @mock.patch('sonic_platform.sfp.SFP._get_page_and_page_offset') + @mock.patch('sonic_platform.sfp.SFP._is_write_protected') + def test_sfp_write_eeprom(self, mock_limited_eeprom, mock_get_page): + sfp = SFP(0) + assert not sfp.write_eeprom(0, 1, bytearray()) + + mock_get_page.return_value = (None, None, None) + assert not sfp.write_eeprom(0, 1, bytearray([1])) + + mock_get_page.return_value = (0, '/tmp/mock_page', 0) + mock_limited_eeprom.return_value = True + assert not sfp.write_eeprom(0, 1, bytearray([1])) + + mock_limited_eeprom.return_value = False + mo = mock.mock_open() + print('after mock open') + with mock.patch('sonic_platform.sfp.open', mo): + handle = mo() + handle.write.return_value = 1 + assert sfp.write_eeprom(0, 1, bytearray([1])) + + handle.seek.assert_called_once_with(0) + handle.write.assert_called_once_with(bytearray([1])) + handle.write.return_value = -1 + assert not sfp.write_eeprom(0, 1, bytearray([1])) + + handle.write.return_value = 1 + ctypes.set_errno(1) + assert not sfp.write_eeprom(0, 1, 
bytearray([1])) + ctypes.set_errno(0) + + handle.write.side_effect = OSError('') + assert not sfp.write_eeprom(0, 1, bytearray([1])) + + @mock.patch('sonic_platform.sfp.SFP.get_mst_pci_device', mock.MagicMock(return_value = None)) + @mock.patch('sonic_platform.sfp.SFP._get_page_and_page_offset') + def test_sfp_read_eeprom(self, mock_get_page): sfp = SFP(0) - sfp.write_eeprom(offset, 4, write_buffer) - MlxregManager.write_mlxreg_eeprom.assert_called_with(4, output_sfp.write_eeprom_dword1, 153, 5) + mock_get_page.return_value = (None, None, None) + assert sfp.read_eeprom(0, 1) is None - offset = 641 - write_buffer = bytearray([1,2,3,4,5,6]) - sfp.write_eeprom(offset, 6, write_buffer) - MlxregManager.write_mlxreg_eeprom.assert_called_with(6, output_sfp.write_eeprom_dword2, 129, 4) + mock_get_page.return_value = (0, '/tmp/mock_page', 0) + mo = mock.mock_open() + with mock.patch('sonic_platform.sfp.open', mo): + handle = mo() + handle.read.return_value = b'\x00' + assert sfp.read_eeprom(0, 1) == bytearray([0]) + handle.seek.assert_called_once_with(0) - @mock.patch('sonic_platform.sfp.SFP.get_mst_pci_device', mock.MagicMock(return_value="pciconf")) - @mock.patch('sonic_platform.sfp.MlxregManager.read_mlxred_eeprom', mock.MagicMock(return_value=output_sfp.read_eeprom_output)) - def test_sfp_read_eeprom(self): - mlxreg_mngr = MlxregManager("", 0, 0) - offset = 644 + ctypes.set_errno(1) + assert sfp.read_eeprom(0, 1) is None + ctypes.set_errno(0) - sfp = SFP(0) - assert output_sfp.y_cable_part_number == sfp.read_eeprom(offset, 16).decode() - MlxregManager.read_mlxred_eeprom.assert_called_with(132, 4, 16) + handle.read.side_effect = OSError('') + assert sfp.read_eeprom(0, 1) is None @mock.patch('sonic_platform.sfp.SFP._fetch_port_status') def test_is_port_admin_status_up(self, mock_port_status): @@ -120,6 +151,80 @@ def test_is_port_admin_status_up(self, mock_port_status): mock_port_status.return_value = (0, False) assert not SFP.is_port_admin_status_up(None, None) + 
@mock.patch('sonic_platform.sfp.SFP._get_eeprom_path', mock.MagicMock(return_value = None)) + @mock.patch('sonic_platform.sfp.SFP._get_sfp_type_str') + def test_is_write_protected(self, mock_get_type_str): + sfp = SFP(0) + mock_get_type_str.return_value = 'cmis' + assert sfp._is_write_protected(page=0, page_offset=26, num_bytes=1) + assert not sfp._is_write_protected(page=0, page_offset=27, num_bytes=1) + + # not exist page + assert not sfp._is_write_protected(page=3, page_offset=0, num_bytes=1) + + # invalid sfp type str + mock_get_type_str.return_value = 'invalid' + assert not sfp._is_write_protected(page=0, page_offset=0, num_bytes=1) + + def test_get_sfp_type_str(self): + sfp = SFP(0) + expect_sfp_types = ['cmis', 'sff8636', 'sff8472'] + mock_eeprom_path = '/tmp/mock_eeprom' + mock_dir = '/tmp/mock_eeprom/0/i2c-0x50' + os.makedirs(os.path.join(mock_dir), exist_ok=True) + for expect_sfp_type in expect_sfp_types: + source_eeprom_file = os.path.join(test_path, 'input_platform', expect_sfp_type + '_page0') + shutil.copy(source_eeprom_file, os.path.join(mock_dir, 'data')) + assert sfp._get_sfp_type_str(mock_eeprom_path) == expect_sfp_type + sfp._sfp_type_str = None + + os.system('rm -rf {}'.format(mock_eeprom_path)) + assert sfp._get_sfp_type_str('invalid') is None + + @mock.patch('os.path.exists') + @mock.patch('sonic_platform.sfp.SFP._get_eeprom_path') + @mock.patch('sonic_platform.sfp.SFP._get_sfp_type_str') + def test_get_page_and_page_offset(self, mock_get_type_str, mock_eeprom_path, mock_path_exists): + sfp = SFP(0) + mock_path_exists.return_value = False + page_num, page, page_offset = sfp._get_page_and_page_offset(0) + assert page_num is None + assert page is None + assert page_offset is None + + mock_path_exists.return_value = True + mock_eeprom_path.return_value = '/tmp' + page_num, page, page_offset = sfp._get_page_and_page_offset(255) + assert page_num == 0 + assert page == '/tmp/0/i2c-0x50/data' + assert page_offset is 255 + + 
mock_get_type_str.return_value = 'cmis' + page_num, page, page_offset = sfp._get_page_and_page_offset(256) + assert page_num == 1 + assert page == '/tmp/1/data' + assert page_offset is 0 + + mock_get_type_str.return_value = 'sff8472' + page_num, page, page_offset = sfp._get_page_and_page_offset(511) + assert page_num == -1 + assert page == '/tmp/0/i2c-0x51/data' + assert page_offset is 255 + + page_num, page, page_offset = sfp._get_page_and_page_offset(512) + assert page_num == 1 + assert page == '/tmp/1/data' + assert page_offset is 0 + + @mock.patch('sonic_platform.sfp.SFP._read_eeprom') + def test_get_presence(self, mock_read_eeprom): + sfp = SFP(0) + mock_read_eeprom.return_value = None + assert not sfp.get_presence() + + mock_read_eeprom.return_value = bytearray([1]) + assert sfp.get_presence() + @mock.patch('sonic_platform.sfp.SFP.get_xcvr_api') def test_dummy_apis(self, mock_get_xcvr_api): mock_api = mock.MagicMock() From 6169ae3ee324aaca994da25a7be5653d8f219bf4 Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Fri, 4 Nov 2022 08:12:00 +0800 Subject: [PATCH 136/174] Add lossy scheduler for queue 7 (#12596) * Add lossy scheduler for queue 7 --- files/build_templates/qos_config.j2 | 7 + .../py3/qos-arista7050cx3-dualtor.json | 96 +++++++++ .../py3/qos-arista7260-dualtor.json | 192 ++++++++++++++++++ .../sample_output/py3/qos-arista7260-t1.json | 84 ++++++++ .../py3/qos-mellanox4600c-c64.json | 84 ++++++++ 5 files changed, 463 insertions(+) diff --git a/files/build_templates/qos_config.j2 b/files/build_templates/qos_config.j2 index 358a114c11fb..aeba03f595cf 100644 --- a/files/build_templates/qos_config.j2 +++ b/files/build_templates/qos_config.j2 @@ -233,6 +233,7 @@ "global": { "dscp_to_tc_map" : "AZURE" }{% if PORT_ACTIVE %},{% endif %} + {% endif %} {% for port in PORT_ACTIVE %} "{{ port }}": { @@ -332,6 +333,12 @@ "{{ port }}|5": { "scheduler": "scheduler.0" }, +{# DSCP 48 is mapped to QUEUE 7 in macro 
generate_dscp_to_tc_map #} +{% if (generate_dscp_to_tc_map is defined) and tunnel_qos_remap_enable %} + "{{ port }}|7": { + "scheduler": "scheduler.0" + }, +{% endif %} {% endfor %} {% for port in PORT_ACTIVE %} "{{ port }}|6": { diff --git a/src/sonic-config-engine/tests/sample_output/py3/qos-arista7050cx3-dualtor.json b/src/sonic-config-engine/tests/sample_output/py3/qos-arista7050cx3-dualtor.json index 00bf9a9a438c..afe1e9946eb6 100644 --- a/src/sonic-config-engine/tests/sample_output/py3/qos-arista7050cx3-dualtor.json +++ b/src/sonic-config-engine/tests/sample_output/py3/qos-arista7050cx3-dualtor.json @@ -1050,99 +1050,195 @@ "Ethernet0|5": { "scheduler": "scheduler.0" }, + "Ethernet0|7": { + "scheduler": "scheduler.0" + }, "Ethernet4|5": { "scheduler": "scheduler.0" }, + "Ethernet4|7": { + "scheduler": "scheduler.0" + }, "Ethernet8|5": { "scheduler": "scheduler.0" }, + "Ethernet8|7": { + "scheduler": "scheduler.0" + }, "Ethernet12|5": { "scheduler": "scheduler.0" }, + "Ethernet12|7": { + "scheduler": "scheduler.0" + }, "Ethernet16|5": { "scheduler": "scheduler.0" }, + "Ethernet16|7": { + "scheduler": "scheduler.0" + }, "Ethernet20|5": { "scheduler": "scheduler.0" }, + "Ethernet20|7": { + "scheduler": "scheduler.0" + }, "Ethernet24|5": { "scheduler": "scheduler.0" }, + "Ethernet24|7": { + "scheduler": "scheduler.0" + }, "Ethernet28|5": { "scheduler": "scheduler.0" }, + "Ethernet28|7": { + "scheduler": "scheduler.0" + }, "Ethernet32|5": { "scheduler": "scheduler.0" }, + "Ethernet32|7": { + "scheduler": "scheduler.0" + }, "Ethernet36|5": { "scheduler": "scheduler.0" }, + "Ethernet36|7": { + "scheduler": "scheduler.0" + }, "Ethernet40|5": { "scheduler": "scheduler.0" }, + "Ethernet40|7": { + "scheduler": "scheduler.0" + }, "Ethernet44|5": { "scheduler": "scheduler.0" }, + "Ethernet44|7": { + "scheduler": "scheduler.0" + }, "Ethernet48|5": { "scheduler": "scheduler.0" }, + "Ethernet48|7": { + "scheduler": "scheduler.0" + }, "Ethernet52|5": { "scheduler": 
"scheduler.0" }, + "Ethernet52|7": { + "scheduler": "scheduler.0" + }, "Ethernet56|5": { "scheduler": "scheduler.0" }, + "Ethernet56|7": { + "scheduler": "scheduler.0" + }, "Ethernet60|5": { "scheduler": "scheduler.0" }, + "Ethernet60|7": { + "scheduler": "scheduler.0" + }, "Ethernet64|5": { "scheduler": "scheduler.0" }, + "Ethernet64|7": { + "scheduler": "scheduler.0" + }, "Ethernet68|5": { "scheduler": "scheduler.0" }, + "Ethernet68|7": { + "scheduler": "scheduler.0" + }, "Ethernet72|5": { "scheduler": "scheduler.0" }, + "Ethernet72|7": { + "scheduler": "scheduler.0" + }, "Ethernet76|5": { "scheduler": "scheduler.0" }, + "Ethernet76|7": { + "scheduler": "scheduler.0" + }, "Ethernet80|5": { "scheduler": "scheduler.0" }, + "Ethernet80|7": { + "scheduler": "scheduler.0" + }, "Ethernet84|5": { "scheduler": "scheduler.0" }, + "Ethernet84|7": { + "scheduler": "scheduler.0" + }, "Ethernet88|5": { "scheduler": "scheduler.0" }, + "Ethernet88|7": { + "scheduler": "scheduler.0" + }, "Ethernet92|5": { "scheduler": "scheduler.0" }, + "Ethernet92|7": { + "scheduler": "scheduler.0" + }, "Ethernet96|5": { "scheduler": "scheduler.0" }, + "Ethernet96|7": { + "scheduler": "scheduler.0" + }, "Ethernet100|5": { "scheduler": "scheduler.0" }, + "Ethernet100|7": { + "scheduler": "scheduler.0" + }, "Ethernet104|5": { "scheduler": "scheduler.0" }, + "Ethernet104|7": { + "scheduler": "scheduler.0" + }, "Ethernet108|5": { "scheduler": "scheduler.0" }, + "Ethernet108|7": { + "scheduler": "scheduler.0" + }, "Ethernet112|5": { "scheduler": "scheduler.0" }, + "Ethernet112|7": { + "scheduler": "scheduler.0" + }, "Ethernet116|5": { "scheduler": "scheduler.0" }, + "Ethernet116|7": { + "scheduler": "scheduler.0" + }, "Ethernet120|5": { "scheduler": "scheduler.0" }, + "Ethernet120|7": { + "scheduler": "scheduler.0" + }, "Ethernet124|5": { "scheduler": "scheduler.0" }, + "Ethernet124|7": { + "scheduler": "scheduler.0" + }, "Ethernet0|6": { "scheduler": "scheduler.0" }, diff --git 
a/src/sonic-config-engine/tests/sample_output/py3/qos-arista7260-dualtor.json b/src/sonic-config-engine/tests/sample_output/py3/qos-arista7260-dualtor.json index 4d16791f9287..b4f5b5f42e77 100644 --- a/src/sonic-config-engine/tests/sample_output/py3/qos-arista7260-dualtor.json +++ b/src/sonic-config-engine/tests/sample_output/py3/qos-arista7260-dualtor.json @@ -1849,196 +1849,388 @@ }, "Ethernet0|5": { "scheduler": "scheduler.0" + }, + "Ethernet0|7": { + "scheduler": "scheduler.0" }, "Ethernet4|5": { "scheduler": "scheduler.0" }, + "Ethernet4|7": { + "scheduler": "scheduler.0" + }, "Ethernet8|5": { "scheduler": "scheduler.0" }, + "Ethernet8|7": { + "scheduler": "scheduler.0" + }, "Ethernet12|5": { "scheduler": "scheduler.0" }, + "Ethernet12|7": { + "scheduler": "scheduler.0" + }, "Ethernet16|5": { "scheduler": "scheduler.0" }, + "Ethernet16|7": { + "scheduler": "scheduler.0" + }, "Ethernet20|5": { "scheduler": "scheduler.0" }, + "Ethernet20|7": { + "scheduler": "scheduler.0" + }, "Ethernet24|5": { "scheduler": "scheduler.0" }, + "Ethernet24|7": { + "scheduler": "scheduler.0" + }, "Ethernet28|5": { "scheduler": "scheduler.0" }, + "Ethernet28|7": { + "scheduler": "scheduler.0" + }, "Ethernet32|5": { "scheduler": "scheduler.0" }, + "Ethernet32|7": { + "scheduler": "scheduler.0" + }, "Ethernet36|5": { "scheduler": "scheduler.0" }, + "Ethernet36|7": { + "scheduler": "scheduler.0" + }, "Ethernet40|5": { "scheduler": "scheduler.0" }, + "Ethernet40|7": { + "scheduler": "scheduler.0" + }, "Ethernet44|5": { "scheduler": "scheduler.0" }, + "Ethernet44|7": { + "scheduler": "scheduler.0" + }, "Ethernet48|5": { "scheduler": "scheduler.0" }, + "Ethernet48|7": { + "scheduler": "scheduler.0" + }, "Ethernet52|5": { "scheduler": "scheduler.0" }, + "Ethernet52|7": { + "scheduler": "scheduler.0" + }, "Ethernet56|5": { "scheduler": "scheduler.0" }, + "Ethernet56|7": { + "scheduler": "scheduler.0" + }, "Ethernet60|5": { "scheduler": "scheduler.0" }, + "Ethernet60|7": { + "scheduler": 
"scheduler.0" + }, "Ethernet64|5": { "scheduler": "scheduler.0" }, + "Ethernet64|7": { + "scheduler": "scheduler.0" + }, "Ethernet68|5": { "scheduler": "scheduler.0" }, + "Ethernet68|7": { + "scheduler": "scheduler.0" + }, "Ethernet72|5": { "scheduler": "scheduler.0" }, + "Ethernet72|7": { + "scheduler": "scheduler.0" + }, "Ethernet76|5": { "scheduler": "scheduler.0" }, + "Ethernet76|7": { + "scheduler": "scheduler.0" + }, "Ethernet80|5": { "scheduler": "scheduler.0" }, + "Ethernet80|7": { + "scheduler": "scheduler.0" + }, "Ethernet84|5": { "scheduler": "scheduler.0" }, + "Ethernet84|7": { + "scheduler": "scheduler.0" + }, "Ethernet88|5": { "scheduler": "scheduler.0" }, + "Ethernet88|7": { + "scheduler": "scheduler.0" + }, "Ethernet92|5": { "scheduler": "scheduler.0" }, + "Ethernet92|7": { + "scheduler": "scheduler.0" + }, "Ethernet96|5": { "scheduler": "scheduler.0" }, + "Ethernet96|7": { + "scheduler": "scheduler.0" + }, "Ethernet100|5": { "scheduler": "scheduler.0" }, + "Ethernet100|7": { + "scheduler": "scheduler.0" + }, "Ethernet104|5": { "scheduler": "scheduler.0" }, + "Ethernet104|7": { + "scheduler": "scheduler.0" + }, "Ethernet108|5": { "scheduler": "scheduler.0" }, + "Ethernet108|7": { + "scheduler": "scheduler.0" + }, "Ethernet112|5": { "scheduler": "scheduler.0" }, + "Ethernet112|7": { + "scheduler": "scheduler.0" + }, "Ethernet116|5": { "scheduler": "scheduler.0" }, + "Ethernet116|7": { + "scheduler": "scheduler.0" + }, "Ethernet120|5": { "scheduler": "scheduler.0" }, + "Ethernet120|7": { + "scheduler": "scheduler.0" + }, "Ethernet124|5": { "scheduler": "scheduler.0" }, + "Ethernet124|7": { + "scheduler": "scheduler.0" + }, "Ethernet128|5": { "scheduler": "scheduler.0" }, + "Ethernet128|7": { + "scheduler": "scheduler.0" + }, "Ethernet132|5": { "scheduler": "scheduler.0" }, + "Ethernet132|7": { + "scheduler": "scheduler.0" + }, "Ethernet136|5": { "scheduler": "scheduler.0" }, + "Ethernet136|7": { + "scheduler": "scheduler.0" + }, "Ethernet140|5": { 
"scheduler": "scheduler.0" }, + "Ethernet140|7": { + "scheduler": "scheduler.0" + }, "Ethernet144|5": { "scheduler": "scheduler.0" }, + "Ethernet144|7": { + "scheduler": "scheduler.0" + }, "Ethernet148|5": { "scheduler": "scheduler.0" }, + "Ethernet148|7": { + "scheduler": "scheduler.0" + }, "Ethernet152|5": { "scheduler": "scheduler.0" }, + "Ethernet152|7": { + "scheduler": "scheduler.0" + }, "Ethernet156|5": { "scheduler": "scheduler.0" }, + "Ethernet156|7": { + "scheduler": "scheduler.0" + }, "Ethernet160|5": { "scheduler": "scheduler.0" }, + "Ethernet160|7": { + "scheduler": "scheduler.0" + }, "Ethernet164|5": { "scheduler": "scheduler.0" }, + "Ethernet164|7": { + "scheduler": "scheduler.0" + }, "Ethernet168|5": { "scheduler": "scheduler.0" }, + "Ethernet168|7": { + "scheduler": "scheduler.0" + }, "Ethernet172|5": { "scheduler": "scheduler.0" }, + "Ethernet172|7": { + "scheduler": "scheduler.0" + }, "Ethernet176|5": { "scheduler": "scheduler.0" }, + "Ethernet176|7": { + "scheduler": "scheduler.0" + }, "Ethernet180|5": { "scheduler": "scheduler.0" }, + "Ethernet180|7": { + "scheduler": "scheduler.0" + }, "Ethernet184|5": { "scheduler": "scheduler.0" }, + "Ethernet184|7": { + "scheduler": "scheduler.0" + }, "Ethernet188|5": { "scheduler": "scheduler.0" }, + "Ethernet188|7": { + "scheduler": "scheduler.0" + }, "Ethernet192|5": { "scheduler": "scheduler.0" }, + "Ethernet192|7": { + "scheduler": "scheduler.0" + }, "Ethernet196|5": { "scheduler": "scheduler.0" }, + "Ethernet196|7": { + "scheduler": "scheduler.0" + }, "Ethernet200|5": { "scheduler": "scheduler.0" }, + "Ethernet200|7": { + "scheduler": "scheduler.0" + }, "Ethernet204|5": { "scheduler": "scheduler.0" }, + "Ethernet204|7": { + "scheduler": "scheduler.0" + }, "Ethernet208|5": { "scheduler": "scheduler.0" }, + "Ethernet208|7": { + "scheduler": "scheduler.0" + }, "Ethernet212|5": { "scheduler": "scheduler.0" }, + "Ethernet212|7": { + "scheduler": "scheduler.0" + }, "Ethernet216|5": { "scheduler": 
"scheduler.0" }, + "Ethernet216|7": { + "scheduler": "scheduler.0" + }, "Ethernet220|5": { "scheduler": "scheduler.0" }, + "Ethernet220|7": { + "scheduler": "scheduler.0" + }, "Ethernet224|5": { "scheduler": "scheduler.0" }, + "Ethernet224|7": { + "scheduler": "scheduler.0" + }, "Ethernet228|5": { "scheduler": "scheduler.0" }, + "Ethernet228|7": { + "scheduler": "scheduler.0" + }, "Ethernet232|5": { "scheduler": "scheduler.0" }, + "Ethernet232|7": { + "scheduler": "scheduler.0" + }, "Ethernet236|5": { "scheduler": "scheduler.0" }, + "Ethernet236|7": { + "scheduler": "scheduler.0" + }, "Ethernet240|5": { "scheduler": "scheduler.0" }, + "Ethernet240|7": { + "scheduler": "scheduler.0" + }, "Ethernet244|5": { "scheduler": "scheduler.0" }, + "Ethernet244|7": { + "scheduler": "scheduler.0" + }, "Ethernet248|5": { "scheduler": "scheduler.0" }, + "Ethernet248|7": { + "scheduler": "scheduler.0" + }, "Ethernet252|5": { "scheduler": "scheduler.0" }, + "Ethernet252|7": { + "scheduler": "scheduler.0" + }, "Ethernet0|6": { "scheduler": "scheduler.0" }, diff --git a/src/sonic-config-engine/tests/sample_output/py3/qos-arista7260-t1.json b/src/sonic-config-engine/tests/sample_output/py3/qos-arista7260-t1.json index a0e9ad63147e..26e7dd8be502 100644 --- a/src/sonic-config-engine/tests/sample_output/py3/qos-arista7260-t1.json +++ b/src/sonic-config-engine/tests/sample_output/py3/qos-arista7260-t1.json @@ -925,87 +925,171 @@ "Ethernet0|5": { "scheduler": "scheduler.0" }, + "Ethernet0|7": { + "scheduler": "scheduler.0" + }, "Ethernet4|5": { "scheduler": "scheduler.0" }, + "Ethernet4|7": { + "scheduler": "scheduler.0" + }, "Ethernet16|5": { "scheduler": "scheduler.0" }, + "Ethernet16|7": { + "scheduler": "scheduler.0" + }, "Ethernet20|5": { "scheduler": "scheduler.0" }, + "Ethernet20|7": { + "scheduler": "scheduler.0" + }, "Ethernet64|5": { "scheduler": "scheduler.0" }, + "Ethernet64|7": { + "scheduler": "scheduler.0" + }, "Ethernet68|5": { "scheduler": "scheduler.0" }, + 
"Ethernet68|7": { + "scheduler": "scheduler.0" + }, "Ethernet80|5": { "scheduler": "scheduler.0" }, + "Ethernet80|7": { + "scheduler": "scheduler.0" + }, "Ethernet84|5": { "scheduler": "scheduler.0" }, + "Ethernet84|7": { + "scheduler": "scheduler.0" + }, "Ethernet136|5": { "scheduler": "scheduler.0" }, + "Ethernet136|7": { + "scheduler": "scheduler.0" + }, "Ethernet144|5": { "scheduler": "scheduler.0" }, + "Ethernet144|7": { + "scheduler": "scheduler.0" + }, "Ethernet148|5": { "scheduler": "scheduler.0" }, + "Ethernet148|7": { + "scheduler": "scheduler.0" + }, "Ethernet152|5": { "scheduler": "scheduler.0" }, + "Ethernet152|7": { + "scheduler": "scheduler.0" + }, "Ethernet156|5": { "scheduler": "scheduler.0" }, + "Ethernet156|7": { + "scheduler": "scheduler.0" + }, "Ethernet168|5": { "scheduler": "scheduler.0" }, + "Ethernet168|7": { + "scheduler": "scheduler.0" + }, "Ethernet176|5": { "scheduler": "scheduler.0" }, + "Ethernet176|7": { + "scheduler": "scheduler.0" + }, "Ethernet180|5": { "scheduler": "scheduler.0" }, + "Ethernet180|7": { + "scheduler": "scheduler.0" + }, "Ethernet184|5": { "scheduler": "scheduler.0" }, + "Ethernet184|7": { + "scheduler": "scheduler.0" + }, "Ethernet188|5": { "scheduler": "scheduler.0" }, + "Ethernet188|7": { + "scheduler": "scheduler.0" + }, "Ethernet200|5": { "scheduler": "scheduler.0" }, + "Ethernet200|7": { + "scheduler": "scheduler.0" + }, "Ethernet208|5": { "scheduler": "scheduler.0" }, + "Ethernet208|7": { + "scheduler": "scheduler.0" + }, "Ethernet212|5": { "scheduler": "scheduler.0" }, + "Ethernet212|7": { + "scheduler": "scheduler.0" + }, "Ethernet216|5": { "scheduler": "scheduler.0" }, + "Ethernet216|7": { + "scheduler": "scheduler.0" + }, "Ethernet220|5": { "scheduler": "scheduler.0" }, + "Ethernet220|7": { + "scheduler": "scheduler.0" + }, "Ethernet232|5": { "scheduler": "scheduler.0" }, + "Ethernet232|7": { + "scheduler": "scheduler.0" + }, "Ethernet240|5": { "scheduler": "scheduler.0" }, + "Ethernet240|7": { + 
"scheduler": "scheduler.0" + }, "Ethernet244|5": { "scheduler": "scheduler.0" }, + "Ethernet244|7": { + "scheduler": "scheduler.0" + }, "Ethernet248|5": { "scheduler": "scheduler.0" }, + "Ethernet248|7": { + "scheduler": "scheduler.0" + }, "Ethernet252|5": { "scheduler": "scheduler.0" }, + "Ethernet252|7": { + "scheduler": "scheduler.0" + }, "Ethernet0|6": { "scheduler": "scheduler.0" }, diff --git a/src/sonic-config-engine/tests/sample_output/py3/qos-mellanox4600c-c64.json b/src/sonic-config-engine/tests/sample_output/py3/qos-mellanox4600c-c64.json index 7da43b0d8f38..df3e773b7328 100644 --- a/src/sonic-config-engine/tests/sample_output/py3/qos-mellanox4600c-c64.json +++ b/src/sonic-config-engine/tests/sample_output/py3/qos-mellanox4600c-c64.json @@ -922,87 +922,171 @@ "Ethernet0|5": { "scheduler": "scheduler.0" }, + "Ethernet0|7": { + "scheduler": "scheduler.0" + }, "Ethernet4|5": { "scheduler": "scheduler.0" }, + "Ethernet4|7": { + "scheduler": "scheduler.0" + }, "Ethernet16|5": { "scheduler": "scheduler.0" }, + "Ethernet16|7": { + "scheduler": "scheduler.0" + }, "Ethernet20|5": { "scheduler": "scheduler.0" }, + "Ethernet20|7": { + "scheduler": "scheduler.0" + }, "Ethernet64|5": { "scheduler": "scheduler.0" }, + "Ethernet64|7": { + "scheduler": "scheduler.0" + }, "Ethernet68|5": { "scheduler": "scheduler.0" }, + "Ethernet68|7": { + "scheduler": "scheduler.0" + }, "Ethernet80|5": { "scheduler": "scheduler.0" }, + "Ethernet80|7": { + "scheduler": "scheduler.0" + }, "Ethernet84|5": { "scheduler": "scheduler.0" }, + "Ethernet84|7": { + "scheduler": "scheduler.0" + }, "Ethernet136|5": { "scheduler": "scheduler.0" }, + "Ethernet136|7": { + "scheduler": "scheduler.0" + }, "Ethernet144|5": { "scheduler": "scheduler.0" }, + "Ethernet144|7": { + "scheduler": "scheduler.0" + }, "Ethernet148|5": { "scheduler": "scheduler.0" }, + "Ethernet148|7": { + "scheduler": "scheduler.0" + }, "Ethernet152|5": { "scheduler": "scheduler.0" }, + "Ethernet152|7": { + "scheduler": 
"scheduler.0" + }, "Ethernet156|5": { "scheduler": "scheduler.0" }, + "Ethernet156|7": { + "scheduler": "scheduler.0" + }, "Ethernet168|5": { "scheduler": "scheduler.0" }, + "Ethernet168|7": { + "scheduler": "scheduler.0" + }, "Ethernet176|5": { "scheduler": "scheduler.0" }, + "Ethernet176|7": { + "scheduler": "scheduler.0" + }, "Ethernet180|5": { "scheduler": "scheduler.0" }, + "Ethernet180|7": { + "scheduler": "scheduler.0" + }, "Ethernet184|5": { "scheduler": "scheduler.0" }, + "Ethernet184|7": { + "scheduler": "scheduler.0" + }, "Ethernet188|5": { "scheduler": "scheduler.0" }, + "Ethernet188|7": { + "scheduler": "scheduler.0" + }, "Ethernet200|5": { "scheduler": "scheduler.0" }, + "Ethernet200|7": { + "scheduler": "scheduler.0" + }, "Ethernet208|5": { "scheduler": "scheduler.0" }, + "Ethernet208|7": { + "scheduler": "scheduler.0" + }, "Ethernet212|5": { "scheduler": "scheduler.0" }, + "Ethernet212|7": { + "scheduler": "scheduler.0" + }, "Ethernet216|5": { "scheduler": "scheduler.0" }, + "Ethernet216|7": { + "scheduler": "scheduler.0" + }, "Ethernet220|5": { "scheduler": "scheduler.0" }, + "Ethernet220|7": { + "scheduler": "scheduler.0" + }, "Ethernet232|5": { "scheduler": "scheduler.0" }, + "Ethernet232|7": { + "scheduler": "scheduler.0" + }, "Ethernet240|5": { "scheduler": "scheduler.0" }, + "Ethernet240|7": { + "scheduler": "scheduler.0" + }, "Ethernet244|5": { "scheduler": "scheduler.0" }, + "Ethernet244|7": { + "scheduler": "scheduler.0" + }, "Ethernet248|5": { "scheduler": "scheduler.0" }, + "Ethernet248|7": { + "scheduler": "scheduler.0" + }, "Ethernet252|5": { "scheduler": "scheduler.0" }, + "Ethernet252|7": { + "scheduler": "scheduler.0" + }, "Ethernet0|6": { "scheduler": "scheduler.0" }, From 7b813a90b00595b9d46ce74e8270a0cee22c43b3 Mon Sep 17 00:00:00 2001 From: Hua Liu <58683130+liuh-80@users.noreply.github.com> Date: Fri, 4 Nov 2022 10:55:16 +0800 Subject: [PATCH 137/174] Update sonic-swss-common submodule (#12578) #### Why I did it Submodule update 
for sonic-swss-common with following change: ``` 276f47c [sonic-db-cli] Fix sonic-db-cli crash when database config file not ready issue. (#639) ``` #### How I did it #### How to verify it #### Which release branch to backport (provide reason below if selected) - [ ] 201811 - [ ] 201911 - [ ] 202006 - [ ] 202012 - [ ] 202106 #### Description for the changelog Submodule update for sonic-swss-common with following change: 276f47c [sonic-db-cli] Fix sonic-db-cli crash when database config file not ready issue. (#639) #### A picture of a cute animal (not mandatory but encouraged) --- src/sonic-swss-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-swss-common b/src/sonic-swss-common index abda263ba0a5..276f47cab176 160000 --- a/src/sonic-swss-common +++ b/src/sonic-swss-common @@ -1 +1 @@ -Subproject commit abda263ba0a584799312366b01dacd86dbaeb7cc +Subproject commit 276f47cab1768efd5a80da86904063b3ada5dd22 From d7a9f18d1840d39b423d42f9e8e636d79b1895de Mon Sep 17 00:00:00 2001 From: xumia <59720581+xumia@users.noreply.github.com> Date: Fri, 4 Nov 2022 15:40:16 +0800 Subject: [PATCH 138/174] [ci] Upload the debian packages (#12582) Why I did it [ci] Upload the debian packages --- .azure-pipelines/trigger-publish-artifacts-build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azure-pipelines/trigger-publish-artifacts-build.yml b/.azure-pipelines/trigger-publish-artifacts-build.yml index 7a6676e0f1ba..085fbe3cd05d 100644 --- a/.azure-pipelines/trigger-publish-artifacts-build.yml +++ b/.azure-pipelines/trigger-publish-artifacts-build.yml @@ -53,7 +53,7 @@ steps: **/*.raw\n **/*.img.gz\n **/*-rpc.gz\n - **/python-saithrift*.deb"}, + **/*.deb"}, publishContext: {"publishPrefix":"${{ parameters.publishPrefix }}", "keepArtifactName":false, "dockerImagePatterns":"target/*-rpc.gz", From 7fb8bf70126a16153619447a8b9c97112a496261 Mon Sep 17 00:00:00 2001 From: jerseyang <48576574+jerseyang@users.noreply.github.com> Date: Fri, 4 
Nov 2022 19:26:18 +0800 Subject: [PATCH 139/174] Fix the pddf_custom_wdt driver rarely reports kernel dump issue while reboot in belgite platform (#12322) Why I did it SONiC will report the kernel dump while system reboot in Belgite platform as the following shows: How I did it Cause: Invalid cdev container pointer from the inode is being accessing in misc device open, which causes a memory corruption in the slub. Because of the slub corruption, random crash is seen during reboot. Fix: - Instead of cdev pointer from the inode, mdev container pointer is used from the file->privdate_data member. Action: update the pddf_custom_wdt driver, How to verify it Do the reboot stress test to check whether there is kernel dump during reboot progress --- .../belgite/modules/pddf_custom_wdt.c | 29 ++++++++++--------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/platform/broadcom/sonic-platform-modules-cel/belgite/modules/pddf_custom_wdt.c b/platform/broadcom/sonic-platform-modules-cel/belgite/modules/pddf_custom_wdt.c index a73a6f6eb143..21ef0de9f31c 100644 --- a/platform/broadcom/sonic-platform-modules-cel/belgite/modules/pddf_custom_wdt.c +++ b/platform/broadcom/sonic-platform-modules-cel/belgite/modules/pddf_custom_wdt.c @@ -86,6 +86,7 @@ struct cpld_wdt_private { struct platform_device *pdev; struct watchdog_device wddev; struct cdev cdev; + struct miscdevice mdev; bool suspended; struct wdt_data wdat; }; @@ -100,7 +101,7 @@ MODULE_PARM_DESC(timeout, "Start watchdog timer on module load with" " Zero (default) disables this feature."); static bool nowayout = WATCHDOG_NOWAYOUT; -module_param(nowayout, bool, 0444); +module_param(nowayout, bool, 0644); MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close"); static unsigned int watchdog_get_timeleft(struct cpld_wdt_private *wdt) @@ -114,7 +115,7 @@ static unsigned int watchdog_get_timeleft(struct cpld_wdt_private *wdt) time = time << 8 | inb(WDT_TIMER_L_BIT_REG); time = time/1000; 
mutex_unlock(&wdt->wdat.lock); - //pr_crit("Watchdog Get Timeleft:%u\n", time); + return time; } static int watchdog_get_timeout(struct cpld_wdt_private *wdt) @@ -175,7 +176,7 @@ static int watchdog_ping(struct cpld_wdt_private *wdt) outb(WDT_START_FEED, WDT_FEED_REG); /* stop feed watchdog */ outb(WDT_STOP_FEED, WDT_FEED_REG); - //pr_crit("Watchdog Ping\n"); + mutex_unlock(&wdt->wdat.lock); return 0; @@ -198,7 +199,7 @@ static void watchdog_keepalive(struct cpld_wdt_private *wdt) val &= 0x1; /* start feed watchdog */ outb(val, WDT_FEED_REG); - //pr_crit("Watchdog Keepalive\n"); + mutex_unlock(&wdt->wdat.lock); return; } @@ -214,7 +215,7 @@ static int watchdog_start(struct cpld_wdt_private *wdt) outb(WDT_ENABLE, WDT_ENABLE_REG); outb(WDT_RESTART, WDT_PUNCH_REG); mutex_unlock(&wdt->wdat.lock); - //pr_crit("Watchdog Start:Enable and PUNCH\n"); + return 0; } @@ -226,7 +227,7 @@ static int watchdog_stop(struct cpld_wdt_private *wdt) mutex_lock(&wdt->wdat.lock); outb(WDT_DISABLE, WDT_ENABLE_REG); mutex_unlock(&wdt->wdat.lock); - //pr_crit("Watchdog Stop\n"); + return 0; } @@ -370,7 +371,7 @@ static int watchdog_open(struct inode *inode, struct file *file) { struct cpld_wdt_private *wdt; - wdt = container_of(inode->i_cdev, struct cpld_wdt_private, cdev); + wdt = container_of(file->private_data, struct cpld_wdt_private, mdev); /* If the watchdog is alive we don't need to start it again */ @@ -384,14 +385,14 @@ static int watchdog_open(struct inode *inode, struct file *file) wdt->wdat.expect_close = 0; - file->private_data = wdt; + return nonseekable_open(inode, file); } static int watchdog_release(struct inode *inode, struct file *file) { struct cpld_wdt_private *p; - p = (struct cpld_wdt_private *)file->private_data; + p = container_of(file->private_data, struct cpld_wdt_private, mdev); if(!p) return -EINVAL; @@ -423,7 +424,7 @@ static ssize_t watchdog_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct cpld_wdt_private *p; - p = (struct 
cpld_wdt_private *)file->private_data; + p = container_of(file->private_data, struct cpld_wdt_private, mdev); if(!p) return -EINVAL; @@ -480,7 +481,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd, uarg.i = (int __user *)arg; struct cpld_wdt_private *p; - p = (struct cpld_wdt_private *)file->private_data; + p = container_of(file->private_data, struct cpld_wdt_private, mdev); if(!p) return -EINVAL; @@ -627,8 +628,8 @@ static int cpld_wdt_probe(struct platform_device *pdev) err = register_reboot_notifier(&watchdog_notifier); if (err) return err; - - err = misc_register(&watchdog_miscdev); + p->mdev = watchdog_miscdev; + err = misc_register(&p->mdev); if (err) { pr_err("cannot register miscdev on minor=%d\n", watchdog_miscdev.minor); @@ -672,7 +673,7 @@ static int cpld_wdt_remove(struct platform_device *pdev) sysfs_remove_group(&pdev->dev.kobj, &wdt_group); - misc_deregister(&watchdog_miscdev); + misc_deregister(&p->mdev); unregister_reboot_notifier(&watchdog_notifier); From b522b7762f11a90991bc9c63ecca92ad197b8dae Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Fri, 4 Nov 2022 10:25:17 -0400 Subject: [PATCH 140/174] [sonic-py-common] Remove subprocess with shell=True (#12562) Signed-off-by: maipbui #### Why I did it `subprocess` is used with `shell=True`, which is very dangerous for shell injection. 
#### How I did it remove `shell=True`, use `shell=False` #### How to verify it Manual test Pass UT --- .../sonic_py_common/device_info.py | 84 +++++++++++++------ .../sonic_py_common/multi_asic.py | 3 +- 2 files changed, 59 insertions(+), 28 deletions(-) diff --git a/src/sonic-py-common/sonic_py_common/device_info.py b/src/sonic-py-common/sonic_py_common/device_info.py index 8173c2677275..3e14979fe4d6 100644 --- a/src/sonic-py-common/sonic_py_common/device_info.py +++ b/src/sonic-py-common/sonic_py_common/device_info.py @@ -6,7 +6,7 @@ import yaml from natsort import natsorted - +from sonic_py_common.general import getstatusoutput_noshell_pipe from swsscommon.swsscommon import ConfigDBConnector, SonicV2Connector USR_SHARE_SONIC_PATH = "/usr/share/sonic" @@ -531,7 +531,27 @@ def _valid_mac_address(mac): return bool(re.match("^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$", mac)) +def run_command(cmd): + proc = subprocess.Popen(cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (out, err) = proc.communicate() + return (out, err) + + +def run_command_pipe(cmd0, cmd1, cmd2): + exitcodes, out = getstatusoutput_noshell_pipe(cmd0, cmd1, cmd2) + if exitcodes == [0, 0, 0]: + err = None + else: + err = out + return (out, err) + + def get_system_mac(namespace=None): + hw_mac_entry_outputs = [] + syseeprom_cmd = ["sudo", "decode-syseeprom", "-m"] + iplink_cmd0 = ["ip", 'link', 'show', 'eth0'] + iplink_cmd1 = ['grep', 'ether'] + iplink_cmd2 = ['awk', '{print $2}'] version_info = get_sonic_version_info() if (version_info['asic_type'] == 'mellanox'): @@ -548,36 +568,51 @@ def get_system_mac(namespace=None): if _valid_mac_address(mac): return mac - hw_mac_entry_cmds = [ "sudo decode-syseeprom -m" ] + (mac, err) = run_command(syseeprom_cmd) + hw_mac_entry_outputs.append((mac, err)) elif (version_info['asic_type'] == 'marvell'): # Try valid mac in eeprom, else fetch it from eth0 platform = get_platform() machine_key = "onie_machine" machine_vars = 
get_machine_info() + (mac, err) = run_command(syseeprom_cmd) + hw_mac_entry_outputs.append((mac, err)) if machine_vars is not None and machine_key in machine_vars: hwsku = machine_vars[machine_key] - profile_cmd = 'cat ' + HOST_DEVICE_PATH + '/' + platform + '/' + hwsku + '/profile.ini | grep switchMacAddress | cut -f2 -d=' + profile_cmd0 = ['cat', HOST_DEVICE_PATH + '/' + platform + '/' + hwsku + '/profile.ini'] + profile_cmd1 = ['grep', 'switchMacAddress'] + profile_cmd2 = ['cut', '-f2', '-d', '='] + (mac, err) = run_command_pipe(profile_cmd0, profile_cmd1, profile_cmd2) else: - profile_cmd = "false" - hw_mac_entry_cmds = ["sudo decode-syseeprom -m", profile_cmd, "ip link show eth0 | grep ether | awk '{print $2}'"] + profile_cmd = ["false"] + (mac, err) = run_command(profile_cmd) + hw_mac_entry_outputs.append((mac, err)) + (mac, err) = run_command_pipe(iplink_cmd0, iplink_cmd1, iplink_cmd2) + hw_mac_entry_outputs.append((mac, err)) elif (version_info['asic_type'] == 'cisco-8000'): # Try to get valid MAC from profile.ini first, else fetch it from syseeprom or eth0 platform = get_platform() if namespace is not None: - profile_cmd = 'cat ' + HOST_DEVICE_PATH + '/' + platform + '/profile.ini | grep ' + namespace + 'switchMacAddress | cut -f2 -d=' + profile_cmd0 = ['cat', HOST_DEVICE_PATH + '/' + platform + '/profile.ini'] + profile_cmd1 = ['grep', str(namespace)+'switchMacAddress'] + profile_cmd2 = ['cut', '-f2', '-d', '='] + (mac, err) = run_command_pipe(profile_cmd0, profile_cmd1, profile_cmd2) else: - profile_cmd = "false" - hw_mac_entry_cmds = [profile_cmd, "sudo decode-syseeprom -m", "ip link show eth0 | grep ether | awk '{print $2}'"] + profile_cmd = ["false"] + (mac, err) = run_command(profile_cmd) + hw_mac_entry_outputs.append((mac, err)) + (mac, err) = run_command(syseeprom_cmd) + hw_mac_entry_outputs.append((mac, err)) + (mac, err) = run_command_pipe(iplink_cmd0, iplink_cmd1, iplink_cmd2) + hw_mac_entry_outputs.append((mac, err)) else: - mac_address_cmd = 
"cat /sys/class/net/eth0/address" + mac_address_cmd = ["cat", "/sys/class/net/eth0/address"] if namespace is not None: - mac_address_cmd = "sudo ip netns exec {} {}".format(namespace, mac_address_cmd) - - hw_mac_entry_cmds = [mac_address_cmd] + mac_address_cmd = ['sudo', 'ip', 'netns', 'exec', str(namespace)] + mac_address_cmd + (mac, err) = run_command(mac_address_cmd) + hw_mac_entry_outputs.append((mac, err)) - for get_mac_cmd in hw_mac_entry_cmds: - proc = subprocess.Popen(get_mac_cmd, shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - (mac, err) = proc.communicate() + for (mac, err) in hw_mac_entry_outputs: if err: continue mac = mac.strip() @@ -602,17 +637,14 @@ def get_system_routing_stack(): Returns: A string containing the name of the routing stack in use on the device """ - command = "sudo docker ps | grep bgp | awk '{print$2}' | cut -d'-' -f3 | cut -d':' -f1" + cmd0 = ['sudo', 'docker', 'ps'] + cmd1 = ['grep', 'bgp'] + cmd2 = ['awk', '{print$2}'] + cmd3 = ['cut', '-d', '-', '-f3'] + cmd4 = ['cut', '-d', ':', '-f1'] try: - proc = subprocess.Popen(command, - stdout=subprocess.PIPE, - shell=True, - universal_newlines=True, - stderr=subprocess.STDOUT) - stdout = proc.communicate()[0] - proc.wait() - result = stdout.rstrip('\n') + _, result = getstatusoutput_noshell_pipe(cmd0, cmd1, cmd2, cmd3, cmd4) except OSError as e: raise OSError("Cannot detect routing stack") @@ -644,8 +676,8 @@ def is_warm_restart_enabled(container_name): # Check if System fast reboot is enabled. 
def is_fast_reboot_enabled(): fb_system_state = 0 - cmd = 'sonic-db-cli STATE_DB get "FAST_REBOOT|system"' - proc = subprocess.Popen(cmd, shell=True, universal_newlines=True, stdout=subprocess.PIPE) + cmd = ['sonic-db-cli', 'STATE_DB', 'get', "FAST_REBOOT|system"] + proc = subprocess.Popen(cmd, universal_newlines=True, stdout=subprocess.PIPE) (stdout, stderr) = proc.communicate() if proc.returncode != 0: diff --git a/src/sonic-py-common/sonic_py_common/multi_asic.py b/src/sonic-py-common/sonic_py_common/multi_asic.py index d63c698392e4..54f9a01dec5a 100644 --- a/src/sonic-py-common/sonic_py_common/multi_asic.py +++ b/src/sonic-py-common/sonic_py_common/multi_asic.py @@ -157,10 +157,9 @@ def get_current_namespace(pid=None): """ net_namespace = None - command = ["sudo /bin/ip netns identify {}".format(os.getpid() if not pid else pid)] + command = ["sudo", '/bin/ip', 'netns', 'identify', "{}".format(os.getpid() if not pid else pid)] proc = subprocess.Popen(command, stdout=subprocess.PIPE, - shell=True, universal_newlines=True, stderr=subprocess.STDOUT) try: From 61a085e55e72cf2c28ed74004a266fad2b2858b7 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Fri, 4 Nov 2022 10:48:51 -0400 Subject: [PATCH 141/174] Replace os.system and remove subprocess with shell=True (#12177) Signed-off-by: maipbui #### Why I did it `subprocess` is used with `shell=True`, which is very dangerous for shell injection. 
`os` - not secure against maliciously constructed input and dangerous if used to evaluate dynamic content #### How I did it remove `shell=True`, use `shell=False` Replace `os` by `subprocess` --- dockers/docker-lldp/lldpmgrd | 18 +++++++++--------- dockers/docker-nat/restore_nat_entries.py | 12 ++++++------ .../corefile_uploader/core_uploader.py | 6 +++--- files/image_config/monit/memory_checker | 6 +++--- files/image_config/monit/restart_service | 6 +++--- files/scripts/mark_dhcp_packet.py | 7 +++---- 6 files changed, 27 insertions(+), 28 deletions(-) diff --git a/dockers/docker-lldp/lldpmgrd b/dockers/docker-lldp/lldpmgrd index 753aa678eaee..331fedfb4ca3 100755 --- a/dockers/docker-lldp/lldpmgrd +++ b/dockers/docker-lldp/lldpmgrd @@ -80,10 +80,10 @@ class LldpManager(daemon_base.DaemonBase): self.port_init_done = False def update_hostname(self, hostname): - cmd = "lldpcli configure system hostname {0}".format(hostname) + cmd = ["lldpcli", "configure", "system", "hostname", hostname] self.log_debug("Running command: '{}'".format(cmd)) - proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + proc = subprocess.Popen(cmd,stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout, stderr) = proc.communicate() if proc.returncode != 0: @@ -93,15 +93,15 @@ class LldpManager(daemon_base.DaemonBase): def update_mgmt_addr(self, ip): if ip == "None": - cmd = "lldpcli unconfigure system ip management pattern" + cmd = ["lldpcli", "unconfigure", "system", "ip", "management", "pattern"] self.log_info("Mgmt IP {0} deleted".format(self.mgmt_ip)) else: - cmd = "lldpcli configure system ip management pattern {0}".format(ip) + cmd = ["lldpcli", "configure", "system", "ip", "management", "pattern", ip] self.log_info("Mgmt IP changed old ip {0}, new ip {1}".format(self.mgmt_ip, ip)) self.log_debug("Running command: '{}'".format(cmd)) - proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + proc = subprocess.Popen(cmd, 
stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout, stderr) = proc.communicate() if proc.returncode != 0: @@ -150,11 +150,11 @@ class LldpManager(daemon_base.DaemonBase): # Get the port description. If None or empty string, we'll skip this configuration port_desc = port_table_dict.get("description") - lldpcli_cmd = "lldpcli configure ports {0} lldp portidsubtype local {1}".format(port_name, port_alias) + lldpcli_cmd = ["lldpcli", "configure", "ports", port_name, "lldp", "portidsubtype", "local", port_alias] # if there is a description available, also configure that if port_desc: - lldpcli_cmd += " description '{}'".format(port_desc) + lldpcli_cmd += ["description", port_desc] else: self.log_info("Unable to retrieve description for port '{}'. Not adding port description".format(port_name)) @@ -330,7 +330,7 @@ class LldpManager(daemon_base.DaemonBase): self.port_init_done = self.port_config_done = True if self.port_init_done and self.port_config_done: self.port_init_done = self.port_config_done = False - rc, stderr = run_cmd(self, "lldpcli resume") + rc, stderr = run_cmd(self, ["lldpcli", "resume"]) if rc != 0: self.log_error("Failed to resume lldpd with command: 'lldpcli resume': {}".format(stderr)) sys.exit(1) @@ -350,7 +350,7 @@ def main(): def run_cmd(self, cmd): - proc = subprocess.Popen(cmd, shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + proc = subprocess.Popen(cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout, stderr) = proc.communicate() return proc.returncode, stderr diff --git a/dockers/docker-nat/restore_nat_entries.py b/dockers/docker-nat/restore_nat_entries.py index cf10d983ab98..9249d95b22af 100755 --- a/dockers/docker-nat/restore_nat_entries.py +++ b/dockers/docker-nat/restore_nat_entries.py @@ -34,13 +34,13 @@ def add_nat_conntrack_entry_in_kernel(ipproto, srcip, dstip, srcport, dstport, natsrcip, natdstip, natsrcport, natdstport): # pyroute2 doesn't have support for 
adding conntrack entries via netlink yet. So, invoking the conntrack utility to add the entries. - state = '' + state = [] if (ipproto == IP_PROTO_TCP): - state = ' --state ESTABLISHED ' - ctcmd = 'conntrack -I -n ' + natdstip + ':' + natdstport + ' -g ' + natsrcip + ':' + natsrcport + \ - ' --protonum ' + ipproto + state + ' --timeout 432000 --src ' + srcip + ' --sport ' + srcport + \ - ' --dst ' + dstip + ' --dport ' + dstport + ' -u ASSURED' - subprocess.call(ctcmd, shell=True) + state = ['--state', 'ESTABLISHED'] + ctcmd = ['conntrack', '-I', '-n', natdstip + ':' + natdstport, '-g', natsrcip + ':' + natsrcport, \ + '--protonum', ipproto] + state + ['--timeout', '432000', '--src', srcip, '--sport', srcport, \ + '--dst', dstip, '--dport', dstport, '-u', 'ASSURED'] + subprocess.call(ctcmd) logger.log_info("Restored NAT entry: {}".format(ctcmd)) diff --git a/files/image_config/corefile_uploader/core_uploader.py b/files/image_config/corefile_uploader/core_uploader.py index aba78618307c..efbe88f4a75c 100755 --- a/files/image_config/corefile_uploader/core_uploader.py +++ b/files/image_config/corefile_uploader/core_uploader.py @@ -5,7 +5,7 @@ import socket import tarfile import time - +import subprocess import yaml from azure.storage.file import FileService from sonic_py_common.logger import Logger @@ -42,8 +42,8 @@ def make_new_dir(p): - os.system("rm -rf " + p) - os.system("mkdir -p " + p) + subprocess.call(["rm", "-rf", p]) + subprocess.call(["mkdir", "-p", p]) def parse_a_json(data, prefix, val): diff --git a/files/image_config/monit/memory_checker b/files/image_config/monit/memory_checker index a93bc30b3fe4..bcb487261af0 100755 --- a/files/image_config/monit/memory_checker +++ b/files/image_config/monit/memory_checker @@ -41,7 +41,7 @@ def get_command_result(command): try: proc_instance = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - shell=True, universal_newlines=True) + universal_newlines=True) command_stdout, command_stderr = 
proc_instance.communicate() if proc_instance.returncode != 0: syslog.syslog(syslog.LOG_ERR, "[memory_checker] Failed to execute the command '{}'. Return code: '{}'" @@ -66,7 +66,7 @@ def check_memory_usage(container_name, threshold_value): Returns: None. """ - command = "docker stats --no-stream --format \{{\{{.MemUsage\}}\}} {}".format(container_name) + command = ["docker", "stats", "--no-stream", "--format", "{{.MemUsage}}", container_name] command_stdout = get_command_result(command) mem_usage = command_stdout.split("/")[0].strip() match_obj = re.match(r"\d+\.?\d*", mem_usage) @@ -105,7 +105,7 @@ def is_service_active(service_name): Returns: True if service is running, False otherwise """ - status = subprocess.run("systemctl is-active --quiet {}".format(service_name), shell=True, check=False) + status = subprocess.run(["systemctl", "is-active", "--quiet", service_name]) return status.returncode == 0 diff --git a/files/image_config/monit/restart_service b/files/image_config/monit/restart_service index 40da147e9526..94cc3b7a5677 100755 --- a/files/image_config/monit/restart_service +++ b/files/image_config/monit/restart_service @@ -39,7 +39,7 @@ def get_command_result(command): try: proc_instance = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - shell=True, universal_newlines=True) + universal_newlines=True) command_stdout, command_stderr = proc_instance.communicate() if proc_instance.returncode != 0: return 1, command_stdout.strip(), command_stderr.strip() @@ -58,7 +58,7 @@ def reset_failed_flag(service_name): Returns: None """ - reset_failed_command = "sudo systemctl reset-failed {}.service".format(service_name) + reset_failed_command = ["sudo", "systemctl", "reset-failed", "{}.service".format(service_name)] syslog.syslog(syslog.LOG_INFO, "Resetting failed status of service '{}' ..." .format(service_name)) @@ -81,7 +81,7 @@ def restart_service(service_name): Returns: None. 
""" - restart_command = "sudo systemctl restart {}.service".format(service_name) + restart_command = ["sudo", "systemctl", "restart", "{}.service".format(service_name)] reset_failed_flag(service_name) diff --git a/files/scripts/mark_dhcp_packet.py b/files/scripts/mark_dhcp_packet.py index c80810f0316b..4c8e6a728bbd 100755 --- a/files/scripts/mark_dhcp_packet.py +++ b/files/scripts/mark_dhcp_packet.py @@ -73,18 +73,17 @@ def generate_mark_from_index(self, index): return intf_mark def run_command(self, cmd): - subprocess.call(cmd, shell=True) + subprocess.call(cmd) log.log_info("run command: {}".format(cmd)) def clear_dhcp_packet_marks(self): ''' Flush the INPUT chain in ebtables upon restart ''' - self.run_command("sudo ebtables -F INPUT") + self.run_command(["sudo", "ebtables", "-F", "INPUT"]) def apply_mark_in_ebtables(self, intf, mark): - self.run_command("sudo ebtables -A INPUT -i {} -j mark --mark-set {}" - .format(intf, mark)) + self.run_command(["sudo", "ebtables", "-A", "INPUT", "-i", intf, "-j", "mark", "--mark-set", mark]) def update_mark_in_state_db(self, intf, mark): self.state_db.set( From 661c467858f71bff08b74a4707d7dda6d37b0d50 Mon Sep 17 00:00:00 2001 From: StormLiangMS <89824293+StormLiangMS@users.noreply.github.com> Date: Mon, 7 Nov 2022 09:32:42 +0800 Subject: [PATCH 142/174] Revert "[sonic-config-engine] Replace os.system, replace yaml.load, remove subprocess with shell=True (#12533)" (#12616) This reverts commit 934871cce14701e59df0a5f4bc60b13db9423e00. 
Unblocking sync from github to internal --- src/sonic-config-engine/sonic-cfggen | 2 +- src/sonic-config-engine/tests/common_utils.py | 21 +- src/sonic-config-engine/tests/test_cfggen.py | 202 +++++++++--------- .../tests/test_cfggen_from_yang.py | 54 ++--- .../tests/test_cfggen_pfx_filter.py | 13 +- .../tests/test_cfggen_platformJson.py | 26 +-- .../tests/test_cfggen_t2_chassis_fe.py | 21 +- src/sonic-config-engine/tests/test_frr.py | 23 +- src/sonic-config-engine/tests/test_j2files.py | 167 ++++++++------- .../tests/test_j2files_t2_chassis_fe.py | 20 +- .../tests/test_minigraph_case.py | 95 ++++---- .../tests/test_multinpu_cfggen.py | 117 +++++----- 12 files changed, 384 insertions(+), 377 deletions(-) diff --git a/src/sonic-config-engine/sonic-cfggen b/src/sonic-config-engine/sonic-cfggen index 287640d8a119..d5358f633dbf 100755 --- a/src/sonic-config-engine/sonic-cfggen +++ b/src/sonic-config-engine/sonic-cfggen @@ -351,7 +351,7 @@ def main(): if yaml.__version__ >= "5.1": additional_data = yaml.full_load(stream) else: - additional_data = yaml.safe_load(stream) + additional_data = yaml.load(stream) deep_update(data, FormatConverter.to_deserialized(additional_data)) if args.additional_data is not None: diff --git a/src/sonic-config-engine/tests/common_utils.py b/src/sonic-config-engine/tests/common_utils.py index d2be32c8544d..72325ecbc1e0 100644 --- a/src/sonic-config-engine/tests/common_utils.py +++ b/src/sonic-config-engine/tests/common_utils.py @@ -5,6 +5,7 @@ import sys import subprocess import argparse +import shlex PY3x = sys.version_info >= (3, 0) PYvX_DIR = "py3" if PY3x else "py2" @@ -46,7 +47,7 @@ def __init__(self, path=YANG_MODELS_DIR): self.yang_parser = sonic_yang.SonicYang(path) self.yang_parser.loadYangModel() self.test_dir = os.path.dirname(os.path.realpath(__file__)) - self.script_file = [PYTHON_INTERPRETTER, os.path.join(self.test_dir, '..', 'sonic-cfggen')] + self.script_file = PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 
'sonic-cfggen') def validate(self, argument): """ @@ -61,22 +62,22 @@ def validate(self, argument): parser.add_argument("-p", "--port-config", help="port config file, used with -m or -k", nargs='?', const=None) parser.add_argument("-S", "--hwsku-config", help="hwsku config file, used with -p and -m or -k", nargs='?', const=None) parser.add_argument("-j", "--json", help="additional json file input, used with -p, -S and -m or -k", nargs='?', const=None) - args, unknown = parser.parse_known_args(argument) + args, unknown = parser.parse_known_args(shlex.split(argument)) print('\n Validating yang schema') - cmd = self.script_file + ['-m', args.minigraph] + cmd = self.script_file + ' -m ' + args.minigraph if args.hwsku is not None: - cmd += ['-k', args.hwsku] + cmd += ' -k ' + args.hwsku if args.hwsku_config is not None: - cmd += ['-S', args.hwsku_config] + cmd += ' -S ' + args.hwsku_config if args.port_config is not None: - cmd += ['-p', args.port_config] + cmd += ' -p ' + args.port_config if args.namespace is not None: - cmd += ['-n', args.namespace] + cmd += ' -n ' + args.namespace if args.json is not None: - cmd += ['-j', args.json] - cmd += ['--print-data'] - output = subprocess.check_output(cmd).decode() + cmd += ' -j ' + args.json + cmd += ' --print-data' + output = subprocess.check_output(cmd, shell=True).decode() try: self.yang_parser.loadData(configdbJson=json.loads(output)) self.yang_parser.validate_data_tree() diff --git a/src/sonic-config-engine/tests/test_cfggen.py b/src/sonic-config-engine/tests/test_cfggen.py index 3b979d4a52ec..bcc0625ec8a8 100644 --- a/src/sonic-config-engine/tests/test_cfggen.py +++ b/src/sonic-config-engine/tests/test_cfggen.py @@ -1,6 +1,7 @@ import json import subprocess import os + import tests.common_utils as utils from unittest import TestCase @@ -15,7 +16,7 @@ class TestCfgGen(TestCase): def setUp(self): self.yang = utils.YangWrapper() self.test_dir = os.path.dirname(os.path.realpath(__file__)) - self.script_file = 
[utils.PYTHON_INTERPRETTER, os.path.join(self.test_dir, '..', 'sonic-cfggen')] + self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 'sonic-cfggen') self.sample_graph = os.path.join(self.test_dir, 'sample_graph.xml') self.sample_graph_t0 = os.path.join(self.test_dir, 't0-sample-graph.xml') self.sample_graph_simple = os.path.join(self.test_dir, 'simple-sample-graph.xml') @@ -51,12 +52,13 @@ def tearDown(self): pass def run_script(self, argument, check_stderr=False, verbose=False): - print('\n Running sonic-cfggen ' + ' '.join(argument)) + print('\n Running sonic-cfggen ' + argument) self.assertTrue(self.yang.validate(argument)) + if check_stderr: - output = subprocess.check_output(self.script_file + argument, stderr=subprocess.STDOUT) + output = subprocess.check_output(self.script_file + ' ' + argument, stderr=subprocess.STDOUT, shell=True) else: - output = subprocess.check_output(self.script_file + argument) + output = subprocess.check_output(self.script_file + ' ' + argument, shell=True) if utils.PY3x: output = output.decode() @@ -71,52 +73,52 @@ def run_script(self, argument, check_stderr=False, verbose=False): return output def test_dummy_run(self): - argument = [] + argument = '' output = self.run_script(argument) self.assertEqual(output, '') def test_device_desc(self): - argument = ['-v', "DEVICE_METADATA[\'localhost\'][\'hwsku\']", "-M", self.sample_device_desc] + argument = '-v "DEVICE_METADATA[\'localhost\'][\'hwsku\']" -M "' + self.sample_device_desc + '"' output = self.run_script(argument) self.assertEqual(output.strip(), 'ACS-MSN2700') def test_device_desc_mgmt_ip(self): - argument = ['-v', "(MGMT_INTERFACE.keys()|list)[0]", '-M', self.sample_device_desc] + argument = '-v "(MGMT_INTERFACE.keys()|list)[0]" -M "' + self.sample_device_desc + '"' output = self.run_script(argument) self.assertEqual(output.strip(), "('eth0', '10.0.1.5/28')") def test_minigraph_hostname(self): - argument = ['-v', 
"DEVICE_METADATA[\'localhost\'][\'hostname\']", '-m', self.sample_graph, "-p", self.port_config] + argument = '-v "DEVICE_METADATA[\'localhost\'][\'hostname\']" -m "' + self.sample_graph + '" -p "' + self.port_config + '"' output = self.run_script(argument) self.assertEqual(output.strip(), 'OCPSCH01040DDLF') def test_minigraph_sku(self): - argument = ['-v', "DEVICE_METADATA[\'localhost\'][\'hwsku\']", '-m', self.sample_graph, '-p', self.port_config] + argument = '-v "DEVICE_METADATA[\'localhost\'][\'hwsku\']" -m "' + self.sample_graph + '" -p "' + self.port_config + '"' output = self.run_script(argument) self.assertEqual(output.strip(), 'Force10-Z9100') def test_minigraph_region(self): - argument = ['-v', "DEVICE_METADATA[\'localhost\'][\'region\']", '-m', self.sample_graph_metadata, '-p', self.port_config] + argument = '-v "DEVICE_METADATA[\'localhost\'][\'region\']" -m "' + self.sample_graph_metadata + '" -p "' + self.port_config + '"' output = self.run_script(argument) self.assertEqual(output.strip(), 'usfoo') def test_minigraph_cloudtype(self): - argument = ['-v', "DEVICE_METADATA[\'localhost\'][\'cloudtype\']", '-m', self.sample_graph_metadata, '-p', self.port_config] + argument = '-v "DEVICE_METADATA[\'localhost\'][\'cloudtype\']" -m "' + self.sample_graph_metadata + '" -p "' + self.port_config + '"' output = self.run_script(argument) self.assertEqual(output.strip(), 'Public') def test_minigraph_resourcetype(self): - argument = ['-v', "DEVICE_METADATA[\'localhost\'][\'resource_type\']", '-m', self.sample_graph_metadata, '-p', self.port_config] + argument = '-v "DEVICE_METADATA[\'localhost\'][\'resource_type\']" -m "' + self.sample_graph_metadata + '" -p "' + self.port_config + '"' output = self.run_script(argument) self.assertEqual(output.strip(), 'resource_type_x') def test_minigraph_downstream_subrole(self): - argument = ['-v', "DEVICE_METADATA[\'localhost\'][\'downstream_subrole\']", '-m', self.sample_graph_metadata, '-p', self.port_config] + argument = 
'-v "DEVICE_METADATA[\'localhost\'][\'downstream_subrole\']" -m "' + self.sample_graph_metadata + '" -p "' + self.port_config + '"' output = self.run_script(argument) self.assertEqual(output.strip(), 'downstream_subrole_y') def test_print_data(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '--print-data'] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" --print-data' output = self.run_script(argument) self.assertTrue(len(output.strip()) > 0) @@ -125,29 +127,29 @@ def test_jinja_expression(self, graph=None, port_config=None, expected_router_ty graph = self.sample_graph if port_config is None: port_config = self.port_config - argument = ['-m', graph, '-p', port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'type\']"] + argument = '-m "' + graph + '" -p "' + port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'type\']"' output = self.run_script(argument) self.assertEqual(output.strip(), expected_router_type) def test_additional_json_data(self): - argument = ['-a', '{"key1":"value1"}', '-v', 'key1'] + argument = '-a \'{"key1":"value1"}\' -v key1' output = self.run_script(argument) self.assertEqual(output.strip(), 'value1') def test_additional_json_data_level1_key(self): - argument = ['-a', '{"k1":{"k11":"v11","k12":"v12"}, "k2":{"k22":"v22"}}', '--var-json', 'k1'] + argument = '-a \'{"k1":{"k11":"v11","k12":"v12"}, "k2":{"k22":"v22"}}\' --var-json k1' output = self.run_script(argument) self.assertEqual(utils.to_dict(output.strip()), utils.to_dict('{\n "k11": "v11", \n "k12": "v12"\n}')) def test_additional_json_data_level2_key(self): - argument = ['-a', '{"k1":{"k11":"v11","k12":"v12"},"k2":{"k22":"v22"}}', '--var-json', 'k1', '-K', 'k11'] + argument = '-a \'{"k1":{"k11":"v11","k12":"v12"},"k2":{"k22":"v22"}}\' --var-json k1 -K k11' output = self.run_script(argument) self.assertEqual(utils.to_dict(output.strip()), utils.to_dict('{\n "k11": "v11"\n}')) def test_var_json_data(self, **kwargs): graph_file = 
kwargs.get('graph_file', self.sample_graph_simple) tag_mode = kwargs.get('tag_mode', 'untagged') - argument = ['-m', graph_file, '-p', self.port_config, '--var-json', 'VLAN_MEMBER'] + argument = '-m "' + graph_file + '" -p "' + self.port_config + '" --var-json VLAN_MEMBER' output = self.run_script(argument) if tag_mode == "tagged": self.assertEqual( @@ -173,20 +175,20 @@ def test_var_json_data(self, **kwargs): ) def test_read_yaml(self): - argument = ['-v', 'yml_item', '-y', os.path.join(self.test_dir, 'test.yml')] + argument = '-v yml_item -y ' + os.path.join(self.test_dir, 'test.yml') output = self.run_script(argument) self.assertEqual(output.strip(), '[\'value1\', \'value2\']') def test_render_template(self): - argument = ['-y', os.path.join(self.test_dir, 'test.yml'), '-t', os.path.join(self.test_dir, 'test.j2')] + argument = '-y ' + os.path.join(self.test_dir, 'test.yml') + ' -t ' + os.path.join(self.test_dir, 'test.j2') output = self.run_script(argument) self.assertEqual(output.strip(), 'value1\nvalue2') def test_template_batch_mode(self): - argument = ['-y', os.path.join(self.test_dir, 'test.yml')] - argument += ['-a', '{"key1":"value"}'] - argument += ['-t', os.path.join(self.test_dir, 'test.j2') + ',' + self.output_file] - argument += ['-t', os.path.join(self.test_dir, 'test2.j2') + ',' + self.output2_file] + argument = '-y ' + os.path.join(self.test_dir, 'test.yml') + argument += ' -a \'{"key1":"value"}\'' + argument += ' -t ' + os.path.join(self.test_dir, 'test.j2') + ',' + self.output_file + argument += ' -t ' + os.path.join(self.test_dir, 'test2.j2') + ',' + self.output2_file output = self.run_script(argument) assert(os.path.exists(self.output_file)) assert(os.path.exists(self.output2_file)) @@ -197,10 +199,10 @@ def test_template_batch_mode(self): def test_template_json_batch_mode(self): data = {"key1_1":"value1_1", "key1_2":"value1_2", "key2_1":"value2_1", "key2_2":"value2_2"} - argument = ["-a", '{0}'.format(repr(data).replace('\'', '"'))] - 
argument += ['-t', os.path.join(self.test_dir, 'sample-template-1.json.j2') + ",config-db"] - argument += ['-t', os.path.join(self.test_dir, 'sample-template-2.json.j2') + ",config-db"] - argument += ['--print-data'] + argument = " -a '{0}'".format(repr(data).replace('\'', '"')) + argument += ' -t ' + os.path.join(self.test_dir, 'sample-template-1.json.j2') + ",config-db" + argument += ' -t ' + os.path.join(self.test_dir, 'sample-template-2.json.j2') + ",config-db" + argument += ' --print-data' output = self.run_script(argument) output_data = json.loads(output) for key, value in data.items(): @@ -210,7 +212,7 @@ def test_template_json_batch_mode(self): # it is not at all intuitive what that ordering should be. Could make it # more robust by adding better parsing logic. def test_minigraph_acl(self): - argument = ['-m', self.sample_graph_t0, '-p', self.port_config, '-v', 'ACL_TABLE'] + argument = '-m "' + self.sample_graph_t0 + '" -p "' + self.port_config + '" -v ACL_TABLE' output = self.run_script(argument, True, True) self.assertEqual( utils.to_dict(output.strip().replace("Warning: Ignoring Control Plane ACL NTP_ACL without type\n", '')), @@ -234,7 +236,7 @@ def test_minigraph_acl(self): # self.assertEqual(output.strip(), "{'everflow0': {'src_ip': '10.1.0.32', 'dst_ip': '2.2.2.2'}}") def test_minigraph_mgmt_ports(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v','MGMT_PORT'] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v MGMT_PORT' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -242,13 +244,13 @@ def test_minigraph_mgmt_ports(self): ) def test_minigraph_interfaces(self): - argument = ['-m', self.sample_graph_simple, '-p', self.port_config, '-v', "INTERFACE.keys()|list"] + argument = '-m "' + self.sample_graph_simple + '" -p "' + self.port_config + '" -v "INTERFACE.keys()|list"' output = self.run_script(argument) self.assertEqual(output.strip(), "[('Ethernet0', 
'10.0.0.58/31'), 'Ethernet0', ('Ethernet0', 'FC00::75/126')]") def test_minigraph_vlans(self, **kwargs): graph_file = kwargs.get('graph_file', self.sample_graph_simple) - argument = ['-m', graph_file, '-p', self.port_config, '-v', 'VLAN'] + argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v VLAN' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -264,7 +266,7 @@ def test_minigraph_vlans(self, **kwargs): def test_minigraph_vlan_members(self, **kwargs): graph_file = kwargs.get('graph_file', self.sample_graph_simple) tag_mode = kwargs.get('tag_mode', 'untagged') - argument = ['-m', graph_file, '-p', self.port_config, '-v', 'VLAN_MEMBER'] + argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v VLAN_MEMBER' output = self.run_script(argument) if tag_mode == "tagged": self.assertEqual( @@ -291,12 +293,12 @@ def test_minigraph_vlan_members(self, **kwargs): def test_minigraph_vlan_interfaces(self, **kwargs): graph_file = kwargs.get('graph_file', self.sample_graph_simple) - argument = ['-m', graph_file, '-p', self.port_config, '-v', "VLAN_INTERFACE.keys()|list"] + argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "VLAN_INTERFACE.keys()|list"' output = self.run_script(argument) self.assertEqual(output.strip(), "[('Vlan1000', '192.168.0.1/27'), 'Vlan1000']") def test_minigraph_ecmp_fg_nhg(self): - argument = ['-m', self.ecmp_graph, '-p', self.mlnx_port_config, '-v', 'FG_NHG'] + argument = '-m "' + self.ecmp_graph + '" -p "' + self.mlnx_port_config + '" -v FG_NHG' output = self.run_script(argument) print(output.strip()) self.assertEqual(utils.to_dict(output.strip()), @@ -306,7 +308,7 @@ def test_minigraph_ecmp_fg_nhg(self): )) def test_minigraph_ecmp_members(self): - argument = ['-m', self.ecmp_graph, '-p', self.mlnx_port_config, '-v', "FG_NHG_MEMBER.keys()|list|sort"] + argument = '-m "' + self.ecmp_graph + '" -p "' + self.mlnx_port_config + '" -v "FG_NHG_MEMBER.keys()|list|sort"' output = 
self.run_script(argument) self.assertEqual(output.strip(), "['200.200.200.1', '200.200.200.10', '200.200.200.2', '200.200.200.3', '200.200.200.4', '200.200.200.5'," " '200.200.200.6', '200.200.200.7', '200.200.200.8', '200.200.200.9', '200:200:200:200::1', '200:200:200:200::10'," @@ -314,7 +316,7 @@ def test_minigraph_ecmp_members(self): " '200:200:200:200::7', '200:200:200:200::8', '200:200:200:200::9']") def test_minigraph_ecmp_neighbors(self): - argument = ['-m', self.ecmp_graph, '-p', self.mlnx_port_config, '-v', "NEIGH.keys()|list|sort"] + argument = '-m "' + self.ecmp_graph + '" -p "' + self.mlnx_port_config + '" -v "NEIGH.keys()|list|sort"' output = self.run_script(argument) self.assertEqual(output.strip(), "['Vlan31|200.200.200.1', 'Vlan31|200.200.200.10', 'Vlan31|200.200.200.2', 'Vlan31|200.200.200.3'," " 'Vlan31|200.200.200.4', 'Vlan31|200.200.200.5', 'Vlan31|200.200.200.6', 'Vlan31|200.200.200.7'," @@ -324,7 +326,7 @@ def test_minigraph_ecmp_neighbors(self): def test_minigraph_portchannels(self, **kwargs): graph_file = kwargs.get('graph_file', self.sample_graph_simple) - argument = ['-m', graph_file, '-p', self.port_config, '-v', 'PORTCHANNEL'] + argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v PORTCHANNEL' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -332,7 +334,7 @@ def test_minigraph_portchannels(self, **kwargs): ) def test_minigraph_portchannel_with_more_member(self): - argument = ['-m', self.sample_graph_pc_test, '-p', self.port_config, '-v', 'PORTCHANNEL'] + argument = '-m "' + self.sample_graph_pc_test + '" -p "' + self.port_config + '" -v PORTCHANNEL' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -340,7 +342,7 @@ def test_minigraph_portchannel_with_more_member(self): ) def test_minigraph_portchannel_members(self): - argument = ['-m', self.sample_graph_pc_test, '-p', self.port_config, '-v', "PORTCHANNEL_MEMBER.keys()|list"] + argument = '-m "' + 
self.sample_graph_pc_test + '" -p "' + self.port_config + '" -v "PORTCHANNEL_MEMBER.keys()|list"' output = self.run_script(argument) self.assertEqual( utils.liststr_to_dict(output.strip()), @@ -348,7 +350,7 @@ def test_minigraph_portchannel_members(self): ) def test_minigraph_portchannel_interfaces(self): - argument = ['-m', self.sample_graph_simple, '-p', self.port_config, '-v', "PORTCHANNEL_INTERFACE.keys()|list"] + argument = '-m "' + self.sample_graph_simple + '" -p "' + self.port_config + '" -v "PORTCHANNEL_INTERFACE.keys()|list"' output = self.run_script(argument) self.assertEqual( utils.liststr_to_dict(output.strip()), @@ -356,7 +358,7 @@ def test_minigraph_portchannel_interfaces(self): ) def test_minigraph_neighbors(self): - argument = ['-m', self.sample_graph_t0, '-p', self.port_config, '-v', "DEVICE_NEIGHBOR[\'Ethernet124\']"] + argument = '-m "' + self.sample_graph_t0 + '" -p "' + self.port_config + '" -v "DEVICE_NEIGHBOR[\'Ethernet124\']"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -367,7 +369,7 @@ def test_minigraph_neighbors(self): # it is not at all intuitive what that ordering should be. Could make it # more robust by adding better parsing logic. 
def test_minigraph_extra_neighbors(self): - argument = ['-m', self.sample_graph_t0, '-p', self.port_config, '-v', 'DEVICE_NEIGHBOR'] + argument = '-m "' + self.sample_graph_t0 + '" -p "' + self.port_config + '" -v DEVICE_NEIGHBOR' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -381,7 +383,7 @@ def test_minigraph_extra_neighbors(self): ) def test_minigraph_port_description(self): - argument = ['-m', self.sample_graph_t0, '-p', self.port_config, '-v', "PORT[\'Ethernet124\']"] + argument = '-m "' + self.sample_graph_t0 + '" -p "' + self.port_config + '" -v "PORT[\'Ethernet124\']"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -390,7 +392,7 @@ def test_minigraph_port_description(self): def test_minigraph_port_fec_disabled(self): # Test for FECDisabled - argument = ['-m', self.sample_graph_t0, '-p', self.port_config, '-v', "PORT[\'Ethernet4\']"] + argument = '-m "' + self.sample_graph_t0 + '" -p "' + self.port_config + '" -v "PORT[\'Ethernet4\']"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -399,7 +401,7 @@ def test_minigraph_port_fec_disabled(self): def test_minigraph_port_autonegotiation(self): # Test with a port_config.ini file which doesn't have an 'autoneg' column - argument = ['-m', self.sample_graph_t0, '-p', self.port_config, '-v', "PORT"] + argument = '-m "' + self.sample_graph_t0 + '" -p "' + self.port_config + '" -v "PORT"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -440,7 +442,7 @@ def test_minigraph_port_autonegotiation(self): ) # Test with a port_config.ini file which has an 'autoneg' column - argument = ['-m', self.sample_graph_t0, '-p', self.port_config_autoneg, '-v', "PORT"] + argument = '-m "' + self.sample_graph_t0 + '" -p "' + self.port_config_autoneg + '" -v "PORT"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -481,7 +483,7 @@ def 
test_minigraph_port_autonegotiation(self): ) def test_minigraph_port_rs(self): - argument = ['-m', self.sample_graph_t0, '-p', self.port_config, '-v', "PORT[\'Ethernet124\']"] + argument = '-m "' + self.sample_graph_t0 + '" -p "' + self.port_config + '" -v "PORT[\'Ethernet124\']"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -489,7 +491,7 @@ def test_minigraph_port_rs(self): ) def test_minigraph_bgp(self): - argument = ['-m', self.sample_graph_bgp_speaker, '-p', self.port_config, '-v', "BGP_NEIGHBOR[\'10.0.0.59\']"] + argument = '-m "' + self.sample_graph_bgp_speaker + '" -p "' + self.port_config + '" -v "BGP_NEIGHBOR[\'10.0.0.59\']"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -497,7 +499,7 @@ def test_minigraph_bgp(self): ) def test_minigraph_peers_with_range(self): - argument = ["-m", self.sample_graph_bgp_speaker, "-p", self.port_config, "-v", "BGP_PEER_RANGE.values()|list"] + argument = "-m " + self.sample_graph_bgp_speaker + " -p " + self.port_config + " -v \"BGP_PEER_RANGE.values()|list\"" output = self.run_script(argument) self.assertEqual( utils.liststr_to_dict(output.strip()), @@ -505,24 +507,24 @@ def test_minigraph_peers_with_range(self): ) def test_minigraph_deployment_id(self): - argument = ['-m', self.sample_graph_bgp_speaker, '-p', self.port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'deployment_id\']"] + argument = '-m "' + self.sample_graph_bgp_speaker + '" -p "' + self.port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'deployment_id\']"' output = self.run_script(argument) self.assertEqual(output.strip(), "1") def test_minigraph_deployment_id_null(self): - argument = ['-m', self.sample_graph_deployment_id, '-p', self.port_config, '-v', "DEVICE_METADATA[\'localhost\']"] + argument = '-m "' + self.sample_graph_deployment_id + '" -p "' + self.port_config + '" -v "DEVICE_METADATA[\'localhost\']"' output = self.run_script(argument) 
self.assertNotIn('deployment_id', output.strip()) def test_minigraph_ethernet_interfaces(self, **kwargs): graph_file = kwargs.get('graph_file', self.sample_graph_simple) - argument = ['-m', graph_file, '-p', self.port_config, '-v', "PORT[\'Ethernet8\']"] + argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "PORT[\'Ethernet8\']"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), utils.to_dict("{'lanes': '37,38,39,40', 'description': 'Interface description', 'pfc_asym': 'off', 'mtu': '9100', 'alias': 'fortyGigE0/8', 'admin_status': 'up', 'speed': '1000', 'tpid': '0x8100'}") ) - argument = ['-m', graph_file, '-p', self.port_config, '-v', "PORT[\'Ethernet12\']"] + argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "PORT[\'Ethernet12\']"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -530,7 +532,7 @@ def test_minigraph_ethernet_interfaces(self, **kwargs): ) def test_minigraph_neighbor_interfaces(self): - argument = ['-m', self.sample_graph_simple_case, '-p', self.port_config, '-v', "PORT"] + argument = '-m "' + self.sample_graph_simple_case + '" -p "' + self.port_config + '" -v "PORT"' output = self.run_script(argument) self.assertEqual( @@ -573,7 +575,7 @@ def test_minigraph_neighbor_interfaces(self): def test_minigraph_neighbor_interfaces_config_db(self): # test to check if PORT table is retrieved from config_db - argument = ['-m', self.sample_graph_simple_case, '-p', self.port_config, '-v', "PORT"] + argument = '-m "' + self.sample_graph_simple_case + '" -p "' + self.port_config + '" -v "PORT"' output = self.run_script(argument) self.assertEqual( @@ -616,7 +618,7 @@ def test_minigraph_neighbor_interfaces_config_db(self): def test_minigraph_extra_ethernet_interfaces(self, **kwargs): graph_file = kwargs.get('graph_file', self.sample_graph_simple) - argument = ['-m', graph_file, '-p', self.port_config, '-v', "PORT"] + argument = '-m "' + graph_file + '" -p "' + 
self.port_config + '" -v "PORT"' output = self.run_script(argument) self.assertEqual( @@ -664,7 +666,7 @@ def test_minigraph_extra_ethernet_interfaces(self, **kwargs): # self.assertEqual(output.strip(), "{'everflow0': {'src_ip': '10.1.0.32', 'dst_ip': '10.0.100.1'}}") def test_metadata_tacacs(self): - argument = ['-m', self.sample_graph_metadata, '-p', self.port_config, '-v', "TACPLUS_SERVER"] + argument = '-m "' + self.sample_graph_metadata + '" -p "' + self.port_config + '" -v "TACPLUS_SERVER"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -672,24 +674,24 @@ def test_metadata_tacacs(self): ) def test_metadata_ntp(self): - argument = ['-m', self.sample_graph_metadata, '-p', self.port_config, '-v', "NTP_SERVER"] + argument = '-m "' + self.sample_graph_metadata + '" -p "' + self.port_config + '" -v "NTP_SERVER"' output = self.run_script(argument) self.assertEqual(utils.to_dict(output.strip()), utils.to_dict("{'10.0.10.1': {}, '10.0.10.2': {}}")) def test_minigraph_vnet(self, **kwargs): graph_file = kwargs.get('graph_file', self.sample_graph_simple) - argument = ['-m', graph_file, '-p', self.port_config, '-v', "VNET"] + argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "VNET"' output = self.run_script(argument) self.assertEqual(output.strip(), "") def test_minigraph_vxlan(self, **kwargs): graph_file = kwargs.get('graph_file', self.sample_graph_simple) - argument = ['-m', graph_file, '-p', self.port_config, '-v', "VXLAN_TUNNEL"] + argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "VXLAN_TUNNEL"' output = self.run_script(argument) self.assertEqual(output.strip(), "") def test_minigraph_bgp_mon(self): - argument = ['-m', self.sample_graph_simple, '-p', self.port_config, '-v', "BGP_MONITORS"] + argument = '-m "' + self.sample_graph_simple + '" -p "' + self.port_config + '" -v "BGP_MONITORS"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -697,7 +699,7 @@ 
def test_minigraph_bgp_mon(self): ) def test_minigraph_bgp_voq_chassis_peer(self): - argument = ['-m', self.sample_graph_simple, '-p', self.port_config, '-v', "BGP_VOQ_CHASSIS_NEIGHBOR[\'10.2.0.21\']"] + argument = '-m "' + self.sample_graph_simple + '" -p "' + self.port_config + '" -v "BGP_VOQ_CHASSIS_NEIGHBOR[\'10.2.0.21\']"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -705,7 +707,7 @@ def test_minigraph_bgp_voq_chassis_peer(self): ) # make sure VoQChassisInternal value of false is honored - argument = ['-m', self.sample_graph_simple, '-p', self.port_config, '-v', "BGP_VOQ_CHASSIS_NEIGHBOR[\'10.0.0.57\']"] + argument = '-m "' + self.sample_graph_simple + '" -p "' + self.port_config + '" -v "BGP_VOQ_CHASSIS_NEIGHBOR[\'10.0.0.57\']"' output = self.run_script(argument) self.assertEqual(output.strip(), "") @@ -725,14 +727,14 @@ def test_minigraph_backend_acl_leaf(self, check_stderr=True): try: print('\n Change device type to %s' % (BACKEND_LEAF_ROUTER)) if check_stderr: - output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (TOR_ROUTER, BACKEND_LEAF_ROUTER), self.sample_backend_graph], stderr=subprocess.STDOUT) + output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (TOR_ROUTER, BACKEND_LEAF_ROUTER, self.sample_backend_graph), stderr=subprocess.STDOUT, shell=True) else: - output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (TOR_ROUTER, BACKEND_LEAF_ROUTER), self.sample_backend_graph]) + output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (TOR_ROUTER, BACKEND_LEAF_ROUTER, self.sample_backend_graph), shell=True) self.test_jinja_expression(self.sample_backend_graph, self.port_config, BACKEND_LEAF_ROUTER) # ACL_TABLE should contain EVERFLOW related entries - argument = ['-m', self.sample_backend_graph, '-p', self.port_config, '-v', "ACL_TABLE"] + argument = '-m "' + self.sample_backend_graph + '" -p "' + self.port_config + '" -v "ACL_TABLE"' output = self.run_script(argument) sample_output = 
utils.to_dict(output.strip()).keys() assert 'DATAACL' not in sample_output, sample_output @@ -741,9 +743,9 @@ def test_minigraph_backend_acl_leaf(self, check_stderr=True): finally: print('\n Change device type back to %s' % (TOR_ROUTER)) if check_stderr: - output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (BACKEND_LEAF_ROUTER, TOR_ROUTER), self.sample_backend_graph], stderr=subprocess.STDOUT) + output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (BACKEND_LEAF_ROUTER, TOR_ROUTER, self.sample_backend_graph), stderr=subprocess.STDOUT, shell=True) else: - output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (BACKEND_LEAF_ROUTER, TOR_ROUTER), self.sample_backend_graph]) + output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (BACKEND_LEAF_ROUTER, TOR_ROUTER, self.sample_backend_graph), shell=True) self.test_jinja_expression(self.sample_backend_graph, self.port_config, TOR_ROUTER) @@ -751,23 +753,23 @@ def test_minigraph_sub_port_no_vlan_member(self, check_stderr=True): try: print('\n Change device type to %s' % (BACKEND_LEAF_ROUTER)) if check_stderr: - output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (LEAF_ROUTER, BACKEND_LEAF_ROUTER), self.sample_graph], stderr=subprocess.STDOUT) + output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (LEAF_ROUTER, BACKEND_LEAF_ROUTER, self.sample_graph), stderr=subprocess.STDOUT, shell=True) else: - output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (LEAF_ROUTER, BACKEND_LEAF_ROUTER), self.sample_graph]) + output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (LEAF_ROUTER, BACKEND_LEAF_ROUTER, self.sample_graph), shell=True) self.test_jinja_expression(self.sample_graph, self.port_config, BACKEND_LEAF_ROUTER) self.verify_no_vlan_member() finally: print('\n Change device type back to %s' % (LEAF_ROUTER)) if check_stderr: - output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (BACKEND_LEAF_ROUTER, LEAF_ROUTER), self.sample_graph], stderr=subprocess.STDOUT) + 
output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (BACKEND_LEAF_ROUTER, LEAF_ROUTER, self.sample_graph), stderr=subprocess.STDOUT, shell=True) else: - output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (BACKEND_LEAF_ROUTER, LEAF_ROUTER), self.sample_graph]) + output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (BACKEND_LEAF_ROUTER, LEAF_ROUTER, self.sample_graph), shell=True) self.test_jinja_expression(self.sample_graph, self.port_config, LEAF_ROUTER) def verify_no_vlan_member(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "VLAN_MEMBER"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "VLAN_MEMBER"' output = self.run_script(argument) self.assertEqual(output.strip(), "{}") @@ -785,33 +787,33 @@ def verify_sub_intf(self, **kwargs): try: print('\n Change device type to %s' % (BACKEND_TOR_ROUTER)) if check_stderr: - output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (TOR_ROUTER, BACKEND_TOR_ROUTER), graph_file], stderr=subprocess.STDOUT) + output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (TOR_ROUTER, BACKEND_TOR_ROUTER, graph_file), stderr=subprocess.STDOUT, shell=True) else: - output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (TOR_ROUTER, BACKEND_TOR_ROUTER), graph_file]) + output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (TOR_ROUTER, BACKEND_TOR_ROUTER, graph_file), shell=True) self.test_jinja_expression(graph_file, self.port_config, BACKEND_TOR_ROUTER) # INTERFACE table does not exist - argument = ['-m', graph_file, '-p', self.port_config, '-v', "INTERFACE"] + argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "INTERFACE"' output = self.run_script(argument) self.assertEqual(output.strip(), "") # PORTCHANNEL_INTERFACE table does not exist - argument = ['-m', graph_file, '-p', self.port_config, '-v', "PORTCHANNEL_INTERFACE"] + argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "PORTCHANNEL_INTERFACE"' 
output = self.run_script(argument) self.assertEqual(output.strip(), "") # SLB and BGP Monitor table does not exist - argument = ['-m', graph_file, '-p', self.port_config, '-v', "BGP_PEER_RANGE"] + argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "BGP_PEER_RANGE"' output = self.run_script(argument) self.assertEqual(output.strip(), "{}") - argument = ['-m', graph_file, '-p', self.port_config, '-v', "BGP_MONITORS"] + argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "BGP_MONITORS"' output = self.run_script(argument) self.assertEqual(output.strip(), "{}") # ACL_TABLE should not contain EVERFLOW related entries - argument = ['-m', graph_file, '-p', self.port_config, '-v', "ACL_TABLE"] + argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "ACL_TABLE"' output = self.run_script(argument) sample_output = utils.to_dict(output.strip()).keys() assert 'DATAACL' in sample_output, sample_output @@ -827,7 +829,7 @@ def verify_sub_intf(self, **kwargs): self.test_minigraph_vxlan(graph_file=graph_file) # VLAN_SUB_INTERFACE - argument = ['-m', graph_file, '-p', self.port_config, '-v', 'VLAN_SUB_INTERFACE'] + argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v VLAN_SUB_INTERFACE' output = self.run_script(argument) print(output.strip()) # not a usecase to parse SubInterfaces under PortChannel @@ -860,24 +862,24 @@ def verify_sub_intf(self, **kwargs): finally: print('\n Change device type back to %s' % (TOR_ROUTER)) if check_stderr: - output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (BACKEND_TOR_ROUTER, TOR_ROUTER), graph_file], stderr=subprocess.STDOUT) + output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (BACKEND_TOR_ROUTER, TOR_ROUTER, graph_file), stderr=subprocess.STDOUT, shell=True) else: - output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (BACKEND_TOR_ROUTER, TOR_ROUTER), graph_file]) + output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (BACKEND_TOR_ROUTER, 
TOR_ROUTER, graph_file), shell=True) self.test_jinja_expression(graph_file, self.port_config, TOR_ROUTER) def test_show_run_acl(self): - argument = ['-a', '{"key1":"value"}', '--var-json', 'ACL_RULE'] + argument = '-a \'{"key1":"value"}\' --var-json ACL_RULE' output = self.run_script(argument) self.assertEqual(output, '') def test_show_run_interfaces(self): - argument = ['-a', '{"key1":"value"}', '--var-json', 'INTERFACE'] + argument = '-a \'{"key1":"value"}\' --var-json INTERFACE' output = self.run_script(argument) self.assertEqual(output, '') def test_minigraph_voq_metadata(self): - argument = ["-j", self.macsec_profile, "-m", self.sample_graph_voq, "-p", self.voq_port_config, "--var-json", "DEVICE_METADATA"] + argument = "-j {} -m {} -p {} --var-json DEVICE_METADATA".format(self.macsec_profile, self.sample_graph_voq, self.voq_port_config) output = json.loads(self.run_script(argument)) self.assertEqual(output['localhost']['asic_name'], 'Asic0') self.assertEqual(output['localhost']['switch_id'], '0') @@ -885,7 +887,7 @@ def test_minigraph_voq_metadata(self): self.assertEqual(output['localhost']['max_cores'], '16') def test_minigraph_voq_system_ports(self): - argument = ["-j", self.macsec_profile, "-m", self.sample_graph_voq, "-p", self.voq_port_config, "--var-json", "SYSTEM_PORT"] + argument = "-j {} -m {} -p {} --var-json SYSTEM_PORT".format(self.macsec_profile, self.sample_graph_voq, self.voq_port_config) self.assertDictEqual( json.loads(self.run_script(argument)), { @@ -904,7 +906,7 @@ def test_minigraph_voq_system_ports(self): ) def test_minigraph_voq_port_macsec_enabled(self): - argument = ['-j', self.macsec_profile, '-m', self.sample_graph_voq, '-p', self.voq_port_config, '-v', "PORT[\'Ethernet0\']"] + argument = '-j "' + self.macsec_profile + '" -m "' + self.sample_graph_voq + '" -p "' + self.voq_port_config + '" -v "PORT[\'Ethernet0\']"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -912,7 +914,7 @@ def 
test_minigraph_voq_port_macsec_enabled(self): ) def test_minigraph_voq_inband_interface_port(self): - argument = ["-j", self.macsec_profile, "-m", self.sample_graph_voq, "-p", self.voq_port_config, "--var-json", "VOQ_INBAND_INTERFACE"] + argument = "-j {} -m {} -p {} --var-json VOQ_INBAND_INTERFACE".format(self.macsec_profile, self.sample_graph_voq, self.voq_port_config) output = self.run_script(argument) output_dict = utils.to_dict(output.strip()) self.assertDictEqual( @@ -925,7 +927,7 @@ def test_minigraph_voq_inband_interface_port(self): ) def test_minigraph_voq_inband_port(self): - argument = ["-j", self.macsec_profile, "-m", self.sample_graph_voq, "-p", self.voq_port_config, "--var-json", "PORT"] + argument = "-j {} -m {} -p {} --var-json PORT".format(self.macsec_profile, self.sample_graph_voq, self.voq_port_config) output = self.run_script(argument) output_dict = utils.to_dict(output.strip()) self.assertDictEqual( @@ -943,7 +945,7 @@ def test_minigraph_voq_inband_port(self): }) def test_minigraph_voq_recirc_ports(self): - argument = ["-j", self.macsec_profile, "-m", self.sample_graph_voq, "-p", self.voq_port_config, "--var-json", "PORT"] + argument = "-j {} -m {} -p {} --var-json PORT".format(self.macsec_profile, self.sample_graph_voq, self.voq_port_config) output = self.run_script(argument) output_dict = utils.to_dict(output.strip()) self.assertDictEqual( @@ -961,7 +963,7 @@ def test_minigraph_voq_recirc_ports(self): }) def test_minigraph_dhcp(self): - argument = ['-m', self.sample_graph_simple_case, '-p', self.port_config, '-v', 'DHCP_RELAY'] + argument = '-m "' + self.sample_graph_simple_case + '" -p "' + self.port_config + '" -v DHCP_RELAY' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -972,7 +974,7 @@ def test_minigraph_dhcp(self): ) def test_minigraph_bgp_packet_chassis_peer(self): - argument = ['-m', self.packet_chassis_graph, '-p', self.packet_chassis_port_ini, '-n', "asic1", '-v', 
"BGP_INTERNAL_NEIGHBOR[\'8.0.0.1\']"] + argument = '-m "' + self.packet_chassis_graph + '" -p "' + self.packet_chassis_port_ini + '" -n "' + "asic1" + '" -v "BGP_INTERNAL_NEIGHBOR[\'8.0.0.1\']"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -980,14 +982,14 @@ def test_minigraph_bgp_packet_chassis_peer(self): ) def test_minigraph_bgp_packet_chassis_static_route(self): - argument = ['-m', self.packet_chassis_graph, '-p', self.packet_chassis_port_ini, '-v', "STATIC_ROUTE"] + argument = '-m "' + self.packet_chassis_graph + '" -p "' + self.packet_chassis_port_ini + '" -v "STATIC_ROUTE"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), utils.to_dict("{'8.0.0.1/32': {'nexthop': '192.168.1.2,192.168.2.2', 'ifname': 'PortChannel40,PortChannel50', 'advertise':'false'}}") ) - argument = ['-m', self.packet_chassis_graph, '-p', self.packet_chassis_port_ini, '-n', "asic1", '-v', "STATIC_ROUTE"] + argument = '-m "' + self.packet_chassis_graph + '" -p "' + self.packet_chassis_port_ini + '" -n "' + "asic1" + '" -v "STATIC_ROUTE"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -995,7 +997,7 @@ def test_minigraph_bgp_packet_chassis_static_route(self): ) def test_minigraph_bgp_packet_chassis_vlan_subintf(self): - argument = ['-m', self.packet_chassis_graph, '-p', self.packet_chassis_port_ini, '-n', "asic1", '-v', "VLAN_SUB_INTERFACE"] + argument = '-m "' + self.packet_chassis_graph + '" -p "' + self.packet_chassis_port_ini + '" -n "' + "asic1" + '" -v "VLAN_SUB_INTERFACE"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -1003,14 +1005,14 @@ def test_minigraph_bgp_packet_chassis_vlan_subintf(self): ) def test_minigraph_voq_400g_zr_port_config(self): - argument = ["-j", self.macsec_profile, "-m", self.sample_graph_voq, "-p", self.voq_port_config, "-v" "PORT[\'Ethernet4\']"] + argument = "-j {} -m {} -p {} -v 
\"PORT[\'Ethernet4\']\"".format(self.macsec_profile, self.sample_graph_voq, self.voq_port_config) output = self.run_script(argument) output_dict = utils.to_dict(output.strip()) self.assertEqual(output_dict['tx_power'], '-10') self.assertEqual(output_dict['laser_freq'], 195875) def test_minigraph_packet_chassis_400g_zr_port_config(self): - argument = ["-m", self.packet_chassis_graph, "-p", self.packet_chassis_port_ini, "-n", "asic1", "-v", "PORT[\'Ethernet13\']"] + argument = "-m {} -p {} -n asic1 -v \"PORT[\'Ethernet13\']\"".format(self.packet_chassis_graph, self.packet_chassis_port_ini) output = self.run_script(argument) output_dict = utils.to_dict(output.strip()) self.assertEqual(output_dict['tx_power'], '7.5') diff --git a/src/sonic-config-engine/tests/test_cfggen_from_yang.py b/src/sonic-config-engine/tests/test_cfggen_from_yang.py index ddc77c8b0311..801340ea4722 100644 --- a/src/sonic-config-engine/tests/test_cfggen_from_yang.py +++ b/src/sonic-config-engine/tests/test_cfggen_from_yang.py @@ -4,7 +4,6 @@ import os import tests.common_utils as utils -from sonic_py_common.general import getstatusoutput_noshell #TODO: Remove this fixuture once SONiC moves to python3.x @@ -22,18 +21,20 @@ class TestCfgGen(object): @pytest.fixture(autouse=True) def setup_teardown(self): self.test_dir = os.path.dirname(os.path.realpath(__file__)) - self.script_file = [utils.PYTHON_INTERPRETTER, os.path.join( - self.test_dir, '..', 'sonic-cfggen')] + self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join( + self.test_dir, '..', 'sonic-cfggen') self.sample_yang_file = os.path.join(self.test_dir, 'test_yang_data.json') def run_script(self, arg, check_stderr=False): - print('\n Running sonic-cfggen ', arg) + print('\n Running sonic-cfggen ' + arg) if check_stderr: - output = subprocess.check_output(self.script_file + arg, - stderr=subprocess.STDOUT) + output = subprocess.check_output(self.script_file + ' ' + arg, + stderr=subprocess.STDOUT, + shell=True) else: - output = 
subprocess.check_output(self.script_file + arg) + output = subprocess.check_output(self.script_file + ' ' + arg, + shell=True) if utils.PY3x: output = output.decode() @@ -47,31 +48,32 @@ def run_script(self, arg, check_stderr=False): return output def run_diff(self, file1, file2): - _, output = getstatusoutput_noshell(['diff', '-u', file1, file2]) - return output + return subprocess.check_output('diff -u {} {} || true'.format( + file1, file2), + shell=True) def run_script_with_yang_arg(self, arg, check_stderr=False): - args = ["-Y", self.sample_yang_file] + arg + args = "-Y {} {}".format(self.sample_yang_file, arg) return self.run_script(arg=args, check_stderr=check_stderr) def test_print_data(self): - arg = ["--print-data"] + arg = "--print-data" output = self.run_script_with_yang_arg(arg) assert len(output.strip()) > 0 def test_jinja_expression(self, expected_router_type='LeafRouter'): - arg = ["-v", "DEVICE_METADATA[\'localhost\'][\'type\']"] + arg = " -v \"DEVICE_METADATA[\'localhost\'][\'type\']\" " output = self.run_script_with_yang_arg(arg) assert output.strip() == expected_router_type def test_hwsku(self): - arg = ["-v", "DEVICE_METADATA[\'localhost\'][\'hwsku\']"] + arg = "-v \"DEVICE_METADATA[\'localhost\'][\'hwsku\']\" " output = self.run_script_with_yang_arg(arg) assert output.strip() == "Force10-S6000" def test_device_metadata(self): - arg = ["--var-json", "DEVICE_METADATA"] + arg = "--var-json \"DEVICE_METADATA\" " output = json.loads(self.run_script_with_yang_arg(arg)) assert (output['localhost'] == {\ 'bgp_asn': '65100', @@ -85,7 +87,7 @@ def test_device_metadata(self): def test_port_table(self): - arg = ["--var-json", "PORT"] + arg = "--var-json \"PORT\"" output = json.loads(self.run_script_with_yang_arg(arg)) assert(output == \ {'Ethernet0': {'admin_status': 'up', 'alias': 'eth0', 'description': 'Ethernet0', 'fec': 'rs', 'lanes': '65, 66', 'mtu': '9100', 'pfc_asym': 'on', 'speed': '40000'}, @@ -99,7 +101,7 @@ def test_port_table(self): }) def 
test_portchannel_table(self): - arg = ["--var-json", "PORTCHANNEL"] + arg = "--var-json \"PORTCHANNEL\"" output = json.loads(self.run_script_with_yang_arg(arg)) assert(output == \ {'PortChannel1001': {'admin_status': 'up', @@ -114,7 +116,7 @@ def test_portchannel_table(self): 'mtu': '9100'}}) def test_portchannel_member_table(self): - arg = ["--var-json", "PORTCHANNEL_MEMBER"] + arg = "--var-json \"PORTCHANNEL_MEMBER\"" output = json.loads(self.run_script_with_yang_arg(arg)) assert(output ==\ { "PortChannel1001|Ethernet0": {}, @@ -124,7 +126,7 @@ def test_portchannel_member_table(self): }) def test_interface_table(self): - arg = ["--var-json", "INTERFACE"] + arg = "--var-json \"INTERFACE\"" output = json.loads(self.run_script_with_yang_arg(arg)) assert(output =={\ "Ethernet8": {}, @@ -148,7 +150,7 @@ def test_interface_table(self): }) def test_portchannel_interface_table(self): - arg = ["--var-json", "PORTCHANNEL_INTERFACE"] + arg = "--var-json \"PORTCHANNEL_INTERFACE\"" output = json.loads(self.run_script_with_yang_arg(arg)) assert(output =={\ "PortChannel1001|10.0.0.1/31": {}, @@ -156,7 +158,7 @@ def test_portchannel_interface_table(self): }) def test_loopback_table(self): - arg = ["--var-json", "LOOPBACK_INTERFACE"] + arg = "--var-json \"LOOPBACK_INTERFACE\"" output = json.loads(self.run_script_with_yang_arg(arg)) assert(output == {\ "Loopback0": {}, @@ -171,7 +173,7 @@ def test_loopback_table(self): }) def test_acl_table(self): - arg = ["--var-json", "ACL_TABLE"] + arg = "--var-json \"ACL_TABLE\"" output = json.loads(self.run_script_with_yang_arg(arg)) assert(output == {\ 'DATAACL': {'policy_desc': 'DATAACL', 'ports': ['PortChannel1001','PortChannel1002'], 'stage': 'ingress', 'type': 'L3'}, @@ -181,7 +183,7 @@ def test_acl_table(self): 'SSH_ONLY': {'policy_desc': 'SSH_ONLY', 'services': ['SSH'], 'stage': 'ingress', 'type': 'CTRLPLANE'}}) def test_acl_rule(self): - arg = ["--var-json", "ACL_RULE"] + arg = "--var-json \"ACL_RULE\"" output = 
json.loads(self.run_script_with_yang_arg(arg)) assert(output == {\ "DATAACL|Rule1": { @@ -199,7 +201,7 @@ def test_acl_rule(self): }) def test_vlan_table(self): - arg = ["--var-json", "VLAN"] + arg = "--var-json \"VLAN\"" output = json.loads(self.run_script_with_yang_arg(arg)) assert(output == {\ "Vlan100": { @@ -216,7 +218,7 @@ def test_vlan_table(self): }) def test_vlan_interface(self): - arg = ["--var-json", "VLAN_INTERFACE"] + arg = "--var-json \"VLAN_INTERFACE\"" output = json.loads(self.run_script_with_yang_arg(arg)) assert(output == {\ "Vlan100": {}, @@ -231,7 +233,7 @@ def test_vlan_interface(self): }) def test_vlan_member(self): - arg = ["--var-json", "VLAN_MEMBER"] + arg = "--var-json \"VLAN_MEMBER\"" output = json.loads(self.run_script_with_yang_arg(arg)) assert(output == {\ "Vlan100|Ethernet24": { @@ -243,7 +245,7 @@ def test_vlan_member(self): }) def test_vlan_crm(self): - arg = ["--var-json", "CRM"] + arg = "--var-json \"CRM\"" output = json.loads(self.run_script_with_yang_arg(arg)) assert(output == {\ "Config": { diff --git a/src/sonic-config-engine/tests/test_cfggen_pfx_filter.py b/src/sonic-config-engine/tests/test_cfggen_pfx_filter.py index b3cad3aa2152..1ac2b7f7f5f3 100644 --- a/src/sonic-config-engine/tests/test_cfggen_pfx_filter.py +++ b/src/sonic-config-engine/tests/test_cfggen_pfx_filter.py @@ -9,14 +9,13 @@ class TestPfxFilter(TestCase): def test_comprehensive(self): # Generate output data_dir = "tests/data/pfx_filter" - output_file = "/tmp/result_1.txt" - cmd = [utils.PYTHON_INTERPRETTER, "./sonic-cfggen", "-j", "{}/param_1.json".format(data_dir), "-t", "{}/tmpl_1.txt.j2".format(data_dir)] - output = subprocess.check_output(cmd, universal_newlines=True) - with open(output_file, 'w') as f: - f.write(output) + cmd = "{} ./sonic-cfggen -j {}/param_1.json -t {}/tmpl_1.txt.j2 > /tmp/result_1.txt".format( + utils.PYTHON_INTERPRETTER, data_dir, data_dir + ) + subprocess.check_output(cmd, shell=True) # Compare outputs - cmd = ["diff", "-u", 
"tests/data/pfx_filter/result_1.txt", "/tmp/result_1.txt"] + cmd = "diff -u tests/data/pfx_filter/result_1.txt /tmp/result_1.txt" try: - res = subprocess.check_output(cmd) + res = subprocess.check_output(cmd, shell=True) except subprocess.CalledProcessError as e: assert False, "Wrong output. return code: %d, Diff: %s" % (e.returncode, e.output) diff --git a/src/sonic-config-engine/tests/test_cfggen_platformJson.py b/src/sonic-config-engine/tests/test_cfggen_platformJson.py index 5d39fd2f3660..0af361718b99 100644 --- a/src/sonic-config-engine/tests/test_cfggen_platformJson.py +++ b/src/sonic-config-engine/tests/test_cfggen_platformJson.py @@ -3,7 +3,7 @@ import os import subprocess import sys -import ast + import tests.common_utils as utils from unittest import TestCase @@ -21,17 +21,17 @@ class TestCfgGenPlatformJson(TestCase): def setUp(self): self.test_dir = os.path.dirname(os.path.realpath(__file__)) - self.script_file = [utils.PYTHON_INTERPRETTER, os.path.join(self.test_dir, '..', 'sonic-cfggen')] + self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 'sonic-cfggen') self.platform_sample_graph = os.path.join(self.test_dir, 'platform-sample-graph.xml') self.platform_json = os.path.join(self.test_dir, 'sample_platform.json') self.hwsku_json = os.path.join(self.test_dir, 'sample_hwsku.json') def run_script(self, argument, check_stderr=False): - print('\n Running sonic-cfggen ', argument) + print('\n Running sonic-cfggen ' + argument) if check_stderr: - output = subprocess.check_output(self.script_file + argument, stderr=subprocess.STDOUT) + output = subprocess.check_output(self.script_file + ' ' + argument, stderr=subprocess.STDOUT, shell=True) else: - output = subprocess.check_output(self.script_file + argument) + output = subprocess.check_output(self.script_file + ' ' + argument, shell=True) if utils.PY3x: output = output.decode() @@ -44,18 +44,18 @@ def run_script(self, argument, check_stderr=False): return output def 
test_dummy_run(self): - argument = [] + argument = '' output = self.run_script(argument) self.assertEqual(output, '') def test_print_data(self): - argument = ['-m', self.platform_sample_graph, '-p', self.platform_json, '--print-data'] + argument = '-m "' + self.platform_sample_graph + '" -p "' + self.platform_json + '" --print-data' output = self.run_script(argument) self.assertTrue(len(output.strip()) > 0) # Check whether all interfaces present or not as per platform.json def test_platform_json_interfaces_keys(self): - argument = ['-m', self.platform_sample_graph, '-p', self.platform_json, '-S', self.hwsku_json, '-v', "PORT.keys()|list"] + argument = '-m "' + self.platform_sample_graph + '" -p "' + self.platform_json + '" -S "' + self.hwsku_json + '" -v "PORT.keys()|list"' output = self.run_script(argument) self.maxDiff = None @@ -71,24 +71,24 @@ def test_platform_json_interfaces_keys(self): 'Ethernet139', 'Ethernet140', 'Ethernet141', 'Ethernet142', 'Ethernet144' ] - self.assertEqual(sorted(ast.literal_eval(output.strip())), sorted(expected)) + self.assertEqual(sorted(eval(output.strip())), sorted(expected)) # Check specific Interface with it's proper configuration as per platform.json def test_platform_json_specific_ethernet_interfaces(self): - argument = ['-m', self.platform_sample_graph, '-p', self.platform_json, '-S', self.hwsku_json, '-v', "PORT[\'Ethernet8\']"] + argument = '-m "' + self.platform_sample_graph + '" -p "' + self.platform_json + '" -S "' + self.hwsku_json + '" -v "PORT[\'Ethernet8\']"' output = self.run_script(argument) self.maxDiff = None expected = "{'index': '3', 'lanes': '8', 'description': 'Eth3/1', 'mtu': '9100', 'alias': 'Eth3/1', 'pfc_asym': 'off', 'speed': '25000', 'tpid': '0x8100'}" self.assertEqual(utils.to_dict(output.strip()), utils.to_dict(expected)) - argument = ['-m', self.platform_sample_graph, '-p', self.platform_json, '-S', self.hwsku_json, '-v', "PORT[\'Ethernet112\']"] + argument = '-m "' + self.platform_sample_graph + '" 
-p "' + self.platform_json + '" -S "' + self.hwsku_json + '" -v "PORT[\'Ethernet112\']"' output = self.run_script(argument) self.maxDiff = None expected = "{'index': '29', 'lanes': '112', 'description': 'Eth29/1', 'mtu': '9100', 'alias': 'Eth29/1', 'pfc_asym': 'off', 'speed': '25000', 'tpid': '0x8100'}" self.assertEqual(utils.to_dict(output.strip()), utils.to_dict(expected)) - argument = ['-m', self.platform_sample_graph, '-p', self.platform_json, '-S', self.hwsku_json, '-v', "PORT[\'Ethernet4\']"] + argument = '-m "' + self.platform_sample_graph + '" -p "' + self.platform_json + '" -S "' + self.hwsku_json + '" -v "PORT[\'Ethernet4\']"' output = self.run_script(argument) self.maxDiff = None expected = "{'index': '2', 'lanes': '4,5', 'description': 'Eth2/1', 'admin_status': 'up', 'mtu': '9100', 'alias': 'Eth2/1', 'pfc_asym': 'off', 'speed': '50000', 'tpid': '0x8100'}" @@ -97,7 +97,7 @@ def test_platform_json_specific_ethernet_interfaces(self): # Check all Interface with it's proper configuration as per platform.json def test_platform_json_all_ethernet_interfaces(self): - argument = ['-m', self.platform_sample_graph, '-p', self.platform_json, '-S', self.hwsku_json, '-v', "PORT"] + argument = '-m "' + self.platform_sample_graph + '" -p "' + self.platform_json + '" -S "' + self.hwsku_json + '" -v "PORT"' output = self.run_script(argument) self.maxDiff = None diff --git a/src/sonic-config-engine/tests/test_cfggen_t2_chassis_fe.py b/src/sonic-config-engine/tests/test_cfggen_t2_chassis_fe.py index 9bf3fc8a3ed5..a3d6d02a7ff2 100644 --- a/src/sonic-config-engine/tests/test_cfggen_t2_chassis_fe.py +++ b/src/sonic-config-engine/tests/test_cfggen_t2_chassis_fe.py @@ -1,5 +1,6 @@ import os import subprocess + import tests.common_utils as utils from unittest import TestCase @@ -9,18 +10,18 @@ class TestCfgGenT2ChassisFe(TestCase): def setUp(self): self.test_dir = os.path.dirname(os.path.realpath(__file__)) - self.script_file = [utils.PYTHON_INTERPRETTER, 
os.path.join(self.test_dir, '..', 'sonic-cfggen')] + self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 'sonic-cfggen') self.sample_graph_t2_chassis_fe = os.path.join(self.test_dir, 't2-chassis-fe-graph.xml') self.sample_graph_t2_chassis_fe_vni = os.path.join(self.test_dir, 't2-chassis-fe-graph-vni.xml') self.sample_graph_t2_chassis_fe_pc = os.path.join(self.test_dir, 't2-chassis-fe-graph-pc.xml') self.t2_chassis_fe_port_config = os.path.join(self.test_dir, 't2-chassis-fe-port-config.ini') def run_script(self, argument, check_stderr=False): - print('\n Running sonic-cfggen ' + ' '.join(argument)) + print('\n Running sonic-cfggen ' + argument) if check_stderr: - output = subprocess.check_output(self.script_file + argument, stderr=subprocess.STDOUT) + output = subprocess.check_output(self.script_file + ' ' + argument, stderr=subprocess.STDOUT, shell=True) else: - output = subprocess.check_output(self.script_file + argument) + output = subprocess.check_output(self.script_file + ' ' + argument, shell=True) if utils.PY3x: output = output.decode() @@ -33,12 +34,12 @@ def run_script(self, argument, check_stderr=False): return output def test_minigraph_t2_chassis_fe_type(self): - argument = ['-m', self.sample_graph_t2_chassis_fe, '-p', self.t2_chassis_fe_port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'type\']"] + argument = '-m "' + self.sample_graph_t2_chassis_fe + '" -p "' + self.t2_chassis_fe_port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'type\']"' output = self.run_script(argument) self.assertEqual(output.strip(), 'SpineChassisFrontendRouter') def test_minigraph_t2_chassis_fe_interfaces(self): - argument = ['-m', self.sample_graph_t2_chassis_fe, '-p', self.t2_chassis_fe_port_config, '-v', "INTERFACE"] + argument = '-m "' + self.sample_graph_t2_chassis_fe + '" -p "' + self.t2_chassis_fe_port_config + '" -v "INTERFACE"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -52,7 +53,7 @@ def 
test_minigraph_t2_chassis_fe_interfaces(self): ) ) def test_minigraph_t2_chassis_fe_pc_interfaces(self): - argument = ['-m', self.sample_graph_t2_chassis_fe_pc, '-p', self.t2_chassis_fe_port_config, '-v', "PORTCHANNEL_INTERFACE"] + argument = '-m "' + self.sample_graph_t2_chassis_fe_pc + '" -p "' + self.t2_chassis_fe_port_config + '" -v "PORTCHANNEL_INTERFACE"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -69,17 +70,17 @@ def test_minigraph_t2_chassis_fe_pc_interfaces(self): # Test a minigraph file where VNI is not specified # Default VNI is 8000 def test_minigraph_t2_chassis_fe_vnet_default(self): - argument = ['-m', self.sample_graph_t2_chassis_fe, '-p', self.t2_chassis_fe_port_config, '-v', "VNET"] + argument = '-m "' + self.sample_graph_t2_chassis_fe + '" -p "' + self.t2_chassis_fe_port_config + '" -v "VNET"' output = self.run_script(argument) self.assertEqual(output.strip(), "{'VnetFE': {'vxlan_tunnel': 'TunnelInt', 'vni': 8000}}") # Test a minigraph file where VNI is specified def test_minigraph_t2_chassis_fe_vnet(self): - argument = ['-m', self.sample_graph_t2_chassis_fe_vni, '-p', self.t2_chassis_fe_port_config, '-v', "VNET"] + argument = '-m "' + self.sample_graph_t2_chassis_fe_vni + '" -p "' + self.t2_chassis_fe_port_config + '" -v "VNET"' output = self.run_script(argument) self.assertEqual(output.strip(), "{'VnetFE': {'vxlan_tunnel': 'TunnelInt', 'vni': 9000}}") def test_minigraph_t2_chassis_fe_vxlan(self): - argument = ['-m', self.sample_graph_t2_chassis_fe, '-p', self.t2_chassis_fe_port_config, '-v', "VXLAN_TUNNEL"] + argument = '-m "' + self.sample_graph_t2_chassis_fe + '" -p "' + self.t2_chassis_fe_port_config + '" -v "VXLAN_TUNNEL"' output = self.run_script(argument) self.assertEqual(output.strip(), "{'TunnelInt': {'src_ip': '4.0.0.0'}}") diff --git a/src/sonic-config-engine/tests/test_frr.py b/src/sonic-config-engine/tests/test_frr.py index c30bc8b4969f..3934f8c7d70c 100644 --- 
a/src/sonic-config-engine/tests/test_frr.py +++ b/src/sonic-config-engine/tests/test_frr.py @@ -3,13 +3,13 @@ import subprocess import tests.common_utils as utils -from sonic_py_common.general import getstatusoutput_noshell + from unittest import TestCase class TestCfgGen(TestCase): def setUp(self): self.test_dir = os.path.dirname(os.path.realpath(__file__)) - self.script_file = [utils.PYTHON_INTERPRETTER, os.path.join(self.test_dir, '..', 'sonic-cfggen')] + self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 'sonic-cfggen') self.t0_minigraph = os.path.join(self.test_dir, 't0-sample-graph.xml') self.t0_port_config = os.path.join(self.test_dir, 't0-sample-port-config.ini') self.output_file = os.path.join(self.test_dir, 'output') @@ -21,19 +21,15 @@ def tearDown(self): pass - def run_script(self, argument, check_stderr=False, output_file=None): + def run_script(self, argument, check_stderr=False): # print '\n Running sonic-cfggen ' + argument - if check_stderr: - output = subprocess.check_output(self.script_file + argument, stderr=subprocess.STDOUT) + output = subprocess.check_output(self.script_file + ' ' + argument, stderr=subprocess.STDOUT, shell=True) else: - output = subprocess.check_output(self.script_file + argument) + output = subprocess.check_output(self.script_file + ' ' + argument, shell=True) if utils.PY3x: output = output.decode() - if output_file: - with open(output_file, 'w') as f: - f.write(output) linecount = output.strip().count('\n') if linecount <= 0: @@ -43,7 +39,8 @@ def run_script(self, argument, check_stderr=False, output_file=None): return output def run_diff(self, file1, file2): - _, output = getstatusoutput_noshell(['diff', '-u', file1, file2]) + output = subprocess.check_output('diff -u {} {} || true'.format(file1, file2), shell=True) + if utils.PY3x: output = output.decode() @@ -53,8 +50,9 @@ def run_case(self, template, target): template_dir = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 
'docker-fpm-frr', "frr") conf_template = os.path.join(template_dir, template) constants = os.path.join(self.test_dir, '..', '..', '..', 'files', 'image_config', 'constants', 'constants.yml') - cmd = ['-m', self.t0_minigraph, '-p', self.t0_port_config, '-y', constants, '-t', conf_template, '-T', template_dir] - self.run_script(cmd, output_file=self.output_file) + cmd_args = self.t0_minigraph, self.t0_port_config, constants, conf_template, template_dir, self.output_file + cmd = "-m %s -p %s -y %s -t %s -T %s > %s" % cmd_args + self.run_script(cmd) original_filename = os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, target) r = filecmp.cmp(original_filename, self.output_file) @@ -71,3 +69,4 @@ def test_bgpd_frr(self): def test_zebra_frr(self): self.assertTrue(*self.run_case('zebra/zebra.conf.j2', 'zebra_frr.conf')) + diff --git a/src/sonic-config-engine/tests/test_j2files.py b/src/sonic-config-engine/tests/test_j2files.py index ad86f1cb4b2a..3ac219468e43 100644 --- a/src/sonic-config-engine/tests/test_j2files.py +++ b/src/sonic-config-engine/tests/test_j2files.py @@ -5,13 +5,12 @@ from unittest import TestCase import tests.common_utils as utils -from sonic_py_common.general import getstatusoutput_noshell_pipe class TestJ2Files(TestCase): def setUp(self): self.test_dir = os.path.dirname(os.path.realpath(__file__)) - self.script_file = [utils.PYTHON_INTERPRETTER, os.path.join(self.test_dir, '..', 'sonic-cfggen')] + self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 'sonic-cfggen') self.simple_minigraph = os.path.join(self.test_dir, 'simple-sample-graph.xml') self.port_data = os.path.join(self.test_dir, 'sample-port-data.json') self.ztp = os.path.join(self.test_dir, "sample-ztp.json") @@ -41,21 +40,17 @@ def setUp(self): self.output_file = os.path.join(self.test_dir, 'output') os.environ["CFGGEN_UNIT_TESTING"] = "2" - def run_script(self, argument, output_file=None): - print('CMD: sonic-cfggen ', argument) - output = 
subprocess.check_output(self.script_file + argument) + def run_script(self, argument): + print('CMD: sonic-cfggen ' + argument) + output = subprocess.check_output(self.script_file + ' ' + argument, shell=True) if utils.PY3x: output = output.decode() - if output_file: - with open(output_file, 'w') as f: - f.write(output) return output def run_diff(self, file1, file2): - _, output = getstatusoutput_noshell(['diff', '-u', file1, file2]) - return output + return subprocess.check_output('diff -u {} {} || true'.format(file1, file2), shell=True) def create_machine_conf(self, platform, vendor): file_exist = True @@ -64,24 +59,23 @@ def create_machine_conf(self, platform, vendor): 'dell': 'onie', 'mellanox': 'onie' } - echo_cmd1 = ["echo", '{}_platform={}'.format(mode[vendor], platform)] - echo_cmd2 = ["sudo", "tee", "-a", "/host/machine.conf"] + echo_cmd = "echo '{}_platform={}' | sudo tee -a /host/machine.conf > /dev/null".format(mode[vendor], platform) if not os.path.exists('/host/machine.conf'): file_exist = False if not os.path.isdir('/host'): dir_exist = False - subprocess.call(['sudo', 'mkdir', '/host']) - subprocess.call(['sudo', 'touch', '/host/machine.conf']) - getstatusoutput_noshell_pipe(echo_cmd1, echo_cmd2) + os.system('sudo mkdir /host') + os.system('sudo touch /host/machine.conf') + os.system(echo_cmd) return file_exist, dir_exist def remove_machine_conf(self, file_exist, dir_exist): if not file_exist: - subprocess.call(['sudo', 'rm', '-f', '/host/machine.conf']) + os.system('sudo rm -f /host/machine.conf') if not dir_exist: - subprocess.call(['sudo', 'rmdir', '/host']) + os.system('sudo rmdir /host') def modify_cable_len(self, base_file, file_dir): input_file = os.path.join(file_dir, base_file) @@ -101,76 +95,76 @@ def test_interfaces(self): interfaces_template = os.path.join(self.test_dir, '..', '..', '..', 'files', 'image_config', 'interfaces', 'interfaces.j2') # ZTP enabled - argument = ['-m', self.t0_minigraph_nomgmt, '-p', self.t0_port_config_tiny, 
'-j', self.ztp, '-j', self.port_data, '-a', '{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}', '-t', interfaces_template] - self.run_script(argument, output_file=self.output_file) + argument = '-m ' + self.t0_minigraph_nomgmt + ' -p ' + self.t0_port_config_tiny + ' -j ' + self.ztp + ' -j ' + self.port_data + ' -a \'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}\' -t ' + interfaces_template + '> ' + self.output_file + self.run_script(argument) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'interfaces_nomgmt_ztp'), self.output_file)) - argument = ['-m', self.t0_minigraph_nomgmt, '-p', self.t0_port_config_tiny, '-j', self.ztp_inband, '-j', self.port_data, '-a', '{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}', '-t', interfaces_template] - self.run_script(argument, output_file=self.output_file) + argument = '-m ' + self.t0_minigraph_nomgmt + ' -p ' + self.t0_port_config_tiny + ' -j ' + self.ztp_inband + ' -j ' + self.port_data + ' -a \'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}\' -t ' + interfaces_template + '> ' + self.output_file + self.run_script(argument) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'interfaces_nomgmt_ztp_inband'), self.output_file)) - argument = ['-m', self.t0_minigraph_nomgmt, '-p', self.t0_port_config_tiny, '-j', self.ztp_ip, '-j', self.port_data, '-a', '{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}', '-t', interfaces_template] - self.run_script(argument, output_file=self.output_file) + argument = '-m ' + self.t0_minigraph_nomgmt + ' -p ' + self.t0_port_config_tiny + ' -j ' + self.ztp_ip + ' -j ' + self.port_data + ' -a \'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}\' -t ' + interfaces_template + '> ' + self.output_file + self.run_script(argument) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'interfaces_nomgmt_ztp_ip'), self.output_file)) - argument = ['-m', self.t0_minigraph_nomgmt, '-p', self.t0_port_config_tiny, '-j', self.ztp_inband_ip, '-j', self.port_data, '-a', 
'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}', '-t', interfaces_template] - self.run_script(argument, output_file=self.output_file) + argument = '-m ' + self.t0_minigraph_nomgmt + ' -p ' + self.t0_port_config_tiny + ' -j ' + self.ztp_inband_ip + ' -j ' + self.port_data + ' -a \'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}\' -t ' + interfaces_template + '> ' + self.output_file + self.run_script(argument) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'interfaces_nomgmt_ztp_inband_ip'), self.output_file)) # ZTP disabled, MGMT_INTERFACE defined - argument = ['-m', self.t0_minigraph, '-p', self.t0_port_config, '-a', '{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}', '-t', interfaces_template] - self.run_script(argument, output_file=self.output_file) + argument = '-m ' + self.t0_minigraph + ' -p ' + self.t0_port_config + ' -a \'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}\' -t ' + interfaces_template + ' > ' + self.output_file + self.run_script(argument) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'interfaces'), self.output_file)) - argument = ['-m', self.t0_mvrf_minigraph, '-p', self.t0_port_config, '-a', '{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}', '-t', interfaces_template] - self.run_script(argument, output_file=self.output_file) + argument = '-m ' + self.t0_mvrf_minigraph + ' -p ' + self.t0_port_config + ' -a \'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}\' -t ' + interfaces_template + ' > ' + self.output_file + self.run_script(argument) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'mvrf_interfaces'), self.output_file)) - argument = ['-m', self.t0_minigraph_two_mgmt, '-p', self.t0_port_config, '-a', '{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}', '-t', interfaces_template] - self.run_script(argument, output_file=self.output_file) + argument = '-m ' + self.t0_minigraph_two_mgmt + ' -p ' + self.t0_port_config + ' -a \'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}\' -t ' + interfaces_template + ' > ' + self.output_file + 
self.run_script(argument) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'two_mgmt_interfaces'), self.output_file), self.output_file) # ZTP disabled, no MGMT_INTERFACE defined - argument = ['-m', self.t0_minigraph_nomgmt, '-p', self.t0_port_config, '-a', '{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}', '-t', interfaces_template] - self.run_script(argument, output_file=self.output_file) + argument = '-m ' + self.t0_minigraph_nomgmt + ' -p ' + self.t0_port_config + ' -a \'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}\' -t ' + interfaces_template + ' > ' + self.output_file + self.run_script(argument) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'interfaces_nomgmt'), self.output_file)) - argument = ['-m', self.t0_mvrf_minigraph_nomgmt, '-p', self.t0_port_config, '-a', '{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}', '-t', interfaces_template] - self.run_script(argument, output_file=self.output_file) + argument = '-m ' + self.t0_mvrf_minigraph_nomgmt + ' -p ' + self.t0_port_config + ' -a \'{\"hwaddr\":\"e4:1d:2d:a5:f3:ad\"}\' -t ' + interfaces_template + ' > ' + self.output_file + self.run_script(argument) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'mvrf_interfaces_nomgmt'), self.output_file)) def test_ports_json(self): ports_template = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-orchagent', 'ports.json.j2') - argument = ['-m', self.simple_minigraph, '-p', self.t0_port_config, '-t', ports_template] - self.run_script(argument, output_file=self.output_file) + argument = '-m ' + self.simple_minigraph + ' -p ' + self.t0_port_config + ' -t ' + ports_template + ' > ' + self.output_file + self.run_script(argument) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'ports.json'), self.output_file)) def test_dhcp_relay(self): # Test generation of wait_for_intf.sh dhc_sample_data = os.path.join(self.test_dir, 
"dhcp-relay-sample.json") template_path = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-dhcp-relay', 'wait_for_intf.sh.j2') - argument = ['-m', self.t0_minigraph, '-j', dhc_sample_data, '-p', self.t0_port_config, '-t', template_path] - self.run_script(argument, output_file=self.output_file) + argument = '-m ' + self.t0_minigraph + ' -j ' + dhc_sample_data + ' -p ' + self.t0_port_config + ' -t ' + template_path + ' > ' + self.output_file + self.run_script(argument) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'wait_for_intf.sh'), self.output_file)) # Test generation of docker-dhcp-relay.supervisord.conf template_path = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-dhcp-relay', 'docker-dhcp-relay.supervisord.conf.j2') - argument = ['-m', self.t0_minigraph, '-p', self.t0_port_config, '-t', template_path] - self.run_script(argument, output_file=self.output_file) + argument = '-m ' + self.t0_minigraph + ' -p ' + self.t0_port_config + ' -t ' + template_path + ' > ' + self.output_file + self.run_script(argument) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'docker-dhcp-relay.supervisord.conf'), self.output_file)) # Test generation of docker-dhcp-relay.supervisord.conf when a vlan is missing ip/ipv6 helpers template_path = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-dhcp-relay', 'docker-dhcp-relay.supervisord.conf.j2') - argument = ['-m', self.no_ip_helper_minigraph, '-p', self.t0_port_config, '-t', template_path] - self.run_script(argument, output_file=self.output_file) + argument = '-m ' + self.no_ip_helper_minigraph + ' -p ' + self.t0_port_config + ' -t ' + template_path + ' > ' + self.output_file + self.run_script(argument) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'docker-dhcp-relay-no-ip-helper.supervisord.conf'), self.output_file)) def test_radv(self): # Test generation of 
radvd.conf with multiple ipv6 prefixes template_path = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-router-advertiser', 'radvd.conf.j2') - argument = ['-m', self.radv_test_minigraph, '-p', self.t0_port_config, '-t', template_path] - self.run_script(argument, output_file=self.output_file) + argument = '-m ' + self.radv_test_minigraph + ' -p ' + self.t0_port_config + ' -t ' + template_path + ' > ' + self.output_file + self.run_script(argument) self.assertTrue(utils.cmp(os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'radvd.conf'), self.output_file)) def test_lldp(self): @@ -182,32 +176,32 @@ def test_lldp(self): # Test generation of lldpd.conf if IPv4 and IPv6 management interfaces exist mgmt_iface_ipv4_and_ipv6_json = os.path.join(self.test_dir, "data", "lldp", "mgmt_iface_ipv4_and_ipv6.json") - argument = ['-j', mgmt_iface_ipv4_and_ipv6_json, '-t', lldpd_conf_template] - self.run_script(argument, output_file=self.output_file) + argument = '-j {} -t {} > {}'.format(mgmt_iface_ipv4_and_ipv6_json, lldpd_conf_template, self.output_file) + self.run_script(argument) self.assertTrue(utils.cmp(expected_mgmt_ipv4_and_ipv6, self.output_file)) # Test generation of lldpd.conf if management interface IPv4 only exist mgmt_iface_ipv4_json = os.path.join(self.test_dir, "data", "lldp", "mgmt_iface_ipv4.json") - argument = ['-j', mgmt_iface_ipv4_json, '-t', lldpd_conf_template] - self.run_script(argument, output_file=self.output_file) + argument = '-j {} -t {} > {}'.format(mgmt_iface_ipv4_json, lldpd_conf_template, self.output_file) + self.run_script(argument) self.assertTrue(utils.cmp(expected_mgmt_ipv4, self.output_file)) # Test generation of lldpd.conf if Management interface IPv6 only exist mgmt_iface_ipv6_json = os.path.join(self.test_dir, "data", "lldp", "mgmt_iface_ipv6.json") - argument = ['-j', mgmt_iface_ipv6_json, '-t', lldpd_conf_template] - self.run_script(argument, output_file=self.output_file) + argument = '-j {} -t {} > 
{}'.format(mgmt_iface_ipv6_json, lldpd_conf_template, self.output_file) + self.run_script(argument) self.assertTrue(utils.cmp(expected_mgmt_ipv6, self.output_file)) def test_ipinip(self): ipinip_file = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-orchagent', 'ipinip.json.j2') - argument = ['-m', self.t0_minigraph, '-p', self.t0_port_config, '-t', ipinip_file] - self.run_script(argument, output_file=self.output_file) + argument = '-m ' + self.t0_minigraph + ' -p ' + self.t0_port_config + ' -t ' + ipinip_file + ' > ' + self.output_file + self.run_script(argument) sample_output_file = os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, 'ipinip.json') assert utils.cmp(sample_output_file, self.output_file), self.run_diff(sample_output_file, self.output_file) def test_l2switch_template(self): - argument = ['-k', 'Mellanox-SN2700', '--preset', 'l2', '-p', self.t0_port_config] + argument = '-k Mellanox-SN2700 --preset l2 -p ' + self.t0_port_config output = self.run_script(argument) output_json = json.loads(output) @@ -218,14 +212,14 @@ def test_l2switch_template(self): self.assertTrue(json.dumps(sample_output_json, sort_keys=True) == json.dumps(output_json, sort_keys=True)) template_dir = os.path.join(self.test_dir, '..', 'data', 'l2switch.j2') - argument = ['-t', template_dir, '-k', 'Mellanox-SN2700', '-p', self.t0_port_config] + argument = '-t ' + template_dir + ' -k Mellanox-SN2700 -p ' + self.t0_port_config output = self.run_script(argument) output_json = json.loads(output) self.assertTrue(json.dumps(sample_output_json, sort_keys=True) == json.dumps(output_json, sort_keys=True)) def test_l1_ports_template(self): - argument = ['-k', '32x1000Gb', '--preset', 'l1', '-p', self.l1_l3_port_config] + argument = '-k 32x1000Gb --preset l1 -p ' + self.l1_l3_port_config output = self.run_script(argument) output_json = json.loads(output) @@ -236,14 +230,14 @@ def test_l1_ports_template(self): self.assertTrue(json.dumps(sample_output_json, 
sort_keys=True) == json.dumps(output_json, sort_keys=True)) template_dir = os.path.join(self.test_dir, '..', 'data', 'l1intf.j2') - argument = ['-t', template_dir, '-k', '32x1000Gb', '-p', self.l1_l3_port_config] + argument = '-t ' + template_dir + ' -k 32x1000Gb -p ' + self.l1_l3_port_config output = self.run_script(argument) output_json = json.loads(output) self.assertTrue(json.dumps(sample_output_json, sort_keys=True) == json.dumps(output_json, sort_keys=True)) def test_l3_ports_template(self): - argument = ['-k', '32x1000Gb', '--preset', 'l3', '-p', self.l1_l3_port_config] + argument = '-k 32x1000Gb --preset l3 -p ' + self.l1_l3_port_config output = self.run_script(argument) output_json = json.loads(output) @@ -254,7 +248,7 @@ def test_l3_ports_template(self): self.assertTrue(json.dumps(sample_output_json, sort_keys=True) == json.dumps(output_json, sort_keys=True)) template_dir = os.path.join(self.test_dir, '..', 'data', 'l3intf.j2') - argument = ['-t', template_dir, '-k', '32x1000Gb', '-p', self.l1_l3_port_config] + argument = '-t ' + template_dir + ' -k 32x1000Gb -p ' + self.l1_l3_port_config output = self.run_script(argument) output_json = json.loads(output) @@ -276,7 +270,9 @@ def test_l2switch_template_dualtor(self): "Ethernet112", "Ethernet116", "Ethernet120", "Ethernet124" ] } - argument = ['-a', json.dumps(extra_args), '-k', 'Arista-7050CX3-32S-D48C8', '--preset', 'l2', '-p', self.t0_7050cx3_port_config] + argument = '-a \'{}\' -k Arista-7050CX3-32S-D48C8 --preset l2 -p {}'.format( + json.dumps(extra_args), self.t0_7050cx3_port_config + ) output = self.run_script(argument) output_json = json.loads(output) @@ -303,8 +299,8 @@ def do_test_qos_and_buffer_arista7800r3_48cq2_lc_render_template(self, platform, for template_file, cfg_file, sample_output_file in [(qos_file, 'qos_config.j2', 'qos-arista7800r3-48cq2-lc.json'), (buffer_file, 'buffers_config.j2', 'buffer-arista7800r3-48cq2-lc.json') ]: - argument = ['-m', self.arista7800r3_48cq2_lc_t2_minigraph, 
'-p', port_config_ini_file, '-t', template_file] - self.run_script(argument, output_file=self.output_file) + argument = '-m ' + self.arista7800r3_48cq2_lc_t2_minigraph + ' -p ' + port_config_ini_file + ' -t ' + template_file + ' > ' + self.output_file + self.run_script(argument) # cleanup cfg_file_new = os.path.join(arista_dir_path, cfg_file) @@ -339,8 +335,8 @@ def _test_qos_render_template(self, vendor, platform, sku, minigraph, expected): shutil.copy2(qos_config_file, dir_path) minigraph = os.path.join(self.test_dir, minigraph) - argument = ['-m', minigraph, '-p', port_config_ini_file, '-t', qos_file] - self.run_script(argument, output_file=self.output_file) + argument = '-m ' + minigraph + ' -p ' + port_config_ini_file + ' -t ' + qos_file + ' > ' + self.output_file + self.run_script(argument) # cleanup qos_config_file_new = os.path.join(dir_path, 'qos_config.j2') @@ -402,8 +398,8 @@ def test_qos_dscp_remapping_render_template(self): qos_config_file = os.path.join(self.test_dir, '..', '..', '..', 'files', 'build_templates', 'qos_config.j2') shutil.copy2(qos_config_file, device_template_path) - argument = ['-m', sample_minigraph_file, '-p', port_config_ini_file, '-t', qos_file] - self.run_script(argument, output_file=test_output) + argument = '-m ' + sample_minigraph_file + ' -p ' + port_config_ini_file + ' -t ' + qos_file + ' > ' + test_output + self.run_script(argument) # cleanup qos_config_file_new = os.path.join(device_template_path, 'qos_config.j2') @@ -436,8 +432,8 @@ def test_config_brcm_render_template(self): config_bcm_file = os.path.join(device_template_path, 'config.bcm.j2') config_test_output = os.path.join(self.test_dir, 'config_output.bcm') - argument = ['-m', sample_minigraph_file, '-p', port_config_ini_file, '-t', config_bcm_file] - self.run_script(argument, output_file=config_test_output) + argument = '-m ' + sample_minigraph_file + ' -p ' + port_config_ini_file + ' -t ' + config_bcm_file + ' > ' + config_test_output + self.run_script(argument) 
#check output config.bcm config_sample_output_file = os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, config_sample_output) @@ -455,8 +451,8 @@ def _test_buffers_render_template(self, vendor, platform, sku, minigraph, buffer shutil.copy2(buffers_config_file, dir_path) minigraph = os.path.join(self.test_dir, minigraph) - argument = ['-m', minigraph, '-p', port_config_ini_file, '-t', buffers_file] - self.run_script(argument, output_file=self.output_file) + argument = '-m ' + minigraph + ' -p ' + port_config_ini_file + ' -t ' + buffers_file + ' > ' + self.output_file + self.run_script(argument) # cleanup buffers_config_file_new = os.path.join(dir_path, 'buffers_config.j2') @@ -520,9 +516,9 @@ def test_extra_lossless_buffer_for_tunnel_remapping(self): def test_ipinip_multi_asic(self): ipinip_file = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-orchagent', 'ipinip.json.j2') - argument = ['-m', self.multi_asic_minigraph, '-p', self.multi_asic_port_config, '-t', ipinip_file, '-n', 'asic0'] + argument = '-m ' + self.multi_asic_minigraph + ' -p ' + self.multi_asic_port_config + ' -t ' + ipinip_file + ' -n asic0 ' + ' > ' + self.output_file print(argument) - self.run_script(argument, output_file=self.output_file) + self.run_script(argument) sample_output_file = os.path.join(self.test_dir, 'multi_npu_data', utils.PYvX_DIR, 'ipinip.json') assert utils.cmp(sample_output_file, self.output_file), self.run_diff(sample_output_file, self.output_file) @@ -548,11 +544,13 @@ def test_swss_switch_render_template(self): }, } for _, v in test_list.items(): - argument = ["-m", v["graph"], "-p", v["port_config"], "-y", constants_yml, "-t", switch_template] + argument = " -m {} -p {} -y {} -t {} > {}".format( + v["graph"], v["port_config"], constants_yml, switch_template, self.output_file + ) sample_output_file = os.path.join( self.test_dir, 'sample_output', v["output"] ) - self.run_script(argument, output_file=self.output_file) + self.run_script(argument) 
assert utils.cmp(sample_output_file, self.output_file), self.run_diff(sample_output_file, self.output_file) def test_swss_switch_render_template_multi_asic(self): @@ -577,11 +575,14 @@ def test_swss_switch_render_template_multi_asic(self): } for _, v in test_list.items(): os.environ["NAMESPACE_ID"] = v["namespace_id"] - argument = ["-m", self.t1_mlnx_minigraph, "-y", constants_yml, "-t", switch_template] + argument = " -m {} -y {} -t {} > {}".format( + self.t1_mlnx_minigraph, constants_yml, switch_template, + self.output_file + ) sample_output_file = os.path.join( self.test_dir, 'sample_output', v["output"] ) - self.run_script(argument, output_file=self.output_file) + self.run_script(argument) assert utils.cmp(sample_output_file, self.output_file), self.run_diff(sample_output_file, self.output_file) os.environ["NAMESPACE_ID"] = "" @@ -590,8 +591,8 @@ def test_ndppd_conf(self): vlan_interfaces_json = os.path.join(self.test_dir, "data", "ndppd", "vlan_interfaces.json") expected = os.path.join(self.test_dir, "sample_output", utils.PYvX_DIR, "ndppd.conf") - argument = ['-j', vlan_interfaces_json, '-t', conf_template] - self.run_script(argument, output_file=self.output_file) + argument = '-j {} -t {} > {}'.format(vlan_interfaces_json, conf_template, self.output_file) + self.run_script(argument) assert utils.cmp(expected, self.output_file), self.run_diff(expected, self.output_file) def test_ntp_conf(self): @@ -599,8 +600,8 @@ def test_ntp_conf(self): ntp_interfaces_json = os.path.join(self.test_dir, "data", "ntp", "ntp_interfaces.json") expected = os.path.join(self.test_dir, "sample_output", utils.PYvX_DIR, "ntp.conf") - argument = ['-j', ntp_interfaces_json, '-t', conf_template] - self.run_script(argument, output_file=self.output_file) + argument = '-j {} -t {} > {}'.format(ntp_interfaces_json, conf_template, self.output_file) + self.run_script(argument) assert utils.cmp(expected, self.output_file), self.run_diff(expected, self.output_file) def 
test_backend_acl_template_render(self): @@ -622,11 +623,13 @@ def test_backend_acl_template_render(self): input_file = os.path.join( self.test_dir, 'data', 'backend_acl', v['input'] ) - argument = ["-j", input_file, "-t", acl_template] + argument = " -j {} -t {} > {}".format( + input_file, acl_template, self.output_file + ) sample_output_file = os.path.join( self.test_dir, 'data', 'backend_acl', v['output'] ) - self.run_script(argument, output_file=self.output_file) + self.run_script(argument) assert utils.cmp(sample_output_file, self.output_file), self.run_diff(sample_output_file, self.output_file) def tearDown(self): diff --git a/src/sonic-config-engine/tests/test_j2files_t2_chassis_fe.py b/src/sonic-config-engine/tests/test_j2files_t2_chassis_fe.py index 7041bc1b9b5c..e6bc82941bf6 100644 --- a/src/sonic-config-engine/tests/test_j2files_t2_chassis_fe.py +++ b/src/sonic-config-engine/tests/test_j2files_t2_chassis_fe.py @@ -3,6 +3,7 @@ import os import shutil import subprocess + from unittest import TestCase import tests.common_utils as utils @@ -10,7 +11,7 @@ class TestJ2FilesT2ChassisFe(TestCase): def setUp(self): self.test_dir = os.path.dirname(os.path.realpath(__file__)) - self.script_file = [utils.PYTHON_INTERPRETTER, os.path.join(self.test_dir, '..', 'sonic-cfggen')] + self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 'sonic-cfggen') self.t2_chassis_fe_minigraph = os.path.join(self.test_dir, 't2-chassis-fe-graph.xml') self.t2_chassis_fe_vni_minigraph = os.path.join(self.test_dir, 't2-chassis-fe-graph-vni.xml') self.t2_chassis_fe_pc_minigraph = os.path.join(self.test_dir, 't2-chassis-fe-graph-pc.xml') @@ -23,28 +24,25 @@ def tearDown(self): except OSError: pass - def run_script(self, argument, output_file=None): - print('CMD: sonic-cfggen ' + ' '.join(argument)) - output = subprocess.check_output(self.script_file + argument) + def run_script(self, argument): + print('CMD: sonic-cfggen ' + argument) + output = 
subprocess.check_output(self.script_file + ' ' + argument, shell=True) if utils.PY3x: output = output.decode() - if output_file: - with open(output_file, 'w') as f: - f.write(output) return output def run_diff(self, file1, file2): - _, output = getstatusoutput_noshell(['diff', '-u', file1, file2]) - return output + return subprocess.check_output('diff -u {} {} || true'.format(file1, file2), shell=True) def run_case(self, minigraph, template, target): template_dir = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-fpm-frr', "frr") conf_template = os.path.join(template_dir, template) constants = os.path.join(self.test_dir, '..', '..', '..', 'files', 'image_config', 'constants', 'constants.yml') - cmd = ["-m", minigraph, "-p", self.t2_chassis_fe_port_config, "-y", constants, "-t", conf_template, "-T", template_dir] - self.run_script(cmd, output_file=self.output_file) + cmd_args = minigraph, self.t2_chassis_fe_port_config, constants, conf_template, template_dir, self.output_file + cmd = "-m %s -p %s -y %s -t %s -T %s > %s" % cmd_args + self.run_script(cmd) original_filename = os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, target) r = filecmp.cmp(original_filename, self.output_file) diff --git a/src/sonic-config-engine/tests/test_minigraph_case.py b/src/sonic-config-engine/tests/test_minigraph_case.py index 8c399920b88b..9ee8a49db6ae 100644 --- a/src/sonic-config-engine/tests/test_minigraph_case.py +++ b/src/sonic-config-engine/tests/test_minigraph_case.py @@ -2,6 +2,7 @@ import os import subprocess import ipaddress + import tests.common_utils as utils import minigraph @@ -16,7 +17,7 @@ class TestCfgGenCaseInsensitive(TestCase): def setUp(self): self.yang = utils.YangWrapper() self.test_dir = os.path.dirname(os.path.realpath(__file__)) - self.script_file = [utils.PYTHON_INTERPRETTER, os.path.join(self.test_dir, '..', 'sonic-cfggen')] + self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 'sonic-cfggen') 
self.sample_graph = os.path.join(self.test_dir, 'simple-sample-graph-case.xml') self.sample_simple_graph = os.path.join(self.test_dir, 'simple-sample-graph.xml') self.sample_resource_graph = os.path.join(self.test_dir, 'sample-graph-resource-type.xml') @@ -26,13 +27,13 @@ def setUp(self): self.port_config = os.path.join(self.test_dir, 't0-sample-port-config.ini') def run_script(self, argument, check_stderr=False): - print('\n Running sonic-cfggen ' + ' '.join(argument)) + print('\n Running sonic-cfggen ' + argument) self.assertTrue(self.yang.validate(argument)) if check_stderr: - output = subprocess.check_output(self.script_file + argument, stderr=subprocess.STDOUT) + output = subprocess.check_output(self.script_file + ' ' + argument, stderr=subprocess.STDOUT, shell=True) else: - output = subprocess.check_output(self.script_file + argument) + output = subprocess.check_output(self.script_file + ' ' + argument, shell=True) if utils.PY3x: output = output.decode() @@ -45,47 +46,47 @@ def run_script(self, argument, check_stderr=False): return output def test_dummy_run(self): - argument = [] + argument = '' output = self.run_script(argument) self.assertEqual(output, '') def test_minigraph_sku(self): - argument = ['-v', "DEVICE_METADATA[\'localhost\'][\'hwsku\']", '-m', self.sample_graph, '-p', self.port_config] + argument = '-v "DEVICE_METADATA[\'localhost\'][\'hwsku\']" -m "' + self.sample_graph + '" -p "' + self.port_config + '"' output = self.run_script(argument) self.assertEqual(output.strip(), 'Force10-S6000') def test_print_data(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '--print-data'] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" --print-data' output = self.run_script(argument) self.assertTrue(len(output.strip()) > 0) def test_jinja_expression(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'type\']"] + argument = '-m "' + self.sample_graph + '" -p 
"' + self.port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'type\']"' output = self.run_script(argument) self.assertEqual(output.strip(), 'ToRRouter') def test_minigraph_subtype(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'subtype\']"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'subtype\']"' output = self.run_script(argument) self.assertEqual(output.strip(), 'DualToR') def test_minigraph_peer_switch_hostname(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'peer_switch\']"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'peer_switch\']"' output = self.run_script(argument) self.assertEqual(output.strip(), 'switch2-t0') def test_additional_json_data(self): - argument = ['-a', '{"key1":"value1"}', '-v', 'key1'] + argument = '-a \'{"key1":"value1"}\' -v key1' output = self.run_script(argument) self.assertEqual(output.strip(), 'value1') def test_read_yaml(self): - argument = ['-v', 'yml_item', '-y', os.path.join(self.test_dir, 'test.yml')] + argument = '-v yml_item -y ' + os.path.join(self.test_dir, 'test.yml') output = self.run_script(argument) self.assertEqual(output.strip(), '[\'value1\', \'value2\']') def test_render_template(self): - argument = ['-y', os.path.join(self.test_dir, 'test.yml'), '-t', os.path.join(self.test_dir, 'test.j2')] + argument = '-y ' + os.path.join(self.test_dir, 'test.yml') + ' -t ' + os.path.join(self.test_dir, 'test.j2') output = self.run_script(argument) self.assertEqual(output.strip(), 'value1\nvalue2') @@ -96,12 +97,12 @@ def test_render_template(self): # self.assertEqual(output.strip(), "{'everflow0': {'src_ip': '10.1.0.32', 'dst_ip': '10.0.100.1'}}") def test_minigraph_interfaces(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', 'INTERFACE.keys()|list'] 
+ argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v \'INTERFACE.keys()|list\'' output = self.run_script(argument) self.assertEqual(output.strip(), "[('Ethernet0', '10.0.0.58/31'), 'Ethernet0', ('Ethernet0', 'FC00::75/126')]") def test_minigraph_vlans(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', 'VLAN'] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v VLAN' output = self.run_script(argument) expected = { @@ -125,7 +126,7 @@ def test_minigraph_vlans(self): ) def test_minigraph_vlan_members(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', 'VLAN_MEMBER'] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v VLAN_MEMBER' output = self.run_script(argument) expected = { 'Vlan1000|Ethernet8': {'tagging_mode': 'untagged'}, @@ -137,12 +138,12 @@ def test_minigraph_vlan_members(self): ) def test_minigraph_vlan_interfaces_keys(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "VLAN_INTERFACE.keys()|list"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "VLAN_INTERFACE.keys()|list"' output = self.run_script(argument) self.assertEqual(output.strip(), "[('Vlan1000', '192.168.0.1/27'), 'Vlan1000']") def test_minigraph_vlan_interfaces(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "VLAN_INTERFACE"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "VLAN_INTERFACE"' output = self.run_script(argument) expected_table = { 'Vlan1000|192.168.0.1/27': {}, @@ -154,7 +155,7 @@ def test_minigraph_vlan_interfaces(self): self.assertEqual(utils.to_dict(output.strip()), expected_table) def test_minigraph_portchannels(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', 'PORTCHANNEL'] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v PORTCHANNEL' output = self.run_script(argument) self.assertEqual( 
utils.to_dict(output.strip()), @@ -162,44 +163,44 @@ def test_minigraph_portchannels(self): ) def test_minigraph_console_mgmt_feature(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', 'CONSOLE_SWITCH'] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v CONSOLE_SWITCH' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), utils.to_dict("{'console_mgmt': {'enabled': 'no'}}")) def test_minigraph_console_port(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', 'CONSOLE_PORT'] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v CONSOLE_PORT' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), utils.to_dict("{'1': {'baud_rate': '9600', 'remote_device': 'managed_device', 'flow_control': 1}}")) def test_minigraph_dhcp_server_feature(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'dhcp_server\']"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'dhcp_server\']"' output = self.run_script(argument) self.assertEqual(output.strip(), '') try: # For DHCP server enabled device type - output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (TOR_ROUTER, BMC_MGMT_TOR_ROUTER), self.sample_graph]) + output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (TOR_ROUTER, BMC_MGMT_TOR_ROUTER, self.sample_graph), shell=True) output = self.run_script(argument) self.assertEqual(output.strip(), 'enabled') finally: - output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (BMC_MGMT_TOR_ROUTER, TOR_ROUTER), self.sample_graph]) + output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (BMC_MGMT_TOR_ROUTER, TOR_ROUTER, self.sample_graph), shell=True) def test_minigraph_deployment_id(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', 
"DEVICE_METADATA[\'localhost\'][\'deployment_id\']"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'deployment_id\']"' output = self.run_script(argument) self.assertEqual(output.strip(), "1") def test_minigraph_cluster(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'cluster\']"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'cluster\']"' output = self.run_script(argument) self.assertEqual(output.strip(), "AAA00PrdStr00") def test_minigraph_neighbor_metadata(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "DEVICE_NEIGHBOR_METADATA"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "DEVICE_NEIGHBOR_METADATA"' expected_table = { 'switch2-t0': { @@ -252,43 +253,43 @@ def test_minigraph_neighbor_metadata(self): # self.assertEqual(output.strip(), "{'everflow0': {'src_ip': '10.1.0.32', 'dst_ip': '10.0.100.1'}}") def test_metadata_tacacs(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "TACPLUS_SERVER"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "TACPLUS_SERVER"' output = self.run_script(argument) self.assertEqual(output.strip(), "{'10.0.10.7': {'priority': '1', 'tcp_port': '49'}, '10.0.10.8': {'priority': '1', 'tcp_port': '49'}}") def test_metadata_kube(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "KUBERNETES_MASTER[\'SERVER\']"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "KUBERNETES_MASTER[\'SERVER\']"' output = self.run_script(argument) self.assertEqual(json.loads(output.strip().replace("'", "\"")), json.loads('{"ip": "10.10.10.10", "disable": "True"}')) def test_minigraph_mgmt_port(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "MGMT_PORT"] + argument = '-m "' + 
self.sample_graph + '" -p "' + self.port_config + '" -v "MGMT_PORT"' output = self.run_script(argument) self.assertEqual(output.strip(), "{'eth0': {'alias': 'eth0', 'admin_status': 'up', 'speed': '1000'}}") def test_metadata_ntp(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "NTP_SERVER"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "NTP_SERVER"' output = self.run_script(argument) self.assertEqual(output.strip(), "{'10.0.10.1': {}, '10.0.10.2': {}}") def test_minigraph_vnet(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "VNET"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "VNET"' output = self.run_script(argument) self.assertEqual(output.strip(), "") def test_minigraph_vxlan(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "VXLAN_TUNNEL"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "VXLAN_TUNNEL"' output = self.run_script(argument) self.assertEqual(output.strip(), "") def test_minigraph_bgp_mon(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "BGP_MONITORS"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "BGP_MONITORS"' output = self.run_script(argument) self.assertEqual(output.strip(), "{}") def test_minigraph_peer_switch(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "PEER_SWITCH"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "PEER_SWITCH"' expected_table = { 'switch2-t0': { 'address_ipv4': "25.1.1.10" @@ -313,7 +314,7 @@ def test_mux_cable_parsing(self): self.assertTrue("mux_cable" not in port) def test_minigraph_storage_device(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'storage_device\']"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v 
"DEVICE_METADATA[\'localhost\'][\'storage_device\']"' output = self.run_script(argument) self.assertEqual(output.strip(), "true") @@ -330,23 +331,23 @@ def verify_storage_device_set(self, graph_file, check_stderr=False): try: print('\n Change device type to %s' % (BACKEND_TOR_ROUTER)) if check_stderr: - output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (TOR_ROUTER, BACKEND_TOR_ROUTER), graph_file], stderr=subprocess.STDOUT) + output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (TOR_ROUTER, BACKEND_TOR_ROUTER, graph_file), stderr=subprocess.STDOUT, shell=True) else: - output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (TOR_ROUTER, BACKEND_TOR_ROUTER), graph_file]) + output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (TOR_ROUTER, BACKEND_TOR_ROUTER, graph_file), shell=True) - argument = ['-m', graph_file, '-p', self.port_config, '-v', "DEVICE_METADATA[\'localhost\'][\'storage_device\']"] + argument = '-m "' + graph_file + '" -p "' + self.port_config + '" -v "DEVICE_METADATA[\'localhost\'][\'storage_device\']"' output = self.run_script(argument) self.assertEqual(output.strip(), "true") finally: print('\n Change device type back to %s' % (TOR_ROUTER)) if check_stderr: - output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (BACKEND_TOR_ROUTER, TOR_ROUTER), graph_file], stderr=subprocess.STDOUT) + output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (BACKEND_TOR_ROUTER, TOR_ROUTER, graph_file), stderr=subprocess.STDOUT, shell=True) else: - output = subprocess.check_output(["sed", "-i", 's/%s/%s/g' % (BACKEND_TOR_ROUTER, TOR_ROUTER), graph_file]) + output = subprocess.check_output("sed -i \'s/%s/%s/g\' %s" % (BACKEND_TOR_ROUTER, TOR_ROUTER, graph_file), shell=True) def test_minigraph_tunnel_table(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "TUNNEL"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "TUNNEL"' expected_tunnel = { "MuxTunnel0": { "tunnel_type": 
"IPINIP", @@ -366,7 +367,7 @@ def test_minigraph_tunnel_table(self): # Validate tunnel config is as before when tunnel_qos_remap = disabled sample_graph_disabled_remap = os.path.join(self.test_dir, 'simple-sample-graph-case-remap-disabled.xml') - argument = ['-m', sample_graph_disabled_remap, '-p', self.port_config, '-v', "TUNNEL"] + argument = '-m "' + sample_graph_disabled_remap + '" -p "' + self.port_config + '" -v "TUNNEL"' output = self.run_script(argument) self.assertEqual( @@ -376,7 +377,7 @@ def test_minigraph_tunnel_table(self): # Validate extra config is generated when tunnel_qos_remap = enabled sample_graph_enabled_remap = os.path.join(self.test_dir, 'simple-sample-graph-case-remap-enabled.xml') - argument = ['-m', sample_graph_enabled_remap, '-p', self.port_config, '-v', "TUNNEL"] + argument = '-m "' + sample_graph_enabled_remap + '" -p "' + self.port_config + '" -v "TUNNEL"' expected_tunnel = { "MuxTunnel0": { "tunnel_type": "IPINIP", @@ -401,7 +402,7 @@ def test_minigraph_tunnel_table(self): # Validate extra config for mux tunnel is generated automatically when tunnel_qos_remap = enabled sample_graph_enabled_remap = os.path.join(self.test_dir, 'simple-sample-graph-case-remap-enabled-no-tunnel-attributes.xml') - argument = ['-m', sample_graph_enabled_remap, '-p', self.port_config, '-v', "TUNNEL"] + argument = '-m "' + sample_graph_enabled_remap + '" -p "' + self.port_config + '" -v "TUNNEL"' output = self.run_script(argument) self.assertEqual( utils.to_dict(output.strip()), @@ -409,7 +410,7 @@ def test_minigraph_tunnel_table(self): ) def test_minigraph_mux_cable_table(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', "MUX_CABLE"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "MUX_CABLE"' expected_table = { 'Ethernet4': { 'state': 'auto', @@ -433,7 +434,7 @@ def test_minigraph_mux_cable_table(self): ) def test_dhcp_table(self): - argument = ['-m', self.sample_graph, '-p', self.port_config, '-v', 
"DHCP_RELAY"] + argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "DHCP_RELAY"' expected = { 'Vlan1000': { 'dhcpv6_servers': [ diff --git a/src/sonic-config-engine/tests/test_multinpu_cfggen.py b/src/sonic-config-engine/tests/test_multinpu_cfggen.py index 070137af3960..2bfb879a1ad2 100644 --- a/src/sonic-config-engine/tests/test_multinpu_cfggen.py +++ b/src/sonic-config-engine/tests/test_multinpu_cfggen.py @@ -5,6 +5,7 @@ import subprocess import unittest import yaml + import tests.common_utils as utils from unittest import TestCase @@ -22,7 +23,7 @@ def setUp(self): self.yang = utils.YangWrapper() self.test_dir = os.path.dirname(os.path.realpath(__file__)) self.test_data_dir = os.path.join(self.test_dir, 'multi_npu_data') - self.script_file = [utils.PYTHON_INTERPRETTER, os.path.join(self.test_dir, '..', 'sonic-cfggen')] + self.script_file = utils.PYTHON_INTERPRETTER + ' ' + os.path.join(self.test_dir, '..', 'sonic-cfggen') self.sample_graph = os.path.join(self.test_data_dir, 'sample-minigraph.xml') self.sample_graph1 = os.path.join(self.test_data_dir, 'sample-minigraph-noportchannel.xml') self.sample_port_config = os.path.join(self.test_data_dir, 'sample_port_config.ini') @@ -32,19 +33,17 @@ def setUp(self): self.output_file = os.path.join(self.test_dir, 'output') os.environ["CFGGEN_UNIT_TESTING"] = "2" - def run_script(self, argument, check_stderr=False, output_file=None): - print('\n Running sonic-cfggen ' + ' '.join(argument)) + def run_script(self, argument, check_stderr=False): + print('\n Running sonic-cfggen ' + argument) self.assertTrue(self.yang.validate(argument)) + if check_stderr: - output = subprocess.check_output(self.script_file + argument, stderr=subprocess.STDOUT) + output = subprocess.check_output(self.script_file + ' ' + argument, stderr=subprocess.STDOUT, shell=True) else: - output = subprocess.check_output(self.script_file + argument) + output = subprocess.check_output(self.script_file + ' ' + argument, shell=True) if 
utils.PY3x: output = output.decode() - if output_file: - with open(output_file, 'w') as f: - f.write(output) linecount = output.strip().count('\n') if linecount <= 0: @@ -54,15 +53,15 @@ def run_script(self, argument, check_stderr=False, output_file=None): return output def run_diff(self, file1, file2): - _, output = getstatusoutput_noshell(['diff', '-u', file1, file2]) - return output + return subprocess.check_output('diff -u {} {} || true'.format(file1, file2), shell=True) def run_frr_asic_case(self, template, target, asic, port_config): template_dir = os.path.join(self.test_dir, '..', '..', '..', 'dockers', 'docker-fpm-frr', "frr") conf_template = os.path.join(template_dir, template) constants = os.path.join(self.test_dir, '..', '..', '..', 'files', 'image_config', 'constants', 'constants.yml') - cmd = ['-n', asic, '-m', self.sample_graph, '-p', port_config, '-y', constants, '-t', conf_template, '-T', template_dir] - self.run_script(cmd, output_file=self.output_file) + cmd_args = asic, self.sample_graph, port_config, constants, conf_template, template_dir, self.output_file + cmd = "-n %s -m %s -p %s -y %s -t %s -T %s > %s" % cmd_args + self.run_script(cmd) original_filename = os.path.join(self.test_dir, 'sample_output', utils.PYvX_DIR, target) r = filecmp.cmp(original_filename, self.output_file) @@ -73,37 +72,37 @@ def run_frr_asic_case(self, template, target, asic, port_config): def run_script_for_asic(self,argument,asic, port_config=None): - cmd = argument + ["-n", "asic{}".format(asic)] + argument = "{} -n asic{} ".format(argument, asic) if port_config: - cmd = argument + ["-n", "asic{}".format(asic), "-p", port_config] - output = self.run_script(cmd) + argument += "-p {}".format(port_config) + output = self.run_script(argument) return output def test_dummy_run(self): - argument = [] + argument = '' output = self.run_script(argument) self.assertEqual(output, '') def test_hwsku(self): - argument = ["-v", "DEVICE_METADATA[\'localhost\'][\'hwsku\']", "-m", 
self.sample_graph, "-p", self.sample_port_config] + argument = "-v \"DEVICE_METADATA[\'localhost\'][\'hwsku\']\" -m \"{}\" -p \"{}\"".format(self.sample_graph, self.sample_port_config) output = self.run_script(argument) self.assertEqual(output.strip(), SKU) - argument = ["-v", "DEVICE_METADATA[\'localhost\'][\'hwsku\']", "-m", self.sample_graph] + argument = "-v \"DEVICE_METADATA[\'localhost\'][\'hwsku\']\" -m \"{}\"".format(self.sample_graph) for asic in range(NUM_ASIC): output = self.run_script_for_asic(argument, asic, self.port_config[asic]) self.assertEqual(output.strip(), SKU) def test_print_data(self): - argument = ["-m", self.sample_graph, "-p", self.sample_port_config, "--print-data"] + argument = "-m \"{}\" -p \"{}\" --print-data".format(self.sample_graph, self.sample_port_config) output = self.run_script(argument) self.assertGreater(len(output.strip()) , 0) - argument = ["-m", self.sample_graph, "--print-data"] + argument = "-m \"{}\" --print-data".format(self.sample_graph) for asic in range(NUM_ASIC): output = self.run_script_for_asic(argument, asic, self.port_config[asic]) self.assertGreater(len(output.strip()) , 0) def test_additional_json_data(self): - argument = ['-a', '{"key1":"value1"}', '-v', 'key1'] + argument = '-a \'{"key1":"value1"}\' -v key1' output = self.run_script(argument) self.assertEqual(output.strip(), 'value1') for asic in range(NUM_ASIC): @@ -111,15 +110,15 @@ def test_additional_json_data(self): self.assertEqual(output.strip(), 'value1') def test_read_yaml(self): - argument = ['-v', 'yml_item', '-y', os.path.join(self.test_dir, 'test.yml')] - output = yaml.safe_load(self.run_script(argument)) + argument = '-v yml_item -y ' + os.path.join(self.test_dir, 'test.yml') + output = yaml.load(self.run_script(argument)) self.assertListEqual(output, ['value1', 'value2']) for asic in range(NUM_ASIC): - output = yaml.safe_load(self.run_script_for_asic(argument, asic, self.port_config[asic])) + output = 
yaml.load(self.run_script_for_asic(argument, asic, self.port_config[asic])) self.assertListEqual(output, ['value1', 'value2']) def test_render_template(self): - argument = ['-y', os.path.join(self.test_dir, 'test.yml'), '-t', os.path.join(self.test_dir, 'test.j2')] + argument = '-y ' + os.path.join(self.test_dir, 'test.yml') + ' -t ' + os.path.join(self.test_dir, 'test.j2') output = self.run_script(argument) self.assertEqual(output.strip(), 'value1\nvalue2') for asic in range(NUM_ASIC): @@ -127,37 +126,37 @@ def test_render_template(self): self.assertEqual(output.strip(), 'value1\nvalue2') def test_metadata_tacacs(self): - argument = ['-m', self.sample_graph, '-p', self.sample_port_config, '--var-json', "TACPLUS_SERVER"] + argument = '-m "' + self.sample_graph + '" -p "' + self.sample_port_config + '" --var-json "TACPLUS_SERVER"' output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {'123.46.98.21': {'priority': '1', 'tcp_port': '49'}}) #TACPLUS_SERVER not present in the asic configuration. 
- argument = ['-m', self.sample_graph, '--var-json', "TACPLUS_SERVER"] + argument = '-m "' + self.sample_graph + '" --var-json "TACPLUS_SERVER"' for asic in range(NUM_ASIC): output = json.loads(self.run_script_for_asic(argument, asic, self.port_config[asic])) self.assertDictEqual(output, {}) def test_metadata_ntp(self): - argument = ['-m', self.sample_graph, '-p', self.sample_port_config, '--var-json', "NTP_SERVER"] + argument = '-m "' + self.sample_graph + '" -p "' + self.sample_port_config + '" --var-json "NTP_SERVER"' output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {'17.39.1.130': {}, '17.39.1.129': {}}) #NTP data is present only in the host config - argument = ['-m', self.sample_graph, '--var-json', "NTP_SERVER"] + argument = '-m "' + self.sample_graph + '" --var-json "NTP_SERVER"' for asic in range(NUM_ASIC): output = json.loads(self.run_script_for_asic(argument, asic, self.port_config[asic])) print("Log:asic{} sku {}".format(asic,output)) self.assertDictEqual(output, {}) def test_mgmt_port(self): - argument = ['-m', self.sample_graph, '-p', self.sample_port_config, '--var-json', "MGMT_PORT"] + argument = '-m "' + self.sample_graph + '" -p "' + self.sample_port_config + '" --var-json "MGMT_PORT"' output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {'eth0': {'alias': 'eth0', 'admin_status': 'up'}}) - argument = ['-m', self.sample_graph, '--var-json', "MGMT_PORT"] + argument = '-m "' + self.sample_graph + '" --var-json "MGMT_PORT"' for asic in range(NUM_ASIC): output = json.loads(self.run_script_for_asic(argument, asic, self.port_config[asic])) self.assertDictEqual(output, {'eth0': {'alias': 'eth0', 'admin_status': 'up'}}) def test_frontend_asic_portchannels(self): - argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "--var-json", "PORTCHANNEL"] + argument = "-m {} -p {} -n asic0 --var-json \"PORTCHANNEL\"".format(self.sample_graph, self.port_config[0]) output = 
json.loads(self.run_script(argument)) self.assertDictEqual(output, \ {'PortChannel0002': {'admin_status': 'up', 'min_links': '2', 'members': ['Ethernet0', 'Ethernet4'], 'mtu': '9100', 'tpid': '0x8100'}, @@ -165,14 +164,14 @@ def test_frontend_asic_portchannels(self): 'PortChannel4002': {'admin_status': 'up', 'min_links': '2', 'members': ['Ethernet-BP8', 'Ethernet-BP12'], 'mtu': '9100', 'tpid': '0x8100'}}) def test_backend_asic_portchannels(self): - argument = ["-m", self.sample_graph, "-p", self.port_config[3], "-n", "asic3", "--var-json", "PORTCHANNEL"] + argument = "-m {} -p {} -n asic3 --var-json \"PORTCHANNEL\"".format(self.sample_graph, self.port_config[3]) output = json.loads(self.run_script(argument)) self.assertDictEqual(output, \ {'PortChannel4013': {'admin_status': 'up', 'min_links': '2', 'members': ['Ethernet-BP384', 'Ethernet-BP388'], 'mtu': '9100', 'tpid': '0x8100'}, 'PortChannel4014': {'admin_status': 'up', 'min_links': '2', 'members': ['Ethernet-BP392', 'Ethernet-BP396'], 'mtu': '9100', 'tpid': '0x8100'}}) def test_frontend_asic_portchannel_mem(self): - argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "-v", "PORTCHANNEL_MEMBER.keys()|list"] + argument = "-m {} -p {} -n asic0 -v \"PORTCHANNEL_MEMBER.keys()|list\"".format(self.sample_graph, self.port_config[0]) output = self.run_script(argument) self.assertEqual( utils.liststr_to_dict(output.strip()), @@ -180,7 +179,7 @@ def test_frontend_asic_portchannel_mem(self): ) def test_backend_asic_portchannels_mem(self): - argument = ["-m", self.sample_graph, "-p", self.port_config[3], "-n", "asic3", "-v", "PORTCHANNEL_MEMBER.keys()|list"] + argument = "-m {} -p {} -n asic3 -v \"PORTCHANNEL_MEMBER.keys()|list\"".format(self.sample_graph, self.port_config[3]) output = self.run_script(argument) self.assertEqual( utils.liststr_to_dict(output.strip()), @@ -188,7 +187,7 @@ def test_backend_asic_portchannels_mem(self): ) def test_frontend_asic_portchannel_intf(self): - argument = ["-m", 
self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "-v", "PORTCHANNEL_INTERFACE.keys()|list"] + argument = "-m {} -p {} -n asic0 -v \"PORTCHANNEL_INTERFACE.keys()|list\"".format(self.sample_graph, self.port_config[0]) output = self.run_script(argument) self.assertEqual( utils.liststr_to_dict(output.strip()), @@ -196,7 +195,7 @@ def test_frontend_asic_portchannel_intf(self): ) def test_frontend_asic_routerport_intf(self): - argument = ["-m", self.sample_graph1, "-p", self.port_config[0], "-n", "asic0", "-v", "INTERFACE.keys()|list"] + argument = "-m {} -p {} -n asic0 -v \"INTERFACE.keys()|list\"".format(self.sample_graph1, self.port_config[0]) output = self.run_script(argument) self.assertEqual( utils.liststr_to_dict(output.strip()), @@ -204,7 +203,7 @@ def test_frontend_asic_routerport_intf(self): ) def test_backend_asic_portchannel_intf(self): - argument = ["-m", self.sample_graph, "-p", self.port_config[3], "-n", "asic3", "-v", "PORTCHANNEL_INTERFACE.keys()|list"] + argument = "-m {} -p {} -n asic3 -v \"PORTCHANNEL_INTERFACE.keys()|list\"".format(self.sample_graph, self.port_config[3]) output = self.run_script(argument) self.assertEqual( utils.liststr_to_dict(output.strip()), @@ -212,7 +211,7 @@ def test_backend_asic_portchannel_intf(self): ) def test_frontend_asic_ports(self): - argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "--var-json", "PORT"] + argument = "-m {} -p {} -n asic0 --var-json \"PORT\"".format(self.sample_graph, self.port_config[0]) output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {"Ethernet0": { "admin_status": "up", "alias": "Ethernet1/1", "asic_port_name": "Eth0-ASIC0", "description": "01T2:Ethernet1", "index": "0", "lanes": "33,34,35,36", "mtu": "9100", "tpid": "0x8100", "pfc_asym": "off", "role": "Ext", "speed": "40000", "autoneg": "on" }, @@ -225,7 +224,7 @@ def test_frontend_asic_ports(self): "Ethernet-BP12": { "admin_status": "up", "alias": "Eth7-ASIC0", "asic_port_name": 
"Eth7-ASIC0", "description": "ASIC3:Eth1-ASIC3", "index": "3", "lanes": "25,26,27,28", "mtu": "9100", "tpid": "0x8100", "pfc_asym": "off", "role": "Int", "speed": "40000" }}) def test_frontend_asic_ports_config_db(self): - argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "--var-json", "PORT"] + argument = "-m {} -p {} -n asic0 --var-json \"PORT\"".format(self.sample_graph, self.port_config[0]) output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {"Ethernet0": { "admin_status": "up", "alias": "Ethernet1/1", "asic_port_name": "Eth0-ASIC0", "description": "01T2:Ethernet1", "index": "0", "lanes": "33,34,35,36", "mtu": "9100", "tpid": "0x8100", "pfc_asym": "off", "role": "Ext", "speed": "40000", "autoneg": "on" }, @@ -238,7 +237,7 @@ def test_frontend_asic_ports_config_db(self): "Ethernet-BP12": { "admin_status": "up", "alias": "Eth7-ASIC0", "asic_port_name": "Eth7-ASIC0", "description": "ASIC3:Eth1-ASIC3", "index": "3", "lanes": "25,26,27,28", "mtu": "9100", "tpid": "0x8100", "pfc_asym": "off", "role": "Int", "speed": "40000" }}) def test_frontend_asic_device_neigh(self): - argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "--var-json", "DEVICE_NEIGHBOR"] + argument = "-m {} -p {} -n asic0 --var-json \"DEVICE_NEIGHBOR\"".format(self.sample_graph, self.port_config[0]) output = json.loads(self.run_script(argument)) self.assertDictEqual(output, \ {'Ethernet0': {'name': '01T2', 'port': 'Ethernet1'}, @@ -249,7 +248,7 @@ def test_frontend_asic_device_neigh(self): 'Ethernet-BP8': {'name': 'ASIC3', 'port': 'Eth0-ASIC3'}}) def test_frontend_asic_device_neigh_metadata(self): - argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "--var-json", "DEVICE_NEIGHBOR_METADATA"] + argument = "-m {} -p {} -n asic0 --var-json \"DEVICE_NEIGHBOR_METADATA\"".format(self.sample_graph, self.port_config[0]) output = json.loads(self.run_script(argument)) print(output) 
self.assertDictEqual(output, \ @@ -258,7 +257,7 @@ def test_frontend_asic_device_neigh_metadata(self): 'ASIC2': {'lo_addr_v6': '::/0', 'mgmt_addr': '0.0.0.0/0', 'hwsku': 'multi-npu-asic', 'lo_addr': '0.0.0.0/0', 'type': 'Asic', 'mgmt_addr_v6': '::/0'}}) def test_backend_asic_device_neigh(self): - argument = ["-m", self.sample_graph, "-p", self.port_config[3], "-n", "asic3", "--var-json", "DEVICE_NEIGHBOR"] + argument = "-m {} -p {} -n asic3 --var-json \"DEVICE_NEIGHBOR\"".format(self.sample_graph, self.port_config[3]) output = json.loads(self.run_script(argument)) self.assertDictEqual(output, \ {'Ethernet-BP396': {'name': 'ASIC1', 'port': 'Eth7-ASIC1'}, @@ -267,7 +266,7 @@ def test_backend_asic_device_neigh(self): 'Ethernet-BP388': {'name': 'ASIC0', 'port': 'Eth7-ASIC0'}}) def test_backend_device_neigh_metadata(self): - argument = ["-m", self.sample_graph, "-p", self.port_config[3], "-n", "asic3", "--var-json", "DEVICE_NEIGHBOR_METADATA"] + argument = "-m {} -p {} -n asic3 --var-json \"DEVICE_NEIGHBOR_METADATA\"".format(self.sample_graph, self.port_config[3]) output = json.loads(self.run_script(argument)) print(output) self.assertDictEqual(output, \ @@ -275,28 +274,28 @@ def test_backend_device_neigh_metadata(self): 'ASIC0': {'lo_addr_v6': '::/0', 'mgmt_addr': '0.0.0.0/0', 'hwsku': 'multi-npu-asic', 'lo_addr': '0.0.0.0/0', 'type': 'Asic', 'mgmt_addr_v6': '::/0'}}) def test_frontend_bgp_neighbor(self): - argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "--var-json", "BGP_NEIGHBOR"] + argument = "-m {} -p {} -n asic0 --var-json \"BGP_NEIGHBOR\"".format(self.sample_graph, self.port_config[0]) output = json.loads(self.run_script(argument)) self.assertDictEqual(output, \ {'10.0.0.1': {'rrclient': 0, 'name': '01T2', 'local_addr': '10.0.0.0', 'nhopself': 0, 'holdtime': '10', 'asn': '65200', 'keepalive': '3'}, 'fc00::2': {'rrclient': 0, 'name': '01T2', 'local_addr': 'fc00::1', 'nhopself': 0, 'holdtime': '10', 'asn': '65200', 'keepalive': 
'3'}}) def test_frontend_asic_bgp_neighbor(self): - argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "--var-json", "BGP_INTERNAL_NEIGHBOR"] + argument = "-m {} -p {} -n asic0 --var-json \"BGP_INTERNAL_NEIGHBOR\"".format(self.sample_graph, self.port_config[0]) output = json.loads(self.run_script(argument)) self.assertDictEqual(output, \ {'10.1.0.0': {'rrclient': 0, 'name': 'ASIC2', 'local_addr': '10.1.0.1', 'nhopself': 0, 'admin_status': 'up', 'holdtime': '0', 'asn': '65100', 'keepalive': '0'}, '10.1.0.2': {'rrclient': 0, 'name': 'ASIC3', 'local_addr': '10.1.0.3', 'nhopself': 0, 'admin_status': 'up', 'holdtime': '0', 'asn': '65100', 'keepalive': '0'}}) def test_backend_asic_bgp_neighbor(self): - argument = ["-m", self.sample_graph, "-p", self.port_config[3], "-n", "asic3", "--var-json", "BGP_INTERNAL_NEIGHBOR"] + argument = "-m {} -p {} -n asic3 --var-json \"BGP_INTERNAL_NEIGHBOR\"".format(self.sample_graph, self.port_config[3]) output = json.loads(self.run_script(argument)) self.assertDictEqual(output, \ {'10.1.0.7': {'rrclient': 0, 'name': 'ASIC1', 'local_addr': '10.1.0.6', 'nhopself': 0, 'admin_status': 'up', 'holdtime': '0', 'asn': '65100', 'keepalive': '0'}, '10.1.0.3': {'rrclient': 0, 'name': 'ASIC0', 'local_addr': '10.1.0.2', 'nhopself': 0, 'admin_status': 'up', 'holdtime': '0', 'asn': '65100', 'keepalive': '0'}}) def test_device_asic_metadata(self): - argument = ["-m", self.sample_graph, "--var-json", "DEVICE_METADATA"] + argument = "-m {} --var-json DEVICE_METADATA".format(self.sample_graph) for asic in range(NUM_ASIC): output = json.loads(self.run_script_for_asic(argument, asic,self.port_config[asic])) asic_name = "asic{}".format(asic) @@ -310,7 +309,7 @@ def test_device_asic_metadata(self): self.assertEqual(output['localhost']['deployment_id'], "1") def test_global_asic_acl(self): - argument = ["-m", self.sample_graph, "-p", self.sample_port_config, "--var-json", "ACL_TABLE"] + argument = "-m {} -p {} --var-json 
\"ACL_TABLE\"".format(self.sample_graph, self.sample_port_config) output = json.loads(self.run_script(argument)) exp = {\ 'SNMP_ACL': {'policy_desc': 'SNMP_ACL', 'type': 'CTRLPLANE', 'stage': 'ingress', 'services': ['SNMP']}, @@ -327,7 +326,7 @@ def test_global_asic_acl(self): self.assertDictEqual(output, exp) def test_global_asic_acl1(self): - argument = ["-m", self.sample_graph1, "-p", self.sample_port_config, "--var-json", "ACL_TABLE"] + argument = "-m {} -p {} --var-json \"ACL_TABLE\"".format(self.sample_graph1, self.sample_port_config) self.maxDiff = None output = json.loads(self.run_script(argument)) exp = {\ @@ -344,7 +343,7 @@ def test_global_asic_acl1(self): self.assertDictEqual(output, exp) def test_front_end_asic_acl(self): - argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "--var-json", "ACL_TABLE"] + argument = "-m {} -p {} -n asic0 --var-json \"ACL_TABLE\"".format(self.sample_graph, self.port_config[0]) output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {\ 'DATAACL': {'policy_desc': 'DATAACL', 'ports': ['PortChannel0002'], 'stage': 'ingress', 'type': 'L3'}, @@ -354,7 +353,7 @@ def test_front_end_asic_acl(self): 'SSH_ONLY': {'policy_desc': 'SSH_ONLY', 'services': ['SSH'], 'stage': 'ingress', 'type': 'CTRLPLANE'}}) def test_front_end_asic_acl1(self): - argument = ["-m", self.sample_graph1, "-p", self.port_config[0], "-n", "asic0", "--var-json", "ACL_TABLE"] + argument = "-m {} -p {} -n asic0 --var-json \"ACL_TABLE\"".format(self.sample_graph1, self.port_config[0]) output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {\ 'EVERFLOW': {'policy_desc': 'EVERFLOW', 'ports': ['Ethernet0','Ethernet4'], 'stage': 'ingress', 'type': 'MIRROR'}, @@ -364,18 +363,18 @@ def test_front_end_asic_acl1(self): def test_back_end_asic_acl(self): - argument = ["-m", self.sample_graph, "-p", self.port_config[3], "-n", "asic3", "--var-json", "ACL_TABLE"] + argument = "-m {} -p {} -n asic3 --var-json 
\"ACL_TABLE\"".format(self.sample_graph, self.port_config[3]) output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {}) def test_back_end_asic_acl1(self): - argument = ["-m", self.sample_graph1, "-p", self.port_config[3], "-n", "asic3", "--var-json", "ACL_TABLE"] + argument = "-m {} -p {} -n asic3 --var-json \"ACL_TABLE\"".format(self.sample_graph1, self.port_config[3]) output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {}) def test_loopback_intfs(self): - argument = ["-m", self.sample_graph, "-p", self.sample_port_config, "--var-json", "LOOPBACK_INTERFACE"] + argument = "-m {} -p {} --var-json \"LOOPBACK_INTERFACE\"".format(self.sample_graph, self.sample_port_config) output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {\ "Loopback0": {}, @@ -383,7 +382,7 @@ def test_loopback_intfs(self): "Loopback0|FC00:1::32/128": {}}) # The asic configuration should have 2 loopback interfaces - argument = ["-m", self.sample_graph, "-p", self.port_config[0], "-n", "asic0", "--var-json", "LOOPBACK_INTERFACE"] + argument = "-m {} -p {} -n asic0 --var-json \"LOOPBACK_INTERFACE\"".format(self.sample_graph, self.port_config[0]) output = json.loads(self.run_script(argument)) self.assertDictEqual(output, { \ "Loopback0": {}, @@ -393,7 +392,7 @@ def test_loopback_intfs(self): "Loopback4096|8.0.0.0/32": {}, "Loopback4096|FD00:1::32/128": {}}) - argument = ["-m", self.sample_graph, "-p", self.port_config[3], "-n", "asic3", "--var-json", "LOOPBACK_INTERFACE"] + argument = "-m {} -p {} -n asic3 --var-json \"LOOPBACK_INTERFACE\"".format(self.sample_graph, self.port_config[3]) output = json.loads(self.run_script(argument)) self.assertDictEqual(output, {\ "Loopback0": {}, @@ -426,7 +425,9 @@ def test_buffers_multi_asic_template(self): ) # asic0 - mix of front end and back end ports shutil.copy2(buffer_template, device_config_dir) - argument = ["-m", self.sample_graph, "-p", port_config_ini_asic0, "-n", "asic0", "-t", 
device_buffer_template] + argument = "-m {} -p {} -n asic0 -t {}".format( + self.sample_graph, port_config_ini_asic0, device_buffer_template + ) output = json.loads(self.run_script(argument)) os.remove(os.path.join(device_config_dir, "buffers_config.j2")) self.assertDictEqual( From 61246b62c8e2528c7002cfb0f3ea85aee4897abd Mon Sep 17 00:00:00 2001 From: xumia <59720581+xumia@users.noreply.github.com> Date: Mon, 7 Nov 2022 10:04:43 +0800 Subject: [PATCH 143/174] [Build] Fix the docker-sync not found issue (#12593) Why I did it [Build] Fix the docker-sync not found issue How I did it When SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD not enabled, not to remove the docker-sync tag. --- slave.mk | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/slave.mk b/slave.mk index ec484af94489..c358e965fabf 100644 --- a/slave.mk +++ b/slave.mk @@ -453,8 +453,10 @@ define docker-image-save docker tag $(1)-$(DOCKER_USERNAME):$(DOCKER_USERTAG) $(1):latest $(LOG) @echo "Saving docker image $(1):latest" $(LOG) docker save $(1):latest | gzip -c > $(2) - @echo "Removing docker image $(1):latest" $(LOG) - docker rmi -f $(1):latest $(LOG) + if [ x$(SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD) == x"y" ]; then + @echo "Removing docker image $(1):latest" $(LOG) + docker rmi -f $(1):latest $(LOG) + fi $(call MOD_UNLOCK,$(1)) @echo "Released docker image lock for $(1) save" $(LOG) @echo "Removing docker image $(1)-$(DOCKER_USERNAME):$(DOCKER_USERTAG)" $(LOG) @@ -476,8 +478,10 @@ define docker-image-load docker load -i $(TARGET_PATH)/$(1).gz $(LOG) @echo "Tagging docker image $(1):latest as $(1)-$(DOCKER_USERNAME):$(DOCKER_USERTAG)" $(LOG) docker tag $(1):latest $(1)-$(DOCKER_USERNAME):$(DOCKER_USERTAG) $(LOG) - @echo "Removing docker image $(1):latest" $(LOG) - docker rmi -f $(1):latest $(LOG) + if [ x$(SONIC_CONFIG_USE_NATIVE_DOCKERD_FOR_BUILD) == x"y" ]; then + @echo "Removing docker image $(1):latest" $(LOG) + docker rmi -f $(1):latest $(LOG) + fi $(call 
MOD_UNLOCK,$(1)) @echo "Released docker image lock for $(1) load" $(LOG) endef From c3a51b2d0d2d13a6ce58f51e61dc8ef4bdab5888 Mon Sep 17 00:00:00 2001 From: lixiaoyuner <35456895+lixiaoyuner@users.noreply.github.com> Date: Mon, 7 Nov 2022 13:06:19 +0800 Subject: [PATCH 144/174] Fix code irregular issues (#12595) * Fix code irregular issues Signed-off-by: Yun Li --- src/sonic-ctrmgrd/ctrmgr/container_startup.py | 10 ---------- src/sonic-ctrmgrd/ctrmgr/ctrmgrd.py | 8 +++++--- src/sonic-ctrmgrd/ctrmgr/kube_commands.py | 2 -- 3 files changed, 5 insertions(+), 15 deletions(-) diff --git a/src/sonic-ctrmgrd/ctrmgr/container_startup.py b/src/sonic-ctrmgrd/ctrmgr/container_startup.py index 13ce01ff1ead..c22ceb2f9ce5 100755 --- a/src/sonic-ctrmgrd/ctrmgr/container_startup.py +++ b/src/sonic-ctrmgrd/ctrmgr/container_startup.py @@ -28,7 +28,6 @@ def debug_msg(m): msg = "{}: {}".format(inspect.stack()[1][3], m) - print(msg) syslog.syslog(syslog.LOG_DEBUG, msg) @@ -232,15 +231,6 @@ def container_up(feature, owner, version): do_freeze(feature, "This version is marked disabled. Exiting ...") return - # if not instance_higher(feature, state_data[VERSION], version): - # # TODO: May Remove label __enabled - # # Else kubelet will continue to re-deploy every 5 mins, until - # # master removes the lable to un-deploy. - # # - # do_freeze(feature, "bail out as current deploy version {} is not higher". 
- # format(version)) - # return - update_data(state_db, feature, { VERSION: version }) mode = state_data[REMOTE_STATE] diff --git a/src/sonic-ctrmgrd/ctrmgr/ctrmgrd.py b/src/sonic-ctrmgrd/ctrmgr/ctrmgrd.py index 6be18146b2aa..b6e9249fcb11 100755 --- a/src/sonic-ctrmgrd/ctrmgr/ctrmgrd.py +++ b/src/sonic-ctrmgrd/ctrmgr/ctrmgrd.py @@ -101,7 +101,6 @@ def log_debug(m): msg = "{}: {}".format(inspect.stack()[1][3], m) - #print(msg) syslog.syslog(syslog.LOG_DEBUG, msg) @@ -176,7 +175,7 @@ def register_db(self, db_name): self.db_connectors[db_name] = swsscommon.DBConnector(db_name, 0) - def register_timer(self, ts, handler, args=()): + def register_timer(self, ts, handler, args=None): """ Register timer based handler. The handler will be called on/after give timestamp, ts """ @@ -239,7 +238,10 @@ def run(self): lst = self.timer_handlers[k] del self.timer_handlers[k] for fn in lst: - fn[0](*fn[1]) + if fn[1] is None: + fn[0]() + else: + fn[0](*fn[1]) else: timeout = (k - ct_ts).seconds break diff --git a/src/sonic-ctrmgrd/ctrmgr/kube_commands.py b/src/sonic-ctrmgrd/ctrmgr/kube_commands.py index 91415390ccd5..783f122ec914 100755 --- a/src/sonic-ctrmgrd/ctrmgr/kube_commands.py +++ b/src/sonic-ctrmgrd/ctrmgr/kube_commands.py @@ -337,8 +337,6 @@ def _do_join(server, port, insecure): out = "" ret = 0 try: - #local_ipv6 = _get_local_ipv6() - #_download_file(server, port, insecure) _gen_cli_kubeconf(server, port, insecure) _do_reset(True) _run_command("modprobe br_netfilter") From dc0ceaa5006b1292bc886ada15d1ec0f25e03bac Mon Sep 17 00:00:00 2001 From: Yutong Zhang <90831468+yutongzhang-microsoft@users.noreply.github.com> Date: Mon, 7 Nov 2022 03:48:06 -0800 Subject: [PATCH 145/174] [TestbedV2] Add dualtor test using TestbedV2. (#12601) Add dualtor test using TestbedV2 in buildimage repo. Why I did it Add dualtor test using TestbedV2 in buildimage repo. How I did it Add dualtor test using TestbedV2 in buildimage repo. 
Signed-off-by: Yutong Zhang --- .azure-pipelines/run-test-scheduler-template.yml | 6 +++++- azure-pipelines.yml | 16 ++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/.azure-pipelines/run-test-scheduler-template.yml b/.azure-pipelines/run-test-scheduler-template.yml index 4173b062adb2..265f698e0290 100644 --- a/.azure-pipelines/run-test-scheduler-template.yml +++ b/.azure-pipelines/run-test-scheduler-template.yml @@ -26,6 +26,10 @@ parameters: type: string default: "" +- name: COMMON_EXTRA_PARAMS + type: string + default: "" + steps: - script: | set -ex @@ -37,7 +41,7 @@ steps: set -ex pip install PyYAML rm -f new_test_plan_id.txt - python ./.azure-pipelines/test_plan.py create -t ${{ parameters.TOPOLOGY }} -o new_test_plan_id.txt --min-worker ${{ parameters.MIN_WORKER }} --max-worker ${{ parameters.MAX_WORKER }} --test-set ${{ parameters.TEST_SET }} --kvm-build-id $(KVM_BUILD_ID) --deploy-mg-extra-params "${{ parameters.DEPLOY_MG_EXTRA_PARAMS }}" + python ./.azure-pipelines/test_plan.py create -t ${{ parameters.TOPOLOGY }} -o new_test_plan_id.txt --min-worker ${{ parameters.MIN_WORKER }} --max-worker ${{ parameters.MAX_WORKER }} --test-set ${{ parameters.TEST_SET }} --kvm-build-id $(KVM_BUILD_ID) --deploy-mg-extra-params "${{ parameters.DEPLOY_MG_EXTRA_PARAMS }}" --common-extra-params "${{ parameters.COMMON_EXTRA_PARAMS }}" TEST_PLAN_ID=`cat new_test_plan_id.txt` echo "Created test plan $TEST_PLAN_ID" diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 60ef591efe3c..c526b0dc3f2d 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -313,3 +313,19 @@ stages: ptf_name: ptf_vms6-4 tbtype: multi-asic-t1-lag-pr image: sonic-4asic-vs.img.gz + + - job: dualtor_testbedv2 + pool: + vmImage: 'ubuntu-20.04' + displayName: "kvmtest-dualtor-t0 by TestbedV2" + timeoutInMinutes: 1080 + condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_TESTBEDV2_TEST, 'YES')) + continueOnError: false + steps: + - template: 
.azure-pipelines/run-test-scheduler-template.yml + parameters: + TOPOLOGY: dualtor + MIN_WORKER: 1 + MAX_WORKER: 1 + COMMON_EXTRA_PARAMS: "--disable_loganalyzer " + From f0873f29d864f8dd47b546fba85173b903353408 Mon Sep 17 00:00:00 2001 From: "Richard.Yu" Date: Mon, 7 Nov 2022 21:47:52 +0800 Subject: [PATCH 146/174] [SAI PTF]Syncd-rpc and PTF docker support sai ptf v2 (#11610) * support sai-ptf-v2 in libsaithrift vs * add build target docker-ptf-sai syncd-rpcv2 and saiserverv2 Signed-off-by: richardyu-ms * add docker ptf sai Signed-off-by: richardyu-ms * add build condition for broadcom Signed-off-by: richardyu-ms * add docker syncd dbg and add debug symbol to docker-saiserverv2 Signed-off-by: richardyu-ms * correct the build option * change the azure pipeline build template Signed-off-by: richardyu-ms * change build option for docker-ptf-sai * enable ptf-sai docker build * remove the build for syncd-rpcv2 Signed-off-by: richardyu-ms * fix issue in build tempalte * ignore useless package build when build sai-ptf Signed-off-by: richardyu-ms * remove scapy version contraint Signed-off-by: richardyu-ms * remove duplicated target docker-ptf Signed-off-by: richardyu-ms * change template for testing the pipeline Signed-off-by: richardyu-ms * remove duplicated target Signed-off-by: richardyu-ms * fix error in make script Signed-off-by: richardyu-ms * add shel to setup env Signed-off-by: richardyu-ms * replace with certain platform name Signed-off-by: richardyu-ms * disable cache for syncd-rpcv2 Signed-off-by: richardyu-ms * test without cache Signed-off-by: richardyu-ms * disable cache Signed-off-by: richardyu-ms * testing: disable the cache for build syncd-rpcv2 Signed-off-by: richardyu-ms * add cache back and get the code ready for testing Signed-off-by: richardyu-ms * refactor code Signed-off-by: richardyu-ms * add workaround for issue in rules/sairedis.dep Signed-off-by: richardyu-ms * refactor code Signed-off-by: richardyu-ms Signed-off-by: richardyu-ms --- 
.azure-pipelines/azure-pipelines-build.yml | 17 +++++++++++++++ .azure-pipelines/build-template.yml | 15 +++++++++++++ dockers/docker-ptf-sai/Dockerfile.j2 | 7 ++++++- platform/broadcom/docker-saiserver-brcm.mk | 2 +- platform/broadcom/docker-syncd-brcm-rpc.mk | 21 ++++++++++++++++++- .../docker-syncd-brcm-rpc/Dockerfile.j2 | 1 + platform/vs/docker-ptf-sai.mk | 2 +- platform/vs/docker-ptf.mk | 8 ++++++- platform/vs/libsaithrift-dev.mk | 10 ++++++++- rules/syncd.mk | 8 ++++++- 10 files changed, 84 insertions(+), 7 deletions(-) diff --git a/.azure-pipelines/azure-pipelines-build.yml b/.azure-pipelines/azure-pipelines-build.yml index e8e74e8d1882..b9998b2a54e1 100644 --- a/.azure-pipelines/azure-pipelines-build.yml +++ b/.azure-pipelines/azure-pipelines-build.yml @@ -121,6 +121,7 @@ jobs: mv target/docker-sonic-vs.gz target/docker-sonic-vs-asan.gz fi make $BUILD_OPTIONS target/docker-sonic-vs.gz target/sonic-vs.img.gz target/docker-ptf.gz + make $BUILD_OPTIONS ENABLE_SYNCD_RPC=y SAITHRIFT_V2=y target/docker-ptf-sai.gz if [ $(Build.Reason) != 'PullRequest' ];then gzip -kd target/sonic-vs.img.gz SONIC_RUN_CMDS="qemu-img convert target/sonic-vs.img -O vhdx -o subformat=dynamic target/sonic-vs.vhdx" make sonic-slave-run @@ -138,9 +139,25 @@ jobs: make $BUILD_OPTIONS target/sonic-$(GROUP_NAME).raw fi if [ $(docker_syncd_rpc_image) == yes ]; then + # workaround for issue in rules/sairedis.dep, git ls-files will list un-exist files for cache make $BUILD_OPTIONS ENABLE_SYNCD_RPC=y target/docker-syncd-$(platform_rpc)-rpc.gz + pushd ./src/sonic-sairedis/SAI + git stash + popd if [ $(GROUP_NAME) == broadcom ]; then make $BUILD_OPTIONS ENABLE_SYNCD_RPC=y target/docker-syncd-$(platform_rpc)-dnx-rpc.gz + pushd ./src/sonic-sairedis/SAI + git stash + popd + make $BUILD_OPTIONS ENABLE_SYNCD_RPC=y SAITHRIFT_V2=y target/docker-saiserverv2-brcm.gz + pushd ./src/sonic-sairedis/SAI + git stash + popd + echo BUILD_OPTIONS $BUILD_OPTIONS + make $BUILD_OPTIONS ENABLE_SYNCD_RPC=y 
SAITHRIFT_V2=y target/docker-syncd-brcm-rpcv2.gz + pushd ./src/sonic-sairedis/SAI + git stash + popd fi fi if [ $(syncd_rpc_image) == yes ]; then diff --git a/.azure-pipelines/build-template.yml b/.azure-pipelines/build-template.yml index 2665f46452c7..cde438cbe563 100644 --- a/.azure-pipelines/build-template.yml +++ b/.azure-pipelines/build-template.yml @@ -92,6 +92,7 @@ jobs: fi make USERNAME=admin $CACHE_OPTIONS SONIC_BUILD_JOBS=$(nproc) target/docker-sonic-vs.gz target/sonic-vs.img.gz target/docker-ptf.gz + make USERNAME=admin $CACHE_OPTIONS SONIC_BUILD_JOBS=$(nproc) ENABLE_SYNCD_RPC=y SAITHRIFT_V2=y target/docker-ptf-sai.gz else if [ ${{ parameters.dbg_image }} == true ]; then make USERNAME=admin $CACHE_OPTIONS SONIC_BUILD_JOBS=$(nproc) INSTALL_DEBUG_TOOLS=y target/sonic-${{ parameters.platform }}.bin && \ @@ -105,6 +106,20 @@ jobs: fi if [ ${{ parameters.sync_rpc_image }} == true ]; then make USERNAME=admin $CACHE_OPTIONS SONIC_BUILD_JOBS=$(nproc) ENABLE_SYNCD_RPC=y target/docker-syncd-${{ parameters.platform_short }}-rpc.gz + # workaround for issue in rules/sairedis.dep, git ls-files will list un-exist files for cache + pushd ./src/sonic-sairedis/SAI + git stash + popd + if [ ${{ parameters.platform }} == broadcom ]; then + make USERNAME=admin $CACHE_OPTIONS SONIC_BUILD_JOBS=$(nproc) SAITHRIFT_V2=y ENABLE_SYNCD_RPC=y target/docker-syncd-brcm-rpcv2.gz + pushd ./src/sonic-sairedis/SAI + git stash + popd + make USERNAME=admin $CACHE_OPTIONS SONIC_BUILD_JOBS=$(nproc) ENABLE_SYNCD_RPC=y SAITHRIFT_V2=y target/docker-saiserverv2-brcm.gz + pushd ./src/sonic-sairedis/SAI + git stash + popd + fi fi make USERNAME=admin $CACHE_OPTIONS SONIC_BUILD_JOBS=$(nproc) target/sonic-${{ parameters.platform }}.bin diff --git a/dockers/docker-ptf-sai/Dockerfile.j2 b/dockers/docker-ptf-sai/Dockerfile.j2 index d68b39abb01e..65c24a977ed3 100644 --- a/dockers/docker-ptf-sai/Dockerfile.j2 +++ b/dockers/docker-ptf-sai/Dockerfile.j2 @@ -16,7 +16,7 @@ RUN pip3 install crc16 \ getmac \ 
packet_helper \ psutil \ - scapy==2.4.4 \ + scapy \ scapy_helper \ pysubnettree \ xmlrunner @@ -39,3 +39,8 @@ RUN dpkg -r python-ptf RUN git clone https://github.com/p4lang/ptf.git \ && cd ptf \ && python3.7 setup.py install --single-version-externally-managed --record /tmp/ptf_install.txt + +run echo "declare -x LANG=\"C.UTF-8\"" >> /root/.bashrc +run echo "declare -x LC_ALL=\"C.UTF-8\"" >> /root/.bashrc +run echo "declare -x PYTHONIOENCODING=\"UTF-8\"" >> /root/.bashrc +run echo "declare -x VIRTUAL_ENV=\"/root/env-python3\"" >> /root/.bashrc diff --git a/platform/broadcom/docker-saiserver-brcm.mk b/platform/broadcom/docker-saiserver-brcm.mk index 2ee39c8600e8..f7684a7d5b76 100644 --- a/platform/broadcom/docker-saiserver-brcm.mk +++ b/platform/broadcom/docker-saiserver-brcm.mk @@ -2,7 +2,7 @@ DOCKER_SAISERVER_BRCM = docker-saiserver$(SAITHRIFT_VER)-brcm.gz $(DOCKER_SAISERVER_BRCM)_PATH = $(PLATFORM_PATH)/docker-saiserver-brcm -$(DOCKER_SAISERVER_BRCM)_DEPENDS += $(SAISERVER) +$(DOCKER_SAISERVER_BRCM)_DEPENDS += $(SAISERVER) $(SAISERVER_DBG) $(DOCKER_SAISERVER_BRCM)_FILES += $(DSSERVE) $(BCMCMD) $(DOCKER_SAISERVER_BRCM)_LOAD_DOCKERS += $(DOCKER_CONFIG_ENGINE_BULLSEYE) SONIC_DOCKER_IMAGES += $(DOCKER_SAISERVER_BRCM) diff --git a/platform/broadcom/docker-syncd-brcm-rpc.mk b/platform/broadcom/docker-syncd-brcm-rpc.mk index 95810f1cb477..13e713534ce4 100644 --- a/platform/broadcom/docker-syncd-brcm-rpc.mk +++ b/platform/broadcom/docker-syncd-brcm-rpc.mk @@ -1,8 +1,17 @@ # docker image for brcm syncd with rpc -DOCKER_SYNCD_BRCM_RPC = docker-syncd-brcm-rpc.gz +DOCKER_SYNCD_BRCM_RPC = docker-syncd-brcm-rpc$(SAITHRIFT_VER).gz +DOCKER_SYNCD_BRCM_RPC_DBG = docker-syncd-brcm-rpc$(SAITHRIFT_VER)-$(DBG_IMAGE_MARK).gz $(DOCKER_SYNCD_BRCM_RPC)_PATH = $(PLATFORM_PATH)/docker-syncd-brcm-rpc + + +#Support two different versions of thrift +ifeq ($(SAITHRIFT_V2),y) +$(DOCKER_SYNCD_BRCM_RPC)_DEPENDS += $(SYNCD_RPC) $(LIBTHRIFT_0_14_1) $(LIBTHRIFT_0_14_1_DEV) $(PYTHON3_THRIFT_0_14_1) 
$(THRIFT_0_14_1_COMPILER) $(PTF) +else $(DOCKER_SYNCD_BRCM_RPC)_DEPENDS += $(SYNCD_RPC) $(LIBTHRIFT) $(PTF) +endif + ifeq ($(INSTALL_DEBUG_TOOLS), y) $(DOCKER_SYNCD_BRCM_RPC)_DEPENDS += $(SYNCD_RPC_DBG) \ $(LIBSWSSCOMMON_DBG) \ @@ -10,12 +19,21 @@ $(DOCKER_SYNCD_BRCM_RPC)_DEPENDS += $(SYNCD_RPC_DBG) \ $(LIBSAIREDIS_DBG) endif $(DOCKER_SYNCD_BRCM_RPC)_FILES += $(DSSERVE) $(BCMCMD) $(SUPERVISOR_PROC_EXIT_LISTENER_SCRIPT) + $(DOCKER_SYNCD_BRCM_RPC)_LOAD_DOCKERS += $(DOCKER_SYNCD_BASE) +$(DOCKER_SYNCD_BRCM_RPC)_DBG_DEPENDS += $($(DOCKER_CONFIG_ENGINE_BULLSEYE)_DBG_DEPENDS) +$(DOCKER_SYNCD_BRCM_RPC)_DBG_IMAGE_PACKAGES = $($(DOCKER_CONFIG_ENGINE_BULLSEYE)_DBG_IMAGE_PACKAGES) + SONIC_DOCKER_IMAGES += $(DOCKER_SYNCD_BRCM_RPC) ifeq ($(ENABLE_SYNCD_RPC),y) SONIC_INSTALL_DOCKER_IMAGES += $(DOCKER_SYNCD_BRCM_RPC) endif +SONIC_DOCKER_DBG_IMAGES += $(DOCKER_SYNCD_BRCM_RPC_DBG) +ifneq ($(ENABLE_SYNCD_RPC),y) +SONIC_INSTALL_DOCKER_DBG_IMAGES += $(DOCKER_SYNCD_BRCM_RPC_DBG) +endif + $(DOCKER_SYNCD_BRCM_RPC)_CONTAINER_NAME = syncd $(DOCKER_SYNCD_BRCM_RPC)_VERSION = 1.0.0+rpc $(DOCKER_SYNCD_BRCM_RPC)_PACKAGE_NAME = syncd @@ -30,3 +48,4 @@ $(DOCKER_SYNCD_BRCM_RPC)_BASE_IMAGE_FILES += bcmsh:/usr/bin/bcmsh $(DOCKER_SYNCD_BRCM_RPC)_MACHINE = broadcom SONIC_BULLSEYE_DOCKERS += $(DOCKER_SYNCD_BRCM_RPC) +SONIC_BULLSEYE_DBG_DOCKERS += $(DOCKER_SYNCD_BRCM_RPC_DBG) diff --git a/platform/broadcom/docker-syncd-brcm-rpc/Dockerfile.j2 b/platform/broadcom/docker-syncd-brcm-rpc/Dockerfile.j2 index 3517b037a40a..d6583085e2e7 100644 --- a/platform/broadcom/docker-syncd-brcm-rpc/Dockerfile.j2 +++ b/platform/broadcom/docker-syncd-brcm-rpc/Dockerfile.j2 @@ -25,6 +25,7 @@ RUN apt-get update \ cmake \ libqt5core5a \ libqt5network5 \ + gdb \ libboost-atomic1.74.0 RUN dpkg_apt() { [ -f $1 ] && { dpkg -i $1 || apt-get -y install -f; } || return 1; } ; \ diff --git a/platform/vs/docker-ptf-sai.mk b/platform/vs/docker-ptf-sai.mk index d3463c5a0930..17fce00c1fe2 100644 --- a/platform/vs/docker-ptf-sai.mk +++ 
b/platform/vs/docker-ptf-sai.mk @@ -3,7 +3,7 @@ DOCKER_PTF_SAI = docker-ptf-sai.gz DOCKER_PTF_BASE = docker-ptf.gz $(DOCKER_PTF_SAI)_PATH = $(DOCKERS_PATH)/docker-ptf-sai -$(DOCKER_PTF_SAI)_DEPENDS += $(LIBTHRIFT_2) $(PYTHON3_THRIFT_2) +$(DOCKER_PTF_SAI)_DEPENDS += $(LIBTHRIFT_0_14_1) $(PYTHON3_THRIFT_0_14_1) $(DOCKER_PTF_SAI)_LOAD_DOCKERS += $(DOCKER_PTF_BASE) SONIC_DOCKER_IMAGES += $(DOCKER_PTF_SAI) SONIC_BUSTER_DOCKERS += $(DOCKER_PTF_SAI) diff --git a/platform/vs/docker-ptf.mk b/platform/vs/docker-ptf.mk index ef102be967f4..a31757dd1f29 100644 --- a/platform/vs/docker-ptf.mk +++ b/platform/vs/docker-ptf.mk @@ -3,6 +3,12 @@ DOCKER_PTF = docker-ptf.gz $(DOCKER_PTF)_PYTHON_WHEELS += $(PTF_PY3) $(DOCKER_PTF)_PATH = $(DOCKERS_PATH)/docker-ptf -$(DOCKER_PTF)_DEPENDS += $(LIBTHRIFT) $(PYTHON_THRIFT) $(PTF) $(PYTHON_SAITHRIFT) +$(DOCKER_PTF)_DEPENDS += $(LIBTHRIFT) $(PYTHON_THRIFT) $(PTF) +#Don't need PYTHON_SAITHRIFT in SAITHRIFT_V2 environment +ifeq ($(SAITHRIFT_V2),) +$(DOCKER_PTF)_DEPENDS += $(PYTHON_SAITHRIFT) +else +$(info "SAITHRIFT_V2: $(SAITHRIFT_V2):skip install $(PYTHON_SAITHRIFT).") +endif SONIC_DOCKER_IMAGES += $(DOCKER_PTF) SONIC_BUSTER_DOCKERS += $(DOCKER_PTF) diff --git a/platform/vs/libsaithrift-dev.mk b/platform/vs/libsaithrift-dev.mk index ba8a63d539ac..9519078a1052 100644 --- a/platform/vs/libsaithrift-dev.mk +++ b/platform/vs/libsaithrift-dev.mk @@ -2,12 +2,20 @@ SAI_VER = 0.9.4 -LIBSAITHRIFT_DEV = libsaithrift-dev_$(SAI_VER)_$(CONFIGURED_ARCH).deb +LIBSAITHRIFT_DEV = libsaithrift$(SAITHRIFT_VER)-dev_$(SAI_VER)_$(CONFIGURED_ARCH).deb $(LIBSAITHRIFT_DEV)_SRC_PATH = $(SRC_PATH)/sonic-sairedis/SAI +#Support two different versions of thrift +ifeq ($(SAITHRIFT_V2),y) +$(LIBSAITHRIFT_DEV)_DEPENDS += $(LIBTHRIFT_0_14_1) $(LIBTHRIFT_0_14_1_DEV) $(PYTHON3_THRIFT_0_14_1) $(THRIFT_0_14_1_COMPILER) \ + $(LIBSAIVS) $(LIBSAIVS_DEV) $(LIBSAIMETADATA) $(LIBSAIMETADATA_DEV) +$(LIBSAITHRIFT_DEV)_RDEPENDS += $(LIBTHRIFT_0_14_1) $(LIBSAIVS) $(LIBSAIMETADATA) 
+$(LIBSAITHRIFT_DEV)_BUILD_ENV = SAITHRIFTV2=true SAITHRIFT_VER=v2 platform=vs +else $(LIBSAITHRIFT_DEV)_DEPENDS += $(LIBTHRIFT) $(LIBTHRIFT_DEV) $(PYTHON_THRIFT) $(THRIFT_COMPILER) \ $(LIBSAIVS) $(LIBSAIVS_DEV) $(LIBSAIMETADATA) $(LIBSAIMETADATA_DEV) $(LIBSAITHRIFT_DEV)_RDEPENDS += $(LIBTHRIFT) $(LIBSAIVS) $(LIBSAIMETADATA) $(LIBSAITHRIFT_DEV)_BUILD_ENV = platform=vs +endif SONIC_DPKG_DEBS += $(LIBSAITHRIFT_DEV) PYTHON_SAITHRIFT = python-saithrift_$(SAI_VER)_$(CONFIGURED_ARCH).deb diff --git a/rules/syncd.mk b/rules/syncd.mk index 225f7ec0574c..e62b2a66bc6c 100644 --- a/rules/syncd.mk +++ b/rules/syncd.mk @@ -17,7 +17,13 @@ $(SYNCD_RPC)_RDEPENDS += $(LIBSAIREDIS) $(LIBSAIMETADATA) $(eval $(call add_derived_package,$(SYNCD),$(SYNCD_RPC))) # Inject libthrift build dependency for RPC build -$(SYNCD)_DEPENDS += $(LIBSWSSCOMMON_DEV) $(LIBTHRIFT_DEV) +# Support two different versions of thrift +ifeq ($(SAITHRIFT_V2),y) +$(SYNCD)_DEPENDS += $(LIBTHRIFT_0_14_1_DEV) +else +$(SYNCD)_DEPENDS += $(LIBTHRIFT_DEV) +endif +$(SYNCD)_DEPENDS += $(LIBSWSSCOMMON_DEV) $(SYNCD)_DPKG_TARGET = binary-syncd-rpc endif From 00178187d003370ef655c315a3fd04c99fb2009e Mon Sep 17 00:00:00 2001 From: Andriy Kokhan Date: Mon, 7 Nov 2022 16:06:52 +0200 Subject: [PATCH 147/174] [BFN] Fixed FANs indexing for multi-drawer case (#12491) Why I did it In case the device contains more then one FAN drawer, the FANs name was incorrect. How I did it Passed max fan value to FAN object. 
Fixed get_name() FAN API How to verify it show platform fan --- .../sonic_platform/fan.py | 10 +++++----- .../sonic_platform/fan_drawer.py | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/fan.py b/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/fan.py index c13382fddb55..371bb86371a1 100644 --- a/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/fan.py +++ b/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/fan.py @@ -17,15 +17,15 @@ def get_data(client): # Fan -> FanBase -> DeviceBase class Fan(FanBase): - def __init__(self, index, fantrayindex): + def __init__(self, index, max_index, fantrayindex): self.__index = index - self.__fantrayindex = fantrayindex + self.__glob_index = (fantrayindex - 1) * max_index + self.__index # FanBase interface methods: # returns speed in percents def get_speed(self): def cb(info): return info.percent - return _fan_info_get(self.__index, cb, 0) + return _fan_info_get(self.__glob_index, cb, 0) def set_speed(self, percent): # Fan tray speed controlled by BMC @@ -33,10 +33,10 @@ def set_speed(self, percent): # DeviceBase interface methods: def get_name(self): - return "counter-rotating-fan-{}".format((self.__fantrayindex - 1) * self.__index + self.__index) + return "counter-rotating-fan-{}".format(self.__glob_index) def get_presence(self): - return _fan_info_get(self.__index, lambda _: True, False) + return _fan_info_get(self.__glob_index, lambda _: True, False) def get_position_in_parent(self): return self.__index diff --git a/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/fan_drawer.py b/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/fan_drawer.py index 269d3d43b0bf..0711d1755d9f 100644 --- a/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/fan_drawer.py +++ 
b/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/fan_drawer.py @@ -10,7 +10,7 @@ class FanDrawer(FanDrawerBase): def __init__(self, fantray_index, max_fan): # For now we return only present fans self.fantrayindex = fantray_index - self._fan_list = [Fan(i, self.fantrayindex) for i in range(1, max_fan + 1)] + self._fan_list = [Fan(i, max_fan, self.fantrayindex) for i in range(1, max_fan + 1)] # DeviceBase interface methods: def get_name(self): From 5b0c4ec1e64f0bc604a3fd3e828b46d8974544d8 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Mon, 7 Nov 2022 10:31:32 -0500 Subject: [PATCH 148/174] [device/accton] Replace os.system and remove subprocess with shell=True (#11985) Signed-off-by: maipbui #### Why I did it `subprocess.Popen()` and `subprocess.run()` is used with `shell=True`, which is very dangerous for shell injection. `os` - not secure against maliciously constructed input and dangerous if used to evaluate dynamic content #### How I did it Replace `os` by `subprocess` Remove unused functions --- .../sonic_platform/chassis.py | 6 +- .../sonic_platform/component.py | 19 ----- .../sonic_platform/helper.py | 73 +---------------- .../sonic_platform/sfp.py | 7 +- .../sonic_platform/chassis.py | 6 +- .../sonic_platform/component.py | 19 ----- .../sonic_platform/helper.py | 73 +---------------- .../sonic_platform/sfp.py | 7 +- .../sonic_platform/chassis.py | 6 +- .../sonic_platform/component.py | 21 ----- .../sonic_platform/helper.py | 73 +---------------- .../sonic_platform/sfp.py | 7 +- .../sonic_platform/chassis.py | 9 +- .../sonic_platform/component.py | 6 +- .../sonic_platform/sfp.py | 6 +- .../sonic_platform/chassis.py | 6 +- .../sonic_platform/component.py | 18 ---- .../sonic_platform/helper.py | 73 +---------------- .../sonic_platform/sfp.py | 7 +- .../sonic_platform/chassis.py | 6 +- .../sonic_platform/sfp.py | 7 +- .../sonic_platform/chassis.py | 6 +- .../sonic_platform/component.py | 18 ---- .../sonic_platform/helper.py | 73 
+---------------- .../sonic_platform/sfp.py | 7 +- .../sonic_platform/chassis.py | 6 +- .../sonic_platform/component.py | 19 ----- .../sonic_platform/helper.py | 73 +---------------- .../sonic_platform/sfp.py | 7 +- .../as4630-54pe/sonic_platform/component.py | 6 +- .../utils/accton_as4630_54pe_monitor.py | 6 +- .../utils/accton_as4630_54pe_pddf_monitor.py | 19 +++-- .../as4630-54pe/utils/pddf_switch_svc.py | 82 +++++++++---------- .../utils/accton_as4630_54te_monitor.py | 5 +- .../as7315-27xb/classes/fanutil.py | 11 ++- .../as7326-56x/utils/accton_as7326_monitor.py | 10 +-- .../utils/accton_as7326_pddf_monitor.py | 10 +-- .../as7326-56x/utils/accton_as7326_util.py | 18 ++-- .../as7326-56x/utils/pddf_switch_svc.py | 26 +++--- .../as7712-32x/utils/pddf_switch_svc.py | 24 +++--- .../as7716-32x/utils/accton_as7716_util.py | 13 +-- .../utils/accton_as7726_32x_monitor.py | 10 +-- .../utils/accton_as7726_32x_pddf_monitor.py | 10 +-- .../utils/accton_as7726_32x_util.py | 13 +-- .../as7726-32x/utils/pddf_switch_svc.py | 36 ++++---- .../utils/accton_as7816_64x_util.py | 15 ++-- .../as7816-64x/utils/pddf_switch_svc.py | 18 ++-- .../utils/accton_as9716_32d_util.py | 13 +-- .../as9716-32d/utils/pddf_switch_svc.py | 26 +++--- 49 files changed, 247 insertions(+), 788 deletions(-) diff --git a/device/accton/x86_64-accton_as4630_54pe-r0/sonic_platform/chassis.py b/device/accton/x86_64-accton_as4630_54pe-r0/sonic_platform/chassis.py index 310d0433d8bc..c1192b3a2eb2 100644 --- a/device/accton/x86_64-accton_as4630_54pe-r0/sonic_platform/chassis.py +++ b/device/accton/x86_64-accton_as4630_54pe-r0/sonic_platform/chassis.py @@ -6,8 +6,8 @@ # ############################################################################# -import os import sys +import subprocess try: from sonic_platform_base.chassis_base import ChassisBase @@ -27,7 +27,7 @@ PMON_REBOOT_CAUSE_PATH = "/usr/share/sonic/platform/api_files/reboot-cause/" REBOOT_CAUSE_FILE = "reboot-cause.txt" PREV_REBOOT_CAUSE_FILE = 
"previous-reboot-cause.txt" -HOST_CHK_CMD = "which systemctl > /dev/null 2>&1" +HOST_CHK_CMD = ["which", "systemctl"] SYSLED_FNODE = "/sys/class/leds/diag/brightness" SYSLED_MODES = { "0" : "STATUS_LED_COLOR_OFF", @@ -97,7 +97,7 @@ def __initialize_watchdog(self): def __is_host(self): - return os.system(HOST_CHK_CMD) == 0 + return subprocess.call(HOST_CHK_CMD) == 0 def __read_txt_file(self, file_path): try: diff --git a/device/accton/x86_64-accton_as4630_54pe-r0/sonic_platform/component.py b/device/accton/x86_64-accton_as4630_54pe-r0/sonic_platform/component.py index 53a01c1f1475..c942a2fed976 100644 --- a/device/accton/x86_64-accton_as4630_54pe-r0/sonic_platform/component.py +++ b/device/accton/x86_64-accton_as4630_54pe-r0/sonic_platform/component.py @@ -6,9 +6,6 @@ # ############################################################################# -import shlex -import subprocess - try: from sonic_platform_base.component_base import ComponentBase @@ -38,22 +35,6 @@ def __init__(self, component_index=0): self.index = component_index self.name = self.get_name() - def __run_command(self, command): - # Run bash command and print output to stdout - try: - process = subprocess.Popen( - shlex.split(command), stdout=subprocess.PIPE) - while True: - output = process.stdout.readline() - if output == '' and process.poll() is not None: - break - rc = process.poll() - if rc != 0: - return False - except Exception: - return False - return True - def __get_bios_version(self): # Retrieves the BIOS firmware version try: diff --git a/device/accton/x86_64-accton_as4630_54pe-r0/sonic_platform/helper.py b/device/accton/x86_64-accton_as4630_54pe-r0/sonic_platform/helper.py index b124ca29f0df..b19fab85deb8 100644 --- a/device/accton/x86_64-accton_as4630_54pe-r0/sonic_platform/helper.py +++ b/device/accton/x86_64-accton_as4630_54pe-r0/sonic_platform/helper.py @@ -4,7 +4,7 @@ from mmap import * from sonic_py_common import device_info -HOST_CHK_CMD = "docker > /dev/null 2>&1" +HOST_CHK_CMD = 
["docker"] EMPTY_STRING = "" @@ -14,7 +14,7 @@ def __init__(self): (self.platform, self.hwsku) = device_info.get_platform_and_hwsku() def is_host(self): - return os.system(HOST_CHK_CMD) == 0 + return subprocess.call(HOST_CHK_CMD) == 0 def pci_get_value(self, resource, offset): status = True @@ -29,26 +29,6 @@ def pci_get_value(self, resource, offset): status = False return status, result - def run_command(self, cmd): - status = True - result = "" - try: - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - except Exception: - status = False - return status, result - - def run_interactive_command(self, cmd): - try: - os.system(cmd) - except Exception: - return False - return True - def read_txt_file(self, file_path): try: with open(file_path, 'r', errors='replace') as fd: @@ -66,52 +46,3 @@ def write_txt_file(self, file_path, value): return False return True - def ipmi_raw(self, netfn, cmd): - status = True - result = "" - try: - cmd = "ipmitool raw {} {}".format(str(netfn), str(cmd)) - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - else: - status = False - except Exception: - status = False - return status, result - - def ipmi_fru_id(self, id, key=None): - status = True - result = "" - try: - cmd = "ipmitool fru print {}".format(str( - id)) if not key else "ipmitool fru print {0} | grep '{1}' ".format(str(id), str(key)) - - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - else: - status = False - except Exception: - status = False - return status, result - - def ipmi_set_ss_thres(self, id, threshold_key, value): - status = True - result = "" - try: - cmd = "ipmitool sensor thresh '{}' {} {}".format(str(id), 
str(threshold_key), str(value)) - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - else: - status = False - except Exception: - status = False - return status, result diff --git a/device/accton/x86_64-accton_as4630_54pe-r0/sonic_platform/sfp.py b/device/accton/x86_64-accton_as4630_54pe-r0/sonic_platform/sfp.py index c421761025cf..4205b17efadd 100644 --- a/device/accton/x86_64-accton_as4630_54pe-r0/sonic_platform/sfp.py +++ b/device/accton/x86_64-accton_as4630_54pe-r0/sonic_platform/sfp.py @@ -6,10 +6,9 @@ # ############################################################################# -import os import time import sys - +import subprocess from ctypes import create_string_buffer try: @@ -30,7 +29,7 @@ class Sfp(SfpOptoeBase): # Path to sysfs PLATFORM_ROOT_PATH = "/usr/share/sonic/device" PMON_HWSKU_PATH = "/usr/share/sonic/hwsku" - HOST_CHK_CMD = "which systemctl > /dev/null 2>&1" + HOST_CHK_CMD = ["which", "systemctl"] PLATFORM = "x86_64-accton_as4630_54pe-r0" HWSKU = "Accton-AS4630-54PE" @@ -60,7 +59,7 @@ def get_eeprom_path(self): return self.port_to_eeprom_mapping[self.port_num] def __is_host(self): - return os.system(self.HOST_CHK_CMD) == 0 + return subprocess.call(self.HOST_CHK_CMD) == 0 def __get_path_to_port_config_file(self): platform_path = "/".join([self.PLATFORM_ROOT_PATH, self.PLATFORM]) diff --git a/device/accton/x86_64-accton_as4630_54te-r0/sonic_platform/chassis.py b/device/accton/x86_64-accton_as4630_54te-r0/sonic_platform/chassis.py index 7a54eee6f478..9f39c903c686 100644 --- a/device/accton/x86_64-accton_as4630_54te-r0/sonic_platform/chassis.py +++ b/device/accton/x86_64-accton_as4630_54te-r0/sonic_platform/chassis.py @@ -6,7 +6,7 @@ # ############################################################################# -import os +import subprocess try: from sonic_platform_base.chassis_base import ChassisBase @@ -26,7 +26,7 @@ 
PMON_REBOOT_CAUSE_PATH = "/usr/share/sonic/platform/api_files/reboot-cause/" REBOOT_CAUSE_FILE = "reboot-cause.txt" PREV_REBOOT_CAUSE_FILE = "previous-reboot-cause.txt" -HOST_CHK_CMD = "docker > /dev/null 2>&1" +HOST_CHK_CMD = ["docker"] SYSLED_FNODE = "/sys/class/leds/diag/brightness" SYSLED_MODES = { "0" : "STATUS_LED_COLOR_OFF", @@ -95,7 +95,7 @@ def __initialize_watchdog(self): def __is_host(self): - return os.system(HOST_CHK_CMD) == 0 + return subprocess.call(HOST_CHK_CMD) == 0 def __read_txt_file(self, file_path): try: diff --git a/device/accton/x86_64-accton_as4630_54te-r0/sonic_platform/component.py b/device/accton/x86_64-accton_as4630_54te-r0/sonic_platform/component.py index c34da704432a..8137d0f3a790 100644 --- a/device/accton/x86_64-accton_as4630_54te-r0/sonic_platform/component.py +++ b/device/accton/x86_64-accton_as4630_54te-r0/sonic_platform/component.py @@ -6,9 +6,6 @@ # ############################################################################# -import shlex -import subprocess - try: from sonic_platform_base.component_base import ComponentBase @@ -38,22 +35,6 @@ def __init__(self, component_index=0): self.index = component_index self.name = self.get_name() - def __run_command(self, command): - # Run bash command and print output to stdout - try: - process = subprocess.Popen( - shlex.split(command), stdout=subprocess.PIPE) - while True: - output = process.stdout.readline() - if output == '' and process.poll() is not None: - break - rc = process.poll() - if rc != 0: - return False - except Exception: - return False - return True - def __get_bios_version(self): # Retrieves the BIOS firmware version try: diff --git a/device/accton/x86_64-accton_as4630_54te-r0/sonic_platform/helper.py b/device/accton/x86_64-accton_as4630_54te-r0/sonic_platform/helper.py index b124ca29f0df..b19fab85deb8 100644 --- a/device/accton/x86_64-accton_as4630_54te-r0/sonic_platform/helper.py +++ b/device/accton/x86_64-accton_as4630_54te-r0/sonic_platform/helper.py @@ -4,7 +4,7 
@@ from mmap import * from sonic_py_common import device_info -HOST_CHK_CMD = "docker > /dev/null 2>&1" +HOST_CHK_CMD = ["docker"] EMPTY_STRING = "" @@ -14,7 +14,7 @@ def __init__(self): (self.platform, self.hwsku) = device_info.get_platform_and_hwsku() def is_host(self): - return os.system(HOST_CHK_CMD) == 0 + return subprocess.call(HOST_CHK_CMD) == 0 def pci_get_value(self, resource, offset): status = True @@ -29,26 +29,6 @@ def pci_get_value(self, resource, offset): status = False return status, result - def run_command(self, cmd): - status = True - result = "" - try: - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - except Exception: - status = False - return status, result - - def run_interactive_command(self, cmd): - try: - os.system(cmd) - except Exception: - return False - return True - def read_txt_file(self, file_path): try: with open(file_path, 'r', errors='replace') as fd: @@ -66,52 +46,3 @@ def write_txt_file(self, file_path, value): return False return True - def ipmi_raw(self, netfn, cmd): - status = True - result = "" - try: - cmd = "ipmitool raw {} {}".format(str(netfn), str(cmd)) - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - else: - status = False - except Exception: - status = False - return status, result - - def ipmi_fru_id(self, id, key=None): - status = True - result = "" - try: - cmd = "ipmitool fru print {}".format(str( - id)) if not key else "ipmitool fru print {0} | grep '{1}' ".format(str(id), str(key)) - - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - else: - status = False - except Exception: - status = False - return status, result - - def ipmi_set_ss_thres(self, id, 
threshold_key, value): - status = True - result = "" - try: - cmd = "ipmitool sensor thresh '{}' {} {}".format(str(id), str(threshold_key), str(value)) - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - else: - status = False - except Exception: - status = False - return status, result diff --git a/device/accton/x86_64-accton_as4630_54te-r0/sonic_platform/sfp.py b/device/accton/x86_64-accton_as4630_54te-r0/sonic_platform/sfp.py index 4529c2f0ac88..adab3d294c16 100644 --- a/device/accton/x86_64-accton_as4630_54te-r0/sonic_platform/sfp.py +++ b/device/accton/x86_64-accton_as4630_54te-r0/sonic_platform/sfp.py @@ -8,10 +8,9 @@ # ############################################################################# -import os import time import sys - +import subprocess from ctypes import create_string_buffer try: @@ -127,7 +126,7 @@ class Sfp(SfpBase): # Path to sysfs PLATFORM_ROOT_PATH = "/usr/share/sonic/device" PMON_HWSKU_PATH = "/usr/share/sonic/hwsku" - HOST_CHK_CMD = "docker > /dev/null 2>&1" + HOST_CHK_CMD = ["docker"] PLATFORM = "x86_64-accton_as4630_54te-r0" HWSKU = "Accton-AS4630-54TE" @@ -193,7 +192,7 @@ def __write_txt_file(self, file_path, value): return True def __is_host(self): - return os.system(self.HOST_CHK_CMD) == 0 + return subprocess.call(self.HOST_CHK_CMD) == 0 def __get_path_to_port_config_file(self): platform_path = "/".join([self.PLATFORM_ROOT_PATH, self.PLATFORM]) diff --git a/device/accton/x86_64-accton_as5835_54x-r0/sonic_platform/chassis.py b/device/accton/x86_64-accton_as5835_54x-r0/sonic_platform/chassis.py index dce9f3c150cd..c4a062f49a96 100644 --- a/device/accton/x86_64-accton_as5835_54x-r0/sonic_platform/chassis.py +++ b/device/accton/x86_64-accton_as5835_54x-r0/sonic_platform/chassis.py @@ -6,8 +6,8 @@ # ############################################################################# -import os import sys +import subprocess try: 
from sonic_platform_base.chassis_base import ChassisBase @@ -28,7 +28,7 @@ PMON_REBOOT_CAUSE_PATH = "/usr/share/sonic/platform/api_files/reboot-cause/" REBOOT_CAUSE_FILE = "reboot-cause.txt" PREV_REBOOT_CAUSE_FILE = "previous-reboot-cause.txt" -HOST_CHK_CMD = "which systemctl > /dev/null 2>&1" +HOST_CHK_CMD = ["which", "systemctl"] SYSLED_FNODE= "/sys/class/leds/as5835_54x_led::diag/brightness" SYSLED_MODES = { @@ -98,7 +98,7 @@ def __initialize_watchdog(self): def __is_host(self): - return os.system(HOST_CHK_CMD) == 0 + return subprocess.call(HOST_CHK_CMD) == 0 def __read_txt_file(self, file_path): try: diff --git a/device/accton/x86_64-accton_as5835_54x-r0/sonic_platform/component.py b/device/accton/x86_64-accton_as5835_54x-r0/sonic_platform/component.py index f3c9b3cee754..ef47a154e954 100644 --- a/device/accton/x86_64-accton_as5835_54x-r0/sonic_platform/component.py +++ b/device/accton/x86_64-accton_as5835_54x-r0/sonic_platform/component.py @@ -6,10 +6,6 @@ # ############################################################################# -import shlex -import subprocess - - try: from sonic_platform_base.component_base import ComponentBase from .helper import APIHelper @@ -44,23 +40,6 @@ def __init__(self, component_index=0): self.index = component_index self.name = self.get_name() - - def __run_command(self, command): - # Run bash command and print output to stdout - try: - process = subprocess.Popen( - shlex.split(command), stdout=subprocess.PIPE) - while True: - output = process.stdout.readline() - if output == '' and process.poll() is not None: - break - rc = process.poll() - if rc != 0: - return False - except Exception: - return False - return True - def __get_bios_version(self): # Retrieves the BIOS firmware version try: diff --git a/device/accton/x86_64-accton_as5835_54x-r0/sonic_platform/helper.py b/device/accton/x86_64-accton_as5835_54x-r0/sonic_platform/helper.py index b124ca29f0df..b19fab85deb8 100644 --- 
a/device/accton/x86_64-accton_as5835_54x-r0/sonic_platform/helper.py +++ b/device/accton/x86_64-accton_as5835_54x-r0/sonic_platform/helper.py @@ -4,7 +4,7 @@ from mmap import * from sonic_py_common import device_info -HOST_CHK_CMD = "docker > /dev/null 2>&1" +HOST_CHK_CMD = ["docker"] EMPTY_STRING = "" @@ -14,7 +14,7 @@ def __init__(self): (self.platform, self.hwsku) = device_info.get_platform_and_hwsku() def is_host(self): - return os.system(HOST_CHK_CMD) == 0 + return subprocess.call(HOST_CHK_CMD) == 0 def pci_get_value(self, resource, offset): status = True @@ -29,26 +29,6 @@ def pci_get_value(self, resource, offset): status = False return status, result - def run_command(self, cmd): - status = True - result = "" - try: - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - except Exception: - status = False - return status, result - - def run_interactive_command(self, cmd): - try: - os.system(cmd) - except Exception: - return False - return True - def read_txt_file(self, file_path): try: with open(file_path, 'r', errors='replace') as fd: @@ -66,52 +46,3 @@ def write_txt_file(self, file_path, value): return False return True - def ipmi_raw(self, netfn, cmd): - status = True - result = "" - try: - cmd = "ipmitool raw {} {}".format(str(netfn), str(cmd)) - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - else: - status = False - except Exception: - status = False - return status, result - - def ipmi_fru_id(self, id, key=None): - status = True - result = "" - try: - cmd = "ipmitool fru print {}".format(str( - id)) if not key else "ipmitool fru print {0} | grep '{1}' ".format(str(id), str(key)) - - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': 
- result = raw_data.strip() - else: - status = False - except Exception: - status = False - return status, result - - def ipmi_set_ss_thres(self, id, threshold_key, value): - status = True - result = "" - try: - cmd = "ipmitool sensor thresh '{}' {} {}".format(str(id), str(threshold_key), str(value)) - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - else: - status = False - except Exception: - status = False - return status, result diff --git a/device/accton/x86_64-accton_as5835_54x-r0/sonic_platform/sfp.py b/device/accton/x86_64-accton_as5835_54x-r0/sonic_platform/sfp.py index ec16e80568f4..39813a87728d 100644 --- a/device/accton/x86_64-accton_as5835_54x-r0/sonic_platform/sfp.py +++ b/device/accton/x86_64-accton_as5835_54x-r0/sonic_platform/sfp.py @@ -6,10 +6,9 @@ # ############################################################################# -import os import time import sys - +import subprocess from ctypes import create_string_buffer try: @@ -31,7 +30,7 @@ class Sfp(SfpOptoeBase): # Path to sysfs PLATFORM_ROOT_PATH = "/usr/share/sonic/device" PMON_HWSKU_PATH = "/usr/share/sonic/hwsku" - HOST_CHK_CMD = "which systemctl > /dev/null 2>&1" + HOST_CHK_CMD = ["which", "systemctl"] PLATFORM = "x86_64-accton_as5835_54x-r0" HWSKU = "Accton-AS5835-54X" @@ -121,7 +120,7 @@ def __get_cpld_num(self, port_num): def __is_host(self): - return os.system(self.HOST_CHK_CMD) == 0 + return subprocess(self.HOST_CHK_CMD).returncode == 0 def __get_path_to_port_config_file(self): platform_path = "/".join([self.PLATFORM_ROOT_PATH, self.PLATFORM]) diff --git a/device/accton/x86_64-accton_as7116_54x-r0/sonic_platform/chassis.py b/device/accton/x86_64-accton_as7116_54x-r0/sonic_platform/chassis.py index 86ae121c6c82..178f3c56265a 100644 --- a/device/accton/x86_64-accton_as7116_54x-r0/sonic_platform/chassis.py +++ 
b/device/accton/x86_64-accton_as7116_54x-r0/sonic_platform/chassis.py @@ -5,12 +5,7 @@ # ############################################################################# try: - import sys - import re - import os import subprocess - import json - import syslog from sonic_platform_base.chassis_base import ChassisBase from sonic_py_common.logger import Logger from sonic_platform.fan import Fan @@ -36,7 +31,7 @@ REBOOT_CAUSE_FILE = "reboot-cause.txt" PREV_REBOOT_CAUSE_FILE = "previous-reboot-cause.txt" COMPONENT_NAME_LIST = ["BIOS"] -HOST_CHK_CMD = "docker > /dev/null 2>&1" +HOST_CHK_CMD = ["docker"] class Chassis(ChassisBase): @@ -71,7 +66,7 @@ def __init__(self): logger.log_info("Chassis loaded successfully") def __is_host(self): - return os.system(HOST_CHK_CMD) == 0 + return subprocess.call(HOST_CHK_CMD) == 0 def __read_txt_file(self, file_path): try: diff --git a/device/accton/x86_64-accton_as7116_54x-r0/sonic_platform/component.py b/device/accton/x86_64-accton_as7116_54x-r0/sonic_platform/component.py index 15d8e9e15e9d..5e1e800d52f9 100644 --- a/device/accton/x86_64-accton_as7116_54x-r0/sonic_platform/component.py +++ b/device/accton/x86_64-accton_as7116_54x-r0/sonic_platform/component.py @@ -3,10 +3,7 @@ # provides the components firmware management function ############################################################################# -import json import os.path -import shutil -import shlex import subprocess try: @@ -29,8 +26,7 @@ def __init__(self, component_name): def __run_command(self, command): # Run bash command and print output to stdout try: - process = subprocess.Popen( - shlex.split(command), universal_newlines=True, stdout=subprocess.PIPE) + process = subprocess.Popen(command, universal_newlines=True, stdout=subprocess.PIPE) while True: output = process.stdout.readline() if output == '' and process.poll() is not None: diff --git a/device/accton/x86_64-accton_as7116_54x-r0/sonic_platform/sfp.py 
b/device/accton/x86_64-accton_as7116_54x-r0/sonic_platform/sfp.py index 6aabc144117c..39c21c0d11c9 100644 --- a/device/accton/x86_64-accton_as7116_54x-r0/sonic_platform/sfp.py +++ b/device/accton/x86_64-accton_as7116_54x-r0/sonic_platform/sfp.py @@ -3,10 +3,8 @@ # provides the sfp device status which are available in the platform ############################################################################# try: - import os import time import subprocess - import syslog from ctypes import create_string_buffer from sonic_platform_base.sfp_base import SfpBase from sonic_platform_base.sonic_sfp.sff8472 import sff8472Dom @@ -173,7 +171,7 @@ class Sfp(SfpBase): PLATFORM_ROOT_PATH = '/usr/share/sonic/device' PMON_HWSKU_PATH = '/usr/share/sonic/hwsku' - HOST_CHK_CMD = "docker > /dev/null 2>&1" + HOST_CHK_CMD = ["docker"] PLATFORM = "x86_64-accton_as7116_54x-r0" HWSKU = "Accton-AS7116-54X-R0" @@ -233,7 +231,7 @@ def __read_txt_file(self, file_path): return "" def __is_host(self): - return os.system(self.HOST_CHK_CMD) == 0 + return subprocess.call(self.HOST_CHK_CMD) == 0 def __get_path_to_port_config_file(self): platform_path = "/".join([self.PLATFORM_ROOT_PATH, self.PLATFORM]) diff --git a/device/accton/x86_64-accton_as7312_54x-r0/sonic_platform/chassis.py b/device/accton/x86_64-accton_as7312_54x-r0/sonic_platform/chassis.py index ceece0f9db83..a1143b11254d 100644 --- a/device/accton/x86_64-accton_as7312_54x-r0/sonic_platform/chassis.py +++ b/device/accton/x86_64-accton_as7312_54x-r0/sonic_platform/chassis.py @@ -6,7 +6,7 @@ # ############################################################################# -import os +import subprocess try: from sonic_platform_base.chassis_base import ChassisBase @@ -24,7 +24,7 @@ PMON_REBOOT_CAUSE_PATH = "/usr/share/sonic/platform/api_files/reboot-cause/" REBOOT_CAUSE_FILE = "reboot-cause.txt" PREV_REBOOT_CAUSE_FILE = "previous-reboot-cause.txt" -HOST_CHK_CMD = "docker > /dev/null 2>&1" +HOST_CHK_CMD = ["docker"] class Chassis(ChassisBase): @@ 
-87,7 +87,7 @@ def __initialize_watchdog(self): def __is_host(self): - return os.system(HOST_CHK_CMD) == 0 + return subprocess.call(HOST_CHK_CMD) == 0 def __read_txt_file(self, file_path): try: diff --git a/device/accton/x86_64-accton_as7312_54x-r0/sonic_platform/component.py b/device/accton/x86_64-accton_as7312_54x-r0/sonic_platform/component.py index 5300e1e73233..2ba405f9f8ad 100644 --- a/device/accton/x86_64-accton_as7312_54x-r0/sonic_platform/component.py +++ b/device/accton/x86_64-accton_as7312_54x-r0/sonic_platform/component.py @@ -6,8 +6,6 @@ # ############################################################################# -import shlex -import subprocess try: from sonic_platform_base.component_base import ComponentBase @@ -43,22 +41,6 @@ def __init__(self, component_index=0): self.index = component_index self.name = self.get_name() - def __run_command(self, command): - # Run bash command and print output to stdout - try: - process = subprocess.Popen( - shlex.split(command), stdout=subprocess.PIPE) - while True: - output = process.stdout.readline() - if output == '' and process.poll() is not None: - break - rc = process.poll() - if rc != 0: - return False - except Exception: - return False - return True - def __get_bios_version(self): # Retrieves the BIOS firmware version try: diff --git a/device/accton/x86_64-accton_as7312_54x-r0/sonic_platform/helper.py b/device/accton/x86_64-accton_as7312_54x-r0/sonic_platform/helper.py index 4cd60ac90611..2c644ecbfb88 100644 --- a/device/accton/x86_64-accton_as7312_54x-r0/sonic_platform/helper.py +++ b/device/accton/x86_64-accton_as7312_54x-r0/sonic_platform/helper.py @@ -4,7 +4,7 @@ from mmap import * from sonic_py_common import device_info -HOST_CHK_CMD = "docker > /dev/null 2>&1" +HOST_CHK_CMD = ["docker"] EMPTY_STRING = "" @@ -14,7 +14,7 @@ def __init__(self): (self.platform, self.hwsku) = device_info.get_platform_and_hwsku() def is_host(self): - return os.system(HOST_CHK_CMD) == 0 + return 
subprocess.call(HOST_CHK_CMD) == 0 def pci_get_value(self, resource, offset): status = True @@ -29,26 +29,6 @@ def pci_get_value(self, resource, offset): status = False return status, result - def run_command(self, cmd): - status = True - result = "" - try: - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - except Exception: - status = False - return status, result - - def run_interactive_command(self, cmd): - try: - os.system(cmd) - except Exception: - return False - return True - def read_txt_file(self, file_path): try: with open(file_path, 'r') as fd: @@ -66,52 +46,3 @@ def write_txt_file(self, file_path, value): return False return True - def ipmi_raw(self, netfn, cmd): - status = True - result = "" - try: - cmd = "ipmitool raw {} {}".format(str(netfn), str(cmd)) - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - else: - status = False - except Exception: - status = False - return status, result - - def ipmi_fru_id(self, id, key=None): - status = True - result = "" - try: - cmd = "ipmitool fru print {}".format(str( - id)) if not key else "ipmitool fru print {0} | grep '{1}' ".format(str(id), str(key)) - - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - else: - status = False - except Exception: - status = False - return status, result - - def ipmi_set_ss_thres(self, id, threshold_key, value): - status = True - result = "" - try: - cmd = "ipmitool sensor thresh '{}' {} {}".format(str(id), str(threshold_key), str(value)) - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - else: - status = 
False - except Exception: - status = False - return status, result diff --git a/device/accton/x86_64-accton_as7312_54x-r0/sonic_platform/sfp.py b/device/accton/x86_64-accton_as7312_54x-r0/sonic_platform/sfp.py index 445b39edc73d..fd6d0f8524f0 100644 --- a/device/accton/x86_64-accton_as7312_54x-r0/sonic_platform/sfp.py +++ b/device/accton/x86_64-accton_as7312_54x-r0/sonic_platform/sfp.py @@ -6,10 +6,9 @@ # ############################################################################# -import os import time import sys - +import subprocess from ctypes import create_string_buffer try: @@ -125,7 +124,7 @@ class Sfp(SfpBase): # Path to sysfs PLATFORM_ROOT_PATH = "/usr/share/sonic/device" PMON_HWSKU_PATH = "/usr/share/sonic/hwsku" - HOST_CHK_CMD = "docker > /dev/null 2>&1" + HOST_CHK_CMD = ["docker"] PLATFORM = "x86_64-accton_as7312_54x-r0" HWSKU = "Accton-AS7312-54X" @@ -256,7 +255,7 @@ def __write_txt_file(self, file_path, value): return True def __is_host(self): - return os.system(self.HOST_CHK_CMD) == 0 + return subprocess.call(self.HOST_CHK_CMD) == 0 def __get_path_to_port_config_file(self): platform_path = "/".join([self.PLATFORM_ROOT_PATH, self.PLATFORM]) diff --git a/device/accton/x86_64-accton_as7326_56x-r0/sonic_platform/chassis.py b/device/accton/x86_64-accton_as7326_56x-r0/sonic_platform/chassis.py index f32f381b7c21..1c97d0e878ae 100644 --- a/device/accton/x86_64-accton_as7326_56x-r0/sonic_platform/chassis.py +++ b/device/accton/x86_64-accton_as7326_56x-r0/sonic_platform/chassis.py @@ -6,8 +6,8 @@ # ############################################################################# -import os import sys +import subprocess try: from sonic_platform_base.chassis_base import ChassisBase @@ -27,7 +27,7 @@ PMON_REBOOT_CAUSE_PATH = "/usr/share/sonic/platform/api_files/reboot-cause/" REBOOT_CAUSE_FILE = "reboot-cause.txt" PREV_REBOOT_CAUSE_FILE = "previous-reboot-cause.txt" -HOST_CHK_CMD = "which systemctl > /dev/null 2>&1" +HOST_CHK_CMD = ["which", "systemctl"] 
SYSLED_FNODE= "/sys/class/leds/accton_as7326_56x_led::diag/brightness" SYSLED_MODES = { "0" : "STATUS_LED_COLOR_OFF", @@ -93,7 +93,7 @@ def __initialize_watchdog(self): self._watchdog = Watchdog() def __is_host(self): - return os.system(HOST_CHK_CMD) == 0 + return subprocess.call(HOST_CHK_CMD) == 0 def __read_txt_file(self, file_path): try: diff --git a/device/accton/x86_64-accton_as7326_56x-r0/sonic_platform/sfp.py b/device/accton/x86_64-accton_as7326_56x-r0/sonic_platform/sfp.py index 95a54e554a01..16fe60bfb698 100644 --- a/device/accton/x86_64-accton_as7326_56x-r0/sonic_platform/sfp.py +++ b/device/accton/x86_64-accton_as7326_56x-r0/sonic_platform/sfp.py @@ -6,10 +6,9 @@ # ############################################################################# -import os import time import sys - +import subprocess from ctypes import create_string_buffer try: @@ -40,7 +39,7 @@ class Sfp(SfpOptoeBase): # Path to sysfs PLATFORM_ROOT_PATH = "/usr/share/sonic/device" PMON_HWSKU_PATH = "/usr/share/sonic/hwsku" - HOST_CHK_CMD = "which systemctl > /dev/null 2>&1" + HOST_CHK_CMD = ["which", "systemctl"] PLATFORM = "x86_64-accton_as7326_56x-r0" HWSKU = "Accton-AS7326-56X" @@ -144,7 +143,7 @@ def __write_txt_file(self, file_path, value): return True def __is_host(self): - return os.system(self.HOST_CHK_CMD) == 0 + return subprocess.call(self.HOST_CHK_CMD) == 0 def __get_path_to_port_config_file(self): platform_path = "/".join([self.PLATFORM_ROOT_PATH, self.PLATFORM]) diff --git a/device/accton/x86_64-accton_as7816_64x-r0/sonic_platform/chassis.py b/device/accton/x86_64-accton_as7816_64x-r0/sonic_platform/chassis.py index 44a759045b6b..164da5fd6122 100644 --- a/device/accton/x86_64-accton_as7816_64x-r0/sonic_platform/chassis.py +++ b/device/accton/x86_64-accton_as7816_64x-r0/sonic_platform/chassis.py @@ -6,8 +6,8 @@ # ############################################################################# -import os import sys +import subprocess try: from sonic_platform_base.chassis_base import 
ChassisBase @@ -26,7 +26,7 @@ PMON_REBOOT_CAUSE_PATH = "/usr/share/sonic/platform/api_files/reboot-cause/" REBOOT_CAUSE_FILE = "reboot-cause.txt" PREV_REBOOT_CAUSE_FILE = "previous-reboot-cause.txt" -HOST_CHK_CMD = "which systemctl > /dev/null 2>&1" +HOST_CHK_CMD = ["which", "systemctl"] SYSLED_FNODE = "/sys/class/leds/as7816_64x_led::diag/brightness" SYSLED_MODES = { "0" : "STATUS_LED_COLOR_OFF", @@ -96,7 +96,7 @@ def __initialize_watchdog(self): def __is_host(self): - return os.system(HOST_CHK_CMD) == 0 + return subprocess.call(HOST_CHK_CMD) == 0 def __read_txt_file(self, file_path): try: diff --git a/device/accton/x86_64-accton_as7816_64x-r0/sonic_platform/component.py b/device/accton/x86_64-accton_as7816_64x-r0/sonic_platform/component.py index 6af2f6008e72..d024e43e1f41 100644 --- a/device/accton/x86_64-accton_as7816_64x-r0/sonic_platform/component.py +++ b/device/accton/x86_64-accton_as7816_64x-r0/sonic_platform/component.py @@ -6,8 +6,6 @@ # ############################################################################# -import shlex -import subprocess try: from sonic_platform_base.component_base import ComponentBase @@ -43,22 +41,6 @@ def __init__(self, component_index=0): self.index = component_index self.name = self.get_name() - def __run_command(self, command): - # Run bash command and print output to stdout - try: - process = subprocess.Popen( - shlex.split(command), stdout=subprocess.PIPE) - while True: - output = process.stdout.readline() - if output == '' and process.poll() is not None: - break - rc = process.poll() - if rc != 0: - return False - except Exception: - return False - return True - def __get_bios_version(self): # Retrieves the BIOS firmware version try: diff --git a/device/accton/x86_64-accton_as7816_64x-r0/sonic_platform/helper.py b/device/accton/x86_64-accton_as7816_64x-r0/sonic_platform/helper.py index b124ca29f0df..b19fab85deb8 100644 --- a/device/accton/x86_64-accton_as7816_64x-r0/sonic_platform/helper.py +++ 
b/device/accton/x86_64-accton_as7816_64x-r0/sonic_platform/helper.py @@ -4,7 +4,7 @@ from mmap import * from sonic_py_common import device_info -HOST_CHK_CMD = "docker > /dev/null 2>&1" +HOST_CHK_CMD = ["docker"] EMPTY_STRING = "" @@ -14,7 +14,7 @@ def __init__(self): (self.platform, self.hwsku) = device_info.get_platform_and_hwsku() def is_host(self): - return os.system(HOST_CHK_CMD) == 0 + return subprocess.call(HOST_CHK_CMD) == 0 def pci_get_value(self, resource, offset): status = True @@ -29,26 +29,6 @@ def pci_get_value(self, resource, offset): status = False return status, result - def run_command(self, cmd): - status = True - result = "" - try: - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - except Exception: - status = False - return status, result - - def run_interactive_command(self, cmd): - try: - os.system(cmd) - except Exception: - return False - return True - def read_txt_file(self, file_path): try: with open(file_path, 'r', errors='replace') as fd: @@ -66,52 +46,3 @@ def write_txt_file(self, file_path, value): return False return True - def ipmi_raw(self, netfn, cmd): - status = True - result = "" - try: - cmd = "ipmitool raw {} {}".format(str(netfn), str(cmd)) - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - else: - status = False - except Exception: - status = False - return status, result - - def ipmi_fru_id(self, id, key=None): - status = True - result = "" - try: - cmd = "ipmitool fru print {}".format(str( - id)) if not key else "ipmitool fru print {0} | grep '{1}' ".format(str(id), str(key)) - - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - else: - status = False - except Exception: 
- status = False - return status, result - - def ipmi_set_ss_thres(self, id, threshold_key, value): - status = True - result = "" - try: - cmd = "ipmitool sensor thresh '{}' {} {}".format(str(id), str(threshold_key), str(value)) - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - else: - status = False - except Exception: - status = False - return status, result diff --git a/device/accton/x86_64-accton_as7816_64x-r0/sonic_platform/sfp.py b/device/accton/x86_64-accton_as7816_64x-r0/sonic_platform/sfp.py index 01e568ef3e78..d8ff303b359d 100644 --- a/device/accton/x86_64-accton_as7816_64x-r0/sonic_platform/sfp.py +++ b/device/accton/x86_64-accton_as7816_64x-r0/sonic_platform/sfp.py @@ -6,10 +6,9 @@ # ############################################################################# -import os import time import sys - +import subprocess from ctypes import create_string_buffer try: @@ -31,7 +30,7 @@ class Sfp(SfpOptoeBase): # Path to sysfs PLATFORM_ROOT_PATH = "/usr/share/sonic/device" PMON_HWSKU_PATH = "/usr/share/sonic/hwsku" - HOST_CHK_CMD = "which systemctl > /dev/null 2>&1" + HOST_CHK_CMD = ["which", "systemctl"] PLATFORM = "x86_64-accton_as7816_64x-r0" HWSKU = "Accton-AS7816-64X" @@ -119,7 +118,7 @@ def get_eeprom_path(self): return self.port_to_eeprom_mapping[self.port_num] def __is_host(self): - return os.system(self.HOST_CHK_CMD) == 0 + return subprocess.call(self.HOST_CHK_CMD) == 0 def __get_path_to_port_config_file(self): platform_path = "/".join([self.PLATFORM_ROOT_PATH, self.PLATFORM]) diff --git a/device/accton/x86_64-accton_as9726_32d-r0/sonic_platform/chassis.py b/device/accton/x86_64-accton_as9726_32d-r0/sonic_platform/chassis.py index 7eb9601d8b56..b0a573432b76 100644 --- a/device/accton/x86_64-accton_as9726_32d-r0/sonic_platform/chassis.py +++ b/device/accton/x86_64-accton_as9726_32d-r0/sonic_platform/chassis.py @@ -6,7 +6,7 @@ # 
############################################################################# -import os +import subprocess try: from sonic_platform_base.chassis_base import ChassisBase @@ -28,7 +28,7 @@ PMON_REBOOT_CAUSE_PATH = "/usr/share/sonic/platform/api_files/reboot-cause/" REBOOT_CAUSE_FILE = "reboot-cause.txt" PREV_REBOOT_CAUSE_FILE = "previous-reboot-cause.txt" -HOST_CHK_CMD = "docker > /dev/null 2>&1" +HOST_CHK_CMD = ["docker"] class Chassis(ChassisBase): @@ -96,7 +96,7 @@ def __initialize_watchdog(self): def __is_host(self): - return os.system(HOST_CHK_CMD) == 0 + return subprocess.call(HOST_CHK_CMD) == 0 def __read_txt_file(self, file_path): try: diff --git a/device/accton/x86_64-accton_as9726_32d-r0/sonic_platform/component.py b/device/accton/x86_64-accton_as9726_32d-r0/sonic_platform/component.py index c34da704432a..8137d0f3a790 100644 --- a/device/accton/x86_64-accton_as9726_32d-r0/sonic_platform/component.py +++ b/device/accton/x86_64-accton_as9726_32d-r0/sonic_platform/component.py @@ -6,9 +6,6 @@ # ############################################################################# -import shlex -import subprocess - try: from sonic_platform_base.component_base import ComponentBase @@ -38,22 +35,6 @@ def __init__(self, component_index=0): self.index = component_index self.name = self.get_name() - def __run_command(self, command): - # Run bash command and print output to stdout - try: - process = subprocess.Popen( - shlex.split(command), stdout=subprocess.PIPE) - while True: - output = process.stdout.readline() - if output == '' and process.poll() is not None: - break - rc = process.poll() - if rc != 0: - return False - except Exception: - return False - return True - def __get_bios_version(self): # Retrieves the BIOS firmware version try: diff --git a/device/accton/x86_64-accton_as9726_32d-r0/sonic_platform/helper.py b/device/accton/x86_64-accton_as9726_32d-r0/sonic_platform/helper.py index 4cd60ac90611..2c644ecbfb88 100644 --- 
a/device/accton/x86_64-accton_as9726_32d-r0/sonic_platform/helper.py +++ b/device/accton/x86_64-accton_as9726_32d-r0/sonic_platform/helper.py @@ -4,7 +4,7 @@ from mmap import * from sonic_py_common import device_info -HOST_CHK_CMD = "docker > /dev/null 2>&1" +HOST_CHK_CMD = ["docker"] EMPTY_STRING = "" @@ -14,7 +14,7 @@ def __init__(self): (self.platform, self.hwsku) = device_info.get_platform_and_hwsku() def is_host(self): - return os.system(HOST_CHK_CMD) == 0 + return subprocess.call(HOST_CHK_CMD) == 0 def pci_get_value(self, resource, offset): status = True @@ -29,26 +29,6 @@ def pci_get_value(self, resource, offset): status = False return status, result - def run_command(self, cmd): - status = True - result = "" - try: - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - except Exception: - status = False - return status, result - - def run_interactive_command(self, cmd): - try: - os.system(cmd) - except Exception: - return False - return True - def read_txt_file(self, file_path): try: with open(file_path, 'r') as fd: @@ -66,52 +46,3 @@ def write_txt_file(self, file_path, value): return False return True - def ipmi_raw(self, netfn, cmd): - status = True - result = "" - try: - cmd = "ipmitool raw {} {}".format(str(netfn), str(cmd)) - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - else: - status = False - except Exception: - status = False - return status, result - - def ipmi_fru_id(self, id, key=None): - status = True - result = "" - try: - cmd = "ipmitool fru print {}".format(str( - id)) if not key else "ipmitool fru print {0} | grep '{1}' ".format(str(id), str(key)) - - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = 
raw_data.strip() - else: - status = False - except Exception: - status = False - return status, result - - def ipmi_set_ss_thres(self, id, threshold_key, value): - status = True - result = "" - try: - cmd = "ipmitool sensor thresh '{}' {} {}".format(str(id), str(threshold_key), str(value)) - p = subprocess.Popen( - cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - raw_data, err = p.communicate() - if err == '': - result = raw_data.strip() - else: - status = False - except Exception: - status = False - return status, result diff --git a/device/accton/x86_64-accton_as9726_32d-r0/sonic_platform/sfp.py b/device/accton/x86_64-accton_as9726_32d-r0/sonic_platform/sfp.py index 6cefc2a29a47..5d7c03d0055b 100644 --- a/device/accton/x86_64-accton_as9726_32d-r0/sonic_platform/sfp.py +++ b/device/accton/x86_64-accton_as9726_32d-r0/sonic_platform/sfp.py @@ -6,11 +6,10 @@ # ############################################################################# -import os import sys import time import struct - +import subprocess from ctypes import create_string_buffer try: @@ -272,7 +271,7 @@ class Sfp(SfpBase): """Platform-specific Sfp class""" - HOST_CHK_CMD = "docker > /dev/null 2>&1" + HOST_CHK_CMD = ["docker"] PLATFORM = "x86_64-accton_as9726_32d-r0" HWSKU = "Accton-AS9726-32D" @@ -310,7 +309,7 @@ def __write_txt_file(self, file_path, value): return True def __is_host(self): - return os.system(self.HOST_CHK_CMD) == 0 + return subprocess.call(self.HOST_CHK_CMD) == 0 def __get_path_to_port_config_file(self): platform_path = "/".join([self.PLATFORM_ROOT_PATH, self.PLATFORM]) diff --git a/platform/broadcom/sonic-platform-modules-accton/as4630-54pe/sonic_platform/component.py b/platform/broadcom/sonic-platform-modules-accton/as4630-54pe/sonic_platform/component.py index 22696341523d..2659d16f1c80 100644 --- a/platform/broadcom/sonic-platform-modules-accton/as4630-54pe/sonic_platform/component.py +++ 
b/platform/broadcom/sonic-platform-modules-accton/as4630-54pe/sonic_platform/component.py @@ -8,8 +8,8 @@ ############################################################################# try: - import subprocess from sonic_platform_base.component_base import ComponentBase + from sonic_py_common.general import getstatusoutput_noshell except ImportError as e: raise ImportError(str(e) + "- required module not found") @@ -67,8 +67,8 @@ def get_firmware_version(self): if self.name == "BIOS": fw_version = self.__get_bios_version() elif "CPLD" in self.name: - cmd = "i2cget -f -y {0} {1} 0x1".format(self.cpld_mapping[self.index][0], self.cpld_mapping[self.index][1]) - status, value = subprocess.getstatusoutput(cmd) + cmd = ["i2cget", "-f", "-y", self.cpld_mapping[self.index][0], self.cpld_mapping[self.index][1], "0x1"] + status, value = getstatusoutput_noshell(cmd) if not status: fw_version = value.rstrip() diff --git a/platform/broadcom/sonic-platform-modules-accton/as4630-54pe/utils/accton_as4630_54pe_monitor.py b/platform/broadcom/sonic-platform-modules-accton/as4630-54pe/utils/accton_as4630_54pe_monitor.py index b074177133aa..e79137785d18 100755 --- a/platform/broadcom/sonic-platform-modules-accton/as4630-54pe/utils/accton_as4630_54pe_monitor.py +++ b/platform/broadcom/sonic-platform-modules-accton/as4630-54pe/utils/accton_as4630_54pe_monitor.py @@ -26,9 +26,9 @@ import logging.config import logging.handlers import time - import subprocess from as4630_54pe.fanutil import FanUtil from as4630_54pe.thermalutil import ThermalUtil + from sonic_py_common.general import getstatusoutput_noshell except ImportError as e: raise ImportError('%s - required module not found' % str(e)) @@ -198,9 +198,9 @@ def manage_fans(self): if temp[0] >= 70000: #LM75-48 #critical case*/ logging.critical('Alarm-Critical for temperature critical is detected, reset DUT') - cmd_str="i2cset -y -f 3 0x60 0x4 0xE4" + cmd_str = ["i2cset", "-y", "-f", "3", "0x60", "0x4", "0xE4"] time.sleep(2); - status, 
output = subprocess.getstatusoutput(cmd_str) + status, output = getstatusoutput_noshell(cmd_str) #logging.debug('ori_state=%d, current_state=%d, temp_val=%d\n\n',ori_state, fan_policy_state, temp_val) diff --git a/platform/broadcom/sonic-platform-modules-accton/as4630-54pe/utils/accton_as4630_54pe_pddf_monitor.py b/platform/broadcom/sonic-platform-modules-accton/as4630-54pe/utils/accton_as4630_54pe_pddf_monitor.py index d9299b96a3b2..6bbaeaa06d6e 100755 --- a/platform/broadcom/sonic-platform-modules-accton/as4630-54pe/utils/accton_as4630_54pe_pddf_monitor.py +++ b/platform/broadcom/sonic-platform-modules-accton/as4630-54pe/utils/accton_as4630_54pe_pddf_monitor.py @@ -28,6 +28,7 @@ import logging.handlers import time from sonic_platform import platform + from sonic_py_common.general import getstatusoutput_noshell except ImportError as e: raise ImportError('%s - required module not found' % str(e)) @@ -209,10 +210,12 @@ def manage_fans(self): # Critical: Either all the fans are faulty or they are removed, shutdown the system logging.critical('Alarm for all fan faulty/absent is detected') logging.critical("Alarm for all fan faulty/absent is detected, reset DUT") - cmd_str = "i2cset -y -f 3 0x60 0x4 0xE4" + cmd_str = ["i2cset", "-y", "-f", "3", "0x60", "0x4", "0xE4"] time.sleep(2) - subprocess.getstatusoutput('sync; sync; sync') - subprocess.getstatusoutput(cmd_str) + getstatusoutput_noshell('sync') + getstatusoutput_noshell('sync') + getstatusoutput_noshell('sync') + getstatusoutput_noshell(cmd_str) elif sum(fan_fail_list) != 0: # Set the 100% speed only for first fan failure detection logging.warning('Fan_{} failed, set remaining fan speed to 100%'.format( @@ -235,7 +238,7 @@ def manage_fans(self): as4630_54pe_set_fan_speed(new_duty_cycle) if test_temp == 1: time.sleep(3) - status, output = subprocess.getstatusoutput('pddf_fanutil getspeed') + status, output = getstatusoutput_noshell(['pddf_fanutil', 'getspeed']) logging.debug('\n%s\n', output) if temp[0] >= 70000: # 
LM77-48 @@ -252,10 +255,12 @@ def manage_fans(self): if status: logging.warning('Reboot cause file not updated. {}'.format(output)) - cmd_str = "i2cset -y -f 3 0x60 0x4 0xE4" - subprocess.getstatusoutput('sync; sync; sync') + cmd_str = ["i2cset", "-y", "-f", "3", "0x60", "0x4", "0xE4"] + getstatusoutput_noshell('sync') + getstatusoutput_noshell('sync') + getstatusoutput_noshell('sync') time.sleep(3) - subprocess.getstatusoutput(cmd_str) + getstatusoutput_noshell(cmd_str) logging.debug('ori_state=%d, current_state=%d, temp_val=%d\n\n', ori_state, fan_policy_state, temp_val) diff --git a/platform/broadcom/sonic-platform-modules-accton/as4630-54pe/utils/pddf_switch_svc.py b/platform/broadcom/sonic-platform-modules-accton/as4630-54pe/utils/pddf_switch_svc.py index 5a505e19f19b..5c00f338ba61 100755 --- a/platform/broadcom/sonic-platform-modules-accton/as4630-54pe/utils/pddf_switch_svc.py +++ b/platform/broadcom/sonic-platform-modules-accton/as4630-54pe/utils/pddf_switch_svc.py @@ -2,111 +2,111 @@ # Script to stop and start the respective platforms default services. 
# This will be used while switching the pddf->non-pddf mode and vice versa -import commands +from sonic_py_common.general import getstatusoutput_noshell def check_pddf_support(): return True def stop_platform_svc(): - status, output = commands.getstatusoutput("systemctl disable as4630-54pe-platform-monitor-fan.service") + status, output = getstatusoutput_noshell(["systemctl", "disable", "as4630-54pe-platform-monitor-fan.service"]) if status: - print "Disable as4630-54pe-platform-monitor-fan.service failed %d"%status + print("Disable as4630-54pe-platform-monitor-fan.service failed %d"%status) return False - status, output = commands.getstatusoutput("systemctl stop as4630-54pe-platform-monitor-fan.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "as4630-54pe-platform-monitor-fan.service"]) if status: - print "Stop as4630-54pe-platform-monitor-fan.service failed %d"%status + print("Stop as4630-54pe-platform-monitor-fan.service failed %d"%status) return False - status, output = commands.getstatusoutput("systemctl disable as4630-54pe-platform-monitor-psu.service") + status, output = getstatusoutput_noshell(["systemctl", "disable", "as4630-54pe-platform-monitor-psu.service"]) if status: - print "Disable as4630-54pe-platform-monitor-psu.service failed %d"%status + print("Disable as4630-54pe-platform-monitor-psu.service failed %d"%status) return False - status, output = commands.getstatusoutput("systemctl stop as4630-54pe-platform-monitor-psu.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "as4630-54pe-platform-monitor-psu.service"]) if status: - print "Stop as4630-54pe-platform-monitor-psu.service failed %d"%status + print("Stop as4630-54pe-platform-monitor-psu.service failed %d"%status) return False - status, output = commands.getstatusoutput("systemctl disable as4630-54pe-platform-monitor.service") + status, output = getstatusoutput_noshell(["systemctl", "disable", "as4630-54pe-platform-monitor.service"]) if status: - 
print "Disable as4630-54pe-platform-monitor.service failed %d"%status + print("Disable as4630-54pe-platform-monitor.service failed %d"%status) return False - status, output = commands.getstatusoutput("systemctl stop as4630-54pe-platform-monitor.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "as4630-54pe-platform-monitor.service"]) if status: - print "Stop as4630-54pe-platform-monitor.service failed %d"%status + print("Stop as4630-54pe-platform-monitor.service failed %d"%status) return False - status, output = commands.getstatusoutput("/usr/local/bin/accton_as4630_54pe_util.py clean") + status, output = getstatusoutput_noshell(["/usr/local/bin/accton_as4630_54pe_util.py", "clean"]) if status: - print "accton_as4630_54pe_util.py clean command failed %d"%status + print("accton_as4630_54pe_util.py clean command failed %d"%status) return False # HACK , stop the pddf-platform-init service if it is active - status, output = commands.getstatusoutput("systemctl stop pddf-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "pddf-platform-init.service"]) if status: - print "Stop pddf-platform-init.service along with other platform serives failed %d"%status + print("Stop pddf-platform-init.service along with other platform serives failed %d"%status) return False - status, output = commands.getstatusoutput("systemctl stop as4630-54pe-pddf-platform-monitor.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "as4630-54pe-pddf-platform-monitor.service"]) if status: - print "Stop as4630-54pe-pddf-platform-monitor.service along with other platform serives failed %d"%status + print("Stop as4630-54pe-pddf-platform-monitor.service along with other platform serives failed %d"%status) return False return True def start_platform_svc(): - status, output = commands.getstatusoutput("/usr/local/bin/accton_as4630_54pe_util.py install") + status, output = 
getstatusoutput_noshell(["/usr/local/bin/accton_as4630_54pe_util.py", "install"]) if status: - print "accton_as4630_54pe_util.py install command failed %d"%status + print("accton_as4630_54pe_util.py install command failed %d"%status) return False - status, output = commands.getstatusoutput("systemctl enable as4630-54pe-platform-monitor-fan.service") + status, output = getstatusoutput_noshell(["systemctl", "enable", "as4630-54pe-platform-monitor-fan.service"]) if status: - print "Enable as4630-54pe-platform-monitor-fan.service failed %d"%status + print("Enable as4630-54pe-platform-monitor-fan.service failed %d"%status) return False - status, output = commands.getstatusoutput("systemctl start as4630-54pe-platform-monitor-fan.service") + status, output = getstatusoutput_noshell(["systemctl", "start", "as4630-54pe-platform-monitor-fan.service"]) if status: - print "Start as4630-54pe-platform-monitor-fan.service failed %d"%status + print("Start as4630-54pe-platform-monitor-fan.service failed %d"%status) return False - status, output = commands.getstatusoutput("systemctl enable as4630-54pe-platform-monitor-psu.service") + status, output = getstatusoutput_noshell(["systemctl", "enable", "as4630-54pe-platform-monitor-psu.service"]) if status: - print "Enable as4630-54pe-platform-monitor-psu.service failed %d"%status + print("Enable as4630-54pe-platform-monitor-psu.service failed %d"%status) return False - status, output = commands.getstatusoutput("systemctl start as4630-54pe-platform-monitor-psu.service") + status, output = getstatusoutput_noshell(["systemctl", "start", "as4630-54pe-platform-monitor-psu.service"]) if status: - print "Start as4630-54pe-platform-monitor-psu.service failed %d"%status + print("Start as4630-54pe-platform-monitor-psu.service failed %d"%status) return False - status, output = commands.getstatusoutput("systemctl enable as4630-54pe-platform-monitor.service") + status, output = getstatusoutput_noshell(["systemctl", "enable", 
"as4630-54pe-platform-monitor.service"]) if status: - print "Enable as4630-54pe-platform-monitor.service failed %d"%status + print("Enable as4630-54pe-platform-monitor.service failed %d"%status) return False - status, output = commands.getstatusoutput("systemctl start as4630-54pe-platform-monitor.service") + status, output = getstatusoutput_noshell(["systemctl", "start", "as4630-54pe-platform-monitor.service"]) if status: - print "Start as4630-54pe-platform-monitor.service failed %d"%status + print("Start as4630-54pe-platform-monitor.service failed %d"%status) return False return True def start_platform_pddf(): - status, output = commands.getstatusoutput("systemctl start pddf-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "start", "pddf-platform-init.service"]) if status: - print "Start pddf-platform-init.service failed %d"%status + print("Start pddf-platform-init.service failed %d"%status) return False - status, output = commands.getstatusoutput("systemctl start as4630-54pe-pddf-platform-monitor.service") + status, output = getstatusoutput_noshell(["systemctl", "start", "as4630-54pe-pddf-platform-monitor.service"]) if status: - print "Start as4630-54pe-pddf-platform-monitor.service failed %d"%status + print("Start as4630-54pe-pddf-platform-monitor.service failed %d"%status) return False return True def stop_platform_pddf(): - status, output = commands.getstatusoutput("systemctl stop pddf-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "pddf-platform-init.service"]) if status: - print "Stop pddf-platform-init.service failed %d"%status + print("Stop pddf-platform-init.service failed %d"%status) return False - status, output = commands.getstatusoutput("systemctl stop as4630-54pe-pddf-platform-monitor.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "as4630-54pe-pddf-platform-monitor.service"]) if status: - print "Stop as4630-54pe-pddf-platform-monitor.service failed 
%d"%status + print("Stop as4630-54pe-pddf-platform-monitor.service failed %d"%status) return False return True diff --git a/platform/broadcom/sonic-platform-modules-accton/as4630-54te/utils/accton_as4630_54te_monitor.py b/platform/broadcom/sonic-platform-modules-accton/as4630-54te/utils/accton_as4630_54te_monitor.py index d3c2b2c86094..aa5076c2caa4 100755 --- a/platform/broadcom/sonic-platform-modules-accton/as4630-54te/utils/accton_as4630_54te_monitor.py +++ b/platform/broadcom/sonic-platform-modules-accton/as4630-54te/utils/accton_as4630_54te_monitor.py @@ -20,7 +20,6 @@ # ------------------------------------------------------------------ try: - import os import sys import getopt import logging @@ -194,9 +193,9 @@ def manage_fans(self): # critical case*/ logging.critical( 'Alarm-Critical for temperature critical is detected, reset DUT') - cmd_str = "i2cset -y -f 3 0x60 0x4 0xE4" + cmd_str = ["i2cset", "-y", "-f", "3", "0x60", "0x4", "0xE4"] time.sleep(2) - return_value = os.system(cmd_str) + return_value = subprocess.call(cmd_str) logging.warning('Fan set: i2cset -y -f 3 0x60 0x4 0xE4, status is %d', return_value) #logging.debug('ori_state=%d, current_state=%d, temp_val=%d\n\n',ori_state, fan_policy_state, temp_val) diff --git a/platform/broadcom/sonic-platform-modules-accton/as7315-27xb/classes/fanutil.py b/platform/broadcom/sonic-platform-modules-accton/as7315-27xb/classes/fanutil.py index 31697f7c644e..9dc8d94ef68b 100644 --- a/platform/broadcom/sonic-platform-modules-accton/as7315-27xb/classes/fanutil.py +++ b/platform/broadcom/sonic-platform-modules-accton/as7315-27xb/classes/fanutil.py @@ -24,10 +24,8 @@ # ------------------------------------------------------------------ try: - import time import logging - from collections import namedtuple - import subprocess + from sonic_py_common.general import getstatusoutput_noshell except ImportError as e: raise ImportError('%s - required module not found' % str(e)) @@ -65,10 +63,11 @@ def 
_get_fan_to_device_node(self, fan_num, node_num): return "fan{0}_{1}".format(fan_num, self.node_postfix[node_num-1]) def _get_fan_i2c_bus_addr(self): - cmd_template = 'i2cget -f -y {} 0x{} 0' + cmd_template = ['i2cget', '-f', '-y', '', '', '0'] for bus_no, dev_addr in self.I2CADDR_CANDIDATES: - cmd = cmd_template.format(bus_no, dev_addr) - if subprocess.getstatusoutput(cmd)[0] == 0: + cmd_template[3] = str(bus_no) + cmd_template[4] = '0x' + str(dev_addr) + if getstatusoutput_noshell(cmd_template)[0] == 0: return bus_no, dev_addr raise IOError('Unable to reach fan CPLD via I2C') diff --git a/platform/broadcom/sonic-platform-modules-accton/as7326-56x/utils/accton_as7326_monitor.py b/platform/broadcom/sonic-platform-modules-accton/as7326-56x/utils/accton_as7326_monitor.py index ec3a4c133cdd..da046335cdc6 100755 --- a/platform/broadcom/sonic-platform-modules-accton/as7326-56x/utils/accton_as7326_monitor.py +++ b/platform/broadcom/sonic-platform-modules-accton/as7326-56x/utils/accton_as7326_monitor.py @@ -23,10 +23,10 @@ # ------------------------------------------------------------------ try: - import os import getopt import sys import logging + import subprocess import logging.config import logging.handlers import time # this is only being used as part of the example @@ -233,7 +233,7 @@ def manage_fans(self): if new_state==LEVEL_TEMP_CRITICAL: logging.critical('Alarm for temperature critical is detected, reboot DUT') time.sleep(2) - os.system('reboot') + subprocess.call(['reboot']) if ori_state==LEVEL_FAN_MID: if new_state==LEVEL_TEMP_HIGH: if alarm_state==0: @@ -242,7 +242,7 @@ def manage_fans(self): if new_state==LEVEL_TEMP_CRITICAL: logging.critical('Alarm for temperature critical is detected') time.sleep(2) - os.system('reboot') + subprocess.call(['reboot']) if ori_state==LEVEL_FAN_MAX: if new_state==LEVEL_TEMP_HIGH: if alarm_state==0: @@ -251,7 +251,7 @@ def manage_fans(self): if new_state==LEVEL_TEMP_CRITICAL: logging.critical('Alarm for temperature critical is 
detected') time.sleep(2) - os.system('reboot') + subprocess.call(['reboot']) if alarm_state==1: if temp_get < (fan_policy[3][0] - 5000): #below 65 C, clear alarm logging.warning('Alarm for temperature high is cleared') @@ -260,7 +260,7 @@ def manage_fans(self): if new_state==LEVEL_TEMP_CRITICAL: logging.critical('Alarm for temperature critical is detected') time.sleep(2) - os.system('reboot') + subprocess.call(['reboot']) if new_state <= LEVEL_FAN_MID: logging.warning('Alarm for temperature high is cleared') alarm_state=0 diff --git a/platform/broadcom/sonic-platform-modules-accton/as7326-56x/utils/accton_as7326_pddf_monitor.py b/platform/broadcom/sonic-platform-modules-accton/as7326-56x/utils/accton_as7326_pddf_monitor.py index faa4f7d48b15..7f3add4e4d2b 100755 --- a/platform/broadcom/sonic-platform-modules-accton/as7326-56x/utils/accton_as7326_pddf_monitor.py +++ b/platform/broadcom/sonic-platform-modules-accton/as7326-56x/utils/accton_as7326_pddf_monitor.py @@ -22,10 +22,10 @@ # ------------------------------------------------------------------ try: - import os import sys import getopt import logging + import subprocess import logging.config import logging.handlers import time @@ -218,7 +218,7 @@ def manage_fans(self): if new_state == LEVEL_TEMP_CRITICAL: logging.critical('Alarm for temperature critical is detected, reboot DUT') time.sleep(2) - os.system('reboot') + subprocess.call(['reboot']) if ori_state == LEVEL_FAN_MID: if new_state == LEVEL_TEMP_HIGH: if alarm_state == 0: @@ -227,7 +227,7 @@ def manage_fans(self): if new_state == LEVEL_TEMP_CRITICAL: logging.critical('Alarm for temperature critical is detected') time.sleep(2) - os.system('reboot') + subprocess.call(['reboot']) if ori_state == LEVEL_FAN_MAX: if new_state == LEVEL_TEMP_HIGH: if alarm_state == 0: @@ -236,7 +236,7 @@ def manage_fans(self): if new_state == LEVEL_TEMP_CRITICAL: logging.critical('Alarm for temperature critical is detected') time.sleep(2) - os.system('reboot') + 
subprocess.call(['reboot']) if alarm_state == 1: if temp_get < (fan_policy[3][0] - 5000): # below 65 C, clear alarm logging.warning('Alarm for temperature high is cleared') @@ -245,7 +245,7 @@ def manage_fans(self): if new_state == LEVEL_TEMP_CRITICAL: logging.critical('Alarm for temperature critical is detected') time.sleep(2) - os.system('reboot') + subprocess.call(['reboot']) if new_state <= LEVEL_FAN_MID: logging.warning('Alarm for temperature high is cleared') alarm_state = 0 diff --git a/platform/broadcom/sonic-platform-modules-accton/as7326-56x/utils/accton_as7326_util.py b/platform/broadcom/sonic-platform-modules-accton/as7326-56x/utils/accton_as7326_util.py index c3e1c50366b7..f4335bd44566 100755 --- a/platform/broadcom/sonic-platform-modules-accton/as7326-56x/utils/accton_as7326_util.py +++ b/platform/broadcom/sonic-platform-modules-accton/as7326-56x/utils/accton_as7326_util.py @@ -33,7 +33,7 @@ import re import time import os - +from sonic_py_common.general import getstatusoutput_noshell PROJECT_NAME = 'as7326_56x' @@ -100,16 +100,16 @@ def show_help(): def dis_i2c_ir3570a(addr): - cmd = "i2cset -y 0 0x%x 0xE5 0x01" % addr - status, output = subprocess.getstatusoutput(cmd) - cmd = "i2cset -y 0 0x%x 0x12 0x02" % addr - status, output = subprocess.getstatusoutput(cmd) + cmd = ["i2cset", "-y", "0", "0x"+"%x"%addr, "0xE5", "0x01"] + status, output = getstatusoutput_noshell(cmd) + cmd = ["i2cset", "-y", "0", "0x"+"%x"%addr, "0x12", "0x02"] + status, output = getstatusoutput_noshell(cmd) return status def ir3570_check(): - cmd = "i2cdump -y 0 0x42 s 0x9a" + cmd = ["i2cdump", "-y", "0", "0x42", "s", "0x9a"] try: - status, output = subprocess.getstatusoutput(cmd) + status, output = getstatusoutput_noshell(cmd) lines = output.split('\n') hn = re.findall(r'\w+', lines[-1]) version = int(hn[1], 16) @@ -257,8 +257,8 @@ def i2c_order_check(): return 0 def eeprom_check(): - cmd = "i2cget -y -f 0 0x56" - status, output = subprocess.getstatusoutput(cmd) + cmd = 
["i2cget", "-y", "-f", "0", "0x56"] + status, output = getstatusoutput_noshell(cmd) return status def device_install(): diff --git a/platform/broadcom/sonic-platform-modules-accton/as7326-56x/utils/pddf_switch_svc.py b/platform/broadcom/sonic-platform-modules-accton/as7326-56x/utils/pddf_switch_svc.py index 1b5f46ef0e1d..f02202b210b6 100755 --- a/platform/broadcom/sonic-platform-modules-accton/as7326-56x/utils/pddf_switch_svc.py +++ b/platform/broadcom/sonic-platform-modules-accton/as7326-56x/utils/pddf_switch_svc.py @@ -2,39 +2,39 @@ # Script to stop and start the respective platforms default services. # This will be used while switching the pddf->non-pddf mode and vice versa -import subprocess +from sonic_py_common.general import getstatusoutput_noshell def check_pddf_support(): return True def stop_platform_svc(): - status, output = subprocess.getstatusoutput("systemctl stop as7326-platform-monitor-fan.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "as7326-platform-monitor-fan.service"]) if status: print("Stop as7326-platform-fan.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl stop as7326-platform-monitor-psu.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "as7326-platform-monitor-psu.service"]) if status: print("Stop as7326-platform-psu.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl stop as7326-platform-monitor.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "as7326-platform-monitor.service"]) if status: print("Stop as7326-platform-init.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl disable as7326-platform-monitor.service") + status, output = getstatusoutput_noshell(["systemctl", "disable", "as7326-platform-monitor.service"]) if status: print("Disable as7326-platform-monitor.service failed %d"%status) return False - status, output = 
subprocess.getstatusoutput("/usr/local/bin/accton_as7326_util.py clean") + status, output = getstatusoutput_noshell(["/usr/local/bin/accton_as7326_util.py", "clean"]) if status: print("accton_as7326_util.py clean command failed %d"%status) return False # HACK , stop the pddf-platform-init service if it is active - status, output = subprocess.getstatusoutput("systemctl stop pddf-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "pddf-platform-init.service"]) if status: print("Stop pddf-platform-init.service along with other platform serives failed %d"%status) return False @@ -42,21 +42,21 @@ def stop_platform_svc(): return True def start_platform_svc(): - status, output = subprocess.getstatusoutput("/usr/local/bin/accton_as7326_util.py install") + status, output = getstatusoutput_noshell(["/usr/local/bin/accton_as7326_util.py", "install"]) if status: print("accton_as7326_util.py install command failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl enable as7326-platform-monitor.service") + status, output = getstatusoutput_noshell(["systemctl", "enable", "as7326-platform-monitor.service"]) if status: print("Enable as7326-platform-monitor.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl start as7326-platform-monitor-fan.service") + status, output = getstatusoutput_noshell(["systemctl", "start", "as7326-platform-monitor-fan.service"]) if status: print("Start as7326-platform-monitor-fan.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl start as7326-platform-monitor-psu.service") + status, output = getstatusoutput_noshell(["systemctl", "start", "as7326-platform-monitor-psu.service"]) if status: print("Start as7326-platform-monitor-psu.service failed %d"%status) return False @@ -64,7 +64,7 @@ def start_platform_svc(): return True def start_platform_pddf(): - status, output = 
subprocess.getstatusoutput("systemctl start pddf-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "start", "pddf-platform-init.service"]) if status: print("Start pddf-platform-init.service failed %d"%status) return False @@ -72,7 +72,7 @@ def start_platform_pddf(): return True def stop_platform_pddf(): - status, output = subprocess.getstatusoutput("systemctl stop pddf-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "pddf-platform-init.service"]) if status: print("Stop pddf-platform-init.service failed %d"%status) return False diff --git a/platform/broadcom/sonic-platform-modules-accton/as7712-32x/utils/pddf_switch_svc.py b/platform/broadcom/sonic-platform-modules-accton/as7712-32x/utils/pddf_switch_svc.py index 3ed3f7604e73..0f6c82893095 100755 --- a/platform/broadcom/sonic-platform-modules-accton/as7712-32x/utils/pddf_switch_svc.py +++ b/platform/broadcom/sonic-platform-modules-accton/as7712-32x/utils/pddf_switch_svc.py @@ -1,28 +1,28 @@ #!/usr/bin/env python # Script to stop and start the respective platforms default services. 
# This will be used while switching the pddf->non-pddf mode and vice versa -import commands +from sonic_py_common.general import getstatusoutput_noshell def check_pddf_support(): return True def stop_platform_svc(): - status, output = commands.getstatusoutput("systemctl stop as7712-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "as7712-platform-init.service"]) if status: print("Stop as7712-platform-init.service failed %d"%status) return False - status, output = commands.getstatusoutput("systemctl disable as7712-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "disable", "as7712-platform-init.service"]) if status: print("Disable as7712-platform-init.service failed %d"%status) return False - status, output = commands.getstatusoutput("/usr/local/bin/accton_as7712_util.py clean") + status, output = getstatusoutput_noshell(["/usr/local/bin/accton_as7712_util.py", "clean"]) if status: - print("accton_as7712_util.py clean command failed %d"%status) + print("accton_as7712_util.py clean failed %d"%status) return False # HACK , stop the pddf-platform-init service if it is active - status, output = commands.getstatusoutput("systemctl stop pddf-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "pddf-platform-init.service"]) if status: print("Stop pddf-platform-init.service along with other platform serives failed %d"%status) return False @@ -30,16 +30,16 @@ def stop_platform_svc(): return True def start_platform_svc(): - status, output = commands.getstatusoutput("/usr/local/bin/accton_as7712_util.py install") + status, output = getstatusoutput_noshell(["/usr/local/bin/accton_as7712_util.py", "install"]) if status: - print("accton_as7712_util.py install command failed %d"%status) + print("accton_as7712_util.py install failed %d"%status) return False - status, output = commands.getstatusoutput("systemctl enable as7712-platform-init.service") + status, output = 
getstatusoutput_noshell(["systemctl", "enable", "as7712-platform-init.service"]) if status: print("Enable as7712-platform-init.service failed %d"%status) return False - status, output = commands.getstatusoutput("systemctl start as7712-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "start", "as7712-platform-init.service"]) if status: print("Start as7712-platform-init.service failed %d"%status) return False @@ -47,7 +47,7 @@ def start_platform_svc(): return True def start_platform_pddf(): - status, output = commands.getstatusoutput("systemctl start pddf-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "start", "pddf-platform-init.service"]) if status: print("Start pddf-platform-init.service failed %d"%status) return False @@ -55,7 +55,7 @@ def start_platform_pddf(): return True def stop_platform_pddf(): - status, output = commands.getstatusoutput("systemctl stop pddf-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "pddf-platform-init.service"]) if status: print("Stop pddf-platform-init.service failed %d"%status) return False diff --git a/platform/broadcom/sonic-platform-modules-accton/as7716-32x/utils/accton_as7716_util.py b/platform/broadcom/sonic-platform-modules-accton/as7716-32x/utils/accton_as7716_util.py index 6322aac6bbd7..f312d2261636 100755 --- a/platform/broadcom/sonic-platform-modules-accton/as7716-32x/utils/accton_as7716_util.py +++ b/platform/broadcom/sonic-platform-modules-accton/as7716-32x/utils/accton_as7716_util.py @@ -36,6 +36,7 @@ import logging import re import time +from sonic_py_common.general import getstatusoutput_noshell PROJECT_NAME = 'as7716_32x' version = '0.0.1' @@ -220,16 +221,16 @@ def show_set_help(): sys.exit(0) def dis_i2c_ir3570a(addr): - cmd = "i2cset -y 0 0x%x 0xE5 0x01" % addr - status, output = subprocess.getstatusoutput(cmd) - cmd = "i2cset -y 0 0x%x 0x12 0x02" % addr - status, output = subprocess.getstatusoutput(cmd) + 
cmd = ["i2cset", "-y", "0", "0x"+"%x"%addr, "0xE5", "0x01"] + status, output = getstatusoutput_noshell(cmd) + cmd = ["i2cset", "-y", "0", "0x"+"%x"%addr, "0x12", "0x02"] + status, output = getstatusoutput_noshell(cmd) return status def ir3570_check(): - cmd = "i2cdump -y 0 0x42 s 0x9a" + cmd = ["i2cdump", "-y", "0", "0x42", "s", "0x9a"] try: - status, output = subprocess.getstatusoutput(cmd) + status, output = getstatusoutput_noshell(cmd) lines = output.split('\n') hn = re.findall(r'\w+', lines[-1]) version = int(hn[1], 16) diff --git a/platform/broadcom/sonic-platform-modules-accton/as7726-32x/utils/accton_as7726_32x_monitor.py b/platform/broadcom/sonic-platform-modules-accton/as7726-32x/utils/accton_as7726_32x_monitor.py index f506b72f83ab..b889dcf712d1 100755 --- a/platform/broadcom/sonic-platform-modules-accton/as7726-32x/utils/accton_as7726_32x_monitor.py +++ b/platform/broadcom/sonic-platform-modules-accton/as7726-32x/utils/accton_as7726_32x_monitor.py @@ -23,10 +23,10 @@ # ------------------------------------------------------------------ try: - import os import getopt import sys import logging + import subprocess import logging.config import logging.handlers import time # this is only being used as part of the example @@ -242,7 +242,7 @@ def manage_fans(self): if new_state==LEVEL_TEMP_CRITICAL: logging.critical('Alarm for temperature critical is detected, reboot DUT') time.sleep(2) - os.system('reboot') + subprocess.call(['reboot']) if ori_state==LEVEL_FAN_MID: if new_state==LEVEL_TEMP_HIGH: if alarm_state==0: @@ -251,7 +251,7 @@ def manage_fans(self): if new_state==LEVEL_TEMP_CRITICAL: logging.critical('Alarm for temperature critical is detected') time.sleep(2) - os.system('reboot') + subprocess.call(['reboot']) if ori_state==LEVEL_FAN_MAX: if new_state==LEVEL_TEMP_HIGH: if alarm_state==0: @@ -260,7 +260,7 @@ def manage_fans(self): if new_state==LEVEL_TEMP_CRITICAL: logging.critical('Alarm for temperature critical is detected') time.sleep(2) - 
os.system('reboot') + subprocess.call(['reboot']) if alarm_state==1: if temp_get < (fan_policy[3][0] - 5000): #below 65 C, clear alarm logging.warning('Alarm for temperature high is cleared') @@ -269,7 +269,7 @@ def manage_fans(self): if new_state==LEVEL_TEMP_CRITICAL: logging.critical('Alarm for temperature critical is detected') time.sleep(2) - os.system('reboot') + subprocess.call(['reboot']) if new_state <= LEVEL_FAN_MID: logging.warning('Alarm for temperature high is cleared') alarm_state=0 diff --git a/platform/broadcom/sonic-platform-modules-accton/as7726-32x/utils/accton_as7726_32x_pddf_monitor.py b/platform/broadcom/sonic-platform-modules-accton/as7726-32x/utils/accton_as7726_32x_pddf_monitor.py index fd283cd6cc2b..2b18ac646ac9 100755 --- a/platform/broadcom/sonic-platform-modules-accton/as7726-32x/utils/accton_as7726_32x_pddf_monitor.py +++ b/platform/broadcom/sonic-platform-modules-accton/as7726-32x/utils/accton_as7726_32x_pddf_monitor.py @@ -24,10 +24,10 @@ # ------------------------------------------------------------------ try: - import os import sys import getopt import logging + import subprocess import logging.config import logging.handlers import time @@ -230,7 +230,7 @@ def manage_fans(self): if new_state==LEVEL_TEMP_CRITICAL: logging.critical('Alarm for temperature critical is detected, reboot DUT') time.sleep(2) - os.system('reboot') + subprocess.call(['reboot']) if ori_state==LEVEL_FAN_MID: if new_state==LEVEL_TEMP_HIGH: if alarm_state==0: @@ -239,7 +239,7 @@ def manage_fans(self): if new_state==LEVEL_TEMP_CRITICAL: logging.critical('Alarm for temperature critical is detected') time.sleep(2) - os.system('reboot') + subprocess.call(['reboot']) if ori_state==LEVEL_FAN_MAX: if new_state==LEVEL_TEMP_HIGH: if alarm_state==0: @@ -248,7 +248,7 @@ def manage_fans(self): if new_state==LEVEL_TEMP_CRITICAL: logging.critical('Alarm for temperature critical is detected') time.sleep(2) - os.system('reboot') + subprocess.call(['reboot']) if alarm_state==1: 
if temp_get < (fan_policy[3][0] - 5000): #below 65 C, clear alarm logging.warning('Alarm for temperature high is cleared') @@ -257,7 +257,7 @@ def manage_fans(self): if new_state==LEVEL_TEMP_CRITICAL: logging.critical('Alarm for temperature critical is detected') time.sleep(2) - os.system('reboot') + subprocess.call(['reboot']) if new_state <= LEVEL_FAN_MID: logging.warning('Alarm for temperature high is cleared') alarm_state=0 diff --git a/platform/broadcom/sonic-platform-modules-accton/as7726-32x/utils/accton_as7726_32x_util.py b/platform/broadcom/sonic-platform-modules-accton/as7726-32x/utils/accton_as7726_32x_util.py index bee413fe0655..949f1230de99 100755 --- a/platform/broadcom/sonic-platform-modules-accton/as7726-32x/utils/accton_as7726_32x_util.py +++ b/platform/broadcom/sonic-platform-modules-accton/as7726-32x/utils/accton_as7726_32x_util.py @@ -32,6 +32,7 @@ import logging import re import time +from sonic_py_common.general import getstatusoutput_noshell PROJECT_NAME = 'as7726_32x' version = '0.0.1' @@ -147,16 +148,16 @@ def show_help(): sys.exit(0) def dis_i2c_ir3570a(addr): - cmd = "i2cset -y 0 0x%x 0xE5 0x01" % addr - status, output = subprocess.getstatusoutput(cmd) - cmd = "i2cset -y 0 0x%x 0x12 0x02" % addr - status, output = subprocess.getstatusoutput(cmd) + cmd = ["i2cset", "-y", "0", "0x"+"%x"%addr, "0xE5", "0x01"] + status, output = getstatusoutput_noshell(cmd) + cmd = ["i2cset", "-y", "0", "0x"+"%x"%addr, "0x12", "0x02"] + status, output = getstatusoutput_noshell(cmd) return status def ir3570_check(): - cmd = "i2cdump -y 0 0x42 s 0x9a" + cmd = ["i2cdump", "-y", "0", "0x42", "s", "0x9a"] try: - status, output = subprocess.getstatusoutput(cmd) + status, output = getstatusoutput_noshell(cmd) lines = output.split('\n') hn = re.findall(r'\w+', lines[-1]) version = int(hn[1], 16) diff --git a/platform/broadcom/sonic-platform-modules-accton/as7726-32x/utils/pddf_switch_svc.py 
b/platform/broadcom/sonic-platform-modules-accton/as7726-32x/utils/pddf_switch_svc.py index 9664d21b6c36..e8b42aaa1b93 100755 --- a/platform/broadcom/sonic-platform-modules-accton/as7726-32x/utils/pddf_switch_svc.py +++ b/platform/broadcom/sonic-platform-modules-accton/as7726-32x/utils/pddf_switch_svc.py @@ -2,44 +2,44 @@ # Script to stop and start the respective platforms default services. # This will be used while switching the pddf->non-pddf mode and vice versa -import subprocess +from sonic_py_common.general import getstatusoutput_noshell def check_pddf_support(): return True def stop_platform_svc(): - status, output = subprocess.getstatusoutput("systemctl stop as7726-32x-platform-monitor-fan.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "as7726-32x-platform-monitor-fan.service"]) if status: print("Stop as7726-32x-platform-monitor-fan.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl disable as7726-32x-platform-monitor-fan.service") + status, output = getstatusoutput_noshell(["systemctl", "disable", "as7726-32x-platform-monitor-fan.service"]) if status: print("Disable as7726-32x-platform-monitor-fan.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl stop as7726-32x-platform-monitor-psu.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "as7726-32x-platform-monitor-psu.service"]) if status: print("Stop as7726-32x-platform-monitor-psu.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl disable as7726-32x-platform-monitor-psu.service") + status, output = getstatusoutput_noshell(["systemctl", "disable", "as7726-32x-platform-monitor-psu.service"]) if status: print("Disable as7726-32x-platform-monitor-psu.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl stop as7726-32x-platform-monitor.service") + status, output = 
getstatusoutput_noshell(["systemctl", "stop", "as7726-32x-platform-monitor.service"]) if status: print("Stop as7726-32x-platform-monitor.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl disable as7726-32x-platform-monitor.service") + status, output = getstatusoutput_noshell(["systemctl", "disable", "as7726-32x-platform-monitor.service"]) if status: print("Disable as7726-32x-platform-monitor.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("/usr/local/bin/accton_as7726_32x_util.py clean") + status, output = getstatusoutput_noshell(["/usr/local/bin/accton_as7726_32x_util.py", "clean"]) if status: print("accton_as7726_32x_util.py clean command failed %d"%status) return False # HACK , stop the pddf-platform-init service if it is active - status, output = subprocess.getstatusoutput("systemctl stop pddf-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "pddf-platform-init.service"]) if status: print("Stop pddf-platform-init.service along with other platform serives failed %d"%status) return False @@ -47,32 +47,32 @@ def stop_platform_svc(): return True def start_platform_svc(): - status, output = subprocess.getstatusoutput("/usr/local/bin/accton_as7726_32x_util.py install") + status, output = getstatusoutput_noshell(["/usr/local/bin/accton_as7726_32x_util.py", "install"]) if status: print("accton_as7726_32x_util.py install command failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl enable as7726-32x-platform-monitor-fan.service") + status, output = getstatusoutput_noshell(["systemctl", "enable", "as7726-32x-platform-monitor-fan.service"]) if status: print("Enable as7726-32x-platform-monitor-fan.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl start as7726-32x-platform-monitor-fan.service") + status, output = getstatusoutput_noshell(["systemctl", "start", 
"as7726-32x-platform-monitor-fan.service"]) if status: print("Start as7726-32x-platform-monitor-fan.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl enable as7726-32x-platform-monitor-psu.service") + status, output = getstatusoutput_noshell(["systemctl", "enable", "as7726-32x-platform-monitor-psu.service"]) if status: print("Enable as7726-32x-platform-monitor-psu.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl start as7726-32x-platform-monitor-psu.service") + status, output = getstatusoutput_noshell(["systemctl", "start", "as7726-32x-platform-monitor-psu.service"]) if status: print("Start as7726-32x-platform-monitor-psu.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl enable as7726-32x-platform-monitor.service") + status, output = getstatusoutput_noshell(["systemctl", "enable", "as7726-32x-platform-monitor.service"]) if status: print("Enable as7726-32x-platform-monitor.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl start as7726-32x-platform-monitor.service") + status, output = getstatusoutput_noshell(["systemctl", "start", "as7726-32x-platform-monitor.service"]) if status: print("Start as7726-32x-platform-monitor.service failed %d"%status) return False @@ -80,7 +80,7 @@ def start_platform_svc(): return True def start_platform_pddf(): - status, output = subprocess.getstatusoutput("systemctl start pddf-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "start", "pddf-platform-init.service"]) if status: print("Start pddf-platform-init.service failed %d"%status) return False @@ -88,7 +88,7 @@ def start_platform_pddf(): return True def stop_platform_pddf(): - status, output = subprocess.getstatusoutput("systemctl stop pddf-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "pddf-platform-init.service"]) if 
status: print("Stop pddf-platform-init.service failed %d"%status) return False diff --git a/platform/broadcom/sonic-platform-modules-accton/as7816-64x/utils/accton_as7816_64x_util.py b/platform/broadcom/sonic-platform-modules-accton/as7816-64x/utils/accton_as7816_64x_util.py index 1e9314fb824e..5ab3d8f20e98 100755 --- a/platform/broadcom/sonic-platform-modules-accton/as7816-64x/utils/accton_as7816_64x_util.py +++ b/platform/broadcom/sonic-platform-modules-accton/as7816-64x/utils/accton_as7816_64x_util.py @@ -32,7 +32,7 @@ import re import time import os - +from sonic_py_common.general import getstatusoutput_noshell PROJECT_NAME = 'as7816_64x' @@ -99,18 +99,17 @@ def show_help(): print( __doc__ % {'scriptName' : sys.argv[0].split("/")[-1]}) sys.exit(0) - def dis_i2c_ir3570a(addr): - cmd = "i2cset -y 0 0x%x 0xE5 0x01" % addr - status, output = subprocess.getstatusoutput(cmd) - cmd = "i2cset -y 0 0x%x 0x12 0x02" % addr - status, output = subprocess.getstatusoutput(cmd) + cmd = ["i2cset", "-y", "0", "0x"+"%x"%addr, "0xE5", "0x01"] + status, output = getstatusoutput_noshell(cmd) + cmd = ["i2cset", "-y", "0", "0x"+"%x"%addr, "0x12", "0x02"] + status, output = getstatusoutput_noshell(cmd) return status def ir3570_check(): - cmd = "i2cdump -y 0 0x42 s 0x9a" + cmd = ["i2cdump", "-y", "0", "0x42", "s", "0x9a"] try: - status, output = subprocess.getstatusoutput(cmd) + status, output = getstatusoutput_noshell(cmd) lines = output.split('\n') hn = re.findall(r'\w+', lines[-1]) version = int(hn[1], 16) diff --git a/platform/broadcom/sonic-platform-modules-accton/as7816-64x/utils/pddf_switch_svc.py b/platform/broadcom/sonic-platform-modules-accton/as7816-64x/utils/pddf_switch_svc.py index 98bf05eca115..166b9c12f0ea 100755 --- a/platform/broadcom/sonic-platform-modules-accton/as7816-64x/utils/pddf_switch_svc.py +++ b/platform/broadcom/sonic-platform-modules-accton/as7816-64x/utils/pddf_switch_svc.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # Script to stop and start the respective 
platforms default services. # This will be used while switching the pddf->non-pddf mode and vice versa -import subprocess +from sonic_py_common.general import getstatusoutput_noshell def check_pddf_support(): @@ -10,22 +10,22 @@ def check_pddf_support(): def stop_platform_svc(): - status, output = subprocess.getstatusoutput("systemctl stop as7816-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "as7816-platform-init.service"]) if status: print(("Stop as7816-platform-init.service failed %d" % status)) return False - status, output = subprocess.getstatusoutput("systemctl disable as7816-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "disable", "as7816-platform-init.service"]) if status: print(("Disable as7816-platform-init.service failed %d" % status)) return False - status, output = subprocess.getstatusoutput("/usr/local/bin/accton_as7816_util.py clean") + status, output = getstatusoutput_noshell(["/usr/local/bin/accton_as7816_util.py", "clean"]) if status: print(("accton_as7816_util.py clean command failed %d" % status)) return False # HACK , stop the pddf-platform-init service if it is active - status, output = subprocess.getstatusoutput("systemctl stop pddf-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "pddf-platform-init.service"]) if status: print(("Stop pddf-platform-init.service along with other platform serives failed %d" % status)) return False @@ -34,12 +34,12 @@ def stop_platform_svc(): def start_platform_svc(): - status, output = subprocess.getstatusoutput("/usr/local/bin/accton_as7816_util.py install") + status, output = getstatusoutput_noshell(["/usr/local/bin/accton_as7816_util.py", "install"]) if status: print(("accton_as7816_util.py install command failed %d" % status)) return False - status, output = subprocess.getstatusoutput("systemctl enable as7816-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", 
"enable", "as7816-platform-init.service"]) if status: print(("Enable as7816-platform-init.service failed %d" % status)) return False @@ -48,7 +48,7 @@ def start_platform_svc(): def start_platform_pddf(): - status, output = subprocess.getstatusoutput("systemctl start pddf-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "start", "pddf-platform-init.service"]) if status: print(("Start pddf-platform-init.service failed %d" % status)) return False @@ -57,7 +57,7 @@ def start_platform_pddf(): def stop_platform_pddf(): - status, output = subprocess.getstatusoutput("systemctl stop pddf-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "pddf-platform-init.service"]) if status: print(("Stop pddf-platform-init.service failed %d" % status)) return False diff --git a/platform/broadcom/sonic-platform-modules-accton/as9716-32d/utils/accton_as9716_32d_util.py b/platform/broadcom/sonic-platform-modules-accton/as9716-32d/utils/accton_as9716_32d_util.py index 647e26e07d2c..53126109f968 100755 --- a/platform/broadcom/sonic-platform-modules-accton/as9716-32d/utils/accton_as9716_32d_util.py +++ b/platform/broadcom/sonic-platform-modules-accton/as9716-32d/utils/accton_as9716_32d_util.py @@ -32,6 +32,7 @@ import logging import re import time +from sonic_py_common.general import getstatusoutput_noshell PROJECT_NAME = 'as9716_32d' version = '0.0.1' @@ -153,16 +154,16 @@ def show_help(): sys.exit(0) def dis_i2c_ir3570a(addr): - cmd = "i2cset -y 0 0x%x 0xE5 0x01" % addr - status, output = subprocess.getstatusoutput(cmd) - cmd = "i2cset -y 0 0x%x 0x12 0x02" % addr - status, output = subprocess.getstatusoutput(cmd) + cmd = ["i2cset", "-y", "0", "0x"+"%x"%addr, "0xE5", "0x01"] + status, output = getstatusoutput_noshell(cmd) + cmd = ["i2cset", "-y", "0", "0x"+"%x"%addr, "0x12", "0x02"] + status, output = getstatusoutput_noshell(cmd) return status def ir3570_check(): - cmd = "i2cdump -y 0 0x42 s 0x9a" + cmd = ["i2cdump", "-y", 
"0", "0x42", "s", "0x9a"] try: - status, output = subprocess.getstatusoutput(cmd) + status, output = getstatusoutput_noshell(cmd) lines = output.split('\n') hn = re.findall(r'\w+', lines[-1]) version = int(hn[1], 16) diff --git a/platform/broadcom/sonic-platform-modules-accton/as9716-32d/utils/pddf_switch_svc.py b/platform/broadcom/sonic-platform-modules-accton/as9716-32d/utils/pddf_switch_svc.py index 95e42b5c8971..1d672015d52f 100755 --- a/platform/broadcom/sonic-platform-modules-accton/as9716-32d/utils/pddf_switch_svc.py +++ b/platform/broadcom/sonic-platform-modules-accton/as9716-32d/utils/pddf_switch_svc.py @@ -1,39 +1,39 @@ #!/usr/bin/env python # Script to stop and start the respective platforms default services. # This will be used while switching the pddf->non-pddf mode and vice versa -import subprocess +from sonic_py_common.general import getstatusoutput_noshell def check_pddf_support(): return True def stop_platform_svc(): - status, output = subprocess.getstatusoutput("systemctl stop as9716-32d-platform-monitor-fan.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "as9716-32d-platform-monitor-fan.service"]) if status: print("Stop as9716-32d-platform-fan.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl stop as9716-32d-platform-monitor-psu.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "as9716-32d-platform-monitor-psu.service"]) if status: print("Stop as9716-32d-platform-psu.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl stop as9716-32d-platform-monitor.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "as9716-32d-platform-monitor.service"]) if status: print("Stop as9716-32d-platform-init.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl disable as9716-32d-platform-monitor.service") + status, output = 
getstatusoutput_noshell(["systemctl", "disable", "as9716-32d-platform-monitor.service"]) if status: print("Disable as9716-32d-platform-monitor.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("/usr/local/bin/accton_as9716_32d_util.py clean") + status, output = getstatusoutput_noshell(["/usr/local/bin/accton_as9716_32d_util.py", "clean"]) if status: print("accton_as9716_32d_util.py clean command failed %d"%status) return False # HACK , stop the pddf-platform-init service if it is active - status, output = subprocess.getstatusoutput("systemctl stop pddf-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "pddf-platform-init.service"]) if status: print("Stop pddf-platform-init.service along with other platform serives failed %d"%status) return False @@ -41,21 +41,21 @@ def stop_platform_svc(): return True def start_platform_svc(): - status, output = subprocess.getstatusoutput("/usr/local/bin/accton_as9716_32d_util.py install") + status, output = getstatusoutput_noshell(["/usr/local/bin/accton_as9716_32d_util.py", "install"]) if status: print("accton_as9716_32d_util.py install command failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl enable as9716-32d-platform-monitor.service") + status, output = getstatusoutput_noshell(["systemctl", "enable", "as9716-32d-platform-monitor.service"]) if status: print("Enable as9716-32d-platform-monitor.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl start as9716-32d-platform-monitor-fan.service") + status, output = getstatusoutput_noshell(["systemctl", "start", "as9716-32d-platform-monitor-fan.service"]) if status: print("Start as9716-32d-platform-monitor-fan.service failed %d"%status) return False - status, output = subprocess.getstatusoutput("systemctl start as9716-32d-platform-monitor-psu.service") + status, output = getstatusoutput_noshell(["systemctl", "start", 
"as9716-32d-platform-monitor-psu.service"]) if status: print("Start as9716-32d-platform-monitor-psu.service failed %d"%status) return False @@ -64,7 +64,7 @@ def start_platform_svc(): def start_platform_pddf(): - status, output = subprocess.getstatusoutput("systemctl start pddf-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "start", "pddf-platform-init.service"]) if status: print("Start pddf-platform-init.service failed %d"%status) return False @@ -73,7 +73,7 @@ def start_platform_pddf(): def stop_platform_pddf(): - status, output = subprocess.getstatusoutput("systemctl stop pddf-platform-init.service") + status, output = getstatusoutput_noshell(["systemctl", "stop", "pddf-platform-init.service"]) if status: print("Stop pddf-platform-init.service failed %d"%status) return False From e2b3bdf72a2cc6900ad88a0e23dc3007348e1cbe Mon Sep 17 00:00:00 2001 From: Zain Budhwani <99770260+zbud-msft@users.noreply.github.com> Date: Mon, 7 Nov 2022 09:57:03 -0800 Subject: [PATCH 149/174] Add YANG model and unit tests for additional structured events (#12554) Added YANG models for additional events for host, swss, and dhcp relay --- .../tests/sonic-events-dhcp-relay.json | 15 ++ .../tests/sonic-events-host.json | 71 ++++++++ .../tests/sonic-events-swss.json | 23 +++ .../tests_config/sonic-events-dhcp-relay.json | 36 ++++ .../tests_config/sonic-events-host.json | 168 ++++++++++++++++++ .../tests_config/sonic-events-swss.json | 54 ++++++ .../yang-models/sonic-events-dhcp-relay.yang | 27 +++ .../yang-models/sonic-events-host.yang | 66 +++++++ .../yang-models/sonic-events-swss.yang | 37 ++++ .../yang-templates/sonic-types.yang.j2 | 28 +++ 10 files changed, 525 insertions(+) diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-dhcp-relay.json b/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-dhcp-relay.json index 575c2c587d5c..f99b279e39f9 100644 --- 
a/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-dhcp-relay.json +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-dhcp-relay.json @@ -25,5 +25,20 @@ }, "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_DISPARITY_VALID": { "desc": "VALID DHCP_RELAY_DISPARITY EVENT." + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_BIND_FAILURE_INCORRECT_VLAN": { + "desc": "DHCP_RELAY_BIND_FAILURE_EVENT_INCORRECT_VLAN failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_BIND_FAILURE_INCORRECT_TYPE": { + "desc": "DHCP_RELAY_BIND_FAILURE_EVENT_INCORRECT_TYPE failure.", + "eStrKey": "InvalidValue" + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_BIND_FAILURE_INCORRECT_TIMESTAMP": { + "desc": "DHCP_RELAY_BIND_FAILURE_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_BIND_FAILURE_VALID": { + "desc": "VALID DHCP_RELAY_BIND_FAILURE EVENT." } } diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-host.json b/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-host.json index 6e4a8dcbe84e..b26d8e68a531 100644 --- a/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-host.json +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-host.json @@ -111,5 +111,76 @@ }, "SONIC_EVENTS_HOST_INVALID_FREELIST_VALID": { "desc": "VALID INVALID_FREELIST EVENT." + }, + "SONIC_EVENTS_HOST_MEM_THRESHOLD_EXCEEDED_INCORRECT_CTR_NAME": { + "desc": "MEM_THRESHOLD_EXCEEDED_EVENT_INCORRECT_CTR_NAME failure.", + "eStr": "Invalid ctr_name." + }, + "SONIC_EVENTS_HOST_MEM_THRESHOLD_EXCEEDED_INCORRECT_CTR_NAME_LENGTH": { + "desc": "MEM_THRESHOLD_EXCEEDED_EVENT_INCORRECT_CTR_NAME_LENGTH failure.", + "eStr": "Invalid length for ctr_name." 
+ }, + "SONIC_EVENTS_HOST_MEM_THRESHOLD_EXCEEDED_INCORRECT_MEM_USAGE": { + "desc": "MEM_THRESHOLD_EXCEEDED_EVENT_INCORRECT_MEM_USAGE failure.", + "eStrKey": "InvalidValue", + "eStr": ["mem_usage"] + }, + "SONIC_EVENTS_HOST_MEM_THRESHOLD_EXCEEDED_INCORRECT_THRESHOLD": { + "desc": "MEM_THRESHOLD_EXCEEDED_EVENT_INCORRECT_THRESHOLD failure.", + "eStrKey": "InvalidValue", + "eStr": ["threshold"] + }, + "SONIC_EVENTS_HOST_MEM_THRESHOLD_EXCEEDED_INCORRECT_TIMESTAMP": { + "desc": "MEM_THRESHOLD_EXCEEDED_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_HOST_MEM_THRESHOLD_EXCEEDED_VALID": { + "desc": "VALID MEM_THRESHOLD_EXCEEDED EVENT." + }, + "SONIC_EVENTS_HOST_PROCESS_EXITED_UNEXPECTEDLY_INCORRECT_CTR_NAME": { + "desc": "PROCESS_EXITED_UNEXPECTEDLY_EVENT_INCORRECT_CTR_NAME failure.", + "eStr": "Invalid ctr_name." + }, + "SONIC_EVENTS_HOST_PROCESS_EXITED_UNEXPECTEDLY_INCORRECT_CTR_NAME_LENGTH": { + "desc": "PROCESS_EXITED_UNEXPECTEDLY_EVENT_INCORRECT_CTR_NAME_LENGTH failure.", + "eStr": "Invalid length for ctr_name." + }, + "SONIC_EVENTS_HOST_PROCESS_EXITED_UNEXPECTEDLY_INCORRECT_PROCESS_NAME": { + "desc": "PROCESS_EXITED_UNEXPECTEDLY_EVENT_INCORRECT_PROCESS_NAME failure.", + "eStr": "Invalid process_name." + }, + "SONIC_EVENTS_HOST_PROCESS_EXITED_UNEXPECTEDLY_INCORRECT_PROCESS_NAME_LENGTH": { + "desc": "PROCESS_EXITED_UNEXPECTEDLY_EVENT_INCORRECT_PROCESS_NAME_LENGTH failure.", + "eStr": "Invalid length for process_name." + }, + "SONIC_EVENTS_HOST_PROCESS_EXITED_UNEXPECTEDLY_INCORRECT_TIMESTAMP": { + "desc": "PROCESS_EXITED_UNEXPECTEDLY_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_HOST_PROCESS_EXITED_UNEXPECTEDLY_VALID": { + "desc": "VALID_PROCESS_EXITED_UNEXPECTEDLY EVENT." + }, + "SONIC_EVENTS_HOST_PROCESS_NOT_RUNNING_INCORRECT_CTR_NAME": { + "desc": "PROCESS_NOT_RUNNING_EVENT_INCORRECT_CTR_NAME failure.", + "eStr": "Invalid ctr_name." 
+ }, + "SONIC_EVENTS_HOST_PROCESS_NOT_RUNNING_INCORRECT_CTR_NAME_LENGTH": { + "desc": "PROCESS_NOT_RUNNING_EVENT_INCORRECT_CTR_NAME_LENGTH failure.", + "eStr": "Invalid length for ctr_name." + }, + "SONIC_EVENTS_HOST_PROCESS_NOT_RUNNING_INCORRECT_PROCESS_NAME": { + "desc": "PROCESS_NOT_RUNNING_EVENT_INCORRECT_PROCESS_NAME failure.", + "eStr": "Invalid process_name." + }, + "SONIC_EVENTS_HOST_PROCESS_NOT_RUNNING_INCORRECT_PROCESS_NAME_LENGTH": { + "desc": "PROCESS_NOT_RUNNING_EVENT_INCORRECT_PROCESS_NAME_LENGTH failure.", + "eStr": "Invalid length for process_name." + }, + "SONIC_EVENTS_HOST_PROCESS_NOT_RUNNING_INCORRECT_TIMESTAMP": { + "desc": "PROCESS_NOT_RUNNING_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_HOST_PROCESS_NOT_RUNNING_VALID": { + "desc": "VALID_PROCESS_NOT_RUNNING EVENT." } } diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-swss.json b/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-swss.json index 2862dede7961..dec47231eb5f 100644 --- a/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-swss.json +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/sonic-events-swss.json @@ -57,5 +57,28 @@ }, "SONIC_EVENTS_SWSS_CHK_CRM_THRESHOLD_VALID": { "desc": "VALID CHK_CRM_THRESHOLD EVENT." + }, + "SONIC_EVENTS_SWSS_SELECT_OPERATION_FAILURE_INCORRECT_OPERATION_RESULT": { + "desc": "SELECT_OPERATION_FAILURE_EVENT_INCORRECT_OPERATION_RESULT failure.", + "eStr": "Invalid select operation failure operation_result." + }, + "SONIC_EVENTS_SWSS_SELECT_OPERATION_FAILURE_INCORRECT_OPERATION_RESULT_LENGTH": { + "desc": "SELECT_OPERATION_FAILURE_EVENT_INCORRECT_OPERATION_RESULT_LENGTH failure.", + "eStr": "Invalid length for select operation failure operation_result." + }, + "SONIC_EVENTS_SWSS_SELECT_OPERATION_FAILURE_INCORRECT_COMMAND": { + "desc": "SELECT_OPERATION_FAILURE_EVENT_INCORRECT_COMMAND failure.", + "eStr": "Invalid select operation failure command." 
+ }, + "SONIC_EVENTS_SWSS_SELECT_OPERATION_FAILURE_INCORRECT_COMMAND_LENGTH": { + "desc": "SELECT_OPERATION_FAILURE_EVENT_INCORRECT_COMMAND_LENGTH failure.", + "eStr": "Invalid length for select operation failure command." + }, + "SONIC_EVENTS_SWSS_SELECT_OPERATION_FAILURE_INCORRECT_TIMESTAMP": { + "desc": "SELECT_OPERATION_FAILURE_EVENT_INCORRECT_TIMESTAMP failure.", + "eStrKey": "Pattern" + }, + "SONIC_EVENTS_SWSS_SELECT_OPERATION_FAILURE_VALID": { + "desc": "VALID SELECT_OPERATION_FAILURE EVENT." } } diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-dhcp-relay.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-dhcp-relay.json index 114300f43176..70d7786b71db 100644 --- a/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-dhcp-relay.json +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-dhcp-relay.json @@ -133,5 +133,41 @@ "timestamp": "1985-04-12T23:20:50.52Z" } } + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_BIND_FAILURE_INCORRECT_VLAN": { + "sonic-events-dhcp-relay:sonic-events-dhcp-relay": { + "sonic-events-dhcp-relay:dhcp-relay-bind-failure": { + "vlan": "INCORRECT_VLAN", + "type": "local", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_BIND_FAILURE_INCORRECT_TYPE": { + "sonic-events-dhcp-relay:sonic-events-dhcp-relay": { + "sonic-events-dhcp-relay:dhcp-relay-bind-failure": { + "vlan": "Vlan100", + "type": "INCORRECT_TYPE", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_BIND_FAILURE_INCORRECT_TIMESTAMP": { + "sonic-events-dhcp-relay:sonic-events-dhcp-relay": { + "sonic-events-dhcp-relay:dhcp-relay-bind-failure": { + "vlan": "Vlan100", + "type": "global", + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_DHCP_RELAY_DHCP_RELAY_BIND_FAILURE_VALID": { + "sonic-events-dhcp-relay:sonic-events-dhcp-relay": { + 
"sonic-events-dhcp-relay:dhcp-relay-bind-failure": { + "vlan": "Vlan100", + "type": "local", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } } } diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-host.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-host.json index 9a59457a8e54..5c771de041b3 100644 --- a/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-host.json +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-host.json @@ -258,5 +258,173 @@ "timestamp": "1985-04-12T23:20:50.52Z" } } + }, + "SONIC_EVENTS_HOST_MEM_THRESHOLD_EXCEEDED_INCORRECT_CTR_NAME": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:mem-threshold-exceeded": { + "ctr_name": "Invalid$", + "mem_usage": 123456, + "threshold": 123456, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_MEM_THRESHOLD_EXCEEDED_INCORRECT_CTR_NAME_LENGTH": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:mem-threshold-exceeded": { + "ctr_name": "invalid-length-for-ctr-name-too-long", + "mem_usage": 123456, + "threshold": 123456, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_MEM_THRESHOLD_EXCEEDED_INCORRECT_MEM_USAGE": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:mem-threshold-exceeded": { + "ctr_name": "container_name", + "mem_usage": "INCORRECT_MEM_USAGE", + "threshold": 123456, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_MEM_THRESHOLD_EXCEEDED_INCORRECT_THRESHOLD": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:mem-threshold-exceeded": { + "ctr_name": "container_name", + "mem_usage": 123456, + "threshold": "INCORRECT_THRESHOLD", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_MEM_THRESHOLD_EXCEEDED_INCORRECT_TIMESTAMP": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:mem-threshold-exceeded": { + "ctr_name": 
"container_name", + "mem_usage": 123456, + "threshold": 123456, + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_HOST_MEM_THRESHOLD_EXCEEDED_VALID": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:mem-threshold-exceeded": { + "ctr_name": "container_name", + "mem_usage": 123456, + "threshold": 123456, + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_PROCESS_EXITED_UNEXPECTEDLY_INCORRECT_CTR_NAME": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:process-exited-unexpectedly": { + "ctr_name": "Invalid$", + "process_name": "process_name", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_PROCESS_EXITED_UNEXPECTEDLY_INCORRECT_CTR_NAME_LENGTH": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:process-exited-unexpectedly": { + "ctr_name": "invalid-length-for-ctr-name-too-long", + "process_name": "process_name", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_PROCESS_EXITED_UNEXPECTEDLY_INCORRECT_PROCESS_NAME": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:process-exited-unexpectedly": { + "ctr_name": "container_name", + "process_name": "Invalid$", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_PROCESS_EXITED_UNEXPECTEDLY_INCORRECT_PROCESS_NAME_LENGTH": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:process-exited-unexpectedly": { + "ctr_name": "container_name", + "process_name": "invalid-length-for-process-name-too-long", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_PROCESS_EXITED_UNEXPECTEDLY_INCORRECT_TIMESTAMP": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:process-exited-unexpectedly": { + "ctr_name": "container_name", + "process_name": "process_name", + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_HOST_PROCESS_EXITED_UNEXPECTEDLY_VALID": { + "sonic-events-host:sonic-events-host": { + 
"sonic-events-host:process-exited-unexpectedly": { + "ctr_name": "container_name", + "process_name": "process_name", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_PROCESS_NOT_RUNNING_INCORRECT_CTR_NAME": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:process-not-running": { + "ctr_name": "Invalid$", + "process_name": "process_name", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_PROCESS_NOT_RUNNING_INCORRECT_CTR_NAME_LENGTH": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:process-not-running": { + "ctr_name": "invalid-length-for-ctr-name-too-long", + "process_name": "process_name", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_PROCESS_NOT_RUNNING_INCORRECT_PROCESS_NAME": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:process-not-running": { + "ctr_name": "container_name", + "process_name": "Invalid$", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_PROCESS_NOT_RUNNING_INCORRECT_PROCESS_NAME_LENGTH": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:process-not-running": { + "ctr_name": "container_name", + "process_name": "invalid-length-for-process-name-too-long", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_HOST_PROCESS_NOT_RUNNING_INCORRECT_TIMESTAMP": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:process-not-running": { + "ctr_name": "container_name", + "process_name": "process_name", + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_HOST_PROCESS_NOT_RUNNING_VALID": { + "sonic-events-host:sonic-events-host": { + "sonic-events-host:process-not-running": { + "ctr_name": "container_name", + "process_name": "process_name", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } } } diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-swss.json 
b/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-swss.json index 885bd45f5378..62ce9c35c2e8 100644 --- a/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-swss.json +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/sonic-events-swss.json @@ -300,5 +300,59 @@ "timestamp": "1985-04-12T23:20:50.52Z" } } + }, + "SONIC_EVENTS_SWSS_SELECT_OPERATION_FAILURE_INCORRECT_OPERATION_RESULT": { + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:select-operation-failure": { + "operation_result": "Invalid$", + "command": "command", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_SWSS_SELECT_OPERATION_FAILURE_INCORRECT_OPERATION_RESULT_LENGTH": { + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:select-operation-failure": { + "operation_result": "invalid-length-for-operation-result-too-long", + "command": "command", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_SWSS_SELECT_OPERATION_FAILURE_INCORRECT_COMMAND": { + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:select-operation-failure": { + "operation_result": "operation_result", + "command": "Invalid$", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_SWSS_SELECT_OPERATION_FAILURE_INCORRECT_COMMAND_LENGTH": { + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:select-operation-failure": { + "operation_result": "operation_result", + "command": "invalid-length-for-command-too-long", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } + }, + "SONIC_EVENTS_SWSS_SELECT_OPERATION_FAILURE_INCORRECT_TIMESTAMP": { + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:select-operation-failure": { + "operation_result": "operation_result", + "command": "command", + "timestamp": "INCORRECT_TIMESTAMP" + } + } + }, + "SONIC_EVENTS_SWSS_SELECT_OPERATION_FAILURE_VALID": { + "sonic-events-swss:sonic-events-swss": { + "sonic-events-swss:select-operation-failure": { 
+ "operation_result": "operation_result", + "command": "command", + "timestamp": "1985-04-12T23:20:50.52Z" + } + } } } diff --git a/src/sonic-yang-models/yang-models/sonic-events-dhcp-relay.yang b/src/sonic-yang-models/yang-models/sonic-events-dhcp-relay.yang index 5119397968eb..1c85a55347d6 100644 --- a/src/sonic-yang-models/yang-models/sonic-events-dhcp-relay.yang +++ b/src/sonic-yang-models/yang-models/sonic-events-dhcp-relay.yang @@ -78,5 +78,32 @@ module sonic-events-dhcp-relay { uses evtcmn:sonic-events-cmn; } + + container dhcp-relay-bind-failure { + evtcmn:ALARM_SEVERITY_MAJOR; + + description " + Declares an event for socket binding failure. + parameters: + vlan that shows this failure + type either local or global"; + + leaf vlan { + type string { + pattern 'Vlan([0-9]{1,3}|[1-3][0-9]{3}|[4][0][0-8][0-9]|[4][0][9][0-4])'; + } + description "Name of the vlan affected"; + } + + leaf type { + type enumeration { + enum "local"; + enum "global"; + } + description "Address type"; + } + + uses evtcmn:sonic-events-cmn; + } } } diff --git a/src/sonic-yang-models/yang-models/sonic-events-host.yang b/src/sonic-yang-models/yang-models/sonic-events-host.yang index 3ac8213695ca..cbb129d9fe25 100644 --- a/src/sonic-yang-models/yang-models/sonic-events-host.yang +++ b/src/sonic-yang-models/yang-models/sonic-events-host.yang @@ -8,6 +8,10 @@ module sonic-events-host { revision-date 2022-12-01; } + import sonic-types { + prefix stypes; + } + organization "SONiC"; @@ -179,5 +183,67 @@ module sonic-events-host { uses evtcmn:sonic-events-cmn; } + + container mem-threshold-exceeded { + evtcmn:EVENT_SEVERITY_2; + + description " + Declares an event for memory exceeding threshold failure."; + + leaf ctr_name { + type stypes:ctr_name; + description "Container name of mem-threshold-exceeded event"; + } + + leaf mem_usage { + type uint64; + description "Memory usage of process"; + } + + leaf threshold { + type uint64; + description "Threshold value of process"; + } + + uses 
evtcmn:sonic-events-cmn; + } + + container process-exited-unexpectedly { + evtcmn:EVENT_SEVERITY_2; + + description " + Declares an event in which a critical process exits unexpectedly."; + + leaf process_name { + type stypes:process_name; + description "Name of process that is exiting unexpectedly"; + } + + leaf ctr_name { + type stypes:ctr_name; + description "Container name of process that is exiting unexpectedly"; + } + + uses evtcmn:sonic-events-cmn; + } + + container process-not-running { + evtcmn:EVENT_SEVERITY_2; + + description " + Declares an event in which a critical process exits unexpectedly."; + + leaf process_name { + type stypes:process_name; + description "Name of process that is not running"; + } + + leaf ctr_name { + type stypes:ctr_name; + description "Container name"; + } + + uses evtcmn:sonic-events-cmn; + } } } diff --git a/src/sonic-yang-models/yang-models/sonic-events-swss.yang b/src/sonic-yang-models/yang-models/sonic-events-swss.yang index 39d57cb2d256..33a3999364c6 100644 --- a/src/sonic-yang-models/yang-models/sonic-events-swss.yang +++ b/src/sonic-yang-models/yang-models/sonic-events-swss.yang @@ -104,5 +104,42 @@ module sonic-events-swss { uses evtcmn:sonic-events-cmn; } + + container select-operation-failure { + evtcmn:ALARM_SEVERITY_MAJOR; + + description " + Describes select operation fails due to timeout or other reason."; + + leaf operation_result { + type string { + pattern '[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,31})' { + error-message "Invalid select operation failure operation_result."; + error-app-tag select-operation-failure-invalid-operation-result; + } + length 1..32 { + error-message "Invalid length for select operation failure operation_result."; + error-app-tag select-operation-failure-invalid-operation-result-length; + } + } + description "Select operation result"; + } + + leaf command { + type string { + pattern '[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,31})' { + error-message "Invalid select operation failure command."; + 
error-app-tag select-operation-failure-invalid-command; + } + length 1..32 { + error-message "Invalid length for select operation failure command."; + error-app-tag select-operation-failure-invalid-command-length; + } + } + description "Operation command."; + } + + uses evtcmn:sonic-events-cmn; + } } } diff --git a/src/sonic-yang-models/yang-templates/sonic-types.yang.j2 b/src/sonic-yang-models/yang-templates/sonic-types.yang.j2 index f0cd6750f1f8..498abb3aae31 100644 --- a/src/sonic-yang-models/yang-templates/sonic-types.yang.j2 +++ b/src/sonic-yang-models/yang-templates/sonic-types.yang.j2 @@ -279,6 +279,34 @@ module sonic-types { } } + typedef process_name { + type string { + pattern '[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,31})' { + error-message "Invalid process_name."; + error-app-tag invalid-process-name; + } + length 1..32 { + error-message "Invalid length for process_name."; + error-app-tag invalid-process-name-length; + } + } + } + + typedef ctr_name { + type string { + pattern '[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,31})' { + error-message "Invalid ctr_name."; + error-app-tag invalid-ctr-name; + } + length 1..32 { + error-message "Invalid length for ctr_name."; + error-app-tag invalid-ctr-name-length; + } + } + } + + + {% if yang_model_type == "cvl" %} /* Required for CVL */ container operation { From 8f48773fd140e9fc27355427ae79386a5f5b7f9b Mon Sep 17 00:00:00 2001 From: Zain Budhwani <99770260+zbud-msft@users.noreply.github.com> Date: Mon, 7 Nov 2022 09:57:57 -0800 Subject: [PATCH 150/174] Publish additional events (#12563) Add event_publish code or regex for rsyslog plugin for additional events --- files/build_templates/dhcp_relay_regex.json | 5 +++++ files/image_config/monit/memory_checker | 19 ++++++++++++++++--- files/scripts/supervisor-proc-exit-listener | 12 ++++++++++-- .../health_checker/service_checker.py | 14 +++++++++++++- 4 files changed, 44 insertions(+), 6 deletions(-) diff --git a/files/build_templates/dhcp_relay_regex.json 
b/files/build_templates/dhcp_relay_regex.json index c7aa81eaab18..83d903343058 100644 --- a/files/build_templates/dhcp_relay_regex.json +++ b/files/build_templates/dhcp_relay_regex.json @@ -3,5 +3,10 @@ "tag": "dhcp-relay-discard", "regex": "Discarding packet received on ([a-zA-Z0-9-_]*) interface that has no IPv4 address assigned.", "params": [ "ifname" ] + }, + { + "tag": "dhcp-relay-bind-failure", + "regex": "Failed to bind socket to (link local|global) ipv6 address on interface ([a-zA-Z0-9]*)", + "params": [ "type:ret=(arg==\"link local\")and\"local\"or\"global\")", "vlan" ] } ] diff --git a/files/image_config/monit/memory_checker b/files/image_config/monit/memory_checker index bcb487261af0..11604d4c3784 100755 --- a/files/image_config/monit/memory_checker +++ b/files/image_config/monit/memory_checker @@ -27,6 +27,10 @@ import re import docker +from swsscommon import swsscommon + +EVENTS_PUBLISHER_SOURCE = "sonic-events-host" +EVENTS_PUBLISHER_TAG = "mem-threshold-exceeded" def get_command_result(command): """Executes the command and return the resulting output. @@ -54,8 +58,14 @@ def get_command_result(command): return command_stdout.strip() +def publish_events(events_handle, container_name, mem_usage_bytes, threshold_value): + params = swsscommon.FieldValueMap() + params["ctr_name"] = container_name + params["mem_usage"] = mem_usage_bytes + params["threshold"] = threshold_value + swsscommon.event_publish(events_handle, EVENTS_PUBLISHER_TAG, params) -def check_memory_usage(container_name, threshold_value): +def check_memory_usage(events_handle, container_name, threshold_value): """Checks the memory usage of a container and writes an alerting messages into the syslog if the memory usage is larger than the threshold value. @@ -89,6 +99,8 @@ def check_memory_usage(container_name, threshold_value): .format(container_name, mem_usage_bytes, threshold_value)) syslog.syslog(syslog.LOG_INFO, "[{}]: Memory usage ({} Bytes) is larger than the threshold ({} Bytes)!" 
.format(container_name, mem_usage_bytes, threshold_value)) + # publish event + publish_events(events_handle, container_name, mem_usage_bytes, threshold_value) sys.exit(3) else: syslog.syslog(syslog.LOG_ERR, "[memory_checker] Failed to retrieve memory value from '{}'" @@ -148,13 +160,14 @@ def main(): sys.exit(0) running_container_names = get_running_container_names() + events_handle = swsscommon.events_init_publisher(EVENTS_PUBLISHER_SOURCE) if args.container_name in running_container_names: - check_memory_usage(args.container_name, args.threshold_value) + check_memory_usage(events_handle, args.container_name, args.threshold_value) else: syslog.syslog(syslog.LOG_INFO, "[memory_checker] Exits without checking memory usage since container '{}' is not running!" .format(args.container_name)) - + swsscommon.events_deinit_publisher(events_handle) if __name__ == "__main__": main() diff --git a/files/scripts/supervisor-proc-exit-listener b/files/scripts/supervisor-proc-exit-listener index a17ffb7e45fa..dbfdaf2c5ac5 100755 --- a/files/scripts/supervisor-proc-exit-listener +++ b/files/scripts/supervisor-proc-exit-listener @@ -31,6 +31,8 @@ SELECT_TIMEOUT_SECS = 1.0 # Alerting message will be written into syslog in the following interval ALERTING_INTERVAL_SECS = 60 +EVENTS_PUBLISHER_SOURCE = "sonic-events-host" +EVENTS_PUBLISHER_TAG = "process-exited-unexpectedly" def get_critical_group_and_process_list(): """ @@ -106,6 +108,11 @@ def get_autorestart_state(container_name): return is_auto_restart +def publish_events(events_handle, process_name, container_name): + params = swsscommon.FieldValueMap() + params["process_name"] = process_name + params["ctr_name"] = container_name + swsscommon.event_publish(events_handle, EVENTS_PUBLISHER_TAG, params) def main(argv): container_name = None @@ -123,7 +130,7 @@ def main(argv): process_under_alerting = defaultdict(dict) # Transition from ACKNOWLEDGED to READY childutils.listener.ready() - + events_handle = 
swsscommon.events_init_publisher(EVENTS_PUBLISHER_SOURCE) while True: file_descriptor_list = select.select([sys.stdin], [], [], SELECT_TIMEOUT_SECS)[0] if len(file_descriptor_list) > 0: @@ -145,6 +152,8 @@ def main(argv): MSG_FORMAT_STR = "Process '{}' exited unexpectedly. Terminating supervisor '{}'" msg = MSG_FORMAT_STR.format(payload_headers['processname'], container_name) syslog.syslog(syslog.LOG_INFO, msg) + publish_events(events_handle, payload_headers['processname'], container_name) + swsscommon.events_deinit_publisher(events_handle) os.kill(os.getppid(), signal.SIGTERM) else: process_under_alerting[process_name]["last_alerted"] = time.time() @@ -174,6 +183,5 @@ def main(argv): process_under_alerting[process_name]["dead_minutes"] += elapsed_mins generate_alerting_message(process_name, process_under_alerting[process_name]["dead_minutes"]) - if __name__ == "__main__": main(sys.argv[1:]) diff --git a/src/system-health/health_checker/service_checker.py b/src/system-health/health_checker/service_checker.py index c81948a7ae25..ed6c7296fde3 100644 --- a/src/system-health/health_checker/service_checker.py +++ b/src/system-health/health_checker/service_checker.py @@ -12,6 +12,8 @@ SYSLOG_IDENTIFIER = 'service_checker' logger = Logger(log_identifier=SYSLOG_IDENTIFIER) +EVENTS_PUBLISHER_SOURCE = "sonic-events-host" +EVENTS_PUBLISHER_TAG = "process-not-running" class ServiceChecker(HealthChecker): """ @@ -55,6 +57,8 @@ def __init__(self): self.load_critical_process_cache() + self.events_handle = swsscommon.events_init_publisher(EVENTS_PUBLISHER_SOURCE) + def get_expected_running_containers(self, feature_table): """Get a set of containers that are expected to running on SONiC @@ -288,7 +292,7 @@ def check(self, config): self.reset() self.check_by_monit(config) self.check_services(config) - + swsscommon.events_deinit_publisher(self.events_handle) def _parse_supervisorctl_status(self, process_status): """Expected input: @@ -309,6 +313,13 @@ def 
_parse_supervisorctl_status(self, process_status): data[items[0].strip()] = items[1].strip() return data + def publish_events(self, container_name, critical_process_list): + params = swsscommon.FieldValueMap() + params["ctr_name"] = container_name + for process_name in critical_process_list: + params["process_name"] = process_name + swsscommon.event_publish(self.events_handle, EVENTS_PUBLISHER_TAG, params) + def check_process_existence(self, container_name, critical_process_list, config, feature_table): """Check whether the process in the specified container is running or not. @@ -333,6 +344,7 @@ def check_process_existence(self, container_name, critical_process_list, config, if process_status is None: for process_name in critical_process_list: self.set_object_not_ok('Process', '{}:{}'.format(container_name, process_name), "'{}' is not running".format(process_name)) + self.publish_events(container_name, critical_process_list) return process_status = self._parse_supervisorctl_status(process_status.strip().splitlines()) From ddf16c9d8cf01aeb5fbafd67688c9f2113f594f0 Mon Sep 17 00:00:00 2001 From: Lawrence Lee Date: Mon, 7 Nov 2022 12:10:00 -0800 Subject: [PATCH 151/174] [arp_update]: Fix hardcoded vlan (#12566) Typo in prior PR #11919 hardcodes Vlan name. 
Change command to use the $vlan variable instead Signed-off-by: Lawrence Lee --- files/scripts/arp_update | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/scripts/arp_update b/files/scripts/arp_update index 5522ea46828a..7f9a7e1dac7d 100755 --- a/files/scripts/arp_update +++ b/files/scripts/arp_update @@ -86,7 +86,7 @@ while /bin/true; do # generates the following command for each failed or incomplete IPv6 neighbor # ip neigh replace dev nud incomplete neigh_replace_template="sed -e 's/^/ip neigh replace /' -e 's/,/ dev /' -e 's/$/ nud incomplete;/'" - ip_neigh_replace_cmd="ip -6 neigh show | grep -v fe80 | grep Vlan1000 | grep -E 'FAILED|INCOMPLETE' | cut -d ' ' -f 1,3 --output-delimiter=',' | $neigh_replace_template" + ip_neigh_replace_cmd="ip -6 neigh show | grep -v fe80 | grep $vlan | grep -E 'FAILED|INCOMPLETE' | cut -d ' ' -f 1,3 --output-delimiter=',' | $neigh_replace_template" eval `eval $ip_neigh_replace_cmd` # on dual ToR devices, try to resolve failed neighbor entries since From c4be3a51aa5339ede0fbb716d9d4fe9aae63ede6 Mon Sep 17 00:00:00 2001 From: arlakshm <55814491+arlakshm@users.noreply.github.com> Date: Mon, 7 Nov 2022 23:30:02 +0000 Subject: [PATCH 152/174] [chassis][Arista] add supervisor to the platform_env.conf (#12615) Why I did it Fixes #12614 How I did it In the container_checker the database_chassis is added to expected container if device is supervisor To detect the device is superviso, add supervisor=1 to the platform_env.conf of 7808 sup platform How to verify it run container_checker monit check Signed-off-by: Arvindsrinivasan Lakshmi Narasimhan --- device/arista/x86_64-arista_7800_sup/platform_env.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/device/arista/x86_64-arista_7800_sup/platform_env.conf b/device/arista/x86_64-arista_7800_sup/platform_env.conf index 89102035fc48..c7bd98f7c018 100644 --- a/device/arista/x86_64-arista_7800_sup/platform_env.conf +++ 
b/device/arista/x86_64-arista_7800_sup/platform_env.conf @@ -1,2 +1,3 @@ usemsi=1 dmasize=512M +supervisor=1 From 18aca96f5fec436af420e69781d11ee263de702f Mon Sep 17 00:00:00 2001 From: Vivek Date: Tue, 8 Nov 2022 02:18:35 -0800 Subject: [PATCH 153/174] [submodule] Advance sonic-linux-kernel pointer (#12560) 686b9b1 Update Makefile to provision the ability of building with non-upstream patches (#296) 3b95205 [patch]: Introduce sysctl param `arp_evict_no_carrier` (#293) Signed-off-by: Vivek Reddy --- src/sonic-linux-kernel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-linux-kernel b/src/sonic-linux-kernel index 443253f637ec..686b9b16acc1 160000 --- a/src/sonic-linux-kernel +++ b/src/sonic-linux-kernel @@ -1 +1 @@ -Subproject commit 443253f637ec3dccac246199977a6d65346d7878 +Subproject commit 686b9b16acc1f453cae2a697e9d8009c441c0ab0 From c8c2b7fc45033d66fa58eb9d681345f1a41bff95 Mon Sep 17 00:00:00 2001 From: Kebo Liu Date: Tue, 8 Nov 2022 19:37:10 +0800 Subject: [PATCH 154/174] [Mellanox] [Platform API] Update SN2201 dynamic minimum fan speed table (#12602) - Why I did it Update SN2201 dynamic minimum fan speed table according to data provided by the thermal team. 
- How I did it Update the thermal table in device_data.py - How to verify it Run platform related regression Signed-off-by: Kebo Liu --- .../mellanox/mlnx-platform-api/sonic_platform/device_data.py | 4 ++++ platform/mellanox/mlnx-platform-api/tests/test_thermal.py | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/platform/mellanox/mlnx-platform-api/sonic_platform/device_data.py b/platform/mellanox/mlnx-platform-api/sonic_platform/device_data.py index 94ed64d7d380..522287cdb531 100644 --- a/platform/mellanox/mlnx-platform-api/sonic_platform/device_data.py +++ b/platform/mellanox/mlnx-platform-api/sonic_platform/device_data.py @@ -156,6 +156,10 @@ }, 'x86_64-nvidia_sn2201-r0': { 'thermal': { + 'minimum_table': { + "unk_trust": {"-127:30": 13, "31:35": 14, "36:40": 15, "41:120": 16}, + "unk_untrust": {"-127:15": 13, "16:20": 14, "21:25": 15, "26:30": 16, "31:35": 17, "36:40": 18, "41:120": 19}, + }, "capability": { "comex_amb": False, "cpu_amb": True diff --git a/platform/mellanox/mlnx-platform-api/tests/test_thermal.py b/platform/mellanox/mlnx-platform-api/tests/test_thermal.py index a7fdc4d0bafe..63da97161d20 100644 --- a/platform/mellanox/mlnx-platform-api/tests/test_thermal.py +++ b/platform/mellanox/mlnx-platform-api/tests/test_thermal.py @@ -93,10 +93,10 @@ def test_chassis_thermal(self): assert gearbox_thermal_count == 2 assert cpu_thermal_count == 2 + @mock.patch('sonic_platform.device_data.DeviceDataManager.get_platform_name', mock.MagicMock(return_value='x86_64-nvidia_sn2201-r0')) + @mock.patch('sonic_platform.device_data.DeviceDataManager.get_thermal_capability', mock.MagicMock(return_value={'comex_amb': False, 'cpu_amb': True, 'swb_amb': True})) def test_chassis_thermal_includes(self): from sonic_platform.thermal import THERMAL_NAMING_RULE - DeviceDataManager.get_platform_name = mock.MagicMock(return_value='x86_64-nvidia_sn2201-r0') - DeviceDataManager.get_thermal_capability = mock.MagicMock(return_value={'comex_amb': False, 'cpu_amb': 
True, 'swb_amb': True}) chassis = Chassis() thermal_list = chassis.get_all_thermals() assert thermal_list From e6a0fba9eadc9019bbf9babe3ebd83a47b28c148 Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Tue, 8 Nov 2022 03:38:14 -0800 Subject: [PATCH 155/174] [logrotate]Fix logrotate firstaction script to reflect correct size (#12599) - Why I did it Fix logrotate firstaction script to reflect correct size. The size was modified to change dynamically based on disk size. However this variable was not updated #9504 - How I did it Updated the variable based on disk size - How to verify it Verify in the generated rsyslog file if the variable is correctly generated from jinja template --- files/image_config/logrotate/rsyslog.j2 | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/files/image_config/logrotate/rsyslog.j2 b/files/image_config/logrotate/rsyslog.j2 index 28a7d9dd2ee0..25db65ac48b8 100644 --- a/files/image_config/logrotate/rsyslog.j2 +++ b/files/image_config/logrotate/rsyslog.j2 @@ -50,7 +50,11 @@ NUM_LOGS_TO_ROTATE=8 # Adjust LOG_FILE_ROTATE_SIZE_KB to reflect the "size" parameter specified above, in kB +{% if var_log_kb <= 204800 %} LOG_FILE_ROTATE_SIZE_KB=1024 +{% else %} + LOG_FILE_ROTATE_SIZE_KB=16384 +{% endif %} # Reserve space for btmp, wtmp, dpkg.log, monit.log, etc., as well as logs that # should be disabled, just in case they get created and rotated From 13203198110e6abec44b53fe7bd5365f169958f0 Mon Sep 17 00:00:00 2001 From: judyjoseph <53951155+judyjoseph@users.noreply.github.com> Date: Tue, 8 Nov 2022 11:01:40 -0800 Subject: [PATCH 156/174] Update submodule (#12635) sonic-host-services submodule update with following changes 6eac2d3 Merge pull request #19 from judyjoseph/macsec_feature_enable 31c6108 Sync has_per_asic_scope attribute to config_db in all namespaces for multi-asic 185547f Add UT to improve coverage, for namespace config update 7c3aca0 macsec_supported info is part of DEVICE_RUNTIME_METADATA itself. 
--- src/sonic-host-services | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-host-services b/src/sonic-host-services index bc8698d1d760..6eac2d3bb254 160000 --- a/src/sonic-host-services +++ b/src/sonic-host-services @@ -1 +1 @@ -Subproject commit bc8698d1d760fefedaeb4742ad19b25ef2b3c17b +Subproject commit 6eac2d3bb25405cd03424ece6ec8a469b7b9844c From c259c996b4767d4ab4f040b1448869fabbc6bc50 Mon Sep 17 00:00:00 2001 From: judyjoseph <53951155+judyjoseph@users.noreply.github.com> Date: Tue, 8 Nov 2022 11:03:38 -0800 Subject: [PATCH 157/174] Use the macsec_enabled flag in platform to enable macsec feature state (#11998) * Use the macsec_enabled flag in platform to enable macesc feature state * Add macsec supported metadata in DEVICE_RUNTIME_METADATA --- .../platform_env.conf | 1 + .../platform_env.conf | 1 + files/build_templates/init_cfg.json.j2 | 2 +- .../sonic_py_common/device_info.py | 23 +++++++++++++++++++ 4 files changed, 26 insertions(+), 1 deletion(-) diff --git a/device/arista/x86_64-arista_7800r3a_36d2_lc/platform_env.conf b/device/arista/x86_64-arista_7800r3a_36d2_lc/platform_env.conf index 558fb7393f62..0f19ad3cadb6 100644 --- a/device/arista/x86_64-arista_7800r3a_36d2_lc/platform_env.conf +++ b/device/arista/x86_64-arista_7800r3a_36d2_lc/platform_env.conf @@ -1,2 +1,3 @@ usemsi=1 dmasize=64M +macsec_enabled=1 diff --git a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/platform_env.conf b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/platform_env.conf index 45697fe72fc1..15a060d467b1 100644 --- a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/platform_env.conf +++ b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/platform_env.conf @@ -1,3 +1,4 @@ usemsi=1 dmasize=512M default_mtu=9100 +macsec_enabled=1 diff --git a/files/build_templates/init_cfg.json.j2 b/files/build_templates/init_cfg.json.j2 index 3f0465be15ef..8342b4178d37 100644 --- a/files/build_templates/init_cfg.json.j2 +++ b/files/build_templates/init_cfg.json.j2 @@ -52,7 
+52,7 @@ {%- if include_p4rt == "y" %}{% do features.append(("p4rt", "disabled", false, "enabled")) %}{% endif %} {%- if include_restapi == "y" %}{% do features.append(("restapi", "enabled", false, "enabled")) %}{% endif %} {%- if include_sflow == "y" %}{% do features.append(("sflow", "disabled", false, "enabled")) %}{% endif %} -{%- if include_macsec == "y" %}{% do features.append(("macsec", "disabled", false, "enabled")) %}{% endif %} +{%- if include_macsec == "y" %}{% do features.append(("macsec", "{% if 'type' in DEVICE_METADATA['localhost'] and DEVICE_METADATA['localhost']['type'] == 'SpineRouter' and DEVICE_RUNTIME_METADATA['MACSEC_SUPPORTED'] %}enabled{% else %}disabled{% endif %}", false, "enabled")) %}{% endif %} {%- if include_system_telemetry == "y" %}{% do features.append(("telemetry", "enabled", true, "enabled")) %}{% endif %} "FEATURE": { {# has_timer field if set, will start the feature systemd .timer unit instead of .service unit #} diff --git a/src/sonic-py-common/sonic_py_common/device_info.py b/src/sonic-py-common/sonic_py_common/device_info.py index 3e14979fe4d6..48a7e76e2f81 100644 --- a/src/sonic-py-common/sonic_py_common/device_info.py +++ b/src/sonic-py-common/sonic_py_common/device_info.py @@ -469,6 +469,27 @@ def is_supervisor(): return True return False +# Check if this platform has macsec capability. 
+def is_macsec_supported(): + supported = 0 + platform_env_conf_file_path = get_platform_env_conf_file_path() + + # platform_env.conf file not present for platform + if platform_env_conf_file_path is None: + return supported + + # Else open the file check for keyword - macsec_enabled - + with open(platform_env_conf_file_path) as platform_env_conf_file: + for line in platform_env_conf_file: + tokens = line.split('=') + if len(tokens) < 2: + continue + if tokens[0].lower() == 'macsec_enabled': + supported = tokens[1].strip() + break + return int(supported) + + def get_device_runtime_metadata(): chassis_metadata = {} if is_chassis(): @@ -476,9 +497,11 @@ def get_device_runtime_metadata(): 'chassis_type': 'voq' if is_voq_chassis() else 'packet'}} port_metadata = {'ETHERNET_PORTS_PRESENT': True if get_path_to_port_config_file(hwsku=None, asic="0" if is_multi_npu() else None) else False} + macsec_support_metadata = {'MACSEC_SUPPORTED': True if is_macsec_supported() else False} runtime_metadata = {} runtime_metadata.update(chassis_metadata) runtime_metadata.update(port_metadata) + runtime_metadata.update(macsec_support_metadata) return {'DEVICE_RUNTIME_METADATA': runtime_metadata } def get_npu_id_from_name(npu_name): From fc17cca6c914b2846b4eaaaac260966a2bf458fd Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Tue, 8 Nov 2022 11:08:35 -0800 Subject: [PATCH 158/174] [sonic-linkmgrd][master] submodule update (#12610) [sonic-linkmgrd][master] submodule update b3501d2 Jing Zhang Wed Nov 2 22:22:45 2022 -0700 [active-standby][active-active] update link prober stats updating frequency to 30s (#152) 5d546ec Jing Zhang Tue Nov 1 16:12:17 2022 -0700 [202205] incrementing icmp buffer size (#150) 76b128a Jing Zhang Tue Nov 1 12:06:21 2022 -0700 [Active-Active] periodically re-sync soc side admin forwarding state (#151) sign-off: Jing Zhang zhangjing@microsoft.com --- src/linkmgrd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/linkmgrd b/src/linkmgrd index 
dcf64601179e..b3501d27daa1 160000 --- a/src/linkmgrd +++ b/src/linkmgrd @@ -1 +1 @@ -Subproject commit dcf64601179e5c4ef23fe0137acf6dd7fba0e604 +Subproject commit b3501d27daa12760e3203c66ea757800d7fe5102 From f581a77a64b7e5d4088b37baa663f085d246946e Mon Sep 17 00:00:00 2001 From: wenyiz2021 <91497961+wenyiz2021@users.noreply.github.com> Date: Tue, 8 Nov 2022 12:56:39 -0800 Subject: [PATCH 159/174] [Chassis] [Arista] correct platform.json for sup and LC6 names (#12627) add platform.json separately for LC6 that has different name, bc of supporting macsec Signed-off-by: Wenyi Zhang --- .../x86_64-arista_7800_sup/platform.json | 2 +- .../platform.json | 174 +++++++++++++++++- 2 files changed, 174 insertions(+), 2 deletions(-) mode change 120000 => 100644 device/arista/x86_64-arista_7800r3_48cqm2_lc/platform.json diff --git a/device/arista/x86_64-arista_7800_sup/platform.json b/device/arista/x86_64-arista_7800_sup/platform.json index 3afdaa3eb88b..958c57ac498c 100644 --- a/device/arista/x86_64-arista_7800_sup/platform.json +++ b/device/arista/x86_64-arista_7800_sup/platform.json @@ -1,6 +1,6 @@ { "chassis": { - "name": "DCS-7800A-SUP1A", + "name": "DCS-7800-SUP1A", "components": [], "fans": [], "fan_drawers": [], diff --git a/device/arista/x86_64-arista_7800r3_48cqm2_lc/platform.json b/device/arista/x86_64-arista_7800r3_48cqm2_lc/platform.json deleted file mode 120000 index 2b84d998cf25..000000000000 --- a/device/arista/x86_64-arista_7800r3_48cqm2_lc/platform.json +++ /dev/null @@ -1 +0,0 @@ -../x86_64-arista_7800r3_48cq2_lc/platform.json \ No newline at end of file diff --git a/device/arista/x86_64-arista_7800r3_48cqm2_lc/platform.json b/device/arista/x86_64-arista_7800r3_48cqm2_lc/platform.json new file mode 100644 index 000000000000..18798cabb33a --- /dev/null +++ b/device/arista/x86_64-arista_7800r3_48cqm2_lc/platform.json @@ -0,0 +1,173 @@ +{ + "chassis": { + "name": "7800R3-48CQM2-LC", + "components": [], + "fans": [], + "fan_drawers": [], + "psus": [], + 
"thermals": [ + { + "name": "Cpu temp sensor" + }, + { + "name": "Center back" + }, + { + "name": "Fap0 core0" + }, + { + "name": "Fap0 core1" + }, + { + "name": "PCIE" + } + ], + "sfps": [ + { + "name": "qsfp1" + }, + { + "name": "qsfp2" + }, + { + "name": "qsfp3" + }, + { + "name": "qsfp4" + }, + { + "name": "qsfp5" + }, + { + "name": "qsfp6" + }, + { + "name": "qsfp7" + }, + { + "name": "qsfp8" + }, + { + "name": "qsfp9" + }, + { + "name": "qsfp10" + }, + { + "name": "qsfp11" + }, + { + "name": "qsfp12" + }, + { + "name": "qsfp13" + }, + { + "name": "qsfp14" + }, + { + "name": "qsfp15" + }, + { + "name": "qsfp16" + }, + { + "name": "qsfp17" + }, + { + "name": "qsfp18" + }, + { + "name": "qsfp19" + }, + { + "name": "qsfp20" + }, + { + "name": "qsfp21" + }, + { + "name": "qsfp22" + }, + { + "name": "qsfp23" + }, + { + "name": "qsfp24" + }, + { + "name": "qsfp25" + }, + { + "name": "qsfp26" + }, + { + "name": "qsfp27" + }, + { + "name": "qsfp28" + }, + { + "name": "qsfp29" + }, + { + "name": "qsfp30" + }, + { + "name": "qsfp31" + }, + { + "name": "qsfp32" + }, + { + "name": "qsfp33" + }, + { + "name": "qsfp34" + }, + { + "name": "qsfp35" + }, + { + "name": "qsfp36" + }, + { + "name": "qsfp37" + }, + { + "name": "qsfp38" + }, + { + "name": "qsfp39" + }, + { + "name": "qsfp40" + }, + { + "name": "qsfp41" + }, + { + "name": "qsfp42" + }, + { + "name": "qsfp43" + }, + { + "name": "qsfp44" + }, + { + "name": "qsfp45" + }, + { + "name": "qsfp46" + }, + { + "name": "qsfp47" + }, + { + "name": "qsfp48" + } + ] + }, + "interfaces": {} +} From ac5d89c6acebc9cfd75e21ec411e7ef175aef069 Mon Sep 17 00:00:00 2001 From: xumia <59720581+xumia@users.noreply.github.com> Date: Wed, 9 Nov 2022 08:09:53 +0800 Subject: [PATCH 160/174] [Build] Support j2 template for debian sources (#12557) Why I did it Unify the Debian mirror sources Make easy to upgrade to the next Debian release, not source url code change required. 
Support to customize the Debian mirror sources during the build Relative issue: #12523 --- .gitignore | 3 ++ Makefile.work | 7 ++++ build_debian.sh | 1 + dockers/docker-base-bullseye/Dockerfile.j2 | 8 +--- dockers/docker-base-bullseye/sources.list | 14 ------- .../docker-base-bullseye/sources.list.arm64 | 9 ----- .../docker-base-bullseye/sources.list.armhf | 9 ----- dockers/docker-base-buster/Dockerfile.j2 | 8 +--- dockers/docker-base-buster/sources.list | 14 ------- dockers/docker-base-buster/sources.list.arm64 | 12 ------ dockers/docker-base-buster/sources.list.armhf | 12 ------ dockers/docker-base-stretch/Dockerfile.j2 | 8 +--- dockers/docker-base-stretch/sources.list | 11 ------ .../docker-base-stretch/sources.list.arm64 | 11 ------ .../docker-base-stretch/sources.list.armhf | 11 ------ files/apt/sources.list.amd64 | 13 ------- files/apt/sources.list.arm64 | 13 ------- files/apt/sources.list.armhf | 18 --------- files/apt/sources.list.j2 | 20 ++++++++++ scripts/build_mirror_config.sh | 26 +++++++++++++ scripts/prepare_docker_buildinfo.sh | 4 ++ slave.mk | 2 + sonic-slave-bullseye/Dockerfile.j2 | 37 ++----------------- sonic-slave-buster/Dockerfile.j2 | 36 ++---------------- sonic-slave-stretch/Dockerfile.j2 | 35 +----------------- 25 files changed, 75 insertions(+), 267 deletions(-) delete mode 100644 dockers/docker-base-bullseye/sources.list delete mode 100644 dockers/docker-base-bullseye/sources.list.arm64 delete mode 100644 dockers/docker-base-bullseye/sources.list.armhf delete mode 100644 dockers/docker-base-buster/sources.list delete mode 100644 dockers/docker-base-buster/sources.list.arm64 delete mode 100644 dockers/docker-base-buster/sources.list.armhf delete mode 100644 dockers/docker-base-stretch/sources.list delete mode 100644 dockers/docker-base-stretch/sources.list.arm64 delete mode 100644 dockers/docker-base-stretch/sources.list.armhf delete mode 100644 files/apt/sources.list.amd64 delete mode 100644 files/apt/sources.list.arm64 delete mode 100644 
files/apt/sources.list.armhf create mode 100644 files/apt/sources.list.j2 create mode 100755 scripts/build_mirror_config.sh diff --git a/.gitignore b/.gitignore index 5f6f41b1a09c..60c328d62289 100644 --- a/.gitignore +++ b/.gitignore @@ -96,3 +96,6 @@ htmlcov/ .vscode/ .idea/ +# Debian mirror Sources +sources.list.* +!sources.list*.j2 diff --git a/Makefile.work b/Makefile.work index 56f0cde6c331..b048282d64b7 100644 --- a/Makefile.work +++ b/Makefile.work @@ -494,6 +494,8 @@ SONIC_BUILD_INSTRUCTION := $(MAKE) \ ENABLE_FIPS_FEATURE=$(ENABLE_FIPS_FEATURE) \ ENABLE_FIPS=$(ENABLE_FIPS) \ SONIC_SLAVE_DOCKER_DRIVER=$(SONIC_SLAVE_DOCKER_DRIVER) \ + MIRROR_URLS=$(MIRROR_URLS) \ + MIRROR_SECURITY_URLS=$(MIRROR_SECURITY_URLS) \ $(SONIC_OVERRIDE_BUILD_VARS) .PHONY: sonic-slave-build sonic-slave-bash init reset @@ -514,6 +516,9 @@ endif .DEFAULT_GOAL := all .SHELLFLAGS += -e +export MIRROR_URLS +export MIRROR_SECURITY_URLS + %:: | sonic-build-hooks ifneq ($(filter y, $(MULTIARCH_QEMU_ENVIRON) $(CROSS_BUILD_ENVIRON)),) $(Q)$(DOCKER_MULTIARCH_CHECK) @@ -539,6 +544,8 @@ sonic-build-hooks: $(Q)pushd src/sonic-build-hooks; TRUSTED_GPG_URLS=$(TRUSTED_GPG_URLS) $(MAKE) all; popd $(Q)mkdir -p $(SLAVE_DIR)/buildinfo $(Q)cp src/sonic-build-hooks/buildinfo/sonic-build-hooks* $(SLAVE_DIR)/buildinfo + $(Q)[ "$(MULTIARCH_QEMU_ENVIRON)" == y ] && scripts/build_mirror_config.sh $(SLAVE_DIR) amd64 $(BLDENV) + $(Q)scripts/build_mirror_config.sh $(SLAVE_DIR) $(CONFIGURED_ARCH) $(BLDENV) sonic-slave-base-build : | sonic-build-hooks ifeq ($(MULTIARCH_QEMU_ENVIRON), y) diff --git a/build_debian.sh b/build_debian.sh index dd98b052e16e..e09e23706f3a 100755 --- a/build_debian.sh +++ b/build_debian.sh @@ -108,6 +108,7 @@ sudo LANG=C chroot $FILESYSTEM_ROOT mount [ -d $TRUSTED_GPG_DIR ] && [ ! 
-z "$(ls $TRUSTED_GPG_DIR)" ] && sudo cp $TRUSTED_GPG_DIR/* ${FILESYSTEM_ROOT}/etc/apt/trusted.gpg.d/ ## Pointing apt to public apt mirrors and getting latest packages, needed for latest security updates +scripts/build_mirror_config.sh files/apt $CONFIGURED_ARCH $IMAGE_DISTRO sudo cp files/apt/sources.list.$CONFIGURED_ARCH $FILESYSTEM_ROOT/etc/apt/sources.list sudo cp files/apt/apt.conf.d/{81norecommends,apt-{clean,gzip-indexes,no-languages},no-check-valid-until} $FILESYSTEM_ROOT/etc/apt/apt.conf.d/ diff --git a/dockers/docker-base-bullseye/Dockerfile.j2 b/dockers/docker-base-bullseye/Dockerfile.j2 index 8d197d3c9011..c237015500df 100644 --- a/dockers/docker-base-bullseye/Dockerfile.j2 +++ b/dockers/docker-base-bullseye/Dockerfile.j2 @@ -27,13 +27,7 @@ ENV DEBIAN_FRONTEND=noninteractive # Configure data sources for apt/dpkg COPY ["dpkg_01_drop", "/etc/dpkg/dpkg.cfg.d/01_drop"] -{% if CONFIGURED_ARCH == "armhf" %} -COPY ["sources.list.armhf", "/etc/apt/sources.list"] -{% elif CONFIGURED_ARCH == "arm64" %} -COPY ["sources.list.arm64", "/etc/apt/sources.list"] -{% else %} -COPY ["sources.list", "/etc/apt/sources.list"] -{% endif %} +COPY ["sources.list.{{ CONFIGURED_ARCH }}", "/etc/apt/sources.list"] COPY ["no_install_recommend_suggest", "/etc/apt/apt.conf.d"] COPY ["no-check-valid-until", "/etc/apt/apt.conf.d"] diff --git a/dockers/docker-base-bullseye/sources.list b/dockers/docker-base-bullseye/sources.list deleted file mode 100644 index c45ef0811f10..000000000000 --- a/dockers/docker-base-bullseye/sources.list +++ /dev/null @@ -1,14 +0,0 @@ -## Debian mirror on Microsoft Azure -## Ref: http://debian-archive.trafficmanager.net/ - -deb [arch=amd64] http://debian-archive.trafficmanager.net/debian/ bullseye main contrib non-free -deb-src [arch=amd64] http://debian-archive.trafficmanager.net/debian/ bullseye main contrib non-free -deb [arch=amd64] http://debian-archive.trafficmanager.net/debian-security/ bullseye-security main contrib non-free -deb-src [arch=amd64] 
http://debian-archive.trafficmanager.net/debian-security/ bullseye-security main contrib non-free -deb [arch=amd64] http://debian-archive.trafficmanager.net/debian bullseye-updates main contrib non-free -deb [arch=amd64] http://debian-archive.trafficmanager.net/debian/ bullseye-backports main contrib non-free - -# Debian mirror supports multiple versions for a package -deb [arch=amd64] http://packages.trafficmanager.net/debian/debian bullseye main contrib non-free -deb [arch=amd64] http://packages.trafficmanager.net/debian/debian bullseye-updates main contrib non-free -deb [arch=amd64] http://packages.trafficmanager.net/debian/debian bullseye-backports main contrib non-free diff --git a/dockers/docker-base-bullseye/sources.list.arm64 b/dockers/docker-base-bullseye/sources.list.arm64 deleted file mode 100644 index 5a0652aa3c7e..000000000000 --- a/dockers/docker-base-bullseye/sources.list.arm64 +++ /dev/null @@ -1,9 +0,0 @@ -## Debian mirror for ARM repo - -# ARM repo -deb [arch=arm64] http://deb.debian.org/debian bullseye main contrib non-free -deb [arch=arm64] http://deb.debian.org/debian bullseye-updates main contrib non-free -deb [arch=arm64] http://security.debian.org bullseye-security main contrib non-free -deb [arch=arm64] http://deb.debian.org/debian bullseye-backports main -deb [arch=arm64] http://packages.trafficmanager.net/debian/debian bullseye main contrib non-free -deb [arch=arm64] http://packages.trafficmanager.net/debian/debian bullseye-updates main contrib non-free diff --git a/dockers/docker-base-bullseye/sources.list.armhf b/dockers/docker-base-bullseye/sources.list.armhf deleted file mode 100644 index 435f4692de9f..000000000000 --- a/dockers/docker-base-bullseye/sources.list.armhf +++ /dev/null @@ -1,9 +0,0 @@ -## Debian mirror for ARM repo - -# ARM repo -deb [arch=armhf] http://deb.debian.org/debian bullseye main contrib non-free -deb [arch=armhf] http://deb.debian.org/debian bullseye-updates main contrib non-free -deb [arch=armhf] 
http://security.debian.org bullseye-security main contrib non-free -deb [arch=armhf] http://deb.debian.org/debian bullseye-backports main -deb [arch=armhf] http://packages.trafficmanager.net/debian/debian bullseye main contrib non-free -deb [arch=armhf] http://packages.trafficmanager.net/debian/debian bullseye-updates main contrib non-free diff --git a/dockers/docker-base-buster/Dockerfile.j2 b/dockers/docker-base-buster/Dockerfile.j2 index 9118287cac39..6977b44d3412 100644 --- a/dockers/docker-base-buster/Dockerfile.j2 +++ b/dockers/docker-base-buster/Dockerfile.j2 @@ -27,13 +27,7 @@ ENV DEBIAN_FRONTEND=noninteractive # Configure data sources for apt/dpkg COPY ["dpkg_01_drop", "/etc/dpkg/dpkg.cfg.d/01_drop"] -{% if CONFIGURED_ARCH == "armhf" %} -COPY ["sources.list.armhf", "/etc/apt/sources.list"] -{% elif CONFIGURED_ARCH == "arm64" %} -COPY ["sources.list.arm64", "/etc/apt/sources.list"] -{% else %} -COPY ["sources.list", "/etc/apt/sources.list"] -{% endif %} +COPY ["sources.list.{{ CONFIGURED_ARCH }}", "/etc/apt/sources.list"] COPY ["no_install_recommend_suggest", "/etc/apt/apt.conf.d"] COPY ["no-check-valid-until", "/etc/apt/apt.conf.d"] diff --git a/dockers/docker-base-buster/sources.list b/dockers/docker-base-buster/sources.list deleted file mode 100644 index 473c9eb22e76..000000000000 --- a/dockers/docker-base-buster/sources.list +++ /dev/null @@ -1,14 +0,0 @@ -## Debian mirror on Microsoft Azure -## Ref: http://debian-archive.trafficmanager.net/ - -deb [arch=amd64] http://debian-archive.trafficmanager.net/debian/ buster main contrib non-free -deb-src [arch=amd64] http://debian-archive.trafficmanager.net/debian/ buster main contrib non-free -deb [arch=amd64] http://debian-archive.trafficmanager.net/debian-security/ buster/updates main contrib non-free -deb-src [arch=amd64] http://debian-archive.trafficmanager.net/debian-security/ buster/updates main contrib non-free -deb [arch=amd64] http://debian-archive.trafficmanager.net/debian buster-updates main contrib 
non-free -deb [arch=amd64] http://debian-archive.trafficmanager.net/debian/ buster-backports main contrib non-free - -# Debian mirror supports multiple versions for a package -deb [arch=amd64] http://packages.trafficmanager.net/debian/debian buster main contrib non-free -deb [arch=amd64] http://packages.trafficmanager.net/debian/debian buster-updates main contrib non-free -deb [arch=amd64] http://packages.trafficmanager.net/debian/debian buster-backports main contrib non-free diff --git a/dockers/docker-base-buster/sources.list.arm64 b/dockers/docker-base-buster/sources.list.arm64 deleted file mode 100644 index 249efc17b6fd..000000000000 --- a/dockers/docker-base-buster/sources.list.arm64 +++ /dev/null @@ -1,12 +0,0 @@ -## Debian mirror for ARM repo - -# ARM repo -deb [arch=arm64] http://deb.debian.org/debian buster main contrib non-free -deb-src [arch=arm64] http://deb.debian.org/debian buster main contrib non-free -deb [arch=arm64] http://security.debian.org buster/updates main contrib non-free -deb-src [arch=arm64] http://security.debian.org buster/updates main contrib non-free -deb [arch=arm64] http://deb.debian.org/debian buster-updates main contrib non-free -deb [arch=arm64] http://deb.debian.org/debian/ buster-backports main contrib non-free -deb [arch=arm64] http://packages.trafficmanager.net/debian/debian buster main contrib non-free -deb [arch=arm64] http://packages.trafficmanager.net/debian/debian buster-updates main contrib non-free -deb [arch=arm64] http://packages.trafficmanager.net/debian/debian buster-backports main contrib non-free diff --git a/dockers/docker-base-buster/sources.list.armhf b/dockers/docker-base-buster/sources.list.armhf deleted file mode 100644 index ff6d5787b212..000000000000 --- a/dockers/docker-base-buster/sources.list.armhf +++ /dev/null @@ -1,12 +0,0 @@ -## Debian mirror for ARM repo - -# ARM repo -deb [arch=armhf] http://deb.debian.org/debian buster main contrib non-free -deb-src [arch=armhf] http://deb.debian.org/debian 
buster main contrib non-free -deb [arch=armhf] http://security.debian.org buster/updates main contrib non-free -deb-src [arch=armhf] http://security.debian.org buster/updates main contrib non-free -deb [arch=armhf] http://deb.debian.org/debian buster-updates main contrib non-free -deb [arch=armhf] http://deb.debian.org/debian/ buster-backports main contrib non-free -deb [arch=armhf] http://packages.trafficmanager.net/debian/debian buster main contrib non-free -deb [arch=armhf] http://packages.trafficmanager.net/debian/debian buster-updates main contrib non-free -deb [arch=armhf] http://packages.trafficmanager.net/debian/debian buster-backports main contrib non-free diff --git a/dockers/docker-base-stretch/Dockerfile.j2 b/dockers/docker-base-stretch/Dockerfile.j2 index 5603dc502792..9e11c4ea35f0 100644 --- a/dockers/docker-base-stretch/Dockerfile.j2 +++ b/dockers/docker-base-stretch/Dockerfile.j2 @@ -27,13 +27,7 @@ ENV DEBIAN_FRONTEND=noninteractive # Configure data sources for apt/dpkg COPY ["dpkg_01_drop", "/etc/dpkg/dpkg.cfg.d/01_drop"] -{% if CONFIGURED_ARCH == "armhf" %} -COPY ["sources.list.armhf", "/etc/apt/sources.list"] -{% elif CONFIGURED_ARCH == "arm64" %} -COPY ["sources.list.arm64", "/etc/apt/sources.list"] -{% else %} -COPY ["sources.list", "/etc/apt/sources.list"] -{% endif %} +COPY ["sources.list.{{ CONFIGURED_ARCH }}", "/etc/apt/sources.list"] COPY ["no_install_recommend_suggest", "/etc/apt/apt.conf.d"] COPY ["no-check-valid-until", "/etc/apt/apt.conf.d"] diff --git a/dockers/docker-base-stretch/sources.list b/dockers/docker-base-stretch/sources.list deleted file mode 100644 index 0c29b339bb87..000000000000 --- a/dockers/docker-base-stretch/sources.list +++ /dev/null @@ -1,11 +0,0 @@ -## Debian mirror on Microsoft Azure -## Ref: http://debian-archive.trafficmanager.net/ - -deb [arch=amd64] http://debian-archive.trafficmanager.net/debian/ stretch main contrib non-free -deb-src [arch=amd64] http://debian-archive.trafficmanager.net/debian/ stretch main 
contrib non-free -deb [arch=amd64] http://debian-archive.trafficmanager.net/debian-security/ stretch/updates main contrib non-free -deb-src [arch=amd64] http://debian-archive.trafficmanager.net/debian-security/ stretch/updates main contrib non-free -deb [arch=amd64] http://debian-archive.trafficmanager.net/debian/ stretch-backports main contrib non-free -deb [arch=amd64] http://packages.trafficmanager.net/debian/debian stretch main contrib non-free -deb [arch=amd64] http://packages.trafficmanager.net/debian/debian stretch-updates main contrib non-free -deb [arch=amd64] http://packages.trafficmanager.net/debian/debian stretch-backports main contrib non-free diff --git a/dockers/docker-base-stretch/sources.list.arm64 b/dockers/docker-base-stretch/sources.list.arm64 deleted file mode 100644 index 520c46519919..000000000000 --- a/dockers/docker-base-stretch/sources.list.arm64 +++ /dev/null @@ -1,11 +0,0 @@ -## Debian mirror for ARM repo - -# ARM repo -deb [arch=arm64] http://deb.debian.org/debian stretch main contrib non-free -deb-src [arch=arm64] http://deb.debian.org/debian stretch main contrib non-free -deb [arch=arm64] http://security.debian.org stretch/updates main contrib non-free -deb-src [arch=arm64] http://security.debian.org stretch/updates main contrib non-free -deb [arch=arm64] http://deb.debian.org/debian/ stretch-backports main contrib non-free -deb [arch=arm64] http://packages.trafficmanager.net/debian/debian stretch main contrib non-free -deb [arch=arm64] http://packages.trafficmanager.net/debian/debian stretch-updates main contrib non-free -deb [arch=arm64] http://packages.trafficmanager.net/debian/debian stretch-backports main contrib non-free diff --git a/dockers/docker-base-stretch/sources.list.armhf b/dockers/docker-base-stretch/sources.list.armhf deleted file mode 100644 index 58077f310424..000000000000 --- a/dockers/docker-base-stretch/sources.list.armhf +++ /dev/null @@ -1,11 +0,0 @@ -## Debian mirror for ARM repo - -# ARM repo -deb [arch=armhf] 
http://deb.debian.org/debian stretch main contrib non-free -deb-src [arch=armhf] http://deb.debian.org/debian stretch main contrib non-free -deb [arch=armhf] http://security.debian.org stretch/updates main contrib non-free -deb-src [arch=armhf] http://security.debian.org stretch/updates main contrib non-free -deb [arch=armhf] http://deb.debian.org/debian/ stretch-backports main contrib non-free -deb [arch=armhf] http://packages.trafficmanager.net/debian/debian stretch main contrib non-free -deb [arch=armhf] http://packages.trafficmanager.net/debian/debian stretch-updates main contrib non-free -deb [arch=armhf] http://packages.trafficmanager.net/debian/debian stretch-backports main contrib non-free diff --git a/files/apt/sources.list.amd64 b/files/apt/sources.list.amd64 deleted file mode 100644 index fb0a1e994932..000000000000 --- a/files/apt/sources.list.amd64 +++ /dev/null @@ -1,13 +0,0 @@ -## Debian mirror on Microsoft Azure -## Ref: http://debian-archive.trafficmanager.net/ - -deb [arch=amd64] http://debian-archive.trafficmanager.net/debian/ bullseye main contrib non-free -deb-src [arch=amd64] http://debian-archive.trafficmanager.net/debian/ bullseye main contrib non-free -deb [arch=amd64] http://debian-archive.trafficmanager.net/debian-security/ bullseye-security main contrib non-free -deb-src [arch=amd64] http://debian-archive.trafficmanager.net/debian-security/ bullseye-security main contrib non-free -deb [arch=amd64] http://debian-archive.trafficmanager.net/debian/ bullseye-backports main contrib non-free -deb [arch=amd64] http://packages.trafficmanager.net/debian/debian bullseye main contrib non-free -deb-src [arch=amd64] http://packages.trafficmanager.net/debian/debian bullseye main contrib non-free -deb [arch=amd64] http://packages.trafficmanager.net/debian/debian-security/ bullseye-security main contrib non-free -deb-src [arch=amd64] http://packages.trafficmanager.net/debian/debian-security/ bullseye-security main contrib non-free -deb [arch=amd64] 
http://packages.trafficmanager.net/debian/debian bullseye-backports main contrib non-free diff --git a/files/apt/sources.list.arm64 b/files/apt/sources.list.arm64 deleted file mode 100644 index 75bebe1a91d1..000000000000 --- a/files/apt/sources.list.arm64 +++ /dev/null @@ -1,13 +0,0 @@ -## Debian mirror for ARM -## Not the repo mirror site can change in future, and needs to be updated to be in sync - -deb [arch=arm64] http://debian-archive.trafficmanager.net/debian/ bullseye main contrib non-free -deb-src [arch=arm64] http://debian-archive.trafficmanager.net/debian/ bullseye main contrib non-free -deb [arch=arm64] http://debian-archive.trafficmanager.net/debian-security/ bullseye-security main contrib non-free -deb-src [arch=arm64] http://debian-archive.trafficmanager.net/debian-security/ bullseye-security main contrib non-free -deb [arch=arm64] http://debian-archive.trafficmanager.net/debian/ bullseye-backports main contrib non-free -deb [arch=arm64] http://packages.trafficmanager.net/debian/debian bullseye main contrib non-free -deb-src [arch=arm64] http://packages.trafficmanager.net/debian/debian bullseye main contrib non-free -deb [arch=arm64] http://packages.trafficmanager.net/debian/debian-security/ bullseye-security main contrib non-free -deb-src [arch=arm64] http://packages.trafficmanager.net/debian/debian-security/ bullseye-security main contrib non-free -deb [arch=arm64] http://packages.trafficmanager.net/debian/debian bullseye-backports main contrib non-free diff --git a/files/apt/sources.list.armhf b/files/apt/sources.list.armhf deleted file mode 100644 index 4f9d4d0a9020..000000000000 --- a/files/apt/sources.list.armhf +++ /dev/null @@ -1,18 +0,0 @@ -## Debian mirror for ARM -## Not the repo mirror site can change in future, and needs to be updated to be in sync - -deb [arch=armhf] http://deb.debian.org/debian bullseye main contrib non-free -deb-src [arch=armhf] http://deb.debian.org/debian bullseye main contrib non-free -deb [arch=armhf] 
http://security.debian.org bullseye-security main contrib non-free -deb-src [arch=armhf] http://security.debian.org bullseye-security main contrib non-free -deb [arch=armhf] http://deb.debian.org/debian bullseye-backports main contrib non-free -#deb [arch=armhf] http://debian-archive.trafficmanager.net/debian/ bullseye main contrib non-free -#deb-src [arch=armhf] http://debian-archive.trafficmanager.net/debian/ bullseye main contrib non-free -#deb [arch=armhf] http://debian-archive.trafficmanager.net/debian-security/ bullseye-security main contrib non-free -#deb-src [arch=armhf] http://debian-archive.trafficmanager.net/debian-security/ bullseye-security main contrib non-free -#deb [arch=armhf] http://debian-archive.trafficmanager.net/debian/ bullseye-backports main contrib non-free -deb [arch=armhf] http://packages.trafficmanager.net/debian/debian bullseye main contrib non-free -deb-src [arch=armhf] http://packages.trafficmanager.net/debian/debian bullseye main contrib non-free -deb [arch=armhf] http://packages.trafficmanager.net/debian/debian-security/ bullseye-security main contrib non-free -deb-src [arch=armhf] http://packages.trafficmanager.net/debian/debian-security/ bullseye-security main contrib non-free -deb [arch=armhf] http://packages.trafficmanager.net/debian/debian bullseye-backports main contrib non-free diff --git a/files/apt/sources.list.j2 b/files/apt/sources.list.j2 new file mode 100644 index 000000000000..57d53b919b03 --- /dev/null +++ b/files/apt/sources.list.j2 @@ -0,0 +1,20 @@ +# The configuration is generated by template +# Please add additional sources in /etc/apt/sources.list.d + +{% for mirror_url in MIRROR_URLS.split(',') %} +deb [arch={{ ARCHITECTURE }}] {{ mirror_url }} {{ DISTRIBUTION }} main contrib non-free +deb-src [arch={{ ARCHITECTURE }}] {{ mirror_url }} {{ DISTRIBUTION }} main contrib non-free +deb [arch={{ ARCHITECTURE }}] {{ mirror_url }} {{ DISTRIBUTION }}-updates main contrib non-free +deb-src [arch={{ ARCHITECTURE }}] {{ 
mirror_url }} {{ DISTRIBUTION }}-updates main contrib non-free +deb [arch={{ ARCHITECTURE }}] {{ mirror_url }} {{ DISTRIBUTION }}-backports main contrib non-free +{% endfor %} +{% for mirror_url in MIRROR_SECURITY_URLS.split(',') %} +{% set dist_separator='/' %}{% if 'packages.trafficmanager.net/debian' in mirror_url %}{% set dist_separator='_' %}{% endif %} +{% if DISTRIBUTION == 'stretch' or DISTRIBUTION == 'buster' %} +deb [arch={{ ARCHITECTURE }}] {{ mirror_url }} {{ DISTRIBUTION }}{{ dist_separator }}updates main contrib non-free +deb-src [arch={{ ARCHITECTURE }}] {{ mirror_url }} {{ DISTRIBUTION }}{{ dist_separator }}updates main contrib non-free +{% else %} +deb [arch={{ ARCHITECTURE }}] {{ mirror_url }} {{ DISTRIBUTION }}-security main contrib non-free +deb-src [arch={{ ARCHITECTURE }}] {{ mirror_url }} {{ DISTRIBUTION }}-security main contrib non-free +{% endif %} +{% endfor %} diff --git a/scripts/build_mirror_config.sh b/scripts/build_mirror_config.sh new file mode 100755 index 000000000000..5f94e701cbde --- /dev/null +++ b/scripts/build_mirror_config.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +# Generate the sources.list. 
in the config path +CONFIG_PATH=$1 +export ARCHITECTURE=$2 +export DISTRIBUTION=$3 + +# The default mirror urls +DEFAULT_MIRROR_URLS=http://debian-archive.trafficmanager.net/debian/,http://packages.trafficmanager.net/debian/debian/ +DEFAULT_MIRROR_SECURITY_URLS=http://debian-archive.trafficmanager.net/debian-security/,http://packages.trafficmanager.net/debian/debian-security/ + +# The debian-archive.trafficmanager.net does not support armhf, use debian.org instead +if [ "$ARCHITECTURE" == "armhf" ]; then + DEFAULT_MIRROR_URLS=http://deb.debian.org/debian/,http://packages.trafficmanager.net/debian/debian/ + DEFAULT_MIRROR_SECURITY_URLS=http://deb.debian.org/debian-security/,http://packages.trafficmanager.net/debian/debian-security/ +fi + +[ -z "$MIRROR_URLS" ] && MIRROR_URLS=$DEFAULT_MIRROR_URLS +[ -z "$MIRROR_SECURITY_URLS" ] && MIRROR_SECURITY_URLS=$DEFAULT_MIRROR_SECURITY_URLS + +TEMPLATE=files/apt/sources.list.j2 +[ -f files/apt/sources.list.$ARCHITECTURE.j2 ] && TEMPLATE=files/apt/sources.list.$ARCHITECTURE.j2 +[ -f $CONFIG_PATH/sources.list.j2 ] && TEMPLATE=$CONFIG_PATH/sources.list.j2 +[ -f $CONFIG_PATH/sources.list.$ARCHITECTURE.j2 ] && TEMPLATE=$CONFIG_PATH/sources.list.$ARCHITECTURE.j2 + +MIRROR_URLS=$MIRROR_URLS MIRROR_SECURITY_URLS=$MIRROR_SECURITY_URLS j2 $TEMPLATE | sed '/^$/N;/^\n$/D' > $CONFIG_PATH/sources.list.$ARCHITECTURE diff --git a/scripts/prepare_docker_buildinfo.sh b/scripts/prepare_docker_buildinfo.sh index d2b2c57185d1..3c1104eddc53 100755 --- a/scripts/prepare_docker_buildinfo.sh +++ b/scripts/prepare_docker_buildinfo.sh @@ -22,6 +22,10 @@ if [ -z "$DISTRO" ]; then [ -z "$DISTRO" ] && DISTRO=jessie fi +if [[ "$IMAGENAME" == docker-base-* ]]; then + scripts/build_mirror_config.sh ${DOCKERFILE_PATH} $ARCH $DISTRO +fi + # add script for reproducible build. using sha256 instead of tag for docker base image. 
scripts/docker_version_control.sh $@ diff --git a/slave.mk b/slave.mk index c358e965fabf..32ca3cb2bfed 100644 --- a/slave.mk +++ b/slave.mk @@ -137,6 +137,8 @@ export TRUSTED_GPG_URLS export SONIC_VERSION_CONTROL_COMPONENTS DEFAULT_CONTAINER_REGISTRY := $(SONIC_DEFAULT_CONTAINER_REGISTRY) export DEFAULT_CONTAINER_REGISTRY +export MIRROR_URLS +export MIRROR_SECURITY_URLS ifeq ($(SONIC_ENABLE_PFCWD_ON_START),y) ENABLE_PFCWD_ON_START = y diff --git a/sonic-slave-bullseye/Dockerfile.j2 b/sonic-slave-bullseye/Dockerfile.j2 index 296905787130..b22639e8318b 100644 --- a/sonic-slave-bullseye/Dockerfile.j2 +++ b/sonic-slave-bullseye/Dockerfile.j2 @@ -23,39 +23,10 @@ MAINTAINER gulv@microsoft.com COPY ["no-check-valid-until", "/etc/apt/apt.conf.d/"] -## TODO: Re-add in any necessary mirror URLs here as they become available -RUN echo "deb [arch=amd64] http://debian-archive.trafficmanager.net/debian/ bullseye main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb-src [arch=amd64] http://debian-archive.trafficmanager.net/debian/ bullseye main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=amd64] http://debian-archive.trafficmanager.net/debian-security/ bullseye-security main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb-src [arch=amd64] http://debian-archive.trafficmanager.net/debian-security/ bullseye-security main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=amd64] http://debian-archive.trafficmanager.net/debian bullseye-backports main" >> /etc/apt/sources.list && \ - echo "deb [arch=amd64] http://packages.trafficmanager.net/debian/debian-security bullseye-security main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=amd64] http://packages.trafficmanager.net/debian/debian bullseye main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=amd64] http://packages.trafficmanager.net/debian/debian bullseye-updates main contrib non-free" >> /etc/apt/sources.list - -{%- if CONFIGURED_ARCH 
== "armhf" and CROSS_BUILD_ENVIRON != "y" %} -RUN echo "deb [arch=armhf] http://deb.debian.org/debian bullseye main contrib non-free" > /etc/apt/sources.list && \ - echo "deb-src [arch=armhf] http://deb.debian.org/debian bullseye main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=armhf] http://deb.debian.org/debian bullseye-updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb-src [arch=armhf] http://deb.debian.org/debian bullseye-updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=armhf] http://security.debian.org bullseye-security main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb-src [arch=armhf] http://security.debian.org bullseye-security main contrib non-free" >> /etc/apt/sources.list && \ - echo 'deb [arch=armhf] http://ftp.debian.org/debian bullseye-backports main' >> /etc/apt/sources.list && \ - echo "deb [arch=armhf] http://packages.trafficmanager.net/debian/debian-security bullseye-security main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=armhf] http://packages.trafficmanager.net/debian/debian bullseye main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=armhf] http://packages.trafficmanager.net/debian/debian bullseye-updates main contrib non-free" >> /etc/apt/sources.list -{%- elif CONFIGURED_ARCH == "arm64" and CROSS_BUILD_ENVIRON != "y" %} -RUN echo "deb [arch=arm64] http://deb.debian.org/debian bullseye main contrib non-free" > /etc/apt/sources.list && \ - echo "deb-src [arch=arm64] http://deb.debian.org/debian bullseye main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=arm64] http://deb.debian.org/debian bullseye-updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb-src [arch=arm64] http://deb.debian.org/debian bullseye-updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=arm64] http://security.debian.org bullseye-security main contrib non-free" >> 
/etc/apt/sources.list && \ - echo "deb-src [arch=arm64] http://security.debian.org bullseye-security main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=arm64] http://packages.trafficmanager.net/debian/debian-security bullseye-security main contrib non-free" >> /etc/apt/sources.list && \ - echo 'deb [arch=arm64] http://ftp.debian.org/debian bullseye-backports main' >> /etc/apt/sources.list && \ - echo "deb [arch=arm64] http://packages.trafficmanager.net/debian/debian bullseye main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=arm64] http://packages.trafficmanager.net/debian/debian bullseye-updates main contrib non-free" >> /etc/apt/sources.list -{%- elif CROSS_BUILD_ENVIRON == "y" %} +{%- if CROSS_BUILD_ENVIRON != "y" %} +COPY ["sources.list.{{ CONFIGURED_ARCH }}", "/etc/apt/sources.list"] +{%- else %} +COPY ["sources.list.amd64", "/etc/apt/sources.list"] {%- if CONFIGURED_ARCH == "armhf" %} ARG arch=armhf ARG gcc_arch=arm-linux-gnueabihf diff --git a/sonic-slave-buster/Dockerfile.j2 b/sonic-slave-buster/Dockerfile.j2 index f2a91a27d54c..73e9febda14b 100644 --- a/sonic-slave-buster/Dockerfile.j2 +++ b/sonic-slave-buster/Dockerfile.j2 @@ -22,38 +22,10 @@ MAINTAINER gulv@microsoft.com COPY ["no-check-valid-until", "/etc/apt/apt.conf.d/"] -RUN echo "deb [arch=amd64] http://debian-archive.trafficmanager.net/debian/ buster main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb-src [arch=amd64] http://debian-archive.trafficmanager.net/debian/ buster main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=amd64] http://debian-archive.trafficmanager.net/debian-security/ buster/updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb-src [arch=amd64] http://debian-archive.trafficmanager.net/debian-security/ buster/updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=amd64] http://debian-archive.trafficmanager.net/debian buster-backports main" >> /etc/apt/sources.list && 
\ - echo "deb [arch=amd64] http://packages.trafficmanager.net/debian/debian buster main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=amd64] http://packages.trafficmanager.net/debian/debian buster-updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=amd64] http://packages.trafficmanager.net/debian/debian-security buster_updates main contrib non-free" >> /etc/apt/sources.list - -{%- if CONFIGURED_ARCH == "armhf" and CROSS_BUILD_ENVIRON != "y" %} -RUN echo "deb [arch=armhf] http://deb.debian.org/debian buster main contrib non-free" > /etc/apt/sources.list && \ - echo "deb-src [arch=armhf] http://deb.debian.org/debian buster main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=armhf] http://deb.debian.org/debian buster-updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb-src [arch=armhf] http://deb.debian.org/debian buster-updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=armhf] http://security.debian.org buster/updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb-src [arch=armhf] http://security.debian.org buster/updates main contrib non-free" >> /etc/apt/sources.list && \ - echo 'deb [arch=armhf] http://ftp.debian.org/debian buster-backports main' >> /etc/apt/sources.list && \ - echo "deb [arch=armhf] http://packages.trafficmanager.net/debian/debian buster main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=armhf] http://packages.trafficmanager.net/debian/debian buster-updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=arm64] http://packages.trafficmanager.net/debian/debian-security buster_updates main contrib non-free" >> /etc/apt/sources.list -{%- elif CONFIGURED_ARCH == "arm64" and CROSS_BUILD_ENVIRON != "y" %} -RUN echo "deb [arch=arm64] http://deb.debian.org/debian buster main contrib non-free" > /etc/apt/sources.list && \ - echo "deb-src [arch=arm64] http://deb.debian.org/debian 
buster main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=arm64] http://deb.debian.org/debian buster-updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb-src [arch=arm64] http://deb.debian.org/debian buster-updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=arm64] http://security.debian.org buster/updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb-src [arch=arm64] http://security.debian.org buster/updates main contrib non-free" >> /etc/apt/sources.list && \ - echo 'deb [arch=arm64] http://ftp.debian.org/debian buster-backports main' >> /etc/apt/sources.list && \ - echo "deb [arch=arm64] http://packages.trafficmanager.net/debian/debian buster main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=arm64] http://packages.trafficmanager.net/debian/debian buster-updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=arm64] http://packages.trafficmanager.net/debian/debian-security buster_updates main contrib non-free" >> /etc/apt/sources.list -{%- elif CROSS_BUILD_ENVIRON == "y" %} +{%- if CROSS_BUILD_ENVIRON != "y" %} +COPY ["sources.list.{{ CONFIGURED_ARCH }}", "/etc/apt/sources.list"] +{%- else %} +COPY ["sources.list.amd64", "/etc/apt/sources.list"] {%- if CONFIGURED_ARCH == "armhf" %} ARG arch=armhf ARG gcc_arch=arm-linux-gnueabihf diff --git a/sonic-slave-stretch/Dockerfile.j2 b/sonic-slave-stretch/Dockerfile.j2 index e3815eab2d36..1397df1c7368 100644 --- a/sonic-slave-stretch/Dockerfile.j2 +++ b/sonic-slave-stretch/Dockerfile.j2 @@ -10,40 +10,7 @@ FROM {{ prefix }}debian:stretch MAINTAINER gulv@microsoft.com COPY ["no-check-valid-until", "/etc/apt/apt.conf.d/"] - -RUN echo "deb [arch=amd64] http://debian-archive.trafficmanager.net/debian/ stretch main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb-src [arch=amd64] http://debian-archive.trafficmanager.net/debian/ stretch main contrib non-free" >> /etc/apt/sources.list && \ - 
echo "deb [arch=amd64] http://debian-archive.trafficmanager.net/debian-security/ stretch/updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb-src [arch=amd64] http://debian-archive.trafficmanager.net/debian-security/ stretch/updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=amd64] http://debian-archive.trafficmanager.net/debian stretch-backports main" >> /etc/apt/sources.list && \ - echo "deb [arch=amd64] http://packages.trafficmanager.net/debian/debian stretch main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=amd64] http://packages.trafficmanager.net/debian/debian stretch-updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=amd64] http://packages.trafficmanager.net/debian/debian-security stretch_updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=amd64] http://packages.microsoft.com/debian/9/prod stretch main" >> /etc/apt/sources.list - -{%- if CONFIGURED_ARCH == "armhf" %} -RUN echo "deb [arch=armhf] http://deb.debian.org/debian stretch main contrib non-free" > /etc/apt/sources.list && \ - echo "deb-src [arch=armhf] http://deb.debian.org/debian stretch main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=armhf] http://deb.debian.org/debian stretch-updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb-src [arch=armhf] http://deb.debian.org/debian stretch-updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=armhf] http://security.debian.org stretch/updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb-src [arch=armhf] http://security.debian.org stretch/updates main contrib non-free" >> /etc/apt/sources.list && \ - echo 'deb [arch=armhf] http://ftp.debian.org/debian stretch-backports main' >> /etc/apt/sources.list && \ - echo "deb [arch=armhf] http://packages.trafficmanager.net/debian/debian stretch main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb 
[arch=armhf] http://packages.trafficmanager.net/debian/debian stretch-updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=armhf] http://packages.trafficmanager.net/debian/debian-security stretch_updates main contrib non-free" >> /etc/apt/sources.list -{%- elif CONFIGURED_ARCH == "arm64" %} -RUN echo "deb [arch=arm64] http://deb.debian.org/debian stretch main contrib non-free" > /etc/apt/sources.list && \ - echo "deb-src [arch=arm64] http://deb.debian.org/debian stretch main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=arm64] http://deb.debian.org/debian stretch-updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb-src [arch=arm64] http://deb.debian.org/debian stretch-updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=arm64] http://security.debian.org stretch/updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb-src [arch=arm64] http://security.debian.org stretch/updates main contrib non-free" >> /etc/apt/sources.list && \ - echo 'deb [arch=arm64] http://ftp.debian.org/debian stretch-backports main' >> /etc/apt/sources.list && \ - echo "deb [arch=arm64] http://packages.trafficmanager.net/debian/debian stretch main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=arm64] http://packages.trafficmanager.net/debian/debian stretch-updates main contrib non-free" >> /etc/apt/sources.list && \ - echo "deb [arch=arm64] http://packages.trafficmanager.net/debian/debian-security stretch_updates main contrib non-free" >> /etc/apt/sources.list -{%- endif %} +COPY ["sources.list.{{ CONFIGURED_ARCH }}", "/etc/apt/sources.list"] ## Make apt-get non-interactive ENV DEBIAN_FRONTEND=noninteractive From 66f1cc458dd2277013f6c1d0567b536b57c91127 Mon Sep 17 00:00:00 2001 From: Caitlin Choate <114622132+cchoate54@users.noreply.github.com> Date: Tue, 8 Nov 2022 16:53:14 -0800 Subject: [PATCH 161/174] Bugfix #9739: Support when 'bgp_asn' is set to 'None', 'Null', 
or missing. (#12588) bgpd.main.conf.j2: bugfix-9739 * Update bgpd.main.conf.j2 to gracefully handle the bgp configuration cases for when 'bgp_asn' is set to 'None', 'Null', or missing. How I did it Include a conditional statement to avoid configuring bgp in FRR when 'bgp_asn' is missing or set to 'None' or 'Null' How to verify it Configure 'bgp_asn' as 'None', 'Null' or have it missing from configurations and verify that /etc/frr/bgpd.conf does not have invalid bgp configurations like 'router bgp None' Description for the changelog Update bgpd.main.conf.j2 to gracefully handle the bgp configuration cases for when 'bgp_asn' is set to 'None', 'Null', or missing for bugfix 9739. Signed-off-by: cchoate54@gmail.com --- dockers/docker-fpm-frr/frr/bgpd/bgpd.main.conf.j2 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dockers/docker-fpm-frr/frr/bgpd/bgpd.main.conf.j2 b/dockers/docker-fpm-frr/frr/bgpd/bgpd.main.conf.j2 index b5891b9695ac..4cee01ac973d 100644 --- a/dockers/docker-fpm-frr/frr/bgpd/bgpd.main.conf.j2 +++ b/dockers/docker-fpm-frr/frr/bgpd/bgpd.main.conf.j2 @@ -61,6 +61,7 @@ route-map HIDE_INTERNAL permit 20 ! {% endif %} ! +{% if (DEVICE_METADATA is defined) and ('localhost' in DEVICE_METADATA) and ('bgp_asn' in DEVICE_METADATA['localhost']) and (DEVICE_METADATA['localhost']['bgp_asn'].lower() != 'none') and (DEVICE_METADATA['localhost']['bgp_asn'].lower() != 'null') %} router bgp {{ DEVICE_METADATA['localhost']['bgp_asn'] }} ! {% block bgp_init %} @@ -143,6 +144,7 @@ router bgp {{ DEVICE_METADATA['localhost']['bgp_asn'] }} exit-address-family {% endblock maximum_paths %} {% endif %} +{% endif %} ! ! end of template: bgpd/bgpd.main.conf.j2 ! 
From abf1862f586d14a481e17bebb8e9c267b05786d8 Mon Sep 17 00:00:00 2001 From: Junhua Zhai Date: Wed, 9 Nov 2022 09:58:25 +0800 Subject: [PATCH 162/174] [gbsyncd] Enable debug shell for BRCM broncos PHY (#12622) * Build docker-gbsyncd-broncos image * Correct typo in LIBSAI_BRONCOS_URL_PREFIX * Update docker-gbsyncd-broncos/Dockerfile.j2 * Enable debug shell support on docker-gbsyncd-broncos * Include bcmsh in docker-gbsyncd-broncos Why I did it In docker-gbsyncd-broncos image, enable debug shell support for BRCM broncos PHY. How I did it How to verify it Note: need enable attr SAI_SWITCH_ATTR_SWITCH_SHELL_ENABLE support in BCM PAI library # bcmsh Press Enter to show prompt. Press Ctrl+C to exit. NOTICE: Only one bcmsh or bcmcmd can connect to the shell at same time. BRCM:> help help List of available commands - h or help => Print command menu - l => Print list of active ports on the PHY - ps => Print port status => 1 -> Link status => 2 -> Link training failure status => 3 -> Link training RX status => 4 -> PRBS lock status => 5 -> PRBS lock loss status - rd => Read register contents - wr => Write register data - rrd => Raw read register contents using lanemap and if_side (line = 0, system = 1) - rwr => Raw write register data using lanemap and if_side (line = 0, system = 1) - fw or firmware => Print firmware version of the PHY - pd or port_dump => Dump port status - eyescan => Display eye scan - fec_status => Get fec status of the port - polarity => Set TX and RX polarity => 0xF, 0xFF, or 0xFFFF based on number of lanes => Line = 0, System = 1 =>_TX/RX Polarity bitmap of all lanes Each bit represents a lane number. E.g. Lane 0's polarity value (0 or 1) is populated in Bit 0. 
- polarity => Print TX and RX polarity - lb => Enable loopback on the port lb_value = 0 -> Disable, 1 -> PHY, 2 -> MAC - lb => Print loopback configuration of the port - prbs => Set/Get PRBS configuration => 1 -> Get PRBS state and polynomial 2 -> Set PRBS Polynomial, - PRBS Polynomial Please refer to phy/chip documentation for valid values 3 -> Enable PRBS => 0 Disable PRBS 1 Enable both PRBS Transmitter and Receiver 2 Enable PRBS Receiver 3 Enable PRBS Transmitter exit or q => Exit the diagnostic shell --- .../docker-gbsyncd-broncos/Dockerfile.j2 | 3 ++ .../components/docker-gbsyncd-broncos/bcmsh | 40 +++++++++++++++++++ .../critical_processes.j2 | 1 + .../supervisord.conf.j2 | 2 +- 4 files changed, 45 insertions(+), 1 deletion(-) create mode 100755 platform/components/docker-gbsyncd-broncos/bcmsh diff --git a/platform/components/docker-gbsyncd-broncos/Dockerfile.j2 b/platform/components/docker-gbsyncd-broncos/Dockerfile.j2 index 154c7735ce97..ca7670acb628 100644 --- a/platform/components/docker-gbsyncd-broncos/Dockerfile.j2 +++ b/platform/components/docker-gbsyncd-broncos/Dockerfile.j2 @@ -27,11 +27,14 @@ debs/{{ deb }}{{' '}} COPY ["docker-init.sh", "/usr/bin/"] COPY ["start.sh", "/usr/bin/"] +COPY ["bcmsh", "/usr/bin/"] COPY ["critical_processes.j2", "/usr/share/sonic/templates"] COPY ["supervisord.conf.j2", "/usr/share/sonic/templates"] COPY ["files/supervisor-proc-exit-listener", "/usr/bin"] +COPY ["files/dsserve", "/usr/bin/"] +RUN chmod +x /usr/bin/dsserve ## Clean up RUN apt-get clean -y; apt-get autoclean -y; apt-get autoremove -y diff --git a/platform/components/docker-gbsyncd-broncos/bcmsh b/platform/components/docker-gbsyncd-broncos/bcmsh new file mode 100755 index 000000000000..bad7a105195d --- /dev/null +++ b/platform/components/docker-gbsyncd-broncos/bcmsh @@ -0,0 +1,40 @@ +#!/bin/bash +usage="$(basename "$0") [-h] [-q] [-t timeout] -- interactive shell for bcm service + +where: + -h show this help text + -t inactivity timeout in seconds (default 
300 seconds, 0 for no timeout) + -q quite, no banner (default: verbose)" + +banner="Press Enter to show prompt. +Press Ctrl+C to exit. +NOTICE: Only one bcmsh or bcmcmd can connect to the shell at same time. +" + +# Default verbose +quiet=false +timeout=300 + +while getopts 'hqt:' option; do + case "$option" in + h) echo "$usage" + exit + ;; + q) quiet=true + ;; + t) timeout=$OPTARG + ;; + \?) printf "illegal option: -%s\n" "$OPTARG" >&2 + echo "$usage" >&2 + exit 1 + ;; + esac +done +shift $((OPTIND - 1)) + +if [ "$quiet" = false ]; then + echo "$banner" +fi + +/usr/bin/socat -T$timeout readline UNIX-CONNECT:/var/run/sswsyncd/sswgbsyncd.socket + diff --git a/platform/components/docker-gbsyncd-broncos/critical_processes.j2 b/platform/components/docker-gbsyncd-broncos/critical_processes.j2 index bdd6903c5690..d1163a9c3046 100644 --- a/platform/components/docker-gbsyncd-broncos/critical_processes.j2 +++ b/platform/components/docker-gbsyncd-broncos/critical_processes.j2 @@ -1 +1,2 @@ +program:dsserve program:syncd diff --git a/platform/components/docker-gbsyncd-broncos/supervisord.conf.j2 b/platform/components/docker-gbsyncd-broncos/supervisord.conf.j2 index c274315ef67e..2ac52c22c283 100644 --- a/platform/components/docker-gbsyncd-broncos/supervisord.conf.j2 +++ b/platform/components/docker-gbsyncd-broncos/supervisord.conf.j2 @@ -33,7 +33,7 @@ dependent_startup_wait_for=rsyslogd:running [program:syncd] environment=BRONCOS_DEVICE_PATH=/usr/lib -command=/usr/bin/syncd -s -p /etc/sai.d/psai.profile -x /usr/share/sonic/hwsku/context_config.json -g 1 +command=/usr/bin/dsserve -f /var/run/sswsyncd/sswgbsyncd.socket /usr/bin/syncd --diag -s -p /etc/sai.d/psai.profile -x /usr/share/sonic/hwsku/context_config.json -g 1 priority=3 autostart=false autorestart=false From a544a07931658d67a8fbb0857dc9cf0f047ad896 Mon Sep 17 00:00:00 2001 From: Ying Xie Date: Wed, 9 Nov 2022 08:15:41 -0800 Subject: [PATCH 163/174] Enable Dx010 LPM (#12642) Why I did it DX010 platform has limited 
routing table size. How I did it Enabling LPM. Signed-off-by: Ying Xie --- .../th-seastone-dx010-32x100G-t0.config.bcm | 1 + .../th-seastone-dx010-32x100G-t1.config.bcm | 1 + ...th-seastone-dx010-48x50G+8x100G.config.bcm | 1313 +++++++++-------- 3 files changed, 660 insertions(+), 655 deletions(-) diff --git a/device/celestica/x86_64-cel_seastone-r0/Celestica-DX010-C32/th-seastone-dx010-32x100G-t0.config.bcm b/device/celestica/x86_64-cel_seastone-r0/Celestica-DX010-C32/th-seastone-dx010-32x100G-t0.config.bcm index e5e60d51b326..be9cef5600cf 100644 --- a/device/celestica/x86_64-cel_seastone-r0/Celestica-DX010-C32/th-seastone-dx010-32x100G-t0.config.bcm +++ b/device/celestica/x86_64-cel_seastone-r0/Celestica-DX010-C32/th-seastone-dx010-32x100G-t0.config.bcm @@ -14,6 +14,7 @@ l2xmsg_mode=1 l2_mem_entries=8192 l3_mem_entries=8192 l3_alpm_enable=2 +lpm_scaling_enable=0 ipv6_lpm_128b_enable=1 mmu_lossless=0 diff --git a/device/celestica/x86_64-cel_seastone-r0/Celestica-DX010-C32/th-seastone-dx010-32x100G-t1.config.bcm b/device/celestica/x86_64-cel_seastone-r0/Celestica-DX010-C32/th-seastone-dx010-32x100G-t1.config.bcm index 3ff49d672fc2..1f90464d4182 100644 --- a/device/celestica/x86_64-cel_seastone-r0/Celestica-DX010-C32/th-seastone-dx010-32x100G-t1.config.bcm +++ b/device/celestica/x86_64-cel_seastone-r0/Celestica-DX010-C32/th-seastone-dx010-32x100G-t1.config.bcm @@ -14,6 +14,7 @@ l2xmsg_mode=1 l2_mem_entries=8192 l3_mem_entries=8192 l3_alpm_enable=2 +lpm_scaling_enable=0 ipv6_lpm_128b_enable=1 mmu_lossless=0 diff --git a/device/celestica/x86_64-cel_seastone-r0/Celestica-DX010-D48C8/th-seastone-dx010-48x50G+8x100G.config.bcm b/device/celestica/x86_64-cel_seastone-r0/Celestica-DX010-D48C8/th-seastone-dx010-48x50G+8x100G.config.bcm index 787cce0d986c..e8001c9bc90f 100644 --- a/device/celestica/x86_64-cel_seastone-r0/Celestica-DX010-D48C8/th-seastone-dx010-48x50G+8x100G.config.bcm +++ 
b/device/celestica/x86_64-cel_seastone-r0/Celestica-DX010-D48C8/th-seastone-dx010-48x50G+8x100G.config.bcm @@ -1,655 +1,658 @@ -# disables bcmALPMDH (ALPM distributed hitbit) thread. This thread is purely for debug purpose -l3_alpm_hit_skip=1 - -# Disable Counting ACL Drop towards interface RX_DRP counter -sai_adjust_acl_drop_in_rx_drop=1 - -os=unix -l2xmsg_mode=1 -parity_enable=0 -rate_ext_mdio_divisor=0x80 -phy_ext_rom_boot=0 -fpem_mem_entries=32768 -l2xmsg_mode=1 -oversubscribe_mode=1 -pbmp_xport_xe=0xcccc44cc33113333044cccccc66666622 - - -dport_map_enable=1 -dport_map_port_68=1 -dport_map_port_69=2 - -dport_map_port_72=5 -dport_map_port_73=6 - -dport_map_port_76=9 -dport_map_port_77=10 - -dport_map_port_80=13 -dport_map_port_81=14 - -dport_map_port_34=17 -dport_map_port_35=18 - -dport_map_port_38=21 -dport_map_port_39=22 - -dport_map_port_42=25 -dport_map_port_43=26 - -dport_map_port_46=29 -dport_map_port_47=30 - -dport_map_port_50=33 -dport_map_port_51=34 - -dport_map_port_54=37 -dport_map_port_55=38 - -dport_map_port_58=41 - -dport_map_port_62=45 - -dport_map_port_84=49 - -dport_map_port_88=53 - -dport_map_port_92=57 -dport_map_port_93=58 - -dport_map_port_96=61 -dport_map_port_97=62 - -dport_map_port_102=65 -dport_map_port_103=66 - -dport_map_port_106=69 -dport_map_port_107=70 - -dport_map_port_110=73 - -dport_map_port_114=77 - -dport_map_port_1=81 - -dport_map_port_5=85 - -dport_map_port_9=89 -dport_map_port_10=90 - -dport_map_port_13=93 -dport_map_port_14=94 - -dport_map_port_17=97 -dport_map_port_18=98 - -dport_map_port_21=101 -dport_map_port_22=102 - -dport_map_port_25=105 -dport_map_port_26=106 - -dport_map_port_29=109 -dport_map_port_30=110 - -dport_map_port_118=113 -dport_map_port_119=114 - -dport_map_port_122=117 -dport_map_port_123=118 - -dport_map_port_126=121 -dport_map_port_127=122 - -dport_map_port_130=125 -dport_map_port_131=126 - - -# port mapping -portmap_68=65:50:2 -portmap_69=67:50:2 - -portmap_72=69:50:2 -portmap_73=71:50:2 - 
-portmap_76=73:50:2 -portmap_77=75:50:2 - -portmap_80=77:50:2 -portmap_81=79:50:2 - -portmap_34=33:50:2 -portmap_35=35:50:2 - -portmap_38=37:50:2 -portmap_39=39:50:2 - -portmap_42=41:50:2 -portmap_43=43:50:2 - -portmap_46=45:50:2 -portmap_47=47:50:2 - -portmap_50=49:50:2 -portmap_51=51:50:2 - -portmap_54=53:50:2 -portmap_55=55:50:2 - -portmap_58=57:100:4 - -portmap_62=61:100:4 - -portmap_84=81:100:4 - -portmap_88=85:100:4 - -portmap_92=89:50:2 -portmap_93=91:50:2 - -portmap_96=93:50:2 -portmap_97=95:50:2 - -portmap_102=97:50:2 -portmap_103=99:50:2 - -portmap_106=101:50:2 -portmap_107=103:50:2 - -portmap_110=105:100:4 - -portmap_114=109:100:4 - -portmap_1=1:100:4 - -portmap_5=5:100:4 - -portmap_9=9:50:2 -portmap_10=11:50:2 - -portmap_13=13:50:2 -portmap_14=15:50:2 - -portmap_17=17:50:2 -portmap_18=19:50:2 - -portmap_21=21:50:2 -portmap_22=23:50:2 - -portmap_25=25:50:2 -portmap_26=27:50:2 - -portmap_29=29:50:2 -portmap_30=31:50:2 - -portmap_118=113:50:2 -portmap_119=115:50:2 - -portmap_122=117:50:2 -portmap_123=119:50:2 - -portmap_126=121:50:2 -portmap_127=123:50:2 - -portmap_130=125:50:2 -portmap_131=127:50:2 - - -#WC16 -xgxs_tx_lane_map_68=0x1023 -xgxs_rx_lane_map_68=0x0132 -xgxs_tx_lane_map_69=0x1023 -xgxs_rx_lane_map_69=0x0132 - - -#WC17 -xgxs_tx_lane_map_72=0x1023 -xgxs_rx_lane_map_72=0x1032 -xgxs_tx_lane_map_73=0x1023 -xgxs_rx_lane_map_73=0x1032 - -#WC18 -xgxs_tx_lane_map_76=0x2310 -xgxs_rx_lane_map_76=0x3210 -xgxs_tx_lane_map_77=0x2310 -xgxs_rx_lane_map_77=0x3210 - -#WC19 -xgxs_tx_lane_map_80=0x1302 -xgxs_rx_lane_map_80=0x0231 -xgxs_tx_lane_map_81=0x1302 -xgxs_rx_lane_map_81=0x0231 -#WC8 -xgxs_tx_lane_map_34=0x1203 -xgxs_rx_lane_map_34=0x3120 -xgxs_tx_lane_map_35=0x1203 -xgxs_rx_lane_map_35=0x3120 - -#WC9 -xgxs_tx_lane_map_38=0x0123 -xgxs_rx_lane_map_38=0x3201 -xgxs_tx_lane_map_39=0x0123 -xgxs_rx_lane_map_39=0x3201 - -#WC10 -xgxs_tx_lane_map_42=0x0132 -xgxs_rx_lane_map_42=0x0123 -xgxs_tx_lane_map_43=0x0132 -xgxs_rx_lane_map_43=0x0123 - -#WC11 
-xgxs_tx_lane_map_46=0x2301 -xgxs_rx_lane_map_46=0x2031 -xgxs_tx_lane_map_47=0x2301 -xgxs_rx_lane_map_47=0x2031 - -#WC12 -xgxs_tx_lane_map_50=0x1032 -xgxs_rx_lane_map_50=0x3120 -xgxs_tx_lane_map_51=0x1032 -xgxs_rx_lane_map_51=0x3120 - - -#WC13 -xgxs_tx_lane_map_54=0x1032 -xgxs_rx_lane_map_54=0x0132 -xgxs_tx_lane_map_55=0x1032 -xgxs_rx_lane_map_55=0x0132 - -#WC14 - xgxs_tx_lane_map_58=0x1032 - xgxs_rx_lane_map_58=0x3120 - -#WC15 -xgxs_tx_lane_map_62=0x2031 -xgxs_rx_lane_map_62=0x0132 - -#WC20 - xgxs_tx_lane_map_84=0x3120 - xgxs_rx_lane_map_84=0x1032 - -#WC21 -xgxs_tx_lane_map_88=0x2310 -xgxs_rx_lane_map_88=0x0123 - -#WC22 -xgxs_tx_lane_map_92=0x2310 -xgxs_rx_lane_map_92=0x1302 -xgxs_tx_lane_map_93=0x2310 -xgxs_rx_lane_map_93=0x1302 - -#WC23 -xgxs_tx_lane_map_96=0x1302 -xgxs_rx_lane_map_96=0x1023 -xgxs_tx_lane_map_97=0x1302 -xgxs_rx_lane_map_97=0x1023 - -#WC24 -xgxs_tx_lane_map_102=0x2310 -xgxs_rx_lane_map_102=0x1032 -xgxs_tx_lane_map_103=0x2310 -xgxs_rx_lane_map_103=0x1032 - -#WC25 -xgxs_tx_lane_map_106=0x2310 -xgxs_rx_lane_map_106=0x1023 -xgxs_tx_lane_map_107=0x2310 -xgxs_rx_lane_map_107=0x1023 - -#WC26 -xgxs_tx_lane_map_110=0x2310 -xgxs_rx_lane_map_110=0x1302 - -#WC27 -xgxs_tx_lane_map_114=0x1302 -xgxs_rx_lane_map_114=0x1032 - -#WC0 -xgxs_tx_lane_map_1=0x0123 -xgxs_rx_lane_map_1=0x0213 - -#WC1 -xgxs_tx_lane_map_5=0x2310 -xgxs_rx_lane_map_5=0x3201 - -#WC2 -xgxs_tx_lane_map_9=0x1023 -xgxs_rx_lane_map_9=0x0213 -xgxs_tx_lane_map_10=0x1023 -xgxs_rx_lane_map_10=0x0213 - -#WC3 -xgxs_tx_lane_map_13=0x1302 -xgxs_rx_lane_map_13=0x3201 -xgxs_tx_lane_map_14=0x1302 -xgxs_rx_lane_map_14=0x3201 - -#WC4 -xgxs_tx_lane_map_17=0x0132 -xgxs_rx_lane_map_17=0x0123 -xgxs_tx_lane_map_18=0x0132 -xgxs_rx_lane_map_18=0x0123 - -#WC5 -xgxs_tx_lane_map_21=0x1032 -xgxs_rx_lane_map_21=0x0213 -xgxs_tx_lane_map_22=0x1032 -xgxs_rx_lane_map_22=0x0213 - -#WC6 -xgxs_tx_lane_map_25=0x1023 -xgxs_rx_lane_map_25=0x3120 -xgxs_tx_lane_map_26=0x1023 -xgxs_rx_lane_map_26=0x3120 - -#WC7 
-xgxs_tx_lane_map_29=0x2031 -xgxs_rx_lane_map_29=0x3201 -xgxs_tx_lane_map_30=0x2031 -xgxs_rx_lane_map_30=0x3201 - -#WC28 -xgxs_tx_lane_map_118=0x0231 -xgxs_rx_lane_map_118=0x2031 -xgxs_tx_lane_map_119=0x0231 -xgxs_rx_lane_map_119=0x2031 - -#WC29 -xgxs_tx_lane_map_122=0x3201 -xgxs_rx_lane_map_122=0x2301 -xgxs_tx_lane_map_123=0x3201 -xgxs_rx_lane_map_123=0x2301 - -#WC30 -xgxs_tx_lane_map_126=0x0213 -xgxs_rx_lane_map_126=0x0213 -xgxs_tx_lane_map_127=0x0213 -xgxs_rx_lane_map_127=0x0213 - -#WC31 -xgxs_tx_lane_map_130=0x2031 -xgxs_rx_lane_map_130=0x1032 -xgxs_tx_lane_map_131=0x2031 -xgxs_rx_lane_map_131=0x1032 - -#PN - -#WC16 -phy_xaui_tx_polarity_flip_68=0x0000 -phy_xaui_rx_polarity_flip_68=0x0000 -phy_xaui_tx_polarity_flip_69=0x0000 -phy_xaui_rx_polarity_flip_69=0x0000 - -#WC17 -phy_xaui_tx_polarity_flip_72=0x0003 -phy_xaui_rx_polarity_flip_72=0x0000 -phy_xaui_tx_polarity_flip_73=0x0002 -phy_xaui_rx_polarity_flip_73=0x0001 - - - -#WC18 -phy_xaui_tx_polarity_flip_76=0x0003 -phy_xaui_rx_polarity_flip_76=0x0000 -phy_xaui_tx_polarity_flip_77=0x0003 -phy_xaui_rx_polarity_flip_77=0x0000 - - -#WC19 -phy_xaui_tx_polarity_flip_80=0x0003 -phy_xaui_rx_polarity_flip_80=0x0003 -phy_xaui_tx_polarity_flip_81=0x0003 -phy_xaui_rx_polarity_flip_81=0x0003 - - -#WC8 -phy_xaui_tx_polarity_flip_34=0x0003 -phy_xaui_rx_polarity_flip_34=0x0000 -phy_xaui_tx_polarity_flip_35=0x0001 -phy_xaui_rx_polarity_flip_35=0x0000 - - -#WC9 -phy_xaui_tx_polarity_flip_38=0x0001 -phy_xaui_rx_polarity_flip_38=0x0000 -phy_xaui_tx_polarity_flip_39=0x0000 -phy_xaui_rx_polarity_flip_39=0x0000 - - -#WC10 -phy_xaui_tx_polarity_flip_42=0x0003 -phy_xaui_rx_polarity_flip_42=0x0000 -phy_xaui_tx_polarity_flip_43=0x0002 -phy_xaui_rx_polarity_flip_43=0x0000 - - -#WC11 -phy_xaui_tx_polarity_flip_46=0x0000 -phy_xaui_rx_polarity_flip_46=0x0000 -phy_xaui_tx_polarity_flip_47=0x0000 -phy_xaui_rx_polarity_flip_47=0x0000 - - -#WC12 -phy_xaui_tx_polarity_flip_50=0x0000 -phy_xaui_rx_polarity_flip_50=0x0000 
-phy_xaui_tx_polarity_flip_51=0x0001 -phy_xaui_rx_polarity_flip_51=0x0000 - -#WC13 -phy_xaui_tx_polarity_flip_54=0x0000 -phy_xaui_rx_polarity_flip_54=0x0000 -phy_xaui_tx_polarity_flip_55=0x0001 -phy_xaui_rx_polarity_flip_55=0x0000 - -#WC14 -phy_xaui_tx_polarity_flip_58=0x0000 -phy_xaui_rx_polarity_flip_58=0x0000 - -#WC15 -phy_xaui_tx_polarity_flip_62=0x0005 -phy_xaui_rx_polarity_flip_62=0x000F - -#WC20 - phy_xaui_tx_polarity_flip_84=0x000E - phy_xaui_rx_polarity_flip_84=0x0007 - -#WC21 -phy_xaui_tx_polarity_flip_88=0x000B -phy_xaui_rx_polarity_flip_88=0x000B - -#WC22 -phy_xaui_tx_polarity_flip_92=0x0003 -phy_xaui_rx_polarity_flip_92=0x0001 -phy_xaui_tx_polarity_flip_93=0x0003 -phy_xaui_rx_polarity_flip_93=0x0000 - - -#WC23 -phy_xaui_tx_polarity_flip_96=0x0002 -phy_xaui_rx_polarity_flip_96=0x0000 -phy_xaui_tx_polarity_flip_97=0x0002 -phy_xaui_rx_polarity_flip_97=0x0000 - -#WC24 -phy_xaui_tx_polarity_flip_102=0x0000 -phy_xaui_rx_polarity_flip_102=0x0003 -phy_xaui_tx_polarity_flip_103=0x0000 -phy_xaui_rx_polarity_flip_103=0x0003 - - -#WC25 -phy_xaui_tx_polarity_flip_106=0x0003 -phy_xaui_rx_polarity_flip_106=0x0000 -phy_xaui_tx_polarity_flip_107=0x0003 -phy_xaui_rx_polarity_flip_107=0x0000 - -#WC26 -phy_xaui_tx_polarity_flip_110=0x000F -phy_xaui_rx_polarity_flip_110=0x000F - -#WC27 -phy_xaui_tx_polarity_flip_114=0x000F -phy_xaui_rx_polarity_flip_114=0x000E - -#WC0 -phy_xaui_tx_polarity_flip_1=0x000C -phy_xaui_rx_polarity_flip_1=0x000F - -#WC1 -phy_xaui_tx_polarity_flip_5=0x000E -phy_xaui_rx_polarity_flip_5=0x0000 - -#WC2 -phy_xaui_tx_polarity_flip_9=0x0000 -phy_xaui_rx_polarity_flip_9=0x0001 -phy_xaui_tx_polarity_flip_10=0x0001 -phy_xaui_rx_polarity_flip_10=0x0000 - - -#WC3 -phy_xaui_tx_polarity_flip_13=0x0003 -phy_xaui_rx_polarity_flip_13=0x0000 -phy_xaui_tx_polarity_flip_14=0x0003 -phy_xaui_rx_polarity_flip_14=0x0000 - -#WC4 -phy_xaui_tx_polarity_flip_17=0x0002 -phy_xaui_rx_polarity_flip_17=0x0000 -phy_xaui_tx_polarity_flip_18=0x0003 
-phy_xaui_rx_polarity_flip_18=0x0000 - - -#WC5 -phy_xaui_tx_polarity_flip_21=0x0000 -phy_xaui_rx_polarity_flip_21=0x0000 -phy_xaui_tx_polarity_flip_22=0x0000 -phy_xaui_rx_polarity_flip_22=0x0000 - - -#WC6 -phy_xaui_tx_polarity_flip_25=0x0000 -phy_xaui_rx_polarity_flip_25=0x0002 -phy_xaui_tx_polarity_flip_26=0x0001 -phy_xaui_rx_polarity_flip_26=0x0002 - - -#WC7 -phy_xaui_tx_polarity_flip_29=0x0000 -phy_xaui_rx_polarity_flip_29=0x0000 -phy_xaui_tx_polarity_flip_30=0x0001 -phy_xaui_rx_polarity_flip_30=0x0000 - - -#WC28 -phy_xaui_tx_polarity_flip_118=0x0003 -phy_xaui_rx_polarity_flip_118=0x0003 -phy_xaui_tx_polarity_flip_119=0x0003 -phy_xaui_rx_polarity_flip_119=0x0003 - - -#WC29 -phy_xaui_tx_polarity_flip_122=0x0002 -phy_xaui_rx_polarity_flip_122=0x0000 -phy_xaui_tx_polarity_flip_123=0x0000 -phy_xaui_rx_polarity_flip_123=0x0000 - - -#WC30 -phy_xaui_tx_polarity_flip_126=0x0003 -phy_xaui_rx_polarity_flip_126=0x0000 -phy_xaui_tx_polarity_flip_127=0x0003 -phy_xaui_rx_polarity_flip_127=0x0000 - - -#WC31 -phy_xaui_tx_polarity_flip_130=0x0002 -phy_xaui_rx_polarity_flip_130=0x0000 -phy_xaui_tx_polarity_flip_131=0x0001 -phy_xaui_rx_polarity_flip_131=0x0000 - -#xe -serdes_driver_current=0x0a -serdes_preemphasis=0x1a5402 - -#ce0 -serdes_driver_current_lane0_58=0x0a -serdes_driver_current_lane1_58=0x09 -serdes_driver_current_lane2_58=0x09 -serdes_driver_current_lane3_58=0x0a -serdes_preemphasis_lane0_58=0x254902 -serdes_preemphasis_lane1_58=0x244a02 -serdes_preemphasis_lane2_58=0x244a02 -serdes_preemphasis_lane3_58=0x254902 - -#ce1 -serdes_driver_current_lane0_62=0x09 -serdes_driver_current_lane1_62=0x0a -serdes_driver_current_lane2_62=0x09 -serdes_driver_current_lane3_62=0x09 -serdes_preemphasis_lane0_62=0x244a02 -serdes_preemphasis_lane1_62=0x254902 -serdes_preemphasis_lane2_62=0x244a02 -serdes_preemphasis_lane3_62=0x244a02 - -#ce2 -serdes_driver_current_lane0_84=0x09 -serdes_driver_current_lane1_84=0x09 -serdes_driver_current_lane2_84=0x09 -serdes_driver_current_lane3_84=0x09 
-serdes_preemphasis_lane0_84=0x204e02 -serdes_preemphasis_lane1_84=0x204e02 -serdes_preemphasis_lane2_84=0x204e02 -serdes_preemphasis_lane3_84=0x204e02 - -#ce3 -serdes_driver_current_lane0_88=0x09 -serdes_driver_current_lane1_88=0x08 -serdes_driver_current_lane2_88=0x08 -serdes_driver_current_lane3_88=0x09 -serdes_preemphasis_lane0_88=0x204e02 -serdes_preemphasis_lane1_88=0x1d5102 -serdes_preemphasis_lane2_88=0x1d5102 -serdes_preemphasis_lane3_88=0x204e02 - -#ce4 -serdes_driver_current_lane0_110=0x09 -serdes_driver_current_lane1_110=0x08 -serdes_driver_current_lane2_110=0x08 -serdes_driver_current_lane3_110=0x09 -serdes_preemphasis_lane0_110=0x204e02 -serdes_preemphasis_lane1_110=0x1d5102 -serdes_preemphasis_lane2_110=0x1d5102 -serdes_preemphasis_lane3_110=0x204e02 - -#ce5 -serdes_driver_current_lane0_114=0x09 -serdes_driver_current_lane1_114=0x08 -serdes_driver_current_lane2_114=0x09 -serdes_driver_current_lane3_114=0x09 -serdes_preemphasis_lane0_114=0x204e02 -serdes_preemphasis_lane1_114=0x1d5102 -serdes_preemphasis_lane2_114=0x224c02 -serdes_preemphasis_lane3_114=0x224c02 - -#ce6 -serdes_driver_current_lane0_1=0x09 -serdes_driver_current_lane1_1=0x0a -serdes_driver_current_lane2_1=0x09 -serdes_driver_current_lane3_1=0x0a -serdes_preemphasis_lane0_1=0x244a02 -serdes_preemphasis_lane1_1=0x254902 -serdes_preemphasis_lane2_1=0x244a02 -serdes_preemphasis_lane3_1=0x254902 - -#ce7 -serdes_driver_current_lane0_5=0x09 -serdes_driver_current_lane1_5=0x09 -serdes_driver_current_lane2_5=0x09 -serdes_driver_current_lane3_5=0x0a -serdes_preemphasis_lane0_5=0x244a02 -serdes_preemphasis_lane1_5=0x244a02 -serdes_preemphasis_lane2_5=0x244a02 -serdes_preemphasis_lane3_5=0x254902 - -phy_an_lt_msft=1 +# disables bcmALPMDH (ALPM distributed hitbit) thread. 
This thread is purely for debug purpose +l3_alpm_hit_skip=1 + +# Disable Counting ACL Drop towards interface RX_DRP counter +sai_adjust_acl_drop_in_rx_drop=1 + +os=unix +l2xmsg_mode=1 +parity_enable=0 +rate_ext_mdio_divisor=0x80 +phy_ext_rom_boot=0 +fpem_mem_entries=32768 +l2xmsg_mode=1 +oversubscribe_mode=1 +l3_alpm_enable=2 +lpm_scaling_enable=0 +ipv6_lpm_128b_enable=1 +pbmp_xport_xe=0xcccc44cc33113333044cccccc66666622 + + +dport_map_enable=1 +dport_map_port_68=1 +dport_map_port_69=2 + +dport_map_port_72=5 +dport_map_port_73=6 + +dport_map_port_76=9 +dport_map_port_77=10 + +dport_map_port_80=13 +dport_map_port_81=14 + +dport_map_port_34=17 +dport_map_port_35=18 + +dport_map_port_38=21 +dport_map_port_39=22 + +dport_map_port_42=25 +dport_map_port_43=26 + +dport_map_port_46=29 +dport_map_port_47=30 + +dport_map_port_50=33 +dport_map_port_51=34 + +dport_map_port_54=37 +dport_map_port_55=38 + +dport_map_port_58=41 + +dport_map_port_62=45 + +dport_map_port_84=49 + +dport_map_port_88=53 + +dport_map_port_92=57 +dport_map_port_93=58 + +dport_map_port_96=61 +dport_map_port_97=62 + +dport_map_port_102=65 +dport_map_port_103=66 + +dport_map_port_106=69 +dport_map_port_107=70 + +dport_map_port_110=73 + +dport_map_port_114=77 + +dport_map_port_1=81 + +dport_map_port_5=85 + +dport_map_port_9=89 +dport_map_port_10=90 + +dport_map_port_13=93 +dport_map_port_14=94 + +dport_map_port_17=97 +dport_map_port_18=98 + +dport_map_port_21=101 +dport_map_port_22=102 + +dport_map_port_25=105 +dport_map_port_26=106 + +dport_map_port_29=109 +dport_map_port_30=110 + +dport_map_port_118=113 +dport_map_port_119=114 + +dport_map_port_122=117 +dport_map_port_123=118 + +dport_map_port_126=121 +dport_map_port_127=122 + +dport_map_port_130=125 +dport_map_port_131=126 + + +# port mapping +portmap_68=65:50:2 +portmap_69=67:50:2 + +portmap_72=69:50:2 +portmap_73=71:50:2 + +portmap_76=73:50:2 +portmap_77=75:50:2 + +portmap_80=77:50:2 +portmap_81=79:50:2 + +portmap_34=33:50:2 +portmap_35=35:50:2 + 
+portmap_38=37:50:2 +portmap_39=39:50:2 + +portmap_42=41:50:2 +portmap_43=43:50:2 + +portmap_46=45:50:2 +portmap_47=47:50:2 + +portmap_50=49:50:2 +portmap_51=51:50:2 + +portmap_54=53:50:2 +portmap_55=55:50:2 + +portmap_58=57:100:4 + +portmap_62=61:100:4 + +portmap_84=81:100:4 + +portmap_88=85:100:4 + +portmap_92=89:50:2 +portmap_93=91:50:2 + +portmap_96=93:50:2 +portmap_97=95:50:2 + +portmap_102=97:50:2 +portmap_103=99:50:2 + +portmap_106=101:50:2 +portmap_107=103:50:2 + +portmap_110=105:100:4 + +portmap_114=109:100:4 + +portmap_1=1:100:4 + +portmap_5=5:100:4 + +portmap_9=9:50:2 +portmap_10=11:50:2 + +portmap_13=13:50:2 +portmap_14=15:50:2 + +portmap_17=17:50:2 +portmap_18=19:50:2 + +portmap_21=21:50:2 +portmap_22=23:50:2 + +portmap_25=25:50:2 +portmap_26=27:50:2 + +portmap_29=29:50:2 +portmap_30=31:50:2 + +portmap_118=113:50:2 +portmap_119=115:50:2 + +portmap_122=117:50:2 +portmap_123=119:50:2 + +portmap_126=121:50:2 +portmap_127=123:50:2 + +portmap_130=125:50:2 +portmap_131=127:50:2 + + +#WC16 +xgxs_tx_lane_map_68=0x1023 +xgxs_rx_lane_map_68=0x0132 +xgxs_tx_lane_map_69=0x1023 +xgxs_rx_lane_map_69=0x0132 + + +#WC17 +xgxs_tx_lane_map_72=0x1023 +xgxs_rx_lane_map_72=0x1032 +xgxs_tx_lane_map_73=0x1023 +xgxs_rx_lane_map_73=0x1032 + +#WC18 +xgxs_tx_lane_map_76=0x2310 +xgxs_rx_lane_map_76=0x3210 +xgxs_tx_lane_map_77=0x2310 +xgxs_rx_lane_map_77=0x3210 + +#WC19 +xgxs_tx_lane_map_80=0x1302 +xgxs_rx_lane_map_80=0x0231 +xgxs_tx_lane_map_81=0x1302 +xgxs_rx_lane_map_81=0x0231 +#WC8 +xgxs_tx_lane_map_34=0x1203 +xgxs_rx_lane_map_34=0x3120 +xgxs_tx_lane_map_35=0x1203 +xgxs_rx_lane_map_35=0x3120 + +#WC9 +xgxs_tx_lane_map_38=0x0123 +xgxs_rx_lane_map_38=0x3201 +xgxs_tx_lane_map_39=0x0123 +xgxs_rx_lane_map_39=0x3201 + +#WC10 +xgxs_tx_lane_map_42=0x0132 +xgxs_rx_lane_map_42=0x0123 +xgxs_tx_lane_map_43=0x0132 +xgxs_rx_lane_map_43=0x0123 + +#WC11 +xgxs_tx_lane_map_46=0x2301 +xgxs_rx_lane_map_46=0x2031 +xgxs_tx_lane_map_47=0x2301 +xgxs_rx_lane_map_47=0x2031 + +#WC12 
+xgxs_tx_lane_map_50=0x1032 +xgxs_rx_lane_map_50=0x3120 +xgxs_tx_lane_map_51=0x1032 +xgxs_rx_lane_map_51=0x3120 + + +#WC13 +xgxs_tx_lane_map_54=0x1032 +xgxs_rx_lane_map_54=0x0132 +xgxs_tx_lane_map_55=0x1032 +xgxs_rx_lane_map_55=0x0132 + +#WC14 + xgxs_tx_lane_map_58=0x1032 + xgxs_rx_lane_map_58=0x3120 + +#WC15 +xgxs_tx_lane_map_62=0x2031 +xgxs_rx_lane_map_62=0x0132 + +#WC20 + xgxs_tx_lane_map_84=0x3120 + xgxs_rx_lane_map_84=0x1032 + +#WC21 +xgxs_tx_lane_map_88=0x2310 +xgxs_rx_lane_map_88=0x0123 + +#WC22 +xgxs_tx_lane_map_92=0x2310 +xgxs_rx_lane_map_92=0x1302 +xgxs_tx_lane_map_93=0x2310 +xgxs_rx_lane_map_93=0x1302 + +#WC23 +xgxs_tx_lane_map_96=0x1302 +xgxs_rx_lane_map_96=0x1023 +xgxs_tx_lane_map_97=0x1302 +xgxs_rx_lane_map_97=0x1023 + +#WC24 +xgxs_tx_lane_map_102=0x2310 +xgxs_rx_lane_map_102=0x1032 +xgxs_tx_lane_map_103=0x2310 +xgxs_rx_lane_map_103=0x1032 + +#WC25 +xgxs_tx_lane_map_106=0x2310 +xgxs_rx_lane_map_106=0x1023 +xgxs_tx_lane_map_107=0x2310 +xgxs_rx_lane_map_107=0x1023 + +#WC26 +xgxs_tx_lane_map_110=0x2310 +xgxs_rx_lane_map_110=0x1302 + +#WC27 +xgxs_tx_lane_map_114=0x1302 +xgxs_rx_lane_map_114=0x1032 + +#WC0 +xgxs_tx_lane_map_1=0x0123 +xgxs_rx_lane_map_1=0x0213 + +#WC1 +xgxs_tx_lane_map_5=0x2310 +xgxs_rx_lane_map_5=0x3201 + +#WC2 +xgxs_tx_lane_map_9=0x1023 +xgxs_rx_lane_map_9=0x0213 +xgxs_tx_lane_map_10=0x1023 +xgxs_rx_lane_map_10=0x0213 + +#WC3 +xgxs_tx_lane_map_13=0x1302 +xgxs_rx_lane_map_13=0x3201 +xgxs_tx_lane_map_14=0x1302 +xgxs_rx_lane_map_14=0x3201 + +#WC4 +xgxs_tx_lane_map_17=0x0132 +xgxs_rx_lane_map_17=0x0123 +xgxs_tx_lane_map_18=0x0132 +xgxs_rx_lane_map_18=0x0123 + +#WC5 +xgxs_tx_lane_map_21=0x1032 +xgxs_rx_lane_map_21=0x0213 +xgxs_tx_lane_map_22=0x1032 +xgxs_rx_lane_map_22=0x0213 + +#WC6 +xgxs_tx_lane_map_25=0x1023 +xgxs_rx_lane_map_25=0x3120 +xgxs_tx_lane_map_26=0x1023 +xgxs_rx_lane_map_26=0x3120 + +#WC7 +xgxs_tx_lane_map_29=0x2031 +xgxs_rx_lane_map_29=0x3201 +xgxs_tx_lane_map_30=0x2031 +xgxs_rx_lane_map_30=0x3201 + +#WC28 
+xgxs_tx_lane_map_118=0x0231 +xgxs_rx_lane_map_118=0x2031 +xgxs_tx_lane_map_119=0x0231 +xgxs_rx_lane_map_119=0x2031 + +#WC29 +xgxs_tx_lane_map_122=0x3201 +xgxs_rx_lane_map_122=0x2301 +xgxs_tx_lane_map_123=0x3201 +xgxs_rx_lane_map_123=0x2301 + +#WC30 +xgxs_tx_lane_map_126=0x0213 +xgxs_rx_lane_map_126=0x0213 +xgxs_tx_lane_map_127=0x0213 +xgxs_rx_lane_map_127=0x0213 + +#WC31 +xgxs_tx_lane_map_130=0x2031 +xgxs_rx_lane_map_130=0x1032 +xgxs_tx_lane_map_131=0x2031 +xgxs_rx_lane_map_131=0x1032 + +#PN + +#WC16 +phy_xaui_tx_polarity_flip_68=0x0000 +phy_xaui_rx_polarity_flip_68=0x0000 +phy_xaui_tx_polarity_flip_69=0x0000 +phy_xaui_rx_polarity_flip_69=0x0000 + +#WC17 +phy_xaui_tx_polarity_flip_72=0x0003 +phy_xaui_rx_polarity_flip_72=0x0000 +phy_xaui_tx_polarity_flip_73=0x0002 +phy_xaui_rx_polarity_flip_73=0x0001 + + + +#WC18 +phy_xaui_tx_polarity_flip_76=0x0003 +phy_xaui_rx_polarity_flip_76=0x0000 +phy_xaui_tx_polarity_flip_77=0x0003 +phy_xaui_rx_polarity_flip_77=0x0000 + + +#WC19 +phy_xaui_tx_polarity_flip_80=0x0003 +phy_xaui_rx_polarity_flip_80=0x0003 +phy_xaui_tx_polarity_flip_81=0x0003 +phy_xaui_rx_polarity_flip_81=0x0003 + + +#WC8 +phy_xaui_tx_polarity_flip_34=0x0003 +phy_xaui_rx_polarity_flip_34=0x0000 +phy_xaui_tx_polarity_flip_35=0x0001 +phy_xaui_rx_polarity_flip_35=0x0000 + + +#WC9 +phy_xaui_tx_polarity_flip_38=0x0001 +phy_xaui_rx_polarity_flip_38=0x0000 +phy_xaui_tx_polarity_flip_39=0x0000 +phy_xaui_rx_polarity_flip_39=0x0000 + + +#WC10 +phy_xaui_tx_polarity_flip_42=0x0003 +phy_xaui_rx_polarity_flip_42=0x0000 +phy_xaui_tx_polarity_flip_43=0x0002 +phy_xaui_rx_polarity_flip_43=0x0000 + + +#WC11 +phy_xaui_tx_polarity_flip_46=0x0000 +phy_xaui_rx_polarity_flip_46=0x0000 +phy_xaui_tx_polarity_flip_47=0x0000 +phy_xaui_rx_polarity_flip_47=0x0000 + + +#WC12 +phy_xaui_tx_polarity_flip_50=0x0000 +phy_xaui_rx_polarity_flip_50=0x0000 +phy_xaui_tx_polarity_flip_51=0x0001 +phy_xaui_rx_polarity_flip_51=0x0000 + +#WC13 +phy_xaui_tx_polarity_flip_54=0x0000 
+phy_xaui_rx_polarity_flip_54=0x0000 +phy_xaui_tx_polarity_flip_55=0x0001 +phy_xaui_rx_polarity_flip_55=0x0000 + +#WC14 +phy_xaui_tx_polarity_flip_58=0x0000 +phy_xaui_rx_polarity_flip_58=0x0000 + +#WC15 +phy_xaui_tx_polarity_flip_62=0x0005 +phy_xaui_rx_polarity_flip_62=0x000F + +#WC20 + phy_xaui_tx_polarity_flip_84=0x000E + phy_xaui_rx_polarity_flip_84=0x0007 + +#WC21 +phy_xaui_tx_polarity_flip_88=0x000B +phy_xaui_rx_polarity_flip_88=0x000B + +#WC22 +phy_xaui_tx_polarity_flip_92=0x0003 +phy_xaui_rx_polarity_flip_92=0x0001 +phy_xaui_tx_polarity_flip_93=0x0003 +phy_xaui_rx_polarity_flip_93=0x0000 + + +#WC23 +phy_xaui_tx_polarity_flip_96=0x0002 +phy_xaui_rx_polarity_flip_96=0x0000 +phy_xaui_tx_polarity_flip_97=0x0002 +phy_xaui_rx_polarity_flip_97=0x0000 + +#WC24 +phy_xaui_tx_polarity_flip_102=0x0000 +phy_xaui_rx_polarity_flip_102=0x0003 +phy_xaui_tx_polarity_flip_103=0x0000 +phy_xaui_rx_polarity_flip_103=0x0003 + + +#WC25 +phy_xaui_tx_polarity_flip_106=0x0003 +phy_xaui_rx_polarity_flip_106=0x0000 +phy_xaui_tx_polarity_flip_107=0x0003 +phy_xaui_rx_polarity_flip_107=0x0000 + +#WC26 +phy_xaui_tx_polarity_flip_110=0x000F +phy_xaui_rx_polarity_flip_110=0x000F + +#WC27 +phy_xaui_tx_polarity_flip_114=0x000F +phy_xaui_rx_polarity_flip_114=0x000E + +#WC0 +phy_xaui_tx_polarity_flip_1=0x000C +phy_xaui_rx_polarity_flip_1=0x000F + +#WC1 +phy_xaui_tx_polarity_flip_5=0x000E +phy_xaui_rx_polarity_flip_5=0x0000 + +#WC2 +phy_xaui_tx_polarity_flip_9=0x0000 +phy_xaui_rx_polarity_flip_9=0x0001 +phy_xaui_tx_polarity_flip_10=0x0001 +phy_xaui_rx_polarity_flip_10=0x0000 + + +#WC3 +phy_xaui_tx_polarity_flip_13=0x0003 +phy_xaui_rx_polarity_flip_13=0x0000 +phy_xaui_tx_polarity_flip_14=0x0003 +phy_xaui_rx_polarity_flip_14=0x0000 + +#WC4 +phy_xaui_tx_polarity_flip_17=0x0002 +phy_xaui_rx_polarity_flip_17=0x0000 +phy_xaui_tx_polarity_flip_18=0x0003 +phy_xaui_rx_polarity_flip_18=0x0000 + + +#WC5 +phy_xaui_tx_polarity_flip_21=0x0000 +phy_xaui_rx_polarity_flip_21=0x0000 
+phy_xaui_tx_polarity_flip_22=0x0000 +phy_xaui_rx_polarity_flip_22=0x0000 + + +#WC6 +phy_xaui_tx_polarity_flip_25=0x0000 +phy_xaui_rx_polarity_flip_25=0x0002 +phy_xaui_tx_polarity_flip_26=0x0001 +phy_xaui_rx_polarity_flip_26=0x0002 + + +#WC7 +phy_xaui_tx_polarity_flip_29=0x0000 +phy_xaui_rx_polarity_flip_29=0x0000 +phy_xaui_tx_polarity_flip_30=0x0001 +phy_xaui_rx_polarity_flip_30=0x0000 + + +#WC28 +phy_xaui_tx_polarity_flip_118=0x0003 +phy_xaui_rx_polarity_flip_118=0x0003 +phy_xaui_tx_polarity_flip_119=0x0003 +phy_xaui_rx_polarity_flip_119=0x0003 + + +#WC29 +phy_xaui_tx_polarity_flip_122=0x0002 +phy_xaui_rx_polarity_flip_122=0x0000 +phy_xaui_tx_polarity_flip_123=0x0000 +phy_xaui_rx_polarity_flip_123=0x0000 + + +#WC30 +phy_xaui_tx_polarity_flip_126=0x0003 +phy_xaui_rx_polarity_flip_126=0x0000 +phy_xaui_tx_polarity_flip_127=0x0003 +phy_xaui_rx_polarity_flip_127=0x0000 + + +#WC31 +phy_xaui_tx_polarity_flip_130=0x0002 +phy_xaui_rx_polarity_flip_130=0x0000 +phy_xaui_tx_polarity_flip_131=0x0001 +phy_xaui_rx_polarity_flip_131=0x0000 + +#xe +serdes_driver_current=0x0a +serdes_preemphasis=0x1a5402 + +#ce0 +serdes_driver_current_lane0_58=0x0a +serdes_driver_current_lane1_58=0x09 +serdes_driver_current_lane2_58=0x09 +serdes_driver_current_lane3_58=0x0a +serdes_preemphasis_lane0_58=0x254902 +serdes_preemphasis_lane1_58=0x244a02 +serdes_preemphasis_lane2_58=0x244a02 +serdes_preemphasis_lane3_58=0x254902 + +#ce1 +serdes_driver_current_lane0_62=0x09 +serdes_driver_current_lane1_62=0x0a +serdes_driver_current_lane2_62=0x09 +serdes_driver_current_lane3_62=0x09 +serdes_preemphasis_lane0_62=0x244a02 +serdes_preemphasis_lane1_62=0x254902 +serdes_preemphasis_lane2_62=0x244a02 +serdes_preemphasis_lane3_62=0x244a02 + +#ce2 +serdes_driver_current_lane0_84=0x09 +serdes_driver_current_lane1_84=0x09 +serdes_driver_current_lane2_84=0x09 +serdes_driver_current_lane3_84=0x09 +serdes_preemphasis_lane0_84=0x204e02 +serdes_preemphasis_lane1_84=0x204e02 +serdes_preemphasis_lane2_84=0x204e02 
+serdes_preemphasis_lane3_84=0x204e02 + +#ce3 +serdes_driver_current_lane0_88=0x09 +serdes_driver_current_lane1_88=0x08 +serdes_driver_current_lane2_88=0x08 +serdes_driver_current_lane3_88=0x09 +serdes_preemphasis_lane0_88=0x204e02 +serdes_preemphasis_lane1_88=0x1d5102 +serdes_preemphasis_lane2_88=0x1d5102 +serdes_preemphasis_lane3_88=0x204e02 + +#ce4 +serdes_driver_current_lane0_110=0x09 +serdes_driver_current_lane1_110=0x08 +serdes_driver_current_lane2_110=0x08 +serdes_driver_current_lane3_110=0x09 +serdes_preemphasis_lane0_110=0x204e02 +serdes_preemphasis_lane1_110=0x1d5102 +serdes_preemphasis_lane2_110=0x1d5102 +serdes_preemphasis_lane3_110=0x204e02 + +#ce5 +serdes_driver_current_lane0_114=0x09 +serdes_driver_current_lane1_114=0x08 +serdes_driver_current_lane2_114=0x09 +serdes_driver_current_lane3_114=0x09 +serdes_preemphasis_lane0_114=0x204e02 +serdes_preemphasis_lane1_114=0x1d5102 +serdes_preemphasis_lane2_114=0x224c02 +serdes_preemphasis_lane3_114=0x224c02 + +#ce6 +serdes_driver_current_lane0_1=0x09 +serdes_driver_current_lane1_1=0x0a +serdes_driver_current_lane2_1=0x09 +serdes_driver_current_lane3_1=0x0a +serdes_preemphasis_lane0_1=0x244a02 +serdes_preemphasis_lane1_1=0x254902 +serdes_preemphasis_lane2_1=0x244a02 +serdes_preemphasis_lane3_1=0x254902 + +#ce7 +serdes_driver_current_lane0_5=0x09 +serdes_driver_current_lane1_5=0x09 +serdes_driver_current_lane2_5=0x09 +serdes_driver_current_lane3_5=0x0a +serdes_preemphasis_lane0_5=0x244a02 +serdes_preemphasis_lane1_5=0x244a02 +serdes_preemphasis_lane2_5=0x244a02 +serdes_preemphasis_lane3_5=0x254902 + +phy_an_lt_msft=1 From b45aaeffdcda247fe75342563990056eabe84b5a Mon Sep 17 00:00:00 2001 From: Myron Sosyak Date: Wed, 9 Nov 2022 16:44:30 -0800 Subject: [PATCH 164/174] Add Barefoot to the list of supported platforms (#12269) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 1f78c9f56d60..cda483fbd941 100644 --- a/README.md +++ b/README.md @@ -133,6 +133,7 @@ To build SONiC 
installer image and docker images, run the following commands: The supported ASIC vendors are: +- PLATFORM=barefoot - PLATFORM=broadcom - PLATFORM=marvell - PLATFORM=mellanox From 0ea4f4d00e44e5a6ac502f5bafdfde0b3812218b Mon Sep 17 00:00:00 2001 From: Devesh Pathak <54966909+devpatha@users.noreply.github.com> Date: Wed, 9 Nov 2022 16:54:56 -0800 Subject: [PATCH 165/174] Clear /etc/resolv.conf before building image (#12592) Why I did it nameserver and domain entries from build system fsroot gets into sonic image. How I did it Clear /etc/resolv.conf before building image How to verify it Built image with it and verified with install that /etc/resolv.conf is empty --- build_debian.sh | 1 + files/image_config/resolv-config/resolv.conf | 0 2 files changed, 1 insertion(+) create mode 100644 files/image_config/resolv-config/resolv.conf diff --git a/build_debian.sh b/build_debian.sh index e09e23706f3a..140e3b4b8aef 100755 --- a/build_debian.sh +++ b/build_debian.sh @@ -679,6 +679,7 @@ sudo rm -f $ONIE_INSTALLER_PAYLOAD $FILESYSTEM_SQUASHFS ## Note: -x to skip directories on different file systems, such as /proc sudo du -hsx $FILESYSTEM_ROOT sudo mkdir -p $FILESYSTEM_ROOT/var/lib/docker +sudo cp files/image_config/resolv-config/resolv.conf $FILESYSTEM_ROOT/etc/resolv.conf sudo mksquashfs $FILESYSTEM_ROOT $FILESYSTEM_SQUASHFS -comp zstd -b 1M -e boot -e var/lib/docker -e $PLATFORM_DIR # Ensure admin gid is 1000 diff --git a/files/image_config/resolv-config/resolv.conf b/files/image_config/resolv-config/resolv.conf new file mode 100644 index 000000000000..e69de29bb2d1 From 43463ced7ef6331b1d2055326df6d5a408ff7492 Mon Sep 17 00:00:00 2001 From: Dmytro Lytvynenko Date: Thu, 10 Nov 2022 04:46:49 +0200 Subject: [PATCH 166/174] Add missing import (#12624) Why I did it syseepromd in pmon crashes because of missing import in python script and doesn't get in running state How I did it Fix missing import issue to avoid python script failing How to verify it Boot system and wait till 
syseepromd gets into running state Which release branch to backport (provide reason below if selected) 201811 201911 202006 202012 202106 202111 202205 --- .../sonic-platform-modules-bfn-montara/sonic_platform/eeprom.py | 1 + 1 file changed, 1 insertion(+) diff --git a/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/eeprom.py b/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/eeprom.py index 5d3827e6eba2..973df9175032 100644 --- a/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/eeprom.py +++ b/platform/barefoot/sonic-platform-modules-bfn-montara/sonic_platform/eeprom.py @@ -4,6 +4,7 @@ import datetime import logging import logging.config + import thrift sys.path.append(os.path.dirname(__file__)) From 111752957ff6658c210062e7ff506bddfa3ab5e7 Mon Sep 17 00:00:00 2001 From: Jing Kan Date: Thu, 10 Nov 2022 13:37:02 +0800 Subject: [PATCH 167/174] [dhcp_relay] Enable DHCP Relay for BmcMgmtToRRouter in init_cfg (#12648) Why I did it DHCP relay feature needs to be enabled for BmcMgmtToRRouter by default How I did it Update device type list --- files/build_templates/init_cfg.json.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/build_templates/init_cfg.json.j2 b/files/build_templates/init_cfg.json.j2 index 8342b4178d37..2235efb9b160 100644 --- a/files/build_templates/init_cfg.json.j2 +++ b/files/build_templates/init_cfg.json.j2 @@ -43,7 +43,7 @@ ("swss", "enabled", false, "enabled"), ("syncd", "enabled", false, "enabled"), ("teamd", "enabled", false, "enabled")] %} -{% do features.append(("dhcp_relay", "{% if not (DEVICE_METADATA is defined and DEVICE_METADATA['localhost'] is defined and DEVICE_METADATA['localhost']['type'] is defined and DEVICE_METADATA['localhost']['type'] is not in ['ToRRouter', 'EPMS', 'MgmtTsToR', 'MgmtToRRouter']) %}enabled{% else %}disabled{% endif %}", false, "enabled")) %} +{% do features.append(("dhcp_relay", "{% if not (DEVICE_METADATA is defined and 
DEVICE_METADATA['localhost'] is defined and DEVICE_METADATA['localhost']['type'] is defined and DEVICE_METADATA['localhost']['type'] is not in ['ToRRouter', 'EPMS', 'MgmtTsToR', 'MgmtToRRouter', 'BmcMgmtToRRouter']) %}enabled{% else %}disabled{% endif %}", false, "enabled")) %} {%- if sonic_asic_platform == "vs" %}{% do features.append(("gbsyncd", "enabled", false, "enabled")) %}{% endif %} {%- if include_iccpd == "y" %}{% do features.append(("iccpd", "disabled", false, "enabled")) %}{% endif %} {%- if include_mgmt_framework == "y" %}{% do features.append(("mgmt-framework", "enabled", true, "enabled")) %}{% endif %} From 7c746e67d2d3dbbbe7bcb559ca023b4a4b060229 Mon Sep 17 00:00:00 2001 From: Yutong Zhang <90831468+yutongzhang-microsoft@users.noreply.github.com> Date: Thu, 10 Nov 2022 15:27:31 +0800 Subject: [PATCH 168/174] [master][TestbedV2] migrate t0-sonic test jobs to TestbedV2. (#12651) Migrate t0-sonic test jobs to TestbedV2. Why I did it Migrate t0-sonic test jobs to TestbedV2. How I did it Add two parameters to create testplan. Modify azure-pipelines.yml to run t0-sonic on tbv2. 
Signed-off-by: Yutong Zhang --- .../run-test-scheduler-template.yml | 18 ++++++++++++++++- azure-pipelines.yml | 20 ++++++++++++++++++- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/.azure-pipelines/run-test-scheduler-template.yml b/.azure-pipelines/run-test-scheduler-template.yml index 265f698e0290..470558637b47 100644 --- a/.azure-pipelines/run-test-scheduler-template.yml +++ b/.azure-pipelines/run-test-scheduler-template.yml @@ -30,6 +30,18 @@ parameters: type: string default: "" +- name: VM_TYPE + type: string + default: "ceos" + +- name: SPECIFIED_PARAMS + type: string + default: "{}" + +- name: MGMT_BRANCH + type: string + default: master + steps: - script: | set -ex @@ -41,7 +53,11 @@ steps: set -ex pip install PyYAML rm -f new_test_plan_id.txt - python ./.azure-pipelines/test_plan.py create -t ${{ parameters.TOPOLOGY }} -o new_test_plan_id.txt --min-worker ${{ parameters.MIN_WORKER }} --max-worker ${{ parameters.MAX_WORKER }} --test-set ${{ parameters.TEST_SET }} --kvm-build-id $(KVM_BUILD_ID) --deploy-mg-extra-params "${{ parameters.DEPLOY_MG_EXTRA_PARAMS }}" --common-extra-params "${{ parameters.COMMON_EXTRA_PARAMS }}" + python ./.azure-pipelines/test_plan.py create -t ${{ parameters.TOPOLOGY }} -o new_test_plan_id.txt \ + --min-worker ${{ parameters.MIN_WORKER }} --max-worker ${{ parameters.MAX_WORKER }} \ + --test-set ${{ parameters.TEST_SET }} --kvm-build-id $(KVM_BUILD_ID) \ + --deploy-mg-extra-params "${{ parameters.DEPLOY_MG_EXTRA_PARAMS }}" --common-extra-params "${{ parameters.COMMON_EXTRA_PARAMS }}" \ + --mgmt-branch ${{ parameters.MGMT_BRANCH }} --vm-type ${{ parameters.VM_TYPE }} --specified-params "${{ parameters.SPECIFIED_PARAMS }}" TEST_PLAN_ID=`cat new_test_plan_id.txt` echo "Created test plan $TEST_PLAN_ID" diff --git a/azure-pipelines.yml b/azure-pipelines.yml index c526b0dc3f2d..f0127c325102 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -290,6 +290,7 @@ stages: pool: sonictest-sonic-t0 displayName: 
"kvmtest-t0-sonic" timeoutInMinutes: 360 + condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_CLASSICAL_TEST, 'YES')) continueOnError: true steps: - template: .azure-pipelines/run-test-template.yml @@ -328,4 +329,21 @@ stages: MIN_WORKER: 1 MAX_WORKER: 1 COMMON_EXTRA_PARAMS: "--disable_loganalyzer " - + + - job: sonic_t0_testbedv2 + displayName: "kvmtest-t0-sonic by TestbedV2" + pool: + vmImage: 'ubuntu-20.04' + timeoutInMinutes: 1080 + condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_TESTBEDV2_TEST, 'YES')) + continueOnError: true + steps: + - template: .azure-pipelines/run-test-scheduler-template.yml + parameters: + TOPOLOGY: t0-64-32 + MIN_WORKER: 1 + MAX_WORKER: 2 + TEST_SET: t0-sonic + COMMON_EXTRA_PARAMS: "--neighbor_type=sonic --enable_macsec --macsec_profile=128_SCI,256_XPN_SCI" + VM_TYPE: vsonic + SPECIFIED_PARAMS: '{\"test_pretest.py\":[\"--completeness_level=confident\",\"--allow_recover\"],\"test_posttest.py\":[\"--completeness_level=confident\",\"--allow_recover\"]}' From 10f36d63865a65c5b9b2f96ea8caaa0bc3b53732 Mon Sep 17 00:00:00 2001 From: Kebo Liu Date: Thu, 10 Nov 2022 20:34:05 +0800 Subject: [PATCH 169/174] [submodule] Advance sonic-swss pointer (#12498) Advance sonic-swss submodule to pick up new commits: dbdf31c [counters] Improve performance by polling only configured ports buffer queue/pg counters sonic-net/sonic-swss#2473 ab4f804 [portsorch] remove port OID from saiOidToAlias map on port deletion sonic-net/sonic-swss#2483 ab29920 [QoS] Support dynamic headroom calculation for Barefoot platforms sonic-net/sonic-swss#2412 15beee4 Add support for voq counters in portsorch. 
sonic-net/sonic-swss#2467 c8d4905 [vlanmgr] Disable arp_evict_nocarrier for vlan host intf sonic-net/sonic-swss#2469 31c9321 [chassis][voq]Collect counters for fabric links sonic-net/sonic-swss#1944 Signed-off-by: Kebo Liu --- src/sonic-swss | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-swss b/src/sonic-swss index df92fb7283b4..dbdf31c10958 160000 --- a/src/sonic-swss +++ b/src/sonic-swss @@ -1 +1 @@ -Subproject commit df92fb7283b4490a33e14fd1d6626e9c17a3b6ea +Subproject commit dbdf31c10958414ee108850e8ee244c90d28b544 From 1f5cb98e9a33361c318a3bee10cb0ff72df33215 Mon Sep 17 00:00:00 2001 From: Stephen Sun <5379172+stephenxs@users.noreply.github.com> Date: Fri, 11 Nov 2022 08:36:19 +0800 Subject: [PATCH 170/174] [submodule] Advance sonic-utilities and sonic-platform-common pointers (#12639) * Advance submodule sonic-utilities d5a6da31 Do not configure physical attributes on port channels in portconfig (#2456) 48ee7722 Change db_migrator major version on master branch from version 3 to 4 (#2470) f3746163 [GCU] Fix JsonPointerFilter bug (#2477) 58dbb3e6 YANG Validation for ConfigDB Updates: TACPLUS, TACPLUS_SERVER, AAA, VLAN_SUB_INTERFACE tables + decorated validated_mod_entry (#2452) 062f18a0 fix show interface neighbor expected empty issue (#2465) 569edf3b Fix display disorder problem of show mirror_session (#2447) daaf0ffc Disable "tag as local" when reboot (#2451) 6621120b Fix sudo sfputil show error-status on a multiasic platform issue (#2373) e8b1dcdf Add IP remove warnings for VRF commands (#2351) 40cc8e11 [scripts/generate_dump] add information to tech-support file (#2357) 8473517e Revert "[config reload]: On dual ToR systems, cache ARP and FDB table (#2460) Signed-off-by: Stephen Sun * Advance sonic-platform-common aa860837 Fix issue: rounding float value for txpower and rxpower (#320) 2052a63d Fix issue: copper cable should not display DOM information (#318) cf4c6af7 CmisApi::get_application_advertisement catch AttributeError as 
well (#316) Signed-off-by: Stephen Sun Signed-off-by: Stephen Sun --- src/sonic-platform-common | 2 +- src/sonic-utilities | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/sonic-platform-common b/src/sonic-platform-common index 86bab38c723e..aa86083707c8 160000 --- a/src/sonic-platform-common +++ b/src/sonic-platform-common @@ -1 +1 @@ -Subproject commit 86bab38c723eb4ebbfa16feed66344d1b3ffd46e +Subproject commit aa86083707c8f6eff24e02df1e5f2198259a8086 diff --git a/src/sonic-utilities b/src/sonic-utilities index 4a3d49d359f7..d5a6da31ef94 160000 --- a/src/sonic-utilities +++ b/src/sonic-utilities @@ -1 +1 @@ -Subproject commit 4a3d49d359f787cae896d65fa882fbaaac6e57f2 +Subproject commit d5a6da31ef94e350c2cb66c3c981145524d7de6f From 5aa03246fc5d0753817bb2ccf8182ee70aab16f6 Mon Sep 17 00:00:00 2001 From: Yutong Zhang <90831468+yutongzhang-microsoft@users.noreply.github.com> Date: Fri, 11 Nov 2022 10:54:37 +0800 Subject: [PATCH 171/174] [master][TestbedV2] Migrate multi-asic test jobs to TestbedV2. (#12668) Migrate multi-asic test jobs to TestbedV2. Why I did it Migrate multi-asic test jobs to TestbedV2. How I did it Add one parameter num_asic to create testplan. Modify azure-pipelines.yml to run multi-asic on tbv2. 
Signed-off-by: Yutong Zhang --- .../run-test-scheduler-template.yml | 7 ++++++- azure-pipelines.yml | 17 +++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/.azure-pipelines/run-test-scheduler-template.yml b/.azure-pipelines/run-test-scheduler-template.yml index 470558637b47..f43ce59d8a57 100644 --- a/.azure-pipelines/run-test-scheduler-template.yml +++ b/.azure-pipelines/run-test-scheduler-template.yml @@ -42,6 +42,10 @@ parameters: type: string default: master +- name: NUM_ASIC + type: number + default: 1 + steps: - script: | set -ex @@ -57,7 +61,8 @@ steps: --min-worker ${{ parameters.MIN_WORKER }} --max-worker ${{ parameters.MAX_WORKER }} \ --test-set ${{ parameters.TEST_SET }} --kvm-build-id $(KVM_BUILD_ID) \ --deploy-mg-extra-params "${{ parameters.DEPLOY_MG_EXTRA_PARAMS }}" --common-extra-params "${{ parameters.COMMON_EXTRA_PARAMS }}" \ - --mgmt-branch ${{ parameters.MGMT_BRANCH }} --vm-type ${{ parameters.VM_TYPE }} --specified-params "${{ parameters.SPECIFIED_PARAMS }}" + --mgmt-branch ${{ parameters.MGMT_BRANCH }} --vm-type ${{ parameters.VM_TYPE }} --specified-params "${{ parameters.SPECIFIED_PARAMS }}" \ + --num-asic ${{ parameters.NUM_ASIC }} TEST_PLAN_ID=`cat new_test_plan_id.txt` echo "Created test plan $TEST_PLAN_ID" diff --git a/azure-pipelines.yml b/azure-pipelines.yml index f0127c325102..073702ba3f68 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -305,6 +305,7 @@ stages: pool: sonictest-ma displayName: "kvmtest-multi-asic-t1-lag" timeoutInMinutes: 240 + condition: and(succeeded(), eq(variables.BUILD_IMG_RUN_CLASSICAL_TEST, 'YES')) continueOnError: true steps: - template: .azure-pipelines/run-test-template.yml @@ -315,6 +316,22 @@ stages: tbtype: multi-asic-t1-lag-pr image: sonic-4asic-vs.img.gz + - job: multi_asic_testbedv2 + displayName: "kvmtest-multi-asic-t1-lag by TestbedV2" + pool: + vmImage: 'ubuntu-20.04' + timeoutInMinutes: 1080 + condition: and(succeeded(), 
eq(variables.BUILD_IMG_RUN_TESTBEDV2_TEST, 'YES')) + continueOnError: true + steps: + - template: .azure-pipelines/run-test-scheduler-template.yml + parameters: + TOPOLOGY: t1-8-lag + TEST_SET: multi-asic-t1-lag + MIN_WORKER: 1 + MAX_WORKER: 1 + NUM_ASIC: 4 + - job: dualtor_testbedv2 pool: vmImage: 'ubuntu-20.04' From f97cf579d2712b58c32452f1dba60483c5f7c10b Mon Sep 17 00:00:00 2001 From: Andriy Kokhan Date: Fri, 11 Nov 2022 09:19:25 +0200 Subject: [PATCH 172/174] [BFN] Stop PMON before swss during warm reboot (#12658) Why I did it Stopping of pmon after swss and syncd causes some ERROR logs in syslog. Also, this affects teamd downtime. How I did it Adjust warmboot shutdown order in make file How to verify it Build SONiC image, deploy to the target device and check /etc/sonic/warm-reboot_order content. lldp mux nat radv sflow bgp pmon swss teamd syncd --- rules/docker-platform-monitor.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rules/docker-platform-monitor.mk b/rules/docker-platform-monitor.mk index 5c3c760f192e..a87a30247337 100644 --- a/rules/docker-platform-monitor.mk +++ b/rules/docker-platform-monitor.mk @@ -37,7 +37,7 @@ $(DOCKER_PLATFORM_MONITOR)_LOAD_DOCKERS = $(DOCKER_CONFIG_ENGINE_BULLSEYE) $(DOCKER_PLATFORM_MONITOR)_VERSION = 1.0.0 $(DOCKER_PLATFORM_MONITOR)_PACKAGE_NAME = pmon -ifeq ($(CONFIGURED_PLATFORM),mellanox) +ifeq ($(CONFIGURED_PLATFORM),$(filter $(CONFIGURED_PLATFORM),mellanox barefoot)) $(DOCKER_PLATFORM_MONITOR)_WARM_SHUTDOWN_BEFORE = swss endif From c6b25a553793cb8452c5f2836293af5a6c2af0bb Mon Sep 17 00:00:00 2001 From: Kebo Liu Date: Sat, 12 Nov 2022 04:14:15 +0800 Subject: [PATCH 173/174] advance sonic-swss pointer (#12670) swss update with following commits: 81f4ea9 orchagent/portsorch: Missing scheduler group after SWSS restart (#2174) e557855 [SWSS] Innovium platform specific changes in PFC Detect lua script (#2493) 6e288dc New P4Orch development. 
(#2425) ab0e474 swss: Fixing race condition for rif counters (#2488) 724f914 [tests] [asan] extend graceful stop flag to also stop syncd (#2491) 84642f3 [Dynamic buffer calculation][Mellanox] Enhance the logic to identify buffer pools and profiles (#2498) e04bb43 Fix vs test issue: failed to remove vlan due to referenced by vlan interface (#2504) 52c561f Added LAG member check on addLagMember() (#2464) --- src/sonic-swss | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-swss b/src/sonic-swss index dbdf31c10958..81f4ea994fb3 160000 --- a/src/sonic-swss +++ b/src/sonic-swss @@ -1 +1 @@ -Subproject commit dbdf31c10958414ee108850e8ee244c90d28b544 +Subproject commit 81f4ea994fb3b440709f2db3c50c257148fe5e9e From 3717e6236a135ea55e05dda43026581fa4ab73d7 Mon Sep 17 00:00:00 2001 From: maipbui Date: Fri, 11 Nov 2022 23:10:40 +0000 Subject: [PATCH 174/174] update semgrep Signed-off-by: maipbui --- azure-pipelines.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 073702ba3f68..7cb439d95f22 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -364,3 +364,17 @@ stages: COMMON_EXTRA_PARAMS: "--neighbor_type=sonic --enable_macsec --macsec_profile=128_SCI,256_XPN_SCI" VM_TYPE: vsonic SPECIFIED_PARAMS: '{\"test_pretest.py\":[\"--completeness_level=confident\",\"--allow_recover\"],\"test_posttest.py\":[\"--completeness_level=confident\",\"--allow_recover\"]}' + + - job: Semgrep + displayName: "Semgrep" + trigger: + - master + pool: + vmImage: ubuntu-latest + steps: + - script: | + python -m pip install --upgrade pip + pip install semgrep + semgrep --config auto + env: + SEMGREP_PR_ID: $(System.PullRequest.PullRequestNumber)