diff --git a/cfgmgr/fabricmgr.cpp b/cfgmgr/fabricmgr.cpp index bcbaa5726a..16a8111199 100644 --- a/cfgmgr/fabricmgr.cpp +++ b/cfgmgr/fabricmgr.cpp @@ -105,12 +105,12 @@ bool FabricMgr::writeConfigToAppDb(const std::string &key, const std::string &fi if (key == "FABRIC_MONITOR_DATA") { m_appFabricMonitorTable.set(key, fvs); - SWSS_LOG_NOTICE("Write FABRIC_MONITOR:%s %s to %s", key.c_str(), field.c_str(), value.c_str()); + SWSS_LOG_INFO("Write FABRIC_MONITOR:%s %s to %s", key.c_str(), field.c_str(), value.c_str()); } else { m_appFabricPortTable.set(key, fvs); - SWSS_LOG_NOTICE("Write FABRIC_PORT:%s %s to %s", key.c_str(), field.c_str(), value.c_str()); + SWSS_LOG_INFO("Write FABRIC_PORT:%s %s to %s", key.c_str(), field.c_str(), value.c_str()); } return true; diff --git a/orchagent/fabricportsorch.cpp b/orchagent/fabricportsorch.cpp index 798a62988c..b47f61a635 100644 --- a/orchagent/fabricportsorch.cpp +++ b/orchagent/fabricportsorch.cpp @@ -10,6 +10,8 @@ #include "sai_serialize.h" #include "timer.h" #include "saihelper.h" +#include "converter.h" +#include "stringutility.h" #define FABRIC_POLLING_INTERVAL_DEFAULT (30) #define FABRIC_PORT_PREFIX "PORT" @@ -19,6 +21,18 @@ #define FABRIC_PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 #define FABRIC_QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP "FABRIC_QUEUE_STAT_COUNTER" #define FABRIC_QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 100000 +#define FABRIC_DEBUG_POLLING_INTERVAL_DEFAULT (60) + +// constants for link monitoring +#define MAX_SKIP_CRCERR_ON_LNKUP_POLLS 20 +#define MAX_SKIP_FECERR_ON_LNKUP_POLLS 20 +// the follow constants will be replaced with the number in config_db +#define FEC_ISOLATE_POLLS 2 +#define FEC_UNISOLATE_POLLS 8 +#define ISOLATION_POLLS_CFG 1 +#define RECOVERY_POLLS_CFG 8 +#define ERROR_RATE_CRC_CELLS_CFG 1 +#define ERROR_RATE_RX_CELLS_CFG 61035156 extern sai_object_id_t gSwitchId; extern sai_switch_api_t *sai_switch_api; @@ -51,7 +65,8 @@ FabricPortsOrch::FabricPortsOrch(DBConnector *appl_db, 
vector(new DBConnector("COUNTERS_DB", 0)); m_portNameQueueCounterTable = unique_ptr(new Table(m_counter_db.get(), COUNTERS_FABRIC_QUEUE_NAME_MAP)); m_portNamePortCounterTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_FABRIC_PORT_NAME_MAP)); + m_fabricCounterTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_TABLE)); m_flex_db = shared_ptr(new DBConnector("FLEX_COUNTER_DB", 0)); m_flexCounterTable = unique_ptr(new ProducerTable(m_flex_db.get(), APP_FABRIC_PORT_TABLE_NAME)); + m_appl_db = shared_ptr(new DBConnector("APPL_DB", 0)); + m_applTable = unique_ptr
(new Table(m_appl_db.get(), APP_FABRIC_MONITOR_PORT_TABLE_NAME)); m_fabricPortStatEnabled = fabricPortStatEnabled; m_fabricQueueStatEnabled = fabricQueueStatEnabled; @@ -75,6 +93,10 @@ FabricPortsOrch::FabricPortsOrch(DBConnector *appl_db, vectorstart(); + + auto debug_executor = new ExecutableTimer(m_debugTimer, this, "FABRIC_DEBUG_POLL"); + Orch::addExecutor(debug_executor); + m_debugTimer->start(); } int FabricPortsOrch::getFabricPortList() @@ -336,6 +358,377 @@ void FabricPortsOrch::updateFabricPortState() } } +void FabricPortsOrch::updateFabricDebugCounters() +{ + if (!m_getFabricPortListDone) return; + + SWSS_LOG_ENTER(); + + // Get time + time_t now; + struct timespec time_now; + if (clock_gettime(CLOCK_MONOTONIC, &time_now) < 0) + { + return; + } + now = time_now.tv_sec; + + int fecIsolatedPolls = FEC_ISOLATE_POLLS; // monPollThreshIsolation + int fecUnisolatePolls = FEC_UNISOLATE_POLLS; // monPollThreshRecovery + int isolationPollsCfg = ISOLATION_POLLS_CFG; // monPollThreshIsolation + int recoveryPollsCfg = RECOVERY_POLLS_CFG; // monPollThreshRecovery + int errorRateCrcCellsCfg = ERROR_RATE_CRC_CELLS_CFG; // monErrThreshCrcCells + int errorRateRxCellsCfg = ERROR_RATE_RX_CELLS_CFG; // monErrThreshRxCells + std::vector constValues; + SWSS_LOG_INFO("updateFabricDebugCounters"); + + // Get debug countesrs (e.g. 
# of cells with crc errors, # of cells) + for (auto p : m_fabricLanePortMap) + { + int lane = p.first; + sai_object_id_t port = p.second; + + string key = FABRIC_PORT_PREFIX + to_string(lane); + // so basically port is the oid + vector fieldValues; + static const array cntNames = + { + "SAI_PORT_STAT_IF_IN_ERRORS", // cells with crc errors + "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS", // rx data cells + "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES" // cell with uncorrectable errors + }; + if (!m_fabricCounterTable->get(sai_serialize_object_id(port), fieldValues)) + { + SWSS_LOG_INFO("no port %s", sai_serialize_object_id(port).c_str()); + } + + uint64_t rxCells = 0; + uint64_t crcErrors = 0; + uint64_t codeErrors = 0; + for (const auto& fv : fieldValues) + { + const auto field = fvField(fv); + const auto value = fvValue(fv); + for (size_t cnt = 0; cnt != cntNames.size(); cnt++) + { + if (field == "SAI_PORT_STAT_IF_IN_ERRORS") + { + crcErrors = stoull(value); + } + else if (field == "SAI_PORT_STAT_IF_IN_FABRIC_DATA_UNITS") + { + rxCells = stoull(value); + } + else if (field == "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES") + { + codeErrors = stoull(value); + } + SWSS_LOG_INFO("port %s %s %lld %lld %lld at %s", + sai_serialize_object_id(port).c_str(), field.c_str(), (long long)crcErrors, + (long long)rxCells, (long long)codeErrors, asctime(gmtime(&now))); + } + } + // now we get the values of: + // *totalNumCells *cellsWithCrcErrors *cellsWithUncorrectableErrors + // + // Check if the error rate (crcErrors/numRxCells) is greater than configured error threshold + // (errorRateCrcCellsCfg/errorRateRxCellsCfg). 
+ // This is changing to check (crcErrors * errorRateRxCellsCfg) > (numRxCells * errorRateCrcCellsCfg) + // Default value is: (crcErrors * 61035156) > (numRxCells * 1) + // numRxCells = snmpBcmRxDataCells + snmpBcmRxControlCells + // As we don't have snmpBcmRxControlCells polled right now, + // we can use snmpBcmRxDataCells only and add snmpBcmRxControlCells later when it is getting polled. + // + // In STATE_DB, add several new attribute for each port: + // consecutivePollsWithErrors POLL_WITH_ERRORS + // consecutivePollsWithNoErrors POLL_WITH_NO_ERRORS + // consecutivePollsWithFecErrs POLL_WITH_FEC_ERRORS + // consecutivePollsWithNoFecErrs POLL_WITH_NOFEC_ERRORS + // + // skipErrorsOnLinkupCount SKIP_ERR_ON_LNKUP_CNT -- for skip all errors during boot up time + // skipCrcErrorsOnLinkupCount SKIP_CRC_ERR_ON_LNKUP_CNT + // skipFecErrorsOnLinkupCount SKIP_FEC_ERR_ON_LNKUP_CNT + // removeProblemLinkCount RM_PROBLEM_LNK_CNT -- this is for feature of remove a flaky link permanently + + int consecutivePollsWithErrors = 0; + int consecutivePollsWithNoErrors = 0; + int consecutivePollsWithFecErrs = 0; + int consecutivePollsWithNoFecErrs = 0; + + int skipCrcErrorsOnLinkupCount = 0; + int skipFecErrorsOnLinkupCount = 0; + uint64_t prevRxCells = 0; + uint64_t prevCrcErrors = 0; + uint64_t prevCodeErrors = 0; + + uint64_t testCrcErrors = 0; + uint64_t testCodeErrors = 0; + + int autoIsolated = 0; + string lnkStatus = "down"; + string testState = "product"; + + // Get the consecutive polls from the state db + std::vector values; + string valuePt; + bool exist = m_stateTable->get(key, values); + if (!exist) + { + SWSS_LOG_INFO("No state infor for port %s", key.c_str()); + return; + } + for (auto val : values) + { + valuePt = fvValue(val); + if (fvField(val) == "STATUS") + { + lnkStatus = valuePt; + continue; + } + if (fvField(val) == "POLL_WITH_ERRORS") + { + consecutivePollsWithErrors = to_uint(valuePt); + continue; + } + if (fvField(val) == "POLL_WITH_NO_ERRORS") + { + 
consecutivePollsWithNoErrors = to_uint(valuePt); + continue; + } + if (fvField(val) == "POLL_WITH_FEC_ERRORS") + { + consecutivePollsWithFecErrs = to_uint(valuePt); + continue; + } + if (fvField(val) == "POLL_WITH_NOFEC_ERRORS") + { + consecutivePollsWithNoFecErrs = to_uint(valuePt); + continue; + } + if (fvField(val) == "SKIP_CRC_ERR_ON_LNKUP_CNT") + { + skipCrcErrorsOnLinkupCount = to_uint(valuePt); + continue; + } + if (fvField(val) == "SKIP_FEC_ERR_ON_LNKUP_CNT") + { + skipFecErrorsOnLinkupCount = to_uint(valuePt); + continue; + } + if (fvField(val) == "RX_CELLS") + { + prevRxCells = to_uint(valuePt); + continue; + } + if (fvField(val) == "CRC_ERRORS") + { + prevCrcErrors = to_uint(valuePt); + continue; + } + if (fvField(val) == "CODE_ERRORS") + { + prevCodeErrors = to_uint(valuePt); + continue; + } + if (fvField(val) == "AUTO_ISOLATED") + { + autoIsolated = to_uint(valuePt); + SWSS_LOG_INFO("port %s currently isolated: %s", key.c_str(),valuePt.c_str()); + continue; + } + if (fvField(val) == "TEST_CRC_ERRORS") + { + testCrcErrors = to_uint(valuePt); + continue; + } + if (fvField(val) == "TEST_CODE_ERRORS") + { + testCodeErrors = to_uint(valuePt); + continue; + } + if (fvField(val) == "TEST") + { + testState = valuePt; + continue; + } + } + + // checking crc errors + int maxSkipCrcCnt = MAX_SKIP_CRCERR_ON_LNKUP_POLLS; + if (testState == "TEST"){ + maxSkipCrcCnt = 2; + } + if (skipCrcErrorsOnLinkupCount < maxSkipCrcCnt) + { + skipCrcErrorsOnLinkupCount += 1; + valuePt = to_string(skipCrcErrorsOnLinkupCount); + m_stateTable->hset(key, "SKIP_CRC_ERR_ON_LNKUP_CNT", valuePt.c_str()); + SWSS_LOG_INFO("port %s updates SKIP_CRC_ERR_ON_LNKUP_CNT to %s %d", + key.c_str(), valuePt.c_str(), skipCrcErrorsOnLinkupCount); + // update error counters. 
+ prevCrcErrors = crcErrors; + } + else + { + uint64_t diffRxCells = 0; + uint64_t diffCrcCells = 0; + + diffRxCells = rxCells - prevRxCells; + if (testState == "TEST"){ + diffCrcCells = testCrcErrors - prevCrcErrors; + prevCrcErrors = 0; + isolationPollsCfg = isolationPollsCfg + 1; + } + else + { + diffCrcCells = crcErrors - prevCrcErrors; + prevCrcErrors = crcErrors; + } + bool isErrorRateMore = + ((diffCrcCells * errorRateRxCellsCfg) > + (diffRxCells * errorRateCrcCellsCfg)); + if (isErrorRateMore) + { + if (consecutivePollsWithErrors < isolationPollsCfg) + { + consecutivePollsWithErrors += 1; + consecutivePollsWithNoErrors = 0; + } + } else { + if (consecutivePollsWithNoErrors < recoveryPollsCfg) + { + consecutivePollsWithNoErrors += 1; + consecutivePollsWithErrors = 0; + } + } + SWSS_LOG_INFO("port %s diffCrcCells %lld", key.c_str(), (long long)diffCrcCells); + SWSS_LOG_INFO("consecutivePollsWithCRCErrs %d consecutivePollsWithNoCRCErrs %d", + consecutivePollsWithErrors, consecutivePollsWithNoErrors); + } + + // checking FEC errors + int maxSkipFecCnt = MAX_SKIP_FECERR_ON_LNKUP_POLLS; + if (testState == "TEST"){ + maxSkipFecCnt = 2; + } + if (skipFecErrorsOnLinkupCount < maxSkipFecCnt) + { + skipFecErrorsOnLinkupCount += 1; + valuePt = to_string(skipFecErrorsOnLinkupCount); + m_stateTable->hset(key, "SKIP_FEC_ERR_ON_LNKUP_CNT", valuePt.c_str()); + SWSS_LOG_INFO("port %s updates SKIP_FEC_ERR_ON_LNKUP_CNT to %s", + key.c_str(), valuePt.c_str()); + // update error counters + prevCodeErrors = codeErrors; + } + else + { + uint64_t diffCodeErrors = 0; + if (testState == "TEST"){ + diffCodeErrors = testCodeErrors - prevCodeErrors; + prevCodeErrors = 0; + fecIsolatedPolls = fecIsolatedPolls + 1; + } + else + { + diffCodeErrors = codeErrors - prevCodeErrors; + prevCodeErrors = codeErrors; + } + SWSS_LOG_INFO("port %s diffCodeErrors %lld", key.c_str(), (long long)diffCodeErrors); + if (diffCodeErrors > 0) + { + if (consecutivePollsWithFecErrs < fecIsolatedPolls) + { + 
consecutivePollsWithFecErrs += 1; + consecutivePollsWithNoFecErrs = 0; + } + } + else if (diffCodeErrors <= 0) + { + if (consecutivePollsWithNoFecErrs < fecUnisolatePolls) + { + consecutivePollsWithNoFecErrs += 1; + consecutivePollsWithFecErrs = 0; + } + } + SWSS_LOG_INFO("consecutivePollsWithFecErrs %d consecutivePollsWithNoFecErrs %d", + consecutivePollsWithFecErrs,consecutivePollsWithNoFecErrs); + SWSS_LOG_INFO("fecUnisolatePolls %d", fecUnisolatePolls); + } + + // take care serdes link shut state setting + if (lnkStatus == "up") + { + // debug information + SWSS_LOG_INFO("port %s status up autoIsolated %d", + key.c_str(), autoIsolated); + SWSS_LOG_INFO("consecutivePollsWithErrors %d consecutivePollsWithFecErrs %d", + consecutivePollsWithErrors, consecutivePollsWithFecErrs); + SWSS_LOG_INFO("consecutivePollsWithNoErrors %d consecutivePollsWithNoFecErrs %d", + consecutivePollsWithNoErrors, consecutivePollsWithNoFecErrs); + if (autoIsolated == 0 && (consecutivePollsWithErrors >= isolationPollsCfg + || consecutivePollsWithFecErrs >= fecIsolatedPolls)) + { + // Link needs to be isolated. + SWSS_LOG_INFO("port %s auto isolated", key.c_str()); + autoIsolated = 1; + valuePt = to_string(autoIsolated); + m_stateTable->hset(key, "AUTO_ISOLATED", valuePt); + SWSS_LOG_NOTICE("port %s set AUTO_ISOLATED %s", key.c_str(), valuePt.c_str()); + // Call SAI api here to actually isolate the link + } + else if (autoIsolated == 1 && consecutivePollsWithNoErrors >= recoveryPollsCfg + && consecutivePollsWithNoFecErrs >= fecUnisolatePolls) + { + // Link is isolated, but no longer needs to be. + SWSS_LOG_INFO("port %s healthy again", key.c_str()); + autoIsolated = 0; + valuePt = to_string(autoIsolated); + m_stateTable->hset(key, "AUTO_ISOLATED", valuePt); + SWSS_LOG_NOTICE("port %s set AUTO_ISOLATED %s", key.c_str(), valuePt.c_str()); + // Can we call SAI api here to unisolate the link? 
+ } + } + else + { + SWSS_LOG_INFO("link down"); + } + + // Update state_db with new data + valuePt = to_string(consecutivePollsWithErrors); + m_stateTable->hset(key, "POLL_WITH_ERRORS", valuePt.c_str()); + SWSS_LOG_INFO("port %s set POLL_WITH_ERRORS %s", key.c_str(), valuePt.c_str()); + + valuePt = to_string(consecutivePollsWithNoErrors); + m_stateTable->hset(key, "POLL_WITH_NO_ERRORS", valuePt.c_str()); + SWSS_LOG_INFO("port %s set POLL_WITH_NO_ERRORS %s", key.c_str(), valuePt.c_str()); + + valuePt = to_string(consecutivePollsWithFecErrs); + m_stateTable->hset(key, "POLL_WITH_FEC_ERRORS", valuePt.c_str()); + SWSS_LOG_INFO("port %s set POLL_WITH_FEC_ERRORS %s", key.c_str(), valuePt.c_str()); + + valuePt = to_string(consecutivePollsWithNoFecErrs); + m_stateTable->hset(key, "POLL_WITH_NOFEC_ERRORS", valuePt.c_str()); + SWSS_LOG_INFO("port %s set POLL_WITH_NOFEC_ERRORS %s", + key.c_str(), valuePt.c_str()); + + valuePt = to_string(rxCells); + m_stateTable->hset(key, "RX_CELLS", valuePt.c_str()); + SWSS_LOG_INFO("port %s set RX_CELLS %s", + key.c_str(), valuePt.c_str()); + + valuePt = to_string(prevCrcErrors); + m_stateTable->hset(key, "CRC_ERRORS", valuePt.c_str()); + SWSS_LOG_INFO("port %s set CRC_ERRORS %s", + key.c_str(), valuePt.c_str()); + + valuePt = to_string(prevCodeErrors); + m_stateTable->hset(key, "CODE_ERRORS", valuePt.c_str()); + SWSS_LOG_INFO("port %s set CODE_ERRORS %s", + key.c_str(), valuePt.c_str()); + } +} + void FabricPortsOrch::doTask() { } @@ -348,13 +741,30 @@ void FabricPortsOrch::doTask(swss::SelectableTimer &timer) { SWSS_LOG_ENTER(); - if (!m_getFabricPortListDone) + if (timer.getFd() == m_timer->getFd()) { - getFabricPortList(); - } + if (!m_getFabricPortListDone) + { + getFabricPortList(); + } - if (m_getFabricPortListDone) + if (m_getFabricPortListDone) + { + updateFabricPortState(); + } + } + else if (timer.getFd() == m_debugTimer->getFd()) { - updateFabricPortState(); + if (!m_getFabricPortListDone) + { + // Skip collecting debug 
information + // as we don't have all fabric ports yet. + return; + } + + if (m_getFabricPortListDone) + { + updateFabricDebugCounters(); + } } } diff --git a/orchagent/fabricportsorch.h b/orchagent/fabricportsorch.h index de7ee7a7b0..4c274cba00 100644 --- a/orchagent/fabricportsorch.h +++ b/orchagent/fabricportsorch.h @@ -24,13 +24,17 @@ class FabricPortsOrch : public Orch, public Subject shared_ptr m_state_db; shared_ptr m_counter_db; shared_ptr m_flex_db; + shared_ptr m_appl_db; unique_ptr
m_stateTable; unique_ptr
m_portNameQueueCounterTable; unique_ptr
m_portNamePortCounterTable; + unique_ptr
m_fabricCounterTable; + unique_ptr
m_applTable; unique_ptr m_flexCounterTable; swss::SelectableTimer *m_timer = nullptr; + swss::SelectableTimer *m_debugTimer = nullptr; FlexCounterManager port_stat_manager; FlexCounterManager queue_stat_manager; @@ -46,6 +50,7 @@ class FabricPortsOrch : public Orch, public Subject int getFabricPortList(); void generatePortStats(); void updateFabricPortState(); + void updateFabricDebugCounters(); void doTask() override; void doTask(Consumer &consumer); diff --git a/tests/test_fabric_port_isolation.py b/tests/test_fabric_port_isolation.py new file mode 100644 index 0000000000..d92cb73fe1 --- /dev/null +++ b/tests/test_fabric_port_isolation.py @@ -0,0 +1,65 @@ +import random +from dvslib.dvs_database import DVSDatabase +from dvslib.dvs_common import PollingConfig + + +class TestVirtualChassis(object): + def test_voq_switch_fabric_link(self, vst): + """Test basic fabric link monitoring infrastructure in VOQ switchs. + + This test validates that fabric links get isolated if they experienced some errors. + And the link get unisolated if it clears the error for several consecutive polls. + """ + + dvss = vst.dvss + for name in dvss.keys(): + dvs = dvss[name] + # Get the config information and choose a linecard or fabric card to test. + config_db = dvs.get_config_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + + cfg_switch_type = metatbl.get("switch_type") + if cfg_switch_type == "fabric": + + # get state_db infor + sdb = dvs.get_state_db() + # key + port = "PORT1" + # There are 16 fabric ports in the test environment. + portNum = random.randint(1, 16) + port = "PORT"+str(portNum) + # wait for link monitoring algorithm skips init pollings + max_poll = PollingConfig(polling_interval=60, timeout=1200, strict=True) + if sdb.get_entry("FABRIC_PORT_TABLE", port)['STATUS'] == 'up': + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"SKIP_FEC_ERR_ON_LNKUP_CNT": "2"}, polling_config=max_poll) + try: + # clean up the system for the testing port. 
+ # set TEST_CRC_ERRORS to 0 + # set TEST_CODE_ERRORS to 0 + # set TEST to "TEST" + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS":"0"}) + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CODE_ERRORS": "0"}) + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST": "TEST"}) + # inject testing errors and wait for link get isolated. + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS": "2"}) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"AUTO_ISOLATED": "1"}, polling_config=max_poll) + + # clear the testing errors and wait for link get unisolated. + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS": "0"}) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", port, {"AUTO_ISOLATED": "0"}, polling_config=max_poll) + finally: + # cleanup + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CRC_ERRORS": "0"}) + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST_CODE_ERRORS": "0"}) + sdb.update_entry("FABRIC_PORT_TABLE", port, {"TEST": "product"}) + else: + print("The link ", port, " is down") + else: + print("We do not check switch type:", cfg_switch_type) + + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass + diff --git a/tests/virtual_chassis/8/default_config.json b/tests/virtual_chassis/8/default_config.json index 523ab8e450..b50c86ffff 100644 --- a/tests/virtual_chassis/8/default_config.json +++ b/tests/virtual_chassis/8/default_config.json @@ -9,5 +9,87 @@ "start_chassis_db" : "1", "comment" : "default_config for a vs that runs chassis_db" } + }, + "FABRIC_PORT": { + "Fabric0": { + "alias": "Fabric0", + "isolateStatus": "False", + "lanes": "0" + }, + "Fabric1": { + "alias": "Fabric1", + "isolateStatus": "False", + "lanes": "1" + }, + "Fabric2": { + "alias": "Fabric2", + "isolateStatus": "False", + "lanes": "2" + }, + "Fabric3": { + "alias": "Fabric3", + "isolateStatus": "False", + "lanes": "3" + }, + 
"Fabric4": { + "alias": "Fabric4", + "isolateStatus": "False", + "lanes": "4" + }, + "Fabric5": { + "alias": "Fabric5", + "isolateStatus": "False", + "lanes": "5" + }, + "Fabric6": { + "alias": "Fabric6", + "isolateStatus": "False", + "lanes": "6" + }, + "Fabric7": { + "alias": "Fabric7", + "isolateStatus": "False", + "lanes": "7" + }, + "Fabric8": { + "alias": "Fabric8", + "isolateStatus": "False", + "lanes": "8" + }, + "Fabric9": { + "alias": "Fabric9", + "isolateStatus": "False", + "lanes": "9" + }, + "Fabric10": { + "alias": "Fabric10", + "isolateStatus": "False", + "lanes": "10" + }, + "Fabric11": { + "alias": "Fabric11", + "isolateStatus": "False", + "lanes": "11" + }, + "Fabric12": { + "alias": "Fabric12", + "isolateStatus": "False", + "lanes": "12" + }, + "Fabric13": { + "alias": "Fabric13", + "isolateStatus": "False", + "lanes": "13" + }, + "Fabric14": { + "alias": "Fabric14", + "isolateStatus": "False", + "lanes": "14" + }, + "Fabric15": { + "alias": "Fabric15", + "isolateStatus": "False", + "lanes": "15" + } } }